SOFIE_common.hxx
#ifndef TMVA_SOFIE_SOFIE_COMMON
#define TMVA_SOFIE_SOFIE_COMMON

#include "TMVA/RTensor.hxx"

#include "ROOT/RSpan.hxx"

#include <stdexcept>
#include <type_traits>
#include <cstdint>
#include <cstring>
#include <string>
#include <vector>
#include <memory>
#include <regex>
#include <sstream>
#include <iostream>

namespace TMVA{
namespace Experimental{
namespace SOFIE{

//typedef RTensor tensor_t;

enum class ETensorType{
   UNDEFINED = 0, FLOAT = 1, UINT8 = 2, INT8 = 3, UINT16 = 4, INT16 = 5, INT32 = 6, INT64 = 7, STRING = 8, BOOL = 9, //order sensitive
   FLOAT16 = 10, DOUBLE = 11, UINT32 = 12, UINT64 = 13, COMPLEX64 = 14, COMPLEX128 = 15, BFLOAT16 = 16
};

typedef std::int64_t int_t;

std::string ConvertTypeToString(ETensorType type);
ETensorType ConvertStringToType(std::string type);

struct Dim{
   bool isParam = false;
   size_t dim = 0;
   std::string param;

   // default constructor (for I/O)
   Dim() {}

   // constructor for a parametric dimension with the option to pass a default dim value
   Dim(const std::string & p, size_t d = 0) : isParam(true), dim(d), param(p) {}

   // constructor for a non-parametric dimension
   Dim(size_t d) : dim(d) {}

   std::string GetVal() const {
      return (isParam) ? param : std::to_string(dim);
   }
};
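
// Usage sketch (illustrative, not part of the original header): a Dim either
// wraps a fixed size or a named, parametric dimension such as a batch size.
//    Dim fixed(16);           // GetVal() returns "16"
//    Dim batch("bsize");      // parametric, GetVal() returns "bsize"
//    Dim batch1("bsize", 1);  // parametric with default dim value 1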

struct InputTensorInfo{
   ETensorType type;
   std::vector<Dim> shape;
};

struct TensorInfo{
   ETensorType type;
   std::vector<size_t> shape;
};

struct DynamicTensorInfo{
   ETensorType type;
   std::vector<Dim> shape;
};

std::vector<Dim> ConvertShapeToDim(std::vector<size_t> shape);

std::vector<size_t> ConvertShapeToInt(std::vector<Dim> shape);

std::size_t ConvertShapeToLength(std::vector<size_t> shape);

std::string ConvertShapeToString(std::vector<size_t> shape);
std::string ConvertDynamicShapeToString(std::vector<Dim> shape);
// std::string ConvertShapeToString(std::vector<Dim> shape) {
//    return ConvertDynamicShapeToString(shape);
// }

std::string ConvertDynamicShapeToLength(std::vector<Dim> shape);
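
// Usage sketch (illustrative, not part of the original header) for the shape
// helpers declared above:
//    std::vector<size_t> shape{2, 3, 4};
//    std::size_t len = ConvertShapeToLength(shape);    // 24 elements
//    std::string str = ConvertShapeToString(shape);    // printable form of the shape
//    std::vector<Dim> dims = ConvertShapeToDim(shape); // dynamic (Dim-based) representation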

class InitializedTensor {
public:
   InitializedTensor() = default;
   InitializedTensor(ETensorType type, std::span<std::size_t> shape, std::shared_ptr<void> data, bool typeConstant = false)
      : fConstant(typeConstant), fType{type}, fShape{shape.begin(), shape.end()}, fData{data}
   {
   }

   ETensorType const &type() const { return fType; }
   std::vector<std::size_t> const &shape() const { return fShape; }
   std::shared_ptr<void> const &sharedptr() const { return fData; }
   // query if tensor comes from a Constant operator
   bool IsConstantTensor() const { return fConstant; }
   // query if tensor needs to be written in a weight file
   bool IsWeightTensor() const { return !fConstant && !fIsNotWritable; }

   // mark the tensor as not writable, i.e. its values need not be saved
   void SetNotWritable() { fIsNotWritable = true; }

   template <class T = void>
   T const *data() const
   {
      return static_cast<T const *>(fData.get());
   }

   void CastSharedToPersistent()
   {
      // We only calculate fSize here, because it is only used for IO to know
      // the size of the persistent data.
      fSize = 1;
      for (std::size_t item : fShape) {
         fSize *= static_cast<int>(item);
      }
      switch (fType) {
      case ETensorType::FLOAT: fSize *= sizeof(float); break;
      case ETensorType::DOUBLE: fSize *= sizeof(double); break;
      case ETensorType::INT32: fSize *= sizeof(int32_t); break;
      case ETensorType::INT64: fSize *= sizeof(int64_t); break;
      case ETensorType::BOOL: fSize *= sizeof(bool); break;
      default:
         throw std::runtime_error("TMVA::SOFIE doesn't yet support serialising data-type " +
                                  ConvertTypeToString(fType));
      }
      fPersistentData = static_cast<char *>(fData.get());
   }
   void CastPersistentToShared()
   {
      // If there is no persistent data, do nothing
      if (fSize == 0 || fPersistentData == nullptr) {
         return;
      }

      // Nothing to be done if the pointed-to data is the same
      if (fPersistentData == static_cast<char *>(fData.get())) {
         return;
      }

      // Initialize the shared_ptr
      fData = std::shared_ptr<void>{malloc(fSize), free};
      std::memcpy(fData.get(), fPersistentData, fSize);

      // Make sure the data read from disk doesn't leak: delete the
      // persistent copy and reset its bookkeeping
      delete[] fPersistentData;
      fPersistentData = nullptr;
      fSize = 0;
   }

private:
   bool fConstant = false;          ///< Flag specifying if tensor is a Constant one (coming from a Constant operator)
   bool fIsNotWritable = false;     ///< Flag to indicate that tensor values do not need to be written as weight or generated code
   ETensorType fType;               ///< Encodes the type of the data
   std::vector<std::size_t> fShape; ///< The shape of the data in terms of elements in each dimension
   std::shared_ptr<void> fData;     ///<! Transient shared data
   int fSize = 0;                   ///< The size of the persistent data in bytes (not the number of elements!)
   char *fPersistentData = nullptr; ///<[fSize] Persistent version of the data
};
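
// Usage sketch (illustrative, not part of the original header): wrapping an
// owning buffer of 6 floats in an InitializedTensor of shape {2, 3}; the
// std::span argument is constructed from the shape vector.
//    std::vector<std::size_t> shape{2, 3};
//    std::shared_ptr<void> buf(malloc(6 * sizeof(float)), free);
//    InitializedTensor t(ETensorType::FLOAT, shape, buf);
//    const float *p = t.data<float>();   // typed view of the raw buffer
//    bool w = t.IsWeightTensor();        // true: not constant and writable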

template <typename T>
ETensorType GetTemplatedType(T /*obj*/) {
   if (std::is_same<T, float>::value) return ETensorType::FLOAT;
   if (std::is_same<T, uint8_t>::value) return ETensorType::UINT8;
   if (std::is_same<T, int8_t>::value) return ETensorType::INT8;
   if (std::is_same<T, uint16_t>::value) return ETensorType::UINT16;
   if (std::is_same<T, int16_t>::value) return ETensorType::INT16;
   if (std::is_same<T, int32_t>::value) return ETensorType::INT32;
   if (std::is_same<T, int64_t>::value) return ETensorType::INT64;
   if (std::is_same<T, std::string>::value) return ETensorType::STRING;
   if (std::is_same<T, bool>::value) return ETensorType::BOOL;
   //float16 unimplemented
   if (std::is_same<T, double>::value) return ETensorType::DOUBLE;
   if (std::is_same<T, uint32_t>::value) return ETensorType::UINT32;
   if (std::is_same<T, uint64_t>::value) return ETensorType::UINT64;
   //complex 64, complex 128 and bfloat 16 unimplemented
   return ETensorType::UNDEFINED;
}
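
// Usage sketch (illustrative, not part of the original header): the argument
// is used only for template-type deduction, its value is ignored.
//    ETensorType tf = GetTemplatedType(float{});    // ETensorType::FLOAT
//    ETensorType ti = GetTemplatedType(int64_t{});  // ETensorType::INT64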

namespace UTILITY{
// Check if two shapes are equal
bool AreSameShape(const std::vector<size_t>&, const std::vector<size_t>&);
bool AreSameShape(const std::vector<size_t>&, const std::vector<Dim>&);
bool AreSameShape(const std::vector<Dim>&, const std::vector<Dim>&);


// Multidirectional broadcast a list of tensors to the same shape
std::vector<size_t> MultidirectionalBroadcastShape(std::vector<std::vector<size_t>>);

// Unidirectional broadcast two shapes to the same shape
std::vector<size_t> UnidirectionalBroadcastShape(std::vector<size_t>, std::vector<size_t>);

std::string Clean_name(std::string input_tensor_name);

template<typename T>
T* BroadcastConvBias(const T* data, const size_t channel, const std::vector<size_t>& targetShape) {
   size_t size = targetShape.size();
   if (targetShape[1] != channel) {
      std::stringstream ss;
      ss << "TMVA::SOFIE - Error broadcasting Conv Bias of shape {";
      ss << std::to_string(channel);
      ss << "} to ";
      ss << ConvertShapeToString(targetShape);
      throw std::runtime_error(ss.str());
   }

   size_t targetLength = ConvertShapeToLength(targetShape);
   T* newData = new T[targetLength];

   if (targetLength == channel) {
      std::copy(data, data + channel, newData);
      return newData;
   }

   // cStride = outDepth * outHeight * outWidth
   size_t cStride = 1;
   for (size_t i = 2; i < size; i++)
      cStride *= targetShape[i];
   // Broadcast each element of the bias to a vector of size cStride and concatenate them
   // into a vector of size channel * cStride
   for (size_t i = 0; i < channel; i++) {
      std::fill(newData + i * cStride, newData + (i + 1) * cStride, data[i]);
   }
   // Broadcast newData[0...channel * cStride) to newData[0...batch * channel * cStride)
   size_t batch = targetShape[0];
   size_t bStride = channel * cStride;
   for (size_t i = 1; i < batch; i++) {
      std::copy(newData, newData + bStride, newData + i * bStride);
   }
   return newData;
}
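
// Usage sketch (illustrative, not part of the original header): broadcasting
// a per-channel bias {0.5, -1} to an output of shape {1, 2, 2, 2}. The caller
// owns the returned buffer and must delete[] it.
//    float bias[2] = {0.5f, -1.f};
//    std::vector<size_t> target{1, 2, 2, 2};
//    float *b = BroadcastConvBias(bias, 2, target);
//    // b = {0.5, 0.5, 0.5, 0.5, -1, -1, -1, -1}
//    delete[] b;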

// Broadcast a tensor from shape to targetShape according to numpy broadcasting rules
// See more at https://numpy.org/doc/stable/user/basics.broadcasting.html
// and https://github.com/onnx/onnx/blob/main/docs/Broadcasting.md .
template<typename T>
T* BroadcastTensor(const T* data, const std::vector<size_t>& shape, const std::vector<size_t>& targetShape) {
   // Rank of the shapes (shape and targetShape must have the same rank)
   size_t size = shape.size();
   // Current length of the broadcasted tensor
   size_t curLength = ConvertShapeToLength(shape);
   size_t targetLength = ConvertShapeToLength(targetShape);
   // Allocate the broadcasted tensor and initialise it with the input data
   T* broadcastedData = new T[targetLength];
   std::copy(data, data + curLength, broadcastedData);
   // Product of the previous dimensions of targetShape
   size_t arrayNum = 1;
   // Scratch buffer for the new broadcasted data
   std::vector<T> newData(targetLength);

   for (size_t idx = 0; idx < size; idx++) {
      size_t dim = shape[idx];
      size_t targetDim = targetShape[idx];
      if (dim == 1 && targetDim > 1) {
         // The new length of the data after broadcasting this dimension
         size_t newLength = curLength * targetDim;
         // View the data as a list of arrayNum arrays of size arrayLength
         size_t arrayLength = curLength / arrayNum;
         // Broadcast each array targetDim times
         if (arrayLength > 1) {
            // If each array has at least two elements
            for (size_t arrayIdx = 0; arrayIdx < arrayNum; arrayIdx++) {
               for (size_t targetIdx = 0; targetIdx < targetDim; targetIdx++) {
                  size_t offset = arrayIdx * arrayLength * targetDim + targetIdx * arrayLength;
                  std::copy(broadcastedData + arrayIdx * arrayLength,
                            broadcastedData + (arrayIdx + 1) * arrayLength,
                            newData.begin() + offset);
               }
            }
         } else {
            // If each array has a single element
            for (size_t arrayIdx = 0; arrayIdx < arrayNum; arrayIdx++) {
               std::fill(newData.begin() + arrayIdx * targetDim,
                         newData.begin() + (arrayIdx + 1) * targetDim, broadcastedData[arrayIdx]);
            }
         }
         // Update the current length
         curLength = newLength;
         // Update the broadcasted data
         std::copy(newData.begin(), newData.begin() + newLength, broadcastedData);
      }
      // Update the number of arrays
      arrayNum *= targetDim;
   }
   return broadcastedData;
}
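
// Usage sketch (illustrative, not part of the original header): broadcasting
// a {2, 1} column vector to {2, 3}; each element is repeated along the
// broadcast axis, and the caller must delete[] the returned buffer.
//    float in[2] = {1.f, 2.f};
//    float *out = BroadcastTensor(in, {2, 1}, {2, 3});
//    // out = {1, 1, 1, 2, 2, 2}
//    delete[] out;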

// Unidirectional broadcasting shape to targetShape
template<typename T>
T* UnidirectionalBroadcast(const T* data, const std::vector<size_t>& shape, const std::vector<size_t>& targetShape) {
   // Prepend shape with ones
   if (shape.size() < targetShape.size()) {
      size_t targetSize = targetShape.size();
      std::vector<size_t> newShape(targetSize, 1);
      size_t offset = targetSize - shape.size();
      std::copy(shape.begin(), shape.end(), newShape.begin() + offset);
      return BroadcastTensor<T>(data, newShape, targetShape);
   }
   return BroadcastTensor<T>(data, shape, targetShape);
}
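
// Usage sketch (illustrative, not part of the original header): the shape {3}
// is implicitly prepended with ones to {1, 3} before broadcasting to {2, 3}.
//    float in[3] = {1.f, 2.f, 3.f};
//    float *out = UnidirectionalBroadcast(in, {3}, {2, 3});
//    // out = {1, 2, 3, 1, 2, 3}
//    delete[] out;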

/// compute stride of a tensor given its shape (assume layout is row-major)
std::vector<size_t> ComputeStrideFromShape(const std::vector<size_t> & shape);
std::vector<Dim> ComputeStrideFromShape(const std::vector<Dim> & shape);
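
// For instance (illustrative, not part of the original header), row-major
// strides are the suffix products of the shape:
//    std::vector<size_t> st = ComputeStrideFromShape({2, 3, 4});
//    // st = {12, 4, 1}: element (i, j, k) sits at offset i*12 + j*4 + k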

/// function to check that a >= 0 and a < b using a single comparison:
/// casting to unsigned turns any negative a into a very large value,
/// so both conditions collapse into one unsigned comparison
inline bool is_a_ge_zero_and_a_lt_b(int a, int b) {
   return static_cast<unsigned>(a) < static_cast<unsigned>(b);
}
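
// For instance (illustrative, not part of the original header):
//    is_a_ge_zero_and_a_lt_b(-1, 5);   // false: (unsigned)(-1) is huge
//    is_a_ge_zero_and_a_lt_b( 3, 5);   // true: 0 <= 3 < 5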


/// im2col : efficient function to re-arrange input data of convolution to a matrix
/// that can be used by BLAS
/// Use trick to loop on each element of filtered region first and follow input data layout
/// By doing this reads and writes are of consecutive data in memory and one gains in efficiency
/// The resulting matrix will be already transposed and can be used directly in BLAS
/// since output will be a matrix : (channels*kernel_h*kernel_w , output_h*output_w)
/// Example: with an input matrix
///    a1 a2 a3
///    b1 b2 b3    and a 2x2 kernel (k1,k2,k3,k4) and padding 1 :
///    c1 c2 c3
/// output will be a matrix (4 x 16)
/// the routine will follow the output order :
/// first all elements which will be operated on by k1, then k2, then k3 and k4
/// -> ( 0  0  0  0  0 a1 a2 a3  0 b1 b2 b3  0 c1 c2 c3 )  all elements for k1
///    ( 0  0  0  0 a1 a2 a3  0 b1 b2 b3  0 c1 c2 c3  0 )  for k2
///    ( 0 a1 a2 a3  0 b1 b2 b3  0 c1 c2 c3  0  0  0  0 )  for k3
///    ( a1 a2 a3  0 b1 b2 b3  0 c1 c2 c3  0  0  0  0  0 )  for k4
///
template <typename T>
void Im2col(const T *data_im, const int channels, const int height, const int width, const int kernel_h,
            const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w,
            const int dilation_h, const int dilation_w, T *data_col)
{
   const int output_h = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
   const int output_w = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
   const int channel_size = height * width;
   for (int channel = channels; channel--; data_im += channel_size) {
      for (int kernel_row = 0; kernel_row < kernel_h; kernel_row++) {
         for (int kernel_col = 0; kernel_col < kernel_w; kernel_col++) {
            int input_row = -pad_h + kernel_row * dilation_h;
            for (int output_rows = output_h; output_rows; output_rows--) {
               if (!is_a_ge_zero_and_a_lt_b(input_row, height)) {
                  for (int output_cols = output_w; output_cols; output_cols--) {
                     *(data_col++) = 0;
                  }
               } else {
                  int input_col = -pad_w + kernel_col * dilation_w;
                  for (int output_col = output_w; output_col; output_col--) {
                     if (is_a_ge_zero_and_a_lt_b(input_col, width)) {
                        *(data_col++) = data_im[input_row * width + input_col];
                     } else {
                        *(data_col++) = 0;
                     }
                     input_col += stride_w;
                  }
               }
               input_row += stride_h;
            }
         }
      }
   }
}
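
// Usage sketch (illustrative, not part of the original header): lowering the
// single-channel 3x3 input of the example above with a 2x2 kernel, padding 1,
// stride 1 and dilation 1. The result has channels*kernel_h*kernel_w = 4 rows
// and output_h*output_w = 16 columns, stored contiguously in data_col.
//    float in[9] = {1, 2, 3, 4, 5, 6, 7, 8, 9};
//    std::vector<float> col(4 * 16);
//    Im2col(in, /*channels*/ 1, /*h,w*/ 3, 3, /*kernel*/ 2, 2,
//           /*pad*/ 1, 1, /*stride*/ 1, 1, /*dilation*/ 1, 1, col.data());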

/// 3d implementation
template <typename T>
void Im2col_3d(const T *data_im, const int channels,
               const int depth, const int height, const int width,
               const int kernel_d, const int kernel_h, const int kernel_w,
               const int pad_d, const int pad_h, const int pad_w,
               const int stride_d, const int stride_h, const int stride_w,
               const int dilation_d, const int dilation_h, const int dilation_w, T *data_col)
{
   const int output_h = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
   const int output_w = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
   const int output_d = (depth + 2 * pad_d - (dilation_d * (kernel_d - 1) + 1)) / stride_d + 1;
   const int channel_size = height * width * depth;
   // assume data are in the order c x d x h x w
   for (int channel = channels; channel--; data_im += channel_size) {
      for (int kernel_depth = 0; kernel_depth < kernel_d; kernel_depth++) {
         for (int kernel_row = 0; kernel_row < kernel_h; kernel_row++) {
            for (int kernel_col = 0; kernel_col < kernel_w; kernel_col++) {
               int input_dep = -pad_d + kernel_depth * dilation_d;
               for (int output_dep = output_d; output_dep; output_dep--) {
                  if (!is_a_ge_zero_and_a_lt_b(input_dep, depth)) {
                     for (int output_rows = output_h; output_rows; output_rows--) {
                        for (int output_cols = output_w; output_cols; output_cols--) {
                           *(data_col++) = 0;
                        }
                     }
                  } else {
                     int input_row = -pad_h + kernel_row * dilation_h;
                     for (int output_rows = output_h; output_rows; output_rows--) {
                        if (!is_a_ge_zero_and_a_lt_b(input_row, height)) {
                           for (int output_cols = output_w; output_cols; output_cols--) {
                              *(data_col++) = 0;
                           }
                        } else {
                           int input_col = -pad_w + kernel_col * dilation_w;
                           for (int output_col = output_w; output_col; output_col--) {
                              if (is_a_ge_zero_and_a_lt_b(input_col, width)) {
                                 *(data_col++) = data_im[input_dep * width * height + input_row * width + input_col];
                              } else {
                                 *(data_col++) = 0;
                              }
                              input_col += stride_w;
                           }
                        }
                        input_row += stride_h;
                     }
                  }
                  input_dep += stride_d;
               }
            }
         }
      }
   }
}

template <typename Dtype>
void col2im(const Dtype* data_col, const int channels,
            const int height, const int width, const int kernel_h, const int kernel_w,
            const int pad_h, const int pad_w,
            const int stride_h, const int stride_w,
            const int dilation_h, const int dilation_w,
            Dtype* data_im) {
   // the output buffer data_im must start from zero, since the kernel
   // contributions are accumulated into it
   std::fill(data_im, data_im + height * width * channels, Dtype(0));
   const int output_h = (height + 2 * pad_h -
                         (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
   const int output_w = (width + 2 * pad_w -
                         (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
   const int channel_size = height * width;
   for (int channel = channels; channel--; data_im += channel_size) {
      for (int kernel_row = 0; kernel_row < kernel_h; kernel_row++) {
         for (int kernel_col = 0; kernel_col < kernel_w; kernel_col++) {
            int input_row = -pad_h + kernel_row * dilation_h;
            for (int output_rows = output_h; output_rows; output_rows--) {
               if (!is_a_ge_zero_and_a_lt_b(input_row, height)) {
                  data_col += output_w;
               } else {
                  int input_col = -pad_w + kernel_col * dilation_w;
                  for (int output_col = output_w; output_col; output_col--) {
                     if (is_a_ge_zero_and_a_lt_b(input_col, width)) {
                        data_im[input_row * width + input_col] += *data_col;
                     }
                     data_col++;
                     input_col += stride_w;
                  }
               }
               input_row += stride_h;
            }
         }
      }
   }
}

} // end namespace UTILITY

namespace BLAS{
extern "C" void sgemm_(const char * transa, const char * transb, const int * m, const int * n, const int * k,
                       const float * alpha, const float * A, const int * lda, const float * B, const int * ldb,
                       const float * beta, float * C, const int * ldc);
}//BLAS
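
// Usage sketch (illustrative, not part of the original header): sgemm_ follows
// the Fortran (column-major) convention, so the row-major product C = A*B
// (A 2x3, B 3x2) is obtained by computing B^T * A^T, i.e. by swapping the operands:
//    float A[6] = {1, 2, 3, 4, 5, 6};   // 2x3, row-major
//    float B[6] = {1, 0, 0, 1, 1, 1};   // 3x2, row-major
//    float C[4];                        // 2x2, row-major result
//    char t = 'n'; int m = 2, n = 2, k = 3; float one = 1.f, zero = 0.f;
//    BLAS::sgemm_(&t, &t, &m, &n, &k, &one, B, &m, A, &k, &zero, C, &m);
//    // C = {4, 5, 10, 11}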

struct GNN_Data {
   RTensor<float> node_data;   // the node feature data, tensor with shape (num_nodes, num_node_features)
   RTensor<float> edge_data;   // the edge feature data, tensor with shape (num_edges, num_edge_features)
   RTensor<float> global_data; // the global features, tensor with shape (1, num_global_features)
   RTensor<int> edge_index;    // the edge index (receivers and senders for each edge), tensor with shape (2, num_edges);
                               // edge_index[0,:] are the receivers and edge_index[1,:] are the senders

   // a default constructor is needed since RTensor does not have one
   GNN_Data(): node_data(RTensor<float>({})), edge_data(RTensor<float>({})), global_data(RTensor<float>({})), edge_index(RTensor<int>({})) {}
};

template<typename T>
TMVA::Experimental::RTensor<T> Concatenate(TMVA::Experimental::RTensor<T> & t1, TMVA::Experimental::RTensor<T> & t2, int axis = 0)
{
   // concatenate the tensors along the given axis; the shapes must be identical except in the concatenated axis
   if (t1.GetMemoryLayout() != t2.GetMemoryLayout())
      throw std::runtime_error("TMVA RTensor Concatenate - tensors have different memory layout");
   auto & shape1 = t1.GetShape();
   auto & shape2 = t2.GetShape();
   if (t1.GetSize()/shape1[axis] != t2.GetSize()/shape2[axis]) {
      std::cout << "axis " << axis << " sizes " << t1.GetSize() << " " << t2.GetSize() << " ";
      std::cout << "shape 1 : " << ConvertShapeToString(t1.GetShape());
      std::cout << " shape 2 : " << ConvertShapeToString(t2.GetShape()) << std::endl;
      throw std::runtime_error("TMVA RTensor Concatenate - tensors have incompatible shapes");
   }
   std::vector<size_t> outShape = shape1;
   outShape[axis] = shape1[axis] + shape2[axis];
   TMVA::Experimental::RTensor<T> tout(outShape, t1.GetMemoryLayout());
   if (t1.GetMemoryLayout() == TMVA::Experimental::MemoryLayout::ColumnMajor) {
      throw std::runtime_error("TMVA RTensor Concatenate is not yet supported for column major tensors");
   }

   auto & stride1 = t1.GetStrides();
   auto & stride2 = t2.GetStrides();
   auto & outStride = tout.GetStrides();

   size_t s1 = (axis > 0) ? stride1[axis-1] : t1.GetSize();   // block size to copy from the first tensor
   size_t s2 = (axis > 0) ? stride2[axis-1] : t2.GetSize();   // block size to copy from the second tensor
   size_t sout = (axis > 0) ? outStride[axis-1] : tout.GetSize();
   size_t nb = t1.GetSize()/s1;
   for (size_t i = 0; i < nb; i++) {
      std::copy(t1.GetData() + i*s1, t1.GetData() + (i+1)*s1, tout.GetData() + i*sout);
      std::copy(t2.GetData() + i*s2, t2.GetData() + (i+1)*s2, tout.GetData() + i*sout + s1);
   }

   return tout;
}
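
// Usage sketch (illustrative, not part of the original header): concatenating
// two row-major tensors of shapes {2, 3} and {2, 2} along axis 1 gives a
// {2, 5} tensor whose rows are the rows of t1 followed by the rows of t2.
//    TMVA::Experimental::RTensor<float> t1({2, 3});
//    TMVA::Experimental::RTensor<float> t2({2, 2});
//    auto t3 = Concatenate(t1, t2, /*axis*/ 1);   // shape {2, 5}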

inline GNN_Data Concatenate(GNN_Data & data1, GNN_Data & data2, int axis = 0) {
   GNN_Data out;
   out.node_data = Concatenate(data1.node_data, data2.node_data, axis);
   out.edge_data = Concatenate(data1.edge_data, data2.edge_data, axis);
   out.global_data = Concatenate<float>(data1.global_data, data2.global_data, axis-1);
   // assume the senders/receivers of data1 and data2 are the same
   out.edge_index = data1.edge_index.Copy();
   return out;
}

inline GNN_Data Copy(const GNN_Data & data) {
   GNN_Data out;
   out.node_data = RTensor<float>(data.node_data.GetShape());
   out.edge_data = RTensor<float>(data.edge_data.GetShape());
   out.global_data = RTensor<float>(data.global_data.GetShape());
   out.edge_index = RTensor<int>(data.edge_index.GetShape());
   std::copy(data.node_data.GetData(), data.node_data.GetData() + data.node_data.GetSize(), out.node_data.GetData());
   std::copy(data.edge_data.GetData(), data.edge_data.GetData() + data.edge_data.GetSize(), out.edge_data.GetData());
   std::copy(data.global_data.GetData(), data.global_data.GetData() + data.global_data.GetSize(), out.global_data.GetData());
   std::copy(data.edge_index.GetData(), data.edge_index.GetData() + data.edge_index.GetSize(), out.edge_index.GetData());
   return out;
}
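
// Usage sketch (illustrative, not part of the original header): Copy performs
// a deep copy, so the tensors of the copy own separate storage.
//    GNN_Data d2 = Copy(d1);
//    d2.node_data(0, 0) = 42.f;   // leaves d1.node_data untouched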

}//SOFIE
}//Experimental
}//TMVA

#endif //TMVA_SOFIE_SOFIE_COMMON