SOFIE_common.hxx
#ifndef TMVA_SOFIE_SOFIE_COMMON
#define TMVA_SOFIE_SOFIE_COMMON

#include "TMVA/RTensor.hxx"

#include "ROOT/RSpan.hxx"

#include <stdexcept>
#include <type_traits>
#include <cstdint>
#include <cstring>
#include <string>
#include <vector>
#include <memory>
#include <regex>
#include <sstream>
#include <iostream>

namespace TMVA{
namespace Experimental{
namespace SOFIE{

//typedef RTensor tensor_t;

enum class ETensorType{
   UNDEFINED = 0, FLOAT = 1, UINT8 = 2, INT8 = 3, UINT16 = 4, INT16 = 5, INT32 = 6, INT64 = 7, STRING = 8, BOOL = 9, //order sensitive
   FLOAT16 = 10, DOUBLE = 11, UINT32 = 12, UINT64 = 13, COMPLEX64 = 14, COMPLEX128 = 15, BFLOAT16 = 16
};

typedef std::int64_t int_t;

std::string ConvertTypeToString(ETensorType type);
ETensorType ConvertStringToType(std::string type);

struct Dim{
   bool isParam = false;
   size_t dim = 0;
   std::string param;

   // default constructor (for I/O)
   Dim() {}

   // constructor for a parametric dimension with the option to pass a default dim value
   Dim(const std::string & p, size_t d = 0) : isParam(true), dim(d), param(p) {}

   // constructor for a non-parametric dimension
   Dim(size_t d) : dim(d) {}

   std::string GetVal() const {
      return (isParam) ? param : std::to_string(dim);
   }
};
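
/// Example (illustrative, not part of the original header): a Dim holds either
/// a fixed size or a named, parametric one, and GetVal() renders whichever is
/// active as a string usable in generated code:
/// \code
/// Dim fixed(16);              // non-parametric: GetVal() == "16"
/// Dim batch("batch_size", 1); // parametric:     GetVal() == "batch_size"
/// std::string s = fixed.GetVal() + " x " + batch.GetVal(); // "16 x batch_size"
/// \endcode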

struct InputTensorInfo{
   ETensorType type;
   std::vector<Dim> shape;
};

struct TensorInfo{
   ETensorType type;
   std::vector<size_t> shape;
};

struct DynamicTensorInfo{
   ETensorType type;
   std::vector<Dim> shape;
};

// Convert shape from integer format to dynamic one (based on Dim)
std::vector<Dim> ConvertShapeToDim(std::vector<size_t> shape);

// Convert shape based on Dim to integer format
std::vector<size_t> ConvertShapeToInt(std::vector<Dim> shape);

std::size_t ConvertShapeToLength(std::vector<size_t> shape);

std::string ConvertShapeToString(std::vector<size_t> shape);
std::string ConvertDynamicShapeToString(std::vector<Dim> shape);
// std::string ConvertShapeToString(std::vector<Dim> shape) {
//    return ConvertDynamicShapeToString(shape);
// }

std::string ConvertDynamicShapeToLength(std::vector<Dim> shape);

// convert a list of values into a string
template<class T>
std::string ConvertValuesToString(size_t n, const T * data) {
   std::stringstream ret;
   ret << "[ ";
   for (size_t i = 0; i < n; i++) {
      ret << data[i];
      if (i < n-1) ret << ", ";
   }
   ret << "]";
   return ret.str();
}
template<class T>
std::string ConvertValuesToString(const std::vector<T> & data) {
   return ConvertValuesToString(data.size(), data.data());
}

class InitializedTensor {
public:
   InitializedTensor() = default;
   InitializedTensor(ETensorType type, std::span<std::size_t> shape, std::shared_ptr<void> data, bool typeConstant = false)
      : fConstant(typeConstant), fType{type}, fShape{shape.begin(), shape.end()}, fData{data}
   {
   }

   ETensorType const &type() const { return fType; }
   std::vector<std::size_t> const &shape() const { return fShape; }
   std::shared_ptr<void> const &sharedptr() const { return fData; }
   // query if tensor comes from a Constant operator
   bool IsConstantTensor() const { return fConstant; }
   // query if tensor needs to be written in a weight file. Constant tensors are not written in a file
   bool IsWeightTensor() const { return !fConstant && !fIsNotWritable; }
   // set the tensor as not writable - i.e. its values must not be written in a weight file
   void SetNotWritable() { fIsNotWritable = true; }

   template <class T = void>
   T const *data() const
   {
      return static_cast<T const *>(fData.get());
   }

   void CastSharedToPersistent()
   {
      // We only calculate fSize here, because it is only used for IO to know
      // the size of the persistent data.
      fSize = 1;
      for (std::size_t item : fShape) {
         fSize *= static_cast<int>(item);
      }
      switch (fType) {
      case ETensorType::FLOAT: fSize *= sizeof(float); break;
      case ETensorType::DOUBLE: fSize *= sizeof(double); break;
      case ETensorType::INT32: fSize *= sizeof(int32_t); break;
      case ETensorType::INT64: fSize *= sizeof(int64_t); break;
      case ETensorType::BOOL: fSize *= sizeof(bool); break;
      default:
         throw std::runtime_error("TMVA::SOFIE doesn't yet support serialising data-type " +
                                  ConvertTypeToString(fType));
      }
      fPersistentData = static_cast<char *>(fData.get());
   }
   void CastPersistentToShared()
   {
      // If there is no persistent data, do nothing
      if (fSize == 0 || fPersistentData == nullptr) {
         return;
      }

      // Nothing to be done if the pointed-to data is the same
      if (fPersistentData == static_cast<char *>(fData.get())) {
         return;
      }

      // Initialize the shared_ptr
      fData = std::shared_ptr<void>{malloc(fSize), free};
      std::memcpy(fData.get(), fPersistentData, fSize);

      // Make sure the data read from disk doesn't leak and delete the
      // persistent data
      delete[] fPersistentData;
      fPersistentData = nullptr;
      fSize = 0;
   }

private:
   bool fConstant = false;          ///< Flag specifying if tensor is a Constant one (coming from a Constant operator)
   bool fIsNotWritable = false;     ///< Flag to indicate that tensor values do not need to be written as weight or generated code
   ETensorType fType;               ///< Encodes the type of the data
   std::vector<std::size_t> fShape; ///< The shape of the data in terms of elements in each dimension
   std::shared_ptr<void> fData;     ///<! Transient shared data
   int fSize = 0;                   ///< The size of the persistent data in bytes (not number of elements!)
   char *fPersistentData = nullptr; ///<[fSize] Persistent version of the data
};
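
/// Example (illustrative sketch): constructing an InitializedTensor that owns
/// its buffer through a shared_ptr; the shape is passed as a span over an
/// existing vector, and the deleter matches the array allocation.
/// \code
/// std::vector<std::size_t> shape{2, 3};
/// std::shared_ptr<void> buf(new float[6]{1, 2, 3, 4, 5, 6},
///                           [](void *p) { delete[] static_cast<float *>(p); });
/// InitializedTensor t(ETensorType::FLOAT, shape, buf);
/// const float *vals = t.data<float>(); // typed view of the raw buffer
/// \endcode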

template <typename T>
ETensorType GetTemplatedType(T /*obj*/ ) {
   if (std::is_same<T, float>::value) return ETensorType::FLOAT;
   if (std::is_same<T, uint8_t>::value) return ETensorType::UINT8;
   if (std::is_same<T, int8_t>::value) return ETensorType::INT8;
   if (std::is_same<T, uint16_t>::value) return ETensorType::UINT16;
   if (std::is_same<T, int16_t>::value) return ETensorType::INT16;
   if (std::is_same<T, int32_t>::value) return ETensorType::INT32;
   if (std::is_same<T, int64_t>::value) return ETensorType::INT64;
   if (std::is_same<T, std::string>::value) return ETensorType::STRING;
   if (std::is_same<T, bool>::value) return ETensorType::BOOL;
   //float16 unimplemented
   if (std::is_same<T, double>::value) return ETensorType::DOUBLE;
   if (std::is_same<T, uint32_t>::value) return ETensorType::UINT32;
   if (std::is_same<T, uint64_t>::value) return ETensorType::UINT64;
   //complex 64, complex 128 and bfloat 16 unimplemented
   return ETensorType::UNDEFINED;
}
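
/// Example (illustrative): the argument is used only for template deduction,
/// so a default-constructed value is enough.
/// \code
/// ETensorType tf = GetTemplatedType(float{});   // ETensorType::FLOAT
/// ETensorType ti = GetTemplatedType(int64_t{}); // ETensorType::INT64
/// \endcode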

namespace UTILITY{
// Check if two shapes are equal
bool AreSameShape(const std::vector<size_t>&, const std::vector<size_t>&);
bool AreSameShape(const std::vector<size_t>&, const std::vector<Dim>&);
bool AreSameShape(const std::vector<Dim>&, const std::vector<Dim>&);

// Multidirectional broadcast a list of tensors to the same shape
std::vector<size_t> MultidirectionalBroadcastShape(std::vector<std::vector<size_t>>);

// Unidirectional broadcast two shapes to the same shape
std::vector<size_t> UnidirectionalBroadcastShape(std::vector<size_t>, std::vector<size_t>);

std::string Clean_name(std::string input_tensor_name);

template<typename T>
T* BroadcastConvBias(const T* data, const size_t channel, const std::vector<size_t>& targetShape) {
   size_t size = targetShape.size();
   if (targetShape[1] != channel) {
      std::stringstream ss;
      ss << "TMVA::SOFIE - Error broadcasting Conv Bias of shape {";
      ss << std::to_string(channel);
      ss << "} to ";
      ss << ConvertShapeToString(targetShape);
      throw std::runtime_error(ss.str());
   }

   size_t targetLength = ConvertShapeToLength(targetShape);
   T* newData = new T[targetLength];

   if (targetLength == channel) {
      std::copy(data, data + channel, newData);
      return newData;
   }

   // cStride = OutDepth * outHeight * outWidth
   size_t cStride = 1;
   for (size_t i = 2; i < size; i++)
      cStride *= targetShape[i];
   // Broadcast each element of the bias to a vector of size cStride and concatenate them
   // into a vector of size channel * cStride
   for (size_t i = 0; i < channel; i++) {
      std::fill(newData + i * cStride, newData + (i + 1) * cStride, data[i]);
   }
   // Broadcast newData[0...channel * cStride) to newData[0...batch * channel * cStride)
   size_t batch = targetShape[0];
   size_t bStride = channel * cStride;
   for (size_t i = 1; i < batch; i++) {
      std::copy(newData, newData + bStride, newData + i * bStride);
   }
   return newData;
}
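
/// Example (illustrative): broadcasting a per-channel Conv bias {b0, b1} to an
/// output of shape (N=1, C=2, H=2, W=2); each bias value is repeated over the
/// 2x2 spatial plane of its channel. The caller owns the returned buffer.
/// \code
/// float bias[2] = {0.5f, -1.0f};
/// std::vector<size_t> outShape{1, 2, 2, 2};
/// float *b = UTILITY::BroadcastConvBias(bias, 2, outShape);
/// // b == {0.5, 0.5, 0.5, 0.5, -1.0, -1.0, -1.0, -1.0}
/// delete[] b;
/// \endcode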

// Broadcast a tensor from shape to targetShape according to numpy broadcasting rules
// See more at https://numpy.org/doc/stable/user/basics.broadcasting.html
// and https://github.com/onnx/onnx/blob/main/docs/Broadcasting.md .
template<typename T>
T* BroadcastTensor(const T* data, const std::vector<size_t>& shape, const std::vector<size_t>& targetShape) {
   // Rank of the shapes (shape and targetShape must have the same rank)
   size_t size = shape.size();
   // Current length of the broadcasted tensor
   size_t curLength = ConvertShapeToLength(shape);
   size_t targetLength = ConvertShapeToLength(targetShape);
   // Allocate the output buffer and seed it with the original data
   T* broadcastedData = new T[targetLength];
   std::copy(data, data + curLength, broadcastedData);
   // Product of the previous dimensions of targetShape
   size_t arrayNum = 1;
   // New broadcasted data
   std::vector<T> newData(targetLength);

   for (size_t idx = 0; idx < size; idx++) {
      size_t dim = shape[idx];
      size_t targetDim = targetShape[idx];
      if (dim == 1 && targetDim > 1) {
         // Set the new length of the data
         size_t newLength = curLength * targetDim;
         // View the data as a list of arrayNum arrays of size arrayLength
         size_t arrayLength = curLength / arrayNum;
         // Broadcast each array targetDim times
         if (arrayLength > 1) {
            // If each array has at least two elements
            for (size_t arrayIdx = 0; arrayIdx < arrayNum; arrayIdx++) {
               for (size_t targetIdx = 0; targetIdx < targetDim; targetIdx++) {
                  size_t offset = arrayIdx * arrayLength * targetDim + targetIdx * arrayLength;
                  std::copy(broadcastedData + arrayIdx * arrayLength,
                            broadcastedData + (arrayIdx + 1) * arrayLength,
                            newData.begin() + offset);
               }
            }
         } else {
            // If each array has a single element
            for (size_t arrayIdx = 0; arrayIdx < arrayNum; arrayIdx++) {
               std::fill(newData.begin() + arrayIdx * targetDim,
                         newData.begin() + (arrayIdx + 1) * targetDim, broadcastedData[arrayIdx]);
            }
         }
         // Update current length
         curLength = newLength;
         // Update broadcasted data
         std::copy(newData.begin(), newData.begin() + newLength, broadcastedData);
      }
      // Update the number of arrays
      arrayNum *= targetDim;
   }
   return broadcastedData;
}

// Unidirectional broadcasting of shape to targetShape
template<typename T>
T* UnidirectionalBroadcast(const T* data, const std::vector<size_t>& shape, const std::vector<size_t>& targetShape) {
   // Prepend the shape with ones until it has the same rank as targetShape
   if (shape.size() < targetShape.size()) {
      size_t targetSize = targetShape.size();
      std::vector<size_t> newShape(targetSize, 1);
      size_t offset = targetSize - shape.size();
      std::copy(shape.begin(), shape.end(), newShape.begin() + offset);
      return BroadcastTensor<T>(data, newShape, targetShape);
   }
   return BroadcastTensor<T>(data, shape, targetShape);
}
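
/// Example (illustrative): unidirectional broadcast of a row vector {3} onto a
/// 2x3 target; the shape is first promoted to {1, 3} and then the single row
/// is copied along the new leading axis by BroadcastTensor.
/// \code
/// float row[3] = {1.f, 2.f, 3.f};
/// float *out = UTILITY::UnidirectionalBroadcast(row, {3}, {2, 3});
/// // out == {1, 2, 3, 1, 2, 3}
/// delete[] out;
/// \endcode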

/// compute stride of a tensor given its shape (assume layout is row-major)
std::vector<size_t> ComputeStrideFromShape(const std::vector<size_t> & shape);
std::vector<Dim> ComputeStrideFromShape(const std::vector<Dim> & shape);

/// function to check if a >= 0 and a < b using a single comparison:
/// casting to unsigned turns any negative value into one larger than b
inline bool is_a_ge_zero_and_a_lt_b(int a, int b) {
   return static_cast<unsigned>(a) < static_cast<unsigned>(b);
}
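
/// Example (illustrative): a negative `a` wraps to a huge unsigned value, so
/// the single unsigned comparison checks both bounds at once (assuming a 32-bit int):
/// \code
/// is_a_ge_zero_and_a_lt_b(-1, 5); // false: (unsigned)-1 == 4294967295, not < 5
/// is_a_ge_zero_and_a_lt_b( 3, 5); // true:  0 <= 3 < 5
/// is_a_ge_zero_and_a_lt_b( 5, 5); // false: a must be strictly less than b
/// \endcode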


/// im2col : efficient function to re-arrange input data of convolution to a matrix
/// that can be used by BLAS
/// Use trick to loop on each element of filtered region first and follow input data layout
/// By doing this reads and writes are of consecutive data in memory and one gains in efficiency
/// The resulting matrix will be already transposed and can be used directly in BLAS
/// since output will be a matrix : (channels*kernel_h*kernel_w , output_h*output_w)
/// Example: with an input matrix
///    a1 a2 a3
///    b1 b2 b3   and a 2x2 kernel (k1,k2,k3,k4) and padding 1 :
///    c1 c2 c3
/// output will be a matrix (4 x 16)
/// the routine will follow the output order :
/// first all elements which will be multiplied by k1, then those for k2, k3 and k4
/// -> ( 0 0 0 0 0 a1 a2 a3 0 b1 b2 b3 0 c1 c2 c3 )  all elements for k1
///    ( 0 0 0 0 a1 a2 a3 0 b1 b2 b3 0 c1 c2 c3 0 )  for k2
///    ( 0 a1 a2 a3 0 b1 b2 b3 0 c1 c2 c3 0 0 0 0 )  for k3
///    ( a1 a2 a3 0 b1 b2 b3 0 c1 c2 c3 0 0 0 0 0 )  for k4
///

template <typename T>
void Im2col(const T *data_im, const int channels, const int height, const int width, const int kernel_h,
            const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w,
            const int dilation_h, const int dilation_w, T *data_col)
{
   const int output_h = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
   const int output_w = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
   const int channel_size = height * width;
   for (int channel = channels; channel--; data_im += channel_size) {
      for (int kernel_row = 0; kernel_row < kernel_h; kernel_row++) {
         for (int kernel_col = 0; kernel_col < kernel_w; kernel_col++) {
            int input_row = -pad_h + kernel_row * dilation_h;
            for (int output_rows = output_h; output_rows; output_rows--) {
               if (!is_a_ge_zero_and_a_lt_b(input_row, height)) {
                  // the whole output row falls into the padding region
                  for (int output_cols = output_w; output_cols; output_cols--) {
                     *(data_col++) = 0;
                  }
               } else {
                  int input_col = -pad_w + kernel_col * dilation_w;
                  for (int output_col = output_w; output_col; output_col--) {
                     if (is_a_ge_zero_and_a_lt_b(input_col, width)) {
                        *(data_col++) = data_im[input_row * width + input_col];
                     } else {
                        *(data_col++) = 0;
                     }
                     input_col += stride_w;
                  }
               }
               input_row += stride_h;
            }
         }
      }
   }
}
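
/// Example (illustrative): the 3x3 single-channel input from the sketch above
/// with a 2x2 kernel, stride 1 and padding 1 gives a (1*2*2) x (4*4) column
/// matrix; the output dimensions follow the usual convolution formula
/// output = (input + 2*pad - (dilation*(kernel-1)+1)) / stride + 1.
/// \code
/// float in[9] = {1, 2, 3, 4, 5, 6, 7, 8, 9};      // 1 channel, 3x3
/// const int oh = (3 + 2*1 - (1*(2-1)+1)) / 1 + 1; // 4
/// const int ow = (3 + 2*1 - (1*(2-1)+1)) / 1 + 1; // 4
/// std::vector<float> col(1 * 2 * 2 * oh * ow);    // channels*kh*kw rows, oh*ow columns
/// UTILITY::Im2col(in, /*channels*/1, /*h*/3, /*w*/3, /*kh*/2, /*kw*/2,
///                 /*pad_h*/1, /*pad_w*/1, /*stride_h*/1, /*stride_w*/1,
///                 /*dilation_h*/1, /*dilation_w*/1, col.data());
/// \endcode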

/// 3d implementation
template <typename T>
void Im2col_3d(const T *data_im, const int channels,
               const int depth, const int height, const int width,
               const int kernel_d, const int kernel_h, const int kernel_w,
               const int pad_d, const int pad_h, const int pad_w,
               const int stride_d, const int stride_h, const int stride_w,
               const int dilation_d, const int dilation_h, const int dilation_w, T *data_col)
{
   const int output_h = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
   const int output_w = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
   const int output_d = (depth + 2 * pad_d - (dilation_d * (kernel_d - 1) + 1)) / stride_d + 1;
   const int channel_size = height * width * depth;
   // assume data are c x d x h x w
   for (int channel = channels; channel--; data_im += channel_size) {
      for (int kernel_depth = 0; kernel_depth < kernel_d; kernel_depth++) {
         for (int kernel_row = 0; kernel_row < kernel_h; kernel_row++) {
            for (int kernel_col = 0; kernel_col < kernel_w; kernel_col++) {
               int input_dep = -pad_d + kernel_depth * dilation_d;
               for (int output_dep = output_d; output_dep; output_dep--) {
                  if (!is_a_ge_zero_and_a_lt_b(input_dep, depth)) {
                     // the whole output plane falls into the padding region
                     for (int output_rows = output_h; output_rows; output_rows--) {
                        for (int output_cols = output_w; output_cols; output_cols--) {
                           *(data_col++) = 0;
                        }
                     }
                  } else {
                     int input_row = -pad_h + kernel_row * dilation_h;
                     for (int output_rows = output_h; output_rows; output_rows--) {
                        if (!is_a_ge_zero_and_a_lt_b(input_row, height)) {
                           for (int output_cols = output_w; output_cols; output_cols--) {
                              *(data_col++) = 0;
                           }
                        } else {
                           int input_col = -pad_w + kernel_col * dilation_w;
                           for (int output_col = output_w; output_col; output_col--) {
                              if (is_a_ge_zero_and_a_lt_b(input_col, width)) {
                                 *(data_col++) = data_im[input_dep * width * height + input_row * width + input_col];
                              } else {
                                 *(data_col++) = 0;
                              }
                              input_col += stride_w;
                           }
                        }
                        input_row += stride_h;
                     }
                  }
                  input_dep += stride_d;
               }
            }
         }
      }
   }
}

template <typename Dtype>
void col2im(const Dtype* data_col, const int channels,
            const int height, const int width, const int kernel_h, const int kernel_w,
            const int pad_h, const int pad_w,
            const int stride_h, const int stride_w,
            const int dilation_h, const int dilation_w,
            Dtype* data_im) {
   // the output data_im must be a zero vector: reset it before accumulating
   std::fill(data_im, data_im + height * width * channels, 0.);
   const int output_h = (height + 2 * pad_h -
                         (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
   const int output_w = (width + 2 * pad_w -
                         (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
   const int channel_size = height * width;
   for (int channel = channels; channel--; data_im += channel_size) {
      for (int kernel_row = 0; kernel_row < kernel_h; kernel_row++) {
         for (int kernel_col = 0; kernel_col < kernel_w; kernel_col++) {
            int input_row = -pad_h + kernel_row * dilation_h;
            for (int output_rows = output_h; output_rows; output_rows--) {
               if (!is_a_ge_zero_and_a_lt_b(input_row, height)) {
                  data_col += output_w;
               } else {
                  int input_col = -pad_w + kernel_col * dilation_w;
                  for (int output_col = output_w; output_col; output_col--) {
                     if (is_a_ge_zero_and_a_lt_b(input_col, width)) {
                        // accumulate: overlapping patches sum into the same pixel
                        data_im[input_row * width + input_col] += *data_col;
                     }
                     data_col++;
                     input_col += stride_w;
                  }
               }
               input_row += stride_h;
            }
         }
      }
   }
}


} // end namespace UTILITY

namespace BLAS{
extern "C" void sgemm_(const char * transa, const char * transb, const int * m, const int * n, const int * k,
                       const float * alpha, const float * A, const int * lda, const float * B, const int * ldb,
                       const float * beta, float * C, const int * ldc);
} // end namespace BLAS
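
/// Example (illustrative sketch, assuming a Fortran BLAS library is linked in):
/// BLAS is column-major, so the row-major product C = A * B can be computed as
/// C^T = B^T * A^T, i.e. by swapping the two operands:
/// \code
/// float A[6] = {1, 2, 3, 4, 5, 6}; // 2x3, row-major
/// float B[6] = {1, 0, 0, 1, 1, 1}; // 3x2, row-major
/// float C[4];                      // 2x2 result, row-major
/// const char t = 'N';
/// const int m = 2, n = 2, k = 3;
/// const float alpha = 1.f, beta = 0.f;
/// BLAS::sgemm_(&t, &t, &n, &m, &k, &alpha, B, &n, A, &k, &beta, C, &n);
/// // C == {4, 5, 10, 11}
/// \endcode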


struct GNN_Data {
   RTensor<float> node_data;   // the node feature data, tensor with shape (num_nodes, num_node_features)
   RTensor<float> edge_data;   // the edge feature data, tensor with shape (num_edges, num_edge_features)
   RTensor<float> global_data; // the global features, tensor with shape (1, num_global_features)
   RTensor<int> edge_index;    // the edge index (receivers and senders for each edge), tensor with shape (2, num_edges)
                               // edge_index[0,:] are the receivers and edge_index[1,:] are the senders

   // need to have a default constructor since RTensor does not have one
   GNN_Data() : node_data(RTensor<float>({})), edge_data(RTensor<float>({})), global_data(RTensor<float>({})), edge_index(RTensor<int>({})) {}
};

template<typename T>
TMVA::Experimental::RTensor<T> Concatenate( TMVA::Experimental::RTensor<T> & t1, TMVA::Experimental::RTensor<T> & t2, int axis = 0)
{
   // concatenate tensors along axis. Shapes must be the same except in the dimension of the concatenated axis
   if (t1.GetMemoryLayout() != t2.GetMemoryLayout())
      throw std::runtime_error("TMVA RTensor Concatenate - tensors have different memory layout");
   auto & shape1 = t1.GetShape();
   auto & shape2 = t2.GetShape();
   if (t1.GetSize()/shape1[axis] != t2.GetSize()/shape2[axis]) {
      std::cout << "axis " << axis << " sizes " << t1.GetSize() << " " << t2.GetSize() << " ";
      std::cout << "shape 1 : " << ConvertShapeToString(t1.GetShape());
      std::cout << " shape 2 : " << ConvertShapeToString(t2.GetShape()) << std::endl;
      throw std::runtime_error("TMVA RTensor Concatenate - tensors have incompatible shapes");
   }
   std::vector<size_t> outShape = shape1;
   outShape[axis] = shape1[axis] + shape2[axis];
   TMVA::Experimental::RTensor<T> tout(outShape, t1.GetMemoryLayout());
   if (t1.GetMemoryLayout() == TMVA::Experimental::MemoryLayout::ColumnMajor) {
      throw std::runtime_error("TMVA RTensor Concatenate is not yet supported for column major tensors");
   }

   auto & stride1 = t1.GetStrides();
   auto & stride2 = t2.GetStrides();
   auto & outStride = tout.GetStrides();

   size_t s1 = (axis > 0) ? stride1[axis-1] : t1.GetSize();  // block size to copy from the first tensor
   size_t s2 = (axis > 0) ? stride2[axis-1] : t2.GetSize();  // block size to copy from the second tensor
   size_t sout = (axis > 0) ? outStride[axis-1] : tout.GetSize();
   size_t nb = t1.GetSize()/s1;
   for (size_t i = 0; i < nb; i++) {
      std::copy(t1.GetData() + i*s1, t1.GetData() + (i+1)*s1, tout.GetData() + i * sout);
      std::copy(t2.GetData() + i*s2, t2.GetData() + (i+1)*s2, tout.GetData() + i * sout + s1);
   }

   return tout;
}
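
/// Example (illustrative): concatenating two row-major 2x2 tensors along
/// axis 1 yields a 2x4 tensor whose rows interleave blocks from each input.
/// \code
/// float d1[4] = {1, 2, 3, 4};
/// float d2[4] = {5, 6, 7, 8};
/// RTensor<float> t1(d1, {2, 2});
/// RTensor<float> t2(d2, {2, 2});
/// auto t3 = Concatenate(t1, t2, /*axis*/1); // shape {2, 4}
/// // t3 data: {1, 2, 5, 6, 3, 4, 7, 8}
/// \endcode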


inline GNN_Data Concatenate(GNN_Data & data1, GNN_Data & data2, int axis = 0) {
   GNN_Data out;
   out.node_data = Concatenate(data1.node_data, data2.node_data, axis);
   out.edge_data = Concatenate(data1.edge_data, data2.edge_data, axis);
   out.global_data = Concatenate<float>(data1.global_data, data2.global_data, axis-1);
   // assume senders/receivers of data1 and data2 are the same
   out.edge_index = data1.edge_index.Copy();
   return out;
}

inline GNN_Data Copy(const GNN_Data & data) {
   GNN_Data out;
   out.node_data = RTensor<float>(data.node_data.GetShape());
   out.edge_data = RTensor<float>(data.edge_data.GetShape());
   out.global_data = RTensor<float>(data.global_data.GetShape());
   out.edge_index = RTensor<int>(data.edge_index.GetShape());
   std::copy(data.node_data.GetData(), data.node_data.GetData() + data.node_data.GetSize(), out.node_data.GetData());
   std::copy(data.edge_data.GetData(), data.edge_data.GetData() + data.edge_data.GetSize(), out.edge_data.GetData());
   std::copy(data.global_data.GetData(), data.global_data.GetData() + data.global_data.GetSize(), out.global_data.GetData());
   std::copy(data.edge_index.GetData(), data.edge_index.GetData() + data.edge_index.GetSize(), out.edge_index.GetData());
   return out;
}

}//SOFIE
}//Experimental
}//TMVA

#endif //TMVA_SOFIE_SOFIE_COMMON