ROOT Reference Guide
ROperator_Conv.hxx
#ifndef TMVA_SOFIE_ROPERATOR_CONV
#define TMVA_SOFIE_ROPERATOR_CONV

#include "TMVA/ROperator.hxx"
#include "TMVA/RModel.hxx"

#include <memory>
#include <sstream>
#include <algorithm>
#include <stdexcept>
#include <vector>
#include <cassert>

namespace TMVA {
namespace Experimental {
namespace SOFIE {
template<typename T>
class ROperator_Conv final : public ROperator
{
private:
   bool fBroadcastBias = false;

   std::string fAttrAutopad;
   std::vector<size_t> fAttrDilations;
   size_t fAttrGroup;
   std::vector<size_t> fAttrKernelShape;
   std::vector<size_t> fAttrPads;
   std::vector<size_t> fAttrStrides;

   std::string fNX;
   std::string fNW;
   std::string fNB;
   std::string fNY;

   std::string convK;   // name of the unrolled (dilated) kernel tensor
   std::string imcol;   // name of the im2col matrix tensor

   std::vector<Dim> fShapeX;
   std::vector<size_t> fShapeW;
   std::vector<size_t> fShapeB;
   std::vector<Dim> fShapeY;

   std::string fType;

   size_t fDim; // dimension of the convolution (1, 2 or 3)

public:

   ROperator_Conv(std::string autopad, std::vector<size_t> dilations,
      size_t group, std::vector<size_t> kernelShape, std::vector<size_t> pads,
      std::vector<size_t> strides, std::string nameX, std::string nameW,
      std::string nameB, std::string nameY):
      fAttrAutopad(autopad), fAttrDilations(dilations), fAttrGroup(group), fAttrKernelShape(kernelShape),
      fAttrPads(pads), fAttrStrides(strides),
      fNX(UTILITY::Clean_name(nameX)), fNW(UTILITY::Clean_name(nameW)),
      fNB(UTILITY::Clean_name(nameB)), fNY(UTILITY::Clean_name(nameY))
   {
      if (std::is_same<T, float>::value) {
         fType = "float";
      } else {
         throw
            std::runtime_error("TMVA SOFIE Encountered unsupported type parsing a Conv operator");
      }
      // register the input and output tensor names of the operator
      fInputTensorNames = { fNX, fNW, fNB };
      fOutputTensorNames = { fNY };
   }

   ROperator_Conv(std::string autopad, std::vector<size_t> dilations,
      size_t group, std::vector<size_t> kernelShape, std::vector<size_t> pads,
      std::vector<size_t> strides, std::string nameX, std::string nameW,
      std::string nameY):
      fAttrAutopad(autopad), fAttrDilations(dilations), fAttrGroup(group), fAttrKernelShape(kernelShape),
      fAttrPads(pads), fAttrStrides(strides),
      fNX(UTILITY::Clean_name(nameX)), fNW(UTILITY::Clean_name(nameW)), fNY(UTILITY::Clean_name(nameY))
   {
      if (std::is_same<T, float>::value) {
         fType = "float";
      } else {
         throw
            std::runtime_error("TMVA SOFIE Encountered unsupported type parsing a Conv operator");
      }
      fInputTensorNames = { fNX, fNW };
      fOutputTensorNames = { fNY };
   }

   std::vector<ETensorType> TypeInference(std::vector<ETensorType> input) override {
      ETensorType out = input[0];
      return {out};
   }

   // function returning the output shape given the input shape
   std::vector<Dim> DoShapeInference(const std::vector<Dim> & input, const std::vector<size_t> & weight) {
      // according to ONNX, the shape of the convolution input must be N x C x H x W,
      // where N : batch size, C : input channels, H : input height, W : input width
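      // for each spatial axis the output size follows the standard convolution formula
      //   out = (in + pad_begin + pad_end - dilated_kernel) / stride + 1
      // e.g. in = 5, kernel = 3, pads = 1+1, stride = 1  ->  out = (5 + 2 - 3)/1 + 1 = 5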

      if (input.size() - 2 != fDim) {
         throw std::runtime_error("TMVA SOFIE Conv Op Shape inference - invalid input ");
      }
      if (weight.size() - 2 != fDim) {
         throw std::runtime_error("TMVA SOFIE Conv Op Shape inference - invalid weights ");
      }
      if (fAttrGroup == 0 && input[1].isParam)
         throw std::runtime_error("TMVA SOFIE Conv - param shapes not supported without group attr");
      if (fAttrKernelShape.empty()) {
         if (input[2].isParam || (fDim > 1 && input[3].isParam) || (fDim > 2 && input[4].isParam))
            throw std::runtime_error("TMVA SOFIE Conv - param shapes not supported without kernel attr");
      }

      if (fAttrGroup == 0) {
         fAttrGroup = input[1].dim / weight[1];
      }

      // kernel shape
      size_t k1 = ((fAttrKernelShape.empty()) ? weight[2] : fAttrKernelShape[0]);
      size_t k2 = (fDim > 1) ? ((fAttrKernelShape.empty()) ? weight[3] : fAttrKernelShape[1]) : 1;
      size_t k3 = (fDim > 2) ? ((fAttrKernelShape.empty()) ? weight[4] : fAttrKernelShape[2]) : 1;

      // indices of the "end" pads in fAttrPads (the "begin" pads come first)
      size_t i1 = (fDim > 1) ? ((fDim > 2) ? 3 : 2) : 1;
      size_t i2 = (fDim > 2) ? 4 : 3;
      size_t i3 = 5;

      if (fAttrDilations.empty()) {
         fAttrDilations = {1, 1, 1};
      }
      // make sure the dilations vector has size 3, padding with 1 for the missing axes
      fAttrDilations.resize(3, 1);
      // shape of the kernel, enlarged by the dilations
      fAttrKernelShape = {k1 + (fAttrDilations[0] - 1) * (k1 - 1),
                          k2 + (fAttrDilations[1] - 1) * (k2 - 1),
                          k3 + (fAttrDilations[2] - 1) * (k3 - 1)};
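      // note: k + (d - 1) * (k - 1) == d * (k - 1) + 1 is the usual effective extent of a
      // dilated kernel, e.g. a 3x3 kernel with dilation 2 acts as a 5x5 one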

      if (fAttrAutopad == "NOTSET") {
         if (fAttrPads.empty()) {
            fAttrPads = {1, 1, 1, 1, 1, 1};
         }
      } else if (fAttrAutopad == "SAME_UPPER" || fAttrAutopad == "SAME_LOWER") {
         if (fDim == 1)
            fAttrPads = {fAttrKernelShape[0] / 2, fAttrKernelShape[0] / 2};
         else if (fDim == 2)
            fAttrPads = {fAttrKernelShape[0] / 2, fAttrKernelShape[1] / 2,
                         fAttrKernelShape[0] / 2, fAttrKernelShape[1] / 2};
         else if (fDim == 3)
            fAttrPads = {fAttrKernelShape[0] / 2, fAttrKernelShape[1] / 2, fAttrKernelShape[2] / 2,
                         fAttrKernelShape[0] / 2, fAttrKernelShape[1] / 2, fAttrKernelShape[2] / 2};
         // add the extra padding at the beginning or at the end (depending on SAME_UPPER or SAME_LOWER)
         // need to check this!
         if (fAttrKernelShape[0] % 2 == 1) {
            (fAttrAutopad == "SAME_UPPER") ? fAttrPads[0]++ : fAttrPads[i1]++;
         }
         if (fDim > 1 && fAttrKernelShape[1] % 2 == 1) {
            (fAttrAutopad == "SAME_UPPER") ? fAttrPads[1]++ : fAttrPads[i2]++;
         }
         if (fDim > 2 && fAttrKernelShape[2] % 2 == 1) {
            (fAttrAutopad == "SAME_UPPER") ? fAttrPads[2]++ : fAttrPads[i3]++;
         }
      } else if (fAttrAutopad != "VALID") {
         throw
            std::runtime_error("TMVA SOFIE Conv Op invalid fAutopad");
      }
      // make sure the pads vector has size 6
      if (fDim < 3) fAttrPads.resize(6, 0);

      if (fAttrStrides.empty()) {
         fAttrStrides = {1, 1, 1};
      }
      if (fDim < 3)
         fAttrStrides.resize(3, 1);

      Dim input1 = input[2];
      Dim input2 = (fDim > 1) ? input[3] : Dim{1};
      Dim input3 = (fDim > 2) ? input[4] : Dim{1};

      size_t pad1 = fAttrPads[0] + fAttrPads[i1];

      // lambda returning the output dimension of the convolution for a given input dimension
      auto computeOutput = [&](Dim inputDim, size_t kernel, size_t pad, size_t stride) {
         if (!inputDim.isParam) {
            size_t outSize = (inputDim.dim + pad - kernel) / stride + 1;
            return Dim{outSize};
         } else {
            if (stride == 1) {
               if ((pad - kernel + 1) == 0)
                  // the output size is the same as the input size
                  return inputDim;
               else {
                  int64_t v = static_cast<int64_t>(pad) - static_cast<int64_t>(kernel) + 1;
                  std::string outStr = "(" + inputDim.param + "+" + std::to_string(v) + ")";
                  return Dim{ outStr, static_cast<size_t>(-1)};
               }
            } else { // general case (stride != 1)
               int64_t v = static_cast<int64_t>(pad) - static_cast<int64_t>(kernel);
               std::string outStr = "((" + inputDim.param + "+" + std::to_string(v) + ")/"
                                    + std::to_string(stride) + " + 1)";
               return Dim{ outStr, static_cast<size_t>(-1)};
            }
         }
         // unreachable; keeps all control paths returning a value
         throw std::runtime_error("TMVA SOFIE Conv Op - invalid values");
         return Dim{};
      };
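      // e.g. for a parametric input size "N", kernel = 3, pads = 1 + 1 and stride = 2 the
      // lambda returns the symbolic size "((N+-1)/2 + 1)"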

      Dim output1 = computeOutput(input1, fAttrKernelShape[0], pad1, fAttrStrides[0]);

      Dim batch_size = input[0];            // first element of the input shape
      Dim output_channels = Dim{weight[0]}; // first element of the weight shape

      std::vector<Dim> ret({ batch_size, output_channels, output1 });

      if (fDim == 1)
         return ret;

      size_t pad2 = fAttrPads[1] + fAttrPads[i2];
      Dim output2 = computeOutput(input2, fAttrKernelShape[1], pad2, fAttrStrides[1]);

      // output is N x M x OH x OW
      ret.push_back(output2);
      if (fDim == 2)
         return ret;

      size_t pad3 = fAttrPads[2] + fAttrPads[i3];
      Dim output3 = computeOutput(input3, fAttrKernelShape[2], pad3, fAttrStrides[2]);

      // output is N x M x OD x OH x OW
      ret.push_back(output3);
      return ret;
   }

   void Initialize(RModel& model) override {
      fUseSession = model.UseSession();
      if (!model.CheckIfTensorAlreadyExist(fNX)) {
         throw
            std::runtime_error("TMVA SOFIE Conv op Input Tensor " + fNX + " is not found in model");
      }
      fShapeX = model.GetDimTensorShape(fNX);
      if (fShapeX.size() < 3 || fShapeX.size() > 5) {
         std::cout << fNX << " : " << ConvertShapeToString(fShapeX) << std::endl;
         throw
            std::runtime_error("TMVA SOFIE Conv Op input data tensor " + fNX + " is not of 3, 4 or 5 dimensions");
      }
      fDim = fShapeX.size() - 2;
      if (!model.CheckIfTensorAlreadyExist(fNW)) {
         throw
            std::runtime_error("TMVA SOFIE Conv op Input weight Tensor " + fNW + " is not found in model");
      }
      fShapeW = model.GetTensorShape(fNW);
      if (fShapeW.size() < 3 || fShapeW.size() > 5) {
         std::cout << fNW << " : " << ConvertShapeToString(fShapeW) << std::endl;
         throw std::runtime_error("TMVA SOFIE Conv Op input weight tensor " + fNW + " is not of 3, 4 or 5 dimensions");
      }
      // infer the output shape and register the output tensor in the model
      fShapeY = DoShapeInference(fShapeX, fShapeW);
      model.AddIntermediateTensor(fNY, model.GetTensorType(fNX), fShapeY);
      if (fNB != "") {
         if (!model.CheckIfTensorAlreadyExist(fNB)) {
            throw
               std::runtime_error("TMVA SOFIE Conv op Input Tensor " + fNB + " is not found in model");
         }
         fShapeB = model.GetTensorShape(fNB);
         if (fShapeB.size() != 1)
            throw
               std::runtime_error("TMVA SOFIE Conv op : invalid shape for Bias tensor (is not 1D)");
         std::vector<Dim> targetShape(fShapeY.begin() + 1, fShapeY.end());
         auto shapeDimB = model.GetDimTensorShape(fNB);
         bool broadcast_needed = !UTILITY::AreSameShape(shapeDimB, targetShape);
         if (broadcast_needed) {
            // make the bias shape equal to the Y shape by adding 1's
            if (fShapeB.size() < 1)
               throw std::runtime_error("TMVA SOFIE Conv op: Bias Tensor has empty shape");
            // we assume the bias tensor dimension is equal to the number of filters, i.e. the
            // second dimension of the output tensor
            if (!(shapeDimB[0] == fShapeY[1]))
               throw std::runtime_error("TMVA SOFIE Conv op: Bias Tensor has wrong shape: " +
                                        ConvertShapeToString(fShapeB));
            if (fType != "float")
               throw std::runtime_error("TMVA SOFIE Conv op: Broadcasting for non-float type tensors is not supported");
            // here is the actual broadcasting
            fBroadcastBias = true;
            if (!fUseSession) {
               // without a session the broadcasting is done here
               std::vector<size_t> shape(fDim + 1, 1);
               shape[0] = fShapeB[0];
               auto intTargetShape = ConvertShapeToInt(targetShape);
               auto original_data = model.GetInitializedTensorData(fNB);
               std::shared_ptr<void> new_data_ptr(
                  UTILITY::UnidirectionalBroadcast<float>(static_cast<float *>(original_data.get()), shape, intTargetShape),
                  std::default_delete<float[]>());
               model.UpdateInitializedTensor(fNB, model.GetTensorType(fNB), intTargetShape, new_data_ptr);
               fShapeB = model.GetTensorShape(fNB);
            }
         }
      }
      // the output channel size can be parametric, in which case it is an expression
      std::vector<Dim> outputDims = std::vector<Dim>(fShapeY.begin() + 2, fShapeY.end());
      // check whether the output shape is parametric or fully known
      std::vector<size_t> outputInts = ConvertShapeToInt(outputDims);
      Dim channelDim;
      if (outputInts.empty()) {
         auto outputChannelSize = ConvertDimShapeToLength(outputDims); // size/channel = D * H * W
         channelDim = Dim{ outputChannelSize, static_cast<size_t>(-1)};
      } else {
         channelDim = Dim{ ConvertShapeToLength(outputInts) };
      }
      size_t kernelSize = fAttrKernelShape[0];
      for (size_t i = 1; i < fDim; i++) {
         kernelSize *= fAttrKernelShape[i];
      }

      // register the helper tensors used by Generate: the unrolled (dilated) kernels and the im2col matrix
      std::vector<size_t> shape1 = {fShapeW[0], fShapeW[1], kernelSize};
      std::vector<Dim> shape2 = {Dim{fShapeW[1]}, Dim{kernelSize}, channelDim };
      model.AddIntermediateTensor(fNX + "_f", model.GetTensorType(fNX), shape1);
      model.AddIntermediateTensor(fNX + "_xcol", model.GetTensorType(fNX), shape2);
      convK = fNX + "_f";
      imcol = fNX + "_xcol";
      fOutputTensorNames.emplace_back(convK);
      fOutputTensorNames.emplace_back(imcol);
      fInputTensorNames.emplace_back(convK);
      fInputTensorNames.emplace_back(imcol);

      if (model.Verbose()) {
         std::cout << "Conv - " << fDim << "  " << fNX << " : " << ConvertShapeToString(fShapeX)
                   << " --> " << fNY << " : " << ConvertShapeToString(fShapeY) << std::endl;
      }
   }

   std::string GenerateInitCode() override {
      std::stringstream out;
      // generate initialization code for the broadcasting of the bias tensor
      if (fBroadcastBias) {
         // include a separate scope to avoid defining unique operator temp variables
         std::vector<size_t> shape(fDim + 1, 1);
         // the bias is a 1D tensor
         shape[0] = fShapeB[0];
         std::vector<Dim> targetShape(fShapeY.begin() + 1, fShapeY.end());
         out << "//--- broadcast bias tensor " << fNB << " for Conv op if needed\n";
         // in case of dynamic tensors the check needs to be done at run time
         auto length = ConvertDimShapeToLength(targetShape); // output size per batch, as an expression
         bool isOutDynamic = ConvertShapeToInt(targetShape).empty();
         if (isOutDynamic)
            out << SP << "if (" << length << " > " << ConvertShapeToLength(shape) << ") {\n";
         else
            out << SP << "{\n";
         out << SP << SP << "float * data = TMVA::Experimental::SOFIE::UTILITY::UnidirectionalBroadcast<float>(tensor_"
             << fNB << ", " << ConvertShapeToString(shape) << ", " << ConvertShapeToString(fShapeY) << ");\n";
         out << SP << SP << "fTensor_" << fNB << ".resize(" << length << ");\n";
         out << SP << SP << "std::copy(data, data + " << length << ", fTensor_" << fNB << ".begin());\n";
         out << SP << SP << "tensor_" << fNB << " = fTensor_" << fNB << ".data();\n";
         out << SP << SP << "delete[] data;\n";
         out << SP << "}\n";
      }
      return out.str();
   }

   std::string Generate(std::string OpName) override {
      OpName = "op_" + OpName;

      if (fShapeX.empty() || fShapeW.empty() || (fNB != "" && fShapeB.empty()) || fShapeY.empty()) {
         throw
            std::runtime_error("TMVA SOFIE Conv Op called to Generate without being initialized first");
      }

      std::stringstream out;
      auto bsize = fShapeX[0];
      size_t kDepth = (fDim > 2) ? fShapeW[2] : 1;        // kernel depth
      size_t kHeight = (fDim > 1) ? fShapeW[fDim] : 1;    // kernel height
      size_t kWidth = fShapeW[fDim + 1];                  // kernel width
      auto iDepth = (fDim > 2) ? fShapeX[2] : Dim{1};     // input depth
      auto iHeight = (fDim > 1) ? fShapeX[fDim] : Dim{1}; // input height
      auto iWidth = fShapeX[fDim + 1];                    // input width
      auto oDepth = (fDim > 2) ? fShapeY[2] : Dim{1};     // output depth
      auto oHeight = (fDim > 1) ? fShapeY[fDim] : Dim{1}; // output height
      auto oWidth = fShapeY[fDim + 1];                    // output width
      // total output size for one channel
      auto outputChannelStride = ConvertDimShapeToLength(std::vector<Dim>{oDepth, oHeight, oWidth}); // size of channel = D * H * W
      auto outputBatchStride = ConvertDimShapeToLength(std::vector<Dim>{fShapeY[1], oDepth, oHeight, oWidth}); // size of C * D * H * W
      // input sizes
      auto inputChannelStride = ConvertDimShapeToLength(std::vector<Dim>{iDepth, iHeight, iWidth}); // size of channel = D * H * W
      auto inputBatchStride = ConvertDimShapeToLength(std::vector<Dim>{fShapeX[1], iDepth, iHeight, iWidth}); // size of C * D * H * W

      out << "\n//---- operator Conv " << OpName << "\n";

      // vectorize the (dilated) convolution kernels into a matrix;
      // no need to transpose the matrix (still to be checked for the 1d and 3d cases)

      size_t id = (fDim > 2) ? fDim - 3 : 2;
      size_t ih = (fDim > 1) ? fDim - 2 : 1;
      size_t iw = fDim - 1;

      size_t wstrideDil = fAttrDilations[iw];
      size_t hstride = kWidth;
      size_t hstrideDil = fAttrDilations[ih] * fAttrKernelShape[iw]; // dilated stride in the height
      size_t dstride = kHeight * kWidth;
      size_t dstrideDil = fAttrDilations[id] * fAttrKernelShape[ih] * fAttrKernelShape[iw]; // dilated stride in the depth
      size_t icstride = kHeight * kWidth * kDepth;
      size_t icstrideDil = fAttrKernelShape[0] * fAttrKernelShape[1] * fAttrKernelShape[2]; // dilated stride per input channel
      size_t ocstride = fShapeW[1] * icstride;
      size_t ocstrideDil = fShapeW[1] * icstrideDil;
      out << SP << "for (std::size_t oc = 0; oc < " << fShapeW[0] << "; oc++) {\n";
      out << SP << SP << "for (std::size_t ic = 0; ic < " << fShapeW[1] << "; ic++) {\n";
      if (fDim > 2)
         out << SP << SP << SP << "for (std::size_t kd = 0; kd < " << kDepth << "; kd++) {\n";
      if (fDim > 1)
         out << SP << SP << SP << "for (std::size_t kh = 0; kh < " << kHeight << "; kh++) {\n";
      out << SP << SP << SP << SP << "for (std::size_t kw = 0; kw < " << kWidth << "; kw++) {\n";

      out << SP << SP << SP << SP << SP << "tensor_" << fNX << "_f[oc * "
          << ocstrideDil << " + ic * " << icstrideDil;
      if (fDim > 2) out << " + kd * " << dstrideDil;
      if (fDim > 1) out << " + kh * " << hstrideDil;
      out << " + kw * " << wstrideDil << " ] = tensor_" << fNW << "[oc * " << ocstride << " + ic * " << icstride;
      if (fDim > 2) out << " + kd * " << dstride;
      if (fDim > 1) out << " + kh * " << hstride;
      out << " + kw ];\n";

      out << SP << SP << SP << SP << "}\n";
      if (fDim > 1) out << SP << SP << SP << "}\n";
      if (fDim > 2) out << SP << SP << SP << "}\n";
      out << SP << SP << "}\n";
      out << SP << "}\n";

      //out << SP << "char " << OpName << "_transA = 'T';\n";
      out << SP << "char " << OpName << "_transA = 'N';\n";
      out << SP << "char " << OpName << "_transB = 'N';\n";
      out << SP << "int " << OpName << "_m = " << outputChannelStride << ";\n"; // output size per channel (D*H*W)
      assert(fShapeY[1] == fShapeW[0]);
      //assert(fShapeW[1] == fShapeX[1] / fAttrGroup);
      out << SP << "int " << OpName << "_n = " << fShapeW[0] << ";\n"; // output channels
      out << SP << "int " << OpName << "_k = " << fShapeW[1] * fAttrKernelShape[0] * fAttrKernelShape[1] * fAttrKernelShape[2] << ";\n";
      out << SP << "float " << OpName << "_alpha = 1.0;\n";
      out << SP << "float " << OpName << "_beta = 0.0;\n";
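      // the sgemm_ call emitted below computes, in BLAS column-major convention,
      //   Y(m x n) = Xcol(m x k) * F(k x n)
      // with m = output D*H*W, n = output channels and k = input channels * kernel size;
      // in row-major terms this is the product F * Xcol, one output image per channel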

      // loop on the batch size
      out << SP << "for (size_t n = 0; n < " << bsize << "; n++) {\n";

      // IM2COL: unroll the input tensor
      // order the input data as (e.g. for a 2x2 kernel), where (xa,ya) is channel 1 and (xb,yb) is channel 2:
      //   (xa1,..,xak,ya1,..,yak)(xb1,...,xbk,yb1,..,ybk)
      //   (xa2,..,xak+1,ya2,..,yak+1)(......)
      // the trick for speed is to use the caffe im2col and output a matrix which contains the filtered values as rows:
      // this way one has consecutive memory reads and writes.
      // The resulting matrix op_xcol is (input channels * filter_h * filter_w, output_h * output_w)
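      // e.g. a single-channel 3x3 input with a 2x2 kernel, stride 1 and no padding gives
      // a 4 x 4 op_xcol matrix: 4 kernel positions, each holding the 4 input values the
      // kernel sees at that position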
      if (fDim == 1) {
         if (fAttrPads[0] != fAttrPads[1]) {
            std::cout << "TMVA SOFIE Operator Conv: asymmetric padding not supported. Assuming an average padding"
                      << std::endl;
            fAttrPads[0] = (fAttrPads[0] + fAttrPads[1]) / 2;
         }
         fAttrPads[1] = 0;
         fAttrStrides[1] = 1;
      }
      if (fDim == 2) {
         if (fAttrPads[0] != fAttrPads[2] || fAttrPads[1] != fAttrPads[3]) {
            std::cout << "TMVA SOFIE Operator Conv: asymmetric padding not supported. Assuming an average padding" << std::endl;
            fAttrPads[0] = (fAttrPads[0] + fAttrPads[2]) / 2;
            fAttrPads[1] = (fAttrPads[1] + fAttrPads[3]) / 2;
         }
      }
      if (fDim == 3) {
         if (fAttrPads[0] != fAttrPads[3] || fAttrPads[1] != fAttrPads[4] || fAttrPads[2] != fAttrPads[5]) {
            std::cout << "TMVA SOFIE Operator Conv: asymmetric padding not supported. Assuming an average padding" << std::endl;
            fAttrPads[0] = (fAttrPads[0] + fAttrPads[3]) / 2;
            fAttrPads[1] = (fAttrPads[1] + fAttrPads[4]) / 2;
            fAttrPads[2] = (fAttrPads[2] + fAttrPads[5]) / 2;
         }
      }
      out << SP << SP << "size_t out_offset = n * " << outputBatchStride << ";\n";

      if (fAttrGroup == 1) {
         out << SP << SP << "size_t x_offset = n * " << inputBatchStride << ";\n";
         // when using im2col the resulting matrix is transposed: its dimension is
         // (input_c * filter_h * filter_w, output_h * output_w)
         if (fDim < 3) {
            out << SP << SP << "TMVA::Experimental::SOFIE::UTILITY::Im2col<float>(tensor_" << fNX
                << " + x_offset,"
                // channels, height, width, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
                // dilation_h, dilation_w
                << fShapeW[1] << "," << iHeight << "," << iWidth << ",";
            if (fDim == 1)
               out << "1, " << fAttrKernelShape[0] << ",0," << fAttrPads[0] << ",1," << fAttrStrides[0] << ",1,"
                   << fAttrDilations[0];
            else // fDim == 2
               out << fAttrKernelShape[0] << "," << fAttrKernelShape[1] << "," << fAttrPads[0] << "," << fAttrPads[1]
                   << "," << fAttrStrides[0] << "," << fAttrStrides[1] << "," << fAttrDilations[0] << ","
                   << fAttrDilations[1];
            out << "," << "tensor_" << fNX << "_xcol);\n\n ";
         } else {
            // 3d im2col
            out << SP << SP << "TMVA::Experimental::SOFIE::UTILITY::Im2col_3d<float>(tensor_" << fNX
                << " + x_offset,"
                // channels, d, h, w, k_d, k_h, k_w, pad_d, pad_h, pad_w, stride_d, stride_h, stride_w,
                // dilation_d, dilation_h, dilation_w
                << fShapeW[1] << "," << iDepth << "," << iHeight << "," << iWidth << ","
                << fAttrKernelShape[0] << "," << fAttrKernelShape[1] << "," << fAttrKernelShape[2] << ","
                << fAttrPads[0] << "," << fAttrPads[1] << "," << fAttrPads[2] << ","
                << fAttrStrides[0] << "," << fAttrStrides[1] << "," << fAttrStrides[2] << ","
                << fAttrDilations[0] << "," << fAttrDilations[1] << "," << fAttrDilations[2] << ","
                << "tensor_" << fNX << "_xcol);\n\n ";
         }
         // BLAS
         out << SP << SP << "BLAS::sgemm_(&" << OpName << "_transA, &" << OpName << "_transB, &" << OpName << "_m, &"
             << OpName << "_n, &" << OpName << "_k, &" << OpName << "_alpha, " << "tensor_" << fNX << "_xcol, &" << OpName
             << "_m,\n"; // use m if op_xcol is not transposed, otherwise k
         out << SP << SP << SP << "tensor_" << fNX << "_f, &" << OpName << "_k, &" << OpName << "_beta, tensor_" << fNY
             << " + out_offset, &" << OpName << "_m);\n";
      } else {
         // case of group convolution:
         // unroll (im2col) the input tensor, looping on the groups and repeating
         // the operations (im2col + GEMM) for each group
         // out << SP << SP << "size_t out_offset = n * " << fShapeY[1] * oDepth * oHeight * oWidth << ";\n";
         out << SP << SP << "for (size_t g = 0; g < " << fAttrGroup << "; g++) {\n";
         out << SP << SP << "size_t x_offset = n * " << inputBatchStride << " + g * "
             << fShapeW[1] << " * " << inputChannelStride << ";\n ";
         out << SP << SP << "size_t out_offset = n * " << outputBatchStride << " + g * "
             << fShapeW[0] << " * (" << outputChannelStride << ") / " << fAttrGroup << ";\n ";

         if (fDim < 3) {
            out << SP << SP << "TMVA::Experimental::SOFIE::UTILITY::Im2col<float>(tensor_" << fNX
                << " + x_offset,"
                // channels, height, width, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
                // dilation_h, dilation_w
                << fShapeW[1] << "," << iHeight << "," << iWidth << ",";
            if (fDim == 1)
               out << "1, " << fAttrKernelShape[0] << ",0," << fAttrPads[0] << ",1," << fAttrStrides[0] << ",1,"
                   << fAttrDilations[0];
            else // fDim == 2
               out << fAttrKernelShape[0] << "," << fAttrKernelShape[1] << "," << fAttrPads[0] << "," << fAttrPads[1]
                   << "," << fAttrStrides[0] << "," << fAttrStrides[1] << "," << fAttrDilations[0] << ","
                   << fAttrDilations[1];
            out << ", tensor_" << fNX << "_xcol);\n\n ";
         } else {
            // 3d im2col
            out << SP << SP << "TMVA::Experimental::SOFIE::UTILITY::Im2col_3d<float>(tensor_" << fNX
                << " + x_offset,"
                // channels, d, h, w, k_d, k_h, k_w, pad_d, pad_h, pad_w, stride_d, stride_h, stride_w,
                // dilation_d, dilation_h, dilation_w
                << fShapeW[1] << "," << iDepth << "," << iHeight << "," << iWidth << "," << fAttrKernelShape[0] << ","
                << fAttrKernelShape[1] << "," << fAttrKernelShape[2] << "," << fAttrPads[0] << "," << fAttrPads[1]
                << "," << fAttrPads[2] << "," << fAttrStrides[0] << "," << fAttrStrides[1] << "," << fAttrStrides[2]
                << "," << fAttrDilations[0] << "," << fAttrDilations[1] << "," << fAttrDilations[2] << ",tensor_" << fNX
                << "_xcol);\n\n ";
         }

         // BLAS
         // n must be divided by the number of groups
         out << SP << SP << SP << OpName << "_n = " << fShapeW[0] / fAttrGroup << ";\n";
         // the offset for group g in the unrolled filter tensor must be g * k * n
         out << SP << SP << SP << "size_t offset_f = g * "
             << fShapeW[0] * fShapeW[1] * fAttrKernelShape[0] * fAttrKernelShape[1] * fAttrKernelShape[2] / fAttrGroup
             << ";\n";
         out << SP << SP << "BLAS::sgemm_(&" << OpName << "_transA, &" << OpName << "_transB, &" << OpName << "_m, &"
             << OpName << "_n, &" << OpName << "_k, &" << OpName << "_alpha, tensor_" << fNX << "_xcol, &" << OpName
             << "_m,\n"; // use m if op_xcol is not transposed, otherwise k
         out << SP << SP << SP << "tensor_" << fNX << "_f + offset_f, &" << OpName << "_k, &" << OpName << "_beta, tensor_" << fNY
             << " + out_offset"
             << ", &" << OpName << "_m);\n";

         out << SP << SP << "}\n"; // end of group loop
      }

      if (fNB != "") {
         out << SP << "int " << OpName << "_size = " << outputBatchStride << ";\n";
         out << SP << "float " << OpName << "_gamma = 1.0;\n";
         out << SP << "int " << OpName << "_incx = 1;\n";
         out << SP << "int " << OpName << "_incy = 1;\n";

         out << SP << "BLAS::saxpy_(&" << OpName << "_size, &" << OpName << "_gamma, tensor_" << fNB << ", &"
             << OpName << "_incx, tensor_" << fNY << " + out_offset, &" << OpName << "_incy);\n";
      }
      out << SP << "}\n"; // end of batch size loop

      return out.str();
   }

   /*! \brief Returns the BLAS routines needed to compile the generated code
    */
   std::vector<std::string> GetBlasRoutines() override { return { std::string("Gemm"), std::string("Axpy") }; }
};

} // namespace SOFIE
} // namespace Experimental
} // namespace TMVA

#endif
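
A minimal usage sketch (not part of the header): the snippet below shows how a parser might construct this operator for a 2D convolution with a bias tensor. The tensor names "X", "W", "B", "Y" and the attribute values are illustrative assumptions; in ROOT these are normally supplied by the ONNX parser rather than written by hand.

#include "TMVA/ROperator_Conv.hxx"

#include <memory>
#include <vector>

using namespace TMVA::Experimental::SOFIE;

// Hypothetical construction of a float 2D Conv operator with a bias tensor.
std::unique_ptr<ROperator> MakeConv2D()
{
   return std::make_unique<ROperator_Conv<float>>(
      "NOTSET",                        // autopad attribute
      std::vector<size_t>{1, 1},       // dilations
      1,                               // group
      std::vector<size_t>{3, 3},       // kernel shape (3x3)
      std::vector<size_t>{1, 1, 1, 1}, // pads (begin_h, begin_w, end_h, end_w)
      std::vector<size_t>{1, 1},       // strides
      "X", "W", "B", "Y");             // input, weight, bias and output tensor names
}

// The owning RModel then calls Initialize(model) to run shape inference and register
// the helper tensors, and Generate(opName) to emit the im2col + BLAS sgemm code.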