Logo ROOT  
Reference Guide
 
Loading...
Searching...
No Matches
ROperator_BasicBinary.hxx
Go to the documentation of this file.
1#ifndef TMVA_SOFIE_ROperator_BasicBinary
2#define TMVA_SOFIE_ROperator_BasicBinary
3
5#include "TMVA/ROperator.hxx"
6#include "TMVA/RModel.hxx"
7
8#include <sstream>
9
10namespace TMVA {
11namespace Experimental {
12namespace SOFIE {
13
// List of element-wise binary operations supported by ROperator_BasicBinary.
// NOTE(review): the enum declaration was lost in this Doxygen extraction
// (original line 14); reconstructed here from the trait specializations
// below -- confirm the value order against the upstream header.
enum EBasicBinaryOperator { Add, Sub, Mul, Div, Pow, Mod, FMod };

// Primary trait template. Only the per-operation specializations below are
// ever instantiated; the primary template is intentionally left empty.
// NOTE(review): the primary-template struct line was elided in this listing
// (original line 17) and is reconstructed here.
template <typename T, EBasicBinaryOperator Op1>
struct BinaryOperatorTrait {};
19template <typename T>
21 static const std::string Name() { return "Add"; }
22 static std::string Op(const std::string &t1, const std::string t2) { return t1 + " + " + t2; }
23 static T Func(T t1, T t2) { return t1 + t2; }
24};
25
26template <typename T>
28 static const std::string Name() { return "Sub"; }
29 static std::string Op(const std::string &t1, const std::string t2) { return t1 + " - " + t2; }
30 static T Func(T t1, T t2) { return t1 - t2; }
31};
32
33template <typename T>
35 static const std::string Name() { return "Mul"; }
36 static std::string Op(const std::string &t1, const std::string t2) { return t1 + " * " + t2; }
37 static T Func(T t1, T t2) { return t1 * t2; }
38};
39
40template <typename T>
42 static const std::string Name() { return "Div"; }
43 static std::string Op(const std::string &t1, const std::string t2) { return t1 + " / " + t2; }
44 static T Func(T t1, T t2) { return t1 / t2; }
45};
46
47template <typename T>
49 static const std::string Name() { return "Pow"; }
50 static std::string Op(const std::string &t1, const std::string t2) { return "std::pow(" + t1 + "," + t2 + ")"; }
51 static T Func(T t1, T t2) { return std::pow(t1, t2); }
52};
53template <typename T>
55 static const std::string Name() { return "Mod"; }
56 static std::string Op(const std::string & t1, const std::string t2) { return "(" + t1 + " % " + t2 + ")"; }
57 static T Func(T t1, T t2) { return t1 % t2; }
58};
59template <typename T>
61 static const std::string Name() { return "FMod"; }
62 static std::string Op(const std::string & t1, const std::string t2) { return "std::fmod(" + t1 + "," + t2 + ")"; }
63 static T Func(T t1, T t2) { return std::fmod(t1, t2); }
64};
65
66 template <typename T, EBasicBinaryOperator Op>
// NOTE(review): the class declaration line (original line 67, presumably
// "class ROperator_BasicBinary final : public ROperator {") was lost in this
// extraction -- restore from the upstream header.
68private:
// NOTE(review): original line 69 is missing here; it declares the broadcast
// flag member (referred to below as `fBroadcastFlag`) set by Initialize()
// and read by Generate() -- confirm its exact type upstream.
// Sanitized names of the two input tensors (A, B).
70 std::string fNA;
71 std::string fNB;
// Names of the broadcast copies of A/B, created only when an input must be
// broadcast to the output shape; empty when no broadcast copy exists.
72 std::string fNBroadcastedA;
73 std::string fNBroadcastedB;
// Sanitized name of the output tensor Y.
74 std::string fNY;
75
// Static (integer) shapes, filled when the input shapes are fully known.
76 std::vector<size_t> fShapeA;
77 std::vector<size_t> fShapeB;
78 std::vector<size_t> fShapeY;
79
// Dynamic shapes (Dim carries either a fixed size or a symbolic parameter),
// used when at least one input has a dynamic shape.
80 std::vector<Dim> fDimShapeA;
81 std::vector<Dim> fDimShapeB;
82 std::vector<Dim> fDimShapeY;
83
84public:
// Construct the binary operator from the ONNX node's input/output tensor
// names. Names are sanitized via UTILITY::Clean_name so they can be embedded
// in generated C++ identifiers.
86 ROperator_BasicBinary(std::string nameA, std::string nameB, std::string nameY)
87 : fNA(UTILITY::Clean_name(nameA)), fNB(UTILITY::Clean_name(nameB)), fNY(UTILITY::Clean_name(nameY))
88 {
// NOTE(review): the constructor body lines (original 89-90) are missing from
// this listing -- presumably they populate fInputTensorNames /
// fOutputTensorNames; confirm against the upstream header.
91 }
92
93 // type of output given input
94 std::vector<ETensorType> TypeInference(std::vector<ETensorType> input) override { return input; }
95
96 // shape of output tensors given input tensors
97 std::vector<std::vector<size_t>> ShapeInference(std::vector<std::vector<size_t>> input) override
98 {
99 // assume now inputs have same shape (no broadcasting)
100 auto ret = std::vector<std::vector<size_t>>(1, input[0]); // return vector size 1 with first input
101 return ret;
102 }
103
// Resolve input shapes and register the output tensor with the model.
// Three regimes are handled:
//   * both shapes known and both inputs constant -> fold the operation at
//     model-build time and register Y as a constant tensor;
//   * rank<=1 INT64 shape tensors -> propagate (possibly symbolic) shape
//     values and register Y as a shape tensor;
//   * otherwise (dynamic or non-constant inputs) -> record the shapes and
//     leave the computation to the code emitted by Generate().
// NOTE(review): this Doxygen listing dropped a number of source lines
// (115, 119, 123, 126, 140, 143, 159, 163-165, 181, 184-186, 189, 199,
// 203-204, 219, 221, 239, 241, 248, 266, 273, 277, 314, 317); names such as
// `ret` and `lengthY` are declared on those missing lines -- restore this
// function from the upstream header before editing it.
104 void Initialize(RModel &model) override
105 {
106 // input must be a graph input, or already initialized intermediate tensor
107 if (!model.CheckIfTensorAlreadyExist(fNA)) {
108 throw std::runtime_error(std::string("TMVA SOFIE Binary Op Input Tensor ") + fNA + "is not found in model");
109 }
110 if (!model.CheckIfTensorAlreadyExist(fNB)) {
111 throw std::runtime_error(std::string("TMVA SOFIE Binary Op Input Tensor ") + fNB + "is not found in model");
112 }
// Bitmask recording which inputs have dynamic shapes: bit 0 = A, bit 1 = B.
113 int dynamicInputs = 0;
114 if (model.IsDynamicTensor(fNA)) {
// NOTE(review): missing line 115 presumably fills fDimShapeA from the model.
116 dynamicInputs |= 1;
117 } else {
118 fShapeA = model.GetTensorShape(fNA);
// NOTE(review): missing line 119 presumably converts fShapeA to fDimShapeA.
120 }
121 if (model.IsDynamicTensor(fNB)) {
122 dynamicInputs |= 2;
124 } else {
125 fShapeB = model.GetTensorShape(fNB);
127 }
128 if (dynamicInputs & 1 && model.Verbose())
129 std::cout << BinaryOperatorTrait<T, Op>::Name() << " : input " << fNA << " is dynamic "
130 << ConvertDimShapeToString(fDimShapeA) << std::endl;
131 if (dynamicInputs & 2 && model.Verbose())
132 std::cout << BinaryOperatorTrait<T, Op>::Name() << " : input " << fNB << " is dynamic "
133 << ConvertDimShapeToString(fDimShapeB) << std::endl;
134
135 // check if need to broadcast at initialization time if shapes are known and different
136 // (we could broadcast the tensor to the maximum values of dynamic shapes - to be done)
137 // case of known shapes
138 // if shapes are known find the output shape from broadcasting
139 if (dynamicInputs == 0) {
// NOTE(review): missing line 140 defines `ret` (presumably the result of a
// multidirectional-broadcast shape computation: a {flag, shape} pair).
141 fBroadcastFlag = ret.first;
142 fShapeY = ret.second;
// NOTE(review): missing line 143 presumably defines `lengthY` from fShapeY.
144 if (model.IsConstantTensor(fNA) && model.IsConstantTensor(fNB)) {
145 bool broadcast = fBroadcastFlag > 0;
146 if (broadcast) {
147 // Y is the common shape of A and B
148 bool broadcastA = fBroadcastFlag & 2;
149 bool broadcastB = fBroadcastFlag & 1;
150 // Broadcast A to Y
151 if (broadcastA) {
152 fNBroadcastedA = "Broadcasted" + fNA + "to" + fNY;
153 auto data = model.GetInitializedTensorData(fNA);
154 std::shared_ptr<void> broadcastedData(
155 UTILITY::UnidirectionalBroadcast(static_cast<T *>(data.get()), fShapeA, fShapeY),
156 std::default_delete<T[]>());
157 if (model.Verbose())
158 std::cout << "broadcasted data A " << ConvertShapeToString(fShapeY) << " : "
160 static_cast<T *>(broadcastedData.get()))
161 << std::endl;
162 // Update the data and the shape of A
// NOTE(review): missing lines 163-165 presumably update tensor A in the
// model with the broadcasted data/shape.
166 }
167 // Broadcast B to Y
168 if (broadcastB) {
169 fNBroadcastedB = "Broadcasted" + fNB + "to" + fNY;
170 auto data = model.GetInitializedTensorData(fNB);
171 if (model.Verbose())
172 std::cout << "data B " << ConvertShapeToString(fShapeB) << " : "
173 << ConvertValuesToString(ConvertShapeToLength(fShapeB), static_cast<T *>(data.get()))
174 << std::endl;
175 std::shared_ptr<void> broadcastedData(
176 UTILITY::UnidirectionalBroadcast(static_cast<T *>(data.get()), fShapeB, fShapeY),
177 std::default_delete<T[]>());
178 // do not update tensor B but add broadcasted one (since it can be input to some other operators)
179 if (model.Verbose())
180 std::cout << "broadcasted data B " << ConvertShapeToString(fShapeY) << " : "
182 static_cast<T *>(broadcastedData.get()))
183 << std::endl;
// NOTE(review): missing lines 184-186 presumably register the broadcasted
// copy of B as a new constant tensor in the model.
187 }
188 } else {
190 }
191 // tensors are constant: perform here the binary operation
192
// Use the broadcasted copies when they exist, the originals otherwise.
193 const std::string &nameA = fNBroadcastedA.empty() ? fNA : fNBroadcastedA;
194 const std::string &nameB = fNBroadcastedB.empty() ? fNB : fNBroadcastedB;
195 auto dataA = static_cast<T *>(model.GetInitializedTensorData(nameA).get());
196 auto dataB = static_cast<T *>(model.GetInitializedTensorData(nameB).get());
197 std::vector<T> dataY(lengthY);
198 for (size_t i = 0; i < dataY.size(); i++) {
// NOTE(review): missing line 199 presumably computes
// dataY[i] = BinaryOperatorTrait<T, Op>::Func(dataA[i], dataB[i]).
200 }
201 model.AddConstantTensor<T>(fNY, fShapeY, dataY.data());
202 // flag tensors to not be written in the generated code or weight file
// NOTE(review): missing lines 203-204 presumably call
// model.SetNotWritableInitializedTensor on the folded inputs.
205 fIsOutputConstant = true;
206 if (model.Verbose()) {
207 std::cout << BinaryOperatorTrait<T, Op>::Name() << " : " << fNA << " " << ConvertShapeToString(fShapeA)
208 << " , " << fNB << " " << ConvertShapeToString(fShapeB) << " ---> " << fNY << " "
209 << ConvertShapeToString(fShapeY) << " : " << ConvertValuesToString(dataY) << std::endl;
210 }
211 } else if (((model.IsShapeTensor(fNA) && model.IsShapeTensor(fNB)) ||
212 (model.IsShapeTensor(fNA) && model.IsConstantTensor(fNB)) ||
213 (model.IsShapeTensor(fNB) && model.IsConstantTensor(fNA)))
214 && (fShapeA.size() <=1 && fShapeB.size() <=1 && model.GetTensorType(fNA) == ETensorType::INT64)) {
215 // case of shape tensors ( tensors are of rank 0 or 1 )
216 std::vector<Dim> dimValA;
217 std::vector<Dim> dimValB;
// NOTE(review): missing lines 219/221 presumably fetch the symbolic shape
// values for A and B (e.g. via model.GetShapeTensorValues).
218 if (model.IsShapeTensor(fNA))
220 if (model.IsShapeTensor(fNB))
222 // adjust for broadcasting - repeat values until it reaches shapes of Y
223 if (!fShapeY.empty() && fShapeY[0] > 1) {
224 if (dimValA.size() == 1) dimValA = std::vector<Dim>( fShapeY[0], dimValA[0]);
225 if (dimValB.size() == 1) dimValB = std::vector<Dim>( fShapeY[0], dimValB[0]);
226 }
227
// Helper: read an INT64 constant tensor and expand it (broadcasting scalars)
// into a vector of Dim values of length lengthY.
228 auto convertDataToDim = [&](const std::string & name, const std::vector<size_t> & shape, std::vector<Dim> & dimValues) {
229 auto data = static_cast<int64_t *>(model.GetInitializedTensorData(name).get());
230 dimValues.resize(lengthY);
231 for (size_t i = 0; i < lengthY; i++) {
232 if (!shape.empty() && lengthY == shape[0])
233 dimValues[i] = Dim{ static_cast<size_t>(data[i])};
234 else // case dataA is a scalar
235 dimValues[i] = Dim{ static_cast<size_t>(data[0])};
236 }
237 };
// NOTE(review): missing lines 239/241 presumably invoke convertDataToDim for
// whichever input is the constant tensor.
238 if (model.IsConstantTensor(fNA)) {
240 } else if (model.IsConstantTensor(fNB)) {
242 }
243
244 //perform binary operations on shape tensors
245 std::vector<Dim> dimValY(lengthY);
246 for (size_t i = 0; i < lengthY; i++) {
// Both operands numeric: fold to a concrete dimension value.
247 if (!dimValA[i].isParam && !dimValB[i].isParam) {
// NOTE(review): missing line 248 presumably computes the numeric result `d`.
249 dimValY[i] = Dim{d};
250 } else {
// At least one operand symbolic: keep a symbolic expression string.
251 auto res = BinaryOperatorTrait<T, Op>::Op(dimValA[i].GetVal(), dimValB[i].GetVal());
252 dimValY[i] = Dim{res, static_cast<size_t>(-1)};
253 }
254 }
255 model.AddShapeTensor(fNY,dimValY, fShapeY.empty()); // cannot be a scalar
256 if (model.Verbose()) {
257 std::cout << BinaryOperatorTrait<T, Op>::Name() << " : " << fNA << " " << ConvertShapeToString(fShapeA)
258 << " , " << fNB << " " << ConvertShapeToString(fShapeB) << " ---> " << fNY << " "
259 << ConvertShapeToString(fShapeY) << " : " << ConvertDimShapeToString(dimValY) << " (shape)" << std::endl;
260 }
261 // no code needs to be generated (flag this as a constant output tensor)
262 fIsOutputConstant = true;
263
264 } else {
265 // case of defined and non-constant tensors
// NOTE(review): missing line 266 presumably registers the intermediate
// output tensor Y with the model.
267 if (model.Verbose()) {
268 std::cout << BinaryOperatorTrait<T, Op>::Name() << " : " << fNA << " " << ConvertShapeToString(fShapeA)
269 << " , " << fNB << " " << ConvertShapeToString(fShapeB) << " ---> " << fNY << " "
270 << ConvertShapeToString(fShapeY) << std::endl;
271 }
272 // we convert non-dim shapes to Dim shapes
274 }
275 } else {
276 // case A or B have dynamic shapes. We need to broadcast if shape are not same
// NOTE(review): missing line 277 defines `ret` (presumably the
// multidirectional broadcast of the two Dim shapes).
278 fBroadcastFlag = ret.first;
279 fDimShapeY = ret.second;
280 // case of all parametric shapes and MultiDirectionalBroadcastShape return the max of the 2
281 // need to do before we declare the output tensor shape and the broadcasted ones
282 if (ret.first & 4) {
283 // check if one of the parameter is an input dimension
284 // define function to find this
// Returns true when `p` names a dimension parameter of one of the model's
// graph inputs.
285 auto IsInputDimParam = [&](const std::string &p) {
286 auto inputNames = model.GetInputTensorNames();
287 for (auto &input : inputNames) {
288 for (auto &i_s : model.GetDimTensorShape(input)) {
289 if (i_s.isParam && i_s.param == p)
290 return true;
291 }
292 }
293 return false;
294 };
// Replace "std::max(...)" placeholder dimensions by the concrete input
// parameter when one side is known to come from a graph input.
295 for (size_t i = 0; i < fDimShapeY.size(); i++) {
296 auto &s = fDimShapeY[i];
297 if (s.isParam && s.param.find("std::max") != std::string::npos) {
298 if (IsInputDimParam(fDimShapeA[i].param)) {
299 // case dim is 1 we indicate that the input parameter is equal to 1
300 if (fDimShapeA[i].dim != 1)
301 s = fDimShapeA[i];
302 else
303 s = fDimShapeB[i];
304 } else if (IsInputDimParam(fDimShapeB[i].param)) {
305 if (fDimShapeB[i].dim != 1)
306 s = fDimShapeB[i];
307 else
308 s = fDimShapeA[i];
309 }
310 }
311 }
312 }
313
// NOTE(review): missing line 314 presumably registers the intermediate
// output tensor with the dynamic shape fDimShapeY; missing line 317 is the
// continuation of the verbose printout below.
315 if (model.Verbose()) {
316 std::cout << BinaryOperatorTrait<T, Op>::Name() << " : " << ConvertDimShapeToString(fDimShapeA) << " , "
318 }
319 }
320 }
321
322 std::string GenerateInitCode() override
323 {
324 std::stringstream out;
325 return out.str();
326 }
327
// Generate the inference code for the element-wise binary operation.
// Emits (optionally) runtime broadcast-compatibility checks, then nested
// loops over the output dimensions, indexing the two inputs with
// broadcasting-aware stride expressions.
// NOTE(review): this Doxygen listing dropped several source lines (331,
// 338-339, 346-347, 377-381). The names `lengthA`, `lengthB`, `stridesA`,
// `stridesB`, `stridesY` and `compute_idx_A/B/Y` used below are declared on
// those missing lines (presumably via ConvertDimShapeToLength /
// ComputeStrideFromShape) -- restore this function from the upstream header
// before editing it.
328 std::string Generate(std::string opName) override
329 {
330
// NOTE(review): missing line 331 is presumably the guard
// "if (fIsOutputConstant)" preceding this early return -- confirm upstream.
332 return "";
333
334 opName = "op_" + opName;
335
336 std::stringstream out;
337 out << SP << "\n//------ " << opName << " " << BinaryOperatorTrait<T, Op>::Name() << " --> "
340 std::string typeName = TensorType<T>::Name();
341
342 // we need to check if we can broadcast (case flag has bit 4 set)
343
344 if (fBroadcastFlag & 4) {
345 // need to check if shapes are the same
// Emit a runtime guard: checks only run when the flattened lengths differ.
348 out << SP << "if (" << lengthA << "!=" << lengthB << ") {\n";
349 // check if A->B or B->A
350 // bool broadcastable = true;
351 for (size_t i = 0; i < fDimShapeY.size(); i++) {
352 if (fBroadcastFlag & 5 && fDimShapeY[i] == fDimShapeA[i] && fDimShapeA[i].dim > 1 &&
353 fDimShapeB[i].isParam) {
354 // B->A B[i] needs to be 1
355 out << SP << SP << "if (" << fDimShapeB[i] << "!= 1)\n";
356 out << SP << SP << SP << "throw std::runtime_error(\"SOFIE - Cannot broadcast B->A in operator "
357 << opName << "\");\n";
358 }
359 if (fBroadcastFlag & 6 && fDimShapeY[i] == fDimShapeB[i] && fDimShapeB[i].dim > 1 &&
360 fDimShapeA[i].isParam) {
361 // A-> B A[i] needs to be 1
362 out << SP << SP << "if (" << fDimShapeA[i] << "!= 1)\n";
363 out << SP << SP << SP << "throw std::runtime_error(\"SOFIE - Cannot broadcast A->B in operator "
364 << opName << "\");\n";
365 } else if (fDimShapeA[i].isParam && fDimShapeB[i].isParam) {
366 // both shapes are parametric and we broadcast to maximum
367 // we allocate here output vector
368 out << SP << SP << "if (" << fDimShapeA[i] << " != " << fDimShapeB[i] << " && (" << fDimShapeA[i]
369 << " != 1 || " << fDimShapeB[i] << " != 1))\n";
370 out << SP << SP << SP << "throw std::runtime_error(\"SOFIE - Cannot broadcast shapes in operator " << opName
371 << "\");\n";
372 }
373 }
374 out << SP << "}\n";
375 }
376
// NOTE(review): missing lines 377-381 presumably declare the stride vectors
// (stridesA/B/Y) and the index-expression strings compute_idx_A/B/Y.
380
// Build the flattened-index expression for input A; dimensions of extent 1
// are skipped (broadcast), and A's trailing dimensions are aligned with Y's.
382 if (fDimShapeA.empty() ||
383 std::all_of(fDimShapeA.begin(), fDimShapeA.end(), [](Dim d) { return d.dim == 1 || d.GetVal() == "1"; })) {
384 compute_idx_A = "0";
385 } else {
386 for (size_t i = 0; i < fDimShapeA.size(); ++i) {
387 if (fDimShapeA[i].dim == 1 || fDimShapeA[i].GetVal() == "1")
388 continue;
389 compute_idx_A += "idx_" + std::to_string(i + (fDimShapeY.size() - fDimShapeA.size()));
390 if (stridesA[i].GetVal() != "1")
391 compute_idx_A += " * " + stridesA[i].GetVal();
392 compute_idx_A += " + ";
393 }
394 // remove last 3 character " + "
395 for (int j = 0; j < 3; j++)
396 compute_idx_A.pop_back();
397 }
// Same index construction for input B.
398 if (fDimShapeB.empty() ||
399 std::all_of(fDimShapeB.begin(), fDimShapeB.end(), [](Dim d) { return d.dim == 1 || d.GetVal() == "1"; })) {
400 compute_idx_B = "0";
401 } else {
402 for (size_t i = 0; i < fDimShapeB.size(); ++i) {
403 if (fDimShapeB[i].dim == 1 || fDimShapeB[i].GetVal() == "1")
404 continue;
405 compute_idx_B += "idx_" + std::to_string(i + (fDimShapeY.size() - fDimShapeB.size()));
406 if (stridesB[i].GetVal() != "1")
407 compute_idx_B += " * " + stridesB[i].GetVal();
408 compute_idx_B += " + ";
409 }
410 // remove last 3 character " + "
411 for (int j = 0; j < 3; j++)
412 compute_idx_B.pop_back();
413 }
// Emit one "for" loop per non-unit output dimension and build Y's index.
414 int nloop = 0;
415 if (fDimShapeY.empty() ||
416 std::all_of(fDimShapeY.begin(), fDimShapeY.end(), [](Dim d) { return d.dim == 1 || d.GetVal() == "1"; })) {
417 compute_idx_Y = "0";
418 } else {
419 for (size_t i = 0; i < fDimShapeY.size(); ++i) {
420 if (fDimShapeY[i].dim != 1 && fDimShapeY[i].GetVal() != "1") {
421 nloop++;
422 for (int j = 0; j < nloop; j++) out << SP;
423 out << "for (size_t idx_" << i << " = 0; idx_" << i << " < " << fDimShapeY[i]
424 << "; ++idx_" << i << "){\n";
425 compute_idx_Y += "idx_" + std::to_string(i);
426 if (stridesY[i].GetVal() != "1")
427 compute_idx_Y += " * " + stridesY[i].GetVal();
428 compute_idx_Y += " + ";
429 }
430 }
431 // remove last 3 characters " + "
432 for (int j = 0; j < 3; j++)
433 compute_idx_Y.pop_back();
434 }
// Innermost statement: Y[idx] = A[idxA] <op> B[idxB].
435 for (int j = 0; j < nloop + 1; j++) out << SP;
436 out << "tensor_" << fNY << "[" << compute_idx_Y << "] = "
437 << BinaryOperatorTrait<T, Op>::Op("tensor_" + fNA + "[" + compute_idx_A + "]",
438 "tensor_" + fNB + "[" + compute_idx_B + "]")
439 << " ;\n";
440
// Close the emitted loop nest.
441 for (int i = nloop; i > 0; i--) {
442 for (int j = 0; j < i; j++) out << SP;
443 out << "}\n";
444 }
445 return out.str();
446 }
447
448 std::vector<std::string> GetStdLibs() override
449 {
450 if (Op == EBasicBinaryOperator::Pow) {
451 return {std::string("cmath")};
452 } else {
453 return {};
454 }
455 }
456};
457
458} // namespace SOFIE
459} // namespace Experimental
460} // namespace TMVA
461
462#endif // TMVA_SOFIE_ROperator_BasicBinary
#define d(i)
Definition RSha256.hxx:102
ROOT::Detail::TRangeCast< T, true > TRangeDynCast
TRangeDynCast is an adapter class that allows the typed iteration through a TCollection.
winID h TVirtualViewer3D TVirtualGLPainter p
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void data
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void input
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void char Point_t Rectangle_t WindowAttributes_t Float_t Float_t Float_t Int_t Int_t UInt_t UInt_t Rectangle_t Int_t Int_t Window_t TString Int_t GCValues_t GetPrimarySelectionOwner GetDisplay GetScreen GetColormap GetNativeEvent const char const char dpyName wid window const char font_name cursor keysym reg const char only_if_exist regb h Point_t winding char text const char depth char const char Int_t count const char ColorStruct_t color const char Pixmap_t Pixmap_t PictureAttributes_t attr const char char ret_data h unsigned char height h length
char name[80]
Definition TGX11.cxx:145
std::vector< size_t > GetTensorShape(const std::string &name) const
Definition RModel.cxx:51
std::vector< Dim > GetDimTensorShape(const std::string &name) const
Definition RModel.cxx:87
bool IsDynamicTensor(const std::string &name) const
Definition RModel.cxx:269
void AddIntermediateTensor(std::string tensor_name, ETensorType type, std::vector< Dim > dim_shape)
Definition RModel.cxx:284
bool CheckIfTensorAlreadyExist(std::string tensor_name)
Definition RModel.cxx:144
void AddConstantTensor(std::string tensor_name, ETensorType type, std::vector< std::size_t > shape, std::shared_ptr< void > data)
Definition RModel.cxx:215
bool IsShapeTensor(const std::string &name) const
check if a tensor is a shape tensor
Definition RModel.cxx:243
bool IsConstantTensor(const std::string &name) const
Definition RModel.cxx:260
std::vector< Dim > GetDynamicTensorShape(const std::string &name) const
Definition RModel.cxx:98
std::shared_ptr< void > GetInitializedTensorData(std::string tensor_name)
Definition RModel.cxx:349
void SetNotWritableInitializedTensor(const std::string &tensor_name)
Definition RModel.cxx:358
ETensorType GetTensorType(std::string name) const
Definition RModel.cxx:112
const std::vector< std::string > & GetInputTensorNames() const
Definition RModel.hxx:203
const std::vector< Dim > & GetShapeTensorValues(const std::string &tensor_name) const
Definition RModel.cxx:251
void AddShapeTensor(const std::string &name, const std::vector< Dim > &shapeValues, bool scalar=false)
Definition RModel.cxx:225
std::string Generate(std::string opName) override
std::vector< ETensorType > TypeInference(std::vector< ETensorType > input) override
std::vector< std::vector< size_t > > ShapeInference(std::vector< std::vector< size_t > > input) override
ROperator_BasicBinary(std::string nameA, std::string nameB, std::string nameY)
std::vector< std::string_view > fInputTensorNames
Definition ROperator.hxx:50
bool fIsOutputConstant
flag to identify if operator has a constant output (no need to generate code)
Definition ROperator.hxx:47
const std::string SP
space used to correctly indent the generated C++ code
Definition ROperator.hxx:45
std::vector< std::string_view > fOutputTensorNames
Definition ROperator.hxx:51
std::vector< size_t > MultidirectionalBroadcastShape(std::vector< std::vector< size_t > >)
T * UnidirectionalBroadcast(const T *data, const std::vector< size_t > &shape, const std::vector< size_t > &targetShape)
std::vector< size_t > ComputeStrideFromShape(const std::vector< size_t > &shape)
compute stride of a tensor given its shape (assume layout is row-major)
std::string ConvertDimShapeToString(const std::vector< Dim > &shape)
std::size_t ConvertShapeToLength(const std::vector< size_t > &shape)
std::string ConvertValuesToString(size_t n, const T *data, size_t maxprint=-1)
std::vector< Dim > ConvertShapeToDim(const std::vector< size_t > &shape)
Convert shape from integer format to dynamic one (based on Dim)
std::string ConvertDimShapeToLength(const std::vector< Dim > &shape)
std::string ConvertShapeToString(const std::vector< size_t > &shape)
create variable transformations
static std::string Op(const std::string &t1, const std::string t2)
static std::string Op(const std::string &t1, const std::string t2)
static std::string Op(const std::string &t1, const std::string t2)
static std::string Op(const std::string &t1, const std::string t2)
static std::string Op(const std::string &t1, const std::string t2)
static std::string Op(const std::string &t1, const std::string t2)
static std::string Op(const std::string &t1, const std::string t2)
auto * t1
Definition textangle.C:20