Logo ROOT  
Reference Guide
 
Loading...
Searching...
No Matches
ROperator_BasicBinary.hxx
Go to the documentation of this file.
1#ifndef TMVA_SOFIE_ROperator_BasicBinary
2#define TMVA_SOFIE_ROperator_BasicBinary
3
5#include "TMVA/ROperator.hxx"
6#include "TMVA/RModel.hxx"
7
8#include <sstream>
#include <cmath>
#include <type_traits>
9
10namespace TMVA {
11namespace Experimental {
12namespace SOFIE {
13
15
16template <typename T, EBasicBinaryOperator Op1>
18
19template <typename T>
21 static const std::string Name() { return "Add"; }
22 static std::string Op(const std::string &t1, const std::string t2) { return t1 + " + " + t2; }
23 static T Func(T t1, T t2) { return t1 + t2; }
24};
25
26template <typename T>
28 static const std::string Name() { return "Sub"; }
29 static std::string Op(const std::string &t1, const std::string t2) { return t1 + " - " + t2; }
30 static T Func(T t1, T t2) { return t1 - t2; }
31};
32
33template <typename T>
35 static const std::string Name() { return "Mul"; }
36 static std::string Op(const std::string &t1, const std::string t2) { return t1 + " * " + t2; }
37 static T Func(T t1, T t2) { return t1 * t2; }
38};
39
40template <typename T>
42 static const std::string Name() { return "Div"; }
43 static std::string Op(const std::string &t1, const std::string t2) { return t1 + " / " + t2; }
44 static T Func(T t1, T t2) { return t1 / t2; }
45};
46
47template <typename T>
49 static const std::string Name() { return "Pow"; }
50 static std::string Op(const std::string &t1, const std::string t2) { return "std::pow(" + t1 + "," + t2 + ")"; }
51 static T Func(T t1, T t2) { return std::pow(t1, t2); }
52};
53template <typename T>
55 static const std::string Name() { return "Mod"; }
56 static std::string Op(const std::string & t1, const std::string t2) { return "(" + t1 + " % " + t2 + ")"; }
57 static T Func (T t1, T t2) { return std::pow(t1,t2);}
58};
59template <typename T>
61 static const std::string Name() { return "FMod"; }
62 static std::string Op(const std::string & t1, const std::string t2) { return "std::fmod(" + t1 + "," + t2 + ")"; }
63 static T Func (T t1, T t2) { return std::pow(t1,t2);}
64};
65
66template <typename T, EBasicBinaryOperator Op>
68private:
70 std::string fNA;
71 std::string fNB;
72 std::string fNBroadcastedA;
73 std::string fNBroadcastedB;
74 std::string fNY;
75
76 std::vector<size_t> fShapeA;
77 std::vector<size_t> fShapeB;
78 std::vector<size_t> fShapeY;
79
80 std::vector<Dim> fDimShapeA;
81 std::vector<Dim> fDimShapeB;
82 std::vector<Dim> fDimShapeY;
83
84public:
86 ROperator_BasicBinary(std::string nameA, std::string nameB, std::string nameY)
87 : fNA(UTILITY::Clean_name(nameA)), fNB(UTILITY::Clean_name(nameB)), fNY(UTILITY::Clean_name(nameY))
88 {
91 }
92
93 // type of output given input
94 std::vector<ETensorType> TypeInference(std::vector<ETensorType> input) override { return input; }
95
96 // shape of output tensors given input tensors
97 std::vector<std::vector<size_t>> ShapeInference(std::vector<std::vector<size_t>> input) override
98 {
99 // assume now inputs have same shape (no broadcasting)
100 auto ret = std::vector<std::vector<size_t>>(1, input[0]); // return vector size 1 with first input
101 return ret;
102 }
103
104 void Initialize(RModel &model) override
105 {
106 // input must be a graph input, or already initialized intermediate tensor
107 if (!model.CheckIfTensorAlreadyExist(fNA)) {
108 throw std::runtime_error(std::string("TMVA SOFIE Binary Op Input Tensor ") + fNA + "is not found in model");
109 }
110 if (!model.CheckIfTensorAlreadyExist(fNB)) {
111 throw std::runtime_error(std::string("TMVA SOFIE Binary Op Input Tensor ") + fNB + "is not found in model");
112 }
113 int dynamicInputs = 0;
114 if (model.IsDynamicTensor(fNA)) {
116 dynamicInputs |= 1;
117 } else {
118 fShapeA = model.GetTensorShape(fNA);
120 }
121 if (model.IsDynamicTensor(fNB)) {
122 dynamicInputs |= 2;
124 } else {
125 fShapeB = model.GetTensorShape(fNB);
127 }
128 if (dynamicInputs & 1 && model.Verbose())
129 std::cout << BinaryOperatorTrait<T, Op>::Name() << " : input " << fNA << " is dynamic "
130 << ConvertDimShapeToString(fDimShapeA) << std::endl;
131 if (dynamicInputs & 2 && model.Verbose())
132 std::cout << BinaryOperatorTrait<T, Op>::Name() << " : input " << fNB << " is dynamic "
133 << ConvertDimShapeToString(fDimShapeB) << std::endl;
134
135 // check if need to broadcast at initialization time if shapes are known and different
136 // (we could broadcast the tensor tensor to maximum values of dynamic shapes - to be done)
137 // case of known shapes
138 // if shapes are known find the output shape from broadcasting
139 if (dynamicInputs == 0) {
141 fBroadcastFlag = ret.first;
142 fShapeY = ret.second;
143 if (model.IsConstantTensor(fNA) && model.IsConstantTensor(fNB)) {
144 bool broadcast = fBroadcastFlag > 0;
145 if (broadcast) {
146 // Y is the common shape of A and B
147 bool broadcastA = fBroadcastFlag & 2;
148 bool broadcastB = fBroadcastFlag & 1;
149 // Broadcast A to Y
150 if (broadcastA) {
151 fNBroadcastedA = "Broadcasted" + fNA + "to" + fNY;
152 auto data = model.GetInitializedTensorData(fNA);
153 std::shared_ptr<void> broadcastedData(
154 UTILITY::UnidirectionalBroadcast(static_cast<T *>(data.get()), fShapeA, fShapeY),
155 std::default_delete<T[]>());
156 if (model.Verbose())
157 std::cout << "broadcasted data A " << ConvertShapeToString(fShapeY) << " : "
159 static_cast<T *>(broadcastedData.get()))
160 << std::endl;
161 // Update the data and the shape of A
165 }
166 // Broadcast B to Y
167 if (broadcastB) {
168 fNBroadcastedB = "Broadcasted" + fNB + "to" + fNY;
169 auto data = model.GetInitializedTensorData(fNB);
170 if (model.Verbose())
171 std::cout << "data B " << ConvertShapeToString(fShapeB) << " : "
172 << ConvertValuesToString(ConvertShapeToLength(fShapeB), static_cast<T *>(data.get()))
173 << std::endl;
174 std::shared_ptr<void> broadcastedData(
175 UTILITY::UnidirectionalBroadcast(static_cast<T *>(data.get()), fShapeB, fShapeY),
176 std::default_delete<T[]>());
177 // do not update tensor B but add broadcasted one (since it can be input to some other operators)
178 if (model.Verbose())
179 std::cout << "broadcasted data B " << ConvertShapeToString(fShapeY) << " : "
181 static_cast<T *>(broadcastedData.get()))
182 << std::endl;
186 }
187 } else {
189 }
190 // tensors are constant: perform here the binary operation
191
192 const std::string &nameA = fNBroadcastedA.empty() ? fNA : fNBroadcastedA;
193 const std::string &nameB = fNBroadcastedB.empty() ? fNB : fNBroadcastedB;
194 auto dataA = static_cast<T *>(model.GetInitializedTensorData(nameA).get());
195 auto dataB = static_cast<T *>(model.GetInitializedTensorData(nameB).get());
196 std::vector<T> dataY(ConvertShapeToLength(fShapeY));
197 for (size_t i = 0; i < dataY.size(); i++) {
199 }
200 model.AddConstantTensor<T>(fNY, fShapeY, dataY.data());
201 // flag tensors to not be written in the generated code or weight file
204 fIsOutputConstant = true;
205 if (model.Verbose()) {
206 std::cout << BinaryOperatorTrait<T, Op>::Name() << " : " << fNA << " " << ConvertShapeToString(fShapeA)
207 << " , " << fNB << " " << ConvertShapeToString(fShapeB) << " ---> " << fNY << " "
208 << ConvertShapeToString(fShapeY) << " : " << ConvertValuesToString(dataY) << std::endl;
209 }
210 } else {
211 // case of defined and non-constant tensors
213 if (model.Verbose()) {
214 std::cout << BinaryOperatorTrait<T, Op>::Name() << " : " << fNA << " " << ConvertShapeToString(fShapeA)
215 << " , " << fNB << " " << ConvertShapeToString(fShapeB) << " ---> " << fNY << " "
216 << ConvertShapeToString(fShapeY) << std::endl;
217 }
218 // we convert non-dim shapes to Dim shapes
220 }
221 } else {
222 // case A or B have dynamic shapes. We need to broadcast if shape are not same
224 fBroadcastFlag = ret.first;
225 fDimShapeY = ret.second;
226 // case of all parametric shapes and MultiDirectionalBroadcastShape return the max of the 2
227 // need to do before we declare the output tensor shape and the broadcasted ones
228 if (ret.first & 4) {
229 // check if one of the parameter is an input dimension
230 // define function to find this
231 auto IsInputDimParam = [&](const std::string &p) {
232 auto inputNames = model.GetInputTensorNames();
233 for (auto &input : inputNames) {
234 for (auto &i_s : model.GetDimTensorShape(input)) {
235 if (i_s.isParam && i_s.param == p)
236 return true;
237 }
238 }
239 return false;
240 };
241 for (size_t i = 0; i < fDimShapeY.size(); i++) {
242 auto &s = fDimShapeY[i];
243 if (s.isParam && s.param.find("std::max") != std::string::npos) {
244 if (IsInputDimParam(fDimShapeA[i].param)) {
245 // case dim is 1 we indicate that the input parameter is equal to 1
246 if (fDimShapeA[i].dim != 1)
247 s = fDimShapeA[i];
248 else
249 s = fDimShapeB[i];
250 } else if (IsInputDimParam(fDimShapeB[i].param)) {
251 if (fDimShapeB[i].dim != 1)
252 s = fDimShapeB[i];
253 else
254 s = fDimShapeA[i];
255 }
256 }
257 }
258 }
259
261 if (model.Verbose()) {
262 std::cout << BinaryOperatorTrait<T, Op>::Name() << " : " << ConvertDimShapeToString(fDimShapeA) << " , "
264 }
265 }
266 }
267
268 std::string GenerateInitCode() override
269 {
270 std::stringstream out;
271 return out.str();
272 }
273
274 std::string Generate(std::string opName) override
275 {
276
278 return "";
279
280 opName = "op_" + opName;
281
282 if (fDimShapeY.empty()) {
283 throw std::runtime_error("TMVA SOFIE Binary Op called to Generate without being initialized first");
284 }
285 std::stringstream out;
286 out << SP << "\n//------ " << opName << " " << BinaryOperatorTrait<T, Op>::Name() << " --> "
289 std::string typeName = TensorType<T>::Name();
290
291 // we need to check if we can broadcast (case flag has bit 4 set)
292
293 if (fBroadcastFlag & 4) {
294 // need to check if shapes are the same
297 out << SP << "if (" << lengthA << "!=" << lengthB << ") {\n";
298 // check if A->B or B->A
299 // bool broadcastable = true;
300 for (size_t i = 0; i < fDimShapeY.size(); i++) {
301 if (fBroadcastFlag & 5 && fDimShapeY[i] == fDimShapeA[i] && fDimShapeA[i].dim > 1 &&
302 fDimShapeB[i].isParam) {
303 // B->A B[i] needs to be 1
304 out << SP << SP << "if (" << fDimShapeB[i] << "!= 1)\n";
305 out << SP << SP << SP << "throw std::runtime_error(\"SOFIE - Cannot broadcast B->A in operator "
306 << opName << "\");\n";
307 }
308 if (fBroadcastFlag & 6 && fDimShapeY[i] == fDimShapeB[i] && fDimShapeB[i].dim > 1 &&
309 fDimShapeA[i].isParam) {
310 // A-> B A[i] needs to be 1
311 out << SP << SP << "if (" << fDimShapeA[i] << "!= 1)\n";
312 out << SP << SP << SP << "throw std::runtime_error(\"SOFIE - Cannot broadcast A->B in operator "
313 << opName << "\");\n";
314 } else if (fDimShapeA[i].isParam && fDimShapeB[i].isParam) {
315 // both shapes are parametric and we broadcast to maximum
316 // we allocate here output vector
317 out << SP << SP << "if (" << fDimShapeA[i] << " != " << fDimShapeB[i] << " && (" << fDimShapeA[i]
318 << " != 1 || " << fDimShapeB[i] << " != 1))\n";
319 out << SP << SP << SP << "throw std::runtime_error(\"SOFIE - Cannot broadcast shapes in operator " << opName
320 << "\");\n";
321 }
322 }
323 out << SP << "}\n";
324 }
325
329
331 if (fDimShapeA.empty() ||
332 std::all_of(fDimShapeA.begin(), fDimShapeA.end(), [](Dim d) { return d.dim == 1 || d.GetVal() == "1"; })) {
333 compute_idx_A = "0";
334 } else {
335 for (size_t i = 0; i < fDimShapeA.size(); ++i) {
336 if (fDimShapeA[i].dim == 1 || fDimShapeA[i].GetVal() == "1")
337 continue;
338 compute_idx_A += "idx_" + std::to_string(i + (fDimShapeY.size() - fDimShapeA.size()));
339 if (stridesA[i].GetVal() != "1")
340 compute_idx_A += " * " + stridesA[i].GetVal();
341 compute_idx_A += " + ";
342 }
343 // remove last 3 character " + "
344 for (int j = 0; j < 3; j++)
345 compute_idx_A.pop_back();
346 }
347 if (fDimShapeB.empty() ||
348 std::all_of(fDimShapeB.begin(), fDimShapeB.end(), [](Dim d) { return d.dim == 1 || d.GetVal() == "1"; })) {
349 compute_idx_B = "0";
350 } else {
351 for (size_t i = 0; i < fDimShapeB.size(); ++i) {
352 if (fDimShapeB[i].dim == 1 || fDimShapeB[i].GetVal() == "1")
353 continue;
354 compute_idx_B += "idx_" + std::to_string(i + (fDimShapeY.size() - fDimShapeB.size()));
355 if (stridesB[i].GetVal() != "1")
356 compute_idx_B += " * " + stridesB[i].GetVal();
357 compute_idx_B += " + ";
358 }
359 // remove last 3 character " + "
360 for (int j = 0; j < 3; j++)
361 compute_idx_B.pop_back();
362 }
363 int nloop = 0;
364 if (fDimShapeY.empty() ||
365 std::all_of(fDimShapeY.begin(), fDimShapeY.end(), [](Dim d) { return d.dim == 1 || d.GetVal() == "1"; })) {
366 compute_idx_Y = "0";
367 } else {
368 for (size_t i = 0; i < fDimShapeY.size(); ++i) {
369 if (fDimShapeY[i].dim != 1 && fDimShapeY[i].GetVal() != "1") {
370 nloop++;
371 for (int j = 0; j < nloop; j++) out << SP;
372 out << "for (size_t idx_" << i << " = 0; idx_" << i << " < " << fDimShapeY[i]
373 << "; ++idx_" << i << "){\n";
374 compute_idx_Y += "idx_" + std::to_string(i);
375 if (stridesY[i].GetVal() != "1")
376 compute_idx_Y += " * " + stridesY[i].GetVal();
377 compute_idx_Y += " + ";
378 }
379 }
380 // remove last 3 characters " + "
381 for (int j = 0; j < 3; j++)
382 compute_idx_Y.pop_back();
383 }
384 for (int j = 0; j < nloop + 1; j++) out << SP;
385 out << "tensor_" << fNY << "[" << compute_idx_Y << "] = "
386 << BinaryOperatorTrait<T, Op>::Op("tensor_" + fNA + "[" + compute_idx_A + "]",
387 "tensor_" + fNB + "[" + compute_idx_B + "]")
388 << " ;\n";
389
390 for (int i = nloop; i > 0; i--) {
391 for (int j = 0; j < i; j++) out << SP;
392 out << "}\n";
393 }
394 return out.str();
395 }
396
397 std::vector<std::string> GetStdLibs() override
398 {
399 if (Op == EBasicBinaryOperator::Pow) {
400 return {std::string("cmath")};
401 } else {
402 return {};
403 }
404 }
405};
406
407} // namespace SOFIE
408} // namespace Experimental
409} // namespace TMVA
410
411#endif // TMVA_SOFIE_ROperator_BasicBinary
#define d(i)
Definition RSha256.hxx:102
ROOT::Detail::TRangeCast< T, true > TRangeDynCast
TRangeDynCast is an adapter class that allows the typed iteration through a TCollection.
winID h TVirtualViewer3D TVirtualGLPainter p
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void data
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void input
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void char Point_t Rectangle_t WindowAttributes_t Float_t Float_t Float_t Int_t Int_t UInt_t UInt_t Rectangle_t Int_t Int_t Window_t TString Int_t GCValues_t GetPrimarySelectionOwner GetDisplay GetScreen GetColormap GetNativeEvent const char const char dpyName wid window const char font_name cursor keysym reg const char only_if_exist regb h Point_t winding char text const char depth char const char Int_t count const char ColorStruct_t color const char Pixmap_t Pixmap_t PictureAttributes_t attr const char char ret_data h unsigned char height h length
std::vector< size_t > GetTensorShape(const std::string &name) const
Definition RModel.cxx:29
std::vector< Dim > GetDimTensorShape(const std::string &name) const
Definition RModel.cxx:65
bool IsDynamicTensor(const std::string &name) const
Definition RModel.cxx:247
void AddIntermediateTensor(std::string tensor_name, ETensorType type, std::vector< Dim > dim_shape)
Definition RModel.cxx:262
bool CheckIfTensorAlreadyExist(std::string tensor_name)
Definition RModel.cxx:122
void AddConstantTensor(std::string tensor_name, ETensorType type, std::vector< std::size_t > shape, std::shared_ptr< void > data)
Definition RModel.cxx:193
bool IsConstantTensor(const std::string &name) const
Definition RModel.cxx:238
std::vector< Dim > GetDynamicTensorShape(const std::string &name) const
Definition RModel.cxx:76
std::shared_ptr< void > GetInitializedTensorData(std::string tensor_name)
Definition RModel.cxx:327
void SetNotWritableInitializedTensor(const std::string &tensor_name)
Definition RModel.cxx:336
ETensorType GetTensorType(std::string name) const
Definition RModel.cxx:90
const std::vector< std::string > & GetInputTensorNames() const
Definition RModel.hxx:201
std::string Generate(std::string opName) override
std::vector< ETensorType > TypeInference(std::vector< ETensorType > input) override
std::vector< std::vector< size_t > > ShapeInference(std::vector< std::vector< size_t > > input) override
ROperator_BasicBinary(std::string nameA, std::string nameB, std::string nameY)
std::vector< std::string_view > fInputTensorNames
Definition ROperator.hxx:49
bool fIsOutputConstant
flag to identify if operator has a constant output (no need to generate code)
Definition ROperator.hxx:46
const std::string SP
space used to correctly indent the generated C++ code
Definition ROperator.hxx:44
std::vector< std::string_view > fOutputTensorNames
Definition ROperator.hxx:50
std::vector< size_t > MultidirectionalBroadcastShape(std::vector< std::vector< size_t > >)
T * UnidirectionalBroadcast(const T *data, const std::vector< size_t > &shape, const std::vector< size_t > &targetShape)
std::vector< size_t > ComputeStrideFromShape(const std::vector< size_t > &shape)
compute stride of a tensor given its shape (assume layout is row-major)
std::string ConvertDimShapeToString(const std::vector< Dim > &shape)
std::size_t ConvertShapeToLength(const std::vector< size_t > &shape)
std::vector< Dim > ConvertShapeToDim(const std::vector< size_t > &shape)
Convert shape from integer format to dynamic one (based on Dim)
std::string ConvertValuesToString(size_t n, const T *data)
std::string ConvertDimShapeToLength(const std::vector< Dim > &shape)
std::string ConvertShapeToString(const std::vector< size_t > &shape)
create variable transformations
static std::string Op(const std::string &t1, const std::string t2)
static std::string Op(const std::string &t1, const std::string t2)
static std::string Op(const std::string &t1, const std::string t2)
static std::string Op(const std::string &t1, const std::string t2)
static std::string Op(const std::string &t1, const std::string t2)
static std::string Op(const std::string &t1, const std::string t2)
static std::string Op(const std::string &t1, const std::string t2)
auto * t1
Definition textangle.C:20