ROperator_Comparision.hxx
#ifndef TMVA_SOFIE_ROperator_Comparision
#define TMVA_SOFIE_ROperator_Comparision

#include "TMVA/SOFIE_common.hxx"
#include "TMVA/ROperator.hxx"
#include "TMVA/RModel.hxx"

#include <sstream>
#include <algorithm>
#include <iostream>
#include <memory>
namespace TMVA{
namespace Experimental{
namespace SOFIE{

enum EComparisionOperator { Eq, Less, LessEq, Greater, GreaterEq };

template <typename T, EComparisionOperator Op1>
struct ComparisionTrait {};

template <typename T>
struct ComparisionTrait<T, Eq> {
   static const std::string Name() { return "Equal"; }
   static std::string Op(const std::string & t1, const std::string t2) { return t1 + " == " + t2; }
   static bool Result(T v1, T v2) { return v1 == v2; }
};

template <typename T>
struct ComparisionTrait<T, Less> {
   static const std::string Name() { return "Less"; }
   static std::string Op(const std::string & t1, const std::string t2) { return t1 + " < " + t2; }
   static bool Result(T v1, T v2) { return v1 < v2; }
};

template <typename T>
struct ComparisionTrait<T, LessEq> {
   static const std::string Name() { return "LessOrEqual"; }
   static std::string Op(const std::string & t1, const std::string t2) { return t1 + " <= " + t2; }
   static bool Result(T v1, T v2) { return v1 <= v2; }
};

template <typename T>
struct ComparisionTrait<T, Greater> {
   static const std::string Name() { return "Greater"; }
   static std::string Op(const std::string & t1, const std::string t2) { return t1 + " > " + t2; }
   static bool Result(T v1, T v2) { return v1 > v2; }
};

template <typename T>
struct ComparisionTrait<T, GreaterEq> {
   static const std::string Name() { return "GreaterOrEqual"; }
   static std::string Op(const std::string & t1, const std::string t2) { return t1 + " >= " + t2; }
   static bool Result(T v1, T v2) { return v1 >= v2; }
};

template<typename T, EComparisionOperator Op>
class ROperator_Comparision final : public ROperator{
private:

   std::string fNX1;
   std::string fNX2;
   std::string fNY;
   std::vector<size_t> fShapeX1;
   std::vector<size_t> fShapeX2;
   std::vector<Dim> fDimShapeX1;
   std::vector<Dim> fDimShapeX2;
   std::vector<size_t> fShapeY;
   std::vector<Dim> fDimShapeY;
   int fBroadcastFlag = 0;

public:
   ROperator_Comparision(){}
   ROperator_Comparision(const std::string & nameX1, const std::string & nameX2, const std::string & nameY):
      fNX1(UTILITY::Clean_name(nameX1)), fNX2(UTILITY::Clean_name(nameX2)), fNY(UTILITY::Clean_name(nameY)){
         fInputTensorNames = { fNX1, fNX2 };

         // output will be a boolean vector so should not be considered for memory optimized pool
         fOutputTensorNames = { };
      }

   // type of output given input
   std::vector<ETensorType> TypeInference(std::vector<ETensorType> input) override {
      return input;
   }

   // shape of output tensors given input tensors
   std::vector<std::vector<size_t>> ShapeInference(std::vector<std::vector<size_t>> input) override {
      auto ret = input; // return vector size 1 with first input
      return ret;
   }

   void Initialize(RModel& model) override {
      // input must be a graph input, or already initialized intermediate tensor
      if (!model.CheckIfTensorAlreadyExist(fNX1)) {
         throw std::runtime_error(std::string("TMVA SOFIE Comparision Op Input Tensor ") + fNX1 + " is not found in model");
      }
      if (!model.CheckIfTensorAlreadyExist(fNX2)) {
         throw std::runtime_error(std::string("TMVA SOFIE Comparision Op Input Tensor ") + fNX2 + " is not found in model");
      }
      if (model.IsDynamicTensor(fNX1))
         fDimShapeX1 = model.GetDynamicTensorShape(fNX1);
      else {
         fShapeX1 = model.GetTensorShape(fNX1);
         fDimShapeX1 = ConvertShapeToDim(fShapeX1);
      }
      if (model.IsDynamicTensor(fNX2))
         fDimShapeX2 = model.GetDynamicTensorShape(fNX2);
      else {
         fShapeX2 = model.GetTensorShape(fNX2);
         fDimShapeX2 = ConvertShapeToDim(fShapeX2);
      }

      // case of non dynamic tensors
      if (!fShapeX1.empty() && !fShapeX2.empty()) {
         bool broadcastX1 = false;
         bool broadcastX2 = false;
         if (UTILITY::AreSameShape(fShapeX1, fShapeX2)) {
            // no broadcast needed
            fShapeY = fShapeX1;
         } else {
            // Y is the common shape of A and B
            fShapeY = UTILITY::MultidirectionalBroadcastShape({fShapeX1, fShapeX2});
            broadcastX1 = !UTILITY::AreSameShape(fShapeX1, fShapeY);
            broadcastX2 = !UTILITY::AreSameShape(fShapeX2, fShapeY);
         }

         // analyze case of constant tensors or shape tensors (which have known shapes but data as Dim values)
         // normal case with non-dynamic tensor is also here
         T *data1 = nullptr;
         T *data2 = nullptr;
         std::unique_ptr<T> broadcastedData1;
         std::unique_ptr<T> broadcastedData2;
         // data for shape tensors
         std::vector<Dim> shapeData1;
         std::vector<Dim> shapeData2;
         size_t length = ConvertShapeToLength(fShapeY);
         bool *outData = new bool[length];
         if (model.IsInitializedTensor(fNX1)) {
            data1 = static_cast<T *>(model.GetInitializedTensorData(fNX1).get());
            if (broadcastX1) {
               broadcastedData1 = std::unique_ptr<T>(
                  UTILITY::UnidirectionalBroadcast<T>(data1, fShapeX1, fShapeY));
               data1 = broadcastedData1.get();
            }

         } else if (model.IsShapeTensor(fNX1)) {
            shapeData1 = model.GetShapeTensorValues(fNX1);
         }
         if (model.IsInitializedTensor(fNX2)) {
            data2 = static_cast<T *>(model.GetInitializedTensorData(fNX2).get());
            if (broadcastX2) {
               broadcastedData2 = std::unique_ptr<T>(
                  UTILITY::UnidirectionalBroadcast<T>(data2, fShapeX2, fShapeY));
               data2 = broadcastedData2.get();
            }
         } else if (model.IsShapeTensor(fNX2)) {
            shapeData2 = model.GetShapeTensorValues(fNX2);
         }
         if (data1 && data2) {
            fIsOutputConstant = true;
            for (size_t i = 0; i < length; i++)
               outData[i] = ComparisionTrait<T, Op>::Result(data1[i], data2[i]);
            model.AddConstantTensor<bool>(fNY, fShapeY, outData);
            if (model.Verbose())
               std::cout << ComparisionTrait<T, Op>::Name() << " op ---> " << fNY << " "
                         << ConvertShapeToString(fShapeY) << " : " << ConvertValuesToString(length, outData)
                         << std::endl;
         } else if ((data1 || !shapeData1.empty()) && (data2 || !shapeData2.empty())) {
            fIsOutputConstant = true;
            if (data1 && !data2) {
               // data1 is constant and data2 is shape
               for (size_t i = 0; i < length; i++) {
                  if (shapeData2[i].isParam) {
                     if (shapeData2[i].dim == size_t(-1) || data1[i] > 0) {
                        fIsOutputConstant = false;
                        break;
                     } else {
                        // assume a comparison is done with .dim = 0
                        shapeData2[i].dim = 0;
                     }
                  }
                  outData[i] = ComparisionTrait<T, Op>::Result(data1[i], static_cast<T>(shapeData2[i].dim));
               }
            } else if (!data1 && data2) {
               // data1 is shape and data2 is constant
               for (size_t i = 0; i < length; i++) {
                  if (shapeData1[i].isParam) {
                     if (shapeData1[i].dim == size_t(-1) || data2[i] > 0) {
                        fIsOutputConstant = false;
                        break;
                     } else {
                        // assume a comparison is done with .dim = 0
                        shapeData1[i].dim = 0;
                     }
                  }
                  outData[i] = ComparisionTrait<T, Op>::Result(static_cast<T>(shapeData1[i].dim), data2[i]);
               }
            } else if (!shapeData1.empty() && !shapeData2.empty()) {
               // both data1 and data2 are shape tensors
               for (size_t i = 0; i < length; i++) {
                  if (!shapeData1[i].isParam && !shapeData2[i].isParam) {
                     outData[i] = ComparisionTrait<T, Op>::Result(static_cast<T>(shapeData1[i].dim), static_cast<T>(shapeData2[i].dim));
                  } else if (shapeData1[i].isParam && shapeData2[i].isParam) {
                     if (shapeData1[i].param == shapeData2[i].param)
                        outData[i] = ComparisionTrait<int, Op>::Result(1, 1); // comparison of two equal values
                     else {
                        fIsOutputConstant = false;
                        break;
                     }
                  } else {
                     fIsOutputConstant = false;
                     break;
                  }
               }
            }
            if (fIsOutputConstant) {
               model.AddConstantTensor<bool>(fNY, fShapeY, outData);
               if (model.Verbose())
                  std::cout << ComparisionTrait<T, Op>::Name() << " op ---> " << fNY << " "
                            << ConvertShapeToString(fShapeY) << " : " << ConvertValuesToString(length, outData)
                            << " (constant) " << std::endl;
            }
         }
         delete[] outData;
         // case of non constant output (no constant or shape tensors)
         if (!fIsOutputConstant && !fShapeY.empty()) {
            fDimShapeY = ConvertShapeToDim(fShapeY);
            model.AddIntermediateTensor(fNY, ETensorType::BOOL, fDimShapeY);
            if (model.Verbose())
               std::cout << ComparisionTrait<T, Op>::Name() << " op ---> " << fNY << " "
                         << ConvertShapeToString(fShapeY) << std::endl;
         }
      } else {
         // case of dynamic tensors
         // case A or B have dynamic shapes; we need to broadcast if the shapes are not the same
         auto ret = UTILITY::MultidirectionalBroadcastShape(fDimShapeX1, fDimShapeX2);
         fBroadcastFlag = ret.first;
         fDimShapeY = ret.second;
         // case of all parametric shapes where MultidirectionalBroadcastShape returns the max of the 2
         // need to do this before we declare the output tensor shape and the broadcasted ones
         if (ret.first & 4) {
            // check if one of the parameters is an input dimension
            // define a function to find this
            auto IsInputDimParam = [&](const std::string &p) {
               auto inputNames = model.GetInputTensorNames();
               for (auto &input : inputNames) {
                  for (auto &i_s : model.GetDimTensorShape(input)) {
                     if (i_s.isParam && i_s.param == p)
                        return true;
                  }
               }
               return false;
            };
            for (size_t i = 0; i < fDimShapeY.size(); i++) {
               auto &s = fDimShapeY[i];
               if (s.isParam && s.param.find("std::max") != std::string::npos) {
                  if (IsInputDimParam(fDimShapeX1[i].param)) {
                     // if dim is 1 the input parameter is known to be equal to 1, so take the other shape
                     if (fDimShapeX1[i].dim != 1)
                        s = fDimShapeX1[i];
                     else
                        s = fDimShapeX2[i];
                  } else if (IsInputDimParam(fDimShapeX2[i].param)) {
                     if (fDimShapeX2[i].dim != 1)
                        s = fDimShapeX2[i];
                     else
                        s = fDimShapeX1[i];
                  }
               }
            }
         }

         model.AddIntermediateTensor(fNY, ETensorType::BOOL, fDimShapeY);
         if (model.Verbose()) {
            std::cout << ComparisionTrait<T, Op>::Name() << " : " << fNX1 << " " << ConvertShapeToString(fDimShapeX1) << " , "
                      << fNX2 << " " << ConvertShapeToString(fDimShapeX2) << " --> "
                      << fNY << " " << ConvertShapeToString(fDimShapeY) << std::endl;
         }
      }
   }

   std::string Generate(std::string opName) override {
      if (fIsOutputConstant) return "";
      opName = "op_" + opName;

      if (fDimShapeY.empty()) {
         throw std::runtime_error("TMVA SOFIE Comparision Op called to Generate without being initialized first");
      }
      std::stringstream out;
      out << SP << "\n//------ " << ComparisionTrait<T,Op>::Name() << " " << opName
          << " --> " << ConvertShapeToString(fShapeY) << "\n";

      // need to add check if tensors are compatible as in binary operator

      // use same code as Binary operator
      auto stridesY = UTILITY::ComputeStrideFromShape(fDimShapeY);
      auto stridesA = UTILITY::ComputeStrideFromShape(fDimShapeX1);
      auto stridesB = UTILITY::ComputeStrideFromShape(fDimShapeX2);

      std::string compute_idx_X1, compute_idx_X2, compute_idx_Y;
      if (fDimShapeX1.empty() ||
          std::all_of(fDimShapeX1.begin(), fDimShapeX1.end(), [](Dim d) { return d.dim == 1 || d.GetVal() == "1"; })) {
         compute_idx_X1 = "0";
      } else {
         for (size_t i = 0; i < fDimShapeX1.size(); ++i) {
            if (fDimShapeX1[i].dim == 1 || fDimShapeX1[i].GetVal() == "1")
               continue;
            compute_idx_X1 += "idx_" + std::to_string(i + (fDimShapeY.size() - fDimShapeX1.size()));
            if (stridesA[i].GetVal() != "1")
               compute_idx_X1 += " * " + stridesA[i].GetVal();
            compute_idx_X1 += " + ";
         }
         // remove the trailing " + " (3 characters)
         for (int j = 0; j < 3; j++)
            compute_idx_X1.pop_back();
      }
      if (fDimShapeX2.empty() ||
          std::all_of(fDimShapeX2.begin(), fDimShapeX2.end(), [](Dim d) { return d.dim == 1 || d.GetVal() == "1"; })) {
         compute_idx_X2 = "0";
      } else {
         for (size_t i = 0; i < fDimShapeX2.size(); ++i) {
            if (fDimShapeX2[i].dim == 1 || fDimShapeX2[i].GetVal() == "1")
               continue;
            compute_idx_X2 += "idx_" + std::to_string(i + (fDimShapeY.size() - fDimShapeX2.size()));
            if (stridesB[i].GetVal() != "1")
               compute_idx_X2 += " * " + stridesB[i].GetVal();
            compute_idx_X2 += " + ";
         }
         // remove the trailing " + " (3 characters)
         for (int j = 0; j < 3; j++)
            compute_idx_X2.pop_back();
      }
      int nloop = 0;
      if (fDimShapeY.empty() ||
          std::all_of(fDimShapeY.begin(), fDimShapeY.end(), [](Dim d) { return d.dim == 1 || d.GetVal() == "1"; })) {
         compute_idx_Y = "0";
      } else {
         for (size_t i = 0; i < fDimShapeY.size(); ++i) {
            if (fDimShapeY[i].dim != 1 && fDimShapeY[i].GetVal() != "1") {
               nloop++;
               for (int j = 0; j < nloop; j++) out << SP;
               out << "for (size_t idx_" << i << " = 0; idx_" << i << " < " << fDimShapeY[i]
                   << "; ++idx_" << i << "){\n";
               compute_idx_Y += "idx_" + std::to_string(i);
               if (stridesY[i].GetVal() != "1")
                  compute_idx_Y += " * " + stridesY[i].GetVal();
               compute_idx_Y += " + ";
            }
         }
         // remove the trailing " + " (3 characters)
         for (int j = 0; j < 3; j++)
            compute_idx_Y.pop_back();
      }
      for (int j = 0; j < nloop + 1; j++) out << SP;
      out << "tensor_" << fNY << "[" << compute_idx_Y << "] = "
          << ComparisionTrait<T,Op>::Op( "tensor_" + fNX1 + "[" + compute_idx_X1 + "]" ,
                                         "tensor_" + fNX2 + "[" + compute_idx_X2 + "]") << " ;\n";

      for (int i = nloop; i > 0; i--) {
         for (int j = 0; j < i; j++) out << SP;
         out << "}\n";
      }

      return out.str();
   }

};

}//SOFIE
}//Experimental
}//TMVA


#endif //TMVA_SOFIE_ROperator_Comparision
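
The trait pattern above is what lets the single ROperator_Comparision template serve all five comparison operators: each ComparisionTrait specialization supplies the operator's display name, the C++ expression fragment that Generate() splices into the emitted inference code, and the host-side evaluation used when both inputs are known constants (constant folding in Initialize). Below is a minimal standalone sketch of that idea, not part of the header; EDemoOp and DemoTrait are hypothetical names invented for illustration.

#include <iostream>
#include <string>

// Illustrative re-implementation of the trait idea; not part of TMVA SOFIE.
enum EDemoOp { kLess };

template <EDemoOp Op> struct DemoTrait {};

template <> struct DemoTrait<kLess> {
   static std::string Name() { return "Less"; }
   // Expression fragment spliced into the generated code, e.g. "a < b"
   static std::string Op(const std::string &a, const std::string &b) { return a + " < " + b; }
   // Host-side evaluation used for constant folding at model-initialization time
   template <typename T> static bool Result(T a, T b) { return a < b; }
};

int main() {
   // What the code generator would emit for one element of the output tensor:
   std::cout << "tensor_Y[idx] = " << DemoTrait<kLess>::Op("tensor_A[idx]", "tensor_B[idx]") << " ;\n";
   // What constant folding would compute directly:
   std::cout << std::boolalpha << DemoTrait<kLess>::Result(1.5, 2.0) << "\n";
   return 0;
}

Keeping the comparison logic in a trait means the operator class differs only by a template argument, which is presumably why the header defines one specialization per entry of EComparisionOperator rather than five separate operator classes.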