RModel.cxx
Go to the documentation of this file.
1#include <limits>
2#include <algorithm>
3#include <cctype>
4#include <memory>
5#include <string>
6
7#include "TFile.h"
8
9#include "TMVA/RModel.hxx"
10#include "TMVA/SOFIE_common.hxx"
11
12namespace TMVA {
13namespace Experimental {
14namespace SOFIE {
15
16std::underlying_type_t<Options> operator|(Options opA, Options opB) {
17 return static_cast<std::underlying_type_t<Options>>(opA) | static_cast<std::underlying_type_t<Options>>(opB);
18}
19std::underlying_type_t<Options> operator|(std::underlying_type_t<Options> opA, Options opB) {
20 return opA | static_cast<std::underlying_type_t<Options>>(opB);
21}
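// Illustrative use of the operator| overloads above (assuming an RModel instance
// `model` built elsewhere): option flags can be OR-ed together and passed to Generate,
// whose options parameter is of type std::underlying_type_t<Options>.
//
//    model.Generate(Options::kNoSession | Options::kNoWeightFile, /*batchSize=*/1);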
22
23RModel::RModel(RModel&& other) {
24 fInputTensorInfos = std::move(other.fInputTensorInfos);
25 fReadyInputTensorInfos = std::move(other.fReadyInputTensorInfos);
26 fOutputTensorNames = other.fOutputTensorNames;
27 fInputTensorNames = other.fInputTensorNames;
28 fOperators = std::move(other.fOperators);
29 fInitializedTensors = std::move(other.fInitializedTensors);
30 fIntermediateTensorInfos = std::move(other.fIntermediateTensorInfos);
31 fName = other.fName;
32 fFileName = other.fFileName;
33 fParseTime = other.fParseTime;
34 fGC = other.fGC;
35 fNeededBlasRoutines = other.fNeededBlasRoutines;
36 fNeededStdLib = other.fNeededStdLib;
37}
38
39RModel& RModel::operator=(RModel&& other) {
40 fInputTensorInfos = std::move(other.fInputTensorInfos);
41 fReadyInputTensorInfos = std::move(other.fReadyInputTensorInfos);
42 fOutputTensorNames = other.fOutputTensorNames;
43 fInputTensorNames = other.fInputTensorNames;
44 fOperators = std::move(other.fOperators);
45 fInitializedTensors = std::move(other.fInitializedTensors);
46 fIntermediateTensorInfos = std::move(other.fIntermediateTensorInfos);
47 fName = other.fName;
48 fFileName = other.fFileName;
49 fParseTime = other.fParseTime;
50 fGC = other.fGC;
51 fNeededBlasRoutines = other.fNeededBlasRoutines;
52 fNeededStdLib = other.fNeededStdLib;
53 return *this;
54}
55
56const std::vector<size_t>& RModel::GetTensorShape(std::string name) {
57 auto f = fReadyInputTensorInfos.find(name);
58 if (f != fReadyInputTensorInfos.end()) {
59 return f->second.shape;
60 }
61 auto f2 = fInitializedTensors.find(name);
62 if (f2 != fInitializedTensors.end()) {
63 return f2->second.shape();
64 }
65 auto f3 = fInputTensorInfos.find(name);
66 if (f3 != fInputTensorInfos.end()) {
67 throw std::runtime_error("TMVA SOFIE tensor [" + name + "] is an input tensor with unspecified dimension parameter");
68 }
69 auto f4 = fIntermediateTensorInfos.find(name);
70 if (f4 != fIntermediateTensorInfos.end()) {
71 return f4->second.shape;
72 }
73 if (fDynamicTensorInfos.find(name) != fDynamicTensorInfos.end())
74 throw std::runtime_error("TMVA SOFIE tensor [" + name + "] is a dynamic tensor. Use GetDynamicTensorShape instead of GetTensorShape");
75
78
79 throw std::runtime_error("TMVA SOFIE tensor [" + name + "] for which the shape is requested is not found");
80}
81
82std::vector<Dim> RModel::GetDynamicTensorShape(std::string name) {
83 if (auto f = fDynamicTensorInfos.find(name); f != fDynamicTensorInfos.end()) {
84 return f->second.shape;
85 }
86 if (auto f = fInputTensorInfos.find(name); f != fInputTensorInfos.end()) {
87 return f->second.shape;
88 }
89 // if it is not a dynamic tensor, convert the normal shape to a Dim-based one;
90 // for this we need to return the vector by value
91 return ConvertShapeToDim(GetTensorShape(name));
92}
93
94const ETensorType& RModel::GetTensorType(std::string name) {
95 auto f = fReadyInputTensorInfos.find(name);
96 if (f != fReadyInputTensorInfos.end()) {
97 return f->second.type;
98 }
99 auto f2 = fInitializedTensors.find(name);
100 if (f2 != fInitializedTensors.end()) {
101 return f2->second.type();
102 }
103 auto f3 = fInputTensorInfos.find(name);
104 if (f3 != fInputTensorInfos.end()) {
105 return f3->second.type;
106 }
107 auto f4 = fIntermediateTensorInfos.find(name);
108 if (f4 != fIntermediateTensorInfos.end()) {
109 return f4->second.type;
110 }
111 auto f5 = fDynamicTensorInfos.find(name);
112 if (f5 != fDynamicTensorInfos.end()){
113 return f5->second.type;
114 }
115
118
119 throw std::runtime_error("TMVA SOFIE tensor [" + name + "] for which the type is requested is not found");
120}
121
122bool RModel::CheckIfTensorAlreadyExist(std::string tensor_name) {
123 if (fReadyInputTensorInfos.find(tensor_name) != fReadyInputTensorInfos.end()) return true;
124 if (fInputTensorInfos.find(tensor_name) != fInputTensorInfos.end()) return true;
125 if (fInitializedTensors.find(tensor_name) != fInitializedTensors.end()) return true;
126 if (fIntermediateTensorInfos.find(tensor_name) != fIntermediateTensorInfos.end()) return true;
127 if (fDynamicTensorInfos.find(tensor_name) != fDynamicTensorInfos.end()) return true;
129 return false;
130}
131
132void RModel::AddInputTensorInfo(std::string input_name, ETensorType type, std::vector<Dim> shape) {
133 input_name = UTILITY::Clean_name(input_name);
134 if (CheckIfTensorAlreadyExist(input_name)) {
135 throw std::runtime_error("TMVA-SOFIE: input tensor with name " + input_name + " already exists \n");
136 }
137
138 InputTensorInfo inputInfo { type, shape };
139 fInputTensorInfos[input_name] = inputInfo;
140}
141
142void RModel::AddInputTensorInfo(std::string input_name, ETensorType type, std::vector<size_t> shape) {
143 input_name = UTILITY::Clean_name(input_name);
144 if (CheckIfTensorAlreadyExist(input_name)) {
145 throw std::runtime_error("TMVA-SOFIE: input tensor with name " + input_name + " already exists \n");
146 }
147 TensorInfo inputInfo { type, shape };
148 fReadyInputTensorInfos[input_name] = inputInfo;
149}
150
151void RModel::AddInputTensorName(std::string input_name) {
152 fInputTensorNames.push_back(UTILITY::Clean_name(input_name));
153}
154
155void RModel::AddOperator(std::unique_ptr<ROperator> op, int order_execution) {
156 AddBlasRoutines(op->GetBlasRoutines());
157 auto libs = op->GetStdLibs();
158 for (auto& stdlib : libs) {
159 AddNeededStdLib(stdlib);
160 }
161 if (order_execution >= 0) {
162 fOperators.insert(fOperators.begin() + order_execution, std::move(op));
163 } else {
164 fOperators.push_back(std::move(op));
165 }
166}
167
168void RModel::AddInitializedTensor(std::string tensor_name, ETensorType type, std::vector<std::size_t> shape, std::shared_ptr<void> data) {
169 tensor_name = UTILITY::Clean_name(tensor_name);
170 //NB: own data
171 if (CheckIfTensorAlreadyExist(tensor_name)) {
172 throw std::runtime_error("TMVA-SOFIE: initialized tensor with name " + tensor_name + " already exists \n");
173 }
174 InitializedTensor new_tensor {type, shape, data};
175 fInitializedTensors[tensor_name] = new_tensor;
176}
177
178void RModel::AddConstantTensor(std::string tensor_name, ETensorType type, std::vector<std::size_t> shape, std::shared_ptr<void> data) {
179 tensor_name = UTILITY::Clean_name(tensor_name);
180 //NB: own data
181 if (CheckIfTensorAlreadyExist(tensor_name)) {
182 throw std::runtime_error("TMVA-SOFIE: initialized tensor with name " + tensor_name + " already exists \n");
183 }
184 InitializedTensor new_tensor {type, shape, data, true}; // add here flag to specify is a constant tensor
185 fInitializedTensors[tensor_name] = new_tensor;
186}
187
188bool RModel::IsInitializedTensor(const std::string& tensorName) const {
189 std::string name = UTILITY::Clean_name(tensorName);
190 return fInitializedTensors.find(name) != fInitializedTensors.end();
191}
192
193bool RModel::IsDynamicTensor(const std::string& tensorName) const {
194 std::string name = UTILITY::Clean_name(tensorName);
195 return fDynamicTensorInfos.find(name) != fDynamicTensorInfos.end();
196}
197bool RModel::IsInputTensor(const std::string& tensorName) const {
198 std::string name = UTILITY::Clean_name(tensorName);
199 return fInputTensorInfos.find(name) != fInputTensorInfos.end();
200}
201
202// generic addition of a tensor
203void RModel::AddIntermediateTensor(std::string tensor_name, ETensorType type, std::vector<Dim> dim_shape) {
204 auto int_shape = ConvertShapeToInt(dim_shape);
205 if (!int_shape.empty())
206 AddIntermediateTensor(tensor_name, type, int_shape);
207 else
208 AddDynamicTensor(tensor_name, type, dim_shape);
209}
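// Illustrative sketch of the dispatch above (tensor names "Y1"/"Y2" are hypothetical,
// and the Dim-from-parameter-name constructor is assumed from SOFIE_common):
//
//    model.AddIntermediateTensor("Y1", ETensorType::FLOAT, {Dim{1}, Dim{16}});            // fully known -> intermediate
//    model.AddIntermediateTensor("Y2", ETensorType::FLOAT, {Dim{"batch_size"}, Dim{16}}); // parametric  -> dynamic
//
// ConvertShapeToInt returns an empty vector as soon as one dimension is parametric,
// which is what routes the second call to AddDynamicTensor.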
210
211void RModel::AddIntermediateTensor(std::string tensor_name, ETensorType type, std::vector<std::size_t> shape) {
212 tensor_name = UTILITY::Clean_name(tensor_name);
213 if (CheckIfTensorAlreadyExist(tensor_name)) {
214 throw std::runtime_error("TMVA-SOFIE: intermediate tensor with name " + tensor_name + " already exists \n");
215 }
216 TensorInfo new_tensor {type, shape};
217 fIntermediateTensorInfos[tensor_name] = new_tensor;
218}
219
220void RModel::AddDynamicTensor(std::string tensor_name, ETensorType type, std::vector<Dim> shape){
221 tensor_name = UTILITY::Clean_name(tensor_name);
222 if (CheckIfTensorAlreadyExist(tensor_name)){
223 throw std::runtime_error("TMVA-SOFIE: intermediate tensor with name " + tensor_name + " already exists \n");
224 }
225 DynamicTensorInfo new_tensor {type, shape};
226 fDynamicTensorInfos[tensor_name] = new_tensor;
227 // store shape parameter if not existing
228 for (auto &d : shape) {
229 if (d.isParam) {
230 if (fShapeParams.count(d.param) == 0) {
231 // case parameter is an expression of some other existing parameter, no need to
232 // register it
233 if (d.dim != size_t(-1)) {
234 fShapeParams[d.param] = std::to_string(d.dim);
235 }
236 }
237 }
238 }
239}
240
241void RModel::AddOutputTensorNameList(std::vector<std::string> outputtensornames) {
242 fOutputTensorNames.clear();
243 for(auto& it : outputtensornames) {
244 fOutputTensorNames.push_back(UTILITY::Clean_name(it));
245 }
246}
247
248void RModel::UpdateOutputTensorList(std::vector<std::string> curr_output_tensors, std::vector<std::string> new_output_tensors) {
249 for(auto& it:curr_output_tensors) {
250 fOutputTensorNames.erase(std::remove(fOutputTensorNames.begin(), fOutputTensorNames.end(), it), fOutputTensorNames.end());
251 }
252 fOutputTensorNames.insert(fOutputTensorNames.end(), new_output_tensors.begin(), new_output_tensors.end());
253}
254
255void RModel::UpdateInitializedTensor(std::string tensor_name, ETensorType type, std::vector<std::size_t> shape, std::shared_ptr<void> data) {
256 tensor_name = UTILITY::Clean_name(tensor_name);
257 if (!CheckIfTensorAlreadyExist(tensor_name)) {
258 throw std::runtime_error("TMVA-SOFIE: tensor " + tensor_name + " not found when trying to update it");
259 }
260 InitializedTensor new_tensor {type, shape, data};
261 fInitializedTensors[tensor_name] = new_tensor;
262}
263
264std::shared_ptr<void> RModel::GetInitializedTensorData(std::string tensor_name) {
265 auto f = fInitializedTensors.find(tensor_name);
266 if (f == fInitializedTensors.end()) {
267 throw std::runtime_error("TMVA-SOFIE: tensor " + tensor_name + " not found when trying to get its data");
268 } else {
269 return f->second.sharedptr();
270 }
271}
272
273void RModel::SetNotWritableInitializedTensor(const std::string & tensor_name) {
274 auto t = fInitializedTensors.find(tensor_name);
275 if (t == fInitializedTensors.end()) {
276 throw std::runtime_error("TMVA-SOFIE: initialized tensor " + tensor_name + " not found when trying to get its info");
277 }
278 t->second.SetNotWritable();
279 }
280
281void RModel::Initialize(int batchSize, bool verbose) {
282 std::map<std::string, size_t> inputParams;
283 if (batchSize > 0) {
284 inputParams["input_size"] = batchSize;
285 inputParams["batch_size"] = batchSize;
286 inputParams["bs"] = batchSize;
287 }
288 Initialize(inputParams, verbose);
289}
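// Note: the integer overload above only maps a user-given batch size onto the common
// parameter names "input_size", "batch_size" and "bs". For inputs whose shape uses a
// differently named parameter, the map-based overload below can be called directly;
// an illustrative sketch (the parameter name "seq_len" is hypothetical):
//
//    std::map<std::string, size_t> params{{"batch_size", 32}, {"seq_len", 128}};
//    model.Initialize(params, /*verbose=*/true);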
290void RModel::Initialize(const std::map<std::string, size_t> & inputParams, bool verbose) {
291
292 fVerbose = int(verbose);
293
294 if (fIsInitialized) {
295 if (verbose)
296 std::cout << "Model is already initialized - skip initialization " << std::endl;
297 return;
298 }
300 fDynamicTensorInfos.clear();
301
302 // loop on inputs and see if the shape can be fully specified
303 // if the batch size is provided it can be used to specify the full shape
304 // Add the fully specified tensors to the fReadyInputTensorInfos collection
305 auto originalInputTensorInfos = fInputTensorInfos; // need to copy because we may delete elements
306 for (auto &input : originalInputTensorInfos) {
307 if (verbose) std::cout << "looking at the tensor " << input.first << std::endl;
308 // if a parameter (e.g. batch_size) is specified, use it to convert the parametric shape into a defined one
309 if (!inputParams.empty()) {
310 for (auto &d : input.second.shape) {
311 if (d.isParam) {
312 std::string pname = d.param;
313 if (pname == input.first + "_size") pname = "input_size";
314 auto itr = inputParams.find(pname);
315 if (itr != inputParams.end() ) {
316 d = Dim{ itr->second };
317 if (verbose)
318 std::cout << "Tensor: " << input.first << " - fix parametric shape " << itr->first << " to " << itr->second << std::endl;
319 }
320 }
321 }
322 }
323 // see if shape now is fully defined
324 auto shape = ConvertShapeToInt(input.second.shape);
325 if (verbose)
326 std::cout << "converting input shape for " << input.first << " " << ConvertShapeToString(shape) << " from "
327 << ConvertDynamicShapeToString(input.second.shape) << std::endl;
328 if (!shape.empty()) {
329 // if the shape is defined (not parametric) we add the tensor to the fReadyInputTensorInfos map and
330 // remove it from fInputTensorInfos, where the old parametric shape was stored
331 fInputTensorInfos.erase(input.first);
332 // add to the ready input tensor information the new fixed shape
333 AddInputTensorInfo(input.first, input.second.type, shape);
334 // check consistency
335 assert( fReadyInputTensorInfos.size() + fInputTensorInfos.size() == fInputTensorNames.size());
336 }
337 // store the parameters of the input tensors
338 else {
339 // store the found parametric shape parameters
340 for (auto &d : input.second.shape) {
341 if (d.isParam)
342 fShapeParams[d.param] = std::to_string(d.dim);
343 }
344 }
345 }
346
347 if (verbose) {
350 }
351
352 // check if there are initialized tensors to write in a weight file
353 // support for the time being only weight of FLOAT type
354 if (fUseWeightFile) {
355 bool modelHasWeights = false;
356 for (auto &i : fInitializedTensors) {
357 if (i.second.type() == ETensorType::FLOAT) {
358 modelHasWeights = true;
359 break;
360 }
361 }
362 if (!modelHasWeights)
363 fUseWeightFile = false;
364 }
365 // Go through model and initialize each operator
366 int i = 0;
367 for (auto &op : fOperators) {
368 if (verbose) {
369 auto& r = *op.get();
370 std::cout << "Initializing operator " << i << " " << typeid(r).name() << std::endl;
371 }
372 op->Initialize(*this);
373 i++;
374 }
375 fIsInitialized = true;
376}
377
378void RModel::InitializeSubGraph(std::shared_ptr<RModel> graph) {
379 // add the subgraph to the list
380 fSubGraphs.push_back(graph);
381 //this needs to be done before initializing
382 graph->fParentGraph = this;
383 graph->fIsSubGraph = true;
384
385 graph->Initialize(fBatchSize, fVerbose);
386 // set the same options as parent model
387 graph->fWeightFile = fWeightFile;
388 graph->fUseWeightFile = fUseWeightFile;
389 graph->fUseSession = fUseSession;
390 // add needed blas routines and libs
391 std::vector<std::string> blasRoutines;
392 for (auto & e : graph->fNeededBlasRoutines)
393 blasRoutines.push_back(e);
394 AddBlasRoutines(blasRoutines);
395 for (auto e : graph->fNeededStdLib)
396 AddNeededStdLib(e);
397
398 // add parent input tensors to current graph
399 for (auto & name : fInputTensorNames)
400 graph->fInputTensorNames.push_back(name);
401
402 // clean graph name
403 graph->fName = UTILITY::Clean_name(graph->fName);
404
405}
406
407void RModel::GenerateInitializedTensorInfo() {
408 if (!fInitializedTensors.empty())
409 fGC += "// initialized tensors\n";
410 for (auto& i: fInitializedTensors) {
411
412 size_t length = ConvertShapeToLength(i.second.shape());
413 // in case we are not using a weight file, or for tensors created by a Constant operator
414 if (!fUseWeightFile || i.second.IsConstantTensor() ) {
415 //std::cout << "write tensor " << i.first << std::endl;
416 std::stringstream strs;
417 if (i.second.type() == ETensorType::FLOAT) {
418 strs << "float tensor_" << i.first << "[" << length << "] = {";
419 float const *data = i.second.data<float>();
420 for (size_t idx = 0; idx < length; idx++) {
421 strs << std::setprecision(std::numeric_limits<float>::max_digits10) << data[idx];
422 if (idx < length-1) strs << ", ";
423 }
424 strs << "};\n";
425 }
426 else if (i.second.type() == ETensorType::INT64) {
427 strs << "int64_t tensor_" << i.first << "[" << length << "] = {";
428 int64_t const *data = i.second.data<int64_t>();
429 for (size_t idx = 0; idx < length; idx++) {
430 strs << data[idx];
431 if (idx < length-1) strs << ", ";
432 }
433 strs << "};\n";
434 }
435 fGC += strs.str();
436 }
437 // case of tensors which are read from a file
438 else {
439 if (i.second.type() == ETensorType::FLOAT) {
440 fGC += "std::vector<float> fTensor_" + i.first + " = std::vector<float>(" + std::to_string(length) + ");\n";
441 fGC += "float * tensor_" + i.first + " = fTensor_" + i.first + ".data();\n";
442 }
443 }
444 }
445}
446
447void RModel::GenerateIntermediateTensorInfo() {
448 if (!fIntermediateTensorInfos.empty()) {
449 fGC += "\n//--- declare and allocate the intermediate tensors\n";
450 for (auto &i : fIntermediateTensorInfos) {
451 size_t length = ConvertShapeToLength(i.second.shape);
452 if (i.second.type == ETensorType::FLOAT) {
453 fGC += "std::vector<float> fTensor_" + i.first + " = std::vector<float>(" + std::to_string(length) + ");\n";
454 fGC += "float * tensor_" + i.first + " = fTensor_" + i.first + ".data();\n";
455 }
456 if (i.second.type == ETensorType::DOUBLE) {
457 fGC += "std::vector<double> fTensor_" + i.first + " = std::vector<double>(" + std::to_string(length) + ");\n";
458 fGC += "double * tensor_" + i.first + " = fTensor_" + i.first + ".data();\n";
459 }
460 if (i.second.type == ETensorType::INT64) {
461 fGC += "std::vector<int64_t> fTensor_" + i.first + " = std::vector<int64_t>(" + std::to_string(length) + ");\n";
462 fGC += "int64_t * tensor_" + i.first + " = fTensor_" + i.first + ".data();\n";
463 }
464 if (i.second.type == ETensorType::BOOL) {
465 fGC += "std::vector<bool> fTensor_" + i.first + " = std::vector<bool>(" + std::to_string(length) + ");\n";
466 // don't allocate a pointer since std::vector<bool> doesn't have a .data() member
467 }
468 }
469 }
470 // add also the dynamic tensors (only declarations, allocation will be done later)
471 if (!fDynamicTensorInfos.empty()) {
472 fGC += "//--- declare the dynamic tensors\n";
473 for (auto &i : fDynamicTensorInfos) {
474 if (i.second.type == ETensorType::FLOAT) {
475 fGC += "std::vector<float> fTensor_" + i.first + ";\n";
476 fGC += "float * tensor_" + i.first + " = nullptr;\n";
477 } else if (i.second.type == ETensorType::DOUBLE) {
478 fGC += "std::vector<double> fTensor_" + i.first + ";\n";
479 fGC += "double * tensor_" + i.first + " = nullptr;\n";
480 } else if (i.second.type == ETensorType::INT64) {
481 fGC += "std::vector<int64_t> fTensor_" + i.first + ";\n";
482 fGC += "int64_t * tensor_" + i.first + " = nullptr;\n";
483 }
484 }
485 }
486}
487
488void RModel::GenerateDynamicTensorInfo() {
489 fGC += "//---- allocate the intermediate dynamic tensors\n";
490 std::stringstream out;
491 for (auto & i: fDynamicTensorInfos) {
492 auto length = ConvertDynamicShapeToLength(i.second.shape);
493 out << SP << "if (" << length << " > 0) {\n";
494 out << SP << SP << "fTensor_" << i.first << ".resize(" << length << ");\n";
495 out << SP << SP << "tensor_" << i.first << " = fTensor_" << i.first << ".data();\n";
496 out << SP << "}\n";
497 }
498 fGC += out.str();
499}
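// Illustrative example of the code emitted above for a hypothetical dynamic float
// tensor "out" whose shape is {batch_size, 10}, so ConvertDynamicShapeToLength yields
// an expression along the lines of "batch_size * 10":
//
//    if (batch_size * 10 > 0) {
//       fTensor_out.resize(batch_size * 10);
//       tensor_out = fTensor_out.data();
//    }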
500
501std::string RModel::GenerateInferSignature(bool isdecl) {
502 // generate the infer signature given the inputs: eg. "float * tensor1, float * tensor2"
503 // if isdecl is false, generate only the calling signature (tensor1,tensor2,...)
504 std::string rGC;
505 std::unordered_map<std::string, int> inputParams;
506 int i_input = 0;
507 for (auto &name : fInputTensorNames) {
508 // if it is a dynamic tensor, pass also its shape parameters
509 if (IsInputTensor(name)) {
510 auto shape = GetDynamicTensorShape(name);
511 for (auto &d : shape) {
512 std::string pName = d.param;
513 // need to check if the input parameter already exists in another input tensor
514 if (d.isParam && inputParams.count(pName) == 0) {
515 if (isdecl) rGC += "size_t ";
516 rGC += d.param + ",";
517 inputParams[pName] = i_input;
518 }
519 }
520 }
521 if (isdecl) {
522 std::string type = ConvertTypeToString(GetTensorType(name));
523 if (type == "other")
524 throw std::runtime_error("TMVA-SOFIE: input tensor " + name +
525 " is of a data type which is not yet supported.");
526 rGC += type + "* ";
527 }
528 rGC += "tensor_" + name + ",";
529 i_input++;
530 }
531
532 if (fInputTensorNames.size() > 0) rGC.pop_back();// remove last ","
533 return rGC;
534}
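// Illustrative output of GenerateInferSignature for a hypothetical model with a single
// float input "input1" of shape {batch_size, 3}:
//
//    isdecl = true  ->  "size_t batch_size,float* tensor_input1"   (declaration signature)
//    isdecl = false ->  "batch_size,tensor_input1"                 (calling signature)
//
// Shape parameters shared by several input tensors are emitted only once, thanks to the
// inputParams bookkeeping map.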
535
537
538 if (fVerbose)
539 std::cout << "Generating main inference code for " << fName << std::endl;
540
541 size_t outputSize = fOutputTensorNames.size();
542 // assume output types are all the same
543 if (outputSize == 0)
544 throw std::runtime_error("TMVA-SOFIE: output size = 0 is not supported");
545
546 std::string outputType;
547 ETensorType eOutputType;
548 eOutputType = GetTensorType(fOutputTensorNames[0]);
549 outputType = ConvertTypeToString(eOutputType);
550 if (outputSize == 1) {
551 fGC += "std::vector<" + outputType + "> ";
552 } else {
553 // we assume all output types are the same
554 for (size_t i = 1; i < outputSize; i++) {
555 if (GetTensorType(fOutputTensorNames[i]) != eOutputType)
556 throw std::runtime_error("TMVA-SOFIE: different output tensor types are not supported");
557 }
558 fGC += "std::vector<std::vector<" + outputType + ">> ";
559 }
560
561 fGC += "infer(";
562
563 fGC += GenerateInferSignature();
564
565 fGC += "){\n";
566
567 for (size_t id = 0; id < fOperators.size(); id++) {
568 if (fVerbose) std::cout << "Generating code for operator .... " << id << std::endl;
569 fGC += (fOperators[id]->Generate(std::to_string(id)));
570 }
571
572 if (outputSize == 1) {
573 std::string tensorName = fOutputTensorNames[0];
574 if (fIntermediateTensorInfos.count(tensorName) > 0) {
575 // need to check if the size is the same (we don't want to return a vector with a larger size);
576 // in that case it is better to copy
577 fGC += SP + "return fTensor_" + tensorName + ";\n";
578 } else {
579 // include also dynamic tensors since the vectors can be allocated with a size larger than their output
580 // we need a special handling for bool type allocated as vector<bool>
581 auto outputLength = ConvertDynamicShapeToLength(GetDynamicTensorShape(tensorName));
582 if (IsDynamicTensor(tensorName) && eOutputType == ETensorType::BOOL) {
583 fGC += SP + "std::vector<bool> ret (fTensor_" + tensorName + ".begin(), fTensor_" + tensorName +
584 ".begin() + " + outputLength + ");\n";
585 } else {
586 fGC += SP + "std::vector<" + outputType + "> ret (tensor_" + tensorName + ", tensor_" + tensorName + " + " +
587 outputLength + ");\n";
588 }
589 fGC += SP + "return ret;\n";
590 }
591 } else {
592 // here we assume all outputs have same type
593 fGC += SP + "std::vector<std::vector<" + outputType + ">> ret({";
594 for (size_t i = 0; i < outputSize; i++) {
595 std::string tensorName = fOutputTensorNames[i];
596 if (!tensorName.empty()) {
597 if (fIntermediateTensorInfos.count(tensorName) > 0) {
598 fGC += "fTensor_" + tensorName;
599 } else {
600 auto outputLength = ConvertDynamicShapeToLength(GetDynamicTensorShape(tensorName));
601 if (IsDynamicTensor(tensorName) && eOutputType == ETensorType::BOOL) {
602 fGC += "std::vector<bool>(fTensor_" + tensorName + ".begin(), fTensor_" + tensorName + ".begin() + " +
603 outputLength + ");\n";
604 } else {
605 fGC += "std::vector<" + outputType + ">(tensor_" + tensorName + ", tensor_" + tensorName + " + " +
606 outputLength + ")";
607 }
608 }
609 if (i < outputSize - 1)
610 fGC += ",";
611 } else {
612 fGC += "{}";
613 }
614 }
615 fGC += "});\n";
616 fGC += SP + "return ret;\n";
617 }
618 fGC += "}\n"; // end of infer function scope
619}
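// Sketch of the infer() function assembled above (illustrative, for a single float
// output "Y" stored in an intermediate tensor, so the generated code can return the
// backing vector directly):
//
//    std::vector<float> infer(float* tensor_input1){
//       ... code generated by each operator ...
//       return fTensor_Y;
//    }
//
// With several outputs the return type becomes std::vector<std::vector<float>> and the
// individual output tensors are packed into one vector of vectors.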
620
621void RModel::GenerateSessionCode()
622{
623
624 // define the Session struct (for GNN this is generated in RModel_GNN)
625 if (fUseSession) {
626 if (!fIsSubGraph)
627 fGC += "struct Session {\n";
628 else
629 fGC += "struct Session_" + fName + " {\n";
630 }
631
634
635 // add subgraph session
636 if (!fSubGraphs.empty()) fGC += "// subgraph sessions\n";
637 for (auto & graph : fSubGraphs) {
638 fGC += "Session_" + graph->fName + " fSession_" + graph->fName + ";\n";
639 }
640
641 if (fUseSession) {
642 std::string sessionName = "Session";
643 if (fIsSubGraph)
644 sessionName += "_" + fName;
645 // add here specific operator code that needs to define session data members
646 fGC += "\n";
647 for (size_t id = 0; id < fOperators.size(); id++) {
648 std::string opName = std::to_string(id);
649 fGC += fOperators[id]->GenerateSessionMembersCode(opName);
650 }
651 fGC += "\n";
652 // here add initialization and reading of weight tensors
653 if (fUseWeightFile) {
654 std::string fileName = fName;
655 if (fWeightFile == WeightFileType::Text) {
656 fileName += ".dat";
657 }
658 if (fWeightFile == WeightFileType::RootBinary) {
659 fileName += ".root";
660 }
661 fGC += sessionName + "(std::string filename =\"" + fileName + "\"";
662 } else {
663 // no need to pass weight file since it is not used
664 // keep passing a string for compatibility
665 fGC += sessionName + "(std::string = \"\"";
666 }
667 // add initialization of shape parameters
668 // assume all parameters are of type size_t
669 if (!fShapeParams.empty()) {
670 for (auto &p : fShapeParams) {
671 fGC += ",\n";
672 fGC += " size_t " + p.first + " = " + p.second;
673 }
674 }
675 fGC += ") {\n";
676
677 if (fUseWeightFile) {
678 fGC += "\n//--- reading weights from file\n";
679 ReadInitializedTensorsFromFile(fReadPos);
680 fGC += "\n";
681 // fUseWeightFile = fUseWeightFile;
682 }
683
684 // now that the parameters have been passed, we can allocate the dynamic tensors
685 GenerateDynamicTensorInfo();
686
687 // add here initialization code for operator
688 for (size_t id = 0; id < fOperators.size(); id++) {
689 fGC += fOperators[id]->GenerateInitCode();
690 }
691
692 fGC += "}\n\n";
693 }
694
696
697 // end of session
698 if (fUseSession) {
699 fGC += "}; // end of Session\n";
700 }
701}
702
703void RModel::Generate(std::underlying_type_t<Options> options, int batchSize, long pos, bool verbose)
704{
705 fVerbose = verbose;
706 fBatchSize = batchSize;
707 fReadPos = pos;
708
709 // session flag is used in operator initialize
710 if (static_cast<std::underlying_type_t<Options>>(Options::kNoSession) & options) {
711 fUseSession = false;
713 }
714 if (static_cast<std::underlying_type_t<Options>>(Options::kNoWeightFile) & options) {
715 fUseWeightFile = false;
716 fWeightFile = WeightFileType::None;
717 }
718 if (static_cast<std::underlying_type_t<Options>>(Options::kRootBinaryWeightFile) & options) {
719 fUseWeightFile = true;
720 fWeightFile = WeightFileType::RootBinary;
721 }
722 if (fUseWeightFile && !fUseSession) {
723 throw std::runtime_error(
724 "TMVA-SOFIE: RModel::Generate: cannot use a separate weight file without generating a Session class");
725 }
726
727 if (static_cast<std::underlying_type_t<Options>>(Options::kGNN) & options)
728 fIsGNN = true;
729 if (static_cast<std::underlying_type_t<Options>>(Options::kGNNComponent) & options)
730 fIsGNNComponent = true;
731
732 // initialize the model including all operators and sub-graphs
733 Initialize(batchSize, verbose);
734
735 std::string hgname;
736 if (!fIsGNNComponent && !fIsSubGraph) {
737 fGC.clear();
738 GenerateHeaderInfo(hgname);
739 }
740
741 // generate first code for the subgraphs
742 for (auto &graph : fSubGraphs) {
743 if (fVerbose)
744 std::cout << "generate session code for subgraph " << graph->fName << std::endl;
745 graph->GenerateSessionCode();
746 fGC += graph->fGC;
747 }
748
749 if (fVerbose)
750 std::cout << "generate Main session code - model " << fName << std::endl;
751
752 // generate main session code
753 GenerateSessionCode();
754
755 if (!fIsGNNComponent && !fIsSubGraph) {
756 fGC += ("} //TMVA_SOFIE_" + fName + "\n");
757 fGC += "\n#endif // " + hgname + "\n";
758 }
759}
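// Typical end-to-end usage sketch (illustrative; "Model.onnx" and the output file name
// are placeholders, the parser comes from RModelParser_ONNX, and the defaulted
// Generate() overload is the one declared in RModel.hxx):
//
//    using namespace TMVA::Experimental::SOFIE;
//    RModelParser_ONNX parser;
//    RModel model = parser.Parse("Model.onnx");
//    model.Generate();                    // default options: Session class + weight file
//    model.OutputGenerated("Model.hxx");  // writes the header and the weight file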
760
761void RModel::ReadInitializedTensorsFromFile(long pos) {
762 // generate the code to read initialized tensors from a text data file
763 if (fWeightFile == WeightFileType::Text) {
764 if (fInitializedTensors.empty()) return;
765
766 fGC += " std::ifstream f;\n";
767 fGC += " f.open(filename);\n";
768 fGC += " if (!f.is_open()) {\n";
769 fGC += " throw std::runtime_error(\"tmva-sofie failed to open file \" + filename + \" for input weights\");\n";
770 fGC += " }\n";
771
772 if(fIsGNNComponent) {
773 fGC += " f.seekg(" + std::to_string(pos) + ");\n";
774 }
775
776 fGC += " std::string tensor_name;\n";
777 fGC += " size_t length;\n";
778
779 // loop on tensors and parse the file
780 for (auto& i: fInitializedTensors) {
781 // skip Constant and shape tensors
782 if (!i.second.IsWeightTensor()) continue;
783 std::string tensor_name = "tensor_" + i.first;
784 if (i.second.type() == ETensorType::FLOAT) {
785 size_t length = 1;
786 length = ConvertShapeToLength(i.second.shape());
787 std::string slength = std::to_string(length);
788 fGC += " f >> tensor_name >> length;\n";
789 fGC += " if (tensor_name != \"" + tensor_name + "\" ) {\n";
790 fGC += " std::string err_msg = \"TMVA-SOFIE failed to read the correct tensor name; expected name is " +
791 tensor_name + " , read \" + tensor_name;\n";
792 fGC += " throw std::runtime_error(err_msg);\n";
793 fGC += " }\n";
794 fGC += " if (length != " + slength + ") {\n";
795 fGC += " std::string err_msg = \"TMVA-SOFIE failed to read the correct tensor size; expected size is " +
796 slength + " , read \" + std::to_string(length) ;\n";
797 fGC += " throw std::runtime_error(err_msg);\n";
798 fGC += " }\n";
799 fGC += " for (size_t i = 0; i < length; ++i)\n";
800 fGC += " f >> " + tensor_name + "[i];\n";
801 fGC += " if (f.fail()) {\n";
802 fGC += " throw std::runtime_error(\"TMVA-SOFIE failed to read the values for tensor " + tensor_name + "\");\n";
803 fGC += " }\n";
804 } else {
805 throw std::runtime_error("tmva-sofie tensor " + tensor_name + " with type " + ConvertTypeToString(i.second.type()) + " cannot be read from a file");
806 }
807 }
808 fGC += " f.close();\n";
809 }
810
811 // generate the code to read initialized tensors from a ROOT data file
812 if (fWeightFile == WeightFileType::RootBinary) {
813 fGC += " {\n";
814 fGC += " std::unique_ptr<TFile> rootFile(TFile::Open(filename.c_str(), \"READ\"));\n";
815 fGC += " if (!rootFile->IsOpen()) {\n";
816 fGC += " throw std::runtime_error(\"tmva-sofie failed to open ROOT file for input weights\");\n";
817 fGC += " }\n";
818
819 std::string dirName = fName + "_weights";
820 fGC += " if (!rootFile->GetKey(\"" + dirName + "\")) {\n";
821 fGC += " throw std::runtime_error(\"tmva-sofie failed to open ROOT directory for input weights\");\n";
822 fGC += " }\n";
823
824 for (auto &i : fInitializedTensors) {
825 // skip Constant and shape tensors
826 if (!i.second.IsWeightTensor()) continue;
827 fGC += " {\n";
828 std::string tensor_name = "tensor_" + i.first;
829 if (i.second.type() == ETensorType::FLOAT) {
830 fGC += " fTensor_" + i.first + " = *reinterpret_cast<std::vector<float>*>(rootFile->Get(\"";
831 fGC += dirName + "/" + tensor_name + "\"));\n";
832 } else if (i.second.type() == ETensorType::DOUBLE) {
833 fGC += " fTensor_" + i.first + " = *reinterpret_cast<std::vector<double>*>(rootFile->Get(\"";
834 fGC += dirName + "/" + tensor_name + "\"));\n";
835 } else if (i.second.type() == ETensorType::INT64) {
836 fGC += " fTensor_" + i.first + " = *reinterpret_cast<std::vector<int64_t>*>(rootFile->Get(\"";
837 fGC += dirName + "/" + tensor_name + "\"));\n";
838 } else {
839 throw std::runtime_error("tmva-sofie tensor " + tensor_name + " with type " + ConvertTypeToString(i.second.type()) + " cannot be read from a ROOT file");
840 }
841 fGC += " }\n";
842 }
843 fGC += " }\n";
844 }
845}
846
847long RModel::WriteInitializedTensorsToFile(std::string filename) {
848 // Determine the file extension based on the weight file type
849 std::string fileExtension;
850 switch (fWeightFile) {
851 case WeightFileType::None:
852 fileExtension = ".dat";
853 break;
854 case WeightFileType::RootBinary:
855 fileExtension = ".root";
856 break;
857 case WeightFileType::Text:
858 fileExtension = ".dat";
859 break;
860 }
861
862 // If filename is empty, use the model name as the base filename
863 if (filename.empty()) {
864 filename = fFileName + fileExtension;
865 }
866
867 // Write the initialized tensors to the file
868 if (fWeightFile == WeightFileType::RootBinary) {
869 if(fIsGNNComponent || fIsGNN) {
870 throw std::runtime_error("SOFIE-GNN does not yet support writing to a ROOT file.");
871 }
872 std::unique_ptr<TFile> outputFile(TFile::Open(filename.c_str(), "UPDATE"));
873
874 std::string dirName = fName + "_weights";
875 // check if the directory exists; if so, delete it and replace it with a new one
876 if (outputFile->GetKey(dirName.c_str()))
877 outputFile->rmdir(dirName.c_str());
878
879 auto outputDir = outputFile->mkdir(dirName.c_str());
880
881 for (const auto& item : fInitializedTensors) {
882 // skip Constant tensors and tensors which are not writable (e.g. shape tensors)
883 if (!item.second.IsWeightTensor()) continue;
884 std::string tensorName = "tensor_" + item.first;
885 size_t length = 1;
886 length = ConvertShapeToLength(item.second.shape());
887 if(item.second.type() == ETensorType::FLOAT) {
888 const float* data = item.second.data<float>();
889 std::vector<float> tensorDataVector(data, data + length);
890 outputDir->WriteObjectAny(&tensorDataVector, "std::vector<float>", tensorName.c_str());
891 }
892 else if(item.second.type() == ETensorType::DOUBLE) {
893 const double* data = item.second.data<double>();
894 std::vector<double> tensorDataVector(data, data + length);
895 outputDir->WriteObjectAny(&tensorDataVector, "std::vector<double>", tensorName.c_str());
896 }
897 else if(item.second.type() == ETensorType::INT64) {
898 const int64_t* data = item.second.data<int64_t>();
899 std::vector<int64_t> tensorDataVector(data, data + length);
900 outputDir->WriteObjectAny(&tensorDataVector, "std::vector<int64_t>", tensorName.c_str());
901 }
902 else {
903 throw std::runtime_error("tmva-sofie tensor " + tensorName + " with type " + ConvertTypeToString(item.second.type()) +
904 " cannot be written to a ROOT file");
905 }
906 }
907 outputFile->Write(filename.c_str());
908
909 // this needs to be changed, similar to the text file
910 return -1;
911
912 } else if (fWeightFile == WeightFileType::Text) {
913 std::ofstream f;
914 if(fIsGNNComponent) {
915 // appending all GNN components into the same file
916 f.open(filename, std::ios::app);
917 } else {
918 f.open(filename);
919 }
920 if (!f.is_open())
921 throw
922 std::runtime_error("tmva-sofie failed to open file " + filename + " for tensor weight data");
923 for (auto& i: fInitializedTensors) {
924 // skip Constant tensors and not writable tensors (e.g. shape tensors)
925 if (!i.second.IsWeightTensor()) {
926 continue;
927 }
928 size_t length = ConvertShapeToLength(i.second.shape());
929 std::string tensor_name = "tensor_" + i.first;
930 f << tensor_name << " " << length << "\n";
931 if (i.second.type() == ETensorType::FLOAT) {
932 const float * data = i.second.data<float>();
933 for (size_t idx = 0; idx < length; idx++) {
934 // round to zero sub-normal values
935 float value = data[idx];
936 if (value != 0. && std::abs(value) < std::numeric_limits<float>::min() ) value = 0;
937 f << std::setprecision(std::numeric_limits<float>::max_digits10) << value;
938 f << ( (idx < length-1) ? " " : "\n" );
939 }
940 }
941 else {
942 throw std::runtime_error("tmva-sofie tensor " + tensor_name + " with type " + ConvertTypeToString(i.second.type()) + " cannot be written to a file");
943 }
944 if (f.fail())
945 throw std::runtime_error("tmva-sofie failed to write tensor data to file for " + tensor_name);
946 }
947 long curr_pos = f.tellp();
948 f.close();
949 return curr_pos;
950 } else {
951 return -1;
952 }
953}
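// The text weight file produced above therefore contains, for each FLOAT tensor, a
// header line "tensor_<name> <length>" followed by the space-separated values, e.g.
// for a hypothetical 2x2 weight tensor "fc1w":
//
//    tensor_fc1w 4
//    0.25 -1.5 3.0999999 0
//
// The returned stream position can be passed back as `pos` when several models append
// their weights to the same file, as in the GNN-component case handled above.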
954
955void RModel::PrintRequiredInputTensors() {
956 std::cout << "Model requires the following inputs:\n";
957 for (auto& inputInfo: fInputTensorInfos) {
958 std::cout << "Parametrised Tensor name: " << inputInfo.first << "\t";
959 std::cout << "type: " << ConvertTypeToString(inputInfo.second.type) << "\t";
960 std::cout << "shape: [";
961 for (size_t i = 0; i < inputInfo.second.shape.size(); i++) {
962 if (inputInfo.second.shape[i].isParam) {
963 std::cout << inputInfo.second.shape[i].param;
964 } else {
965 std::cout << inputInfo.second.shape[i].dim ;
966 }
967 if (i < inputInfo.second.shape.size() - 1) std::cout << ",";
968 }
969 std::cout << "]" << std::endl;
970 }
971
972 for (auto& inputInfo: fReadyInputTensorInfos) {
973 std::cout << "Fully Specified Tensor name: " << inputInfo.first << "\t";
974 std::cout << "type: " << ConvertTypeToString(inputInfo.second.type) << "\t";
975 std::cout << "shape: [";
976 for (size_t i = 0; i < inputInfo.second.shape.size(); i++) {
977 std::cout << inputInfo.second.shape[i];
978 if (i < inputInfo.second.shape.size() - 1) std::cout << ",";
979 }
980 std::cout << "]" << std::endl;
981 }
982 std::cout << "\n";
983}
984
985void RModel::PrintInitializedTensors() {
986 std::cout << "Model initialized the following tensors:\n";
987 for (auto& it: fInitializedTensors) {
988 std::cout << "Tensor name: \"" << it.first << "\"\t";
989 std::cout << "type: " << ConvertTypeToString(it.second.type()) << "\t";
990 std::cout << "shape: [";
991 for (size_t i = 0; i < it.second.shape().size(); i++) {
992 std::cout << it.second.shape()[i];
993 if (i < it.second.shape().size() - 1) std::cout << ",";
994 }
995 std::cout << "]";
996 if (it.second.IsConstantTensor()) std::cout << " (Constant)";
997 else if (!it.second.IsWeightTensor()) std::cout << " (Not Writable)";
998 std::cout << std::endl;
999 }
1000 std::cout << "\n";
1001}
1002
1003void RModel::PrintIntermediateTensors() {
1004 std::cout << "Model specifies the following intermediate tensors:\n";
1005 for (auto& it: fIntermediateTensorInfos) {
1006 std::cout << "Tensor name: \"" << it.first << "\"\t";
1007 std::cout << "type: " << ConvertTypeToString(it.second.type) << "\t";
1008 std::cout << "shape: [";
1009 for (size_t i = 0; i < it.second.shape.size(); i++) {
1010 std::cout << it.second.shape[i];
1011 if (i < it.second.shape.size() - 1) std::cout << ",";
1012 }
1013 std::cout << "]" << std::endl;
1014 }
1015 std::cout << "\n";
1016}
1017
1018void RModel::PrintDynamicTensors() {
1019 std::cout << "Model specifies the following dynamic tensors:\n";
1020 for (auto& it: fDynamicTensorInfos) {
1021 std::cout << "Tensor name: \"" << it.first << "\"\t";
1022 std::cout << "type: " << ConvertTypeToString(it.second.type) << "\t";
1023 std::cout << "shape: [";
1024 for (size_t i = 0; i < it.second.shape.size(); i++) {
1025 std::cout << it.second.shape[i].GetVal();
1026 if (i < it.second.shape.size() - 1) std::cout << ",";
1027 }
1028 std::cout << "]" << std::endl;
1029 }
1030 std::cout << "\n";
1031}
1032
1033void RModel::PrintOutputTensors() {
1034 std::cout << "Model specifies the following output tensors:\n";
1035 for (auto& it: fOutputTensorNames) {
1036 std::cout << "Tensor name: \"" << it << "\"\t";
1037 if (!IsDynamicTensor(it))
1038 std::cout << "shape: " << ConvertShapeToString(GetTensorShape(it)) << std::endl;
1039 else
1040 std::cout << "shape: " << ConvertDynamicShapeToString(GetDynamicTensorShape(it)) << std::endl;
1041 }
1042 std::cout << "\n";
1043}
1044
1045void RModel::HeadInitializedTensors(std::string name, int n_print) {
1046 auto it = fInitializedTensors.find(name);
1047 if (it == fInitializedTensors.end()) {
1048 std::cout << "Tensor " << name << " not found in model's initialized tensor list" << std::endl;
1049 return;
1050 }
1051
1052 std::cout << "Tensor name: " << it->first << "\t";
1053 std::cout << "type: " << ConvertTypeToString(it->second.type()) << "\t";
1054 int length =1;
1055 std::cout << "shape: [";
1056 for (size_t i = 0; i < it->second.shape().size(); i++) {
1057 std::cout << it->second.shape()[i];
1058 length *= it->second.shape()[i];
1059 if (i < it->second.shape().size() - 1) std::cout << ",";
1060 }
1061 std::cout << "]" << std::endl;
1062 bool ellipsis = true;
1063 if (n_print > length) {
1064 n_print = length;
1065 ellipsis = false;
1066 }
1067
1068 std::cout << "data: [" << std::endl;
1069 if (it->second.type() == ETensorType::FLOAT) {
1070 auto converted_data = it->second.data<float>();
1071 for (int i =0; i < n_print; i++) {
1072 std::cout << converted_data[i];
1073 if (i < n_print - 1) std::cout << " ,";
1074 }
1075 }
1076 if (ellipsis) std::cout << ", ...";
1077 std::cout << "]" << std::endl;
1078
1079}
1080
1081void RModel::OutputGenerated(std::string filename, bool append) {
1082
1083 RModel_Base::OutputGenerated(filename, append);
1084
1085 // write weights in a text file
1086 if (fUseWeightFile) {
1087 if (!filename.empty()) {
1088 size_t pos = filename.find(".hxx");
1089 if (fWeightFile == WeightFileType::Text)
1090 filename.replace(pos, 4, ".dat");
1091 if (fWeightFile == WeightFileType::RootBinary) {
1092 filename = filename.erase(pos, 4);
1093 filename += ".root";
1094 }
1095 } else {
1096 filename = fName;
1097 filename += fWeightFile == WeightFileType::Text ? ".dat" : ".root";
1098 }
1099 WriteInitializedTensorsToFile(filename);
1100 }
1101}
1102
1103void RModel::Streamer(TBuffer &R__b) {
1104 if (R__b.IsReading()) {
1105 RModel::Class()->ReadBuffer(R__b, this);
1106 for(auto i=RModel::fInitializedTensors.begin(); i!=RModel::fInitializedTensors.end(); ++i) {
1107 i->second.CastPersistentToShared();
1108 }
1109 }
1110 else {
1111 for(auto i=RModel::fInitializedTensors.begin(); i!=RModel::fInitializedTensors.end(); ++i) {
1112 i->second.CastSharedToPersistent();
1113 }
1114 RModel::Class()->WriteBuffer(R__b, this);
1115 }
1116}
1117
1118}//SOFIE
1119}//Experimental
1120}//TMVA