ROOT Reference Guide
RModel.cxx
1#include <limits>
2#include <algorithm>
3#include <cctype>
4#include <memory>
5#include <string>
6
7#include "TFile.h"
8
9#include "TMVA/RModel.hxx"
10#include "TMVA/SOFIE_common.hxx"
11
12namespace TMVA {
13namespace Experimental {
14namespace SOFIE {
15
16std::underlying_type_t<Options> operator|(Options opA, Options opB) {
17 return static_cast<std::underlying_type_t<Options>>(opA) | static_cast<std::underlying_type_t<Options>>(opB);
18}
19std::underlying_type_t<Options> operator|(std::underlying_type_t<Options> opA, Options opB) {
20 return opA | static_cast<std::underlying_type_t<Options>>(opB);
21}
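// Usage sketch (illustrative, not part of this file): the operator| overloads
// above let callers combine Options flags and pass the result to
// RModel::Generate, which takes the underlying integer type, e.g.
//    model.Generate(Options::kNoSession | Options::kNoWeightFile);
// where `model` is an assumed RModel instance.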
22
23RModel::RModel(RModel&& other) {
24 fInputTensorInfos = std::move(other.fInputTensorInfos);
25 fReadyInputTensorInfos = std::move(other.fReadyInputTensorInfos);
26 fOutputTensorNames = other.fOutputTensorNames;
27 fInputTensorNames = other.fInputTensorNames;
28 fOperators = std::move(other.fOperators);
29 fInitializedTensors = std::move(other.fInitializedTensors);
30 fIntermediateTensorInfos = std::move(other.fIntermediateTensorInfos);
31 fName = other.fName;
32 fFileName = other.fFileName;
33 fParseTime = other.fParseTime;
34 fGC = other.fGC;
35 fNeededBlasRoutines = other.fNeededBlasRoutines;
36 fNeededStdLib = other.fNeededStdLib;
37}
38
39RModel& RModel::operator=(RModel&& other) {
40 fInputTensorInfos = std::move(other.fInputTensorInfos);
41 fReadyInputTensorInfos = std::move(other.fReadyInputTensorInfos);
42 fOutputTensorNames = other.fOutputTensorNames;
43 fInputTensorNames = other.fInputTensorNames;
44 fOperators = std::move(other.fOperators);
45 fInitializedTensors = std::move(other.fInitializedTensors);
46 fIntermediateTensorInfos = std::move(other.fIntermediateTensorInfos);
47 fName = other.fName;
48 fFileName = other.fFileName;
49 fParseTime = other.fParseTime;
50 fGC = other.fGC;
51 fNeededBlasRoutines = other.fNeededBlasRoutines;
52 fNeededStdLib = other.fNeededStdLib;
53 return *this;
54}
55
56const std::vector<size_t>& RModel::GetTensorShape(std::string name) {
57 auto f = fReadyInputTensorInfos.find(name);
58 if (f != fReadyInputTensorInfos.end()) {
59 return f->second.shape;
60 }
61 auto f2 = fInitializedTensors.find(name);
62 if (f2 != fInitializedTensors.end()) {
63 return f2->second.shape();
64 }
65 auto f3 = fInputTensorInfos.find(name);
66 if (f3 != fInputTensorInfos.end()) {
67 throw std::runtime_error("TMVA SOFIE tensor [" + name + "] is an input tensor with unspecified dimension parameter");
68 }
69 auto f4 = fIntermediateTensorInfos.find(name);
70 if (f4 != fIntermediateTensorInfos.end()) {
71 return f4->second.shape;
72 }
73 if (fDynamicTensorInfos.find(name) != fDynamicTensorInfos.end())
74 throw std::runtime_error("TMVA SOFIE tensor [" + name + "] is a dynamic tensor. Use GetDynamicTensorShape instead of GetTensorShape");
75
78
79 throw std::runtime_error("TMVA SOFIE tensor [" + name + "] for which the shape is requested is not found");
80}
81
82std::vector<Dim> RModel::GetDynamicTensorShape(std::string name) {
83 if (auto f = fDynamicTensorInfos.find(name); f != fDynamicTensorInfos.end()) {
84 return f->second.shape;
85 }
86 if (auto f = fInputTensorInfos.find(name); f != fInputTensorInfos.end()) {
87 return f->second.shape;
88 }
89 // if it is not a dynamic tensor, convert the normal shape to a Dim-based one
90 // for this we need to return the vector by value
91 return ConvertShapeToDim(GetTensorShape(name));
92}
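// Usage sketch (illustrative): GetTensorShape returns the integer shape only
// for fully defined tensors and throws for dynamic ones, where
// GetDynamicTensorShape must be used instead, e.g.
//    std::vector<Dim> s = model.GetDynamicTensorShape("input");   // "input" is a hypothetical tensor name
//    std::cout << ConvertDynamicShapeToString(s) << std::endl;    // prints e.g. { bs , 3 }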
93
94const ETensorType& RModel::GetTensorType(std::string name) {
95 auto f = fReadyInputTensorInfos.find(name);
96 if (f != fReadyInputTensorInfos.end()) {
97 return f->second.type;
98 }
99 auto f2 = fInitializedTensors.find(name);
100 if (f2 != fInitializedTensors.end()) {
101 return f2->second.type();
102 }
103 auto f3 = fInputTensorInfos.find(name);
104 if (f3 != fInputTensorInfos.end()) {
105 return f3->second.type;
106 }
107 auto f4 = fIntermediateTensorInfos.find(name);
108 if (f4 != fIntermediateTensorInfos.end()) {
109 return f4->second.type;
110 }
111 auto f5 = fDynamicTensorInfos.find(name);
112 if (f5 != fDynamicTensorInfos.end()){
113 return f5->second.type;
114 }
115
118
119 throw std::runtime_error("TMVA SOFIE tensor [" + name + "] for which the type is requested is not found");
120}
121
122bool RModel::CheckIfTensorAlreadyExist(std::string tensor_name) {
123 if (fReadyInputTensorInfos.find(tensor_name) != fReadyInputTensorInfos.end()) return true;
124 if (fInputTensorInfos.find(tensor_name) != fInputTensorInfos.end()) return true;
125 if (fInitializedTensors.find(tensor_name) != fInitializedTensors.end()) return true;
126 if (fIntermediateTensorInfos.find(tensor_name) != fIntermediateTensorInfos.end()) return true;
127 if (fDynamicTensorInfos.find(tensor_name) != fDynamicTensorInfos.end()) return true;
129 return false;
130}
131
132void RModel::AddInputTensorInfo(std::string input_name, ETensorType type, std::vector<Dim> shape) {
133 input_name = UTILITY::Clean_name(input_name);
134 if (CheckIfTensorAlreadyExist(input_name)) {
135 throw std::runtime_error("TMVA-SOFIE: input tensor with name " + input_name + " already exists \n");
136 }
137
138 InputTensorInfo inputInfo { type, shape };
139 fInputTensorInfos[input_name] = inputInfo;
140}
141
142void RModel::AddInputTensorInfo(std::string input_name, ETensorType type, std::vector<size_t> shape) {
143 input_name = UTILITY::Clean_name(input_name);
144 if (CheckIfTensorAlreadyExist(input_name)) {
145 throw std::runtime_error("TMVA-SOFIE: input tensor with name " + input_name + " already exists \n");
146 }
147 TensorInfo inputInfo { type, shape };
148 fReadyInputTensorInfos[input_name] = inputInfo;
149}
150
151void RModel::AddInputTensorName(std::string name) {
152 fInputTensorNames.push_back(UTILITY::Clean_name(name));
153}
154
155void RModel::AddOperator(std::unique_ptr<ROperator> op, int order_execution) {
156 AddBlasRoutines(op->GetBlasRoutines());
157 auto libs = op->GetStdLibs();
158 for (auto& stdlib : libs) {
159 AddNeededStdLib(stdlib);
160 }
161 if (order_execution >= 0) {
162 fOperators.insert(fOperators.begin() + order_execution, std::move(op));
163 } else {
164 fOperators.push_back(std::move(op));
165 }
166}
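// Usage sketch (illustrative): by default AddOperator appends to the
// execution order, while a non-negative order_execution inserts at that
// position, e.g.
//    model.AddOperator(std::move(opA));       // runs last
//    model.AddOperator(std::move(opB), 0);    // runs first
// (opA and opB are assumed std::unique_ptr<ROperator> instances).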
167
168void RModel::AddInitializedTensor(std::string tensor_name, ETensorType type, std::vector<std::size_t> shape, std::shared_ptr<void> data) {
169 tensor_name = UTILITY::Clean_name(tensor_name);
170 //NB: own data
171 if (CheckIfTensorAlreadyExist(tensor_name)) {
172 throw std::runtime_error("TMVA-SOFIE: initialized tensor with name " + tensor_name + " already exists \n");
173 }
174 InitializedTensor new_tensor {type, shape, data};
175 fInitializedTensors[tensor_name] = new_tensor;
176}
177
178void RModel::AddConstantTensor(std::string tensor_name, ETensorType type, std::vector<std::size_t> shape, std::shared_ptr<void> data) {
179 tensor_name = UTILITY::Clean_name(tensor_name);
180 //NB: own data
181 if (CheckIfTensorAlreadyExist(tensor_name)) {
182 throw std::runtime_error("TMVA-SOFIE: initialized tensor with name " + tensor_name + " already exists \n");
183 }
184 InitializedTensor new_tensor {type, shape, data, true}; // the true flag marks this as a constant tensor
185 fInitializedTensors[tensor_name] = new_tensor;
186}
187
188bool RModel::IsInitializedTensor(const std::string& tensorName) const {
189 std::string name = UTILITY::Clean_name(tensorName);
190 return fInitializedTensors.find(name) != fInitializedTensors.end();
191}
192bool RModel::IsConstantTensor(const std::string& tensorName) const {
193 std::string name = UTILITY::Clean_name(tensorName);
194 auto itr = fInitializedTensors.find(name);
195 if (itr == fInitializedTensors.end()) return false;
196 return itr->second.IsConstantTensor();
197}
198
199bool RModel::IsDynamicTensor(const std::string& tensorName) const {
200 std::string name = UTILITY::Clean_name(tensorName);
201 return fDynamicTensorInfos.find(name) != fDynamicTensorInfos.end();
202}
203bool RModel::IsDimInputTensor(const std::string& tensorName) const {
204 std::string name = UTILITY::Clean_name(tensorName);
205 return fInputTensorInfos.find(name) != fInputTensorInfos.end();
206}
207bool RModel::IsReadyInputTensor(const std::string& tensorName) const {
208 std::string name = UTILITY::Clean_name(tensorName);
209 return fReadyInputTensorInfos.find(name) != fReadyInputTensorInfos.end();
210}
211
212// generic addition of a tensor
213void RModel::AddIntermediateTensor(std::string tensor_name, ETensorType type, std::vector<Dim> dim_shape) {
214 auto int_shape = ConvertShapeToInt(dim_shape);
215 if (!int_shape.empty())
216 AddIntermediateTensor(tensor_name, type, int_shape);
217 else
218 AddDynamicTensor(tensor_name, type, dim_shape);
219}
220
221void RModel::AddIntermediateTensor(std::string tensor_name, ETensorType type, std::vector<std::size_t> shape) {
222 tensor_name = UTILITY::Clean_name(tensor_name);
223 if (CheckIfTensorAlreadyExist(tensor_name)) {
224 throw std::runtime_error("TMVA-SOFIE: intermediate tensor with name " + tensor_name + " already exists \n");
225 }
226 TensorInfo new_tensor {type, shape};
227 fIntermediateTensorInfos[tensor_name] = new_tensor;
228}
229
230void RModel::AddDynamicTensor(std::string tensor_name, ETensorType type, std::vector<Dim> shape){
231 tensor_name = UTILITY::Clean_name(tensor_name);
232 if (CheckIfTensorAlreadyExist(tensor_name)){
233 throw std::runtime_error("TMVA-SOFIE: intermediate tensor with name " + tensor_name + " already exists \n");
234 }
235 DynamicTensorInfo new_tensor {type, shape};
236 fDynamicTensorInfos[tensor_name] = new_tensor;
237 // store the shape parameters if not already existing
238 for (auto &d : shape) {
239 if (d.isParam) {
240 if (fShapeParams.count(d.param) == 0) {
241 // case parameter is an expression of some other existing parameter, no need to
242 // register it
243 if (d.dim != size_t(-1)) {
244 fShapeParams[d.param] = std::to_string(d.dim);
245 }
246 }
247 }
248 }
249}
250
251void RModel::AddOutputTensorNameList(std::vector<std::string> outputtensornames) {
252 fOutputTensorNames.clear();
253 for(auto& it : outputtensornames) {
254 fOutputTensorNames.push_back(UTILITY::Clean_name(it));
255 }
256}
257
258void RModel::UpdateOutputTensorList(std::vector<std::string> curr_output_tensors, std::vector<std::string> new_output_tensors) {
259 for(auto& it:curr_output_tensors) {
260 fOutputTensorNames.erase(std::remove(fOutputTensorNames.begin(), fOutputTensorNames.end(), it), fOutputTensorNames.end());
261 }
262 fOutputTensorNames.insert(fOutputTensorNames.end(), new_output_tensors.begin(), new_output_tensors.end());
263}
264
265void RModel::UpdateInitializedTensor(std::string tensor_name, ETensorType type, std::vector<std::size_t> shape, std::shared_ptr<void> data) {
266 tensor_name = UTILITY::Clean_name(tensor_name);
267 if (!CheckIfTensorAlreadyExist(tensor_name)) {
268 throw std::runtime_error("TMVA-SOFIE: tensor " + tensor_name + " not found when trying to update it");
269 }
270 InitializedTensor new_tensor {type, shape, data};
271 fInitializedTensors[tensor_name] = new_tensor;
272}
273
274std::shared_ptr<void> RModel::GetInitializedTensorData(std::string tensor_name) {
275 auto f = fInitializedTensors.find(tensor_name);
276 if (f == fInitializedTensors.end()) {
277 throw std::runtime_error("TMVA-SOFIE: tensor " + tensor_name + " not found when trying to get its data");
278 } else {
279 return f->second.sharedptr();
280 }
281}
282
283void RModel::SetNotWritableInitializedTensor(const std::string& tensor_name) {
284 auto t = fInitializedTensors.find(tensor_name);
285 if (t == fInitializedTensors.end()) {
286 throw std::runtime_error("TMVA-SOFIE: initialized tensor " + tensor_name + " not found when trying to get its info");
287 }
288 t->second.SetNotWritable();
289 }
290
291void RModel::Initialize(int batchSize, bool verbose) {
292 std::map<std::string, size_t> inputParams;
293 if (batchSize > 0) {
294 inputParams["input_size"] = batchSize;
295 inputParams["batch_size"] = batchSize;
296 inputParams["bs"] = batchSize;
297 }
298 Initialize(inputParams, verbose);
299}
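// Usage sketch (illustrative): Initialize(16) fixes the common parameter
// names "input_size", "batch_size" and "bs" to 16, so an input declared with
// a parametric shape such as (bs, 3, 224, 224) becomes fully defined.
// Arbitrary parameters can be fixed with the map-based overload below, e.g.
//    model.Initialize({{"bs", 16}, {"seq_len", 128}}, false);   // "seq_len" is a hypothetical parameter name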
300void RModel::Initialize(const std::map<std::string, size_t> & inputParams, bool verbose) {
301
302 fVerbose = int(verbose);
303
304 if (fIsInitialized) {
305 if (verbose)
306 std::cout << "Model is already initialized - skip initialization " << std::endl;
307 return;
308 }
310 fDynamicTensorInfos.clear();
311
312 // loop on the inputs and see if their shape can be fully specified;
313 // if the batch size is provided it can be used to specify the full shape
314 // add the fully specified tensors to the fReadyInputTensorInfos collection
315 auto originalInputTensorInfos = fInputTensorInfos; // need to copy because we may delete elements
316 for (auto &input : originalInputTensorInfos) {
317 if (verbose) std::cout << "looking at the tensor " << input.first << std::endl;
318 // if a parameter (e.g. batch_size) is specified, use it to convert the parametric shape into a defined one
319 if (!inputParams.empty()) {
320 for (auto &d : input.second.shape) {
321 if (d.isParam) {
322 std::string pname = d.param;
323 if (pname == input.first + "_size") pname = "input_size";
324 auto itr = inputParams.find(pname);
325 if (itr != inputParams.end() ) {
326 d = Dim{ itr->second };
327 if (verbose)
328 std::cout << "Tensor: " << input.first << " - fix parametric shape " << itr->first << " to " << itr->second << std::endl;
329 }
330 }
331 }
332 }
333 // see if shape now is fully defined
334 auto shape = ConvertShapeToInt(input.second.shape);
335 if (verbose)
336 std::cout << "converting input shape for " << input.first << " " << ConvertShapeToString(shape) << " from "
337 << ConvertDynamicShapeToString(input.second.shape) << std::endl;
338 if (!shape.empty()) {
339 // case shape is defined (not parametric): we add the tensor to the fReadyInputTensorInfos map and
340 // remove it from fInputTensorInfos where the old parametric shape was stored
341 fInputTensorInfos.erase(input.first);
342 // add to the ready input tensor information the new fixed shape
343 AddInputTensorInfo(input.first, input.second.type, shape);
344 // check consistency
346 }
347 // store the parameters of the input tensors
348 else {
349 // store the found parametric shape parameters
350 for (auto &d : input.second.shape) {
351 if (d.isParam)
352 fShapeParams[d.param] = std::to_string(d.dim);
353 }
354 }
355 }
356
357 if (verbose) {
360 }
361
362 // check if there are initialized tensors to write in a weight file
363 // support for the time being only weight of FLOAT type
364 if (fUseWeightFile) {
365 bool modelHasWeights = false;
366 for (auto &i : fInitializedTensors) {
367 if (i.second.type() == ETensorType::FLOAT) {
368 modelHasWeights = true;
369 break;
370 }
371 }
372 if (!modelHasWeights)
373 fUseWeightFile = false;
374 }
375 // Go through model and initialize each operator
376 int i = 0;
377 for (auto &op : fOperators) {
378 if (verbose) {
379 auto& r = *op.get();
380 std::cout << "Initializing operator " << i << " " << typeid(r).name() << std::endl;
381 }
382 op->Initialize(*this);
383 i++;
384 }
385 fIsInitialized = true;
386}
387
388void RModel::InitializeSubGraph(std::shared_ptr<RModel> graph) {
389 // add the subgraph to the list
390 fSubGraphs.push_back(graph);
391 //this needs to be done before initializing
392 graph->fParentGraph = this;
393 graph->fIsSubGraph = true;
394
395 graph->Initialize(fBatchSize, fVerbose);
396 // set the same options as parent model
397 graph->fWeightFile = fWeightFile;
398 graph->fUseWeightFile = fUseWeightFile;
399 graph->fUseSession = fUseSession;
400 // add needed blas routines and libs
401 std::vector<std::string> blasRoutines;
402 for (auto & e : graph->fNeededBlasRoutines)
403 blasRoutines.push_back(e);
404 AddBlasRoutines(blasRoutines);
405 for (auto e : graph->fNeededStdLib)
406 AddNeededStdLib(e);
407
408 // add parent input tensors to current graph
409 for (auto & name : fInputTensorNames)
410 graph->fInputTensorNames.push_back(name);
411
412 // clean graph name
413 graph->fName = UTILITY::Clean_name(graph->fName);
414
415}
416
417// Function to generate the code for declaring and initializing constant tensors
418// This is for tensors which are not part of weight files and can be created from the Constant operator
419template <typename T>
420std::string GenerateConstantTensorCode(const std::pair<std::string, InitializedTensor> &t)
421{
422 std::stringstream strs;
423 std::string type = ConvertTypeToString(t.second.type());
424 size_t length = ConvertShapeToLength(t.second.shape());
425 // avoid stack allocation for large constant tensors to reduce compilation time
426 bool allocateOnStack = (length > 100) ? false : true;
427
428 const T *data = t.second.data<T>();
429
430 // and check if all values are the same
431 bool sameData = false;
432 // for non stack allocation check if data are the same
433 if (!allocateOnStack && length > 1) {
434 size_t idx = 1;
435 do {
436 sameData = (data[idx] == data[idx - 1]);
437 idx++;
438 } while (sameData && idx < length);
439 }
440 if (allocateOnStack) {
441 strs << type << " tensor_" << t.first << "[" << length << "] = " << ConvertValuesToString(length, data) << ";\n";
442 } else {
443 strs << "std::vector<" << type << "> fTensor_" << t.first << " = ";
444 if (sameData)
445 strs << "std::vector<" << type << ">(" << length << ", " << ConvertValToString(data[0]) << ");\n";
446 else {
447 strs << ConvertValuesToString(length, data) << ";\n";
448 }
449 strs << "const " << type << " * tensor_" + t.first + " = fTensor_" + t.first + ".data();\n";
450 }
451 return strs.str();
452}
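// Example of the emitted code (derived from the logic above, with
// illustrative tensor names): a short 4-element float constant becomes a
// stack array,
//    float tensor_const1[4] = {1, 2, 3, 4};
// while a long tensor whose values are all equal becomes a filled vector,
//    std::vector<float> fTensor_const2 = std::vector<float>(128, 0);
//    const float * tensor_const2 = fTensor_const2.data();
// (the exact value formatting comes from ConvertValuesToString/ConvertValToString).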
453
454void RModel::GenerateInitializedTensorInfo()
455{
456 if (!fInitializedTensors.empty())
457 fGC += "// initialized tensors\n";
458
459 for (auto &i : fInitializedTensors) {
460 if (!fUseWeightFile || i.second.IsConstantTensor()) {
461 if (i.second.type() == ETensorType::FLOAT)
462 fGC += GenerateConstantTensorCode<float>(i);
463 else if (i.second.type() == ETensorType::INT64)
464 fGC += GenerateConstantTensorCode<int64_t>(i);
465
466 } else {
467 // case of tensors which are read from a file
468 size_t length = ConvertShapeToLength(i.second.shape());
469 if (i.second.type() == ETensorType::FLOAT) {
470 fGC += "std::vector<float> fTensor_" + i.first + " = std::vector<float>(" + std::to_string(length) + ");\n";
471 fGC += "float * tensor_" + i.first + " = fTensor_" + i.first + ".data();\n";
472 }
473 }
474 }
475}
476
477void RModel::GenerateIntermediateTensorInfo() {
478 if (!fIntermediateTensorInfos.empty()) {
479 fGC += "\n//--- declare and allocate the intermediate tensors\n";
480 for (auto &i : fIntermediateTensorInfos) {
481 size_t length = ConvertShapeToLength(i.second.shape);
482 if (i.second.type == ETensorType::FLOAT) {
483 fGC += "std::vector<float> fTensor_" + i.first + " = std::vector<float>(" + std::to_string(length) + ");\n";
484 fGC += "float * tensor_" + i.first + " = fTensor_" + i.first + ".data();\n";
485 }
486 if (i.second.type == ETensorType::DOUBLE) {
487 fGC += "std::vector<double> fTensor_" + i.first + " = std::vector<double>(" + std::to_string(length) + ");\n";
488 fGC += "double * tensor_" + i.first + " = fTensor_" + i.first + ".data();\n";
489 }
490 if (i.second.type == ETensorType::INT64) {
491 fGC += "std::vector<int64_t> fTensor_" + i.first + " = std::vector<int64_t>(" + std::to_string(length) + ");\n";
492 fGC += "int64_t * tensor_" + i.first + " = fTensor_" + i.first + ".data();\n";
493 }
494 if (i.second.type == ETensorType::BOOL) {
495 fGC += "std::vector<bool> fTensor_" + i.first + " = std::vector<bool>(" + std::to_string(length) + ");\n";
496 // don't allocate a pointer since std::vector<bool> doesn't have a .data() member
497 }
498 }
499 }
500 // add also the dynamic tensors (only declarations, allocation will be done later)
501 if (!fDynamicTensorInfos.empty()) {
502 fGC += "//--- declare the dynamic tensors\n";
503 for (auto &i : fDynamicTensorInfos) {
504 if (i.second.type == ETensorType::FLOAT) {
505 fGC += "std::vector<float> fTensor_" + i.first + ";\n";
506 fGC += "float * tensor_" + i.first + " = nullptr;\n";
507 } else if (i.second.type == ETensorType::DOUBLE) {
508 fGC += "std::vector<double> fTensor_" + i.first + ";\n";
509 fGC += "double * tensor_" + i.first + " = nullptr;\n";
510 } else if (i.second.type == ETensorType::INT64) {
511 fGC += "std::vector<int64_t> fTensor_" + i.first + ";\n";
512 fGC += "int64_t * tensor_" + i.first + " = nullptr;\n";
513 }
514 }
515 }
516}
517
518// generate code for specific operator declarations to be defined in the Session class
519void RModel::GenerateOperatorDeclarations() {
520 std::string strcode;
521 for (auto & op : fOperators) {
522 strcode += op->GenerateDeclCode();
523 }
524 if (strcode.empty()) return;
525 fGC += "\n//---- operator declarations \n";
526 fGC += strcode;
527 fGC += "\n";
528}
529
530void RModel::GenerateDynamicTensorInfo() {
531 fGC += "//---- allocate the intermediate dynamic tensors\n";
532 std::stringstream out;
533 for (auto & i: fDynamicTensorInfos) {
534 auto length = ConvertDynamicShapeToLength(i.second.shape);
535 out << SP << "if (" << length << " > 0) {\n";
536 out << SP << SP << "fTensor_" << i.first << ".resize(" << length << ");\n";
537 out << SP << SP << "tensor_" << i.first << " = fTensor_" << i.first << ".data();\n";
538 out << SP << "}\n";
539 }
540 fGC += out.str();
541}
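// Example of the emitted code (derived from the loop above): for a dynamic
// float tensor "out" whose length expression is "bs * 10", the generated
// session contains
//    if (bs * 10 > 0) {
//       fTensor_out.resize(bs * 10);
//       tensor_out = fTensor_out.data();
//    }
// ("out" and "bs" are illustrative names).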
542
543std::string RModel::GenerateInferSignature(bool isdecl) {
544 // generate the infer function signature given the inputs: e.g. "float* tensor1, float* tensor2"
545 // if isdecl is false, generate only the calling signature (tensor1,tensor2,...)
546 std::string rGC;
547 std::unordered_map<std::string, int> inputParams;
548 int i_input = 0;
549 for (auto &name : fInputTensorNames) {
550 // if it is a dynamic tensor, pass its shape parameters
551 if (IsDimInputTensor(name)) {
552 auto shape = GetDynamicTensorShape(name);
553 for (auto &d : shape) {
554 std::string pName = d.param;
555 // check whether this input parameter already appears in another input tensor
556 if (d.isParam && inputParams.count(pName) == 0) {
557 if (isdecl) rGC += "size_t ";
558 rGC += d.param + ",";
559 inputParams[pName] = i_input;
560 }
561 }
562 }
563 if (isdecl) {
564 std::string type = ConvertTypeToString(GetTensorType(name));
565 if (type == "other")
566 throw std::runtime_error("TMVA-SOFIE: input tensor " + name +
567 " is of a data type which is not yet supported.");
568 rGC += type + "* ";
569 }
570 rGC += "tensor_" + name + ",";
571 i_input++;
572 }
573
574 if (fInputTensorNames.size() > 0) rGC.pop_back();// remove last ","
575 return rGC;
576}
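// Example (illustrative): for a single float input "input1" with a parametric
// leading dimension "bs", the declaration signature produced above is
//    size_t bs,float* tensor_input1
// and with isdecl=false only the calling form "bs,tensor_input1" is emitted.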
577
579
580 if (fVerbose)
581 std::cout << "Generating main inference code for " << fName << std::endl;
582
583 size_t outputSize = fOutputTensorNames.size();
584 // assume output types are all the same
585 if (outputSize == 0)
586 throw std::runtime_error("TMVA-SOFIE: output size=0 is not supported");
587
588 std::string outputType;
592 if (outputSize == 1) {
593 fGC += "std::vector<" + outputType + "> ";
594 } else {
595 // we assume all output types are the same
596 for (size_t i = 1; i < outputSize; i++) {
598 throw std::runtime_error("TMVA-SOFIE: different output tensor types are not supported");
599 }
600 fGC += "std::vector<std::vector<" + outputType + ">> ";
601 }
602
603 fGC += "infer(";
604 fGC += GenerateInferSignature();
604
606
607 fGC += "){\n";
608
609 for (size_t id = 0; id < fOperators.size(); id++) {
610 if (fVerbose) std::cout << "Generating code for operator .... " << id << std::endl;
611 fGC += (fOperators[id]->Generate(std::to_string(id)));
612 }
613
614 if (outputSize == 1) {
615 std::string tensorName = fOutputTensorNames[0];
616 if (fIntermediateTensorInfos.count(tensorName) > 0) {
617 // need to check if the size is the same (don't want to return a vector with larger size)
618 // in that case better to copy
619 fGC += SP + "return fTensor_" + tensorName + ";\n";
620 } else {
621 // include also dynamic tensors since the vectors can be allocated with a size larger than their output
622 // we need a special handling for bool type allocated as vector<bool>
623 std::string outputLength = ConvertDynamicShapeToLength(GetDynamicTensorShape(tensorName));
624 if (GetTensorType(tensorName) == ETensorType::BOOL) {
625 fGC += SP + "std::vector<bool> ret (fTensor_" + tensorName + ".begin(), fTensor_" + tensorName +
626 ".begin() + " + outputLength + ");\n";
627 } else {
628 fGC += SP + "std::vector<" + outputType + "> ret (tensor_" + tensorName + ", tensor_" + tensorName + " + " +
629 outputLength + ");\n";
630 }
631 fGC += SP + "return ret;\n";
632 }
633 } else {
634 // here we assume all outputs have same type
635 fGC += SP + "std::vector<std::vector<" + outputType + ">> ret({";
636 for (size_t i = 0; i < outputSize; i++) {
637 std::string tensorName = fOutputTensorNames[i];
638 if (!tensorName.empty()) {
639 if (fIntermediateTensorInfos.count(tensorName) > 0) {
640 fGC += "fTensor_" + tensorName;
641 } else {
642 std::string outputLength = ConvertDynamicShapeToLength(GetDynamicTensorShape(tensorName));
643 if (GetTensorType(tensorName) == ETensorType::BOOL) {
644 fGC += "std::vector<bool>(fTensor_" + tensorName + ".begin(), fTensor_" + tensorName + ".begin() + " +
645 outputLength + ");\n";
646 } else {
647 fGC += "std::vector<" + outputType + ">(tensor_" + tensorName + ", tensor_" + tensorName + " + " +
648 outputLength + ")";
649 }
650 }
651 if (i < outputSize - 1)
652 fGC += ",";
653 } else {
654 fGC += "{}";
655 }
656 }
657 fGC += "});\n";
658 fGC += SP + "return ret;\n";
659 }
660 fGC += "}\n"; // end of infer function scope
661}
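// Shape of the generated code (illustrative): for a model with one float
// output the function emitted above has the overall form
//    std::vector<float> infer(float* tensor_input1) {
//       /* per-operator code */
//       std::vector<float> ret (tensor_output, tensor_output + 10);
//       return ret;
//    }
// (names and the output length 10 are placeholders).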
662
663void RModel::GenerateSessionCode()
664{
665
666 // define the Session struct (for GNN this is generated in RModel_GNN)
667 if (fUseSession) {
668 if (!fIsSubGraph)
669 fGC += "struct Session {\n";
670 else
671 fGC += "struct Session_" + fName + " {\n";
672 }
673
674 // generate code for declaring the initialized tensors
675 GenerateInitializedTensorInfo();
676 // generate code for declaring the intermediate tensors
677 GenerateIntermediateTensorInfo();
678 // generate code for declarations of some specific operators
679 GenerateOperatorDeclarations();
680
681 // add subgraph session
682 if (!fSubGraphs.empty()) fGC += "// subgraph sessions\n";
683 for (auto & graph : fSubGraphs) {
684 fGC += "Session_" + graph->fName + " fSession_" + graph->fName + ";\n";
685 }
686
687 // Generate code for Session constructor
688 if (fUseSession) {
689 std::string sessionName = "Session";
690 if (fIsSubGraph)
691 sessionName += "_" + fName;
692 // add here specific operator code that needs to define session data members
693 fGC += "\n";
694 for (size_t id = 0; id < fOperators.size(); id++) {
695 std::string opName = std::to_string(id);
696 fGC += fOperators[id]->GenerateSessionMembersCode(opName);
697 }
698 fGC += "\n";
699 // here add initialization and reading of weight tensors
700 if (fUseWeightFile) {
701 std::string fileName = fName;
702 if (fWeightFile == WeightFileType::Text) {
703 fileName += ".dat";
704 }
705 if (fWeightFile == WeightFileType::RootBinary) {
706 fileName += ".root";
707 }
708 fGC += sessionName + "(std::string filename =\"" + fileName + "\"";
709 } else {
710 // no need to pass weight file since it is not used
711 // keep passing a string for compatibility
712 fGC += sessionName + "(std::string = \"\"";
713 }
714 // add initialization of shape parameters
715 // assume all parameters are of type size_t
716 if (!fShapeParams.empty()) {
717 for (auto &p : fShapeParams) {
718 fGC += ",\n";
719 fGC += " size_t " + p.first + " = " + p.second;
720 }
721 }
722 fGC += ") {\n";
723
724 if (fUseWeightFile) {
725 fGC += "\n//--- reading weights from file\n";
726 ReadInitializedTensorsFromFile(fReadPos);
727 fGC += "\n";
728 // fUseWeightFile = fUseWeightFile;
729 }
730
731 // now we have passed the parameters we can allocate the dynamic tensors
732 GenerateDynamicTensorInfo();
733
734 // add here initialization code for operator
735 for (size_t id = 0; id < fOperators.size(); id++) {
736 fGC += fOperators[id]->GenerateInitCode();
737 }
738
739 fGC += "}\n\n";
740 }
741 // generate the inference code
743
744 // end of session
745 if (fUseSession) {
746 fGC += "}; // end of Session\n";
747 }
748}
749
750void RModel::Generate(std::underlying_type_t<Options> options, int batchSize, long pos, bool verbose)
751{
752 fVerbose = verbose;
753 fBatchSize = batchSize;
754 fReadPos = pos;
755
756 // session flag is used in operator initialize
757 if (static_cast<std::underlying_type_t<Options>>(Options::kNoSession) & options) {
758 fUseSession = false;
759 fWeightFile = WeightFileType::None;
760 }
761 if (static_cast<std::underlying_type_t<Options>>(Options::kNoWeightFile) & options) {
762 fUseWeightFile = false;
763 fWeightFile = WeightFileType::None;
764 }
765 if (static_cast<std::underlying_type_t<Options>>(Options::kRootBinaryWeightFile) & options) {
766 fUseWeightFile = true;
767 fWeightFile = WeightFileType::RootBinary;
768 }
769 if (fUseWeightFile && !fUseSession) {
770 throw std::runtime_error(
771 "TMVA-SOFIE: RModel::Generate: cannot use a separate weight file without generating a Session class");
772 }
773
774 if (static_cast<std::underlying_type_t<Options>>(Options::kGNN) & options)
775 fIsGNN = true;
776 if (static_cast<std::underlying_type_t<Options>>(Options::kGNNComponent) & options)
777 fIsGNNComponent = true;
778
779 // initialize the model including all operators and sub-graphs
780 Initialize(batchSize, verbose);
781
782 std::string hgname;
783 if (!fIsGNNComponent && !fIsSubGraph) {
784 fGC.clear();
785 GenerateHeaderInfo(hgname);
786 }
787
788 // generate first code for the subgraphs
789 for (auto &graph : fSubGraphs) {
790 if (fVerbose)
791 std::cout << "generate session code for subgraph " << graph->fName << std::endl;
792 graph->GenerateSessionCode();
793 fGC += graph->fGC;
794 }
795
796 if (fVerbose)
797 std::cout << "generate Main session code - model " << fName << std::endl;
798
799 // generate main session code
800 GenerateSessionCode();
801
802 if (!fIsGNNComponent && !fIsSubGraph) {
803 fGC += ("} //TMVA_SOFIE_" + fName + "\n");
804 fGC += "\n#endif // " + hgname + "\n";
805 }
806}
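// Typical end-to-end usage (sketch, assuming the ONNX parser declared in
// TMVA/RModelParser_ONNX.hxx and the Options::kDefault flag):
//    using namespace TMVA::Experimental::SOFIE;
//    RModelParser_ONNX parser;
//    RModel model = parser.Parse("model.onnx");
//    model.Generate(Options::kDefault, 1);      // batch size 1
//    model.OutputGenerated("model.hxx");        // also writes the weight file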
807
808void RModel::ReadInitializedTensorsFromFile(long pos) {
809 // generate the code to read initialized tensors from a text data file
810 if (fWeightFile == WeightFileType::Text) {
811 if (fInitializedTensors.empty()) return;
812
813 fGC += " std::ifstream f;\n";
814 fGC += " f.open(filename);\n";
815 fGC += " if (!f.is_open()) {\n";
816 fGC += " throw std::runtime_error(\"tmva-sofie failed to open file \" + filename + \" for input weights\");\n";
817 fGC += " }\n";
818
819 if(fIsGNNComponent) {
820 fGC += " f.seekg(" + std::to_string(pos) + ");\n";
821 }
822
823 fGC += " std::string tensor_name;\n";
824 fGC += " size_t length;\n";
825
826 // loop on tensors and parse the file
827 for (auto& i: fInitializedTensors) {
828 // skip Constant and shape tensors (not written in a file)
829 if (!i.second.IsWeightTensor()) continue;
830 std::string tensor_name = "tensor_" + i.first;
831 if (i.second.type() == ETensorType::FLOAT) {
832 size_t length = 1;
833 length = ConvertShapeToLength(i.second.shape());
834 std::string slength = std::to_string(length);
835 fGC += " f >> tensor_name >> length;\n";
836 fGC += " if (tensor_name != \"" + tensor_name + "\" ) {\n";
837 fGC += " std::string err_msg = \"TMVA-SOFIE failed to read the correct tensor name; expected name is " +
838 tensor_name + " , read \" + tensor_name;\n";
839 fGC += " throw std::runtime_error(err_msg);\n";
840 fGC += " }\n";
841 fGC += " if (length != " + slength + ") {\n";
842 fGC += " std::string err_msg = \"TMVA-SOFIE failed to read the correct tensor size; expected size is " +
843 slength + " , read \" + std::to_string(length) ;\n";
844 fGC += " throw std::runtime_error(err_msg);\n";
845 fGC += " }\n";
846 fGC += " for (size_t i = 0; i < length; ++i)\n";
847 fGC += " f >> " + tensor_name + "[i];\n";
848 fGC += " if (f.fail()) {\n";
849 fGC += " throw std::runtime_error(\"TMVA-SOFIE failed to read the values for tensor " + tensor_name + "\");\n";
850 fGC += " }\n";
851 } else {
852 throw std::runtime_error("tmva-sofie tensor " + tensor_name + " with type " + ConvertTypeToString(i.second.type()) + " cannot be read from a file");
853 }
854 }
855 fGC += " f.close();\n";
856 }
857
858 // generate the code to read initialized tensors from a ROOT data file
859 if (fWeightFile == WeightFileType::RootBinary) {
860 fGC += " {\n";
861 fGC += " std::unique_ptr<TFile> rootFile(TFile::Open(filename.c_str(), \"READ\"));\n";
862 fGC += " if (!rootFile->IsOpen()) {\n";
863 fGC += " throw std::runtime_error(\"tmva-sofie failed to open ROOT file for input weights\");\n";
864 fGC += " }\n";
865
866 std::string dirName = fName + "_weights";
867 fGC += " if (!rootFile->GetKey(\"" + dirName + "\")) {\n";
868 fGC += " throw std::runtime_error(\"tmva-sofie failed to open ROOT directory for input weights\");\n";
869 fGC += " }\n";
870
871 for (auto &i : fInitializedTensors) {
872 // skip Constant and shape tensors
873 if (!i.second.IsWeightTensor()) continue;
874 fGC += " {\n";
875 std::string tensor_name = "tensor_" + i.first;
876 if (i.second.type() == ETensorType::FLOAT) {
877 fGC += " fTensor_" + i.first + " = *reinterpret_cast<std::vector<float>*>(rootFile->Get(\"";
878 fGC += dirName + "/" + tensor_name + "\"));\n";
879 } else if (i.second.type() == ETensorType::DOUBLE) {
880 fGC += " fTensor_" + i.first + " = *reinterpret_cast<std::vector<double>*>(rootFile->Get(\"";
881 fGC += dirName + "/" + tensor_name + "\"));\n";
882 } else if (i.second.type() == ETensorType::INT64) {
883 fGC += " fTensor_" + i.first + " = *reinterpret_cast<std::vector<int64_t>*>(rootFile->Get(\"";
884 fGC += dirName + "/" + tensor_name + "\"));\n";
885 } else {
886 throw std::runtime_error("tmva-sofie tensor " + tensor_name + " with type " + ConvertTypeToString(i.second.type()) + " cannot be read from a ROOT file");
887 }
888 fGC += " }\n";
889 }
890 fGC += " }\n";
891 }
892}
893
894long RModel::WriteInitializedTensorsToFile(std::string filename) {
895 // Determine the file extension based on the weight file type
896 std::string fileExtension;
897 switch (fWeightFile) {
898 case WeightFileType::None:
899 fileExtension = ".dat";
900 break;
901 case WeightFileType::RootBinary:
902 fileExtension = ".root";
903 break;
904 case WeightFileType::Text:
905 fileExtension = ".dat";
906 break;
907 }
908
909 // If filename is empty, use the model name as the base filename
910 if (filename.empty()) {
911 filename = fName + fileExtension;
912 }
913
914 // Write the initialized tensors to the file
915 if (fWeightFile == WeightFileType::RootBinary) {
916 if(fIsGNNComponent || fIsGNN) {
917 throw std::runtime_error("SOFIE-GNN does not yet support writing to a ROOT file.");
918 }
919 std::unique_ptr<TFile> outputFile(TFile::Open(filename.c_str(), "UPDATE"));
920
921 std::string dirName = fName + "_weights";
922 // if the directory already exists, delete it so it can be replaced with the new one
923 if (outputFile->GetKey(dirName.c_str()))
924 outputFile->rmdir(dirName.c_str());
925
926 auto outputDir = outputFile->mkdir(dirName.c_str());
927
928 for (const auto& item : fInitializedTensors) {
929 // skip Constant tensors and tensors which are not writable (e.g. shape tensors)
930 if (!item.second.IsWeightTensor()) continue;
931 std::string tensorName = "tensor_" + item.first;
932 size_t length = 1;
933 length = ConvertShapeToLength(item.second.shape());
934 if(item.second.type() == ETensorType::FLOAT) {
935 const float* data = item.second.data<float>();
936 std::vector<float> tensorDataVector(data, data + length);
937 outputDir->WriteObjectAny(&tensorDataVector, "std::vector<float>", tensorName.c_str());
938 }
939 else if(item.second.type() == ETensorType::DOUBLE) {
940 const double* data = item.second.data<double>();
941 std::vector<double> tensorDataVector(data, data + length);
942 outputDir->WriteObjectAny(&tensorDataVector, "std::vector<double>", tensorName.c_str());
943 }
944 else if(item.second.type() == ETensorType::INT64) {
945 const int64_t* data = item.second.data<int64_t>();
946 std::vector<int64_t> tensorDataVector(data, data + length);
947 outputDir->WriteObjectAny(&tensorDataVector, "std::vector<int64_t>", tensorName.c_str());
948 }
949 else {
950 throw std::runtime_error("tmva-sofie tensor " + tensorName + " with type " + ConvertTypeToString(item.second.type()) +
951 " cannot be written to a ROOT file");
952 }
953 }
954 outputFile->Write(filename.c_str());
955
956 // this needs to be changed, similar to the text file
957 return -1;
958
959 } else if (fWeightFile == WeightFileType::Text) {
960 std::ofstream f;
961 if(fIsGNNComponent) {
962 // appending all GNN components into the same file
963 f.open(filename, std::ios::app);
964 } else {
965 f.open(filename);
966 }
967 if (!f.is_open())
968 throw
969 std::runtime_error("tmva-sofie failed to open file " + filename + " for tensor weight data");
970 for (auto& i: fInitializedTensors) {
971 // skip Constant tensors and not writable tensors (e.g. shape tensors)
972 if (!i.second.IsWeightTensor()) {
973 continue;
974 }
975 size_t length = ConvertShapeToLength(i.second.shape());
976 std::string tensor_name = "tensor_" + i.first;
977 f << tensor_name << " " << length << "\n";
978 if (i.second.type() == ETensorType::FLOAT) {
979 const float * data = i.second.data<float>();
980 for (size_t idx = 0; idx < length; idx++) {
981 // round to zero sub-normal values
982 float value = data[idx];
983 if (value != 0. && std::abs(value) < std::numeric_limits<float>::min() ) value = 0;
984 f << std::setprecision(std::numeric_limits<float>::max_digits10) << value;
985 f << ( (idx < length-1) ? " " : "\n" );
986 }
987 }
988 else {
989 throw std::runtime_error("tmva-sofie tensor " + tensor_name + " with type " + ConvertTypeToString(i.second.type()) + " cannot be written to a file");
990 }
991 if (f.fail())
992 throw std::runtime_error("tmva-sofie failed to write tensor data to file for " + tensor_name);
993 }
994 long curr_pos = f.tellp();
995 f.close();
996 return curr_pos;
997 } else {
998 return -1;
999 }
1000}
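// Format sketch (derived from the writer above): a text weight file stores,
// for each FLOAT tensor, a header line "tensor_<name> <length>" followed by
// the space-separated values, e.g.
//    tensor_dense1bias 3
//    0.1 -0.25 0.5
// The returned stream position allows appending several GNN components to the
// same file; the ROOT-binary writer currently returns -1 instead.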
1001
1002void RModel::PrintRequiredInputTensors() {
1003 std::cout << "Model requires the following inputs:\n";
1004 for (auto& inputInfo: fInputTensorInfos) {
1005 std::cout << "Parametrised Tensor name: " << inputInfo.first << "\t";
1006 std::cout << "type: " << ConvertTypeToString(inputInfo.second.type) << "\t";
1007 std::cout << "shape: [";
1008 for (size_t i = 0; i < inputInfo.second.shape.size(); i++) {
1009 if (inputInfo.second.shape[i].isParam) {
1010 std::cout << inputInfo.second.shape[i].param;
1011 } else {
1012 std::cout << inputInfo.second.shape[i].dim ;
1013 }
1014 if (i < inputInfo.second.shape.size() - 1) std::cout << ",";
1015 }
1016 std::cout << "]" << std::endl;
1017 }
1018
1019 for (auto& inputInfo: fReadyInputTensorInfos) {
1020 std::cout << "Fully Specified Tensor name: " << inputInfo.first << "\t";
1021 std::cout << "type: " << ConvertTypeToString(inputInfo.second.type) << "\t";
1022 std::cout << "shape: [";
1023 for (size_t i = 0; i < inputInfo.second.shape.size(); i++) {
1024 std::cout << inputInfo.second.shape[i];
1025 if (i < inputInfo.second.shape.size() - 1) std::cout << ",";
1026 }
1027 std::cout << "]" << std::endl;
1028 }
1029 std::cout << "\n";
1030}
1031
1032void RModel::PrintInitializedTensors() {
1033 std::cout << "Model initialized the following tensors:\n";
1034 for (auto& it: fInitializedTensors) {
1035 std::cout << "Tensor name: \"" << it.first << "\"\t";
1036 std::cout << "type: " << ConvertTypeToString(it.second.type()) << "\t";
1037 std::cout << "shape: [";
1038 for (size_t i = 0; i < it.second.shape().size(); i++) {
1039 std::cout << it.second.shape()[i];
1040 if (i < it.second.shape().size() - 1) std::cout << ",";
1041 }
1042 std::cout << "]";
1043 if (it.second.IsConstantTensor()) std::cout << " (Constant)";
1044 else if (!it.second.IsWeightTensor()) std::cout << " (Not Writable)";
1045 std::cout << std::endl;
1046 }
1047 std::cout << "\n";
1048}
1049
1050void RModel::PrintIntermediateTensors() {
1051 std::cout << "Model specifies the following intermediate tensors:\n";
1052 for (auto& it: fIntermediateTensorInfos) {
1053 std::cout << "Tensor name: \"" << it.first << "\"\t";
1054 std::cout << "type: " << ConvertTypeToString(it.second.type) << "\t";
1055 std::cout << "shape: [";
1056 for (size_t i = 0; i < it.second.shape.size(); i++) {
1057 std::cout << it.second.shape[i];
1058 if (i < it.second.shape.size() - 1) std::cout << ",";
1059 }
1060 std::cout << "]" << std::endl;
1061 }
1062 std::cout << "\n";
1063}
1064
1065void RModel::PrintDynamicTensors() {
1066 std::cout << "Model specifies the following dynamic tensors:\n";
1067 for (auto& it: fDynamicTensorInfos) {
1068 std::cout << "Tensor name: \"" << it.first << "\"\t";
1069 std::cout << "type: " << ConvertTypeToString(it.second.type) << "\t";
1070 std::cout << "shape: [";
1071 for (size_t i = 0; i < it.second.shape.size(); i++) {
1072 std::cout << it.second.shape[i].GetVal();
1073 if (i < it.second.shape.size() - 1) std::cout << ",";
1074 }
1075 std::cout << "]" << std::endl;
1076 }
1077 std::cout << "\n";
1078}
1079
1080void RModel::PrintOutputTensors() {
1081 std::cout << "Model specifies the following output tensors:\n";
1082 for (auto& it: fOutputTensorNames) {
1083 std::cout << "Tensor name: \"" << it << "\"\t";
1084 if (!IsDynamicTensor(it))
1085 std::cout << "shape: " << ConvertShapeToString(GetTensorShape(it)) << std::endl;
1086 else
1087 std::cout << "shape: " << ConvertDynamicShapeToString(GetDynamicTensorShape(it)) << std::endl;
1088 }
1089 std::cout << "\n";
1090}
1091
1092void RModel::HeadInitializedTensors(std::string name, int n_print) {
1093 auto it = fInitializedTensors.find(name);
1094 if (it == fInitializedTensors.end()) {
1095 std::cout << "Tensor " << name << " not found in model's initialized tensor list" << std::endl;
1096 return;
1097 }
1098
1099 std::cout << "Tensor name: " << it->first << "\t";
1100 std::cout << "type: " << ConvertTypeToString(it->second.type()) << "\t";
1101 int length =1;
1102 std::cout << "shape: [";
1103 for (size_t i = 0; i < it->second.shape().size(); i++) {
1104 std::cout << it->second.shape()[i];
1105 length *= it->second.shape()[i];
1106 if (i < it->second.shape().size() - 1) std::cout << ",";
1107 }
1108 std::cout << "]" << std::endl;
1109 bool ellipsis = true;
1110 if (n_print >= length) {
1111 n_print = length;
1112 ellipsis = false;
1113 }
1114
1115 std::cout << "data: [" << std::endl;
1116 if (it->second.type() == ETensorType::FLOAT) {
1117 auto converted_data = it->second.data<float>();
1118 for (int i =0; i < n_print; i++) {
1119 std::cout << converted_data[i];
1120 if (i < n_print - 1) std::cout << " ,";
1121 }
1122 }
1123 if (ellipsis) std::cout << ", ...";
1124 std::cout << "]" << std::endl;
1125
1126}
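// Usage sketch (illustrative): print the type, shape and first 10 values of
// an initialized tensor (the name is looked up without the "tensor_" prefix):
//    model.HeadInitializedTensors("dense1w", 10);   // "dense1w" is a hypothetical tensor name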
1127
1128void RModel::OutputGenerated(std::string filename, bool append) {
1129
1130 RModel_Base::OutputGenerated(filename, append);
1131
1132 // write weights in a text file
1133 if (fUseWeightFile) {
1134 if (!filename.empty()) {
1135 size_t pos = filename.find(".hxx");
1136 if (fWeightFile == WeightFileType::Text)
1137 filename.replace(pos, 4, ".dat");
1138 if (fWeightFile == WeightFileType::RootBinary) {
1139 filename = filename.erase(pos, 4);
1140 filename += ".root";
1141 }
1142 } else {
1143 filename = fName;
1144 filename += fWeightFile == WeightFileType::Text ? ".dat" : ".root";
1145 }
1146 WriteInitializedTensorsToFile(filename);
1147 }
1148}
1149
1150void RModel::Streamer(TBuffer &R__b) {
1151 if (R__b.IsReading()) {
1152 RModel::Class()->ReadBuffer(R__b, this);
1153 for(auto i=RModel::fInitializedTensors.begin(); i!=RModel::fInitializedTensors.end(); ++i) {
1154 i->second.CastPersistentToShared();
1155 }
1156 }
1157 else {
1158 for(auto i=RModel::fInitializedTensors.begin(); i!=RModel::fInitializedTensors.end(); ++i) {
1159 i->second.CastSharedToPersistent();
1160 }
1161 RModel::Class()->WriteBuffer(R__b, this);
1162 }
1163}
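// Usage sketch (illustrative): the custom Streamer above lets an RModel be
// stored like any other ROOT object; on reading, CastPersistentToShared
// restores the shared_ptr-owned tensor buffers, e.g.
//    TFile f("model.root", "RECREATE");
//    f.WriteObject(&model, "model");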
1164
1165}//SOFIE
1166}//Experimental
1167}//TMVA
Buffer base class used for serializing objects.
Definition TBuffer.h:43
static TFile * Open(const char *name, Option_t *option="", const char *ftitle="", Int_t compress=ROOT::RCompressionSetting::EDefaults::kUseCompiledDefault, Int_t netopt=0)
Create / open a file.
Definition TFile.cxx:4130
void GenerateHeaderInfo(std::string &hgname)
std::unordered_set< std::string > fNeededBlasRoutines
void OutputGenerated(std::string filename="", bool append=false)
std::unordered_set< std::string > fNeededStdLib
void AddBlasRoutines(std::vector< std::string > routines)
void AddNeededStdLib(std::string libname)
const ETensorType & GetTensorType(std::string name)
Definition RModel.cxx:94
std::unordered_map< std::string, DynamicTensorInfo > fDynamicTensorInfos
Definition RModel.hxx:25
bool IsDynamicTensor(const std::string &name) const
Definition RModel.cxx:199
void AddIntermediateTensor(std::string tensor_name, ETensorType type, std::vector< Dim > dim_shape)
Definition RModel.cxx:213
std::vector< Dim > GetDynamicTensorShape(std::string name)
Definition RModel.cxx:82
std::string GenerateInferSignature(bool isdecl=true)
Definition RModel.cxx:543
bool CheckIfTensorAlreadyExist(std::string tensor_name)
Definition RModel.cxx:122
std::vector< std::unique_ptr< ROperator > > fOperators
Definition RModel.hxx:31
void OutputGenerated(std::string filename="", bool append=false)
Definition RModel.cxx:1128
void AddInputTensorInfo(std::string input_name, ETensorType type, std::vector< Dim > shape)
Definition RModel.cxx:132
std::unordered_map< std::string, TensorInfo > fIntermediateTensorInfos
Definition RModel.hxx:24
void AddOutputTensorNameList(std::vector< std::string > output_tensor_names)
Definition RModel.cxx:251
std::unordered_map< std::string, TensorInfo > fReadyInputTensorInfos
Definition RModel.hxx:22
void AddConstantTensor(std::string tensor_name, ETensorType type, std::vector< std::size_t > shape, std::shared_ptr< void > data)
Definition RModel.cxx:178
void AddDynamicTensor(std::string tensor_name, ETensorType type, std::vector< Dim > shape)
Definition RModel.cxx:230
void AddInitializedTensor(std::string tensor_name, ETensorType type, std::vector< std::size_t > shape, std::shared_ptr< void > data)
Definition RModel.cxx:168
RModel & operator=(RModel &&other)
Definition RModel.cxx:39
void AddInputTensorName(std::string name)
Definition RModel.cxx:151
std::vector< std::string > fOutputTensorNames
Definition RModel.hxx:28
bool IsDimInputTensor(const std::string &name) const
Definition RModel.cxx:203
bool IsInitializedTensor(const std::string &name) const
Definition RModel.cxx:188
void AddOperator(std::unique_ptr< ROperator > op, int order_execution=-1)
Definition RModel.cxx:155
RModel()=default
Default constructor.
void HeadInitializedTensors(std::string name, int n_print=50)
Definition RModel.cxx:1092
bool IsConstantTensor(const std::string &name) const
Definition RModel.cxx:192
void Initialize(int batchSize=-1, bool verbose=false)
Definition RModel.cxx:291
const std::vector< size_t > & GetTensorShape(std::string name)
Definition RModel.cxx:56
long WriteInitializedTensorsToFile(std::string filename="")
Definition RModel.cxx:894
void Generate(std::underlying_type_t< Options > options, int batchSize=-1, long pos=0, bool verbose=false)
Definition RModel.cxx:750
std::unordered_map< std::string, InputTensorInfo > fInputTensorInfos
Definition RModel.hxx:21
std::shared_ptr< void > GetInitializedTensorData(std::string tensor_name)
Definition RModel.cxx:274
void InitializeSubGraph(std::shared_ptr< RModel > graph)
Definition RModel.cxx:388
std::unordered_map< std::string, std::string > fShapeParams
Definition RModel.hxx:27
void SetNotWritableInitializedTensor(const std::string &tensor_name)
Definition RModel.cxx:283
std::vector< std::string > fInputTensorNames
Definition RModel.hxx:29
std::unordered_map< std::string, InitializedTensor > fInitializedTensors
Definition RModel.hxx:23
void UpdateInitializedTensor(std::string tensor_name, ETensorType type, std::vector< std::size_t > shape, std::shared_ptr< void > data)
Definition RModel.cxx:265
std::vector< std::shared_ptr< RModel > > fSubGraphs
sub-graph models (transient)
Definition RModel.hxx:33
bool IsReadyInputTensor(const std::string &name) const
Definition RModel.cxx:207
void UpdateOutputTensorList(std::vector< std::string > curr_output_tensor, std::vector< std::string > modify_output_tensor)
Definition RModel.cxx:258
std::string Clean_name(std::string input_tensor_name)
std::vector< Dim > ConvertShapeToDim(std::vector< size_t > shape)
Convert shape from integer format to dynamic one (based on Dim)
std::string ConvertDynamicShapeToLength(std::vector< Dim > shape)
std::string ConvertValuesToString(size_t n, const T *data)
std::string ConvertShapeToString(std::vector< size_t > shape)
std::string GenerateConstantTensorCode(const std::pair< std::string, InitializedTensor > &t)
Definition RModel.cxx:420
std::string ConvertTypeToString(ETensorType type)
std::string ConvertDynamicShapeToString(std::vector< Dim > shape)
std::underlying_type_t< Options > operator|(Options opA, Options opB)
Definition RModel.cxx:16
std::vector< size_t > ConvertShapeToInt(std::vector< Dim > shape)
Convert shape based on Dim to integer format.
std::string ConvertValToString(T value)
std::size_t ConvertShapeToLength(std::vector< size_t > shape)