// RModelParser_ONNX.cxx
#include "Byteswap.h"
#include "TMVA/RModelParser_ONNX.hxx"
#include "onnx_proto3.pb.h"

#include <stdexcept>
#include <string>
#include <memory>
#include <cassert>
#include <iostream>
#include <unordered_map>
#include <unordered_set>
#include <functional>
#include <algorithm>
#include <cstring>
#include <ctime>
#include <fstream>
#include <map>
#include <vector>
#include "TMVA/SOFIE_common.hxx"

namespace TMVA {
namespace Experimental {
namespace SOFIE {

// Declaration of operators (extern ParserFuncSignature objects whose
// definitions live in the individual Parse*.cxx files)
// Unary operators
extern ParserFuncSignature ParseSqrt;
extern ParserFuncSignature ParseReciprocal;
extern ParserFuncSignature ParseNeg;
extern ParserFuncSignature ParseExp;
extern ParserFuncSignature ParseLog;
extern ParserFuncSignature ParseSin;
extern ParserFuncSignature ParseCos;
extern ParserFuncSignature ParseAbs;
extern ParserFuncSignature ParseSoftplus;
// Binary operators
extern ParserFuncSignature ParseAdd;
extern ParserFuncSignature ParseSub;
extern ParserFuncSignature ParseMul;
extern ParserFuncSignature ParseDiv;
extern ParserFuncSignature ParsePow;
// Nary operators
extern ParserFuncSignature ParseMax;
extern ParserFuncSignature ParseMin;
extern ParserFuncSignature ParseMean;
extern ParserFuncSignature ParseSum;
// Comparison operators
extern ParserFuncSignature ParseEq;
extern ParserFuncSignature ParseLess;
extern ParserFuncSignature ParseLessEq;
extern ParserFuncSignature ParseGreater;
extern ParserFuncSignature ParseGreaterEq;
// Reduce operators
extern ParserFuncSignature ParseReduceMean;
extern ParserFuncSignature ParseReduceSum;
extern ParserFuncSignature ParseReduceSumSquare;
extern ParserFuncSignature ParseReduceProd;
// Others (the remaining ParserFuncSignature declarations are collapsed in this listing)
// ...
// Declaration of fused operators
extern ParserFuseFuncSignature ParseFuseMatMulAdd;
extern ParserFuseFuncSignature ParseFuseConvAdd;
extern ParserFuseFuncSignature ParseFuseConvTransposeAdd;
extern ParserFuseFuncSignature ParseFuseGemmRelu;
extern ParserFuseFuncSignature ParseFuseBatchnormRelu;

// Definition of RModelParser_ONNX::OperatorsMapImpl
struct RModelParser_ONNX::OperatorsMapImpl {
   // Registered operators
   std::unordered_map<std::string, ParserFuncSignature> fOperatorsMap;
};
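
// The operator map is kept behind this OperatorsMapImpl indirection: the
// header only holds a std::unique_ptr<OperatorsMapImpl> (fOperatorsMapImpl),
// which keeps the std::function-based map out of the public interface.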

// Trait to extract the data from a TensorProto: the generic template is empty
// and each supported element type provides a specialization below.
template<typename T>
struct ExtractDataFromTP {
};
template<>
struct ExtractDataFromTP<float> {
   static void Copy(onnx::TensorProto *tensor, void *data) {
      tensor->mutable_float_data()->ExtractSubrange(0, tensor->float_data_size(),
                                                    static_cast<float *>(data));
   }
};
template<>
struct ExtractDataFromTP<double> {
   static void Copy(onnx::TensorProto *tensor, void *data) {
      tensor->mutable_double_data()->ExtractSubrange(0, tensor->double_data_size(),
                                                     static_cast<double *>(data));
   }
};
template<>
struct ExtractDataFromTP<int32_t> {
   static void Copy(onnx::TensorProto *tensor, void *data) {
      tensor->mutable_int32_data()->ExtractSubrange(0, tensor->int32_data_size(),
                                                    static_cast<int32_t *>(data));
   }
};
template<>
struct ExtractDataFromTP<int64_t> {
   static void Copy(onnx::TensorProto *tensor, void *data) {
      tensor->mutable_int64_data()->ExtractSubrange(0, tensor->int64_data_size(),
                                                    static_cast<int64_t *>(data));
   }
};
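
// Only float, double, int32_t and int64_t specializations are provided: the
// generic template has no Copy member, so instantiating the helper below with
// any other type fails to compile when the typed-data branch is reached.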
// helper function to get initialized tensor data
template<typename T>
std::shared_ptr<void> GetInitializedTensorData(onnx::TensorProto *tensorproto, size_t length) {
   std::shared_ptr<void> data(malloc(length * sizeof(T)), free);

   if (!tensorproto->raw_data().empty()) {
#ifdef R__BYTESWAP
      // raw_data is stored little-endian: on little-endian platforms a plain copy suffices
      std::memcpy(data.get(), tensorproto->raw_data().c_str(), length * sizeof(T));
#else
      // on big-endian platforms every element must be byte-swapped
      for (std::size_t k = 0; k < length; ++k)
         (reinterpret_cast<typename RByteSwap<sizeof(T)>::value_type *>(data.get()))[k] =
            RByteSwap<sizeof(T)>::bswap((reinterpret_cast<const typename RByteSwap<sizeof(T)>::value_type *>(tensorproto->raw_data().c_str()))[k]);
#endif
   } else {
      // data is stored in the typed repeated field: use the extraction trait
      ExtractDataFromTP<T>::Copy(tensorproto, data.get());
   }
   return data;
}
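
// Usage sketch (illustrative, not part of the original file): extracting the
// payload of a float TensorProto `tp`, e.g. one obtained from
// graph.initializer(i). The element count is the product of the dims.
//
//    size_t n = 1;
//    for (int d = 0; d < tp.dims_size(); ++d)
//       n *= tp.dims(d);
//    std::shared_ptr<void> blob = GetInitializedTensorData<float>(&tp, n);
//    const float *values = static_cast<const float *>(blob.get());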

// Constructor of the parser
RModelParser_ONNX::RModelParser_ONNX() noexcept : fOperatorsMapImpl(std::make_unique<OperatorsMapImpl>()) {
   // Register operators
   // Unary operators
   RegisterOperator("Sqrt", ParseSqrt);
   RegisterOperator("Reciprocal", ParseReciprocal);
   RegisterOperator("Neg", ParseNeg);
   RegisterOperator("Exp", ParseExp);
   RegisterOperator("Log", ParseLog);
   RegisterOperator("Sin", ParseSin);
   RegisterOperator("Cos", ParseCos);
   RegisterOperator("Abs", ParseAbs);
   RegisterOperator("Softplus", ParseSoftplus);
   // Binary operators
   RegisterOperator("Add", ParseAdd);
   RegisterOperator("Sub", ParseSub);
   RegisterOperator("Mul", ParseMul);
   RegisterOperator("Div", ParseDiv);
   RegisterOperator("Pow", ParsePow);
   // Nary operators
   RegisterOperator("Max", ParseMax);
   RegisterOperator("Min", ParseMin);
   RegisterOperator("Mean", ParseMean);
   RegisterOperator("Sum", ParseSum);
   // Comparison operators
   RegisterOperator("Equal", ParseEq);
   RegisterOperator("Less", ParseLess);
   RegisterOperator("LessOrEqual", ParseLessEq);
   RegisterOperator("Greater", ParseGreater);
   RegisterOperator("GreaterOrEqual", ParseGreaterEq);
   // Reduce operators
   RegisterOperator("ReduceMean", ParseReduceMean);
   RegisterOperator("ReduceSum", ParseReduceSum);
   RegisterOperator("ReduceSumSquare", ParseReduceSumSquare);
   RegisterOperator("ReduceProd", ParseReduceProd);
   // Others (some registrations are collapsed in this listing; gaps are marked "...")
   RegisterOperator("BatchNormalization", ParseBatchNormalization);
   RegisterOperator("Constant", ParseConstant);
   RegisterOperator("ConstantOfShape", ParseConstant);
   // ...
   RegisterOperator("Concat", ParseConcat);
   RegisterOperator("Conv", ParseConv);
   RegisterOperator("ConvTranspose", ParseConvTranspose);
   // ...
   RegisterOperator("Identity", ParseIdentity);
   RegisterOperator("LeakyRelu", ParseLeakyRelu);
   // ...
   RegisterOperator("AveragePool", ParsePool);
   RegisterOperator("GlobalAveragePool", ParsePool);
   RegisterOperator("MaxPool", ParsePool);
   // ...
   RegisterOperator("Reshape", ParseReshape);
   RegisterOperator("Flatten", ParseReshape);
   RegisterOperator("Squeeze", ParseReshape);
   RegisterOperator("Unsqueeze", ParseReshape);
   // ...
   RegisterOperator("Sigmoid", ParseSigmoid);
   // ...
   RegisterOperator("Softmax", ParseSoftmax);
   // ...
   RegisterOperator("Transpose", ParseTranspose);
   RegisterOperator("MatMul", ParseMatMul);
   RegisterOperator("LayerNormalization", ParseLayerNormalization);
   RegisterOperator("Expand", ParseExpand);
   RegisterOperator("Gather", ParseGather);
   // ...
   RegisterOperator("EyeLike", ParseEyeLike);
   // ...
   RegisterOperator("Einsum", ParseEinsum);
   RegisterOperator("RandomNormal", ParseRandom);
   RegisterOperator("RandomNormalLike", ParseRandom);
   RegisterOperator("RandomUniform", ParseRandom);
   RegisterOperator("RandomUniformLike", ParseRandom);
   RegisterOperator("ScatterElements", ParseScatterElements);
}
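
// Extension sketch (illustrative): additional operators can be attached by
// registering a lambda matching ParserFuncSignature; "MyOp" and
// MyCustomOperator are hypothetical names.
//
//    RModelParser_ONNX parser;
//    parser.RegisterOperator("MyOp",
//       [](RModelParser_ONNX &p, const onnx::NodeProto &node) -> std::unique_ptr<ROperator> {
//          return std::make_unique<MyCustomOperator>(node);
//       });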

// Destructor of the parser (defined here, where OperatorsMapImpl is complete)
RModelParser_ONNX::~RModelParser_ONNX() = default;

void RModelParser_ONNX::RegisterOperator(const std::string &name, ParserFuncSignature func)
{
   fOperatorsMapImpl->fOperatorsMap[name] = func;
}

bool RModelParser_ONNX::IsRegisteredOperator(const std::string &name)
{
   return fOperatorsMapImpl->fOperatorsMap.find(name) != fOperatorsMapImpl->fOperatorsMap.end();
}

std::vector<std::string> RModelParser_ONNX::GetRegisteredOperators()
{
   std::vector<std::string> ops;
   ops.reserve(fOperatorsMapImpl->fOperatorsMap.size());
   for (auto &it : fOperatorsMapImpl->fOperatorsMap) {
      ops.emplace_back(it.first);
   }
   // return the list sorted in alphabetical order
   std::sort(ops.begin(), ops.end());
   return ops;
}
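
// Usage sketch (illustrative): printing the supported operator list.
//
//    RModelParser_ONNX parser;
//    for (const auto &op : parser.GetRegisteredOperators())
//       std::cout << op << "\n";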

void RModelParser_ONNX::RegisterTensorType(const std::string &name, ETensorType type)
{
   fTensorTypeMap[UTILITY::Clean_name(name)] = type;
}

ETensorType RModelParser_ONNX::GetTensorType(const std::string &name)
{
   return fTensorTypeMap[UTILITY::Clean_name(name)];
}

// ... (a further fTensorTypeMap accessor definition is collapsed in this listing)

281// Parse an operator
282std::unique_ptr<ROperator>
283RModelParser_ONNX::ParseOperator(const size_t i, const onnx::GraphProto &graphproto, const std::vector<size_t> &nodes, const std::vector<int> & children)
284{
285 if (i >= nodes.size())
286 throw std::runtime_error("TMVA::SOFIE - Error in parsing ordered operators " + std::to_string(i) + " is >= " + std::to_string(nodes.size()));
287 int idx = nodes[i];
288 const auto &nodeproto = graphproto.node(idx);
289 const std::string op_type = nodeproto.op_type();
290 if (fVerbose)
291 std::cout << "Parsing operator " << op_type << std::endl;
292
293 // skip already fused operators
294 if (fFusedOperators[idx]) return nullptr;
295
296 // try to fuse with following operator in case it is not last one
297 if (children.size() == 1) {
298 int idx2 = children.front();
299 if (op_type == "MatMul") {
300 // Fuse MatMul and Add
301 if (idx2 < graphproto.node_size() && graphproto.node(idx2).op_type() == "Add") {
302 fFusedOperators[idx2] = true;
303 return ParseFuseMatMulAdd(*this, graphproto.node(idx), graphproto.node(idx2));
304 }
305 else {
306 return ParseMatMul(*this, graphproto.node(idx));
307 }
308 } else if (nodeproto.op_type() == "Conv" || nodeproto.op_type() == "ConvTranspose") {
309 // Fuse Conv or ConvTranspose without bias and Add
310 if (idx2 < graphproto.node_size() && graphproto.node(idx2).op_type() == "Add") {
311 if (nodeproto.op_type() == "Conv") {
312 fFusedOperators[idx2] = true;
313 return ParseFuseConvAdd(*this, graphproto.node(idx), graphproto.node(idx2));
314 } else {
315 fFusedOperators[idx2] = true;
316 return ParseFuseConvTransposeAdd(*this, graphproto.node(idx), graphproto.node(idx2));
317 }
318 }
319 } else if (nodeproto.op_type() == "Gemm") {
320 // Fuse Gemm with activation operators
321 if (idx2 < graphproto.node_size() && graphproto.node(idx2).op_type() == "Relu") {
322 fFusedOperators[idx2] = true;
323 return ParseFuseGemmRelu(*this, graphproto.node(idx), graphproto.node(idx2));
324 }
325 } else if (nodeproto.op_type() == "BatchNormalization") {
326 if (idx2 < graphproto.node_size() && graphproto.node(idx2).op_type() == "Relu") {
327 fFusedOperators[idx2] = true;
328 return ParseFuseBatchnormRelu(*this, graphproto.node(idx), graphproto.node(idx2));
329 }
330 }
331 }
332
333
334
335 auto it = fOperatorsMapImpl->fOperatorsMap.find(op_type);
336 if (it == fOperatorsMapImpl->fOperatorsMap.end()) {
337 std::cout << "operator " << op_type << " is not supported" << std::endl;
338 throw std::runtime_error("TMVA::SOFIE Operator type " + op_type + " is not yet supported");
339 }
340 if (fVerbose) {
341 std::cout << "\tCreating operator " << op_type << std::endl;
342 }
343 return it->second(*this, nodeproto);
344}
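
// Note on fusion (illustrative): each recognized pair collapses two graph
// nodes into a single SOFIE operator, e.g. schematically
//
//    Y = MatMul(X, W);  Z = Add(Y, B)   ==>   Z = Gemm(X, W, B)
//
// The second node of the pair is flagged in fFusedOperators so that the
// operator-creation loop in ParseONNXGraph skips it.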

// Parse a model
RModel RModelParser_ONNX::Parse(std::string filename, bool verbose)
{
   fVerbose = verbose;

   fTensorTypeMap.clear();

   auto model = LoadModel(filename);
   if (!model)
      throw std::runtime_error("TMVA::SOFIE - Failed to load onnx file " + filename);

   const onnx::GraphProto &graph = model->graph(); // not a memory leak: model is freed automatically at the end

   std::time_t ttime = std::time(0);
   std::tm *gmt_time = std::gmtime(&ttime);
   std::string parsetime(std::asctime(gmt_time));

   // get the name of the model (file name without the directory path)
   char sep = '/';
#ifdef _WIN32
   sep = '\\';
#endif
   size_t isep = filename.rfind(sep, filename.length());
   std::string filename_nodir = filename;
   if (isep != std::string::npos) {
      filename_nodir = filename.substr(isep + 1, filename.length() - isep);
   }

   RModel rmodel(filename_nodir, parsetime);
   ParseONNXGraph(rmodel, graph);
   return rmodel;
}
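
// Usage sketch (illustrative user code; Generate() and OutputGenerated() are
// RModel methods, the file names are examples):
//
//    using namespace TMVA::Experimental::SOFIE;
//    RModelParser_ONNX parser;
//    RModel model = parser.Parse("model.onnx");
//    model.Generate();                    // emit the inference code
//    model.OutputGenerated("model.hxx");  // write it to a header file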

std::unique_ptr<onnx::ModelProto> RModelParser_ONNX::LoadModel(std::string filename) {

   // verify that the protobuf headers we compiled against match the linked library
   GOOGLE_PROTOBUF_VERIFY_VERSION;

   auto model = std::make_unique<onnx::ModelProto>();

   std::fstream input(filename, std::ios::in | std::ios::binary);
   if (!model->ParseFromIstream(&input)) {
      std::cerr << "TMVA::SOFIE - Failed to open onnx file " << filename << std::endl;
      return std::unique_ptr<onnx::ModelProto>();
   }

   // the ONNX version is ir_version(); model_version() returns 0
   if (fVerbose) {
      std::cout << "ONNX Version " << model->ir_version() << std::endl;
   }
   google::protobuf::ShutdownProtobufLibrary();
   return model;
}

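// Walk a graph and, recursively, any sub-graph stored in a node attribute
// (e.g. the branches of an If node), recording each unsupported operator type
// in missingOperators together with the nesting level at which it appears
// (level 1 = top-level graph).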
void RModelParser_ONNX::CheckGraph(const onnx::GraphProto &graph, int &level, std::map<std::string, int> &missingOperators) {
   if (fVerbose)
      std::cout << "\n" << graph.name() << " Graph operator list\n";
   for (int i = 0; i < graph.node_size(); i++) {
      const auto &node = graph.node(i);
      const std::string opType = node.op_type();
      if (fVerbose) {
         std::cout << "\tOperator " << i << " : " << opType << " (" << node.name() << "), " << graph.node(i).input_size()
                   << " inputs : {";
         for (int j = 0; j < graph.node(i).input_size(); j++) {
            std::cout << graph.node(i).input(j);
            if (j < graph.node(i).input_size() - 1)
               std::cout << ", ";
         }
         std::cout << " }" << std::endl;
      }
      // check if the operator is supported
      if (!IsRegisteredOperator(opType))
         missingOperators[opType] = level;
      // check whether a sub-graph exists as a node attribute
      for (int j = 0; j < node.attribute_size(); j++) {
         const auto &attribute = node.attribute(j);
         if (attribute.has_g()) {
            const auto &subGraph = attribute.g();
            level += 1;
            CheckGraph(subGraph, level, missingOperators);
         }
      }
   }
}

bool RModelParser_ONNX::CheckModel(std::string filename, bool verbose) {

   fVerbose = verbose;
   auto model = LoadModel(filename);
   if (!model) return false;

   const onnx::GraphProto &graph = model->graph();
   if (fVerbose)
      std::cout << "\nModel operator list " << model->producer_name() << "\n";

   std::map<std::string, int> missingOperators;
   int level = 1;
   CheckGraph(graph, level, missingOperators);

   if (!missingOperators.empty()) {
      std::cout << "List of missing operators for model loaded from file " << filename << std::endl;
      for (auto &op : missingOperators) {
         std::cout << op.first << " " << op.second << std::endl;
      }
      return false;
   }
   std::cout << "All operators in the loaded model are supported!\n";
   return true;
}

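// Usage sketch (illustrative): check operator coverage before a full parse.
//
//    RModelParser_ONNX parser;
//    if (parser.CheckModel("model.onnx"))
//       RModel m = parser.Parse("model.onnx");

// ParseONNXGraph fills an RModel from an ONNX GraphProto in stages:
// (1) register the graph inputs and their (possibly parametric) shapes,
// (2) copy the initializers (weights) into the model, (3) re-order the nodes
// so that every node comes after the producers of all of its inputs,
// (4) build the per-node children lists used for operator fusion, and
// (5) create an ROperator for each node, skipping nodes fused into a
// previous one.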
void RModelParser_ONNX::ParseONNXGraph(RModel &rmodel, const onnx::GraphProto &graph, std::string graphName)
{
   bool verbose = fVerbose;

   if (graphName.empty())
      graphName = graph.name();

   if (verbose)
      std::cout << "\nParsing Graph - " << graphName << std::endl;

   std::unordered_set<std::string> initializer_names;
   for (int i = 0; i < graph.initializer_size(); i++) {
      initializer_names.insert(graph.initializer(i).name());
   }

   if (verbose)
      std::cout << "Parsing model inputs...." << std::endl;
   // loop on the model inputs
   for (int i = 0; i < graph.input_size(); i++) {
      RegisterTensorType(graph.input(i).name(),
                         static_cast<ETensorType>(graph.input(i).type().tensor_type().elem_type()));

      if (verbose)
         std::cout << "\tgraph input " << i << " name " << graph.input(i).name() << " type "
                   << graph.input(i).type().tensor_type().elem_type() << std::endl;

      if (initializer_names.find(graph.input(i).name()) != initializer_names.end())
         continue;

      // an input data node is not a weight node (it has no initializer)
      const onnx::ValueInfoProto &valueinfoproto = graph.input(i);
      std::string input_name = valueinfoproto.name();

      ETensorType type = static_cast<ETensorType>(valueinfoproto.type().tensor_type().elem_type());

      std::vector<Dim> fShape;
      bool existParam = false;
      if (!valueinfoproto.type().tensor_type().has_shape())
         throw std::runtime_error("TMVA::SOFIE data node with no shape restrictions is not supported yet");
      for (int j = 0; j < valueinfoproto.type().tensor_type().shape().dim_size(); j++) {
         Dim dim;
         if (valueinfoproto.type().tensor_type().shape().dim(j).value_case() ==
             onnx::TensorShapeProto_Dimension::ValueCase::kDimValue) {
            int dim_value = valueinfoproto.type().tensor_type().shape().dim(j).dim_value();
            dim.dim = dim_value;
            // an input dim of -1 denotes a parametric shape
            if (dim_value < 0) {
               dim.isParam = true;
               existParam = true;
               dim.param = UTILITY::Clean_name(input_name) + "_size";
            }
         } else if (valueinfoproto.type().tensor_type().shape().dim(j).value_case() ==
                    onnx::TensorShapeProto_Dimension::ValueCase::kDimParam) {
            dim.isParam = true;
            existParam = true;
            dim.param = valueinfoproto.type().tensor_type().shape().dim(j).dim_param();
         } else {
            throw std::runtime_error("TMVA::SOFIE ONNX file error: Valueinfoproto " + input_name +
                                     " has neither dim_value nor dim_param! \n");
         }
         fShape.push_back(dim);
      }
      // if this TensorShapeProto has no dimension message, the ONNX IR defines it to be a scalar
      if (valueinfoproto.type().tensor_type().shape().dim_size() == 0) {
         Dim dim;
         dim.dim = 1;
         fShape.push_back(dim);
      }

      if (!existParam) {
         std::vector<size_t> fShape_sizet;
         for (auto &j : fShape) {
            fShape_sizet.push_back(j.dim);
         }

         rmodel.AddInputTensorInfo(input_name, type, fShape_sizet);
      } else {
         rmodel.AddInputTensorInfo(input_name, type, fShape);
      }
      rmodel.AddInputTensorName(input_name); // store also the names in the given order
   }

   std::map<std::string, int> allInitializedTensors;

   if (verbose)
      std::cout << "\nParsing the graph initializer list and filling the model initialized tensors" << std::endl;

   for (int i = 0; i < graph.initializer_size(); i++) {
      onnx::TensorProto *tensorproto = const_cast<onnx::TensorProto *>(&graph.initializer(i));
      std::vector<std::size_t> shape;
      std::size_t fLength = 1;
      for (int j = 0; j < tensorproto->dims_size(); j++) {
         shape.push_back(tensorproto->dims(j));
         fLength *= tensorproto->dims(j);
      }
      // in case of scalars keep an empty shape but with length = 1

      std::string input_name = graph.initializer(i).name();

      if (verbose)
         std::cout << "\t initializer " << i << " name " << input_name << " type " << graph.initializer(i).data_type()
                   << std::endl;

      // register also the initialized tensors
      auto tensor_type = static_cast<ETensorType>(graph.initializer(i).data_type());
      RegisterTensorType(input_name, tensor_type);

      switch (tensor_type) {
      case ETensorType::FLOAT: {
         std::shared_ptr<void> data = GetInitializedTensorData<float>(tensorproto, fLength);
         if (verbose) std::cout << "add FLOAT initialized tensor " << input_name << " shape " << ConvertShapeToString(shape) << std::endl;
         rmodel.AddInitializedTensor(input_name, ETensorType::FLOAT, shape, data);
         allInitializedTensors[input_name] = i;
         break;
      }
      case ETensorType::DOUBLE: {
         std::shared_ptr<void> data = GetInitializedTensorData<double>(tensorproto, fLength);
         if (verbose) std::cout << "add DOUBLE initialized tensor " << input_name << " shape " << ConvertShapeToString(shape) << std::endl;
         rmodel.AddInitializedTensor(input_name, ETensorType::DOUBLE, shape, data);
         allInitializedTensors[input_name] = i;
         break;
      }
      case ETensorType::INT32: {
         std::shared_ptr<void> data = GetInitializedTensorData<int32_t>(tensorproto, fLength);
         if (verbose) std::cout << "add INT32 initialized tensor " << input_name << " shape " << ConvertShapeToString(shape) << std::endl;
         rmodel.AddInitializedTensor(input_name, ETensorType::INT32, shape, data);
         allInitializedTensors[input_name] = i;
         break;
      }
      case ETensorType::INT64: {
         std::shared_ptr<void> data = GetInitializedTensorData<int64_t>(tensorproto, fLength);
         if (verbose) std::cout << "add INT64 initialized tensor " << input_name << " shape " << ConvertShapeToString(shape) << std::endl;
         rmodel.AddInitializedTensor(input_name, ETensorType::INT64, shape, data);
         allInitializedTensors[input_name] = i;
         break;
      }
      default:
         throw std::runtime_error("Data type in weight tensor " + graph.initializer(i).name() + " not supported!\n");
      }
   }

   // initial operator order
   if (verbose) {
      std::cout << "\nGraph operator list (ONNX order)\n";
      for (int i = 0; i < graph.node_size(); i++) {
         std::cout << "\tOperator " << i << " : " << graph.node(i).op_type() << " , " << graph.node(i).input_size()
                   << " inputs : {";
         for (int j = 0; j < graph.node(i).input_size(); j++) {
            std::cout << graph.node(i).input(j);
            if (j < graph.node(i).input_size() - 1)
               std::cout << ", ";
         }
         std::cout << " }" << std::endl;
      }
   }

   // make the order of the nodes
   if (verbose)
      std::cout << "\n***********************\nRe-Order graph operator list\n*************************\n";
   std::vector<size_t> nodesOrder;
   nodesOrder.reserve(graph.node_size());
   std::vector<bool> foundNodes(graph.node_size());

   // loop on the graph inputs
   std::map<std::string, int> allInputs;
   for (int i = 0; i < graph.input_size(); i++) {
      allInputs[graph.input(i).name()] = -1;
   }
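
   // Topological ordering: repeatedly sweep the remaining nodes and append
   // every node whose inputs are all known (graph inputs, initialized tensors
   // or outputs of already-placed nodes). Each sweep must place at least one
   // node, otherwise the graph is cyclic or refers to an unknown tensor and
   // the loop throws. Worst case this is O(n^2) in the number of nodes.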
   do {
      auto psize = nodesOrder.size();
      for (int i = 0; i < graph.node_size(); i++) {
         if (foundNodes[i])
            continue;
         // check that all inputs of this node exist before adding it to the list
         bool existInputs = true;
         int input_size = graph.node(i).input_size();
         // special case for Reshape, where the shape is an input and not a weight tensor
         if (fVerbose)
            std::cout << "Checking input of Node " << i << " : " << graph.node(i).name() << std::endl;
         for (int j = 0; j < input_size; j++) {
            std::string name = graph.node(i).input(j);
            // skip empty names
            if (!name.empty()) {
               existInputs &= (allInputs.find(name) != allInputs.end() ||
                               allInitializedTensors.find(name) != allInitializedTensors.end());
               if (fVerbose) {
                  std::cout << "\t\t input " << name << " "
                            << bool(allInputs.find(name) != allInputs.end()) << " "
                            << bool(allInitializedTensors.find(name) != allInitializedTensors.end()) << " "
                            << existInputs << std::endl;
               }
            }
         }
         if (!existInputs) {
            if (fVerbose) {
               std::cout << "skip node " << graph.node(i).op_type() << " " << graph.node(i).name() << " inputs are not existing ";
               for (int j = 0; j < input_size; j++) {
                  std::cout << graph.node(i).input(j) << " ";
               }
               std::cout << std::endl;
            }
            continue;
         }

         // add the node to the correctly ordered list
         if (verbose)
            std::cout << "===> New node " << graph.node(i).op_type() << " " << graph.node(i).name() << " order " << i << std::endl;

         nodesOrder.push_back(i);
         foundNodes[i] = true;
         // register the outputs
         for (int j = 0; j < graph.node(i).output_size(); j++) {
            if (fVerbose) std::cout << "\toutput : " << graph.node(i).output(j) << std::endl;
            allInputs[graph.node(i).output(j)] = i;
         }
      }
      // no new node added in this sweep - something is wrong
      if (nodesOrder.size() == psize) {
         if (!nodesOrder.empty()) {   // guard against an empty list on the first sweep
            int ilast = nodesOrder.back();
            std::cout << "cannot find a new node after " << graph.node(ilast).op_type() << " " << graph.node(ilast).name() << std::endl;
         }
         throw std::runtime_error("TMVA::SOFIE - cannot find a new node ");
      }
   } while ((int)nodesOrder.size() < graph.node_size());

   // find the list of children for each operator (used for fusing operators)
   std::vector<std::vector<int>> nodesChildren(graph.node_size());

   for (int k = 0; k < graph.node_size(); k++) {
      int i = nodesOrder[k];
      // reserve using the number of outputs of the operator
      if (graph.node(i).output_size() > 0) nodesChildren[i].reserve(graph.node(i).output_size());
      for (const auto &output_name : graph.node(i).output()) {
         // loop on all the nodes from this position in the ordered list
         for (int l = k; l < graph.node_size(); l++) {
            int j = nodesOrder[l];
            for (const auto &input_name : graph.node(j).input()) {
               if (input_name == output_name)
                  nodesChildren[i].push_back(j);
            }
         }
      }
   }

   // print the list of ordered operators with their inputs and children nodes
   if (verbose) {
      std::cout << "\nGraph operator list (re-ordered)\n";
      for (int k = 0; k < graph.node_size(); k++) {
         int i = nodesOrder[k];
         std::cout << "\tOperator " << i << " : " << graph.node(i).op_type() << " , " << graph.node(i).name() << " input tensors : {";
         for (int j = 0; j < graph.node(i).input_size(); j++) {
            std::cout << graph.node(i).input(j);
            if (j < graph.node(i).input_size() - 1)
               std::cout << ", ";
         }
         std::cout << " } ";
         std::cout << " children : {";
         for (const auto &ichild : nodesChildren[i]) {
            std::cout << " [ " << ichild << " " << graph.node(ichild).op_type() << " , " << graph.node(ichild).name() << "]";
         }
         std::cout << "}" << std::endl;
      }
   }

   // fill the model with operators
   if (verbose) {
      std::cout << "Fill RModel with operators...\n";
   }

   // the execution order has to be recorded separately to account for fused operators
   size_t node_order_exec = 0;
   fFusedOperators = std::vector<bool>(graph.node_size(), false);
   for (int i = 0; i < graph.node_size(); i++) {
      std::string op_type = graph.node(nodesOrder[i]).op_type();

      if (verbose) {
         std::cout << "\t" << i << " " << nodesOrder[i] << " parsing operator " << op_type << std::endl;
      }

      // nodesChildren is indexed by the original node index, not by the execution order
      std::unique_ptr<ROperator> op = ParseOperator(i, graph, nodesOrder, nodesChildren[nodesOrder[i]]);
      if (!op) {
         if (verbose) {
            std::cout << "\t\tskipping operator since it is fused with the previous one" << std::endl;
         }
         // skip fused nodes such as the Add after a MatMul
         continue;
      }
      rmodel.AddOperator(std::move(op), node_order_exec++);
   }

   std::vector<std::string> outputnames;
   if (verbose)
      std::cout << "\nParsing Graph output list\n";
   for (int i = 0; i < graph.output_size(); i++) {
      if (verbose)
         std::cout << "\toutput " << i << " name " << graph.output(i).name() << std::endl;
      outputnames.push_back(graph.output(i).name());
   }
   rmodel.AddOutputTensorNameList(outputnames);
}

} // namespace SOFIE
} // namespace Experimental
} // namespace TMVA