#include "TMVA/RModelParser_ONNX.hxx"
#include "onnx_proto3.pb.h"

#include <cstdint>
#include <cstring>
#include <ctime>
#include <fstream>
#include <iostream>
#include <map>
#include <memory>
#include <stdexcept>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vector>

namespace TMVA {
namespace Experimental {
namespace SOFIE {
// Return the names of all operators registered in the parser
std::vector<std::string> RModelParser_ONNX::GetRegisteredOperators()
{
   std::vector<std::string> ops;
   ops.reserve(fOperatorsMapImpl->fOperatorsMap.size());
   for (auto &it : fOperatorsMapImpl->fOperatorsMap)
      ops.emplace_back(it.first);
   return ops;
}
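// Minimal usage sketch: list the operators the parser currently knows about
// (the printed names are illustrative):
//
//    RModelParser_ONNX parser;
//    for (auto &op : parser.GetRegisteredOperators())
//       std::cout << op << "\n";   // e.g. "Gemm", "Relu", "Conv", ...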
// Parse the i-th node of the re-ordered list `nodes` into an ROperator
std::unique_ptr<ROperator>
RModelParser_ONNX::ParseOperator(const size_t i, const onnx::GraphProto &graphproto, const std::vector<size_t> &nodes)
{
   if (i >= nodes.size())
      throw std::runtime_error("TMVA::SOFIE - Error in parsing ordered operators " + std::to_string(i) +
                               " is >= " + std::to_string(nodes.size()));
   int idx = nodes[i];
   const auto &nodeproto = graphproto.node(idx);
   const std::string op_type = nodeproto.op_type();
   if (fVerbose)
      std::cout << "Parsing an operator " << op_type << std::endl;

   // try to fuse the node with the following one, when there is one
   if (i < nodes.size() - 1) {
      int idx2 = nodes[i + 1];
      if (op_type == "MatMul") {
         // fuse MatMul and Add
         if (idx2 < graphproto.node_size() && graphproto.node(idx2).op_type() == "Add") {
            return ParseFuseMatMulAdd(*this, graphproto.node(idx), graphproto.node(idx2));
         }
      } else if (nodeproto.op_type() == "Conv" || nodeproto.op_type() == "ConvTranspose") {
         // fuse Conv or ConvTranspose (without bias) and Add
         if (idx2 < graphproto.node_size() && graphproto.node(idx2).op_type() == "Add") {
            if (nodeproto.op_type() == "Conv") {
               return ParseFuseConvAdd(*this, graphproto.node(idx), graphproto.node(idx2));
            } else {
               return ParseFuseConvTransposeAdd(*this, graphproto.node(idx), graphproto.node(idx2));
            }
         }
      }
   }

   // skip an Add that was already fused with the preceding MatMul or ConvTranspose
   if (idx > 0 && op_type == "Add") {
      int idx0 = nodes[i - 1];
      if (graphproto.node(idx0).op_type() == "MatMul")
         return nullptr;
      else if (graphproto.node(idx0).op_type() == "ConvTranspose")
         return nullptr;
   }

   auto it = fOperatorsMapImpl->fOperatorsMap.find(op_type);
   if (it == fOperatorsMapImpl->fOperatorsMap.end())
      throw std::runtime_error("TMVA::SOFIE Operator type " + op_type + " is not yet supported");
   if (fVerbose)
      std::cout << "\tCreating operator " << op_type << std::endl;
   return it->second(*this, nodeproto);
}
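// Operators missing from the built-in map can be supported without touching
// ParseOperator: register a ParserFuncSignature callback under the ONNX op_type.
// A sketch, where "MyCustomOp" and ROperator_MyCustomOp are hypothetical:
//
//    parser.RegisterOperator("MyCustomOp",
//       [](RModelParser_ONNX &p, const onnx::NodeProto &node) -> std::unique_ptr<ROperator> {
//          // read node.input(...) / node.attribute(...) and build the operator
//          return std::make_unique<ROperator_MyCustomOp>(node.input(0), node.output(0));
//       });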
// Parse an ONNX file into an RModel
RModel RModelParser_ONNX::Parse(std::string filename, bool verbose)
{
   fVerbose = verbose;

   // strip the directory part to get the model name
   char sep = '/';
#ifdef _WIN32
   sep = '\\';
#endif
   size_t isep = filename.rfind(sep, filename.length());
   std::string filename_nodir = filename;
   if (isep != std::string::npos) {
      filename_nodir = filename.substr(isep + 1);
   }

   std::time_t ttime = std::time(0);
   std::tm *gmt_time = std::gmtime(&ttime);
   std::string parsetime(std::asctime(gmt_time));

   GOOGLE_PROTOBUF_VERIFY_VERSION;

   onnx::ModelProto model;
   RModel rmodel(filename_nodir, parsetime);

   std::fstream input(filename, std::ios::in | std::ios::binary);
   if (!model.ParseFromIstream(&input)) {
      throw std::runtime_error("TMVA::SOFIE - Failed to parse onnx file " + filename);
   }

   const onnx::GraphProto &graph = model.graph(); // not a memory leak: model is freed automatically at the end
   google::protobuf::ShutdownProtobufLibrary();

   // the ONNX version is ir_version() (model_version() returns 0)
   if (verbose)
      std::cout << "ONNX Version " << model.ir_version() << std::endl;
   // collect the names of the initialized tensors (weights), to tell them apart from true inputs
   std::unordered_set<std::string> initializer_names;
   for (int i = 0; i < graph.initializer_size(); i++) {
      initializer_names.insert(graph.initializer(i).name());
   }

   if (verbose)
      std::cout << "Parsing model inputs...." << std::endl;

   // loop over the model inputs
   for (int i = 0; i < graph.input_size(); i++) {
      if (verbose)
         std::cout << "\tgraph input " << i << " name " << graph.input(i).name() << " type "
                   << graph.input(i).type().tensor_type().elem_type() << std::endl;

      // initialized tensors are also listed among the inputs: skip them here
      if (initializer_names.find(graph.input(i).name()) != initializer_names.end())
         continue;

      const onnx::ValueInfoProto &valueinfoproto = graph.input(i);
      std::string input_name = valueinfoproto.name();

      ETensorType type = static_cast<ETensorType>(valueinfoproto.type().tensor_type().elem_type());
      if (type != ETensorType::FLOAT && type != ETensorType::INT32 && type != ETensorType::INT64)
         throw std::runtime_error("TMVA::SOFIE Data type in input tensor " + input_name + " not supported!\n");
      // parse the input shape: each dimension is either a known value (dim_value)
      // or a symbolic parameter (dim_param), e.g. a variable batch size
      std::vector<Dim> fShape;
      bool existParam = false;
      if (!valueinfoproto.type().tensor_type().has_shape())
         throw std::runtime_error("TMVA::SOFIE datanode with no shape restrictions is not supported yet");
      for (int j = 0; j < valueinfoproto.type().tensor_type().shape().dim_size(); j++) {
         Dim dim;
         if (valueinfoproto.type().tensor_type().shape().dim(j).value_case() ==
             onnx::TensorShapeProto_Dimension::ValueCase::kDimValue) {
            dim.dim = valueinfoproto.type().tensor_type().shape().dim(j).dim_value();
         } else if (valueinfoproto.type().tensor_type().shape().dim(j).value_case() ==
                    onnx::TensorShapeProto_Dimension::ValueCase::kDimParam) {
            dim.isParam = true;
            existParam = true;
            dim.param = valueinfoproto.type().tensor_type().shape().dim(j).dim_param();
         } else {
            throw std::runtime_error("TMVA::SOFIE ONNX file error: Valueinfoproto " + input_name +
                                     " has neither dim_value nor dim_param! \n");
         }
         fShape.push_back(dim);
      }
      // a TensorShapeProto without dimensions denotes a scalar: treat it as shape {1}
      if (valueinfoproto.type().tensor_type().shape().dim_size() == 0) {
         Dim dim;
         dim.dim = 1;
         fShape.push_back(dim);
      }

      if (!existParam) {
         // fixed shape: register the input with a std::vector<size_t>
         std::vector<size_t> fShape_sizet;
         for (auto &j : fShape)
            fShape_sizet.push_back(j.dim);
         rmodel.AddInputTensorInfo(input_name, type, fShape_sizet);
      } else {
         rmodel.AddInputTensorInfo(input_name, type, fShape);
      }
      rmodel.AddInputTensorName(input_name); // store the names in the given order
   }
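   // For example, an ONNX input declared as float32["N", 3, 224, 224] would be parsed
   // above into fShape = { {isParam=true, param="N"}, {dim=3}, {dim=224}, {dim=224} }
   // and registered through the std::vector<Dim> overload of AddInputTensorInfo
   // (the tensor name and sizes here are only illustrative).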
   // map of initialized tensor name -> initializer index, used later for the node re-ordering
   std::map<std::string, int> allInitializedTensors;

   if (verbose)
      std::cout << "\nParsing graph initializer list and fill model initialized tensors" << std::endl;

   for (int i = 0; i < graph.initializer_size(); i++) {
      onnx::TensorProto *tensorproto = const_cast<onnx::TensorProto *>(&graph.initializer(i));
      std::vector<std::size_t> shape;
      std::size_t fLength = 1;
      for (int j = 0; j < tensorproto->dims_size(); j++) {
         shape.push_back(tensorproto->dims(j));
         fLength *= tensorproto->dims(j);
      }
      // in case of a scalar the shape stays empty, but the length is 1

      std::string input_name = graph.initializer(i).name();

      if (verbose)
         std::cout << "\t initializer " << i << " name " << input_name << " type "
                   << graph.initializer(i).data_type() << std::endl;

      switch (static_cast<ETensorType>(graph.initializer(i).data_type())) {
      case ETensorType::FLOAT: {
         std::shared_ptr<void> data(malloc(fLength * sizeof(float)), free);

         if (!tensorproto->raw_data().empty()) {
#ifdef R__BYTESWAP
            // raw_data is little-endian, as on this platform: copy directly
            std::memcpy(data.get(), tensorproto->raw_data().c_str(), fLength * sizeof(float));
#else
            // big-endian platform: byte-swap every 32-bit word
            for (std::size_t k = 0; k < fLength; ++k)
               (reinterpret_cast<uint32_t *>(data.get()))[k] =
                  Rbswap_32((reinterpret_cast<const uint32_t *>(tensorproto->raw_data().c_str()))[k]);
#endif
         } else {
            tensorproto->mutable_float_data()->ExtractSubrange(0, tensorproto->float_data_size(),
                                                               static_cast<float *>(data.get()));
         }

         if (verbose)
            std::cout << "add FLOAT initialized tensor " << input_name << " shape "
                      << ConvertShapeToString(shape) << std::endl;
         rmodel.AddInitializedTensor(input_name, ETensorType::FLOAT, shape, data);
         allInitializedTensors[input_name] = i;
         break;
      }
      case ETensorType::INT64: {
         std::shared_ptr<void> data(malloc(fLength * sizeof(int64_t)), free);

         if (!tensorproto->raw_data().empty()) {
#ifdef R__BYTESWAP
            std::memcpy(data.get(), tensorproto->raw_data().c_str(), fLength * sizeof(int64_t));
#else
            for (std::size_t k = 0; k < fLength; ++k)
               (reinterpret_cast<uint64_t *>(data.get()))[k] =
                  Rbswap_64((reinterpret_cast<const uint64_t *>(tensorproto->raw_data().c_str()))[k]);
#endif
         } else {
            tensorproto->mutable_int64_data()->ExtractSubrange(0, tensorproto->int64_data_size(),
                                                               static_cast<int64_t *>(data.get()));
         }

         if (verbose)
            std::cout << "add INT64 initialized tensor " << input_name << " shape "
                      << ConvertShapeToString(shape) << std::endl;
         rmodel.AddInitializedTensor(input_name, ETensorType::INT64, shape, data);
         allInitializedTensors[input_name] = i;
         break;
      }
      default:
         throw std::runtime_error("Data type in weight tensor " + graph.initializer(i).name() + " not supported!\n");
      }
   }
   // print the operator list in the original ONNX order
   if (verbose) {
      std::cout << "\nGraph operator list (ONNX order)\n";
      for (int i = 0; i < graph.node_size(); i++) {
         std::cout << "\tOperator " << i << " : " << graph.node(i).op_type() << " , "
                   << graph.node(i).input_size() << " inputs : {";
         for (int j = 0; j < graph.node(i).input_size(); j++) {
            std::cout << graph.node(i).input(j);
            if (j < graph.node(i).input_size() - 1)
               std::cout << ", ";
         }
         std::cout << " }" << std::endl;
      }
   }
   // re-order the graph nodes so that every node appears after all of the nodes
   // producing its inputs (a simple iterative topological sort)
   if (verbose)
      std::cout << "\nRe-Order graph operator list\n";
   std::vector<size_t> nodesOrder;
   nodesOrder.reserve(graph.node_size());
   std::vector<bool> foundNodes(graph.node_size());
   // start from the graph inputs
   std::map<std::string, int> allInputs;
   for (int i = 0; i < graph.input_size(); i++) {
      allInputs[graph.input(i).name()] = -1;
   }
   do {
      auto psize = nodesOrder.size();
      for (int i = 0; i < graph.node_size(); i++) {
         if (foundNodes[i])
            continue;
         // check if all the node inputs are already available
         bool existInputs = true;
         int input_size = graph.node(i).input_size();
         for (int j = 0; j < input_size; j++) {
            std::string name = graph.node(i).input(j);
            // skip empty input names
            if (!name.empty()) {
               existInputs &= (allInputs.find(name) != allInputs.end() ||
                               allInitializedTensors.find(name) != allInitializedTensors.end());
               if (verbose)
                  std::cout << graph.node(i).op_type() << " input " << name << " "
                            << bool(allInputs.find(name) != allInputs.end()) << " "
                            << bool(allInitializedTensors.find(name) != allInitializedTensors.end())
                            << existInputs << std::endl;
            }
         }
         if (!existInputs) {
            if (verbose) {
               std::cout << "skip op " << graph.node(i).op_type() << " inputs are ";
               for (int j = 0; j < input_size; j++) {
                  std::cout << graph.node(i).input(j) << " ";
               }
               std::cout << std::endl;
            }
            continue;
         }
         if (verbose)
            std::cout << "\tadd node " << graph.node(i).op_type() << " order " << i << std::endl;

         nodesOrder.push_back(i);
         foundNodes[i] = true;
         // the node outputs become available inputs for the following nodes
         for (int j = 0; j < graph.node(i).output_size(); j++) {
            allInputs[graph.node(i).output(j)] = i;
         }
      }
      // if a full pass added no node, the remaining inputs can never be satisfied
      if (nodesOrder.size() == psize) {
         throw std::runtime_error("TMVA::SOFIE - cannot find a new node ");
      }
   } while ((int)nodesOrder.size() < graph.node_size());
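   // Example: for a graph listed in ONNX order as { 0:Relu(t1)->t2, 1:Gemm(x,W)->t1 },
   // the first pass skips node 0 (t1 not yet produced) and adds node 1, and the
   // second pass adds node 0, giving nodesOrder = {1, 0}.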
   // print the re-ordered operator list
   if (verbose) {
      std::cout << "\nGraph operator list (re-ordered)\n";
      for (int k = 0; k < graph.node_size(); k++) {
         int i = nodesOrder[k];
         std::cout << "\tOperator " << i << " : " << graph.node(i).op_type() << " , "
                   << graph.node(i).input_size() << " inputs : {";
         for (int j = 0; j < graph.node(i).input_size(); j++) {
            std::cout << graph.node(i).input(j);
            if (j < graph.node(i).input_size() - 1)
               std::cout << ", ";
         }
         std::cout << " }" << std::endl;
      }
   }
   // fill the RModel with operators, following the re-ordered list
   if (verbose)
      std::cout << "Fill RModel with operators...\n";
   for (int i = 0; i < graph.node_size(); i++) {
      std::string op_type = graph.node(nodesOrder[i]).op_type();

      if (verbose)
         std::cout << "\t" << i << " " << nodesOrder[i] << " parsing operator " << op_type << std::endl;

      std::unique_ptr<ROperator> rop = ParseOperator(i, graph, nodesOrder);
      if (!rop) {
         // a null pointer is returned for an Add that was fused with the previous node
         if (verbose)
            std::cout << "\t\tskipping operator since it is fused with previous one" << std::endl;
         continue;
      }
      rmodel.AddOperator(std::move(rop));
   }
   // collect the graph output names and register them in the model
   std::vector<std::string> outputnames;
   if (verbose)
      std::cout << "\nParsing Graph output list\n";
   for (int i = 0; i < graph.output_size(); i++) {
      if (verbose)
         std::cout << "\toutput " << i << " name " << graph.output(i).name() << std::endl;
      outputnames.push_back(graph.output(i).name());
   }
   rmodel.AddOutputTensorNameList(outputnames);

   return rmodel;
}

} // namespace SOFIE
} // namespace Experimental
} // namespace TMVA
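// End-to-end usage sketch (file names are placeholders):
//
//    using namespace TMVA::Experimental::SOFIE;
//    RModelParser_ONNX parser;
//    RModel model = parser.Parse("Model.onnx", /*verbose=*/true);
//    model.Generate();                    // generate the C++ inference code
//    model.OutputGenerated("Model.hxx");  // write the generated code to a header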