namespace Experimental {
return static_cast<std::underlying_type_t<Options>>(opA) | static_cast<std::underlying_type_t<Options>>(opB);
return opA | static_cast<std::underlying_type_t<Options>>(opB);
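// --- Usage sketch (not part of RModel.cxx; names below are illustrative only) ---
// The two operator| overloads above fold SOFIE Options values into their underlying
// integer type, which is what RModel::Generate(std::underlying_type_t<Options>, ...) accepts:
//   auto opts = Options::kNoSession | Options::kNoWeightFile;   // Options | Options
//   opts = opts | Options::kRootBinaryWeightFile;               // underlying value | Options
//   model.Generate(opts);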
return f->second.shape;
return f2->second.shape();
throw std::runtime_error("TMVA SOFIE tensor [" + name + "] is an input tensor with unspecified dimension parameter");
return f4->second.shape;
throw std::runtime_error("TMVA SOFIE tensor [" + name + "] is a dynamic tensor. Use GetDynamicTensorShape instead of GetTensorShape");
throw std::runtime_error("TMVA SOFIE tensor [" + name + "] for which the shape is requested is not found");
return f->second.shape;
return f->second.shape;
return f->second.type;
return f2->second.type();
return f3->second.type;
return f4->second.type;
return f5->second.type;
throw std::runtime_error("TMVA SOFIE tensor [" + name + "] for which the type is requested is not found");
throw std::runtime_error("TMVA-SOFIE: input tensor with name " + input_name + " already exists \n");
throw std::runtime_error("TMVA-SOFIE: input tensor with name " + input_name + " already exists \n");
auto libs = op->GetStdLibs();
throw std::runtime_error("TMVA-SOFIE: initialized tensor with name " + tensor_name + " already exists \n");
throw std::runtime_error("TMVA-SOFIE: initialized tensor with name " + tensor_name + " already exists \n");
throw std::runtime_error("TMVA-SOFIE: intermediate tensor with name " + tensor_name + " already exists \n");
throw std::runtime_error("TMVA-SOFIE: intermediate tensor with name " + tensor_name + " already exists \n");
for (auto &d : shape) {
if (d.dim != size_t(-1)) {
throw std::runtime_error("TMVA-SOFIE: tensor " + tensor_name + " not found when trying to update it");
throw std::runtime_error("TMVA-SOFIE: tensor " + tensor_name + " not found when trying to get its data");
return f->second.sharedptr();
throw std::runtime_error("TMVA-SOFIE: initialized tensor " + tensor_name + " not found when trying to get its info");
t->second.SetNotWritable();
std::cout << "Model is already initialized - skip initialization " << std::endl;
if (verbose) std::cout << "looking at the tensor " << input.first << std::endl;
for (auto &d : input.second.shape) {
std::string pname = d.param;
std::cout << "Tensor: " << input.first << " - fix parametric shape " << itr->first << " to " << itr->second << std::endl;
if (!shape.empty()) {
for (auto &d : input.second.shape) {
std::cout << "Initializing operator " << i << " " << typeid(r).name() << std::endl;
op->Initialize(*this);
graph->fParentGraph = this;
graph->fIsSubGraph = true;
for (auto &e : graph->fNeededBlasRoutines)
for (auto e : graph->fNeededStdLib)
graph->fInputTensorNames.push_back(name);
fGC += "// initialized tensors\n";
std::stringstream strs;
strs << "float tensor_" << i.first << "[" << length << "] = {";
float const *data = i.second.data<float>();
for (size_t idx = 0; idx < length; idx++) {
strs << std::setprecision(std::numeric_limits<float>::max_digits10) << data[idx];
strs << "int64_t tensor_" << i.first << "[" << length << "] = {";
int64_t const *data = i.second.data<int64_t>();
for (size_t idx = 0; idx < length; idx++) {
fGC += "std::vector<float> fTensor_" + i.first + " = std::vector<float>(" + std::to_string(length) + ");\n";
fGC += "float * tensor_" + i.first + " = fTensor_" + i.first + ".data();\n";
fGC += "\n//--- declare and allocate the intermediate tensors\n";
fGC += "std::vector<float> fTensor_" + i.first + " = std::vector<float>(" + std::to_string(length) + ");\n";
fGC += "float * tensor_" + i.first + " = fTensor_" + i.first + ".data();\n";
fGC += "std::vector<double> fTensor_" + i.first + " = std::vector<double>(" + std::to_string(length) + ");\n";
fGC += "double * tensor_" + i.first + " = fTensor_" + i.first + ".data();\n";
fGC += "std::vector<int64_t> fTensor_" + i.first + " = std::vector<int64_t>(" + std::to_string(length) + ");\n";
fGC += "int64_t * tensor_" + i.first + " = fTensor_" + i.first + ".data();\n";
fGC += "std::vector<bool> fTensor_" + i.first + " = std::vector<bool>(" + std::to_string(length) + ");\n";
fGC += "//--- declare the dynamic tensors\n";
fGC += "std::vector<float> fTensor_" + i.first + ";\n";
fGC += "float * tensor_" + i.first + " = nullptr;\n";
fGC += "std::vector<double> fTensor_" + i.first + ";\n";
fGC += "double * tensor_" + i.first + " = nullptr;\n";
fGC += "std::vector<int64_t> fTensor_" + i.first + ";\n";
fGC += "int64_t * tensor_" + i.first + " = nullptr;\n";
fGC += "//---- allocate the intermediate dynamic tensors\n";
std::stringstream out;
out << SP << "if (" << length << " > 0) {\n";
out << SP << SP << "fTensor_" << i.first << ".resize(" << length << ");\n";
out << SP << SP << "tensor_" << i.first << " = fTensor_" << i.first << ".data();\n";
for (auto &d : shape) {
std::string pName = d.param;
rGC += d.param + ",";
throw std::runtime_error("TMVA-SOFIE: input tensor " + name + " is of a data type which is not yet supported.");
std::cout << "Generating main inference code for " << fName << std::endl;
throw std::runtime_error("TMVA-SOFIE: output size=0 are not supported");
if (outputSize == 1) {
for (size_t i = 1; i < outputSize; i++) {
throw std::runtime_error("TMVA-SOFIE: different output tensor types are not supported");
for (size_t id = 0; id < fOperators.size(); id++) {
if (fVerbose) std::cout << "Generating code for operator .... " << id << std::endl;
if (outputSize == 1) {
fGC += SP + "return ret;\n";
for (size_t i = 0; i < outputSize; i++) {
if (i < outputSize - 1)
fGC += SP + "return ret;\n";
fGC += "struct Session {\n";
fGC += "struct Session_" + fName + " {\n";
fGC += "Session_" + graph->fName + " fSession_" + graph->fName + ";\n";
for (size_t id = 0; id < fOperators.size(); id++) {
std::string opName = std::to_string(id);
std::string fileName = fName;
fGC += sessionName + "(std::string filename =\"" + fileName + "\"";
fGC += " size_t " + p.first + " = " + p.second;
fGC += "\n//--- reading weights from file\n";
for (size_t id = 0; id < fOperators.size(); id++) {
fGC += "}; // end of Session\n";
void RModel::Generate(std::underlying_type_t<Options> options, int batchSize, long pos, bool verbose)
throw std::runtime_error("TMVA-SOFIE: RModel::Generate: cannot use a separate weight file without generating a Session class");
if (static_cast<std::underlying_type_t<Options>>(Options::kGNN) & options)
std::cout << "generate session code for subgraph " << graph->fName << std::endl;
graph->GenerateSessionCode();
std::cout << "generate Main session code - model " << fName << std::endl;
fGC += ("} //TMVA_SOFIE_" + fName + "\n");
fGC += " std::ifstream f;\n";
fGC += " f.open(filename);\n";
fGC += " if (!f.is_open()) {\n";
fGC += " throw std::runtime_error(\"tmva-sofie failed to open file \" + filename + \" for input weights\");\n";
fGC += " f.seekg(" + std::to_string(pos) + ");\n";
fGC += " std::string tensor_name;\n";
fGC += " size_t length;\n";
if (!i.second.IsWeightTensor()) continue;
fGC += " f >> tensor_name >> length;\n";
fGC += " std::string err_msg = \"TMVA-SOFIE failed to read the correct tensor name; expected name is " +
fGC += " throw std::runtime_error(err_msg);\n";
fGC += " std::string err_msg = \"TMVA-SOFIE failed to read the correct tensor size; expected size is " + slength + " , read \" + std::to_string(length) ;\n";
fGC += " throw std::runtime_error(err_msg);\n";
fGC += " for (size_t i = 0; i < length; ++i)\n";
fGC += " if (f.fail()) {\n";
fGC += " throw std::runtime_error(\"TMVA-SOFIE failed to read the values for tensor " + tensor_name + "\");\n";
fGC += " f.close();\n";
fGC += " std::unique_ptr<TFile> rootFile(TFile::Open(filename.c_str(), \"READ\"));\n";
fGC += " if (!rootFile->IsOpen()) {\n";
fGC += " throw std::runtime_error(\"tmva-sofie failed to open ROOT file for input weights\");\n";
fGC += " if (!rootFile->GetKey(\"" + dirName + "\")) {\n";
fGC += " throw std::runtime_error(\"tmva-sofie failed to open ROOT directory for input weights\");\n";
if (!i.second.IsWeightTensor()) continue;
fGC += " fTensor_" + i.first + " = *reinterpret_cast<std::vector<float>*>(rootFile->Get(\"";
fGC += " fTensor_" + i.first + " = *reinterpret_cast<std::vector<double>*>(rootFile->Get(\"";
fGC += " fTensor_" + i.first + " = *reinterpret_cast<std::vector<int64_t>*>(rootFile->Get(\"";
throw std::runtime_error("tmva-sofie tensor " + tensor_name + " with type " + ConvertTypeToString(i.second.type()) + " cannot be read from a ROOT file");
throw std::runtime_error("SOFIE-GNN does not yet support writing to a ROOT file.");
if (!item.second.IsWeightTensor()) continue;
std::string tensorName = "tensor_" + item.first;
const float* data = item.second.data<float>();
const double* data = item.second.data<double>();
const int64_t* data = item.second.data<int64_t>();
" cannot be written to a ROOT file");
throw std::runtime_error("tmva-sofie failed to open file " + filename + " for tensor weight data");
if (!i.second.IsWeightTensor()) {
const float *data = i.second.data<float>();
for (size_t idx = 0; idx < length; idx++) {
if (value != 0. && std::abs(value) < std::numeric_limits<float>::min()) value = 0;
f << std::setprecision(std::numeric_limits<float>::max_digits10) << value;
f << ((idx < length - 1) ? " " : "\n");
throw std::runtime_error("tmva-sofie failed to write tensor data to file for " + tensor_name);
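// --- Usage sketch for the generated artifacts (not part of RModel.cxx; names depend on the model) ---
// The Session constructor generated above loads the weights written by
// WriteInitializedTensorsToFile, and infer() runs the generated inference code:
//   #include "Model.hxx"                              // header produced by OutputGenerated
//   TMVA_SOFIE_Model::Session session("Model.dat");   // weight file written alongside it
//   std::vector<float> input(inputLength);            // flattened input tensor (assumed float)
//   std::vector<float> output = session.infer(input.data());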
std::cout << "Model requires following inputs:\n";
std::cout << "Parametrised Tensor name: " << inputInfo.first << "\t";
std::cout << "shape: [";
for (size_t i = 0; i < inputInfo.second.shape.size(); i++) {
std::cout << inputInfo.second.shape[i].param;
std::cout << inputInfo.second.shape[i].dim;
if (i < inputInfo.second.shape.size() - 1) std::cout << ",";
std::cout << "]" << std::endl;
std::cout << "Fully Specified Tensor name: " << inputInfo.first << "\t";
std::cout << "shape: [";
for (size_t i = 0; i < inputInfo.second.shape.size(); i++) {
if (i < inputInfo.second.shape.size() - 1) std::cout << ",";
std::cout << "]" << std::endl;
std::cout << "Model initialized the following tensors:\n";
std::cout << "Tensor name: \"" << it.first << "\"\t";
std::cout << "shape: [";
for (size_t i = 0; i < it.second.shape().size(); i++) {
std::cout << it.second.shape()[i];
if (i < it.second.shape().size() - 1) std::cout << ",";
if (it.second.IsConstantTensor()) std::cout << " (Constant)";
else if (!it.second.IsWeightTensor()) std::cout << " (Not Writable)";
std::cout << std::endl;
std::cout << "Model specifies the following intermediate tensors:\n";
std::cout << "Tensor name: \"" << it.first << "\"\t";
std::cout << "shape: [";
for (size_t i = 0; i < it.second.shape.size(); i++) {
std::cout << it.second.shape[i];
if (i < it.second.shape.size() - 1) std::cout << ",";
std::cout << "]" << std::endl;
std::cout << "Model specifies the following dynamic tensors:\n";
std::cout << "Tensor name: \"" << it.first << "\"\t";
std::cout << "shape: [";
for (size_t i = 0; i < it.second.shape.size(); i++) {
std::cout << it.second.shape[i].GetVal();
if (i < it.second.shape.size() - 1) std::cout << ",";
std::cout << "]" << std::endl;
std::cout << "Model specifies the following output tensors:\n";
std::cout << "Tensor name: \"" << it << "\"\t";
std::cout << "Tensor " << name << " not found in model's initialized tensor list" << std::endl;
std::cout << "Tensor name: " << it->first << "\t";
std::cout << "shape: [";
for (size_t i = 0; i < it->second.shape().size(); i++) {
std::cout << it->second.shape()[i];
length *= it->second.shape()[i];
if (i < it->second.shape().size() - 1) std::cout << ",";
std::cout << "]" << std::endl;
std::cout << "data: [" << std::endl;
for (int i = 0; i < n_print; i++) {
if (i < n_print - 1) std::cout << " ,";
if (ellipsis) std::cout << ", ...";
std::cout << "]" << std::endl;
size_t pos = filename.find(".hxx");
if (R__b.IsReading()) {
RModel::Class()->ReadBuffer(R__b, this);
i->second.CastPersistentToShared();
i->second.CastSharedToPersistent();
RModel::Class()->WriteBuffer(R__b, this);
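// --- I/O sketch (not part of RModel.cxx): the custom Streamer above enables standard ROOT object I/O ---
//   TFile fout("model.root", "RECREATE");
//   fout.WriteObject(&model, "model");
//   fout.Close();
//   TFile fin("model.root");
//   auto *loaded = fin.Get<TMVA::Experimental::SOFIE::RModel>("model");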