ROOT Reference Guide
RModel_GraphIndependent.cxx
Go to the documentation of this file.
#include <limits>
#include <algorithm>
#include <cctype>

#include "TMVA/RModel_GraphIndependent.hxx"

namespace TMVA {
namespace Experimental {
namespace SOFIE {

RModel_GraphIndependent::RModel_GraphIndependent(GraphIndependent_Init &graph_input_struct)
{
   edges_update_block = std::move(graph_input_struct.edges_update_block);
   nodes_update_block = std::move(graph_input_struct.nodes_update_block);
   globals_update_block = std::move(graph_input_struct.globals_update_block);

   num_nodes = graph_input_struct.num_nodes;
   num_edges = graph_input_struct.edges.size();
   num_node_features = graph_input_struct.num_node_features;
   num_edge_features = graph_input_struct.num_edge_features;
   num_global_features = graph_input_struct.num_global_features;

   fFileName = graph_input_struct.filename;
   fName = fFileName.substr(0, fFileName.rfind("."));

   std::time_t ttime = std::time(0);
   std::tm *gmt_time = std::gmtime(&ttime);
   fParseTime = std::asctime(gmt_time);
}
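
// Generate() assembles in fGC the C++ code of a standalone inference header:
// a Session struct for each configured update block (Edge_Update, Node_Update,
// Global_Update) and a top-level Session whose infer() method updates the edge,
// node and global data of a GNN_Data object in place.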
void RModel_GraphIndependent::Generate()
{
   std::string hgname;
   GenerateHeaderInfo(hgname);

   std::ofstream f;
   f.open(fName + ".dat");
   f.close();

   long next_pos = 0;

   // keep the original input feature sizes; the update blocks below may change the output ones
   size_t num_edge_features_input = num_edge_features;
   size_t num_node_features_input = num_node_features;
   size_t num_global_features_input = num_global_features;

   // Generating Infer function definition for the Edge update function
   if (edges_update_block) {
      size_t block_size = num_edges;
      fGC += "\n\nnamespace Edge_Update{\nstruct Session {\n";
      std::vector<std::vector<Dim>> update_Input = { { Dim{"num_edges", block_size}, Dim{num_edge_features} } };
      edges_update_block->Initialize();
      edges_update_block->AddInputTensors(update_Input);
      fGC += edges_update_block->GenerateModel(fName);
      next_pos = edges_update_block->GetFunctionBlock()->WriteInitializedTensorsToFile(fName + ".dat");
      fGC += "};\n}\n";

      // the number of output edge features can be smaller, so we need to correct it here
      // assume num_edge_features is not a parametric shape
      auto edges_update_output_shape = edges_update_block->GetFunctionBlock()->GetDynamicTensorShape(
         edges_update_block->GetFunctionBlock()->GetOutputTensorNames()[0]);
      if (!edges_update_output_shape[1].isParam && edges_update_output_shape[1].dim != num_edge_features) {
         num_edge_features = edges_update_output_shape[1].dim;
      }
   }

   // Generating Infer function definition for the Node update function
   // num_node_features is the output one
   if (nodes_update_block) {
      fGC += "\n\nnamespace Node_Update{\nstruct Session {\n";
      size_t block_size = num_nodes;
      std::vector<std::vector<Dim>> update_Input = { { Dim{"num_nodes", block_size}, Dim{num_node_features} } };
      nodes_update_block->Initialize();
      nodes_update_block->AddInputTensors(update_Input);
      fGC += nodes_update_block->GenerateModel(fName, next_pos);
      next_pos = nodes_update_block->GetFunctionBlock()->WriteInitializedTensorsToFile(fName + ".dat");
      fGC += "};\n}\n";

      // we need to correct the output number of node features
      auto nodes_update_output_shape = nodes_update_block->GetFunctionBlock()->GetDynamicTensorShape(
         nodes_update_block->GetFunctionBlock()->GetOutputTensorNames()[0]);
      if (!nodes_update_output_shape[1].isParam && nodes_update_output_shape[1].dim != num_node_features) {
         num_node_features = nodes_update_output_shape[1].dim;
      }
   }

   // Generating Infer function definition for the Global update function
   if (globals_update_block) {
      fGC += "\n\nnamespace Global_Update{\nstruct Session {\n";
      std::vector<std::vector<std::size_t>> update_Input = { { 1, num_global_features } };
      globals_update_block->Initialize();
      globals_update_block->AddInputTensors(update_Input);
      fGC += globals_update_block->GenerateModel(fName, next_pos);
      next_pos = globals_update_block->GetFunctionBlock()->WriteInitializedTensorsToFile(fName + ".dat");
      fGC += "};\n}\n";

      // we need to correct the output number of global features
      // global features are in shape[1]
#if 0
      auto globals_update_output_shape = globals_update_block->GetFunctionBlock()->GetDynamicTensorShape(
         globals_update_block->GetFunctionBlock()->GetOutputTensorNames()[0]);
      if (!globals_update_output_shape[1].isParam && globals_update_output_shape[1].dim != num_global_features) {
         num_global_features = globals_update_output_shape[1].dim;
      }
#endif
      if (globals_update_block->GetFunctionBlock()->GetTensorShape(
             globals_update_block->GetFunctionBlock()->GetOutputTensorNames()[0])[1] != num_global_features) {
         num_global_features = globals_update_block->GetFunctionBlock()->GetTensorShape(
            globals_update_block->GetFunctionBlock()->GetOutputTensorNames()[0])[1];
      }
   }

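   // For a model generated with all three update blocks, the code emitted below has
   // roughly this shape (illustrative sketch only; the sizes are made-up examples):
   //
   //    struct Session {
   //       Edge_Update::Session edge_update;
   //       std::vector<float> fEdgeInputs = std::vector<float>(20*2);
   //       Node_Update::Session node_update;
   //       std::vector<float> fNodeInputs = std::vector<float>(10*2);
   //       Global_Update::Session global_update;
   //       void infer(TMVA::Experimental::SOFIE::GNN_Data& input_graph) { ... }
   //    };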
   // computing in place on the input graph
   fGC += "struct Session {\n";
   fGC += "\n// Instantiating session objects for graph components\n";
   // create session classes and corresponding temporary vectors
   if (edges_update_block) {
      fGC += "Edge_Update::Session edge_update;\n";
      // this can be removed once fully dynamic edges and nodes are supported
      fGC += "std::vector<float> fEdgeInputs = std::vector<float>(" + std::to_string(num_edges) + "*" +
             std::to_string(num_edge_features_input) + ");\n";
      //fGC += "std::vector<float> fEdgeUpdates {" + std::to_string(num_edges) + "*" + std::to_string(num_edge_features) + "};";
   }
   if (nodes_update_block) {
      fGC += "Node_Update::Session node_update;\n";
      fGC += "std::vector<float> fNodeInputs = std::vector<float>(" + std::to_string(num_nodes) + "*" +
             std::to_string(num_node_features_input) + ");\n";
      //fGC += "std::vector<float> fNodeUpdates {" + std::to_string(num_nodes) + "*" + std::to_string(num_node_features) + "};";
   }
   if (globals_update_block) {
      fGC += "Global_Update::Session global_update;\n\n";
      //fGC += "std::vector<float> fGlobalUpdates {" + std::to_string(num_global_features) + "};";
   }

   fGC += "\nvoid infer(TMVA::Experimental::SOFIE::GNN_Data& input_graph){\n";

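   // For each configured block the generated infer() body follows the same pattern:
   // gather the per-edge (or per-node) feature rows into the pre-allocated buffer,
   // run the corresponding update Session, resize the graph tensor when the output
   // feature size differs from the input one, and copy the updates back in place.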
   // computing updated edge attributes
   // could use std::span
   if (edges_update_block) {
      fGC += "\n// --- Edge Update ---\n";

      std::string e_size_input = std::to_string(num_edge_features_input);
      fGC += "size_t n_edges = input_graph.edge_data.GetShape()[0];\n";
      fGC += "for (size_t k = 0; k < n_edges; k++) { \n";
      fGC += " std::copy(input_graph.edge_data.GetData() + k * " + e_size_input +
             ", input_graph.edge_data.GetData() + (k + 1) * " + e_size_input + ", fEdgeInputs.begin() + k * " +
             e_size_input + ");\n";
      fGC += "}\n";

      fGC += "auto edgeUpdates = " + edges_update_block->Generate({"n_edges", "fEdgeInputs.data()"}) + "\n";

      if (num_edge_features != num_edge_features_input) {
         fGC += "\n// resize edge graph data since output feature size is not equal to input size\n";
         fGC += "input_graph.edge_data = input_graph.edge_data.Resize({ n_edges, " +
                std::to_string(num_edge_features) + "});\n";
      }
      // copy output
      fGC += "\nfor (size_t k = 0; k < n_edges; k++) { \n";
      fGC += " std::copy(edgeUpdates.begin()+ k * " + std::to_string(num_edge_features) +
             ", edgeUpdates.begin()+ (k+1) * " + std::to_string(num_edge_features) +
             ",input_graph.edge_data.GetData() + k * " + std::to_string(num_edge_features) + ");\n";
      fGC += "}\n";
      fGC += "\n";
   }

   // computing updated node attributes
   if (nodes_update_block) {
      std::string n_size_input = std::to_string(num_node_features_input);
      fGC += "\n// --- Node Update ---\n";
      fGC += "size_t n_nodes = input_graph.node_data.GetShape()[0];\n";
      fGC += "for (size_t k = 0; k < n_nodes; k++) { \n";
      fGC += " std::copy(input_graph.node_data.GetData() + k * " + n_size_input +
             ", input_graph.node_data.GetData() + (k + 1) * " + n_size_input + ", fNodeInputs.begin() + k * " +
             n_size_input + ");\n";
      fGC += "}\n";

      fGC += "auto nodeUpdates = ";
      fGC += nodes_update_block->Generate({"n_nodes", "fNodeInputs.data()"}); // computing updated node attributes
      fGC += "\n";

      if (num_node_features != num_node_features_input) {
         fGC += "\n// resize node graph data since output feature size is not equal to input size\n";
         fGC += "input_graph.node_data = input_graph.node_data.Resize({ n_nodes, " +
                std::to_string(num_node_features) + "});\n";
      }
      // copy output
      fGC += "\nfor (size_t k = 0; k < n_nodes; k++) { \n";
      fGC += " std::copy(nodeUpdates.begin()+ k * " + std::to_string(num_node_features) +
             ", nodeUpdates.begin() + (k+1) * " + std::to_string(num_node_features) +
             ",input_graph.node_data.GetData() + k * " + std::to_string(num_node_features) + ");\n";
      fGC += "}\n";
      fGC += "\n";
   }

   // computing updated global attributes
   if (globals_update_block) {
      fGC += "\n// --- Global Update ---\n";
      fGC += "std::vector<float> Global_Data = ";
      fGC += globals_update_block->Generate({"input_graph.global_data.GetData()"});
      fGC += "\n";

      if (num_global_features != num_global_features_input) {
         fGC += "\n// resize global graph data since output feature size is not equal to input size\n";
         fGC += "input_graph.global_data = input_graph.global_data.Resize({" + std::to_string(num_global_features) +
                "});\n";
      }

      fGC += "\nstd::copy(Global_Data.begin(), Global_Data.end(), input_graph.global_data.GetData());";
      fGC += "\n";
   }

   fGC += ("}\n};\n} //TMVA_SOFIE_" + fName + "\n");
   fGC += "\n#endif // TMVA_SOFIE_" + hgname + "\n";
}

} // namespace SOFIE
} // namespace Experimental
} // namespace TMVA
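
Usage sketch (not part of this file): assuming a model was generated with fName == "Graph", so that the code above wrote a header "Graph.hxx" and a weight file "Graph.dat", and assuming GenerateHeaderInfo() opens the namespace TMVA_SOFIE_Graph and that GNN_Data is available through the SOFIE headers, the generated Session could be driven roughly as follows. The model name, include path and main() wrapper are illustrative assumptions, not taken from this source file.

// Illustrative sketch only: "Graph" and its header name are placeholder assumptions.
#include "Graph.hxx"  // header produced by RModel_GraphIndependent::Generate()

int main()
{
   TMVA::Experimental::SOFIE::GNN_Data graph;
   // ... fill graph.edge_data, graph.node_data and graph.global_data ...

   TMVA_SOFIE_Graph::Session session; // default-constructs the per-block update sessions
   session.infer(graph);              // updates edge, node and global features in place
   return 0;
}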