Logo ROOT  
Reference Guide
 
Loading...
Searching...
No Matches
ROperator_Reduce.hxx
Go to the documentation of this file.
1#ifndef TMVA_SOFIE_ROPERATOR_Reduce
2#define TMVA_SOFIE_ROPERATOR_Reduce
3
5#include "TMVA/ROperator.hxx"
6#include "TMVA/RModel.hxx"
7
8#include <memory>
9#include <sstream>
10#include <algorithm>
11#include <stdexcept>
12#include <vector>
13#include <cassert>
14
15namespace TMVA{
16namespace Experimental{
17namespace SOFIE{
18
20
 21template <typename T, EReduceOpMode Op>
// NOTE(review): the class head (original line 22, presumably
// "class ROperator_Reduce final : public ROperator") is elided in this
// Doxygen extract — confirm against the upstream header.
 23{
 24private:
 25 /* Attributes*/
// ONNX "keepdims" attribute: 1 keeps reduced axes as size-1 dims,
// 0 prunes them from the output shape.
 26 int fkeepdims = 1; //default value
// Axes to reduce over; normalized (negative axes) in ShapeInference and
// possibly overwritten from the axes input tensor in Initialize.
 27 std::vector<int64_t> fAttrAxes;
// Tensor names: input data (fNX), optional axes input (fNAxes, may be
// empty), and output (fNY).
 29 std::string fNX;
 30 std::string fNAxes;
 31 std::string fNY;
// Cached input/output shapes, filled by Initialize.
 32 std::vector<size_t> fShapeX;
 33 std::vector<size_t> fShapeY;
 34 std::vector<size_t> fShapeYNotPruned; // needed for fKeepdims=0
 35
 36
 37public:
 38
39 std::string Name() {
40 if (fReduceOpMode == ReduceMean) return "ReduceMean";
41 else if (fReduceOpMode == ReduceSumSquare ) return "ReduceSumSquare";
42 else if (fReduceOpMode == ReduceProd ) return "ReduceProd";
43 else if (fReduceOpMode == ReduceSum) return "ReduceSum";
44 return "Invalid";
45 }
46
// Construct a Reduce operator.
// keepdims : ONNX keepdims attribute (1 = keep reduced dims with size 1,
//            0 = prune them from the output shape).
// attrAxes : axes from the ONNX "axes" attribute (may be empty when the
//            axes are supplied through an input tensor instead).
// nameX    : name of the input data tensor.
// nameAxes : name of the optional axes input tensor ("" if absent).
// nameY    : name of the output tensor.
 48 ROperator_Reduce(int keepdims, std::vector<int64_t> attrAxes, std::string nameX, std::string nameAxes, std::string nameY):
 49 fkeepdims(keepdims), fAttrAxes(attrAxes), fNX(UTILITY::Clean_name(nameX)), fNAxes(UTILITY::Clean_name(nameAxes)), fNY(UTILITY::Clean_name(nameY)) {
// The template parameter selects the concrete reduction (Sum, Mean, ...).
 50 fReduceOpMode = Op;
 51
// NOTE(review): original line 52 is elided in this extract — presumably it
// registers fNX (and/or fNY) in fInputTensorNames/fOutputTensorNames;
// confirm against the upstream header.
// The axes tensor is an extra operator input only when it is provided.
 53 if(!fNAxes.empty()){
 54 fInputTensorNames.emplace_back(fNAxes);
 55 }
 56
 58 }
59
60 // type of output given input
61 std::vector<ETensorType> TypeInference(std::vector<ETensorType> input){
62 return input;
63 }
64
65 // shape of output tensors given input tensors
66 std::vector<std::vector<size_t>> ShapeInference(std::vector<std::vector<size_t>> input){
67 auto ret = input; //suggest copy to compiler
68 auto & outputShape = ret[0];
69 for (size_t j = 0; j < fAttrAxes.size(); j++) {
70 if (fAttrAxes[j] < 0) fAttrAxes[j] += outputShape.size();
71 if (fAttrAxes[j] < 0 || (size_t) fAttrAxes[j] >= outputShape.size() )
72 throw std::runtime_error("TMVA SOFIE Reduce Op - invalid axes values " + std::to_string(fAttrAxes[j]));
73 // set to 1 the reduced dims
75 }
77 // in case of pruning dimension we need to sort axes attributes
78 if (fkeepdims == 0) {
79 auto ax = fAttrAxes;
80 std::sort(ax.begin(), ax.end());
81 for (size_t j = 0; j < ax.size(); j++) {
82 // erase reduced dimensions, but keep last one
83 if (outputShape.size() > 1) {
84 outputShape.erase(outputShape.begin() + ax[j]);
85 for (size_t k = j+1; k < ax.size(); k++)
86 ax[k] -= 1; // decrease by one since we have removed a value
87 }
88 }
89 }
90 return ret;
91 }
// Initialize the operator from the model: validate the input tensor,
// resolve the reduction axes (input tensor > attribute > full reduction)
// and register the output tensor.
 92 void Initialize(RModel& model) override {
 93
 94 fUseSession = model.UseSession();
 95
 96 if (!model.CheckIfTensorAlreadyExist(fNX)) {
 97 // input must be a graph input, or already initialized intermediate tensor
 98 throw std::runtime_error("TMVA SOFIE Reduce Op Input Tensor " + fNX + " is not found in model");
 99 }
 100 fShapeX = model.GetTensorShape(fNX);
 101 // check if tensor with axes is provided
// An axes input tensor, when present, overrides the attribute axes.
 102 if (!fNAxes.empty()) {
// NOTE(review): elided lines 103/106 presumably fetch the initialized axes
// data (GetInitializedTensorData) and its length (ConvertShapeToLength),
// defining the ax_shptr and ax_length used below — confirm upstream.
 104 auto ax_ptr = static_cast<int64_t *>(ax_shptr.get());
 105 auto ax_shape = model.GetTensorShape(fNAxes);
 107 fAttrAxes = std::vector<int64_t>(ax_ptr, ax_ptr+ax_length);
 108 } else if (fAttrAxes.empty()) {
 109 // in case no axes is passed assume full reduction
 110 fAttrAxes.resize(fShapeX.size());
 111 for (size_t i = 0; i < fAttrAxes.size(); i++)
 112 fAttrAxes[i] = i;
 113 }
 114 // find shape of Y and add it in the list of intermediate tensors
// NOTE(review): elided lines 115-116 presumably compute fShapeY via
// ShapeInference and register it with model.AddIntermediateTensor.
 117 if (model.Verbose()){
 118 std::cout << Name() << " : " << fNX << " -> " << fNY << " shape " << ConvertShapeToString(fShapeY) << std::endl;
 119 }
// The emitted inference code uses std::fill, so <algorithm> is required.
 120 model.AddNeededStdLib("algorithm");
 121 }
122
// Generate the C++ inference code for this Reduce operator and return it
// as a string.  opName is made unique by prefixing "op_".
//
// NOTE(review): this Doxygen extract elides many original lines (129-135,
// 153, 163, 168, 182/184/186, 190, 198, 206/208/210, 215, 225, 245/247):
// mostly the local definitions of inputLength/outputLength/reducedLength
// and the strides (presumably via ConvertShapeToLength and
// ComputeStrideFromShape, both listed in this page's index), plus the
// "if (fReduceOpMode == ...)" condition lines selecting between the
// Prod/Sum/SumSquare/Mean emissions.  The dangling "else" branches below
// are artifacts of that elision, not necessarily upstream bugs.
 123 std::string Generate(std::string opName){
 124 opName = "op_" + opName;
// Initialize() must have run so that fShapeX/fShapeY are known.
 125 if (fShapeX.empty() || fShapeY.empty()) {
 126 throw std::runtime_error("TMVA SOFIE Reduce Op called to Generate without being initialized first");
 127 }
 128
 131
 133 // output stride (or not pruned vector)
 135
 136 // write here according to size of shape
 137 // in generation code can be done automatically
 138 // i0 = i / stride0 % shape0; i1 = i / stride1 % shape1 and so on
 139 // and we have for the inverse
 140 // i = i0 * s0 + i1 * s1 + i2 * s2 + i3 * s3 ....
 141
 142 // don't need to divide by last stride s[n-1] since it is 1 by definition
 143
 144 std::stringstream out;
 145 out << "\n//---- operator " << Name() << " " << opName << "\n";
// Classify where the reduced axes sit: contiguous at the end (kLast),
// contiguous at the beginning (kFirst), or elsewhere (kMiddle).  The
// first two admit a simpler two-loop emission; kMiddle needs the general
// stride-based index remapping.
 146 // check where is reduced axes are first or last one. In these case we can do a faster implementation
 147 enum EReduceDim {kFirst, kLast, kMiddle};
 148 EReduceDim reduceDims = kLast;
 149 int kmin = fShapeX.size()-fAttrAxes.size();
 150 for (int k = fShapeX.size()-1; k >= kmin; k--) {
 151 // if k is not a reduced axis is not last ones
 152 if (std::find(fAttrAxes.begin(), fAttrAxes.end(), k) == fAttrAxes.end()) {
// (elided line 153 presumably sets reduceDims = kMiddle before breaking)
 154 break;
 155 }
 156 }
 157 if (reduceDims == kMiddle) {
 158 reduceDims = kFirst;
 159 // check if at the beginning
 160 for (size_t k = 0; k < fAttrAxes.size(); k++) {
 161 // if k is not a reduced axis is not first ones
 162 if (std::find(fAttrAxes.begin(), fAttrAxes.end(), k) == fAttrAxes.end()) {
// (elided line 163 presumably resets reduceDims = kMiddle before breaking)
 164 break;
 165 }
 166 }
 167 }
// --- fast path: reduced axes are the innermost (last) dimensions --------
 169 if (reduceDims == kLast) {
 170 //std::cout << "reduction for operator " << opName << " is last" << std::endl;
 171 // new faster implementation using a single loop
 172 // faster to loop first on reduced dimension and then output
 173 // reset output tensors
 174
 175 // loop on output dimensions
 176 out << SP << "for (size_t i = 0; i < " << outputLength << "; i++) {\n";
 177 // loop on reduce dimensions
// Prod accumulates from the multiplicative identity; the others from 0.
 178 std::string startingValue = (fReduceOpMode == ReduceProd) ? "1" : "0";
 179 out << SP << SP << "tensor_" << fNY << "[i] = " << startingValue << ";\n";
 180 out << SP << SP << "for (size_t j = 0; j < " << reducedLength << "; j++) {\n";
 181
// (elided conditions select exactly one of the three emissions below:
//  Prod, Sum/Mean, SumSquare)
 183 out << SP << SP << SP << "tensor_" << fNY << "[i] *= tensor_" << fNX << "[i * " << reducedLength << " + j];\n";
 185 out << SP << SP << SP << "tensor_" << fNY << "[i] += tensor_" << fNX << "[i * " << reducedLength << " + j];\n";
 187 out << SP << SP << SP << "tensor_" << fNY << "[i] += tensor_" << fNX << "[i * " << reducedLength << " + j] * tensor_"
 188 << fNX << "[i * " << reducedLength << " + j];\n";
 189 out << SP << SP << "}\n"; // end j loop
// Mean: divide the accumulated sum by the reduced element count
// (elided line 190 presumably guards this with fReduceOpMode == ReduceMean).
 191 out << SP << SP << "tensor_" << fNY << "[i] /= static_cast<float>(" << reducedLength << ");\n";
 192
 193 out << SP << "}\n"; // end i loop
// --- fast path: reduced axes are the outermost (first) dimensions -------
 194 } else if (reduceDims == kFirst) {
 195 //std::cout << "reduction for operator " << opName << " is first" << std::endl;
 196 // case reduction is at beginning
 197 // reset output tensors
 199 out << SP << "std::fill(tensor_" << fNY <<", tensor_"<< fNY <<" + "<< outputLength << ", 1);\n";
 200 else
 201 out << SP << "std::fill(tensor_" << fNY <<", tensor_"<< fNY <<" + "<< outputLength << ", 0);\n";
 202
 203 out << SP << "for (size_t i = 0; i < " << reducedLength << "; i++) {\n";
 204 out << SP << SP << "for (size_t j = 0; j < " << outputLength << "; j++) {\n";
 205
 207 out << SP << SP << SP << "tensor_" << fNY << "[j] *= tensor_" << fNX << "[i * " << outputLength << " + j];\n";
 209 out << SP << SP << SP << "tensor_" << fNY << "[j] += tensor_" << fNX << "[i * " << outputLength << " + j];\n";
 211 out << SP << SP << SP << "tensor_" << fNY << "[j] += tensor_" << fNX << "[i * " << outputLength << " + j] * tensor_"
 212 << fNX << "[i * " << outputLength << " + j];\n";
 213 out << SP << SP << "}\n"; // end j loop
 214 out << SP << "}\n"; // end i loop
// NOTE(review): the loop condition below tests "i" while the loop variable
// is "j" — almost certainly a typo for "j < outputLength" (this string is
// emitted verbatim into generated code and would loop incorrectly);
// confirm and fix against the upstream file.
 216 out << SP << "for (size_t j = 0; i < " << outputLength << "; j++) {\n";
 217 out << SP << SP << "tensor_" << fNY << "[j] /= static_cast<float>(" << reducedLength << ");\n";
 218 out << SP << "}\n"; // end j loop
 219 }
 220 }
 221 else
// --- general path: reduced axes somewhere in the middle -----------------
 222 { // standard case
 223 //std::cout << "reduction for operator " << opName << " is middle" << std::endl;
 224 // reset output tensors
 226 out << SP << "std::fill(tensor_" << fNY <<", tensor_"<< fNY <<" + "<< outputLength << ", 1);\n";
 227 else
 228 out << SP << "std::fill(tensor_" << fNY <<", tensor_"<< fNY <<" + "<< outputLength << ",0);\n";
 229
 230 out << SP << "for (size_t i = 0; i < " << inputLength << "; i++) {\n";
 231
 232 size_t dim = fShapeX.size(); // this is the input dimension (e.g. 2, 3 or 4 or more)
 233
// Map the flat input index i to the flat output index: decompose i with
// the input strides, recompose with the output strides, skipping reduced
// axes (see the index formula in the comment near the top).
 234 // here we find output index
 235 out << SP << SP << "size_t outputIndex = 0;\n";
 236 for (size_t k = 0; k < dim; k++) {
 237 if (std::find(fAttrAxes.begin(), fAttrAxes.end(), k) == fAttrAxes.end()) {
 238 // do for not reducing axes
 239 out << SP << SP << "size_t i_" << k << " = i / " << inputStrides[k] << " % " << fShapeX[k] << ";\n";
 240 out << SP << SP << "outputIndex += i_" << k << " * " << outputStrides[k] << ";\n";
 241 }
 242 }
 243 // now compute reduction
 244 out << SP << SP << "// compute reduction....\n";
 246 out << SP << SP << "tensor_" << fNY << "[outputIndex] *= tensor_" << fNX << "[i];\n";
 248 out << SP << SP << "tensor_" << fNY << "[outputIndex] += tensor_" << fNX << "[i];\n";
 249 else if (fReduceOpMode == ReduceSumSquare) {
 250 out << SP << SP << "tensor_" << fNY << "[outputIndex] += tensor_" << fNX << "[i] * tensor_" << fNX
 251 << "[i];\n";
 252 }
 253 out << SP << "}\n"; // end loop on input elements
 254 // normalize for reduced mean
 255 if (fReduceOpMode == ReduceMean) {
 256 out << SP << "for (size_t i = 0; i < " << outputLength << "; i++) {\n";
 257 out << SP << SP << "tensor_" << fNY << "[i] /= static_cast<float>(" << reducedLength << ");\n";
 258 out << SP << "}\n";
 259 }
 260 }
 261
 262 return out.str();
 263 }
264
265};
266
267}//SOFIE
268}//Experimental
269}//TMVA
270
271
272#endif //TMVA_SOFIE_ROPERATOR_Reduce
273
ROOT::Detail::TRangeCast< T, true > TRangeDynCast
TRangeDynCast is an adapter class that allows the typed iteration through a TCollection.
Option_t Option_t TPoint TPoint const char GetTextMagnitude GetFillStyle GetLineColor GetLineWidth GetMarkerStyle GetTextAlign GetTextColor GetTextSize void input
const_iterator begin() const
const_iterator end() const
void AddNeededStdLib(std::string libname)
const ETensorType & GetTensorType(std::string name)
Definition RModel.cxx:94
void AddIntermediateTensor(std::string tensor_name, ETensorType type, std::vector< Dim > dim_shape)
Definition RModel.cxx:227
bool CheckIfTensorAlreadyExist(std::string tensor_name)
Definition RModel.cxx:122
const std::vector< size_t > & GetTensorShape(std::string name)
Definition RModel.cxx:56
std::shared_ptr< void > GetInitializedTensorData(std::string tensor_name)
Definition RModel.cxx:288
ROperator_Reduce(int keepdims, std::vector< int64_t > attrAxes, std::string nameX, std::string nameAxes, std::string nameY)
std::vector< std::vector< size_t > > ShapeInference(std::vector< std::vector< size_t > > input)
std::vector< ETensorType > TypeInference(std::vector< ETensorType > input)
std::vector< std::string_view > fInputTensorNames
Definition ROperator.hxx:46
const std::string SP
space used to correctly indent the generated C++ code
Definition ROperator.hxx:42
bool fUseSession
flag to identify if using the session class
Definition ROperator.hxx:43
std::vector< std::string_view > fOutputTensorNames
Definition ROperator.hxx:47
std::vector< size_t > ComputeStrideFromShape(const std::vector< size_t > &shape)
compute stride of a tensor given its shape (assume layout is row-major)
std::string ConvertShapeToString(std::vector< size_t > shape)
std::size_t ConvertShapeToLength(std::vector< size_t > shape)
create variable transformations