ONNX Version 6
Parsing Graph - Linear_16.onnx
Parsing model inputs....
graph input 0 name input.1 type 1
Parsing graph initializer list and fill model initialized tensors
initializer 0 name 0.bias type 1
add FLOAT initialized tensor 0.bias shape { 50 }
initializer 1 name 0.weight type 1
add FLOAT initialized tensor 0.weight shape { 50 , 100 }
initializer 2 name 10.bias type 1
add FLOAT initialized tensor 10.bias shape { 50 }
initializer 3 name 10.weight type 1
add FLOAT initialized tensor 10.weight shape { 50 , 50 }
initializer 4 name 12.bias type 1
add FLOAT initialized tensor 12.bias shape { 50 }
initializer 5 name 12.weight type 1
add FLOAT initialized tensor 12.weight shape { 50 , 50 }
initializer 6 name 14.bias type 1
add FLOAT initialized tensor 14.bias shape { 50 }
initializer 7 name 14.weight type 1
add FLOAT initialized tensor 14.weight shape { 50 , 50 }
initializer 8 name 16.bias type 1
add FLOAT initialized tensor 16.bias shape { 50 }
initializer 9 name 16.weight type 1
add FLOAT initialized tensor 16.weight shape { 50 , 50 }
initializer 10 name 18.bias type 1
add FLOAT initialized tensor 18.bias shape { 10 }
initializer 11 name 18.weight type 1
add FLOAT initialized tensor 18.weight shape { 10 , 50 }
initializer 12 name 2.bias type 1
add FLOAT initialized tensor 2.bias shape { 50 }
initializer 13 name 2.weight type 1
add FLOAT initialized tensor 2.weight shape { 50 , 50 }
initializer 14 name 4.bias type 1
add FLOAT initialized tensor 4.bias shape { 50 }
initializer 15 name 4.weight type 1
add FLOAT initialized tensor 4.weight shape { 50 , 50 }
initializer 16 name 6.bias type 1
add FLOAT initialized tensor 6.bias shape { 50 }
initializer 17 name 6.weight type 1
add FLOAT initialized tensor 6.weight shape { 50 , 50 }
initializer 18 name 8.bias type 1
add FLOAT initialized tensor 8.bias shape { 50 }
initializer 19 name 8.weight type 1
add FLOAT initialized tensor 8.weight shape { 50 , 50 }
Graph operator list (ONNX order)
Operator 0 : Gemm , 3 inputs : {input.1, 0.weight, 0.bias }
Operator 1 : Relu , 1 inputs : {21 }
Operator 2 : Gemm , 3 inputs : {22, 2.weight, 2.bias }
Operator 3 : Relu , 1 inputs : {23 }
Operator 4 : Gemm , 3 inputs : {24, 4.weight, 4.bias }
Operator 5 : Relu , 1 inputs : {25 }
Operator 6 : Gemm , 3 inputs : {26, 6.weight, 6.bias }
Operator 7 : Relu , 1 inputs : {27 }
Operator 8 : Gemm , 3 inputs : {28, 8.weight, 8.bias }
Operator 9 : Relu , 1 inputs : {29 }
Operator 10 : Gemm , 3 inputs : {30, 10.weight, 10.bias }
Operator 11 : Relu , 1 inputs : {31 }
Operator 12 : Gemm , 3 inputs : {32, 12.weight, 12.bias }
Operator 13 : Relu , 1 inputs : {33 }
Operator 14 : Gemm , 3 inputs : {34, 14.weight, 14.bias }
Operator 15 : Relu , 1 inputs : {35 }
Operator 16 : Gemm , 3 inputs : {36, 16.weight, 16.bias }
Operator 17 : Relu , 1 inputs : {37 }
Operator 18 : Gemm , 3 inputs : {38, 18.weight, 18.bias }
***********************
Re-Order graph operator list
*************************
Checking input of Node 0 : Gemm_0
input input.1 1 0 1
input 0.weight 0 1 1
input 0.bias 0 1 1
===> New node Gemm Gemm_0 order 0
output : 21
Checking input of Node 1 : Relu_1
input 21 1 0 1
===> New node Relu Relu_1 order 1
output : 22
Checking input of Node 2 : Gemm_2
input 22 1 0 1
input 2.weight 0 1 1
input 2.bias 0 1 1
===> New node Gemm Gemm_2 order 2
output : 23
Checking input of Node 3 : Relu_3
input 23 1 0 1
===> New node Relu Relu_3 order 3
output : 24
Checking input of Node 4 : Gemm_4
input 24 1 0 1
input 4.weight 0 1 1
input 4.bias 0 1 1
===> New node Gemm Gemm_4 order 4
output : 25
Checking input of Node 5 : Relu_5
input 25 1 0 1
===> New node Relu Relu_5 order 5
output : 26
Checking input of Node 6 : Gemm_6
input 26 1 0 1
input 6.weight 0 1 1
input 6.bias 0 1 1
===> New node Gemm Gemm_6 order 6
output : 27
Checking input of Node 7 : Relu_7
input 27 1 0 1
===> New node Relu Relu_7 order 7
output : 28
Checking input of Node 8 : Gemm_8
input 28 1 0 1
input 8.weight 0 1 1
input 8.bias 0 1 1
===> New node Gemm Gemm_8 order 8
output : 29
Checking input of Node 9 : Relu_9
input 29 1 0 1
===> New node Relu Relu_9 order 9
output : 30
Checking input of Node 10 : Gemm_10
input 30 1 0 1
input 10.weight 0 1 1
input 10.bias 0 1 1
===> New node Gemm Gemm_10 order 10
output : 31
Checking input of Node 11 : Relu_11
input 31 1 0 1
===> New node Relu Relu_11 order 11
output : 32
Checking input of Node 12 : Gemm_12
input 32 1 0 1
input 12.weight 0 1 1
input 12.bias 0 1 1
===> New node Gemm Gemm_12 order 12
output : 33
Checking input of Node 13 : Relu_13
input 33 1 0 1
===> New node Relu Relu_13 order 13
output : 34
Checking input of Node 14 : Gemm_14
input 34 1 0 1
input 14.weight 0 1 1
input 14.bias 0 1 1
===> New node Gemm Gemm_14 order 14
output : 35
Checking input of Node 15 : Relu_15
input 35 1 0 1
===> New node Relu Relu_15 order 15
output : 36
Checking input of Node 16 : Gemm_16
input 36 1 0 1
input 16.weight 0 1 1
input 16.bias 0 1 1
===> New node Gemm Gemm_16 order 16
output : 37
Checking input of Node 17 : Relu_17
input 37 1 0 1
===> New node Relu Relu_17 order 17
output : 38
Checking input of Node 18 : Gemm_18
input 38 1 0 1
input 18.weight 0 1 1
input 18.bias 0 1 1
===> New node Gemm Gemm_18 order 18
output : 39
Graph operator list (re-ordered)
Operator 0 : Gemm , Gemm_0 input tensors : {input.1, 0.weight, 0.bias } children : { [ 1 Relu , Relu_1]}
Operator 1 : Relu , Relu_1 input tensors : {21 } children : { [ 2 Gemm , Gemm_2]}
Operator 2 : Gemm , Gemm_2 input tensors : {22, 2.weight, 2.bias } children : { [ 3 Relu , Relu_3]}
Operator 3 : Relu , Relu_3 input tensors : {23 } children : { [ 4 Gemm , Gemm_4]}
Operator 4 : Gemm , Gemm_4 input tensors : {24, 4.weight, 4.bias } children : { [ 5 Relu , Relu_5]}
Operator 5 : Relu , Relu_5 input tensors : {25 } children : { [ 6 Gemm , Gemm_6]}
Operator 6 : Gemm , Gemm_6 input tensors : {26, 6.weight, 6.bias } children : { [ 7 Relu , Relu_7]}
Operator 7 : Relu , Relu_7 input tensors : {27 } children : { [ 8 Gemm , Gemm_8]}
Operator 8 : Gemm , Gemm_8 input tensors : {28, 8.weight, 8.bias } children : { [ 9 Relu , Relu_9]}
Operator 9 : Relu , Relu_9 input tensors : {29 } children : { [ 10 Gemm , Gemm_10]}
Operator 10 : Gemm , Gemm_10 input tensors : {30, 10.weight, 10.bias } children : { [ 11 Relu , Relu_11]}
Operator 11 : Relu , Relu_11 input tensors : {31 } children : { [ 12 Gemm , Gemm_12]}
Operator 12 : Gemm , Gemm_12 input tensors : {32, 12.weight, 12.bias } children : { [ 13 Relu , Relu_13]}
Operator 13 : Relu , Relu_13 input tensors : {33 } children : { [ 14 Gemm , Gemm_14]}
Operator 14 : Gemm , Gemm_14 input tensors : {34, 14.weight, 14.bias } children : { [ 15 Relu , Relu_15]}
Operator 15 : Relu , Relu_15 input tensors : {35 } children : { [ 16 Gemm , Gemm_16]}
Operator 16 : Gemm , Gemm_16 input tensors : {36, 16.weight, 16.bias } children : { [ 17 Relu , Relu_17]}
Operator 17 : Relu , Relu_17 input tensors : {37 } children : { [ 18 Gemm , Gemm_18]}
Operator 18 : Gemm , Gemm_18 input tensors : {38, 18.weight, 18.bias } children : {}
Fill RModel with operators...
0 0 parsing operator Gemm
Parsing operator Gemm
1 1 parsing operator Relu
Parsing operator Relu
skipping operator since it is fused with previous one
2 2 parsing operator Gemm
Parsing operator Gemm
3 3 parsing operator Relu
Parsing operator Relu
skipping operator since it is fused with previous one
4 4 parsing operator Gemm
Parsing operator Gemm
5 5 parsing operator Relu
Parsing operator Relu
skipping operator since it is fused with previous one
6 6 parsing operator Gemm
Parsing operator Gemm
7 7 parsing operator Relu
Parsing operator Relu
skipping operator since it is fused with previous one
8 8 parsing operator Gemm
Parsing operator Gemm
9 9 parsing operator Relu
Parsing operator Relu
skipping operator since it is fused with previous one
10 10 parsing operator Gemm
Parsing operator Gemm
11 11 parsing operator Relu
Parsing operator Relu
skipping operator since it is fused with previous one
12 12 parsing operator Gemm
Parsing operator Gemm
13 13 parsing operator Relu
Parsing operator Relu
skipping operator since it is fused with previous one
14 14 parsing operator Gemm
Parsing operator Gemm
15 15 parsing operator Relu
Parsing operator Relu
skipping operator since it is fused with previous one
16 16 parsing operator Gemm
Parsing operator Gemm
17 17 parsing operator Relu
Parsing operator Relu
skipping operator since it is fused with previous one
18 18 parsing operator Gemm
Parsing operator Gemm
Creating operator Gemm
Parsing Graph output list
output 0 name 39
Model requires the following inputs:
Fully Specified Tensor name: input1 type: float shape: [16,100]
Model initialized the following tensors:
Tensor name: "8weight" type: float shape: [50,50]
Tensor name: "8bias" type: float shape: [50]
Tensor name: "4bias" type: float shape: [50]
Tensor name: "2weight" type: float shape: [50,50]
Tensor name: "0bias" type: float shape: [50]
Tensor name: "12bias" type: float shape: [50]
Tensor name: "18bias" type: float shape: [10]
Tensor name: "14bias" type: float shape: [50]
Tensor name: "4weight" type: float shape: [50,50]
Tensor name: "10weight" type: float shape: [50,50]
Tensor name: "6bias" type: float shape: [50]
Tensor name: "18weight" type: float shape: [10,50]
Tensor name: "0weight" type: float shape: [50,100]
Tensor name: "10bias" type: float shape: [50]
Tensor name: "2bias" type: float shape: [50]
Tensor name: "6weight" type: float shape: [50,50]
Tensor name: "14weight" type: float shape: [50,50]
Tensor name: "16weight" type: float shape: [50,50]
Tensor name: "12weight" type: float shape: [50,50]
Tensor name: "16bias" type: float shape: [50]
Model specifies the following intermediate tensors:
Tensor name: "39" type: float shape: [16,10]
Tensor name: "18biasbcast" type: float shape: [16,10]
Tensor name: "38" type: float shape: [16,50]
Tensor name: "14biasbcast" type: float shape: [16,50]
Tensor name: "34" type: float shape: [16,50]
Tensor name: "22" type: float shape: [16,50]
Tensor name: "2biasbcast" type: float shape: [16,50]
Tensor name: "24" type: float shape: [16,50]
Tensor name: "0biasbcast" type: float shape: [16,50]
Tensor name: "6biasbcast" type: float shape: [16,50]
Tensor name: "4biasbcast" type: float shape: [16,50]
Tensor name: "16biasbcast" type: float shape: [16,50]
Tensor name: "8biasbcast" type: float shape: [16,50]
Tensor name: "26" type: float shape: [16,50]
Tensor name: "28" type: float shape: [16,50]
Tensor name: "10biasbcast" type: float shape: [16,50]
Tensor name: "30" type: float shape: [16,50]
Tensor name: "32" type: float shape: [16,50]
Tensor name: "36" type: float shape: [16,50]
Tensor name: "12biasbcast" type: float shape: [16,50]
Tensor "16weight" already exist: true
Shape of tensor "16weight": 50,50,
Data type of tensor "16weight": float
//Code generated automatically by TMVA for Inference of Model file [Linear_16.onnx] at [Fri Jan 9 02:32:53 2026]
#ifndef ROOT_TMVA_SOFIE_LINEAR_16
#define ROOT_TMVA_SOFIE_LINEAR_16
#include <algorithm>
#include <vector>
#include "TMVA/SOFIE_common.hxx"
#include <fstream>
namespace TMVA_SOFIE_Linear_16{
namespace BLAS{
// Fortran (column-major) reference-BLAS entry points used by the generated
// Gemm kernels below. All scalar arguments are passed by pointer, per the
// Fortran calling convention.
//
// sgemv_ computes y := alpha*op(A)*x + beta*y. Y is an in/out parameter and
// therefore must NOT be const (the original generated declaration had
// `const float * Y`, which contradicts the BLAS contract; it was harmless
// only because sgemv_ is never called in this file).
extern "C" void sgemv_(const char * trans, const int * m, const int * n, const float * alpha, const float * A,
                       const int * lda, const float * X, const int * incx, const float * beta, float * Y, const int * incy);
// sgemm_ computes C := alpha*op(A)*op(B) + beta*C; C is in/out (non-const).
extern "C" void sgemm_(const char * transa, const char * transb, const int * m, const int * n, const int * k,
                       const float * alpha, const float * A, const int * lda, const float * B, const int * ldb,
                       const float * beta, float * C, const int * ldc);
}//BLAS
struct Session {
// ---------------------------------------------------------------------------
// Inference session generated by TMVA/SOFIE for Linear_16.onnx.
// The network is a fixed-batch MLP: input [16,100] -> 9 x (Gemm + ReLU)
// hidden layers of width 50 -> final Gemm producing [16,10]
// (shapes are visible in the Gemm_Call arguments in doInfer below).
// Weights and biases are read from a companion binary file in the ctor.
// ---------------------------------------------------------------------------
// initialized tensors
// Each parameter is held in an owning std::vector (fTensor_*) paired with a
// raw-pointer alias (tensor_*) that the generated kernels use directly.
std::vector<float> fTensor_8weight = std::vector<float>(2500);
float * tensor_8weight = fTensor_8weight.data();
std::vector<float> fTensor_8bias = std::vector<float>(50);
float * tensor_8bias = fTensor_8bias.data();
std::vector<float> fTensor_4bias = std::vector<float>(50);
float * tensor_4bias = fTensor_4bias.data();
std::vector<float> fTensor_2weight = std::vector<float>(2500);
float * tensor_2weight = fTensor_2weight.data();
std::vector<float> fTensor_0bias = std::vector<float>(50);
float * tensor_0bias = fTensor_0bias.data();
std::vector<float> fTensor_12bias = std::vector<float>(50);
float * tensor_12bias = fTensor_12bias.data();
std::vector<float> fTensor_18bias = std::vector<float>(10);
float * tensor_18bias = fTensor_18bias.data();
std::vector<float> fTensor_14bias = std::vector<float>(50);
float * tensor_14bias = fTensor_14bias.data();
std::vector<float> fTensor_4weight = std::vector<float>(2500);
float * tensor_4weight = fTensor_4weight.data();
std::vector<float> fTensor_10weight = std::vector<float>(2500);
float * tensor_10weight = fTensor_10weight.data();
std::vector<float> fTensor_6bias = std::vector<float>(50);
float * tensor_6bias = fTensor_6bias.data();
std::vector<float> fTensor_18weight = std::vector<float>(500);
float * tensor_18weight = fTensor_18weight.data();
std::vector<float> fTensor_0weight = std::vector<float>(5000);
float * tensor_0weight = fTensor_0weight.data();
std::vector<float> fTensor_10bias = std::vector<float>(50);
float * tensor_10bias = fTensor_10bias.data();
std::vector<float> fTensor_2bias = std::vector<float>(50);
float * tensor_2bias = fTensor_2bias.data();
std::vector<float> fTensor_6weight = std::vector<float>(2500);
float * tensor_6weight = fTensor_6weight.data();
std::vector<float> fTensor_14weight = std::vector<float>(2500);
float * tensor_14weight = fTensor_14weight.data();
std::vector<float> fTensor_16weight = std::vector<float>(2500);
float * tensor_16weight = fTensor_16weight.data();
std::vector<float> fTensor_12weight = std::vector<float>(2500);
float * tensor_12weight = fTensor_12weight.data();
std::vector<float> fTensor_16bias = std::vector<float>(50);
float * tensor_16bias = fTensor_16bias.data();
//--- Allocating session memory pool to be used for allocating intermediate tensors
// The 6400-byte pool is deliberately reused: successive layer activations
// ping-pong between offsets 0 and 3200, which is safe because each
// activation is dead as soon as the next layer has consumed it.
// tensor_39 (640 bytes at offset 5760) overlaps the tail of tensor_36's
// region, but tensor_36 is no longer live when tensor_39 is written.
std::vector<char> fIntermediateMemoryPool = std::vector<char>(6400);
// --- Positioning intermediate tensor memory --
// Allocating memory for intermediate tensor 22 with size 3200 bytes
float* tensor_22 = reinterpret_cast<float*>(fIntermediateMemoryPool.data() + 0);
// Allocating memory for intermediate tensor 24 with size 3200 bytes
float* tensor_24 = reinterpret_cast<float*>(fIntermediateMemoryPool.data() + 3200);
// Allocating memory for intermediate tensor 26 with size 3200 bytes
float* tensor_26 = reinterpret_cast<float*>(fIntermediateMemoryPool.data() + 0);
// Allocating memory for intermediate tensor 28 with size 3200 bytes
float* tensor_28 = reinterpret_cast<float*>(fIntermediateMemoryPool.data() + 3200);
// Allocating memory for intermediate tensor 30 with size 3200 bytes
float* tensor_30 = reinterpret_cast<float*>(fIntermediateMemoryPool.data() + 0);
// Allocating memory for intermediate tensor 32 with size 3200 bytes
float* tensor_32 = reinterpret_cast<float*>(fIntermediateMemoryPool.data() + 3200);
// Allocating memory for intermediate tensor 34 with size 3200 bytes
float* tensor_34 = reinterpret_cast<float*>(fIntermediateMemoryPool.data() + 0);
// Allocating memory for intermediate tensor 36 with size 3200 bytes
float* tensor_36 = reinterpret_cast<float*>(fIntermediateMemoryPool.data() + 3200);
// Allocating memory for intermediate tensor 38 with size 3200 bytes
float* tensor_38 = reinterpret_cast<float*>(fIntermediateMemoryPool.data() + 0);
// Allocating memory for intermediate tensor 39 with size 640 bytes
float* tensor_39 = reinterpret_cast<float*>(fIntermediateMemoryPool.data() + 5760);
//--- declare and allocate the intermediate tensors
// Pre-broadcast copies of each bias, expanded from {C} to the batched shape
// {16, C} once in the constructor so every Gemm can add the bias via beta=1.
std::vector<float> fTensor_18biasbcast = std::vector<float>(160);
float * tensor_18biasbcast = fTensor_18biasbcast.data();
std::vector<float> fTensor_14biasbcast = std::vector<float>(800);
float * tensor_14biasbcast = fTensor_14biasbcast.data();
std::vector<float> fTensor_2biasbcast = std::vector<float>(800);
float * tensor_2biasbcast = fTensor_2biasbcast.data();
std::vector<float> fTensor_0biasbcast = std::vector<float>(800);
float * tensor_0biasbcast = fTensor_0biasbcast.data();
std::vector<float> fTensor_6biasbcast = std::vector<float>(800);
float * tensor_6biasbcast = fTensor_6biasbcast.data();
std::vector<float> fTensor_4biasbcast = std::vector<float>(800);
float * tensor_4biasbcast = fTensor_4biasbcast.data();
std::vector<float> fTensor_16biasbcast = std::vector<float>(800);
float * tensor_16biasbcast = fTensor_16biasbcast.data();
std::vector<float> fTensor_8biasbcast = std::vector<float>(800);
float * tensor_8biasbcast = fTensor_8biasbcast.data();
std::vector<float> fTensor_10biasbcast = std::vector<float>(800);
float * tensor_10biasbcast = fTensor_10biasbcast.data();
std::vector<float> fTensor_12biasbcast = std::vector<float>(800);
float * tensor_12biasbcast = fTensor_12biasbcast.data();
// Construct a session: loads every weight/bias tensor from `filename` and
// pre-broadcasts the biases into the *biasbcast buffers above.
// Throws std::runtime_error if the weight file cannot be opened.
Session(std::string filename ="Linear_16.dat") {
//--- reading weights from file
std::ifstream f;
f.open(filename);
if (!f.is_open()) {
throw std::runtime_error("tmva-sofie failed to open file " + filename + " for input weights");
}
using TMVA::Experimental::SOFIE::ReadTensorFromStream;
// Read order must match the order the .dat file was written in; each call
// reads exactly the given number of floats into the named buffer.
ReadTensorFromStream(f, tensor_8weight, "tensor_8weight", 2500);
ReadTensorFromStream(f, tensor_8bias, "tensor_8bias", 50);
ReadTensorFromStream(f, tensor_4bias, "tensor_4bias", 50);
ReadTensorFromStream(f, tensor_2weight, "tensor_2weight", 2500);
ReadTensorFromStream(f, tensor_0bias, "tensor_0bias", 50);
ReadTensorFromStream(f, tensor_12bias, "tensor_12bias", 50);
ReadTensorFromStream(f, tensor_18bias, "tensor_18bias", 10);
ReadTensorFromStream(f, tensor_14bias, "tensor_14bias", 50);
ReadTensorFromStream(f, tensor_4weight, "tensor_4weight", 2500);
ReadTensorFromStream(f, tensor_10weight, "tensor_10weight", 2500);
ReadTensorFromStream(f, tensor_6bias, "tensor_6bias", 50);
ReadTensorFromStream(f, tensor_18weight, "tensor_18weight", 500);
ReadTensorFromStream(f, tensor_0weight, "tensor_0weight", 5000);
ReadTensorFromStream(f, tensor_10bias, "tensor_10bias", 50);
ReadTensorFromStream(f, tensor_2bias, "tensor_2bias", 50);
ReadTensorFromStream(f, tensor_6weight, "tensor_6weight", 2500);
ReadTensorFromStream(f, tensor_14weight, "tensor_14weight", 2500);
ReadTensorFromStream(f, tensor_16weight, "tensor_16weight", 2500);
ReadTensorFromStream(f, tensor_12weight, "tensor_12weight", 2500);
ReadTensorFromStream(f, tensor_16bias, "tensor_16bias", 50);
f.close();
// NOTE(review): UnidirectionalBroadcast returns a heap array that each
// block below copies and then delete[]s — ownership stays local to the ctor.
//--- broadcast bias tensor 0bias for Gemm op
{
float * data = TMVA::Experimental::SOFIE::UTILITY::UnidirectionalBroadcast<float>(tensor_0bias,{ 50 }, { 16 , 50 });
std::copy(data, data + 800, tensor_0biasbcast);
delete [] data;
}
//--- broadcast bias tensor 2bias for Gemm op
{
float * data = TMVA::Experimental::SOFIE::UTILITY::UnidirectionalBroadcast<float>(tensor_2bias,{ 50 }, { 16 , 50 });
std::copy(data, data + 800, tensor_2biasbcast);
delete [] data;
}
//--- broadcast bias tensor 4bias for Gemm op
{
float * data = TMVA::Experimental::SOFIE::UTILITY::UnidirectionalBroadcast<float>(tensor_4bias,{ 50 }, { 16 , 50 });
std::copy(data, data + 800, tensor_4biasbcast);
delete [] data;
}
//--- broadcast bias tensor 6bias for Gemm op
{
float * data = TMVA::Experimental::SOFIE::UTILITY::UnidirectionalBroadcast<float>(tensor_6bias,{ 50 }, { 16 , 50 });
std::copy(data, data + 800, tensor_6biasbcast);
delete [] data;
}
//--- broadcast bias tensor 8bias for Gemm op
{
float * data = TMVA::Experimental::SOFIE::UTILITY::UnidirectionalBroadcast<float>(tensor_8bias,{ 50 }, { 16 , 50 });
std::copy(data, data + 800, tensor_8biasbcast);
delete [] data;
}
//--- broadcast bias tensor 10bias for Gemm op
{
float * data = TMVA::Experimental::SOFIE::UTILITY::UnidirectionalBroadcast<float>(tensor_10bias,{ 50 }, { 16 , 50 });
std::copy(data, data + 800, tensor_10biasbcast);
delete [] data;
}
//--- broadcast bias tensor 12bias for Gemm op
{
float * data = TMVA::Experimental::SOFIE::UTILITY::UnidirectionalBroadcast<float>(tensor_12bias,{ 50 }, { 16 , 50 });
std::copy(data, data + 800, tensor_12biasbcast);
delete [] data;
}
//--- broadcast bias tensor 14bias for Gemm op
{
float * data = TMVA::Experimental::SOFIE::UTILITY::UnidirectionalBroadcast<float>(tensor_14bias,{ 50 }, { 16 , 50 });
std::copy(data, data + 800, tensor_14biasbcast);
delete [] data;
}
//--- broadcast bias tensor 16bias for Gemm op
{
float * data = TMVA::Experimental::SOFIE::UTILITY::UnidirectionalBroadcast<float>(tensor_16bias,{ 50 }, { 16 , 50 });
std::copy(data, data + 800, tensor_16biasbcast);
delete [] data;
}
//--- broadcast bias tensor 18bias for Gemm op
{
float * data = TMVA::Experimental::SOFIE::UTILITY::UnidirectionalBroadcast<float>(tensor_18bias,{ 10 }, { 16 , 10 });
std::copy(data, data + 160, tensor_18biasbcast);
delete [] data;
}
}
// Run one inference pass. `tensor_input1` must point to 16*100 floats
// (row-major [16,100]); the 16*10 result is appended into output_tensor_39
// via FillOutput. Each hidden layer is a Gemm whose output is then passed
// through a fused in-place ReLU loop (x = max(x, 0) over all 800 elements).
// NOTE(review): the Gemm_Call flags (true, false) and sizes (m, n, k)
// presumably select a transposed-weight product — confirm against the
// SOFIE Gemm_Call implementation before relying on the layout here.
void doInfer(float const* tensor_input1, std::vector<float> &output_tensor_39 ){
//--------- Gemm
TMVA::Experimental::SOFIE::Gemm_Call(tensor_22, true, false, 50, 16, 100, 1,tensor_0weight, tensor_input1, 1,tensor_0biasbcast);
for (int id = 0; id < 800 ; id++){
tensor_22[id] = ((tensor_22[id] > 0 )? tensor_22[id] : 0);
}
//--------- Gemm
TMVA::Experimental::SOFIE::Gemm_Call(tensor_24, true, false, 50, 16, 50, 1,tensor_2weight, tensor_22, 1,tensor_2biasbcast);
for (int id = 0; id < 800 ; id++){
tensor_24[id] = ((tensor_24[id] > 0 )? tensor_24[id] : 0);
}
//--------- Gemm
TMVA::Experimental::SOFIE::Gemm_Call(tensor_26, true, false, 50, 16, 50, 1,tensor_4weight, tensor_24, 1,tensor_4biasbcast);
for (int id = 0; id < 800 ; id++){
tensor_26[id] = ((tensor_26[id] > 0 )? tensor_26[id] : 0);
}
//--------- Gemm
TMVA::Experimental::SOFIE::Gemm_Call(tensor_28, true, false, 50, 16, 50, 1,tensor_6weight, tensor_26, 1,tensor_6biasbcast);
for (int id = 0; id < 800 ; id++){
tensor_28[id] = ((tensor_28[id] > 0 )? tensor_28[id] : 0);
}
//--------- Gemm
TMVA::Experimental::SOFIE::Gemm_Call(tensor_30, true, false, 50, 16, 50, 1,tensor_8weight, tensor_28, 1,tensor_8biasbcast);
for (int id = 0; id < 800 ; id++){
tensor_30[id] = ((tensor_30[id] > 0 )? tensor_30[id] : 0);
}
//--------- Gemm
TMVA::Experimental::SOFIE::Gemm_Call(tensor_32, true, false, 50, 16, 50, 1,tensor_10weight, tensor_30, 1,tensor_10biasbcast);
for (int id = 0; id < 800 ; id++){
tensor_32[id] = ((tensor_32[id] > 0 )? tensor_32[id] : 0);
}
//--------- Gemm
TMVA::Experimental::SOFIE::Gemm_Call(tensor_34, true, false, 50, 16, 50, 1,tensor_12weight, tensor_32, 1,tensor_12biasbcast);
for (int id = 0; id < 800 ; id++){
tensor_34[id] = ((tensor_34[id] > 0 )? tensor_34[id] : 0);
}
//--------- Gemm
TMVA::Experimental::SOFIE::Gemm_Call(tensor_36, true, false, 50, 16, 50, 1,tensor_14weight, tensor_34, 1,tensor_14biasbcast);
for (int id = 0; id < 800 ; id++){
tensor_36[id] = ((tensor_36[id] > 0 )? tensor_36[id] : 0);
}
//--------- Gemm
TMVA::Experimental::SOFIE::Gemm_Call(tensor_38, true, false, 50, 16, 50, 1,tensor_16weight, tensor_36, 1,tensor_16biasbcast);
for (int id = 0; id < 800 ; id++){
tensor_38[id] = ((tensor_38[id] > 0 )? tensor_38[id] : 0);
}
//--------- Gemm (output layer: no ReLU after the final Gemm)
TMVA::Experimental::SOFIE::Gemm_Call(tensor_39, true, false, 10, 16, 50, 1,tensor_18weight, tensor_38, 1,tensor_18biasbcast);
using TMVA::Experimental::SOFIE::UTILITY::FillOutput;
FillOutput(tensor_39, output_tensor_39, 160);
}
// Convenience wrapper: allocates the output vector, runs doInfer, and
// returns the 160-element ([16,10]) result by value.
std::vector<float> infer(float const* tensor_input1){
std::vector<float > output_tensor_39;
doInfer(tensor_input1, output_tensor_39 );
return {output_tensor_39};
}
}; // end of Session
} //TMVA_SOFIE_Linear_16
#endif // ROOT_TMVA_SOFIE_LINEAR_16