ROOT 6.08/07 Reference Guide
TMVAMulticlass_MLP.class.C
// Class: ReadMLP
// Automatically generated by MethodBase::MakeClass
//

/* configuration options =====================================================

#GEN -*-*-*-*-*-*-*-*-*-*-*- general info -*-*-*-*-*-*-*-*-*-*-*-

Method : MLP::MLP
TMVA Release : 4.2.1 [262657]
ROOT Release : 6.08/07 [395271]
Creator : sftnight
Date : Thu May 31 21:38:10 2018
Host : Linux SFT-ubuntu-1710-1 4.13.0-31-generic #34-Ubuntu SMP Fri Jan 19 16:34:46 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux
Dir : /mnt/build/workspace/root-makedoc-v608/rootspi/rdoc/src/v6-08-00-patches/documentation/doxygen
Training events: 4000
Analysis type : [Classification]


#OPT -*-*-*-*-*-*-*-*-*-*-*-*- options -*-*-*-*-*-*-*-*-*-*-*-*-

# Set by User:
NCycles: "1000" [Number of training cycles]
HiddenLayers: "N+5,5" [Specification of hidden layer architecture]
NeuronType: "tanh" [Neuron activation function type]
EstimatorType: "MSE" [MSE (Mean Square Estimator) for Gaussian Likelihood or CE (Cross-Entropy) for Bernoulli Likelihood]
V: "False" [Verbose output (short form of "VerbosityLevel" below - overrides the latter one)]
H: "False" [Print method-specific help message]
TestRate: "5" [Test for overtraining performed at each #th epoch]
# Default:
RandomSeed: "1" [Random seed for initial synapse weights (0 means unique seed for each run; default value '1')]
NeuronInputType: "sum" [Neuron input function type]
VerbosityLevel: "Default" [Verbosity level]
VarTransform: "None" [List of variable transformations performed before training, e.g., "D_Background,P_Signal,G,N_AllClasses" for: "Decorrelation, PCA-transformation, Gaussianisation, Normalisation, each for the given class of events ('AllClasses' denotes all events of all classes, if no class indication is given, 'All' is assumed)"]
CreateMVAPdfs: "False" [Create PDFs for classifier outputs (signal and background)]
IgnoreNegWeightsInTraining: "False" [Events with negative weights are ignored in the training (but are included for testing and performance evaluation)]
TrainingMethod: "BP" [Train with Back-Propagation (BP), BFGS Algorithm (BFGS), or Genetic Algorithm (GA - slower and worse)]
LearningRate: "2.000000e-02" [ANN learning rate parameter]
DecayRate: "1.000000e-02" [Decay rate for learning parameter]
EpochMonitoring: "False" [Provide epoch-wise monitoring plots according to TestRate (caution: causes big ROOT output file!)]
Sampling: "1.000000e+00" [Only 'Sampling' (randomly selected) events are trained each epoch]
SamplingEpoch: "1.000000e+00" [Sampling is used for the first 'SamplingEpoch' epochs; afterwards, all events are taken for training]
SamplingImportance: "1.000000e+00" [The sampling weights of events in epochs which are successful (worse estimator than before) are multiplied by SamplingImportance; otherwise they are divided.]
SamplingTraining: "True" [The training sample is sampled]
SamplingTesting: "False" [The testing sample is sampled]
ResetStep: "50" [How often BFGS should reset history]
Tau: "3.000000e+00" [LineSearch "size step"]
BPMode: "sequential" [Back-propagation learning mode: sequential or batch]
BatchSize: "-1" [Batch size: number of events/batch, only set if in Batch Mode, -1 for BatchSize=number_of_events]
ConvergenceImprove: "1.000000e-30" [Minimum improvement which counts as improvement (<0 means automatic convergence check is turned off)]
ConvergenceTests: "-1" [Number of steps (without improvement) required for convergence (<0 means automatic convergence check is turned off)]
UseRegulator: "False" [Use regulator to avoid over-training]
UpdateLimit: "10000" [Maximum times of regulator update]
CalculateErrors: "False" [Calculates inverse Hessian matrix at the end of the training to be able to calculate the uncertainties of an MVA value]
WeightRange: "1.000000e+00" [Take the events for the estimator calculations from small deviations from the desired value to large deviations only over the weight range]
##


#VAR -*-*-*-*-*-*-*-*-*-*-*-* variables *-*-*-*-*-*-*-*-*-*-*-*-

NVar 4
var1 var1 var1 var1 'F' [-4.05916023254,3.58076572418]
var2 var2 var2 Variable 2 'F' [-3.68905711174,3.78774046898]
var3 var3 var3 Variable 3 units 'F' [-3.61478614807,4.56402540207]
var4 var4 var4 Variable 4 units 'F' [-4.84856987,5.04116535187]
NSpec 0


============================================================================ */
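
/* For reference: user-set options like the ones above would typically come from
   a booking call of this shape in the training macro (a sketch modeled on the
   TMVAMulticlass tutorial; the "factory" and "dataloader" objects are
   assumptions, not part of this file):

      factory->BookMethod( dataloader, TMVA::Types::kMLP, "MLP",
          "!H:!V:NeuronType=tanh:NCycles=1000:HiddenLayers=N+5,5:TestRate=5:EstimatorType=MSE" );
*/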

#include <vector>
#include <cmath>
#include <string>
#include <iostream>

#ifndef IClassifierReader__def
#define IClassifierReader__def

class IClassifierReader {

 public:

   // constructor
   IClassifierReader() : fStatusIsClean( true ) {}
   virtual ~IClassifierReader() {}

   // return classifier response
   virtual double GetMvaValue( const std::vector<double>& inputValues ) const = 0;

   // returns classifier status
   bool IsStatusClean() const { return fStatusIsClean; }

 protected:

   bool fStatusIsClean;
};

#endif

class ReadMLP : public IClassifierReader {

 public:

   // constructor
   ReadMLP( std::vector<std::string>& theInputVars )
      : IClassifierReader(),
        fClassName( "ReadMLP" ),
        fNvars( 4 ),
        fIsNormalised( false )
   {
      // the training input variables
      const char* inputVars[] = { "var1", "var2", "var3", "var4" };

      // sanity checks
      if (theInputVars.size() <= 0) {
         std::cout << "Problem in class \"" << fClassName << "\": empty input vector" << std::endl;
         fStatusIsClean = false;
      }

      if (theInputVars.size() != fNvars) {
         std::cout << "Problem in class \"" << fClassName << "\": mismatch in number of input values: "
                   << theInputVars.size() << " != " << fNvars << std::endl;
         fStatusIsClean = false;
      }

      // validate input variables
      for (size_t ivar = 0; ivar < theInputVars.size(); ivar++) {
         if (theInputVars[ivar] != inputVars[ivar]) {
            std::cout << "Problem in class \"" << fClassName << "\": mismatch in input variable names" << std::endl
                      << " for variable [" << ivar << "]: " << theInputVars[ivar].c_str() << " != " << inputVars[ivar] << std::endl;
            fStatusIsClean = false;
         }
      }

      // initialize min and max vectors (for normalisation)
      fVmin[0] = 0;
      fVmax[0] = 0;
      fVmin[1] = 0;
      fVmax[1] = 0;
      fVmin[2] = 0;
      fVmax[2] = 0;
      fVmin[3] = 0;
      fVmax[3] = 0;

      // initialize input variable types
      fType[0] = 'F';
      fType[1] = 'F';
      fType[2] = 'F';
      fType[3] = 'F';

      // initialize constants
      Initialize();

   }

   // destructor
   virtual ~ReadMLP() {
      Clear(); // method-specific
   }

   // the classifier response
   // "inputValues" is a vector of input values in the same order as the
   // variables given to the constructor
   double GetMvaValue( const std::vector<double>& inputValues ) const;

 private:

   // method-specific destructor
   void Clear();

   // common member variables
   const char* fClassName;

   const size_t fNvars;
   size_t GetNvar() const { return fNvars; }
   char GetType( int ivar ) const { return fType[ivar]; }

   // normalisation of input variables
   const bool fIsNormalised;
   bool IsNormalised() const { return fIsNormalised; }
   double fVmin[4];
   double fVmax[4];
   double NormVariable( double x, double xmin, double xmax ) const {
      // normalise to output range: [-1, 1]
      return 2*(x - xmin)/(xmax - xmin) - 1.0;
   }

   // type of input variable: 'F' or 'I'
   char fType[4];

   // initialize internal variables
   void Initialize();
   double GetMvaValue__( const std::vector<double>& inputValues ) const;

   // private members (method specific)

   double ActivationFnc(double x) const;
   double OutputActivationFnc(double x) const;

   int fLayers;
   int fLayerSize[4];
   double fWeightMatrix0to1[10][5];   // weight matrix from layer 0 to 1
   double fWeightMatrix1to2[6][10];   // weight matrix from layer 1 to 2
   double fWeightMatrix2to3[4][6];    // weight matrix from layer 2 to 3

   double* fWeights[4];
};

inline void ReadMLP::Initialize()
{
   // build network structure
   fLayers = 4;
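   // layer sizes include one bias node each: 4 inputs + bias, 9 ("N+5") hidden + bias,
   // 5 hidden + bias, and 4 class output nodes (no bias)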
   fLayerSize[0] = 5; fWeights[0] = new double[5];
   fLayerSize[1] = 10; fWeights[1] = new double[10];
   fLayerSize[2] = 6; fWeights[2] = new double[6];
   fLayerSize[3] = 4; fWeights[3] = new double[4];
   // weight matrix from layer 0 to 1
   fWeightMatrix0to1[0][0] = -0.38778850670751;
   fWeightMatrix0to1[1][0] = 1.77075352763869;
   fWeightMatrix0to1[2][0] = -0.495887946364255;
   fWeightMatrix0to1[3][0] = -1.07287478074003;
   fWeightMatrix0to1[4][0] = -1.27493222222149;
   fWeightMatrix0to1[5][0] = -0.232068331688551;
   fWeightMatrix0to1[6][0] = -0.0100184567840386;
   fWeightMatrix0to1[7][0] = -0.504687461356197;
   fWeightMatrix0to1[8][0] = -0.41456998867676;
   fWeightMatrix0to1[0][1] = 0.344484078249326;
   fWeightMatrix0to1[1][1] = 0.982416309163123;
   fWeightMatrix0to1[2][1] = -0.0419129952225741;
   fWeightMatrix0to1[3][1] = -0.703283427306373;
   fWeightMatrix0to1[4][1] = -0.768397705168322;
   fWeightMatrix0to1[5][1] = -0.166873265311981;
   fWeightMatrix0to1[6][1] = 0.551561437331797;
   fWeightMatrix0to1[7][1] = -2.49983402360569;
   fWeightMatrix0to1[8][1] = 3.22680407145307;
   fWeightMatrix0to1[0][2] = 0.384100424207592;
   fWeightMatrix0to1[1][2] = 0.872517592990635;
   fWeightMatrix0to1[2][2] = -0.0871975702555971;
   fWeightMatrix0to1[3][2] = -1.52887190551762;
   fWeightMatrix0to1[4][2] = -0.168358013186753;
   fWeightMatrix0to1[5][2] = 1.50557472966874;
   fWeightMatrix0to1[6][2] = -1.17198963689223;
   fWeightMatrix0to1[7][2] = -0.0740450592612006;
   fWeightMatrix0to1[8][2] = -0.157477458717336;
   fWeightMatrix0to1[0][3] = -0.381185082592116;
   fWeightMatrix0to1[1][3] = -3.17908830387991;
   fWeightMatrix0to1[2][3] = -0.422381779159728;
   fWeightMatrix0to1[3][3] = 2.54234963914718;
   fWeightMatrix0to1[4][3] = 1.72413680540312;
   fWeightMatrix0to1[5][3] = -1.03648108259518;
   fWeightMatrix0to1[6][3] = 0.75010463707696;
   fWeightMatrix0to1[7][3] = -0.120045773739285;
   fWeightMatrix0to1[8][3] = 0.00520098351826005;
   fWeightMatrix0to1[0][4] = -2.23811315621248;
   fWeightMatrix0to1[1][4] = 0.299577800759728;
   fWeightMatrix0to1[2][4] = 0.173737774048086;
   fWeightMatrix0to1[3][4] = 2.01992950842827;
   fWeightMatrix0to1[4][4] = 2.47067034525462;
   fWeightMatrix0to1[5][4] = 2.69933718728504;
   fWeightMatrix0to1[6][4] = 1.24979124792121;
   fWeightMatrix0to1[7][4] = 4.37768351492713;
   fWeightMatrix0to1[8][4] = -3.6655386905835;
   // weight matrix from layer 1 to 2
   fWeightMatrix1to2[0][0] = -2.47375060347935;
   fWeightMatrix1to2[1][0] = 1.84237698308281;
   fWeightMatrix1to2[2][0] = -0.915246344719473;
   fWeightMatrix1to2[3][0] = 0.742030598942647;
   fWeightMatrix1to2[4][0] = 1.54286295830321;
   fWeightMatrix1to2[0][1] = 1.04760275173647;
   fWeightMatrix1to2[1][1] = 0.251274036343028;
   fWeightMatrix1to2[2][1] = 0.133311422450511;
   fWeightMatrix1to2[3][1] = -0.362651699534128;
   fWeightMatrix1to2[4][1] = -1.26560770951767;
   fWeightMatrix1to2[0][2] = -0.542093135020647;
   fWeightMatrix1to2[1][2] = -0.60714945425257;
   fWeightMatrix1to2[2][2] = -0.429759617883957;
   fWeightMatrix1to2[3][2] = 1.49053600605261;
   fWeightMatrix1to2[4][2] = 0.186321270572984;
   fWeightMatrix1to2[0][3] = -1.26885413818901;
   fWeightMatrix1to2[1][3] = 0.403278533237819;
   fWeightMatrix1to2[2][3] = -2.37515789916025;
   fWeightMatrix1to2[3][3] = -0.682549634822121;
   fWeightMatrix1to2[4][3] = 0.895671716449018;
   fWeightMatrix1to2[0][4] = 1.00468665089631;
   fWeightMatrix1to2[1][4] = 0.00294619057952214;
   fWeightMatrix1to2[2][4] = -0.858376459553123;
   fWeightMatrix1to2[3][4] = 2.31562800007637;
   fWeightMatrix1to2[4][4] = 0.39209065116205;
   fWeightMatrix1to2[0][5] = 0.453890573142123;
   fWeightMatrix1to2[1][5] = -1.65086032608904;
   fWeightMatrix1to2[2][5] = -1.38809189733852;
   fWeightMatrix1to2[3][5] = 2.00030576315926;
   fWeightMatrix1to2[4][5] = 1.28885335051608;
   fWeightMatrix1to2[0][6] = 0.737008121249608;
   fWeightMatrix1to2[1][6] = -2.07776319325285;
   fWeightMatrix1to2[2][6] = -0.395007874498507;
   fWeightMatrix1to2[3][6] = 0.71934456144467;
   fWeightMatrix1to2[4][6] = 0.0157924737451731;
   fWeightMatrix1to2[0][7] = 1.1303787673988;
   fWeightMatrix1to2[1][7] = 0.245023395607365;
   fWeightMatrix1to2[2][7] = 1.37767650460826;
   fWeightMatrix1to2[3][7] = -2.94220491047599;
   fWeightMatrix1to2[4][7] = -1.04119716294614;
   fWeightMatrix1to2[0][8] = 0.230803202575464;
   fWeightMatrix1to2[1][8] = 0.693532198172888;
   fWeightMatrix1to2[2][8] = 1.05733862485925;
   fWeightMatrix1to2[3][8] = -1.97011105977166;
   fWeightMatrix1to2[4][8] = -0.897528569439193;
   fWeightMatrix1to2[0][9] = 0.953931221485957;
   fWeightMatrix1to2[1][9] = -1.26759017153591;
   fWeightMatrix1to2[2][9] = 2.74759900060607;
   fWeightMatrix1to2[3][9] = -0.656795897499413;
   fWeightMatrix1to2[4][9] = -1.23312229444889;
   // weight matrix from layer 2 to 3
   fWeightMatrix2to3[0][0] = 0.809374089520293;
   fWeightMatrix2to3[1][0] = -0.217255510142443;
   fWeightMatrix2to3[2][0] = -0.452803002823739;
   fWeightMatrix2to3[3][0] = -0.50457845423497;
   fWeightMatrix2to3[0][1] = 0.782281118310111;
   fWeightMatrix2to3[1][1] = -0.665086851180649;
   fWeightMatrix2to3[2][1] = -1.10864578434138;
   fWeightMatrix2to3[3][1] = 0.171136991984684;
   fWeightMatrix2to3[0][2] = 0.0224037450653728;
   fWeightMatrix2to3[1][2] = -0.514585812191245;
   fWeightMatrix2to3[2][2] = 0.498674855353497;
   fWeightMatrix2to3[3][2] = -0.0063816407743405;
   fWeightMatrix2to3[0][3] = 0.0129312070692756;
   fWeightMatrix2to3[1][3] = 0.048805104141024;
   fWeightMatrix2to3[2][3] = 0.463802128086056;
   fWeightMatrix2to3[3][3] = -0.526403437932214;
   fWeightMatrix2to3[0][4] = 0.586629132427303;
   fWeightMatrix2to3[1][4] = -0.56516260388251;
   fWeightMatrix2to3[2][4] = -0.0193992692312397;
   fWeightMatrix2to3[3][4] = -0.00243559497633667;
   fWeightMatrix2to3[0][5] = 0.529319697003467;
   fWeightMatrix2to3[1][5] = -0.466670469159831;
   fWeightMatrix2to3[2][5] = -0.694783758016973;
   fWeightMatrix2to3[3][5] = 1.17756234424668;
}

inline double ReadMLP::GetMvaValue__( const std::vector<double>& inputValues ) const
{
   if (inputValues.size() != (unsigned int)fLayerSize[0]-1) {
      std::cout << "Input vector needs to be of size " << fLayerSize[0]-1 << std::endl;
      return 0;
   }

   for (int l=0; l<fLayers; l++)
      for (int i=0; i<fLayerSize[l]; i++) fWeights[l][i]=0;

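   // the last node of each non-output layer is the bias node, fixed to 1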
   for (int l=0; l<fLayers-1; l++)
      fWeights[l][fLayerSize[l]-1]=1;

   for (int i=0; i<fLayerSize[0]-1; i++)
      fWeights[0][i]=inputValues[i];

   // layer 0 to 1
   for (int o=0; o<fLayerSize[1]-1; o++) {
      for (int i=0; i<fLayerSize[0]; i++) {
         double inputVal = fWeightMatrix0to1[o][i] * fWeights[0][i];
         fWeights[1][o] += inputVal;
      }
      fWeights[1][o] = ActivationFnc(fWeights[1][o]);
   }
   // layer 1 to 2
   for (int o=0; o<fLayerSize[2]-1; o++) {
      for (int i=0; i<fLayerSize[1]; i++) {
         double inputVal = fWeightMatrix1to2[o][i] * fWeights[1][i];
         fWeights[2][o] += inputVal;
      }
      fWeights[2][o] = ActivationFnc(fWeights[2][o]);
   }
   // layer 2 to 3
   for (int o=0; o<fLayerSize[3]; o++) {
      for (int i=0; i<fLayerSize[2]; i++) {
         double inputVal = fWeightMatrix2to3[o][i] * fWeights[2][i];
         fWeights[3][o] += inputVal;
      }
      fWeights[3][o] = OutputActivationFnc(fWeights[3][o]);
   }

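   // note: the output layer holds four class nodes, but only node 0 is returned here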
   return fWeights[3][0];
}

double ReadMLP::ActivationFnc(double x) const {
   // hyperbolic tan
   return std::tanh(x);
}
double ReadMLP::OutputActivationFnc(double x) const {
   // identity
   return x;
}

// Clean up
inline void ReadMLP::Clear()
{
   // clean up the arrays
   for (int lIdx = 0; lIdx < 4; lIdx++) {
      delete[] fWeights[lIdx];
   }
}

inline double ReadMLP::GetMvaValue( const std::vector<double>& inputValues ) const
{
   // classifier response value
   double retval = 0;

   // classifier response, sanity check first
   if (!IsStatusClean()) {
      std::cout << "Problem in class \"" << fClassName << "\": cannot return classifier response"
                << " because status is dirty" << std::endl;
      retval = 0;
   }
   else {
      if (IsNormalised()) {
         // normalise variables
         std::vector<double> iV;
         iV.reserve(inputValues.size());
         int ivar = 0;
         for (std::vector<double>::const_iterator varIt = inputValues.begin();
              varIt != inputValues.end(); varIt++, ivar++) {
            iV.push_back(NormVariable( *varIt, fVmin[ivar], fVmax[ivar] ));
         }
         retval = GetMvaValue__( iV );
      }
      else {
         retval = GetMvaValue__( inputValues );
      }
   }

   return retval;
}
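
// ---------------------------------------------------------------------------
// Example use (not part of the generated class): a minimal sketch of how such a
// standalone reader is evaluated without linking against TMVA. The function
// name "ExampleUse" and the event values are made up for illustration.
inline double ExampleUse()
{
   // variable names must match the training variables, in the same order
   std::vector<std::string> vars;
   vars.push_back("var1");
   vars.push_back("var2");
   vars.push_back("var3");
   vars.push_back("var4");
   ReadMLP mlp( vars );

   // one hypothetical event with four input values
   std::vector<double> event;
   event.push_back(0.5);
   event.push_back(-1.2);
   event.push_back(0.3);
   event.push_back(2.1);

   // evaluate only if the constructor's sanity checks passed
   return mlp.IsStatusClean() ? mlp.GetMvaValue( event ) : 0;
}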