ROOT 6.12/07 Reference Guide
TMVAMulticlass_MLP.class.C
// Class: ReadMLP
// Automatically generated by MethodBase::MakeClass
//

/* configuration options =====================================================

#GEN -*-*-*-*-*-*-*-*-*-*-*- general info -*-*-*-*-*-*-*-*-*-*-*-

Method         : MLP::MLP
TMVA Release   : 4.2.1 [262657]
ROOT Release   : 6.12/07 [396295]
Creator        : sftnight
Date           : Sat Sep 29 23:27:12 2018
Host           : Linux ec-ubuntu-14-04-x86-64-2 3.13.0-157-generic #207-Ubuntu SMP Mon Aug 20 16:44:59 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux
Dir            : /mnt/build/workspace/root-makedoc-v612/rootspi/rdoc/src/v6-12-00-patches/documentation/doxygen
Training events: 4000
Analysis type  : [Classification]


#OPT -*-*-*-*-*-*-*-*-*-*-*-*- options -*-*-*-*-*-*-*-*-*-*-*-*-

# Set by User:
NCycles: "1000" [Number of training cycles]
HiddenLayers: "N+5,5" [Specification of hidden layer architecture]
NeuronType: "tanh" [Neuron activation function type]
EstimatorType: "MSE" [MSE (Mean Square Estimator) for Gaussian likelihood or CE (Cross-Entropy) for Bernoulli likelihood]
V: "False" [Verbose output (short form of "VerbosityLevel" below - overrides the latter one)]
H: "False" [Print method-specific help message]
TestRate: "5" [Test for overtraining performed at each #th epoch]
# Default:
RandomSeed: "1" [Random seed for initial synapse weights (0 means unique seed for each run; default value '1')]
NeuronInputType: "sum" [Neuron input function type]
VerbosityLevel: "Default" [Verbosity level]
VarTransform: "None" [List of variable transformations performed before training, e.g. "D_Background,P_Signal,G,N_AllClasses" for: "Decorrelation, PCA-transformation, Gaussianisation, Normalisation, each for the given class of events ('AllClasses' denotes all events of all classes; if no class indication is given, 'All' is assumed)"]
CreateMVAPdfs: "False" [Create PDFs for classifier outputs (signal and background)]
IgnoreNegWeightsInTraining: "False" [Events with negative weights are ignored in the training (but are included for testing and performance evaluation)]
TrainingMethod: "BP" [Train with Back-Propagation (BP), the BFGS algorithm (BFGS), or a Genetic Algorithm (GA - slower and worse)]
LearningRate: "2.000000e-02" [ANN learning rate parameter]
DecayRate: "1.000000e-02" [Decay rate for learning parameter]
EpochMonitoring: "False" [Provide epoch-wise monitoring plots according to TestRate (caution: causes big ROOT output file!)]
Sampling: "1.000000e+00" [Only a fraction 'Sampling' of (randomly selected) events is trained in each epoch]
SamplingEpoch: "1.000000e+00" [Sampling is used for the first 'SamplingEpoch' fraction of the epochs; afterwards, all events are taken for training]
SamplingImportance: "1.000000e+00" [The sampling weights of events in successful epochs (worse estimator than before) are multiplied by SamplingImportance; otherwise they are divided by it]
SamplingTraining: "True" [The training sample is sampled]
SamplingTesting: "False" [The testing sample is sampled]
ResetStep: "50" [How often BFGS should reset history]
Tau: "3.000000e+00" [LineSearch "size step"]
BPMode: "sequential" [Back-propagation learning mode: sequential or batch]
BatchSize: "-1" [Batch size: number of events per batch; only used in batch mode, -1 for BatchSize=number_of_events]
ConvergenceImprove: "1.000000e-30" [Minimum improvement which counts as improvement (<0 means automatic convergence check is turned off)]
ConvergenceTests: "-1" [Number of steps (without improvement) required for convergence (<0 means automatic convergence check is turned off)]
UseRegulator: "False" [Use regulator to avoid over-training]
UpdateLimit: "10000" [Maximum number of regulator updates]
CalculateErrors: "False" [Calculate the inverse Hessian matrix at the end of the training, to be able to compute the uncertainties of an MVA value]
WeightRange: "1.000000e+00" [Take the events for the estimator calculations from small deviations from the desired value to large deviations only over the weight range]
##


#VAR -*-*-*-*-*-*-*-*-*-*-*-* variables *-*-*-*-*-*-*-*-*-*-*-*-

NVar 4
var1 var1 var1 var1 'F' [-4.05916023254,3.58076572418]
var2 var2 var2 Variable 2 'F' [-3.68905711174,3.78774046898]
var3 var3 var3 Variable 3 units 'F' [-3.61478614807,4.56402540207]
var4 var4 var4 Variable 4 units 'F' [-4.84856987,5.04116535187]
NSpec 0


============================================================================ */
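
// The options listed above form the booking string this method was trained
// with. As a rough sketch (not part of the generated file; "factory" and
// "dataloader" are hypothetical names from a typical TMVA multiclass setup),
// the equivalent booking call would look like:
//
//    factory->BookMethod( dataloader, TMVA::Types::kMLP, "MLP",
//       "NCycles=1000:HiddenLayers=N+5,5:NeuronType=tanh:EstimatorType=MSE:TestRate=5" );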

#include <array>
#include <vector>
#include <cmath>
#include <string>
#include <iostream>

#ifndef IClassifierReader__def
#define IClassifierReader__def

class IClassifierReader {

 public:

   // constructor
   IClassifierReader() : fStatusIsClean( true ) {}
   virtual ~IClassifierReader() {}

   // return classifier response
   virtual double GetMvaValue( const std::vector<double>& inputValues ) const = 0;

   // returns classifier status
   bool IsStatusClean() const { return fStatusIsClean; }

 protected:

   bool fStatusIsClean;
};

#endif

class ReadMLP : public IClassifierReader {

 public:

   // constructor
   ReadMLP( std::vector<std::string>& theInputVars )
      : IClassifierReader(),
        fClassName( "ReadMLP" ),
        fNvars( 4 ),
        fIsNormalised( false )
   {
      // the training input variables
      const char* inputVars[] = { "var1", "var2", "var3", "var4" };

      // sanity checks
      if (theInputVars.empty()) {
         std::cout << "Problem in class \"" << fClassName << "\": empty input vector" << std::endl;
         fStatusIsClean = false;
      }

      if (theInputVars.size() != fNvars) {
         std::cout << "Problem in class \"" << fClassName << "\": mismatch in number of input values: "
                   << theInputVars.size() << " != " << fNvars << std::endl;
         fStatusIsClean = false;
      }

      // validate input variables (bounded by fNvars to avoid reading past inputVars)
      for (size_t ivar = 0; ivar < theInputVars.size() && ivar < fNvars; ivar++) {
         if (theInputVars[ivar] != inputVars[ivar]) {
            std::cout << "Problem in class \"" << fClassName << "\": mismatch in input variable names" << std::endl
                      << " for variable [" << ivar << "]: " << theInputVars[ivar].c_str() << " != " << inputVars[ivar] << std::endl;
            fStatusIsClean = false;
         }
      }

      // initialize min and max vectors (for normalisation)
      fVmin[0] = 0;
      fVmax[0] = 0;
      fVmin[1] = 0;
      fVmax[1] = 0;
      fVmin[2] = 0;
      fVmax[2] = 0;
      fVmin[3] = 0;
      fVmax[3] = 0;

      // initialize input variable types
      fType[0] = 'F';
      fType[1] = 'F';
      fType[2] = 'F';
      fType[3] = 'F';

      // initialize constants
      Initialize();

   }

   // destructor
   virtual ~ReadMLP() {
      Clear(); // method-specific
   }

   // the classifier response
   // "inputValues" is a vector of input values in the same order as the
   // variables given to the constructor
   double GetMvaValue( const std::vector<double>& inputValues ) const;

 private:

   // method-specific destructor
   void Clear();

   // common member variables
   const char* fClassName;

   const size_t fNvars;
   size_t GetNvar() const { return fNvars; }
   char GetType( int ivar ) const { return fType[ivar]; }

   // normalisation of input variables
   const bool fIsNormalised;
   bool IsNormalised() const { return fIsNormalised; }
   double fVmin[4];
   double fVmax[4];
   double NormVariable( double x, double xmin, double xmax ) const {
      // normalise to output range: [-1, 1]
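      // e.g. x = 0.5 in [xmin, xmax] = [-1, 3] gives 2*1.5/4 - 1 = -0.25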
      return 2*(x - xmin)/(xmax - xmin) - 1.0;
   }

   // type of input variable: 'F' or 'I'
   char fType[4];

   // initialize internal variables
   void Initialize();
   double GetMvaValue__( const std::vector<double>& inputValues ) const;

   // private members (method specific)

   double ActivationFnc(double x) const;
   double OutputActivationFnc(double x) const;

   int fLayers;
   int fLayerSize[4];
   double fWeightMatrix0to1[10][5]; // weight matrix from layer 0 to 1
   double fWeightMatrix1to2[6][10]; // weight matrix from layer 1 to 2
   double fWeightMatrix2to3[4][6];  // weight matrix from layer 2 to 3

};

inline void ReadMLP::Initialize()
{
   // build network structure
   fLayers = 4;
   fLayerSize[0] = 5;
   fLayerSize[1] = 10;
   fLayerSize[2] = 6;
   fLayerSize[3] = 4;
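   // (sizes include one bias node per non-output layer: 4 inputs + bias = 5,
   // hidden layers "N+5,5" give 9 + bias = 10 and 5 + bias = 6, and there are
   // 4 output nodes, one per class)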
   // weight matrix from layer 0 to 1
   fWeightMatrix0to1[0][0] = -2.56827151871145;
   fWeightMatrix0to1[1][0] = 1.9675350707796;
   fWeightMatrix0to1[2][0] = 0.0945211430434372;
   fWeightMatrix0to1[3][0] = -0.534652842263711;
   fWeightMatrix0to1[4][0] = -1.20582392866552;
   fWeightMatrix0to1[5][0] = -1.15295712224926;
   fWeightMatrix0to1[6][0] = 0.062068122332932;
   fWeightMatrix0to1[7][0] = 1.77909967887342;
   fWeightMatrix0to1[8][0] = -0.465417129592263;
   fWeightMatrix0to1[0][1] = -1.14980631440935;
   fWeightMatrix0to1[1][1] = 0.997243902140403;
   fWeightMatrix0to1[2][1] = 0.23295461257945;
   fWeightMatrix0to1[3][1] = 0.597331762575396;
   fWeightMatrix0to1[4][1] = -1.07812121147322;
   fWeightMatrix0to1[5][1] = -0.904010711693679;
   fWeightMatrix0to1[6][1] = 0.763987509344495;
   fWeightMatrix0to1[7][1] = -0.42730495957135;
   fWeightMatrix0to1[8][1] = 1.98468529543297;
   fWeightMatrix0to1[0][2] = 0.255102649182503;
   fWeightMatrix0to1[1][2] = 1.33042896557683;
   fWeightMatrix0to1[2][2] = -1.01135742504311;
   fWeightMatrix0to1[3][2] = -0.163148696405109;
   fWeightMatrix0to1[4][2] = -0.874674280440262;
   fWeightMatrix0to1[5][2] = -0.605186137715753;
   fWeightMatrix0to1[6][2] = -1.29503636367852;
   fWeightMatrix0to1[7][2] = 0.0166465979160239;
   fWeightMatrix0to1[8][2] = -0.0838533085354363;
   fWeightMatrix0to1[0][3] = -0.298765733434783;
   fWeightMatrix0to1[1][3] = -3.61669821007974;
   fWeightMatrix0to1[2][3] = 0.230635988926395;
   fWeightMatrix0to1[3][3] = 1.41124122416393;
   fWeightMatrix0to1[4][3] = 2.50578787969626;
   fWeightMatrix0to1[5][3] = 1.24044263856194;
   fWeightMatrix0to1[6][3] = 0.444402811598472;
   fWeightMatrix0to1[7][3] = -0.0135466357087942;
   fWeightMatrix0to1[8][3] = 0.0114644267404459;
   fWeightMatrix0to1[0][4] = -2.80469640642522;
   fWeightMatrix0to1[1][4] = 2.03677268622447;
   fWeightMatrix0to1[2][4] = -1.2252550607933;
   fWeightMatrix0to1[3][4] = -0.400363817658847;
   fWeightMatrix0to1[4][4] = 1.74778853696658;
   fWeightMatrix0to1[5][4] = 3.02549270492086;
   fWeightMatrix0to1[6][4] = 2.62632004739209;
   fWeightMatrix0to1[7][4] = -1.05661183010273;
   fWeightMatrix0to1[8][4] = -2.27738442555204;
   // weight matrix from layer 1 to 2
   fWeightMatrix1to2[0][0] = -1.22570337882136;
   fWeightMatrix1to2[1][0] = -0.0931186540811387;
   fWeightMatrix1to2[2][0] = 0.0272542341322508;
   fWeightMatrix1to2[3][0] = 2.20635629364256;
   fWeightMatrix1to2[4][0] = 0.0433728010577296;
   fWeightMatrix1to2[0][1] = 0.273962369245395;
   fWeightMatrix1to2[1][1] = 0.0930582916641537;
   fWeightMatrix1to2[2][1] = -1.19664292914285;
   fWeightMatrix1to2[3][1] = 0.157749723688285;
   fWeightMatrix1to2[4][1] = -4.93727193311647;
   fWeightMatrix1to2[0][2] = -0.715714091172125;
   fWeightMatrix1to2[1][2] = 0.867788095978986;
   fWeightMatrix1to2[2][2] = -1.96198302938044;
   fWeightMatrix1to2[3][2] = -0.464404359512948;
   fWeightMatrix1to2[4][2] = 0.0595675643343206;
   fWeightMatrix1to2[0][3] = 0.758204927770786;
   fWeightMatrix1to2[1][3] = -2.33053777721601;
   fWeightMatrix1to2[2][3] = -0.0596569719756188;
   fWeightMatrix1to2[3][3] = -0.437338637677644;
   fWeightMatrix1to2[4][3] = 0.166445159553785;
   fWeightMatrix1to2[0][4] = 0.254272880836908;
   fWeightMatrix1to2[1][4] = -1.35815091193781;
   fWeightMatrix1to2[2][4] = -0.364973793826042;
   fWeightMatrix1to2[3][4] = 0.461639459085397;
   fWeightMatrix1to2[4][4] = 2.61649708222458;
   fWeightMatrix1to2[0][5] = -0.607652087990045;
   fWeightMatrix1to2[1][5] = -1.59745372261442;
   fWeightMatrix1to2[2][5] = -0.497497443898413;
   fWeightMatrix1to2[3][5] = 1.55870109321722;
   fWeightMatrix1to2[4][5] = 1.76169097043581;
   fWeightMatrix1to2[0][6] = 0.636994330715913;
   fWeightMatrix1to2[1][6] = -0.437626501220008;
   fWeightMatrix1to2[2][6] = -1.64640596581009;
   fWeightMatrix1to2[3][6] = 1.81557603834963;
   fWeightMatrix1to2[4][6] = 0.213460988352797;
   fWeightMatrix1to2[0][7] = 2.87318117712971;
   fWeightMatrix1to2[1][7] = -0.3992286418755;
   fWeightMatrix1to2[2][7] = -0.0828774633250926;
   fWeightMatrix1to2[3][7] = -2.32908344616873;
   fWeightMatrix1to2[4][7] = -0.00287196406204667;
   fWeightMatrix1to2[0][8] = 3.18538225792951;
   fWeightMatrix1to2[1][8] = 0.00204371301044828;
   fWeightMatrix1to2[2][8] = 0.00883819846358683;
   fWeightMatrix1to2[3][8] = -3.25311571046186;
   fWeightMatrix1to2[4][8] = 0.0863656137054146;
   fWeightMatrix1to2[0][9] = -2.58613271127948;
   fWeightMatrix1to2[1][9] = -1.61491969085433;
   fWeightMatrix1to2[2][9] = 1.46349048563992;
   fWeightMatrix1to2[3][9] = -3.52557440843157;
   fWeightMatrix1to2[4][9] = 0.774477596633972;
   // weight matrix from layer 2 to 3
   fWeightMatrix2to3[0][0] = 1.00887795674738;
   fWeightMatrix2to3[1][0] = 0.901934446106593;
   fWeightMatrix2to3[2][0] = 0.0103134082552144;
   fWeightMatrix2to3[3][0] = -4.15846758201171;
   fWeightMatrix2to3[0][1] = -1.76663219370744;
   fWeightMatrix2to3[1][1] = -3.16703807597721;
   fWeightMatrix2to3[2][1] = -0.721676163411028;
   fWeightMatrix2to3[3][1] = 2.88184594216196;
   fWeightMatrix2to3[0][2] = 1.05464986697278;
   fWeightMatrix2to3[1][2] = -1.33953128311584;
   fWeightMatrix2to3[2][2] = 2.06932178843474;
   fWeightMatrix2to3[3][2] = -0.566502440872972;
   fWeightMatrix2to3[0][3] = 1.1754576459947;
   fWeightMatrix2to3[1][3] = 1.04229738415628;
   fWeightMatrix2to3[2][3] = 0.409096508127509;
   fWeightMatrix2to3[3][3] = -4.91383178074721;
   fWeightMatrix2to3[0][4] = 2.89032187783364;
   fWeightMatrix2to3[1][4] = -0.640305503795561;
   fWeightMatrix2to3[2][4] = -3.0600188888794;
   fWeightMatrix2to3[3][4] = 0.412788618104668;
   fWeightMatrix2to3[0][5] = 0.441316337386854;
   fWeightMatrix2to3[1][5] = -0.0238371766698862;
   fWeightMatrix2to3[2][5] = 0.903246146264539;
   fWeightMatrix2to3[3][5] = 0.705588849148349;
}

inline double ReadMLP::GetMvaValue__( const std::vector<double>& inputValues ) const
{
   if (inputValues.size() != (unsigned int)fLayerSize[0]-1) {
      std::cout << "Input vector needs to be of size " << fLayerSize[0]-1 << std::endl;
      return 0;
   }

   std::array<double, 5> fWeights0 {{}};
   std::array<double, 10> fWeights1 {{}};
   std::array<double, 6> fWeights2 {{}};
   std::array<double, 4> fWeights3 {{}};
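   // the last entry of each non-output layer buffer is the bias node, held at 1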
   fWeights0.back() = 1.;
   fWeights1.back() = 1.;
   fWeights2.back() = 1.;

   for (int i=0; i<fLayerSize[0]-1; i++)
      fWeights0[i] = inputValues[i];

   // layer 0 to 1
   for (int o=0; o<fLayerSize[1]-1; o++) {
      for (int i=0; i<fLayerSize[0]; i++) {
         double inputVal = fWeightMatrix0to1[o][i] * fWeights0[i];
         fWeights1[o] += inputVal;
      } // loop over i
      fWeights1[o] = ActivationFnc(fWeights1[o]);
   } // loop over o
   // layer 1 to 2
   for (int o=0; o<fLayerSize[2]-1; o++) {
      for (int i=0; i<fLayerSize[1]; i++) {
         double inputVal = fWeightMatrix1to2[o][i] * fWeights1[i];
         fWeights2[o] += inputVal;
      } // loop over i
      fWeights2[o] = ActivationFnc(fWeights2[o]);
   } // loop over o
   // layer 2 to 3
   for (int o=0; o<fLayerSize[3]; o++) {
      for (int i=0; i<fLayerSize[2]; i++) {
         double inputVal = fWeightMatrix2to3[o][i] * fWeights2[i];
         fWeights3[o] += inputVal;
      } // loop over i
      fWeights3[o] = OutputActivationFnc(fWeights3[o]);
   } // loop over o

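   // note: the standalone reader returns only the first output node's response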
   return fWeights3[0];
}

double ReadMLP::ActivationFnc(double x) const {
   // hyperbolic tan
   return std::tanh(x);
}
double ReadMLP::OutputActivationFnc(double x) const {
   // identity
   return x;
}

// Clean up
inline void ReadMLP::Clear()
{
}
inline double ReadMLP::GetMvaValue( const std::vector<double>& inputValues ) const
{
   // classifier response value
   double retval = 0;

   // classifier response, sanity check first
   if (!IsStatusClean()) {
      std::cout << "Problem in class \"" << fClassName << "\": cannot return classifier response"
                << " because status is dirty" << std::endl;
      retval = 0;
   }
   else {
      if (IsNormalised()) {
         // normalise variables
         std::vector<double> iV;
         iV.reserve(inputValues.size());
         int ivar = 0;
         for (std::vector<double>::const_iterator varIt = inputValues.begin();
              varIt != inputValues.end(); varIt++, ivar++) {
            iV.push_back(NormVariable( *varIt, fVmin[ivar], fVmax[ivar] ));
         }
         retval = GetMvaValue__( iV );
      }
      else {
         retval = GetMvaValue__( inputValues );
      }
   }

   return retval;
}
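
/* Example usage (a sketch, not part of the generated file; the input values
   below are hypothetical):

      #include <iostream>
      #include <string>
      #include <vector>

      int main() {
         std::vector<std::string> vars = { "var1", "var2", "var3", "var4" };
         ReadMLP mlp( vars );  // names must match the training order
         std::vector<double> event = { 0.1, -0.4, 1.2, 0.7 };
         if (mlp.IsStatusClean())
            std::cout << mlp.GetMvaValue( event ) << std::endl;
         return 0;
      }
*/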