TMVAMulticlass_MLP.class.C
// Class: ReadMLP
// Automatically generated by MethodBase::MakeClass
//

/* configuration options =====================================================

#GEN -*-*-*-*-*-*-*-*-*-*-*- general info -*-*-*-*-*-*-*-*-*-*-*-

Method : MLP::MLP
TMVA Release : 4.2.1 [262657]
ROOT Release : 6.10/09 [395785]
Creator : sftnight
Date : Thu May 31 12:05:20 2018
Host : Linux SFT-ubuntu-1710-1 4.13.0-31-generic #34-Ubuntu SMP Fri Jan 19 16:34:46 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux
Dir : /mnt/build/workspace/root-makedoc-v610/rootspi/rdoc/src/v6-10-00-patches/documentation/doxygen
Training events: 4000
Analysis type : [Classification]


#OPT -*-*-*-*-*-*-*-*-*-*-*-*- options -*-*-*-*-*-*-*-*-*-*-*-*-

# Set by User:
NCycles: "1000" [Number of training cycles]
HiddenLayers: "N+5,5" [Specification of hidden layer architecture]
NeuronType: "tanh" [Neuron activation function type]
EstimatorType: "MSE" [MSE (Mean Square Estimator) for Gaussian Likelihood or CE (Cross-Entropy) for Bernoulli Likelihood]
V: "False" [Verbose output (short form of "VerbosityLevel" below - overrides the latter one)]
H: "False" [Print method-specific help message]
TestRate: "5" [Test for overtraining performed at each #th epoch]
# Default:
RandomSeed: "1" [Random seed for initial synapse weights (0 means unique seed for each run; default value '1')]
NeuronInputType: "sum" [Neuron input function type]
VerbosityLevel: "Default" [Verbosity level]
VarTransform: "None" [List of variable transformations performed before training, e.g., "D_Background,P_Signal,G,N_AllClasses" for: "Decorrelation, PCA-transformation, Gaussianisation, Normalisation, each for the given class of events ('AllClasses' denotes all events of all classes, if no class indication is given, 'All' is assumed)"]
CreateMVAPdfs: "False" [Create PDFs for classifier outputs (signal and background)]
IgnoreNegWeightsInTraining: "False" [Events with negative weights are ignored in the training (but are included for testing and performance evaluation)]
TrainingMethod: "BP" [Train with Back-Propagation (BP), BFGS Algorithm (BFGS), or Genetic Algorithm (GA - slower and worse)]
LearningRate: "2.000000e-02" [ANN learning rate parameter]
DecayRate: "1.000000e-02" [Decay rate for learning parameter]
EpochMonitoring: "False" [Provide epoch-wise monitoring plots according to TestRate (caution: causes big ROOT output file!)]
Sampling: "1.000000e+00" [Only 'Sampling' (randomly selected) events are trained each epoch]
SamplingEpoch: "1.000000e+00" [Sampling is used for the first 'SamplingEpoch' epochs; afterwards, all events are taken for training]
SamplingImportance: "1.000000e+00" [The sampling weights of events in epochs which are successful (worse estimator than before) are multiplied by SamplingImportance; otherwise they are divided by it.]
SamplingTraining: "True" [The training sample is sampled]
SamplingTesting: "False" [The testing sample is sampled]
ResetStep: "50" [How often BFGS should reset history]
Tau: "3.000000e+00" [LineSearch "size step"]
BPMode: "sequential" [Back-propagation learning mode: sequential or batch]
BatchSize: "-1" [Batch size: number of events/batch, only set if in Batch Mode, -1 for BatchSize=number_of_events]
ConvergenceImprove: "1.000000e-30" [Minimum improvement which counts as improvement (<0 means automatic convergence check is turned off)]
ConvergenceTests: "-1" [Number of steps (without improvement) required for convergence (<0 means automatic convergence check is turned off)]
UseRegulator: "False" [Use regulator to avoid over-training]
UpdateLimit: "10000" [Maximum number of regulator updates]
CalculateErrors: "False" [Calculates inverse Hessian matrix at the end of the training to be able to calculate the uncertainties of an MVA value]
WeightRange: "1.000000e+00" [Take the events for the estimator calculations from small deviations from the desired value to large deviations only over the weight range]
##


#VAR -*-*-*-*-*-*-*-*-*-*-*-* variables *-*-*-*-*-*-*-*-*-*-*-*-

NVar 4
var1 var1 var1 var1 'F' [-4.05916023254,3.58076572418]
var2 var2 var2 Variable 2 'F' [-3.68905711174,3.78774046898]
var3 var3 var3 Variable 3 units 'F' [-3.61478614807,4.56402540207]
var4 var4 var4 Variable 4 units 'F' [-4.84856987,5.04116535187]
NSpec 0


============================================================================ */
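
/* Booking sketch (not part of the generated class): the option string above
   can be reproduced when booking the method with the TMVA Factory. This is a
   minimal, hedged example; the job name, output file, "dataset" loader name,
   and the omitted tree setup are placeholders, not taken from this file.

   #include "TMVA/Factory.h"
   #include "TMVA/DataLoader.h"
   #include "TFile.h"

   void book_mlp() {
      TFile* out = TFile::Open("TMVAMulticlass.root", "RECREATE");
      TMVA::Factory factory("TMVAMulticlass", out,
                            "!V:!Silent:AnalysisType=multiclass");
      TMVA::DataLoader loader("dataset");
      loader.AddVariable("var1");   // the four training variables listed above
      loader.AddVariable("var2");
      loader.AddVariable("var3");
      loader.AddVariable("var4");
      // ... add class trees and call PrepareTrainingAndTestTree here ...
      factory.BookMethod(&loader, TMVA::Types::kMLP, "MLP",
                         "NCycles=1000:HiddenLayers=N+5,5:NeuronType=tanh:"
                         "EstimatorType=MSE:TestRate=5");
      factory.TrainAllMethods();
   }
*/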

#include <vector>
#include <cmath>
#include <string>
#include <iostream>

#ifndef IClassifierReader__def
#define IClassifierReader__def

class IClassifierReader {

 public:

   // constructor
   IClassifierReader() : fStatusIsClean( true ) {}
   virtual ~IClassifierReader() {}

   // return classifier response
   virtual double GetMvaValue( const std::vector<double>& inputValues ) const = 0;

   // returns classifier status
   bool IsStatusClean() const { return fStatusIsClean; }

 protected:

   bool fStatusIsClean;
};

#endif

class ReadMLP : public IClassifierReader {

 public:

   // constructor
   ReadMLP( std::vector<std::string>& theInputVars )
      : IClassifierReader(),
        fClassName( "ReadMLP" ),
        fNvars( 4 ),
        fIsNormalised( false )
   {
      // the training input variables
      const char* inputVars[] = { "var1", "var2", "var3", "var4" };

      // sanity checks
      if (theInputVars.empty()) {
         std::cout << "Problem in class \"" << fClassName << "\": empty input vector" << std::endl;
         fStatusIsClean = false;
      }

      if (theInputVars.size() != fNvars) {
         std::cout << "Problem in class \"" << fClassName << "\": mismatch in number of input values: "
                   << theInputVars.size() << " != " << fNvars << std::endl;
         fStatusIsClean = false;
      }

      // validate input variable names
      for (size_t ivar = 0; ivar < theInputVars.size(); ivar++) {
         if (theInputVars[ivar] != inputVars[ivar]) {
            std::cout << "Problem in class \"" << fClassName << "\": mismatch in input variable names" << std::endl
                      << " for variable [" << ivar << "]: " << theInputVars[ivar].c_str() << " != " << inputVars[ivar] << std::endl;
            fStatusIsClean = false;
         }
      }

      // initialize min and max vectors (for normalisation)
      fVmin[0] = 0; fVmax[0] = 0;
      fVmin[1] = 0; fVmax[1] = 0;
      fVmin[2] = 0; fVmax[2] = 0;
      fVmin[3] = 0; fVmax[3] = 0;

      // initialize input variable types
      fType[0] = 'F';
      fType[1] = 'F';
      fType[2] = 'F';
      fType[3] = 'F';

      // initialize constants
      Initialize();
   }

   // destructor
   virtual ~ReadMLP() {
      Clear(); // method-specific
   }

   // the classifier response
   // "inputValues" is a vector of input values in the same order as the
   // variables given to the constructor
   double GetMvaValue( const std::vector<double>& inputValues ) const;

 private:

   // method-specific destructor
   void Clear();

   // common member variables
   const char* fClassName;

   const size_t fNvars;
   size_t GetNvar() const { return fNvars; }
   char GetType( int ivar ) const { return fType[ivar]; }

   // normalisation of input variables
   const bool fIsNormalised;
   bool IsNormalised() const { return fIsNormalised; }
   double fVmin[4];
   double fVmax[4];
   double NormVariable( double x, double xmin, double xmax ) const {
      // normalise to output range: [-1, 1]
      return 2*(x - xmin)/(xmax - xmin) - 1.0;
   }

   // type of input variable: 'F' or 'I'
   char fType[4];

   // initialize internal variables
   void Initialize();
   double GetMvaValue__( const std::vector<double>& inputValues ) const;

   // private members (method specific)

   double ActivationFnc(double x) const;
   double OutputActivationFnc(double x) const;

   int fLayers;        // number of layers, including input and output
   int fLayerSize[4];  // nodes per layer, including one bias node per non-output layer
   double fWeightMatrix0to1[10][5]; // weight matrix from layer 0 to 1
   double fWeightMatrix1to2[6][10]; // weight matrix from layer 1 to 2
   double fWeightMatrix2to3[4][6];  // weight matrix from layer 2 to 3

   double* fWeights[4]; // node activations, filled during propagation
};

inline void ReadMLP::Initialize()
{
   // build network structure: with HiddenLayers="N+5,5" and N = 4 input
   // variables, the layers hold 4+1, 9+1, 5+1 and 4 nodes ("+1" is the
   // bias node of each non-output layer)
   fLayers = 4;
   fLayerSize[0] = 5;  fWeights[0] = new double[5];
   fLayerSize[1] = 10; fWeights[1] = new double[10];
   fLayerSize[2] = 6;  fWeights[2] = new double[6];
   fLayerSize[3] = 4;  fWeights[3] = new double[4];
   // weight matrix from layer 0 to 1
   fWeightMatrix0to1[0][0] = -2.56827151871145;
   fWeightMatrix0to1[1][0] = 1.9675350707796;
   fWeightMatrix0to1[2][0] = 0.0945211430434372;
   fWeightMatrix0to1[3][0] = -0.534652842263711;
   fWeightMatrix0to1[4][0] = -1.20582392866552;
   fWeightMatrix0to1[5][0] = -1.15295712224926;
   fWeightMatrix0to1[6][0] = 0.062068122332932;
   fWeightMatrix0to1[7][0] = 1.77909967887342;
   fWeightMatrix0to1[8][0] = -0.465417129592263;
   fWeightMatrix0to1[0][1] = -1.14980631440935;
   fWeightMatrix0to1[1][1] = 0.997243902140403;
   fWeightMatrix0to1[2][1] = 0.23295461257945;
   fWeightMatrix0to1[3][1] = 0.597331762575396;
   fWeightMatrix0to1[4][1] = -1.07812121147322;
   fWeightMatrix0to1[5][1] = -0.904010711693679;
   fWeightMatrix0to1[6][1] = 0.763987509344495;
   fWeightMatrix0to1[7][1] = -0.42730495957135;
   fWeightMatrix0to1[8][1] = 1.98468529543297;
   fWeightMatrix0to1[0][2] = 0.255102649182503;
   fWeightMatrix0to1[1][2] = 1.33042896557683;
   fWeightMatrix0to1[2][2] = -1.01135742504311;
   fWeightMatrix0to1[3][2] = -0.163148696405109;
   fWeightMatrix0to1[4][2] = -0.874674280440262;
   fWeightMatrix0to1[5][2] = -0.605186137715753;
   fWeightMatrix0to1[6][2] = -1.29503636367852;
   fWeightMatrix0to1[7][2] = 0.0166465979160239;
   fWeightMatrix0to1[8][2] = -0.0838533085354363;
   fWeightMatrix0to1[0][3] = -0.298765733434783;
   fWeightMatrix0to1[1][3] = -3.61669821007974;
   fWeightMatrix0to1[2][3] = 0.230635988926395;
   fWeightMatrix0to1[3][3] = 1.41124122416393;
   fWeightMatrix0to1[4][3] = 2.50578787969626;
   fWeightMatrix0to1[5][3] = 1.24044263856194;
   fWeightMatrix0to1[6][3] = 0.444402811598472;
   fWeightMatrix0to1[7][3] = -0.0135466357087942;
   fWeightMatrix0to1[8][3] = 0.0114644267404459;
   fWeightMatrix0to1[0][4] = -2.80469640642522;
   fWeightMatrix0to1[1][4] = 2.03677268622447;
   fWeightMatrix0to1[2][4] = -1.2252550607933;
   fWeightMatrix0to1[3][4] = -0.400363817658847;
   fWeightMatrix0to1[4][4] = 1.74778853696658;
   fWeightMatrix0to1[5][4] = 3.02549270492086;
   fWeightMatrix0to1[6][4] = 2.62632004739209;
   fWeightMatrix0to1[7][4] = -1.05661183010273;
   fWeightMatrix0to1[8][4] = -2.27738442555204;
   // weight matrix from layer 1 to 2
   fWeightMatrix1to2[0][0] = -1.22570337882136;
   fWeightMatrix1to2[1][0] = -0.0931186540811387;
   fWeightMatrix1to2[2][0] = 0.0272542341322508;
   fWeightMatrix1to2[3][0] = 2.20635629364256;
   fWeightMatrix1to2[4][0] = 0.0433728010577296;
   fWeightMatrix1to2[0][1] = 0.273962369245395;
   fWeightMatrix1to2[1][1] = 0.0930582916641537;
   fWeightMatrix1to2[2][1] = -1.19664292914285;
   fWeightMatrix1to2[3][1] = 0.157749723688285;
   fWeightMatrix1to2[4][1] = -4.93727193311647;
   fWeightMatrix1to2[0][2] = -0.715714091172125;
   fWeightMatrix1to2[1][2] = 0.867788095978986;
   fWeightMatrix1to2[2][2] = -1.96198302938044;
   fWeightMatrix1to2[3][2] = -0.464404359512948;
   fWeightMatrix1to2[4][2] = 0.0595675643343206;
   fWeightMatrix1to2[0][3] = 0.758204927770786;
   fWeightMatrix1to2[1][3] = -2.33053777721601;
   fWeightMatrix1to2[2][3] = -0.0596569719756188;
   fWeightMatrix1to2[3][3] = -0.437338637677644;
   fWeightMatrix1to2[4][3] = 0.166445159553785;
   fWeightMatrix1to2[0][4] = 0.254272880836908;
   fWeightMatrix1to2[1][4] = -1.35815091193781;
   fWeightMatrix1to2[2][4] = -0.364973793826042;
   fWeightMatrix1to2[3][4] = 0.461639459085397;
   fWeightMatrix1to2[4][4] = 2.61649708222458;
   fWeightMatrix1to2[0][5] = -0.607652087990045;
   fWeightMatrix1to2[1][5] = -1.59745372261442;
   fWeightMatrix1to2[2][5] = -0.497497443898413;
   fWeightMatrix1to2[3][5] = 1.55870109321722;
   fWeightMatrix1to2[4][5] = 1.76169097043581;
   fWeightMatrix1to2[0][6] = 0.636994330715913;
   fWeightMatrix1to2[1][6] = -0.437626501220008;
   fWeightMatrix1to2[2][6] = -1.64640596581009;
   fWeightMatrix1to2[3][6] = 1.81557603834963;
   fWeightMatrix1to2[4][6] = 0.213460988352797;
   fWeightMatrix1to2[0][7] = 2.87318117712971;
   fWeightMatrix1to2[1][7] = -0.3992286418755;
   fWeightMatrix1to2[2][7] = -0.0828774633250926;
   fWeightMatrix1to2[3][7] = -2.32908344616873;
   fWeightMatrix1to2[4][7] = -0.00287196406204667;
   fWeightMatrix1to2[0][8] = 3.18538225792951;
   fWeightMatrix1to2[1][8] = 0.00204371301044828;
   fWeightMatrix1to2[2][8] = 0.00883819846358683;
   fWeightMatrix1to2[3][8] = -3.25311571046186;
   fWeightMatrix1to2[4][8] = 0.0863656137054146;
   fWeightMatrix1to2[0][9] = -2.58613271127948;
   fWeightMatrix1to2[1][9] = -1.61491969085433;
   fWeightMatrix1to2[2][9] = 1.46349048563992;
   fWeightMatrix1to2[3][9] = -3.52557440843157;
   fWeightMatrix1to2[4][9] = 0.774477596633972;
   // weight matrix from layer 2 to 3
   fWeightMatrix2to3[0][0] = 1.00887795674738;
   fWeightMatrix2to3[1][0] = 0.901934446106593;
   fWeightMatrix2to3[2][0] = 0.0103134082552144;
   fWeightMatrix2to3[3][0] = -4.15846758201171;
   fWeightMatrix2to3[0][1] = -1.76663219370744;
   fWeightMatrix2to3[1][1] = -3.16703807597721;
   fWeightMatrix2to3[2][1] = -0.721676163411028;
   fWeightMatrix2to3[3][1] = 2.88184594216196;
   fWeightMatrix2to3[0][2] = 1.05464986697278;
   fWeightMatrix2to3[1][2] = -1.33953128311584;
   fWeightMatrix2to3[2][2] = 2.06932178843474;
   fWeightMatrix2to3[3][2] = -0.566502440872972;
   fWeightMatrix2to3[0][3] = 1.1754576459947;
   fWeightMatrix2to3[1][3] = 1.04229738415628;
   fWeightMatrix2to3[2][3] = 0.409096508127509;
   fWeightMatrix2to3[3][3] = -4.91383178074721;
   fWeightMatrix2to3[0][4] = 2.89032187783364;
   fWeightMatrix2to3[1][4] = -0.640305503795561;
   fWeightMatrix2to3[2][4] = -3.0600188888794;
   fWeightMatrix2to3[3][4] = 0.412788618104668;
   fWeightMatrix2to3[0][5] = 0.441316337386854;
   fWeightMatrix2to3[1][5] = -0.0238371766698862;
   fWeightMatrix2to3[2][5] = 0.903246146264539;
   fWeightMatrix2to3[3][5] = 0.705588849148349;
}

inline double ReadMLP::GetMvaValue__( const std::vector<double>& inputValues ) const
{
   if (inputValues.size() != (unsigned int)fLayerSize[0]-1) {
      std::cout << "Input vector needs to be of size " << fLayerSize[0]-1 << std::endl;
      return 0;
   }

   // reset all node activations
   for (int l=0; l<fLayers; l++)
      for (int i=0; i<fLayerSize[l]; i++) fWeights[l][i]=0;

   // set the bias node of each non-output layer to 1
   for (int l=0; l<fLayers-1; l++)
      fWeights[l][fLayerSize[l]-1]=1;

   // feed the input variables into layer 0
   for (int i=0; i<fLayerSize[0]-1; i++)
      fWeights[0][i]=inputValues[i];

   // layer 0 to 1
   for (int o=0; o<fLayerSize[1]-1; o++) {
      for (int i=0; i<fLayerSize[0]; i++) {
         double inputVal = fWeightMatrix0to1[o][i] * fWeights[0][i];
         fWeights[1][o] += inputVal;
      }
      fWeights[1][o] = ActivationFnc(fWeights[1][o]);
   }
   // layer 1 to 2
   for (int o=0; o<fLayerSize[2]-1; o++) {
      for (int i=0; i<fLayerSize[1]; i++) {
         double inputVal = fWeightMatrix1to2[o][i] * fWeights[1][i];
         fWeights[2][o] += inputVal;
      }
      fWeights[2][o] = ActivationFnc(fWeights[2][o]);
   }
   // layer 2 to 3 (output layer: no bias node, identity activation)
   for (int o=0; o<fLayerSize[3]; o++) {
      for (int i=0; i<fLayerSize[2]; i++) {
         double inputVal = fWeightMatrix2to3[o][i] * fWeights[2][i];
         fWeights[3][o] += inputVal;
      }
      fWeights[3][o] = OutputActivationFnc(fWeights[3][o]);
   }

   // return the response of the first output node
   return fWeights[3][0];
}
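
/* In formula form, the propagation above is the standard feed-forward rule:
   for each hidden layer l,

      a^{(l+1)}_o = tanh( \sum_i W^{(l)}_{o,i} a^{(l)}_i ),

   where the sum runs over all nodes i of layer l including its bias node
   (whose activation is fixed to 1), and the output layer applies the
   identity instead of tanh. The weight matrices W^{(l)} are the
   fWeightMatrix* arrays filled in Initialize().
*/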

double ReadMLP::ActivationFnc(double x) const {
   // hyperbolic tangent
   return std::tanh(x);
}
double ReadMLP::OutputActivationFnc(double x) const {
   // identity
   return x;
}
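
/* Note: the identity output activation means the raw per-class node values
   are not probabilities. In multiclass mode TMVA normalises them (typically
   via a softmax-style transformation) before reporting class probabilities.
   A minimal sketch of such a normalisation, assuming the four raw output
   values have been collected; this helper is an illustration and not part of
   the generated class:

   #include <cmath>
   #include <vector>

   std::vector<double> Softmax(const std::vector<double>& raw) {
      double vmax = raw[0];
      for (double r : raw) if (r > vmax) vmax = r;   // shift for numerical stability
      std::vector<double> prob(raw.size());
      double norm = 0;
      for (size_t k = 0; k < raw.size(); k++) {
         prob[k] = std::exp(raw[k] - vmax);
         norm += prob[k];
      }
      for (size_t k = 0; k < raw.size(); k++) prob[k] /= norm; // entries sum to 1
      return prob;
   }
*/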

// Clean up
inline void ReadMLP::Clear()
{
   // clean up the arrays
   for (int lIdx = 0; lIdx < 4; lIdx++) {
      delete[] fWeights[lIdx];
   }
}
inline double ReadMLP::GetMvaValue( const std::vector<double>& inputValues ) const
{
   // classifier response value
   double retval = 0;

   // classifier response, sanity check first
   if (!IsStatusClean()) {
      std::cout << "Problem in class \"" << fClassName << "\": cannot return classifier response"
                << " because status is dirty" << std::endl;
      retval = 0;
   }
   else {
      if (IsNormalised()) {
         // normalise variables
         std::vector<double> iV;
         iV.reserve(inputValues.size());
         int ivar = 0;
         for (std::vector<double>::const_iterator varIt = inputValues.begin();
              varIt != inputValues.end(); varIt++, ivar++) {
            iV.push_back(NormVariable( *varIt, fVmin[ivar], fVmax[ivar] ));
         }
         retval = GetMvaValue__( iV );
      }
      else {
         retval = GetMvaValue__( inputValues );
      }
   }

   return retval;
}
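
/* Usage sketch: the generated class is self-contained and can be compiled
   into any C++ program without ROOT. The variable names must match the
   training order; the event values below are placeholders, not taken from
   this file.

   #include <iostream>
   #include <string>
   #include <vector>

   int main() {
      std::vector<std::string> vars = { "var1", "var2", "var3", "var4" };
      ReadMLP mlp( vars );   // constructor checks the variable names

      std::vector<double> event = { 0.5, -1.2, 0.3, 2.0 }; // one event's inputs
      if (mlp.IsStatusClean())
         std::cout << "MLP response: " << mlp.GetMvaValue( event ) << std::endl;
      return 0;
   }
*/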