TMVAClassification_MLPBNN.class.C
// Class: ReadMLPBNN
// Automatically generated by MethodBase::MakeClass
//

/* configuration options =====================================================

#GEN -*-*-*-*-*-*-*-*-*-*-*- general info -*-*-*-*-*-*-*-*-*-*-*-

Method          : MLP::MLPBNN
TMVA Release    : 4.2.1 [262657]
ROOT Release    : 6.10/09 [395785]
Creator         : sftnight
Date            : Thu May 31 12:04:27 2018
Host            : Linux SFT-ubuntu-1710-1 4.13.0-31-generic #34-Ubuntu SMP Fri Jan 19 16:34:46 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux
Dir             : /mnt/build/workspace/root-makedoc-v610/rootspi/rdoc/src/v6-10-00-patches/documentation/doxygen
Training events : 2000
Analysis type   : [Classification]


#OPT -*-*-*-*-*-*-*-*-*-*-*-*- options -*-*-*-*-*-*-*-*-*-*-*-*-

# Set by User:
NCycles: "60" [Number of training cycles]
HiddenLayers: "N+5" [Specification of hidden layer architecture]
NeuronType: "tanh" [Neuron activation function type]
V: "False" [Verbose output (short form of "VerbosityLevel" below - overrides the latter one)]
VarTransform: "N" [List of variable transformations performed before training, e.g., "D_Background,P_Signal,G,N_AllClasses" for: "Decorrelation, PCA-transformation, Gaussianisation, Normalisation, each for the given class of events ('AllClasses' denotes all events of all classes, if no class indication is given, 'All' is assumed)"]
H: "True" [Print method-specific help message]
TrainingMethod: "BFGS" [Train with Back-Propagation (BP), BFGS Algorithm (BFGS), or Genetic Algorithm (GA - slower and worse)]
TestRate: "5" [Test for overtraining performed at every #th epoch]
UseRegulator: "True" [Use regulator to avoid over-training]
# Default:
RandomSeed: "1" [Random seed for initial synapse weights (0 means unique seed for each run; default value '1')]
EstimatorType: "CE" [MSE (Mean Square Estimator) for Gaussian Likelihood or CE (Cross-Entropy) for Bernoulli Likelihood]
NeuronInputType: "sum" [Neuron input function type]
VerbosityLevel: "Default" [Verbosity level]
CreateMVAPdfs: "False" [Create PDFs for classifier outputs (signal and background)]
IgnoreNegWeightsInTraining: "False" [Events with negative weights are ignored in the training (but are included for testing and performance evaluation)]
LearningRate: "2.000000e-02" [ANN learning rate parameter]
DecayRate: "1.000000e-02" [Decay rate for learning parameter]
EpochMonitoring: "False" [Provide epoch-wise monitoring plots according to TestRate (caution: causes big ROOT output file!)]
Sampling: "1.000000e+00" [Only 'Sampling' (randomly selected) events are trained each epoch]
SamplingEpoch: "1.000000e+00" [Sampling is used for the first 'SamplingEpoch' epochs; afterwards, all events are taken for training]
SamplingImportance: "1.000000e+00" [The sampling weights of events in successful epochs (worse estimator than before) are multiplied by SamplingImportance; otherwise they are divided]
SamplingTraining: "True" [The training sample is sampled]
SamplingTesting: "False" [The testing sample is sampled]
ResetStep: "50" [How often BFGS should reset history]
Tau: "3.000000e+00" [LineSearch "size step"]
BPMode: "sequential" [Back-propagation learning mode: sequential or batch]
BatchSize: "-1" [Batch size: number of events/batch, only set if in Batch Mode, -1 for BatchSize=number_of_events]
ConvergenceImprove: "1.000000e-30" [Minimum improvement which counts as improvement (<0 means automatic convergence check is turned off)]
ConvergenceTests: "-1" [Number of steps (without improvement) required for convergence (<0 means automatic convergence check is turned off)]
UpdateLimit: "10000" [Maximum times of regulator update]
CalculateErrors: "False" [Calculates inverse Hessian matrix at the end of the training to be able to calculate the uncertainties of an MVA value]
WeightRange: "1.000000e+00" [Take the events for the estimator calculations from small deviations from the desired value to large deviations only over the weight range]
##


#VAR -*-*-*-*-*-*-*-*-*-*-*-* variables *-*-*-*-*-*-*-*-*-*-*-*-

NVar 4
var1+var2   myvar1   myvar1   myvar1             'F' [-8.14423561096,7.26972866058]
var1-var2   myvar2   myvar2   Expression 2       'F' [-3.96643972397,4.0258936882]
var3        var3     var3     Variable 3 units   'F' [-5.03730010986,4.27845287323]
var4        var4     var4     Variable 4 units   'F' [-5.95050764084,4.64035463333]
NSpec 2
var1*2      spec1    spec1    Spectator 1 units  'F' [-9.91655540466,8.67800140381]
var1*3      spec2    spec2    Spectator 2 units  'F' [-14.874833107,13.0170021057]


============================================================================ */
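
// Illustrative only: a booking call of roughly this form in the training
// macro (cf. the TMVAClassification tutorial) would produce the "Set by
// User" options listed above; the exact option string is an assumption
// reconstructed from that list:
//
//    factory->BookMethod( TMVA::Types::kMLP, "MLPBNN",
//        "H:!V:NeuronType=tanh:VarTransform=N:NCycles=60:HiddenLayers=N+5:"
//        "TestRate=5:TrainingMethod=BFGS:UseRegulator" );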

#include <vector>
#include <cmath>
#include <string>
#include <iostream>

#ifndef IClassifierReader__def
#define IClassifierReader__def

class IClassifierReader {

 public:

   // constructor
   IClassifierReader() : fStatusIsClean( true ) {}
   virtual ~IClassifierReader() {}

   // return classifier response
   virtual double GetMvaValue( const std::vector<double>& inputValues ) const = 0;

   // returns classifier status
   bool IsStatusClean() const { return fStatusIsClean; }

 protected:

   bool fStatusIsClean;
};

#endif

class ReadMLPBNN : public IClassifierReader {

 public:

   // constructor
   ReadMLPBNN( std::vector<std::string>& theInputVars )
      : IClassifierReader(),
        fClassName( "ReadMLPBNN" ),
        fNvars( 4 ),
        fIsNormalised( false )
   {
      // the training input variables
      const char* inputVars[] = { "var1+var2", "var1-var2", "var3", "var4" };

      // sanity checks
      if (theInputVars.empty()) {
         std::cout << "Problem in class \"" << fClassName << "\": empty input vector" << std::endl;
         fStatusIsClean = false;
      }

      if (theInputVars.size() != fNvars) {
         std::cout << "Problem in class \"" << fClassName << "\": mismatch in number of input values: "
                   << theInputVars.size() << " != " << fNvars << std::endl;
         fStatusIsClean = false;
      }

      // validate input variables
      for (size_t ivar = 0; ivar < theInputVars.size(); ivar++) {
         if (theInputVars[ivar] != inputVars[ivar]) {
            std::cout << "Problem in class \"" << fClassName << "\": mismatch in input variable names" << std::endl
                      << " for variable [" << ivar << "]: " << theInputVars[ivar].c_str() << " != " << inputVars[ivar] << std::endl;
            fStatusIsClean = false;
         }
      }

      // initialize min and max vectors (for normalisation)
      fVmin[0] = -1;
      fVmax[0] = 1;
      fVmin[1] = -1;
      fVmax[1] = 1;
      fVmin[2] = -1;
      fVmax[2] = 1;
      fVmin[3] = -1;
      fVmax[3] = 1;

      // initialize input variable types
      fType[0] = 'F';
      fType[1] = 'F';
      fType[2] = 'F';
      fType[3] = 'F';

      // initialize constants
      Initialize();

      // initialize transformation
      InitTransform();
   }

   // destructor
   virtual ~ReadMLPBNN() {
      Clear(); // method-specific
   }

   // the classifier response
   // "inputValues" is a vector of input values in the same order as the
   // variables given to the constructor
   double GetMvaValue( const std::vector<double>& inputValues ) const;

 private:

   // method-specific destructor
   void Clear();

   // input variable transformation

   double fMin_1[3][4];
   double fMax_1[3][4];
   void InitTransform_1();
   void Transform_1( std::vector<double> & iv, int sigOrBgd ) const;
   void InitTransform();
   void Transform( std::vector<double> & iv, int sigOrBgd ) const;

   // common member variables
   const char* fClassName;

   const size_t fNvars;
   size_t GetNvar() const { return fNvars; }
   char GetType( int ivar ) const { return fType[ivar]; }

   // normalisation of input variables
   const bool fIsNormalised;
   bool IsNormalised() const { return fIsNormalised; }
   double fVmin[4];
   double fVmax[4];
   double NormVariable( double x, double xmin, double xmax ) const {
      // normalise to output range: [-1, 1]
      return 2*(x - xmin)/(xmax - xmin) - 1.0;
   }

   // type of input variable: 'F' or 'I'
   char fType[4];

   // initialize internal variables
   void Initialize();
   double GetMvaValue__( const std::vector<double>& inputValues ) const;

   // private members (method specific)

   double ActivationFnc(double x) const;
   double OutputActivationFnc(double x) const;

   int fLayers;
   int fLayerSize[3];
   double fWeightMatrix0to1[10][5];   // weight matrix from layer 0 to 1
   double fWeightMatrix1to2[1][10];   // weight matrix from layer 1 to 2

   double* fWeights[3];
};

inline void ReadMLPBNN::Initialize()
{
   // build network structure
   fLayers = 3;
   fLayerSize[0] = 5;  fWeights[0] = new double[5];
   fLayerSize[1] = 10; fWeights[1] = new double[10];
   fLayerSize[2] = 1;  fWeights[2] = new double[1];
   // weight matrix from layer 0 to 1
   fWeightMatrix0to1[0][0] = 0.0814937806335718;
   fWeightMatrix0to1[1][0] = 1.66608493904044;
   fWeightMatrix0to1[2][0] = 0.907620667288285;
   fWeightMatrix0to1[3][0] = -0.82104985351804;
   fWeightMatrix0to1[4][0] = -2.08201625319282;
   fWeightMatrix0to1[5][0] = -2.67922549404247;
   fWeightMatrix0to1[6][0] = -0.812128025540134;
   fWeightMatrix0to1[7][0] = 2.34318790716582;
   fWeightMatrix0to1[8][0] = -2.24296070100105;
   fWeightMatrix0to1[0][1] = -1.06657449869395;
   fWeightMatrix0to1[1][1] = -1.19325408648747;
   fWeightMatrix0to1[2][1] = -0.355524904352707;
   fWeightMatrix0to1[3][1] = -0.208303498149552;
   fWeightMatrix0to1[4][1] = -0.949201145643711;
   fWeightMatrix0to1[5][1] = -0.50055065217611;
   fWeightMatrix0to1[6][1] = 0.504887665889982;
   fWeightMatrix0to1[7][1] = -0.416190783244244;
   fWeightMatrix0to1[8][1] = 1.1704574908932;
   fWeightMatrix0to1[0][2] = -0.34301751949142;
   fWeightMatrix0to1[1][2] = 1.18544046118725;
   fWeightMatrix0to1[2][2] = -0.0337431028634733;
   fWeightMatrix0to1[3][2] = -2.10928755356298;
   fWeightMatrix0to1[4][2] = -1.02354627299503;
   fWeightMatrix0to1[5][2] = -0.389081350821253;
   fWeightMatrix0to1[6][2] = -1.09998880400102;
   fWeightMatrix0to1[7][2] = -0.219992405129252;
   fWeightMatrix0to1[8][2] = 0.794576815354533;
   fWeightMatrix0to1[0][3] = -3.16844563782956;
   fWeightMatrix0to1[1][3] = -1.54039625590493;
   fWeightMatrix0to1[2][3] = 0.790940325402751;
   fWeightMatrix0to1[3][3] = 2.88071956340261;
   fWeightMatrix0to1[4][3] = 4.3908734623996;
   fWeightMatrix0to1[5][3] = 1.12949208619052;
   fWeightMatrix0to1[6][3] = 0.185644097902448;
   fWeightMatrix0to1[7][3] = -0.543046087435345;
   fWeightMatrix0to1[8][3] = 1.44766636064841;
   fWeightMatrix0to1[0][4] = 0.33202073563451;
   fWeightMatrix0to1[1][4] = 3.0800970895301;
   fWeightMatrix0to1[2][4] = -1.02289215335637;
   fWeightMatrix0to1[3][4] = -0.155435781193619;
   fWeightMatrix0to1[4][4] = -0.882861078422949;
   fWeightMatrix0to1[5][4] = 0.019309808309592;
   fWeightMatrix0to1[6][4] = 1.91549261652052;
   fWeightMatrix0to1[7][4] = 0.13021332110229;
   fWeightMatrix0to1[8][4] = -1.32984315105756;
   // weight matrix from layer 1 to 2
   fWeightMatrix1to2[0][0] = -3.10575533370833;
   fWeightMatrix1to2[0][1] = -0.124247930938538;
   fWeightMatrix1to2[0][2] = -0.813882862721867;
   fWeightMatrix1to2[0][3] = 1.47280482989785;
   fWeightMatrix1to2[0][4] = 5.44297027047592;
   fWeightMatrix1to2[0][5] = 3.21864846820081;
   fWeightMatrix1to2[0][6] = 2.32953640660092;
   fWeightMatrix1to2[0][7] = -1.01190744015599;
   fWeightMatrix1to2[0][8] = 0.4327075992155;
   fWeightMatrix1to2[0][9] = -0.61477579296756;
}
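
// Network topology (cf. Initialize above): layer 0 holds the 4 input
// variables plus a constant bias node (fLayerSize[0] = 5); layer 1 holds
// 9 tanh neurons plus a bias (fLayerSize[1] = 10); layer 2 is the single
// sigmoid output. GetMvaValue__ below evaluates
//
//    h_j = tanh( sum_{i=0..4} fWeightMatrix0to1[j][i] * x_i ),   j = 0..8
//    y   = sigmoid( sum_{j=0..9} fWeightMatrix1to2[0][j] * h_j )
//
// where x_4 = 1 and h_9 = 1 are the bias activations.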

inline double ReadMLPBNN::GetMvaValue__( const std::vector<double>& inputValues ) const
{
   if (inputValues.size() != (unsigned int)fLayerSize[0]-1) {
      std::cout << "Input vector needs to be of size " << fLayerSize[0]-1 << std::endl;
      return 0;
   }

   for (int l=0; l<fLayers; l++)
      for (int i=0; i<fLayerSize[l]; i++) fWeights[l][i]=0;

   for (int l=0; l<fLayers-1; l++)
      fWeights[l][fLayerSize[l]-1]=1;   // bias nodes

   for (int i=0; i<fLayerSize[0]-1; i++)
      fWeights[0][i]=inputValues[i];

   // layer 0 to 1
   for (int o=0; o<fLayerSize[1]-1; o++) {
      for (int i=0; i<fLayerSize[0]; i++) {
         double inputVal = fWeightMatrix0to1[o][i] * fWeights[0][i];
         fWeights[1][o] += inputVal;
      }
      fWeights[1][o] = ActivationFnc(fWeights[1][o]);
   }
   // layer 1 to 2
   for (int o=0; o<fLayerSize[2]; o++) {
      for (int i=0; i<fLayerSize[1]; i++) {
         double inputVal = fWeightMatrix1to2[o][i] * fWeights[1][i];
         fWeights[2][o] += inputVal;
      }
      fWeights[2][o] = OutputActivationFnc(fWeights[2][o]);
   }

   return fWeights[2][0];
}

double ReadMLPBNN::ActivationFnc(double x) const {
   // hyperbolic tan
   return std::tanh(x);
}
double ReadMLPBNN::OutputActivationFnc(double x) const {
   // sigmoid
   return 1.0/(1.0+std::exp(-x));
}

// Clean up
inline void ReadMLPBNN::Clear()
{
   // clean up the arrays
   for (int lIdx = 0; lIdx < 3; lIdx++) {
      delete[] fWeights[lIdx];
   }
}

inline double ReadMLPBNN::GetMvaValue( const std::vector<double>& inputValues ) const
{
   // classifier response value
   double retval = 0;

   // classifier response, sanity check first
   if (!IsStatusClean()) {
      std::cout << "Problem in class \"" << fClassName << "\": cannot return classifier response"
                << " because status is dirty" << std::endl;
      retval = 0;
   }
   else {
      if (IsNormalised()) {
         // normalise variables
         std::vector<double> iV;
         iV.reserve(inputValues.size());
         int ivar = 0;
         for (std::vector<double>::const_iterator varIt = inputValues.begin();
              varIt != inputValues.end(); varIt++, ivar++) {
            iV.push_back(NormVariable( *varIt, fVmin[ivar], fVmax[ivar] ));
         }
         Transform( iV, -1 );
         retval = GetMvaValue__( iV );
      }
      else {
         std::vector<double> iV( inputValues );
         Transform( iV, -1 );
         retval = GetMvaValue__( iV );
      }
   }

   return retval;
}

//_______________________________________________________________________
inline void ReadMLPBNN::InitTransform_1()
{
   // Normalization transformation, initialisation
   fMin_1[0][0] = -4.94358778;
   fMax_1[0][0] = 6.3994679451;
   fMin_1[1][0] = -8.14423561096;
   fMax_1[1][0] = 7.26972866058;
   fMin_1[2][0] = -8.14423561096;
   fMax_1[2][0] = 7.26972866058;
   fMin_1[0][1] = -3.96643972397;
   fMax_1[0][1] = 3.11266636848;
   fMin_1[1][1] = -3.25508260727;
   fMax_1[1][1] = 4.0258936882;
   fMin_1[2][1] = -3.96643972397;
   fMax_1[2][1] = 4.0258936882;
   fMin_1[0][2] = -2.78645992279;
   fMax_1[0][2] = 3.50111722946;
   fMin_1[1][2] = -5.03730010986;
   fMax_1[1][2] = 4.27845287323;
   fMin_1[2][2] = -5.03730010986;
   fMax_1[2][2] = 4.27845287323;
   fMin_1[0][3] = -2.42712664604;
   fMax_1[0][3] = 4.5351858139;
   fMin_1[1][3] = -5.95050764084;
   fMax_1[1][3] = 4.64035463333;
   fMin_1[2][3] = -5.95050764084;
   fMax_1[2][3] = 4.64035463333;
}

//_______________________________________________________________________
inline void ReadMLPBNN::Transform_1( std::vector<double>& iv, int cls) const
{
   // Normalization transformation
   if (cls < 0 || cls > 2) cls = 2;   // out of range: fall back to the all-classes statistics

   const int nVar = 4;

   // define the indices of the variables which are transformed by this transformation
   static std::vector<int> indicesGet;
   static std::vector<int> indicesPut;

   if ( indicesGet.empty() ) {
      indicesGet.reserve(fNvars);
      indicesGet.push_back( 0 );
      indicesGet.push_back( 1 );
      indicesGet.push_back( 2 );
      indicesGet.push_back( 3 );
   }
   if ( indicesPut.empty() ) {
      indicesPut.reserve(fNvars);
      indicesPut.push_back( 0 );
      indicesPut.push_back( 1 );
      indicesPut.push_back( 2 );
      indicesPut.push_back( 3 );
   }

   static std::vector<double> dv;
   dv.resize(nVar);
   for (int ivar=0; ivar<nVar; ivar++) dv[ivar] = iv[indicesGet.at(ivar)];
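
   // linear map to [-1, 1]: x' = 2*(x - min)/(max - min) - 1, using the
   // class-specific ranges initialised in InitTransform_1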
   for (int ivar=0; ivar<nVar; ivar++) {
      double offset = fMin_1[cls][ivar];
      double scale  = 1.0/(fMax_1[cls][ivar]-fMin_1[cls][ivar]);
      iv[indicesPut.at(ivar)] = (dv[ivar]-offset)*scale * 2 - 1;
   }
}

//_______________________________________________________________________
inline void ReadMLPBNN::InitTransform()
{
   InitTransform_1();
}

//_______________________________________________________________________
inline void ReadMLPBNN::Transform( std::vector<double>& iv, int sigOrBgd ) const
{
   Transform_1( iv, sigOrBgd );
}
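
/* usage example ==============================================================

   A minimal sketch of how a standalone reader like this is typically used;
   the variable values below are made up for illustration:

   #include "TMVAClassification_MLPBNN.class.C"

   int main() {
      // variable names must match the training order checked in the constructor
      std::vector<std::string> inputVars;
      inputVars.push_back("var1+var2");
      inputVars.push_back("var1-var2");
      inputVars.push_back("var3");
      inputVars.push_back("var4");

      ReadMLPBNN reader(inputVars);

      std::vector<double> values(4);
      values[0] = 0.5;    // var1+var2
      values[1] = -0.2;   // var1-var2
      values[2] = 1.1;    // var3
      values[3] = 0.3;    // var4

      // sigmoid output in (0, 1); larger values are more signal-like
      double mva = reader.GetMvaValue(values);
      std::cout << "MLPBNN response: " << mva << std::endl;
      return 0;
   }

============================================================================ */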