TMVAClassification_MLPBNN.class.C
// Class: ReadMLPBNN
// Automatically generated by MethodBase::MakeClass
//

/* configuration options =====================================================

#GEN -*-*-*-*-*-*-*-*-*-*-*- general info -*-*-*-*-*-*-*-*-*-*-*-

Method         : MLP::MLPBNN
TMVA Release   : 4.2.1 [262657]
ROOT Release   : 6.14/05 [396805]
Creator        : sftnight
Date           : Fri Nov 2 10:41:08 2018
Host           : Linux ec-ubuntu-14-04-x86-64-2 3.13.0-157-generic #207-Ubuntu SMP Mon Aug 20 16:44:59 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux
Dir            : /mnt/build/workspace/root-makedoc-v614/rootspi/rdoc/src/v6-14-00-patches/documentation/doxygen
Training events: 2000
Analysis type  : [Classification]


#OPT -*-*-*-*-*-*-*-*-*-*-*-*- options -*-*-*-*-*-*-*-*-*-*-*-*-

# Set by User:
NCycles: "60" [Number of training cycles]
HiddenLayers: "N+5" [Specification of hidden layer architecture]
NeuronType: "tanh" [Neuron activation function type]
V: "False" [Verbose output (short form of "VerbosityLevel" below - overrides the latter one)]
VarTransform: "N" [List of variable transformations performed before training, e.g., "D_Background,P_Signal,G,N_AllClasses" for: "Decorrelation, PCA-transformation, Gaussianisation, Normalisation, each for the given class of events ('AllClasses' denotes all events of all classes, if no class indication is given, 'All' is assumed)"]
H: "True" [Print method-specific help message]
TrainingMethod: "BFGS" [Train with Back-Propagation (BP), BFGS Algorithm (BFGS), or Genetic Algorithm (GA - slower and worse)]
TestRate: "5" [Test for overtraining performed at each #th epoch]
UseRegulator: "True" [Use regulator to avoid over-training]
# Default:
RandomSeed: "1" [Random seed for initial synapse weights (0 means unique seed for each run; default value '1')]
EstimatorType: "CE" [MSE (Mean Square Estimator) for Gaussian Likelihood or CE (Cross-Entropy) for Bernoulli Likelihood]
NeuronInputType: "sum" [Neuron input function type]
VerbosityLevel: "Default" [Verbosity level]
CreateMVAPdfs: "False" [Create PDFs for classifier outputs (signal and background)]
IgnoreNegWeightsInTraining: "False" [Events with negative weights are ignored in the training (but are included for testing and performance evaluation)]
LearningRate: "2.000000e-02" [ANN learning rate parameter]
DecayRate: "1.000000e-02" [Decay rate for learning parameter]
EpochMonitoring: "False" [Provide epoch-wise monitoring plots according to TestRate (caution: causes big ROOT output file!)]
Sampling: "1.000000e+00" [Only 'Sampling' (randomly selected) events are trained each epoch]
SamplingEpoch: "1.000000e+00" [Sampling is used for the first 'SamplingEpoch' epochs; afterwards, all events are taken for training]
SamplingImportance: "1.000000e+00" [The sampling weights of events in epochs which are successful (worse estimator than before) are multiplied by SamplingImportance; otherwise they are divided by it.]
SamplingTraining: "True" [The training sample is sampled]
SamplingTesting: "False" [The testing sample is sampled]
ResetStep: "50" [How often BFGS should reset history]
Tau: "3.000000e+00" [LineSearch "size step"]
BPMode: "sequential" [Back-propagation learning mode: sequential or batch]
BatchSize: "-1" [Batch size: number of events/batch, only set if in Batch Mode, -1 for BatchSize=number_of_events]
ConvergenceImprove: "1.000000e-30" [Minimum improvement which counts as improvement (<0 means automatic convergence check is turned off)]
ConvergenceTests: "-1" [Number of steps (without improvement) required for convergence (<0 means automatic convergence check is turned off)]
UpdateLimit: "10000" [Maximum times of regulator update]
CalculateErrors: "False" [Calculates inverse Hessian matrix at the end of the training to be able to calculate the uncertainties of an MVA value]
WeightRange: "1.000000e+00" [Take the events for the estimator calculations from small deviations from the desired value to large deviations only over the weight range]
##

#VAR -*-*-*-*-*-*-*-*-*-*-*-* variables *-*-*-*-*-*-*-*-*-*-*-*-

NVar 4
var1+var2    myvar1    myvar1    myvar1               'F'    [-8.14423561096,7.26972866058]
var1-var2    myvar2    myvar2    Expression 2         'F'    [-3.96643972397,4.0258936882]
var3         var3      var3      Variable 3 units     'F'    [-5.03730010986,4.27845287323]
var4         var4      var4      Variable 4 units     'F'    [-5.95050764084,4.64035463333]
NSpec 2
var1*2       spec1     spec1     Spectator 1 units    'F'    [-9.91655540466,8.67800140381]
var1*3       spec2     spec2     Spectator 2 units    'F'    [-14.874833107,13.0170021057]


============================================================================ */
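
// The options listed above correspond to a TMVA booking call of roughly the
// following form (a sketch only: the exact factory setup and option string
// used for this particular build are not recorded in this file):
//
//    TMVA::Factory factory( "TMVAClassification", outputFile,
//                           "AnalysisType=Classification" );
//    factory.BookMethod( dataloader, TMVA::Types::kMLP, "MLPBNN",
//                        "H:!V:VarTransform=N:NCycles=60:HiddenLayers=N+5:"
//                        "NeuronType=tanh:TestRate=5:TrainingMethod=BFGS:"
//                        "UseRegulator" );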

#include <array>
#include <vector>
#include <cmath>
#include <string>
#include <iostream>

#ifndef IClassifierReader__def
#define IClassifierReader__def

class IClassifierReader {

 public:

   // constructor
   IClassifierReader() : fStatusIsClean( true ) {}
   virtual ~IClassifierReader() {}

   // return classifier response
   virtual double GetMvaValue( const std::vector<double>& inputValues ) const = 0;

   // returns classifier status
   bool IsStatusClean() const { return fStatusIsClean; }

 protected:

   bool fStatusIsClean;
};

#endif

class ReadMLPBNN : public IClassifierReader {

 public:

   // constructor
   ReadMLPBNN( std::vector<std::string>& theInputVars )
      : IClassifierReader(),
        fClassName( "ReadMLPBNN" ),
        fNvars( 4 ),
        fIsNormalised( false )
   {
      // the training input variables
      const char* inputVars[] = { "var1+var2", "var1-var2", "var3", "var4" };

      // sanity checks
      if (theInputVars.empty()) {
         std::cout << "Problem in class \"" << fClassName << "\": empty input vector" << std::endl;
         fStatusIsClean = false;
      }

      if (theInputVars.size() != fNvars) {
         std::cout << "Problem in class \"" << fClassName << "\": mismatch in number of input values: "
                   << theInputVars.size() << " != " << fNvars << std::endl;
         fStatusIsClean = false;
      }

      // validate input variables
      for (size_t ivar = 0; ivar < theInputVars.size(); ivar++) {
         if (theInputVars[ivar] != inputVars[ivar]) {
            std::cout << "Problem in class \"" << fClassName << "\": mismatch in input variable names" << std::endl
                      << " for variable [" << ivar << "]: " << theInputVars[ivar].c_str() << " != " << inputVars[ivar] << std::endl;
            fStatusIsClean = false;
         }
      }

      // initialize min and max vectors (for normalisation)
      fVmin[0] = -1;
      fVmax[0] = 1;
      fVmin[1] = -1;
      fVmax[1] = 1;
      fVmin[2] = -1;
      fVmax[2] = 1;
      fVmin[3] = -1;
      fVmax[3] = 1;

      // initialize input variable types
      fType[0] = 'F';
      fType[1] = 'F';
      fType[2] = 'F';
      fType[3] = 'F';

      // initialize constants
      Initialize();

      // initialize transformation
      InitTransform();
   }

   // destructor
   virtual ~ReadMLPBNN() {
      Clear(); // method-specific
   }

   // the classifier response
   // "inputValues" is a vector of input values in the same order as the
   // variables given to the constructor
   double GetMvaValue( const std::vector<double>& inputValues ) const override;

 private:

   // method-specific destructor
   void Clear();

   // input variable transformation

   double fMin_1[3][4];
   double fMax_1[3][4];
   void InitTransform_1();
   void Transform_1( std::vector<double> & iv, int sigOrBgd ) const;
   void InitTransform();
   void Transform( std::vector<double> & iv, int sigOrBgd ) const;

   // common member variables
   const char* fClassName;

   const size_t fNvars;
   size_t GetNvar() const { return fNvars; }
   char GetType( int ivar ) const { return fType[ivar]; }

   // normalisation of input variables
   const bool fIsNormalised;
   bool IsNormalised() const { return fIsNormalised; }
   double fVmin[4];
   double fVmax[4];
   double NormVariable( double x, double xmin, double xmax ) const {
      // normalise to output range: [-1, 1]
      return 2*(x - xmin)/(xmax - xmin) - 1.0;
   }

   // type of input variable: 'F' or 'I'
   char fType[4];

   // initialize internal variables
   void Initialize();
   double GetMvaValue__( const std::vector<double>& inputValues ) const;

   // private members (method specific)

   double ActivationFnc(double x) const;
   double OutputActivationFnc(double x) const;

   int fLayers;
   int fLayerSize[3];
   double fWeightMatrix0to1[10][5];   // weight matrix from layer 0 to 1
   double fWeightMatrix1to2[1][10];   // weight matrix from layer 1 to 2

};

inline void ReadMLPBNN::Initialize()
{
   // build network structure
   fLayers = 3;
   fLayerSize[0] = 5;   // 4 input variables + 1 bias node
   fLayerSize[1] = 10;  // 9 hidden neurons (HiddenLayers=N+5 with N=4) + 1 bias node
   fLayerSize[2] = 1;   // single classifier output
   // weight matrix from layer 0 to 1
   fWeightMatrix0to1[0][0] = -0.0500313211768323;
   fWeightMatrix0to1[1][0] = 1.7094566516032;
   fWeightMatrix0to1[2][0] = 0.93080142586609;
   fWeightMatrix0to1[3][0] = -0.480727310995468;
   fWeightMatrix0to1[4][0] = -2.43366441685054;
   fWeightMatrix0to1[5][0] = -2.5252185908736;
   fWeightMatrix0to1[6][0] = -0.792746559485682;
   fWeightMatrix0to1[7][0] = 2.12833556673629;
   fWeightMatrix0to1[8][0] = -2.17383777190916;
   fWeightMatrix0to1[0][1] = -1.13105790138881;
   fWeightMatrix0to1[1][1] = -1.2627263837014;
   fWeightMatrix0to1[2][1] = -0.399373485376653;
   fWeightMatrix0to1[3][1] = -0.546688911449644;
   fWeightMatrix0to1[4][1] = -0.977739357549186;
   fWeightMatrix0to1[5][1] = -0.316705000878231;
   fWeightMatrix0to1[6][1] = 0.460688878476911;
   fWeightMatrix0to1[7][1] = -0.600184930633648;
   fWeightMatrix0to1[8][1] = 1.36008294033747;
   fWeightMatrix0to1[0][2] = 0.0164823350100334;
   fWeightMatrix0to1[1][2] = 1.22292494112964;
   fWeightMatrix0to1[2][2] = -0.0739797017126651;
   fWeightMatrix0to1[3][2] = -2.20881551110752;
   fWeightMatrix0to1[4][2] = -0.699094360711102;
   fWeightMatrix0to1[5][2] = -0.617450611502115;
   fWeightMatrix0to1[6][2] = -1.09406069992126;
   fWeightMatrix0to1[7][2] = -0.244288559372016;
   fWeightMatrix0to1[8][2] = 0.851765786859981;
   fWeightMatrix0to1[0][3] = -3.05457127456281;
   fWeightMatrix0to1[1][3] = -1.57468318053161;
   fWeightMatrix0to1[2][3] = 0.734731808606291;
   fWeightMatrix0to1[3][3] = 2.54004306064239;
   fWeightMatrix0to1[4][3] = 4.41806952492387;
   fWeightMatrix0to1[5][3] = 0.739452971127836;
   fWeightMatrix0to1[6][3] = 0.196992348311525;
   fWeightMatrix0to1[7][3] = -0.563671368370465;
   fWeightMatrix0to1[8][3] = 1.33179184844964;
   fWeightMatrix0to1[0][4] = 0.274652139714499;
   fWeightMatrix0to1[1][4] = 3.0367778346693;
   fWeightMatrix0to1[2][4] = -1.03526804982809;
   fWeightMatrix0to1[3][4] = -0.519272686570194;
   fWeightMatrix0to1[4][4] = -0.728282952959732;
   fWeightMatrix0to1[5][4] = 0.147779857805122;
   fWeightMatrix0to1[6][4] = 2.00157488875002;
   fWeightMatrix0to1[7][4] = 0.338330302768732;
   fWeightMatrix0to1[8][4] = -1.06817074486656;
   // weight matrix from layer 1 to 2
   fWeightMatrix1to2[0][0] = -3.07156048041761;
   fWeightMatrix1to2[0][1] = -0.0691376707884641;
   fWeightMatrix1to2[0][2] = -0.722861328862227;
   fWeightMatrix1to2[0][3] = 1.63802320738992;
   fWeightMatrix1to2[0][4] = 5.34057061614789;
   fWeightMatrix1to2[0][5] = 2.85130125091591;
   fWeightMatrix1to2[0][6] = 2.18425548381207;
   fWeightMatrix1to2[0][7] = -0.884293411297697;
   fWeightMatrix1to2[0][8] = 0.494073713733155;
   fWeightMatrix1to2[0][9] = -0.770031394310278;
}

inline double ReadMLPBNN::GetMvaValue__( const std::vector<double>& inputValues ) const
{
   if (inputValues.size() != (unsigned int)(fLayerSize[0]-1)) {
      std::cout << "Input vector needs to be of size " << fLayerSize[0]-1 << std::endl;
      return 0;
   }

   std::array<double, 5> fWeights0 {{}};
   std::array<double, 10> fWeights1 {{}};
   std::array<double, 1> fWeights2 {{}};
   fWeights0.back() = 1.;   // bias node of the input layer
   fWeights1.back() = 1.;   // bias node of the hidden layer

   for (int i=0; i<fLayerSize[0]-1; i++)
      fWeights0[i] = inputValues[i];

   // layer 0 to 1
   for (int o=0; o<fLayerSize[1]-1; o++) {
      for (int i=0; i<fLayerSize[0]; i++) {
         double inputVal = fWeightMatrix0to1[o][i] * fWeights0[i];
         fWeights1[o] += inputVal;
      } // loop over i
      fWeights1[o] = ActivationFnc(fWeights1[o]);
   } // loop over o
   // layer 1 to 2
   for (int o=0; o<fLayerSize[2]; o++) {
      for (int i=0; i<fLayerSize[1]; i++) {
         double inputVal = fWeightMatrix1to2[o][i] * fWeights1[i];
         fWeights2[o] += inputVal;
      } // loop over i
      fWeights2[o] = OutputActivationFnc(fWeights2[o]);
   } // loop over o

   return fWeights2[0];
}

double ReadMLPBNN::ActivationFnc(double x) const {
   // hyperbolic tan
   return std::tanh(x);
}
double ReadMLPBNN::OutputActivationFnc(double x) const {
   // sigmoid
   return 1.0/(1.0+std::exp(-x));
}
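
// Taken together, the two layers evaluated in GetMvaValue__ compute
//
//    MVA(x) = sigmoid( sum_j W2[0][j] * tanh( sum_i W1[j][i] * x[i] ) )
//
// where W1 = fWeightMatrix0to1, W2 = fWeightMatrix1to2, and the last entry
// of each layer vector is a bias node fixed to 1.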

// Clean up
inline void ReadMLPBNN::Clear()
{
}
inline double ReadMLPBNN::GetMvaValue( const std::vector<double>& inputValues ) const
{
   // classifier response value
   double retval = 0;

   // classifier response, sanity check first
   if (!IsStatusClean()) {
      std::cout << "Problem in class \"" << fClassName << "\": cannot return classifier response"
                << " because status is dirty" << std::endl;
      retval = 0;
   }
   else {
      if (IsNormalised()) {
         // normalise variables
         std::vector<double> iV;
         iV.reserve(inputValues.size());
         int ivar = 0;
         for (std::vector<double>::const_iterator varIt = inputValues.begin();
              varIt != inputValues.end(); varIt++, ivar++) {
            iV.push_back(NormVariable( *varIt, fVmin[ivar], fVmax[ivar] ));
         }
         Transform( iV, -1 );
         retval = GetMvaValue__( iV );
      }
      else {
         std::vector<double> iV;
         iV.reserve(inputValues.size());
         for (std::vector<double>::const_iterator varIt = inputValues.begin();
              varIt != inputValues.end(); varIt++) {
            iV.push_back(*varIt);
         }
         Transform( iV, -1 );
         retval = GetMvaValue__( iV );
      }
   }

   return retval;
}

//_______________________________________________________________________
inline void ReadMLPBNN::InitTransform_1()
{
   // Normalization transformation, initialisation
   fMin_1[0][0] = -4.94358778;
   fMax_1[0][0] = 6.3994679451;
   fMin_1[1][0] = -8.14423561096;
   fMax_1[1][0] = 7.26972866058;
   fMin_1[2][0] = -8.14423561096;
   fMax_1[2][0] = 7.26972866058;
   fMin_1[0][1] = -3.96643972397;
   fMax_1[0][1] = 3.11266636848;
   fMin_1[1][1] = -3.25508260727;
   fMax_1[1][1] = 4.0258936882;
   fMin_1[2][1] = -3.96643972397;
   fMax_1[2][1] = 4.0258936882;
   fMin_1[0][2] = -2.78645992279;
   fMax_1[0][2] = 3.50111722946;
   fMin_1[1][2] = -5.03730010986;
   fMax_1[1][2] = 4.27845287323;
   fMin_1[2][2] = -5.03730010986;
   fMax_1[2][2] = 4.27845287323;
   fMin_1[0][3] = -2.42712664604;
   fMax_1[0][3] = 4.5351858139;
   fMin_1[1][3] = -5.95050764084;
   fMax_1[1][3] = 4.64035463333;
   fMin_1[2][3] = -5.95050764084;
   fMax_1[2][3] = 4.64035463333;
}

//_______________________________________________________________________
inline void ReadMLPBNN::Transform_1( std::vector<double>& iv, int cls ) const
{
   // Normalization transformation
   // out-of-range class indices fall back to index 2, the min/max statistics
   // of all classes combined
   if (cls < 0 || cls > 2) cls = 2;
   const int nVar = 4;

   // get indices of used variables

   // define the indices of the variables which are transformed by this transformation
   static std::vector<int> indicesGet;
   static std::vector<int> indicesPut;

   if ( indicesGet.empty() ) {
      indicesGet.reserve(fNvars);
      indicesGet.push_back( 0 );
      indicesGet.push_back( 1 );
      indicesGet.push_back( 2 );
      indicesGet.push_back( 3 );
   }
   if ( indicesPut.empty() ) {
      indicesPut.reserve(fNvars);
      indicesPut.push_back( 0 );
      indicesPut.push_back( 1 );
      indicesPut.push_back( 2 );
      indicesPut.push_back( 3 );
   }

   static std::vector<double> dv;
   dv.resize(nVar);
   for (int ivar=0; ivar<nVar; ivar++) dv[ivar] = iv[indicesGet.at(ivar)];
   for (int ivar=0; ivar<nVar; ivar++) {
      double offset = fMin_1[cls][ivar];
      double scale  = 1.0/(fMax_1[cls][ivar]-fMin_1[cls][ivar]);
      iv[indicesPut.at(ivar)] = (dv[ivar]-offset)*scale * 2 - 1;
   }
}
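
// A worked example of the mapping above (values taken from fMin_1/fMax_1 for
// cls = 2, variable 0, i.e. var1+var2): an input of 0.0 is transformed to
//
//    (0.0 - (-8.14423561096)) / (7.26972866058 - (-8.14423561096)) * 2 - 1
//    = 0.5284 * 2 - 1 = 0.0567
//
// so every variable is mapped linearly onto [-1, 1] over its training range.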

//_______________________________________________________________________
inline void ReadMLPBNN::InitTransform()
{
   InitTransform_1();
}

//_______________________________________________________________________
inline void ReadMLPBNN::Transform( std::vector<double>& iv, int sigOrBgd ) const
{
   Transform_1( iv, sigOrBgd );
}
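
// Example use of this standalone reader (a sketch: the input values shown are
// illustrative, and the include path assumes this file sits next to the
// caller):
//
//    #include "TMVAClassification_MLPBNN.class.C"
//
//    std::vector<std::string> names = { "var1+var2", "var1-var2", "var3", "var4" };
//    ReadMLPBNN mlp( names );
//
//    std::vector<double> event = { 0.5, -1.2, 0.3, 1.1 };
//    double mva = mlp.GetMvaValue( event );   // sigmoid output in (0, 1);
//                                             // larger values are more signal-like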