ROOT 6.06/09 Reference Guide
MethodCFMlpANN.cxx
1 // @(#)root/tmva $Id$
2 // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss
3 
4 /**********************************************************************************
5  * Project: TMVA - a Root-integrated toolkit for multivariate Data analysis *
6  * Package: TMVA *
7  * Class : TMVA::MethodCFMlpANN *
8  * Web : http://tmva.sourceforge.net *
9  * *
10  * Description: *
11  * Implementation (see header for description) *
12  * *
13  * Authors (alphabetical): *
14  * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland *
15  * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France *
16  * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany *
17  * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada *
18  * *
19  * Copyright (c) 2005: *
20  * CERN, Switzerland *
21  * U. of Victoria, Canada *
22  * MPI-K Heidelberg, Germany *
23  * LAPP, Annecy, France *
24  * *
25  * Redistribution and use in source and binary forms, with or without *
26  * modification, are permitted according to the terms listed in LICENSE *
27  * (http://tmva.sourceforge.net/LICENSE) *
28  **********************************************************************************/
29 
30 //_______________________________________________________________________
31 //
32 // Begin_Html
33 /*
34  Interface to the Clermont-Ferrand artificial neural network
35 
36  <p>
37  The CFMlpANN belongs to the class of Multilayer Perceptrons (MLP), which are
38  feed-forward networks according to the following propagation schema:<br>
39  <center>
40  <img vspace=10 src="gif/tmva_mlp.gif" align="bottom" alt="Schema for artificial neural network">
41  </center>
42  The input layer contains as many neurons as input variables used in the MVA.
43  The output layer contains two neurons for the signal and background
44  event classes. In between the input and output layers are a variable number
45  of <i>k</i> hidden layers with arbitrary numbers of neurons. (While the
46  structure of the input and output layers is determined by the problem, the
47  hidden layers can be configured by the user through the option string
48  of the method booking.) <br>
49 
50  As indicated in the sketch, all neuron inputs to a layer are linear
51  combinations of the neuron output of the previous layer. The transfer
52  from input to output within a neuron is performed by means of an "activation
53  function". In general, the activation function of a neuron can be
54  zero (deactivated), one (linear), or non-linear. The above example uses
55  a sigmoid activation function. The transfer function of the output layer
56  is usually linear. As a consequence, an ANN without a hidden layer should
57  give the same discrimination power as a linear discriminant analysis (Fisher).
58  In case of one hidden layer, the ANN computes a linear combination of
59  sigmoid functions. <br>
60 
61  The CFMlpANN supports only stochastic learning.
62 */
63 // End_Html
64 //_______________________________________________________________________
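//
// Illustrative booking sketch (not part of this file): the classifier is normally
// requested through the TMVA factory, using the option names declared in
// DeclareOptions() below ("NCycles", "HiddenLayers"). The surrounding factory and
// output-file setup is only assumed context from a standard TMVA training macro.
//
//    TMVA::Factory factory( "TMVAClassification", outputFile, "AnalysisType=Classification" );
//    factory.BookMethod( TMVA::Types::kCFMlpANN, "CFMlpANN",
//                        "!H:!V:NCycles=2000:HiddenLayers=N+1,N" );
//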
65 
66 #include <string>
67 #include <cstdlib>
68 #include <iostream>
69 
70 #include "TMatrix.h"
71 #include "TObjString.h"
72 #include "Riostream.h"
73 #include "TMath.h"
74 
75 #include "TMVA/ClassifierFactory.h"
76 #include "TMVA/MethodCFMlpANN.h"
78 #include "TMVA/Tools.h"
79 
80 REGISTER_METHOD(CFMlpANN)
81 
82 using std::stringstream;
83 using std::make_pair;
84 using std::atoi;
85 
86 ClassImp(TMVA::MethodCFMlpANN)
87 
88 // initialization of global variable
89 namespace TMVA {
90  Int_t MethodCFMlpANN_nsel = 0;
91 }
92 
93 TMVA::MethodCFMlpANN* TMVA::MethodCFMlpANN::fgThis = 0;
94 
95 ////////////////////////////////////////////////////////////////////////////////
96 /// standard constructor
97 /// option string: "n_training_cycles:n_hidden_layers"
98 /// default is: n_training_cycles = 5000, n_layers = 4
99 ///
100 /// * note that the number of hidden layers in the NN is:
101 /// n_hidden_layers = n_layers - 2
102 ///
103 /// * since there is one input and one output layer. The number of
104 /// nodes (neurons) is predefined to be:
105 /// n_nodes[i] = nvars + 1 - i (where i=1..n_layers)
106 ///
107 /// with nvars being the number of variables used in the NN.
108 ///
109 /// Hence, the default case is: n_neurons(layer 1 (input)) : nvars
110 /// n_neurons(layer 2 (hidden)): nvars-1
111 /// n_neurons(layer 3 (hidden)): nvars-1
112 /// n_neurons(layer 4 (out)) : 2
113 ///
114 /// This artificial neural network usually needs a relatively large
115 /// number of cycles to converge (8000 and more). Overtraining can
116 /// be efficiently tested by comparing the signal and background
117 /// output of the NN for the events that were used for training and
118 /// an independent data sample (with equal properties). If the separation
119 /// performance is significantly better for the training sample, the
120 /// NN has picked up statistical fluctuations and is hence overtrained. In
121 /// this case, the number of cycles should be reduced, or the size
122 /// of the training sample increased.
123 
124 TMVA::MethodCFMlpANN::MethodCFMlpANN( const TString& jobName,
125  const TString& methodTitle,
126  DataSetInfo& theData,
127  const TString& theOption,
128  TDirectory* theTargetDir ) :
129  TMVA::MethodBase( jobName, Types::kCFMlpANN, methodTitle, theData, theOption, theTargetDir ),
130  fData(0),
131  fClass(0),
132  fNlayers(0),
133  fNcycles(0),
134  fNodes(0),
135  fYNN(0)
136 {
137  MethodCFMlpANN_nsel = 0;
138 
139 }
140 
141 ////////////////////////////////////////////////////////////////////////////////
142 /// constructor from weight file
143 
144 TMVA::MethodCFMlpANN::MethodCFMlpANN( DataSetInfo& theData,
145  const TString& theWeightFile,
146  TDirectory* theTargetDir ):
147  TMVA::MethodBase( Types::kCFMlpANN, theData, theWeightFile, theTargetDir ),
148  fData(0),
149  fClass(0),
150  fNlayers(0),
151  fNcycles(0),
152  fNodes(0),
153  fYNN(0)
154 {
155 }
156 
157 ////////////////////////////////////////////////////////////////////////////////
158 /// CFMlpANN can handle classification with 2 classes
159 
161 {
162  if (type == Types::kClassification && numberClasses == 2) return kTRUE;
163  return kFALSE;
164 }
165 
166 ////////////////////////////////////////////////////////////////////////////////
167 /// define the options (their key words) that can be set in the option string
168 /// known options: NCycles=xx : the number of training cycles
169 /// HiddenLayers="N-1,N-2" : the specification of the hidden layers
170 
171 void TMVA::MethodCFMlpANN::DeclareOptions()
172 {
173  DeclareOptionRef( fNcycles =3000, "NCycles", "Number of training cycles" );
174  DeclareOptionRef( fLayerSpec="N,N-1", "HiddenLayers", "Specification of hidden layer architecture" );
175 }
176 
177 ////////////////////////////////////////////////////////////////////////////////
178 /// decode the options in the option string
179 
180 void TMVA::MethodCFMlpANN::ProcessOptions()
181 {
182  fNodes = new Int_t[20]; // number of nodes per layer (maximum 20 layers)
183  fNlayers = 2;
184  Int_t currentHiddenLayer = 1;
185  TString layerSpec(fLayerSpec);
186  while(layerSpec.Length()>0) {
187  TString sToAdd = "";
188  if (layerSpec.First(',')<0) {
189  sToAdd = layerSpec;
190  layerSpec = "";
191  }
192  else {
193  sToAdd = layerSpec(0,layerSpec.First(','));
194  layerSpec = layerSpec(layerSpec.First(',')+1,layerSpec.Length());
195  }
196  Int_t nNodes = 0;
197  if (sToAdd.BeginsWith("N") || sToAdd.BeginsWith("n")) { sToAdd.Remove(0,1); nNodes = GetNvar(); }
198  nNodes += atoi(sToAdd);
199  fNodes[currentHiddenLayer++] = nNodes;
200  fNlayers++;
201  }
202  fNodes[0] = GetNvar(); // number of input nodes
203  fNodes[fNlayers-1] = 2; // number of output nodes
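 // Worked example (illustration only, not from the original source): with the default
 // HiddenLayers="N,N-1" and, say, GetNvar()==4 input variables, the loop above yields
 // fNodes = {4, 4, 3, 2} and fNlayers = 4, i.e. two hidden layers with 4 and 3 neurons
 // between the 4-node input layer and the 2-node output layer.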
204 
205  if (IgnoreEventsWithNegWeightsInTraining()) {
206  Log() << kFATAL << "Mechanism to ignore events with negative weights in training not yet available for method: "
207  << GetMethodTypeName()
208  << " --> please remove \"IgnoreNegWeightsInTraining\" option from booking string."
209  << Endl;
210  }
211 
212  Log() << kINFO << "Use configuration (nodes per layer): in=";
213  for (Int_t i=0; i<fNlayers-1; i++) Log() << kINFO << fNodes[i] << ":";
214  Log() << kINFO << fNodes[fNlayers-1] << "=out" << Endl;
215 
216  // some info
217  Log() << "Use " << fNcycles << " training cycles" << Endl;
218 
219  Int_t nEvtTrain = Data()->GetNTrainingEvents();
220 
221  // note that one variable is type
222  if (nEvtTrain>0) {
223 
224  // Data LUT
225  fData = new TMatrix( nEvtTrain, GetNvar() );
226  fClass = new std::vector<Int_t>( nEvtTrain );
227 
228  // ---- fill LUTs
229 
230  UInt_t ivar;
231  for (Int_t ievt=0; ievt<nEvtTrain; ievt++) {
232  const Event * ev = GetEvent(ievt);
233 
234  // identify signal and background events
235  (*fClass)[ievt] = DataInfo().IsSignal(ev) ? 1 : 2;
236 
237  // use normalized input Data
238  for (ivar=0; ivar<GetNvar(); ivar++) {
239  (*fData)( ievt, ivar ) = ev->GetValue(ivar);
240  }
241  }
242 
243  //Log() << kVERBOSE << Data()->GetNEvtSigTrain() << " Signal and "
244  // << Data()->GetNEvtBkgdTrain() << " background" << " events in trainingTree" << Endl;
245  }
246 
247 }
248 
249 ////////////////////////////////////////////////////////////////////////////////
250 /// default initialisation called by all constructors
251 
252 void TMVA::MethodCFMlpANN::Init( void )
253 {
254  // CFMlpANN prefers normalised input variables
255  SetNormalised( kTRUE );
256 
257  // initialize all pointers
258  fgThis = this;
259 
260  // initialize dimensions
261  TMVA::MethodCFMlpANN_nsel = 0;
262 }
263 
264 ////////////////////////////////////////////////////////////////////////////////
265 /// destructor
266 
267 TMVA::MethodCFMlpANN::~MethodCFMlpANN( void )
268 {
269  delete fData;
270  delete fClass;
271  delete[] fNodes;
272 
273  if (fYNN!=0) {
274  for (Int_t i=0; i<fNlayers; i++) delete[] fYNN[i];
275  delete[] fYNN;
276  fYNN=0;
277  }
278 }
279 
280 ////////////////////////////////////////////////////////////////////////////////
281 /// training of the Clermont-Ferrand NN classifier
282 
283 void TMVA::MethodCFMlpANN::Train( void )
284 {
285  Double_t dumDat(0);
286  Int_t ntrain(Data()->GetNTrainingEvents());
287  Int_t ntest(0);
288  Int_t nvar(GetNvar());
289  Int_t nlayers(fNlayers);
290  Int_t *nodes = new Int_t[nlayers];
291  Int_t ncycles(fNcycles);
292 
293  for (Int_t i=0; i<nlayers; i++) nodes[i] = fNodes[i]; // full copy of class member
294 
295  if (fYNN != 0) {
296  for (Int_t i=0; i<fNlayers; i++) delete[] fYNN[i];
297  delete[] fYNN;
298  fYNN = 0;
299  }
300  fYNN = new Double_t*[nlayers];
301  for (Int_t layer=0; layer<nlayers; layer++)
302  fYNN[layer] = new Double_t[fNodes[layer]];
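 // fYNN[layer][node] buffers the output value of each neuron; it is (re)filled by
 // NN_ava() whenever the trained network is evaluated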
303 
304  // please check
305 #ifndef R__WIN32
306  Train_nn( &dumDat, &dumDat, &ntrain, &ntest, &nvar, &nlayers, nodes, &ncycles );
307 #else
308  Log() << kWARNING << "<Train> sorry CFMlpANN does not run on Windows" << Endl;
309 #endif
310 
311  delete [] nodes;
312 }
313 
314 ////////////////////////////////////////////////////////////////////////////////
315 /// returns CFMlpANN output (normalised within [0,1])
316 
317 Double_t TMVA::MethodCFMlpANN::GetMvaValue( Double_t* err, Double_t* errUpper )
318 {
319  Bool_t isOK = kTRUE;
320 
321  const Event* ev = GetEvent();
322 
323  // copy of input variables
324  std::vector<Double_t> inputVec( GetNvar() );
325  for (UInt_t ivar=0; ivar<GetNvar(); ivar++) inputVec[ivar] = ev->GetValue(ivar);
326 
327  Double_t myMVA = EvalANN( inputVec, isOK );
328  if (!isOK) Log() << kFATAL << "EvalANN returns (!isOK) for event " << Endl;
329 
330  // cannot determine error
331  NoErrorCalc(err, errUpper);
332 
333  return myMVA;
334 }
335 
336 ////////////////////////////////////////////////////////////////////////////////
337 /// evaluates NN value as function of input variables
338 
339 Double_t TMVA::MethodCFMlpANN::EvalANN( std::vector<Double_t>& inVar, Bool_t& isOK )
340 {
341  // hard copy of input variables (necessary because they are updated later)
342  Double_t* xeev = new Double_t[GetNvar()];
343  for (UInt_t ivar=0; ivar<GetNvar(); ivar++) xeev[ivar] = inVar[ivar];
344 
345  // ---- now apply the weights: get NN output
346  isOK = kTRUE;
347  for (UInt_t jvar=0; jvar<GetNvar(); jvar++) {
348 
349  if (fVarn_1.xmax[jvar] < xeev[jvar]) xeev[jvar] = fVarn_1.xmax[jvar];
350  if (fVarn_1.xmin[jvar] > xeev[jvar]) xeev[jvar] = fVarn_1.xmin[jvar];
351  if (fVarn_1.xmax[jvar] == fVarn_1.xmin[jvar]) {
352  isOK = kFALSE;
353  xeev[jvar] = 0;
354  }
355  else {
356  xeev[jvar] = xeev[jvar] - ((fVarn_1.xmax[jvar] + fVarn_1.xmin[jvar])/2);
357  xeev[jvar] = xeev[jvar] / ((fVarn_1.xmax[jvar] - fVarn_1.xmin[jvar])/2);
358  }
359  }
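 // at this point the inputs are scaled to [-1,1] via
 // x' = (x - (xmax+xmin)/2) / ((xmax-xmin)/2), using the variable ranges from training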
360 
361  NN_ava( xeev );
362 
363  Double_t retval = 0.5*(1.0 + fYNN[fParam_1.layerm-1][0]);
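 // maps the activation-function output from [-1,1] onto the [0,1] MVA range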
364 
365  delete [] xeev;
366 
367  return retval;
368 }
369 
370 ////////////////////////////////////////////////////////////////////////////////
371 /// auxiliary function: propagate the event values through the network
372 
373 void TMVA::MethodCFMlpANN::NN_ava( Double_t* xeev )
374 {
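 // feed-forward propagation: neuron j of layer "layer" receives
 // x = bias(layer,j) + sum_k w(layer,j,k) * y(layer-1,k) and outputs y(layer,j) = NN_fonc(layer, x)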
375  for (Int_t ivar=0; ivar<fNeur_1.neuron[0]; ivar++) fYNN[0][ivar] = xeev[ivar];
376 
377  for (Int_t layer=1; layer<fParam_1.layerm; layer++) {
378  for (Int_t j=1; j<=fNeur_1.neuron[layer]; j++) {
379 
380  Double_t x = Ww_ref(fNeur_1.ww, layer+1,j); // init with the bias layer
381 
382  for (Int_t k=1; k<=fNeur_1.neuron[layer-1]; k++) { // neurons of originating layer
383  x += fYNN[layer-1][k-1]*W_ref(fNeur_1.w, layer+1, j, k);
384  }
385  fYNN[layer][j-1] = NN_fonc( layer, x );
386  }
387  }
388 }
389 
390 ////////////////////////////////////////////////////////////////////////////////
391 /// activation function
392 
393 Double_t TMVA::MethodCFMlpANN::NN_fonc( Int_t i, Double_t u ) const
394 {
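 // the activation is the symmetric sigmoid (1 - exp(-u/T)) / (1 + exp(-u/T)) = tanh(u/(2T)),
 // with T = fDel_1.temp[i]; the first two branches clamp the result to +-1 for large |u/T|
 // to avoid overflow in the exponential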
395  Double_t f(0);
396 
397  if (u/fDel_1.temp[i] > 170) f = +1;
398  else if (u/fDel_1.temp[i] < -170) f = -1;
399  else {
400  Double_t yy = TMath::Exp(-u/fDel_1.temp[i]);
401  f = (1 - yy)/(1 + yy);
402  }
403 
404  return f;
405 }
406 
407 ////////////////////////////////////////////////////////////////////////////////
408 /// read back the weights obtained from the training from a file (stream)
409 
410 void TMVA::MethodCFMlpANN::ReadWeightsFromStream( std::istream& istr )
411 {
412  TString var;
413 
414  // read number of variables and classes
415  UInt_t nva(0), lclass(0);
416  istr >> nva >> lclass;
417 
418  if (GetNvar() != nva) // wrong file
419  Log() << kFATAL << "<ReadWeightsFromFile> mismatch in number of variables" << Endl;
420 
421  // number of output classes must be 2
422  if (lclass != 2) // wrong file
423  Log() << kFATAL << "<ReadWeightsFromFile> mismatch in number of classes" << Endl;
424 
425  // check that we are not at the end of the file
426  if (istr.eof( ))
427  Log() << kFATAL << "<ReadWeightsFromStream> reached EOF prematurely " << Endl;
428 
429  // read extrema of input variables
430  for (UInt_t ivar=0; ivar<GetNvar(); ivar++)
431  istr >> fVarn_1.xmax[ivar] >> fVarn_1.xmin[ivar];
432 
433  // read number of layers (sum of: input + output + hidden)
434  istr >> fParam_1.layerm;
435 
436  if (fYNN != 0) {
437  for (Int_t i=0; i<fNlayers; i++) delete[] fYNN[i];
438  delete[] fYNN;
439  fYNN = 0;
440  }
441  fYNN = new Double_t*[fParam_1.layerm];
442  for (Int_t layer=0; layer<fParam_1.layerm; layer++) {
443  // read number of neurons for each layer
444  // coverity[tainted_data_argument]
445  istr >> fNeur_1.neuron[layer];
446  fYNN[layer] = new Double_t[fNeur_1.neuron[layer]];
447  }
448 
449  // to read dummy lines
450  const Int_t nchar( 100 );
451  char* dumchar = new char[nchar];
452 
453  // read weights
454  for (Int_t layer=1; layer<=fParam_1.layerm-1; layer++) {
455 
456  Int_t nq = fNeur_1.neuron[layer]/10;
457  Int_t nr = fNeur_1.neuron[layer] - nq*10;
458 
459  Int_t kk(0);
460  if (nr==0) kk = nq;
461  else kk = nq+1;
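 // kk = number of lines spanned by this layer's weights in the file
 // (the file stores at most 10 values per line, so kk = ceil(neuron[layer]/10))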
462 
463  for (Int_t k=1; k<=kk; k++) {
464  Int_t jmin = 10*k - 9;
465  Int_t jmax = 10*k;
466  if (fNeur_1.neuron[layer]<jmax) jmax = fNeur_1.neuron[layer];
467  for (Int_t j=jmin; j<=jmax; j++) {
468  istr >> Ww_ref(fNeur_1.ww, layer+1, j);
469  }
470  for (Int_t i=1; i<=fNeur_1.neuron[layer-1]; i++) {
471  for (Int_t j=jmin; j<=jmax; j++) {
472  istr >> W_ref(fNeur_1.w, layer+1, j, i);
473  }
474  }
475  // skip two empty lines
476  istr.getline( dumchar, nchar );
477  }
478  }
479 
480  for (Int_t layer=0; layer<fParam_1.layerm; layer++) {
481 
482  // skip 2 empty lines
483  istr.getline( dumchar, nchar );
484  istr.getline( dumchar, nchar );
485 
486  istr >> fDel_1.temp[layer];
487  }
488 
489  // sanity check
490  if ((Int_t)GetNvar() != fNeur_1.neuron[0]) {
491  Log() << kFATAL << "<ReadWeightsFromFile> mismatch in zeroth layer:"
492  << GetNvar() << " " << fNeur_1.neuron[0] << Endl;
493  }
494 
495  fNlayers = fParam_1.layerm;
496  delete[] dumchar;
497 }
498 
499 ////////////////////////////////////////////////////////////////////////////////
500 /// data interface function
501 
502 Int_t TMVA::MethodCFMlpANN::DataInterface( Double_t* /*tout2*/, Double_t* /*tin2*/,
503  Int_t* /* icode*/, Int_t* /*flag*/,
504  Int_t* /*nalire*/, Int_t* nvar,
505  Double_t* xpg, Int_t* iclass, Int_t* ikend )
506 {
507  // icode and ikend are dummies needed to match f2c mlpl3 functions
508  *ikend = 0;
509 
510  // retrieve pointer to current object (CFMlpANN must be a singleton class!)
511  TMVA::MethodCFMlpANN* opt = TMVA::MethodCFMlpANN::This();
512 
513  // sanity checks
514  if (0 == xpg) {
515  Log() << kFATAL << "ERROR in MethodCFMlpANN_DataInterface zero pointer xpg" << Endl;
516  }
517  if (*nvar != (Int_t)opt->GetNvar()) {
518  Log() << kFATAL << "ERROR in MethodCFMlpANN_DataInterface mismatch in num of variables: "
519  << *nvar << " " << opt->GetNvar() << Endl;
520  }
521 
522  // fill variables
523  *iclass = (int)opt->GetClass( TMVA::MethodCFMlpANN_nsel );
524  for (UInt_t ivar=0; ivar<opt->GetNvar(); ivar++)
525  xpg[ivar] = (double)opt->GetData( TMVA::MethodCFMlpANN_nsel, ivar );
526 
527  ++TMVA::MethodCFMlpANN_nsel;
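 // the counter incremented above is the running index of the training event handed to
 // the f2c training routines; it is reset to zero in Init()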
528 
529  return 0;
530 }
531 
532 ////////////////////////////////////////////////////////////////////////////////
533 /// write weights to xml file
534 
535 void TMVA::MethodCFMlpANN::AddWeightsXMLTo( void* parent ) const
536 {
537  void *wght = gTools().AddChild(parent, "Weights");
538  gTools().AddAttr(wght,"NVars",fParam_1.nvar);
539  gTools().AddAttr(wght,"NClasses",fParam_1.lclass);
540  gTools().AddAttr(wght,"NLayers",fParam_1.layerm);
541  void* minmaxnode = gTools().AddChild(wght, "VarMinMax");
542  stringstream s;
543  s.precision( 16 );
544  for (Int_t ivar=0; ivar<fParam_1.nvar; ivar++)
545  s << std::scientific << fVarn_1.xmin[ivar] << " " << fVarn_1.xmax[ivar] << " ";
546  gTools().AddRawLine( minmaxnode, s.str().c_str() );
547  void* neurons = gTools().AddChild(wght, "NNeurons");
548  stringstream n;
549  n.precision( 16 );
550  for (Int_t layer=0; layer<fParam_1.layerm; layer++)
551  n << std::scientific << fNeur_1.neuron[layer] << " ";
552  gTools().AddRawLine( neurons, n.str().c_str() );
553  for (Int_t layer=1; layer<fParam_1.layerm; layer++) {
554  void* layernode = gTools().AddChild(wght, "Layer"+gTools().StringFromInt(layer));
555  gTools().AddAttr(layernode,"NNeurons",fNeur_1.neuron[layer]);
556  void* neuronnode=NULL;
557  for (Int_t neuron=0; neuron<fNeur_1.neuron[layer]; neuron++) {
558  neuronnode = gTools().AddChild(layernode,"Neuron"+gTools().StringFromInt(neuron));
559  stringstream weights;
560  weights.precision( 16 );
561  weights << std::scientific << Ww_ref(fNeur_1.ww, layer+1, neuron+1);
562  for (Int_t i=0; i<fNeur_1.neuron[layer-1]; i++) {
563  weights << " " << std::scientific << W_ref(fNeur_1.w, layer+1, neuron+1, i+1);
564  }
565  gTools().AddRawLine( neuronnode, weights.str().c_str() );
566  }
567  }
568  void* tempnode = gTools().AddChild(wght, "LayerTemp");
569  stringstream temp;
570  temp.precision( 16 );
571  for (Int_t layer=0; layer<fParam_1.layerm; layer++) {
572  temp << std::scientific << fDel_1.temp[layer] << " ";
573  }
574  gTools().AddRawLine(tempnode, temp.str().c_str() );
575 }
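// Schematic layout of the "Weights" node written above (values abbreviated; this sketch
// is for orientation only and is not itself part of the I/O code):
//
//   <Weights NVars="..." NClasses="2" NLayers="...">
//     <VarMinMax> xmin_0 xmax_0  xmin_1 xmax_1 ... </VarMinMax>
//     <NNeurons>  n_0 n_1 ... n_{NLayers-1} </NNeurons>
//     <Layer1 NNeurons="n_1">
//       <Neuron0> bias w_1 ... w_{n_0} </Neuron0>
//       ...
//     </Layer1>
//     ... (one block per hidden and output layer) ...
//     <LayerTemp> T_0 T_1 ... T_{NLayers-1} </LayerTemp>
//   </Weights>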
576 ////////////////////////////////////////////////////////////////////////////////
577 /// read weights from xml file
578 
579 void TMVA::MethodCFMlpANN::ReadWeightsFromXML( void* wghtnode )
580 {
581  gTools().ReadAttr( wghtnode, "NLayers",fParam_1.layerm );
582  void* minmaxnode = gTools().GetChild(wghtnode);
583  const char* minmaxcontent = gTools().GetContent(minmaxnode);
584  stringstream content(minmaxcontent);
585  for (UInt_t ivar=0; ivar<GetNvar(); ivar++)
586  content >> fVarn_1.xmin[ivar] >> fVarn_1.xmax[ivar];
587  if (fYNN != 0) {
588  for (Int_t i=0; i<fNlayers; i++) delete[] fYNN[i];
589  delete[] fYNN;
590  fYNN = 0;
591  }
592  fYNN = new Double_t*[fParam_1.layerm];
593  void *layernode=gTools().GetNextChild(minmaxnode);
594  const char* neuronscontent = gTools().GetContent(layernode);
595  stringstream ncontent(neuronscontent);
596  for (Int_t layer=0; layer<fParam_1.layerm; layer++) {
597  // read number of neurons for each layer;
598  // coverity[tainted_data_argument]
599  ncontent >> fNeur_1.neuron[layer];
600  fYNN[layer] = new Double_t[fNeur_1.neuron[layer]];
601  }
602  for (Int_t layer=1; layer<fParam_1.layerm; layer++) {
603  layernode=gTools().GetNextChild(layernode);
604  void* neuronnode=NULL;
605  neuronnode = gTools().GetChild(layernode);
606  for (Int_t neuron=0; neuron<fNeur_1.neuron[layer]; neuron++) {
607  const char* neuronweights = gTools().GetContent(neuronnode);
608  stringstream weights(neuronweights);
609  weights >> Ww_ref(fNeur_1.ww, layer+1, neuron+1);
610  for (Int_t i=0; i<fNeur_1.neuron[layer-1]; i++) {
611  weights >> W_ref(fNeur_1.w, layer+1, neuron+1, i+1);
612  }
613  neuronnode=gTools().GetNextChild(neuronnode);
614  }
615  }
616  void* tempnode=gTools().GetNextChild(layernode);
617  const char* temp = gTools().GetContent(tempnode);
618  stringstream t(temp);
619  for (Int_t layer=0; layer<fParam_1.layerm; layer++) {
620  t >> fDel_1.temp[layer];
621  }
622  fNlayers = fParam_1.layerm;
623 }
624 
625 ////////////////////////////////////////////////////////////////////////////////
626 /// write the weights of the neural net
627 
628 void TMVA::MethodCFMlpANN::PrintWeights( std::ostream & o ) const
629 {
630  // write number of variables and classes
631  o << "Number of vars " << fParam_1.nvar << std::endl;
632  o << "Output nodes " << fParam_1.lclass << std::endl;
633 
634  // write extrema of input variables
635  for (Int_t ivar=0; ivar<fParam_1.nvar; ivar++)
636  o << "Var " << ivar << " [" << fVarn_1.xmin[ivar] << " - " << fVarn_1.xmax[ivar] << "]" << std::endl;
637 
638  // write number of layers (sum of: input + output + hidden)
639  o << "Number of layers " << fParam_1.layerm << std::endl;
640 
641  o << "Nodes per layer ";
642  for (Int_t layer=0; layer<fParam_1.layerm; layer++)
643  // write number of neurons for each layer
644  o << fNeur_1.neuron[layer] << " ";
645  o << std::endl;
646 
647  // write weights
648  for (Int_t layer=1; layer<=fParam_1.layerm-1; layer++) {
649 
650  Int_t nq = fNeur_1.neuron[layer]/10;
651  Int_t nr = fNeur_1.neuron[layer] - nq*10;
652 
653  Int_t kk(0);
654  if (nr==0) kk = nq;
655  else kk = nq+1;
656 
657  for (Int_t k=1; k<=kk; k++) {
658  Int_t jmin = 10*k - 9;
659  Int_t jmax = 10*k;
660  Int_t i, j;
661  if (fNeur_1.neuron[layer]<jmax) jmax = fNeur_1.neuron[layer];
662  for (j=jmin; j<=jmax; j++) {
663 
664  //o << fNeur_1.ww[j*max_nLayers_ + layer - 6] << " ";
665  o << Ww_ref(fNeur_1.ww, layer+1, j) << " ";
666 
667  }
668  o << std::endl;
669  //for (i=1; i<=fNeur_1.neuron[layer-1]; i++) {
670  for (i=1; i<=fNeur_1.neuron[layer-1]; i++) {
671  for (j=jmin; j<=jmax; j++) {
672  // o << fNeur_1.w[(i*max_nNodes_ + j)*max_nLayers_ + layer - 186] << " ";
673  o << W_ref(fNeur_1.w, layer+1, j, i) << " ";
674  }
675  o << std::endl;
676  }
677 
678  // skip two empty lines
679  o << std::endl;
680  }
681  }
682  for (Int_t layer=0; layer<fParam_1.layerm; layer++) {
683  o << "Del.temp in layer " << layer << " : " << fDel_1.temp[layer] << std::endl;
684  }
685 }
686 ////////////////////////////////////////////////////////////////////////////////
687 /// static pointer to this object (required for external functions)
688 
689 TMVA::MethodCFMlpANN* TMVA::MethodCFMlpANN::This( void )
690 {
691  return fgThis;
692 }
693 void TMVA::MethodCFMlpANN::MakeClassSpecific( std::ostream& fout, const TString& className ) const
694 {
695  // write specific classifier response
696  fout << " // not implemented for class: \"" << className << "\"" << std::endl;
697  fout << "};" << std::endl;
698 }
699 
700 ////////////////////////////////////////////////////////////////////////////////
701 /// write specific classifier response for header
702 
703 void TMVA::MethodCFMlpANN::MakeClassSpecificHeader( std::ostream& , const TString& ) const
704 {
705 }
706 
707 ////////////////////////////////////////////////////////////////////////////////
708 /// get help message text
709 ///
710 /// typical length of text line:
711 /// "|--------------------------------------------------------------|"
712 
713 void TMVA::MethodCFMlpANN::GetHelpMessage() const
714 {
715  Log() << Endl;
716  Log() << gTools().Color("bold") << "--- Short description:" << gTools().Color("reset") << Endl;
717  Log() << Endl;
718  Log() << "<None>" << Endl;
719  Log() << Endl;
720  Log() << gTools().Color("bold") << "--- Performance optimisation:" << gTools().Color("reset") << Endl;
721  Log() << Endl;
722  Log() << "<None>" << Endl;
723  Log() << Endl;
724  Log() << gTools().Color("bold") << "--- Performance tuning via configuration options:" << gTools().Color("reset") << Endl;
725  Log() << Endl;
726  Log() << "<None>" << Endl;
727 }