MethodANNBase.cxx
1 // @(#)root/tmva $Id$
2 // Author: Andreas Hoecker, Peter Speckmayer, Matt Jachowski, Jan Therhaag, Jiahang Zhong
3 
4 /**********************************************************************************
5  * Project: TMVA - a Root-integrated toolkit for multivariate data analysis *
6  * Package: TMVA *
7  * Class : MethodANNBase *
8  * Web : http://tmva.sourceforge.net *
9  * *
10  * Description: *
11  * Artificial neural network base class for the discrimination of signal *
12  * from background. *
13  * *
14  * Authors (alphabetical): *
15  * Krzysztof Danielowski <danielow@cern.ch> - IFJ & AGH, Poland *
16  * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland *
17  * Matt Jachowski <jachowski@stanford.edu> - Stanford University, USA *
18  * Kamil Kraszewski <kalq@cern.ch> - IFJ & UJ, Poland *
19  * Maciej Kruk <mkruk@cern.ch> - IFJ & AGH, Poland *
20  * Peter Speckmayer <peter.speckmayer@cern.ch> - CERN, Switzerland *
21  * Joerg Stelzer <stelzer@cern.ch> - DESY, Germany *
22  * Jan Therhaag <Jan.Therhaag@cern.ch> - U of Bonn, Germany *
23  * Jiahang Zhong <Jiahang.Zhong@cern.ch> - Academia Sinica, Taipei *
24  * *
25  * Copyright (c) 2005-2011: *
26  * CERN, Switzerland *
27  * U. of Bonn, Germany *
28  * *
29  * Redistribution and use in source and binary forms, with or without *
30  * modification, are permitted according to the terms listed in LICENSE *
31  * (http://tmva.sourceforge.net/LICENSE) *
32  **********************************************************************************/
33 
34 /*! \class TMVA::MethodANNBase
35 \ingroup TMVA
36 
37 Base class for all TMVA methods using artificial neural networks.
38 
39 */
40 
41 #include "TMVA/MethodBase.h"
42 
43 #include "TMVA/Configurable.h"
44 #include "TMVA/DataSetInfo.h"
45 #include "TMVA/MethodANNBase.h"
46 #include "TMVA/MsgLogger.h"
47 #include "TMVA/TNeuron.h"
48 #include "TMVA/TSynapse.h"
49 #include "TMVA/TActivationChooser.h"
50 #include "TMVA/TActivationTanh.h"
51 #include "TMVA/Types.h"
52 #include "TMVA/Tools.h"
53 #include "TMVA/TNeuronInputChooser.h"
54 #include "TMVA/Ranking.h"
55 #include "TMVA/Version.h"
56 
57 #include "TString.h"
58 #include "TTree.h"
59 #include "TDirectory.h"
60 #include "Riostream.h"
61 #include "TRandom3.h"
62 #include "TH2F.h"
63 #include "TH1.h"
64 #include "TMath.h"
65 #include "TMatrixT.h"
66 
67 #include <vector>
68 #include <cstdlib>
69 #include <stdexcept>
70 #if __cplusplus > 199711L
71 #include <atomic>
72 #endif
73 
74 
75 using std::vector;
76 
77 ClassImp(TMVA::MethodANNBase);
78 
79 ////////////////////////////////////////////////////////////////////////////////
80 /// standard constructor
81 /// Note: Right now it is an option to choose the neuron input function,
82 /// but only the input function "sum" leads to weight convergence --
83 /// otherwise the weights go to nan and lead to an ABORT.
84 
85 TMVA::MethodANNBase::MethodANNBase( const TString& jobName,
86  Types::EMVA methodType,
87  const TString& methodTitle,
88  DataSetInfo& theData,
89  const TString& theOption )
90 : TMVA::MethodBase( jobName, methodType, methodTitle, theData, theOption)
91  , fEstimator(kMSE)
92  , fUseRegulator(kFALSE)
93  , fRandomSeed(0)
94 {
95  InitANNBase();
96 
97  DeclareOptions();
98 }
99 
100 ////////////////////////////////////////////////////////////////////////////////
101 /// construct the Method from the weight file
102 
103 TMVA::MethodANNBase::MethodANNBase( Types::EMVA methodType,
104  DataSetInfo& theData,
105  const TString& theWeightFile)
106  : TMVA::MethodBase( methodType, theData, theWeightFile)
107  , fEstimator(kMSE)
108  , fUseRegulator(kFALSE)
109  , fRandomSeed(0)
110 {
111  InitANNBase();
112 
113  DeclareOptions();
114 }
115 
116 ////////////////////////////////////////////////////////////////////////////////
117 /// define the options (their key words) that can be set in the option string.
118 /// Here the options valid for ALL MVA methods are declared.
119 ///
120 /// known options:
121 ///
122 /// - NCycles=xx : the number of training cycles
123 /// - Normalize=kTRUE,kFALSE : whether normalised input variables should be used
124 /// - HiddenLayers="N-1,N-2" : the specification of the hidden layer architecture
125 /// - NeuronType=sigmoid,tanh,radial,linear : the type of activation function
126 ///   used at the neuron
127 
128 void TMVA::MethodANNBase::DeclareOptions()
129 {
130  DeclareOptionRef( fNcycles = 500, "NCycles", "Number of training cycles" );
131  DeclareOptionRef( fLayerSpec = "N,N-1", "HiddenLayers", "Specification of hidden layer architecture" );
132  DeclareOptionRef( fNeuronType = "sigmoid", "NeuronType", "Neuron activation function type" );
133  DeclareOptionRef( fRandomSeed = 1, "RandomSeed", "Random seed for initial synapse weights (0 means unique seed for each run; default value '1')");
134 
135  DeclareOptionRef(fEstimatorS="MSE", "EstimatorType",
136  "MSE (Mean Square Estimator) for Gaussian Likelihood or CE(Cross-Entropy) for Bernoulli Likelihood" ); //zjh
137  AddPreDefVal(TString("MSE")); //zjh
138  AddPreDefVal(TString("CE")); //zjh
139 
140 
141  TActivationChooser aChooser;
142  std::vector<TString>* names = aChooser.GetAllActivationNames();
143  Int_t nTypes = names->size();
144  for (Int_t i = 0; i < nTypes; i++)
145  AddPreDefVal(names->at(i));
146  delete names;
147 
148  DeclareOptionRef(fNeuronInputType="sum", "NeuronInputType","Neuron input function type");
149  TNeuronInputChooser iChooser;
150  names = iChooser.GetAllNeuronInputNames();
151  nTypes = names->size();
152  for (Int_t i = 0; i < nTypes; i++) AddPreDefVal(names->at(i));
153  delete names;
154 }
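// Usage sketch (editorial, not part of the original file): the options
// declared above are set through the usual TMVA option string when booking a
// concrete ANN method such as the MLP; the factory and dataloader names are
// illustrative.
//
//    factory.BookMethod( dataloader, TMVA::Types::kMLP, "MLP",
//                        "NCycles=600:HiddenLayers=N,N-1:NeuronType=tanh:"
//                        "EstimatorType=CE:RandomSeed=1" );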
155 
156 
157 ////////////////////////////////////////////////////////////////////////////////
158 /// do nothing specific at this moment
159 
160 void TMVA::MethodANNBase::ProcessOptions()
161 {
162  if ( DoRegression() || DoMulticlass()) fEstimatorS = "MSE"; //zjh
163  else fEstimatorS = "CE" ; //hhv
164  if (fEstimatorS == "MSE" ) fEstimator = kMSE;
165  else if (fEstimatorS == "CE") fEstimator = kCE; //zjh
166  std::vector<Int_t>* layout = ParseLayoutString(fLayerSpec);
167  BuildNetwork(layout);
168  delete layout;
169 }
170 
171 ////////////////////////////////////////////////////////////////////////////////
172 /// parse layout specification string and return a vector, each entry
173 /// containing the number of neurons to go in each successive layer
174 
175 std::vector<Int_t>* TMVA::MethodANNBase::ParseLayoutString(TString layerSpec)
176 {
177  std::vector<Int_t>* layout = new std::vector<Int_t>();
178  layout->push_back((Int_t)GetNvar());
179  while(layerSpec.Length()>0) {
180  TString sToAdd="";
181  if (layerSpec.First(',')<0) {
182  sToAdd = layerSpec;
183  layerSpec = "";
184  }
185  else {
186  sToAdd = layerSpec(0,layerSpec.First(','));
187  layerSpec = layerSpec(layerSpec.First(',')+1,layerSpec.Length());
188  }
189  int nNodes = 0;
190  if (sToAdd.BeginsWith("n") || sToAdd.BeginsWith("N")) { sToAdd.Remove(0,1); nNodes = GetNvar(); }
191  nNodes += atoi(sToAdd);
192  layout->push_back(nNodes);
193  }
194  if( DoRegression() )
195  layout->push_back( DataInfo().GetNTargets() ); // one output node for each target
196  else if( DoMulticlass() )
197  layout->push_back( DataInfo().GetNClasses() ); // one output node for each class
198  else
199  layout->push_back(1); // one output node (for signal/background classification)
200 
201  int n = 0;
202  for( std::vector<Int_t>::iterator it = layout->begin(); it != layout->end(); ++it ){
203  n++;
204  }
205 
206  return layout;
207 }
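// Worked example (illustrative): with GetNvar() == 4 and a classification
// task, ParseLayoutString("N,N-1") returns {4, 4, 3, 1}:
//   "N"   -> 4 + atoi("")   = 4 neurons in the first hidden layer
//   "N-1" -> 4 + atoi("-1") = 3 neurons in the second hidden layer
// plus the leading input layer (4 neurons) and the single output node.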
208 
209 ////////////////////////////////////////////////////////////////////////////////
210 /// initialize ANNBase object
211 
212 void TMVA::MethodANNBase::InitANNBase()
213 {
214  fNetwork = NULL;
215  frgen = NULL;
216  fActivation = NULL;
217  fOutput = NULL; //zjh
218  fIdentity = NULL;
219  fInputCalculator = NULL;
220  fSynapses = NULL;
221  fEstimatorHistTrain = NULL;
222  fEstimatorHistTest = NULL;
223 
224  // reset monitoring histogram vectors
225  fEpochMonHistS.clear();
226  fEpochMonHistB.clear();
227  fEpochMonHistW.clear();
228 
229  // these will be set in BuildNetwork()
230  fInputLayer = NULL;
231  fOutputNeurons.clear();
232 
233  frgen = new TRandom3(fRandomSeed);
234 
235  fSynapses = new TObjArray();
236 }
237 
238 ////////////////////////////////////////////////////////////////////////////////
239 /// destructor
240 
241 TMVA::MethodANNBase::~MethodANNBase()
242 {
243  DeleteNetwork();
244 }
245 
246 ////////////////////////////////////////////////////////////////////////////////
247 /// delete/clear network
248 
249 void TMVA::MethodANNBase::DeleteNetwork()
250 {
251  if (fNetwork != NULL) {
252  TObjArray *layer;
253  Int_t numLayers = fNetwork->GetEntriesFast();
254  for (Int_t i = 0; i < numLayers; i++) {
255  layer = (TObjArray*)fNetwork->At(i);
256  DeleteNetworkLayer(layer);
257  }
258  delete fNetwork;
259  }
260 
261  if (frgen != NULL) delete frgen;
262  if (fActivation != NULL) delete fActivation;
263  if (fOutput != NULL) delete fOutput; //zjh
264  if (fIdentity != NULL) delete fIdentity;
265  if (fInputCalculator != NULL) delete fInputCalculator;
266  if (fSynapses != NULL) delete fSynapses;
267 
268  fNetwork = NULL;
269  frgen = NULL;
270  fActivation = NULL;
271  fOutput = NULL; //zjh
272  fIdentity = NULL;
273  fInputCalculator = NULL;
274  fSynapses = NULL;
275 }
276 
277 ////////////////////////////////////////////////////////////////////////////////
278 /// delete a network layer
279 
280 void TMVA::MethodANNBase::DeleteNetworkLayer( TObjArray*& layer )
281 {
282  TNeuron* neuron;
283  Int_t numNeurons = layer->GetEntriesFast();
284  for (Int_t i = 0; i < numNeurons; i++) {
285  neuron = (TNeuron*)layer->At(i);
286  neuron->DeletePreLinks();
287  delete neuron;
288  }
289  delete layer;
290 }
291 
292 ////////////////////////////////////////////////////////////////////////////////
293 /// build network given a layout (number of neurons in each layer)
294 /// and optional weights array
295 
296 void TMVA::MethodANNBase::BuildNetwork( std::vector<Int_t>* layout, std::vector<Double_t>* weights, Bool_t fromFile )
297 {
298  if (fEstimatorS == "MSE") fEstimator = kMSE; //zjh
299  else if (fEstimatorS == "CE") fEstimator = kCE; //zjh
300  else Log()<<kWARNING<<"fEstimator="<<fEstimator<<"\tfEstimatorS="<<fEstimatorS<<Endl;
301  if (fEstimator!=kMSE && fEstimator!=kCE) Log()<<kWARNING<<"Estimator type unspecified \t"<<Endl; //zjh
302 
303 
304  Log() << kHEADER << "Building Network. " << Endl;
305 
306  DeleteNetwork();
307  InitANNBase();
308 
309  // set activation and input functions
310  TActivationChooser aChooser;
311  fActivation = aChooser.CreateActivation(fNeuronType);
312  fIdentity = aChooser.CreateActivation("linear");
313  if (fEstimator==kMSE) fOutput = aChooser.CreateActivation("linear"); //zjh
314  else if (fEstimator==kCE) fOutput = aChooser.CreateActivation("sigmoid"); //zjh
315  TNeuronInputChooser iChooser;
316  fInputCalculator = iChooser.CreateNeuronInput(fNeuronInputType);
317 
318  fNetwork = new TObjArray();
319  fRegulatorIdx.clear();
320  fRegulators.clear();
321  BuildLayers( layout, fromFile );
322 
323  // cache input layer and output neuron for fast access
324  fInputLayer = (TObjArray*)fNetwork->At(0);
325  TObjArray* outputLayer = (TObjArray*)fNetwork->At(fNetwork->GetEntriesFast()-1);
326  fOutputNeurons.clear();
327  for (Int_t i = 0; i < outputLayer->GetEntries(); i++) {
328  fOutputNeurons.push_back( (TNeuron*)outputLayer->At(i) );
329  }
330 
331  if (weights == NULL) InitWeights();
332  else ForceWeights(weights);
333 }
334 
335 ////////////////////////////////////////////////////////////////////////////////
336 /// build the network layers
337 
338 void TMVA::MethodANNBase::BuildLayers( std::vector<Int_t>* layout, Bool_t fromFile )
339 {
340  TObjArray* curLayer;
341  TObjArray* prevLayer = NULL;
342 
343  Int_t numLayers = layout->size();
344 
345  for (Int_t i = 0; i < numLayers; i++) {
346  curLayer = new TObjArray();
347  BuildLayer(layout->at(i), curLayer, prevLayer, i, numLayers, fromFile);
348  prevLayer = curLayer;
349  fNetwork->Add(curLayer);
350  }
351 
352  // cache pointers to synapses for fast access, the order matters
353  for (Int_t i = 0; i < numLayers; i++) {
354  TObjArray* layer = (TObjArray*)fNetwork->At(i);
355  Int_t numNeurons = layer->GetEntriesFast();
356  if (i!=0 && i!=numLayers-1) fRegulators.push_back(0.); //zjh
357  for (Int_t j = 0; j < numNeurons; j++) {
358  if (i==0) fRegulators.push_back(0.);//zjh
359  TNeuron* neuron = (TNeuron*)layer->At(j);
360  Int_t numSynapses = neuron->NumPostLinks();
361  for (Int_t k = 0; k < numSynapses; k++) {
362  TSynapse* synapse = neuron->PostLinkAt(k);
363  fSynapses->Add(synapse);
364  fRegulatorIdx.push_back(fRegulators.size()-1);//zjh
365  }
366  }
367  }
368 }
369 
370 ////////////////////////////////////////////////////////////////////////////////
371 /// build a single layer with neurons and synapses connecting this
372 /// layer to the previous layer
373 
374 void TMVA::MethodANNBase::BuildLayer( Int_t numNeurons, TObjArray* curLayer,
375  TObjArray* prevLayer, Int_t layerIndex,
376  Int_t numLayers, Bool_t fromFile )
377 {
378  TNeuron* neuron;
379  for (Int_t j = 0; j < numNeurons; j++) {
380  if (fromFile && (layerIndex != numLayers-1) && (j==numNeurons-1)){
381  neuron = new TNeuron();
382  neuron->SetActivationEqn(fIdentity);
383  neuron->SetBiasNeuron();
384  neuron->ForceValue(1.0);
385  curLayer->Add(neuron);
386  }
387  else {
388  neuron = new TNeuron();
389  neuron->SetInputCalculator(fInputCalculator);
390 
391  // input layer
392  if (layerIndex == 0) {
393  neuron->SetActivationEqn(fIdentity);
394  neuron->SetInputNeuron();
395  }
396  else {
397  // output layer
398  if (layerIndex == numLayers-1) {
399  neuron->SetOutputNeuron();
400  neuron->SetActivationEqn(fOutput); //zjh
401  }
402  // hidden layers
403  else neuron->SetActivationEqn(fActivation);
404  AddPreLinks(neuron, prevLayer);
405  }
406 
407  curLayer->Add(neuron);
408  }
409  }
410 
411  // add bias neuron (except to output layer)
412  if(!fromFile){
413  if (layerIndex != numLayers-1) {
414  neuron = new TNeuron();
415  neuron->SetActivationEqn(fIdentity);
416  neuron->SetBiasNeuron();
417  neuron->ForceValue(1.0);
418  curLayer->Add(neuron);
419  }
420  }
421 }
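// Note on the resulting topology: every layer except the output layer ends
// with one extra bias neuron whose value is forced to 1.0, so each
// downstream neuron's bias is carried by an ordinary synapse weight rather
// than by a separate parameter.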
422 
423 ////////////////////////////////////////////////////////////////////////////////
424 /// add synapses connecting a neuron to its preceding layer
425 
426 void TMVA::MethodANNBase::AddPreLinks( TNeuron* neuron, TObjArray* prevLayer )
427 {
428  TSynapse* synapse;
429  int numNeurons = prevLayer->GetEntriesFast();
430  TNeuron* preNeuron;
431 
432  for (Int_t i = 0; i < numNeurons; i++) {
433  preNeuron = (TNeuron*)prevLayer->At(i);
434  synapse = new TSynapse();
435  synapse->SetPreNeuron(preNeuron);
436  synapse->SetPostNeuron(neuron);
437  preNeuron->AddPostLink(synapse);
438  neuron->AddPreLink(synapse);
439  }
440 }
441 
442 ////////////////////////////////////////////////////////////////////////////////
443 /// initialize the synapse weights randomly
444 
445 void TMVA::MethodANNBase::InitWeights()
446 {
447  PrintMessage("Initializing weights");
448 
449  // init synapse weights
450  Int_t numSynapses = fSynapses->GetEntriesFast();
451  TSynapse* synapse;
452  for (Int_t i = 0; i < numSynapses; i++) {
453  synapse = (TSynapse*)fSynapses->At(i);
454  synapse->SetWeight(4.0*frgen->Rndm() - 2.0);
455  }
456 }
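// (Editorial note: each synapse weight above is drawn uniformly from [-2, 2).)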
457 
458 ////////////////////////////////////////////////////////////////////////////////
459 /// force the synapse weights
460 
461 void TMVA::MethodANNBase::ForceWeights(std::vector<Double_t>* weights)
462 {
463  PrintMessage("Forcing weights");
464 
465  Int_t numSynapses = fSynapses->GetEntriesFast();
466  TSynapse* synapse;
467  for (Int_t i = 0; i < numSynapses; i++) {
468  synapse = (TSynapse*)fSynapses->At(i);
469  synapse->SetWeight(weights->at(i));
470  }
471 }
472 
473 ////////////////////////////////////////////////////////////////////////////////
474 /// force the input values of the input neurons:
475 /// set the value for each input neuron from the current event
476 
477 void TMVA::MethodANNBase::ForceNetworkInputs( const Event* ev, Int_t ignoreIndex )
478 {
479  Double_t x;
480  TNeuron* neuron;
481 
482  // const Event* ev = GetEvent();
483  for (UInt_t j = 0; j < GetNvar(); j++) {
484 
485  x = (j != (UInt_t)ignoreIndex)?ev->GetValue(j):0;
486 
487  neuron = GetInputNeuron(j);
488  neuron->ForceValue(x);
489  }
490 }
491 
492 ////////////////////////////////////////////////////////////////////////////////
493 /// calculate input values to each neuron
494 
495 void TMVA::MethodANNBase::ForceNetworkCalculations()
496 {
497  TObjArray* curLayer;
498  TNeuron* neuron;
499  Int_t numLayers = fNetwork->GetEntriesFast();
500  Int_t numNeurons;
501 
502  for (Int_t i = 0; i < numLayers; i++) {
503  curLayer = (TObjArray*)fNetwork->At(i);
504  numNeurons = curLayer->GetEntriesFast();
505 
506  for (Int_t j = 0; j < numNeurons; j++) {
507  neuron = (TNeuron*) curLayer->At(j);
508  neuron->CalculateValue();
509  neuron->CalculateActivationValue();
510 
511  }
512  }
513 }
514 
515 ////////////////////////////////////////////////////////////////////////////////
516 /// print messages, turn off printing by setting verbose and debug flag appropriately
517 
518 void TMVA::MethodANNBase::PrintMessage( TString message, Bool_t force ) const
519 {
520  if (Verbose() || Debug() || force) Log() << kINFO << message << Endl;
521 }
522 
523 ////////////////////////////////////////////////////////////////////////////////
524 /// wait for keyboard input, for debugging
525 
526 void TMVA::MethodANNBase::WaitForKeyboard()
527 {
528  std::string dummy;
529  Log() << kINFO << "***Type anything to continue (q to quit): ";
530  std::getline(std::cin, dummy);
531  if (dummy == "q" || dummy == "Q") {
532  PrintMessage( "quit" );
533  delete this;
534  exit(0);
535  }
536 }
537 
538 ////////////////////////////////////////////////////////////////////////////////
539 /// print network representation, for debugging
540 
541 void TMVA::MethodANNBase::PrintNetwork() const
542 {
543  if (!Debug()) return;
544 
545  Log() << kINFO << Endl;
546  PrintMessage( "Printing network " );
547  Log() << kINFO << "-------------------------------------------------------------------" << Endl;
548 
549  TObjArray* curLayer;
550  Int_t numLayers = fNetwork->GetEntriesFast();
551 
552  for (Int_t i = 0; i < numLayers; i++) {
553 
554  curLayer = (TObjArray*)fNetwork->At(i);
555  Int_t numNeurons = curLayer->GetEntriesFast();
556 
557  Log() << kINFO << "Layer #" << i << " (" << numNeurons << " neurons):" << Endl;
558  PrintLayer( curLayer );
559  }
560 }
561 
562 ////////////////////////////////////////////////////////////////////////////////
563 /// print a single layer, for debugging
564 
565 void TMVA::MethodANNBase::PrintLayer( TObjArray* layer ) const
566 {
567  Int_t numNeurons = layer->GetEntriesFast();
568  TNeuron* neuron;
569 
570  for (Int_t j = 0; j < numNeurons; j++) {
571  neuron = (TNeuron*) layer->At(j);
572  Log() << kINFO << "\tNeuron #" << j << " (LinksIn: " << neuron->NumPreLinks()
573  << " , LinksOut: " << neuron->NumPostLinks() << ")" << Endl;
574  PrintNeuron( neuron );
575  }
576 }
577 
578 ////////////////////////////////////////////////////////////////////////////////
579 /// print a neuron, for debugging
580 
581 void TMVA::MethodANNBase::PrintNeuron( TNeuron* neuron ) const
582 {
583  Log() << kINFO
584  << "\t\tValue:\t" << neuron->GetValue()
585  << "\t\tActivation: " << neuron->GetActivationValue()
586  << "\t\tDelta: " << neuron->GetDelta() << Endl;
587  Log() << kINFO << "\t\tActivationEquation:\t";
588  neuron->PrintActivationEqn();
589  Log() << kINFO << "\t\tLinksIn:" << Endl;
590  neuron->PrintPreLinks();
591  Log() << kINFO << "\t\tLinksOut:" << Endl;
592  neuron->PrintPostLinks();
593 }
594 
595 ////////////////////////////////////////////////////////////////////////////////
596 /// get the mva value generated by the NN
597 
598 Double_t TMVA::MethodANNBase::GetMvaValue( Double_t* err, Double_t* errUpper )
599 {
600  TNeuron* neuron;
601 
602  TObjArray* inputLayer = (TObjArray*)fNetwork->At(0);
603 
604  const Event * ev = GetEvent();
605 
606  for (UInt_t i = 0; i < GetNvar(); i++) {
607  neuron = (TNeuron*)inputLayer->At(i);
608  neuron->ForceValue( ev->GetValue(i) );
609  }
610  ForceNetworkCalculations();
611 
612  // check the output of the network
613  TObjArray* outputLayer = (TObjArray*)fNetwork->At( fNetwork->GetEntriesFast()-1 );
614  neuron = (TNeuron*)outputLayer->At(0);
615 
616  // cannot determine error
617  NoErrorCalc(err, errUpper);
618 
619  return neuron->GetActivationValue();
620 }
621 
622 ////////////////////////////////////////////////////////////////////////////////
623 /// get the regression value generated by the NN
624 
625 const std::vector<Float_t> &TMVA::MethodANNBase::GetRegressionValues()
626 {
627  TNeuron* neuron;
628 
629  TObjArray* inputLayer = (TObjArray*)fNetwork->At(0);
630 
631  const Event * ev = GetEvent();
632 
633  for (UInt_t i = 0; i < GetNvar(); i++) {
634  neuron = (TNeuron*)inputLayer->At(i);
635  neuron->ForceValue( ev->GetValue(i) );
636  }
637  ForceNetworkCalculations();
638 
639  // check the output of the network
640  TObjArray* outputLayer = (TObjArray*)fNetwork->At( fNetwork->GetEntriesFast()-1 );
641 
642  if (fRegressionReturnVal == NULL) fRegressionReturnVal = new std::vector<Float_t>();
643  fRegressionReturnVal->clear();
644 
645  Event * evT = new Event(*ev);
646  UInt_t ntgts = outputLayer->GetEntriesFast();
647  for (UInt_t itgt = 0; itgt < ntgts; itgt++) {
648  evT->SetTarget(itgt,((TNeuron*)outputLayer->At(itgt))->GetActivationValue());
649  }
650 
651  const Event* evT2 = GetTransformationHandler().InverseTransform( evT );
652  for (UInt_t itgt = 0; itgt < ntgts; itgt++) {
653  fRegressionReturnVal->push_back( evT2->GetTarget(itgt) );
654  }
655 
656  delete evT;
657 
658  return *fRegressionReturnVal;
659 }
660 
661 ////////////////////////////////////////////////////////////////////////////////
662 /// get the multiclass classification values generated by the NN
663 
664 const std::vector<Float_t> &TMVA::MethodANNBase::GetMulticlassValues()
665 {
666  TNeuron* neuron;
667 
668  TObjArray* inputLayer = (TObjArray*)fNetwork->At(0);
669 
670  const Event * ev = GetEvent();
671 
672  for (UInt_t i = 0; i < GetNvar(); i++) {
673  neuron = (TNeuron*)inputLayer->At(i);
674  neuron->ForceValue( ev->GetValue(i) );
675  }
676  ForceNetworkCalculations();
677 
678  // check the output of the network
679 
680  if (fMulticlassReturnVal == NULL) fMulticlassReturnVal = new std::vector<Float_t>();
681  fMulticlassReturnVal->clear();
682  std::vector<Float_t> temp;
683 
684  UInt_t nClasses = DataInfo().GetNClasses();
685  for (UInt_t icls = 0; icls < nClasses; icls++) {
686  temp.push_back(GetOutputNeuron( icls )->GetActivationValue() );
687  }
688 
689  for(UInt_t iClass=0; iClass<nClasses; iClass++){
690  Double_t norm = 0.0;
691  for(UInt_t j=0;j<nClasses;j++){
692  if(iClass!=j)
693  norm+=exp(temp[j]-temp[iClass]);
694  }
695  (*fMulticlassReturnVal).push_back(1.0/(1.0+norm));
696  }
697 
698 
699 
700  return *fMulticlassReturnVal;
701 }
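// The normalisation loop above is an overflow-safe softmax: for class i it
// returns 1/(1 + sum_{j!=i} exp(t_j - t_i)), which is identical to
// exp(t_i) / sum_j exp(t_j) for the raw output-neuron activations t.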
702 
703 
704 ////////////////////////////////////////////////////////////////////////////////
705 /// create XML description of ANN classifier
706 
707 void TMVA::MethodANNBase::AddWeightsXMLTo( void* parent ) const
708 {
709  Int_t numLayers = fNetwork->GetEntriesFast();
710  void* wght = gTools().xmlengine().NewChild(parent, 0, "Weights");
711  void* xmlLayout = gTools().xmlengine().NewChild(wght, 0, "Layout");
712  gTools().xmlengine().NewAttr(xmlLayout, 0, "NLayers", gTools().StringFromInt(fNetwork->GetEntriesFast()) );
713  TString weights = "";
714  for (Int_t i = 0; i < numLayers; i++) {
715  TObjArray* layer = (TObjArray*)fNetwork->At(i);
716  Int_t numNeurons = layer->GetEntriesFast();
717  void* layerxml = gTools().xmlengine().NewChild(xmlLayout, 0, "Layer");
718  gTools().xmlengine().NewAttr(layerxml, 0, "Index", gTools().StringFromInt(i) );
719  gTools().xmlengine().NewAttr(layerxml, 0, "NNeurons", gTools().StringFromInt(numNeurons) );
720  for (Int_t j = 0; j < numNeurons; j++) {
721  TNeuron* neuron = (TNeuron*)layer->At(j);
722  Int_t numSynapses = neuron->NumPostLinks();
723  void* neuronxml = gTools().AddChild(layerxml, "Neuron");
724  gTools().AddAttr(neuronxml, "NSynapses", gTools().StringFromInt(numSynapses) );
725  if(numSynapses==0) continue;
726  std::stringstream s("");
727  s.precision( 16 );
728  for (Int_t k = 0; k < numSynapses; k++) {
729  TSynapse* synapse = neuron->PostLinkAt(k);
730  s << std::scientific << synapse->GetWeight() << " ";
731  }
732  gTools().AddRawLine( neuronxml, s.str().c_str() );
733  }
734  }
735 
736  // if inverse hessian exists, write inverse hessian to weight file
737  if( fInvHessian.GetNcols()>0 ){
738  void* xmlInvHessian = gTools().xmlengine().NewChild(wght, 0, "InverseHessian");
739 
740  // get the matrix dimensions
741  Int_t nElements = fInvHessian.GetNoElements();
742  Int_t nRows = fInvHessian.GetNrows();
743  Int_t nCols = fInvHessian.GetNcols();
744  gTools().xmlengine().NewAttr(xmlInvHessian, 0, "NElements", gTools().StringFromInt(nElements) );
745  gTools().xmlengine().NewAttr(xmlInvHessian, 0, "NRows", gTools().StringFromInt(nRows) );
746  gTools().xmlengine().NewAttr(xmlInvHessian, 0, "NCols", gTools().StringFromInt(nCols) );
747 
748  // read in the matrix elements
749  Double_t* elements = new Double_t[nElements+10];
750  fInvHessian.GetMatrix2Array( elements );
751 
752  // store the matrix elements row-wise
753  Int_t index = 0;
754  for( Int_t row = 0; row < nRows; ++row ){
755  void* xmlRow = gTools().xmlengine().NewChild(xmlInvHessian, 0, "Row");
756  gTools().xmlengine().NewAttr(xmlRow, 0, "Index", gTools().StringFromInt(row) );
757 
758  // create the rows
759  std::stringstream s("");
760  s.precision( 16 );
761  for( Int_t col = 0; col < nCols; ++col ){
762  s << std::scientific << (*(elements+index)) << " ";
763  ++index;
764  }
765  gTools().xmlengine().AddRawLine( xmlRow, s.str().c_str() );
766  }
767  delete[] elements;
768  }
769 }
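// Sketch of the XML fragment written above (element and attribute names as
// produced by this method; weight values abbreviated):
//
//   <Weights>
//     <Layout NLayers="3">
//       <Layer Index="0" NNeurons="5">
//         <Neuron NSynapses="4"> 1.0235e-01 -4.5600e-02 ... </Neuron>
//         ...
//       </Layer>
//     </Layout>
//     <!-- present only when an inverse hessian exists (regulator used): -->
//     <InverseHessian NElements="..." NRows="..." NCols="...">
//       <Row Index="0"> ... </Row>
//     </InverseHessian>
//   </Weights>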
770 
771 
772 ////////////////////////////////////////////////////////////////////////////////
773 /// read MLP from xml weight file
774 
775 void TMVA::MethodANNBase::ReadWeightsFromXML( void* wghtnode )
776 {
777  // build the layout first
778  Bool_t fromFile = kTRUE;
779  std::vector<Int_t>* layout = new std::vector<Int_t>();
780 
781  void* xmlLayout = NULL;
782  xmlLayout = gTools().GetChild(wghtnode, "Layout");
783  if( !xmlLayout )
784  xmlLayout = wghtnode;
785 
786  UInt_t nLayers;
787  gTools().ReadAttr( xmlLayout, "NLayers", nLayers );
788  layout->resize( nLayers );
789 
790  void* ch = gTools().xmlengine().GetChild(xmlLayout);
791  UInt_t index;
792  UInt_t nNeurons;
793  while (ch) {
794  gTools().ReadAttr( ch, "Index", index );
795  gTools().ReadAttr( ch, "NNeurons", nNeurons );
796  layout->at(index) = nNeurons;
797  ch = gTools().GetNextChild(ch);
798  }
799 
800  BuildNetwork( layout, NULL, fromFile );
801  // use 'slow' (exact) TanH if processing an old weight file to ensure 100% compatible results
802  // otherwise use the new default, the 'fast tanh' approximation
803  if (GetTrainingTMVAVersionCode() < TMVA_VERSION(4,2,1) && fActivation->GetExpression().Contains("tanh")){
804  TActivationTanh* act = dynamic_cast<TActivationTanh*>( fActivation );
805  if (act) act->SetSlow();
806  }
807 
808  // fill the weights of the synapses
809  UInt_t nSyn;
810  Float_t weight;
811  ch = gTools().xmlengine().GetChild(xmlLayout);
812  UInt_t iLayer = 0;
813  while (ch) { // layers
814  TObjArray* layer = (TObjArray*)fNetwork->At(iLayer);
815  gTools().ReadAttr( ch, "Index", index );
816  gTools().ReadAttr( ch, "NNeurons", nNeurons );
817 
818  void* nodeN = gTools().GetChild(ch);
819  UInt_t iNeuron = 0;
820  while( nodeN ){ // neurons
821  TNeuron *neuron = (TNeuron*)layer->At(iNeuron);
822  gTools().ReadAttr( nodeN, "NSynapses", nSyn );
823  if( nSyn > 0 ){
824  const char* content = gTools().GetContent(nodeN);
825  std::stringstream s(content);
826  for (UInt_t iSyn = 0; iSyn<nSyn; iSyn++) { // synapses
827 
828  TSynapse* synapse = neuron->PostLinkAt(iSyn);
829  s >> weight;
830  //Log() << kWARNING << neuron << " " << weight << Endl;
831  synapse->SetWeight(weight);
832  }
833  }
834  nodeN = gTools().GetNextChild(nodeN);
835  iNeuron++;
836  }
837  ch = gTools().GetNextChild(ch);
838  iLayer++;
839  }
840 
841  delete layout;
842 
843  void* xmlInvHessian = NULL;
844  xmlInvHessian = gTools().GetChild(wghtnode, "InverseHessian");
845  if( !xmlInvHessian )
846  // no inverse hessian available
847  return;
848 
849  fUseRegulator = kTRUE;
850 
851  Int_t nElements = 0;
852  Int_t nRows = 0;
853  Int_t nCols = 0;
854  gTools().ReadAttr( xmlInvHessian, "NElements", nElements );
855  gTools().ReadAttr( xmlInvHessian, "NRows", nRows );
856  gTools().ReadAttr( xmlInvHessian, "NCols", nCols );
857 
858  // adjust the matrix dimensions
859  fInvHessian.ResizeTo( nRows, nCols );
860 
861  // prepare an array to read in the values
862  Double_t* elements;
863  if (nElements > std::numeric_limits<int>::max()-100){
864  Log() << kFATAL << "you tried to read a hessian matrix with " << nElements << " elements, --> too large, guess s.th. went wrong reading from the weight file" << Endl;
865  return;
866  } else {
867  elements = new Double_t[nElements+10];
868  }
869 
870 
871 
872  void* xmlRow = gTools().xmlengine().GetChild(xmlInvHessian);
873  Int_t row = 0;
874  index = 0;
875  while (xmlRow) { // rows
876  gTools().ReadAttr( xmlRow, "Index", row );
877 
878  const char* content = gTools().xmlengine().GetNodeContent(xmlRow);
879 
880  std::stringstream s(content);
881  for (Int_t iCol = 0; iCol<nCols; iCol++) { // columns
882  s >> (*(elements+index));
883  ++index;
884  }
885  xmlRow = gTools().xmlengine().GetNext(xmlRow);
886  ++row;
887  }
888 
889  fInvHessian.SetMatrixArray( elements );
890 
891  delete[] elements;
892 }
893 
894 ////////////////////////////////////////////////////////////////////////////////
895 /// destroy/clear the network then read it back in from the weights file
896 
897 void TMVA::MethodANNBase::ReadWeightsFromStream( std::istream & istr )
898 {
899  // delete network so we can reconstruct network from scratch
900 
901  TString dummy;
902 
903  // synapse weights
904  Double_t weight;
905  std::vector<Double_t>* weights = new std::vector<Double_t>();
906  istr>> dummy;
907  while (istr>> dummy >> weight) weights->push_back(weight); // use w/ slower write-out
908 
909  ForceWeights(weights);
910 
911 
912  delete weights;
913 }
914 
915 ////////////////////////////////////////////////////////////////////////////////
916 /// compute ranking of input variables by summing function of weights
917 
918 const TMVA::Ranking* TMVA::MethodANNBase::CreateRanking()
919 {
920  // create the ranking object
921  fRanking = new Ranking( GetName(), "Importance" );
922 
923  TNeuron* neuron;
924  TSynapse* synapse;
925  Double_t importance, avgVal;
926  TString varName;
927 
928  for (UInt_t ivar = 0; ivar < GetNvar(); ivar++) {
929 
930  neuron = GetInputNeuron(ivar);
931  Int_t numSynapses = neuron->NumPostLinks();
932  importance = 0;
933  varName = GetInputVar(ivar); // fix this line
934 
935  // figure out average value of variable i
936  Double_t meanS, meanB, rmsS, rmsB, xmin, xmax;
937  Statistics( TMVA::Types::kTraining, varName,
938  meanS, meanB, rmsS, rmsB, xmin, xmax );
939 
940  avgVal = (TMath::Abs(meanS) + TMath::Abs(meanB))/2.0;
941  double meanrms = (TMath::Abs(rmsS) + TMath::Abs(rmsB))/2.;
942  if (avgVal<meanrms) avgVal = meanrms;
943  if (IsNormalised()) avgVal = 0.5*(1 + gTools().NormVariable( avgVal, GetXmin( ivar ), GetXmax( ivar )));
944 
945  for (Int_t j = 0; j < numSynapses; j++) {
946  synapse = neuron->PostLinkAt(j);
947  importance += synapse->GetWeight() * synapse->GetWeight();
948  }
949 
950  importance *= avgVal * avgVal;
951 
952  fRanking->AddRank( Rank( varName, importance ) );
953  }
954 
955  return fRanking;
956 }
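// In formula form: for input variable v with average magnitude x_v (clamped
// from below by the mean RMS, as computed above), the ranking importance is
//   I_v = x_v^2 * sum_k w_{vk}^2,
// where the sum runs over all synapses k leaving input neuron v.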
957 
958 ////////////////////////////////////////////////////////////////////////////////
959 
960 void TMVA::MethodANNBase::CreateWeightMonitoringHists( const TString& bulkname,
961  std::vector<TH1*>* hv ) const
962 {
963  TH2F* hist;
964  Int_t numLayers = fNetwork->GetEntriesFast();
965 
966  for (Int_t i = 0; i < numLayers-1; i++) {
967 
968  TObjArray* layer1 = (TObjArray*)fNetwork->At(i);
969  TObjArray* layer2 = (TObjArray*)fNetwork->At(i+1);
970  Int_t numNeurons1 = layer1->GetEntriesFast();
971  Int_t numNeurons2 = layer2->GetEntriesFast();
972 
973  TString name = Form("%s%i%i", bulkname.Data(), i, i+1);
974  hist = new TH2F(name + "", name + "",
975  numNeurons1, 0, numNeurons1, numNeurons2, 0, numNeurons2);
976 
977  for (Int_t j = 0; j < numNeurons1; j++) {
978 
979  TNeuron* neuron = (TNeuron*)layer1->At(j);
980  Int_t numSynapses = neuron->NumPostLinks();
981 
982  for (Int_t k = 0; k < numSynapses; k++) {
983 
984  TSynapse* synapse = neuron->PostLinkAt(k);
985  hist->SetBinContent(j+1, k+1, synapse->GetWeight());
986 
987  }
988  }
989 
990  if (hv) hv->push_back( hist );
991  else {
992  hist->Write();
993  delete hist;
994  }
995  }
996 }
997 
998 ////////////////////////////////////////////////////////////////////////////////
999 /// write histograms to file
1000 
1001 void TMVA::MethodANNBase::WriteMonitoringHistosToFile() const
1002 {
1003  PrintMessage(Form("Write special histos to file: %s", BaseDir()->GetPath()), kTRUE);
1004 
1005  if (fEstimatorHistTrain) fEstimatorHistTrain->Write();
1006  if (fEstimatorHistTest ) fEstimatorHistTest ->Write();
1007 
1008  // histograms containing weights for architecture plotting (used in macro "network.cxx")
1009  CreateWeightMonitoringHists( "weights_hist" );
1010 
1011  // now save all the epoch-wise monitoring information
1012 #if __cplusplus > 199711L
1013  static std::atomic<int> epochMonitoringDirectoryNumber{0};
1014 #else
1015  static int epochMonitoringDirectoryNumber = 0;
1016 #endif
1017  int epochVal = epochMonitoringDirectoryNumber++;
1018  TDirectory* epochdir = NULL;
1019  if( epochVal == 0 )
1020  epochdir = BaseDir()->mkdir( "EpochMonitoring" );
1021  else
1022  epochdir = BaseDir()->mkdir( Form("EpochMonitoring_%4d",epochVal) );
1023 
1024  epochdir->cd();
1025  for (std::vector<TH1*>::const_iterator it = fEpochMonHistS.begin(); it != fEpochMonHistS.end(); ++it) {
1026  (*it)->Write();
1027  delete (*it);
1028  }
1029  for (std::vector<TH1*>::const_iterator it = fEpochMonHistB.begin(); it != fEpochMonHistB.end(); ++it) {
1030  (*it)->Write();
1031  delete (*it);
1032  }
1033  for (std::vector<TH1*>::const_iterator it = fEpochMonHistW.begin(); it != fEpochMonHistW.end(); ++it) {
1034  (*it)->Write();
1035  delete (*it);
1036  }
1037  BaseDir()->cd();
1038 }
1039 
1040 ////////////////////////////////////////////////////////////////////////////////
1041 /// write specific classifier response
1042 
1043 void TMVA::MethodANNBase::MakeClassSpecific( std::ostream& fout, const TString& className ) const
1044 {
1045  Int_t numLayers = fNetwork->GetEntries();
1046 
1047  fout << std::endl;
1048  fout << " double ActivationFnc(double x) const;" << std::endl;
1049  fout << " double OutputActivationFnc(double x) const;" << std::endl; //zjh
1050  fout << std::endl;
1051  fout << " int fLayers;" << std::endl;
1052  fout << " int fLayerSize["<<numLayers<<"];" << std::endl;
1053  int numNodesFrom = -1;
1054  for (Int_t lIdx = 0; lIdx < numLayers; lIdx++) {
1055  int numNodesTo = ((TObjArray*)fNetwork->At(lIdx))->GetEntries();
1056  if (numNodesFrom<0) { numNodesFrom=numNodesTo; continue; }
1057  fout << " double fWeightMatrix" << lIdx-1 << "to" << lIdx << "[" << numNodesTo << "][" << numNodesFrom << "];";
1058  fout << " // weight matrix from layer " << lIdx-1 << " to " << lIdx << std::endl;
1059  numNodesFrom = numNodesTo;
1060  }
1061  fout << std::endl;
1062  fout << "};" << std::endl;
1063 
1064  fout << std::endl;
1065 
1066  fout << "inline void " << className << "::Initialize()" << std::endl;
1067  fout << "{" << std::endl;
1068  fout << " // build network structure" << std::endl;
1069  fout << " fLayers = " << numLayers << ";" << std::endl;
1070  for (Int_t lIdx = 0; lIdx < numLayers; lIdx++) {
1071  TObjArray* layer = (TObjArray*)fNetwork->At(lIdx);
1072  int numNodes = layer->GetEntries();
1073  fout << " fLayerSize[" << lIdx << "] = " << numNodes << ";" << std::endl;
1074  }
1075 
1076  for (Int_t i = 0; i < numLayers-1; i++) {
1077  fout << " // weight matrix from layer " << i << " to " << i+1 << std::endl;
1078  TObjArray* layer = (TObjArray*)fNetwork->At(i);
1079  Int_t numNeurons = layer->GetEntriesFast();
1080  for (Int_t j = 0; j < numNeurons; j++) {
1081  TNeuron* neuron = (TNeuron*)layer->At(j);
1082  Int_t numSynapses = neuron->NumPostLinks();
1083  for (Int_t k = 0; k < numSynapses; k++) {
1084  TSynapse* synapse = neuron->PostLinkAt(k);
1085  fout << " fWeightMatrix" << i << "to" << i+1 << "[" << k << "][" << j << "] = " << synapse->GetWeight() << ";" << std::endl;
1086  }
1087  }
1088  }
1089 
1090  fout << "}" << std::endl;
1091  fout << std::endl;
1092 
1093  // writing of the GetMvaValue__ method
1094  fout << "inline double " << className << "::GetMvaValue__( const std::vector<double>& inputValues ) const" << std::endl;
1095  fout << "{" << std::endl;
1096  fout << " if (inputValues.size() != (unsigned int)fLayerSize[0]-1) {" << std::endl;
1097  fout << " std::cout << \"Input vector needs to be of size \" << fLayerSize[0]-1 << std::endl;" << std::endl;
1098  fout << " return 0;" << std::endl;
1099  fout << " }" << std::endl;
1100  fout << std::endl;
1101  for (Int_t lIdx = 0; lIdx < numLayers; lIdx++) {
1102  TObjArray *layer = (TObjArray *)fNetwork->At(lIdx);
1103  int numNodes = layer->GetEntries();
1104  fout << " std::array<double, " << numNodes << "> fWeights" << lIdx << " {{}};" << std::endl;
1105  }
1106  for (Int_t lIdx = 0; lIdx < numLayers - 1; lIdx++) {
1107  fout << " fWeights" << lIdx << ".back() = 1.;" << std::endl;
1108  }
1109  fout << std::endl;
1110  fout << " for (int i=0; i<fLayerSize[0]-1; i++)" << std::endl;
1111  fout << " fWeights0[i]=inputValues[i];" << std::endl;
1112  fout << std::endl;
1113  for (Int_t i = 0; i < numLayers-1; i++) {
1114  fout << " // layer " << i << " to " << i+1 << std::endl;
1115  if (i+1 == numLayers-1) {
1116  fout << " for (int o=0; o<fLayerSize[" << i+1 << "]; o++) {" << std::endl;
1117  }
1118  else {
1119  fout << " for (int o=0; o<fLayerSize[" << i+1 << "]-1; o++) {" << std::endl;
1120  }
1121  fout << " for (int i=0; i<fLayerSize[" << i << "]; i++) {" << std::endl;
1122  fout << " double inputVal = fWeightMatrix" << i << "to" << i + 1 << "[o][i] * fWeights" << i << "[i];"
1123  << std::endl;
1124 
1125  if ( fNeuronInputType == "sum") {
1126  fout << " fWeights" << i + 1 << "[o] += inputVal;" << std::endl;
1127  }
1128  else if ( fNeuronInputType == "sqsum") {
1129  fout << " fWeights" << i + 1 << "[o] += inputVal*inputVal;" << std::endl;
1130  }
1131  else { // fNeuronInputType == TNeuronInputChooser::kAbsSum
1132  fout << " fWeights" << i + 1 << "[o] += fabs(inputVal);" << std::endl;
1133  }
1134  fout << " } // loop over i" << std::endl;
1135  if (i+1 != numLayers-1) // in the last layer no activation function is applied
1136  fout << " fWeights" << i + 1 << "[o] = ActivationFnc(fWeights" << i + 1 << "[o]);" << std::endl;
1137  else
1138  fout << " fWeights" << i + 1 << "[o] = OutputActivationFnc(fWeights" << i + 1 << "[o]);"
1139  << std::endl; // zjh
1140  fout << " } // loop over o" << std::endl;
1141  }
1142  fout << std::endl;
1143  fout << " return fWeights" << numLayers - 1 << "[0];" << std::endl;
1144  fout << "}" << std::endl;
1145 
1146  fout << std::endl;
1147  TString fncName = className+"::ActivationFnc";
1148  fActivation->MakeFunction(fout, fncName);
1149  fncName = className+"::OutputActivationFnc"; //zjh
1150  fOutput->MakeFunction(fout, fncName);//zjh
1151 
1152  fout << std::endl;
1153  fout << "// Clean up" << std::endl;
1154  fout << "inline void " << className << "::Clear()" << std::endl;
1155  fout << "{" << std::endl;
1156  fout << "}" << std::endl;
1157 }
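// Usage sketch for the generated standalone response class (editorial; the
// actual class name and surrounding boilerplate come from the MakeClass
// machinery of MethodBase -- "ReadMLP" is assumed here for illustration):
//
//    std::vector<std::string> inputVars = { "var1", "var2", "var3", "var4" };
//    ReadMLP mlp( inputVars );
//    std::vector<double> event = { 0.1, -0.4, 1.2, 0.7 };
//    double mvaValue = mlp.GetMvaValue( event );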
1158 
1159 ////////////////////////////////////////////////////////////////////////////////
1160 /// who the hell makes such strange Debug flags that even use "global pointers"..
1161 
1162 Bool_t TMVA::MethodANNBase::Debug() const
1163 {
1164  return fgDEBUG;
1165 }