MethodANNBase.cxx
// @(#)root/tmva $Id$
// Author: Andreas Hoecker, Peter Speckmayer, Matt Jachowski, Jan Therhaag, Jiahang Zhong

/**********************************************************************************
 * Project: TMVA - a Root-integrated toolkit for multivariate data analysis      *
 * Package: TMVA                                                                  *
 * Class  : MethodANNBase                                                         *
 * Web    : http://tmva.sourceforge.net                                           *
 *                                                                                *
 * Description:                                                                   *
 *      Artificial neural network base class for the discrimination of signal    *
 *      from background.                                                          *
 *                                                                                *
 * Authors (alphabetical):                                                        *
 *      Krzysztof Danielowski <danielow@cern.ch>         - IFJ & AGH, Poland      *
 *      Andreas Hoecker       <Andreas.Hocker@cern.ch>   - CERN, Switzerland      *
 *      Matt Jachowski        <jachowski@stanford.edu>   - Stanford University    *
 *      Kamil Kraszewski      <kalq@cern.ch>             - IFJ & UJ, Poland       *
 *      Maciej Kruk           <mkruk@cern.ch>            - IFJ & AGH, Poland      *
 *      Peter Speckmayer      <peter.speckmayer@cern.ch> - CERN, Switzerland      *
 *      Joerg Stelzer         <stelzer@cern.ch>          - DESY, Germany          *
 *      Jan Therhaag          <Jan.Therhaag@cern.ch>     - U of Bonn, Germany     *
 *      Jiahang Zhong         <Jiahang.Zhong@cern.ch>    - Academia Sinica, Taipei*
 *                                                                                *
 * Copyright (c) 2005-2011:                                                       *
 *      CERN, Switzerland                                                         *
 *      U. of Bonn, Germany                                                       *
 *                                                                                *
 * Redistribution and use in source and binary forms, with or without            *
 * modification, are permitted according to the terms listed in LICENSE          *
 * (http://tmva.sourceforge.net/LICENSE)                                         *
 **********************************************************************************/

//_______________________________________________________________________
//
// Base class for all TMVA methods using artificial neural networks
//
//_______________________________________________________________________

#include "TMVA/MethodBase.h"

#include "TMVA/Configurable.h"
#include "TMVA/DataSetInfo.h"
#include "TMVA/MethodANNBase.h"
#include "TMVA/MsgLogger.h"
#include "TMVA/TNeuron.h"
#include "TMVA/TSynapse.h"
#include "TMVA/TActivationChooser.h"
#include "TMVA/TActivationTanh.h"
#include "TMVA/Types.h"
#include "TMVA/Tools.h"
#include "TMVA/TNeuronInputChooser.h"
#include "TMVA/Ranking.h"
#include "TMVA/Version.h"

#include "TString.h"
#include "TTree.h"
#include "TDirectory.h"
#include "Riostream.h"
#include "TRandom3.h"
#include "TH2F.h"
#include "TH1.h"
#include "TMath.h"
#include "TMatrixT.h"

#include <vector>
#include <cstdlib>
#include <stdexcept>
#include <limits>   // std::numeric_limits, used in ReadWeightsFromXML
#if __cplusplus > 199711L
#include <atomic>
#endif


using std::vector;

ClassImp(TMVA::MethodANNBase)

////////////////////////////////////////////////////////////////////////////////
/// standard constructor
/// Note: Right now it is an option to choose the neuron input function,
/// but only the input function "sum" leads to weight convergence --
/// otherwise the weights go to nan and lead to an ABORT.

TMVA::MethodANNBase::MethodANNBase( const TString& jobName,
                                    Types::EMVA methodType,
                                    const TString& methodTitle,
                                    DataSetInfo& theData,
                                    const TString& theOption )
   : TMVA::MethodBase( jobName, methodType, methodTitle, theData, theOption )
   , fEstimator(kMSE)
   , fUseRegulator(kFALSE)
   , fRandomSeed(0)
{
   InitANNBase();

   DeclareOptions();
}

////////////////////////////////////////////////////////////////////////////////
/// construct the Method from the weight file

TMVA::MethodANNBase::MethodANNBase( Types::EMVA methodType,
                                    DataSetInfo& theData,
                                    const TString& theWeightFile )
   : TMVA::MethodBase( methodType, theData, theWeightFile )
   , fEstimator(kMSE)
   , fUseRegulator(kFALSE)
   , fRandomSeed(0)
{
   InitANNBase();

   DeclareOptions();
}

////////////////////////////////////////////////////////////////////////////////
/// define the options (their key words) that can be set in the option string
/// here the options valid for ALL MVA methods are declared.
/// known options:
///   NCycles=xx              : the number of training cycles
///   Normalize=kTRUE,kFALSE  : whether normalised input variables should be used
///   HiddenLayers="N-1,N-2"  : the specification of the hidden layers
///   NeuronType=sigmoid,tanh,radial,linear : the type of activation function
///                                           used at the neuron

void TMVA::MethodANNBase::DeclareOptions()
{
   DeclareOptionRef( fNcycles    = 500,       "NCycles",      "Number of training cycles" );
   DeclareOptionRef( fLayerSpec  = "N,N-1",   "HiddenLayers", "Specification of hidden layer architecture" );
   DeclareOptionRef( fNeuronType = "sigmoid", "NeuronType",   "Neuron activation function type" );
   DeclareOptionRef( fRandomSeed = 1, "RandomSeed", "Random seed for initial synapse weights (0 means unique seed for each run; default value '1')" );

   DeclareOptionRef( fEstimatorS = "MSE", "EstimatorType",
                     "MSE (Mean Square Estimator) for Gaussian Likelihood or CE (Cross-Entropy) for Bernoulli Likelihood" ); //zjh
   AddPreDefVal( TString("MSE") ); //zjh
   AddPreDefVal( TString("CE") );  //zjh

   TActivationChooser aChooser;
   std::vector<TString>* names = aChooser.GetAllActivationNames();
   Int_t nTypes = names->size();
   for (Int_t i = 0; i < nTypes; i++)
      AddPreDefVal(names->at(i));
   delete names;

   DeclareOptionRef( fNeuronInputType = "sum", "NeuronInputType", "Neuron input function type" );
   TNeuronInputChooser iChooser;
   names = iChooser.GetAllNeuronInputNames();
   nTypes = names->size();
   for (Int_t i = 0; i < nTypes; i++) AddPreDefVal(names->at(i));
   delete names;
}
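
// Illustration (not part of this file): the option keys declared above are
// what a user passes in the booking string of a concrete ANN method such as
// MLP, which derives from MethodANNBase. A minimal sketch, assuming a
// TMVA::Factory and TMVA::DataLoader have been set up elsewhere; the option
// values are examples only.
//
//    #include "TMVA/Factory.h"
//    #include "TMVA/DataLoader.h"
//    #include "TMVA/Types.h"
//
//    void BookExampleANN( TMVA::Factory* factory, TMVA::DataLoader* loader )
//    {
//       // "N" in HiddenLayers is expanded relative to the number of input
//       // variables by ParseLayoutString() below
//       factory->BookMethod( loader, TMVA::Types::kMLP, "MLP",
//                            "NCycles=600:HiddenLayers=N,N-1:NeuronType=sigmoid:"
//                            "EstimatorType=CE:RandomSeed=1" );
//    }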

////////////////////////////////////////////////////////////////////////////////
/// do nothing specific at this moment

void TMVA::MethodANNBase::ProcessOptions()
{
   if ( DoRegression() || DoMulticlass() ) fEstimatorS = "MSE"; //zjh
   else                                    fEstimatorS = "CE";  //hhv
   if      (fEstimatorS == "MSE") fEstimator = kMSE;
   else if (fEstimatorS == "CE")  fEstimator = kCE; //zjh
   std::vector<Int_t>* layout = ParseLayoutString(fLayerSpec);
   BuildNetwork(layout);
   delete layout;
}

////////////////////////////////////////////////////////////////////////////////
/// parse layout specification string and return a vector, each entry
/// containing the number of neurons to go in each successive layer

std::vector<Int_t>* TMVA::MethodANNBase::ParseLayoutString(TString layerSpec)
{
   std::vector<Int_t>* layout = new std::vector<Int_t>();
   layout->push_back((Int_t)GetNvar());
   while (layerSpec.Length() > 0) {
      TString sToAdd = "";
      if (layerSpec.First(',') < 0) {
         sToAdd    = layerSpec;
         layerSpec = "";
      }
      else {
         sToAdd    = layerSpec(0, layerSpec.First(','));
         layerSpec = layerSpec(layerSpec.First(',')+1, layerSpec.Length());
      }
      int nNodes = 0;
      if (sToAdd.BeginsWith("n") || sToAdd.BeginsWith("N")) { sToAdd.Remove(0,1); nNodes = GetNvar(); }
      nNodes += atoi(sToAdd);
      layout->push_back(nNodes);
   }
   if      (DoRegression())
      layout->push_back( DataInfo().GetNTargets() ); // one output node for each target
   else if (DoMulticlass())
      layout->push_back( DataInfo().GetNClasses() ); // one output node for each class
   else
      layout->push_back(1);                          // one output node (for signal/background classification)

   return layout;
}
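
// Worked example (illustrative): with GetNvar() == 4 and plain
// signal/background classification, ParseLayoutString("N,N-1") returns
//
//    { 4,    // input layer: one neuron per input variable
//      4,    // first hidden layer:  "N"   -> 4
//      3,    // second hidden layer: "N-1" -> 4-1 = 3
//      1 }   // output layer: single classification node
//
// Bias neurons are not counted here; they are appended per layer later in
// BuildLayer().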

////////////////////////////////////////////////////////////////////////////////
/// initialize ANNBase object

void TMVA::MethodANNBase::InitANNBase()
{
   fNetwork         = NULL;
   frgen            = NULL;
   fActivation      = NULL;
   fOutput          = NULL; //zjh
   fIdentity        = NULL;
   fInputCalculator = NULL;
   fSynapses        = NULL;

   // reset monitoring histogram vectors
   fEpochMonHistS.clear();
   fEpochMonHistB.clear();
   fEpochMonHistW.clear();

   // these will be set in BuildNetwork()
   fInputLayer = NULL;
   fOutputNeurons.clear();

   frgen = new TRandom3(fRandomSeed);

   fSynapses = new TObjArray();
}

////////////////////////////////////////////////////////////////////////////////
/// destructor

TMVA::MethodANNBase::~MethodANNBase()
{
   DeleteNetwork();
}

////////////////////////////////////////////////////////////////////////////////
/// delete/clear network

void TMVA::MethodANNBase::DeleteNetwork()
{
   if (fNetwork != NULL) {
      TObjArray* layer;
      Int_t numLayers = fNetwork->GetEntriesFast();
      for (Int_t i = 0; i < numLayers; i++) {
         layer = (TObjArray*)fNetwork->At(i);
         DeleteNetworkLayer(layer);
      }
      delete fNetwork;
   }

   if (frgen            != NULL) delete frgen;
   if (fActivation      != NULL) delete fActivation;
   if (fOutput          != NULL) delete fOutput; //zjh
   if (fIdentity        != NULL) delete fIdentity;
   if (fInputCalculator != NULL) delete fInputCalculator;
   if (fSynapses        != NULL) delete fSynapses;

   fNetwork         = NULL;
   frgen            = NULL;
   fActivation      = NULL;
   fOutput          = NULL; //zjh
   fIdentity        = NULL;
   fInputCalculator = NULL;
   fSynapses        = NULL;
}

////////////////////////////////////////////////////////////////////////////////
/// delete a network layer

void TMVA::MethodANNBase::DeleteNetworkLayer( TObjArray*& layer )
{
   TNeuron* neuron;
   Int_t numNeurons = layer->GetEntriesFast();
   for (Int_t i = 0; i < numNeurons; i++) {
      neuron = (TNeuron*)layer->At(i);
      neuron->DeletePreLinks();
      delete neuron;
   }
   delete layer;
}

////////////////////////////////////////////////////////////////////////////////
/// build network given a layout (number of neurons in each layer)
/// and optional weights array

void TMVA::MethodANNBase::BuildNetwork( std::vector<Int_t>* layout, std::vector<Double_t>* weights, Bool_t fromFile )
{
   if      (fEstimatorS == "MSE") fEstimator = kMSE; //zjh
   else if (fEstimatorS == "CE")  fEstimator = kCE;  //zjh
   else Log() << kWARNING << "fEstimator=" << fEstimator << "\tfEstimatorS=" << fEstimatorS << Endl;
   if (fEstimator != kMSE && fEstimator != kCE) Log() << kWARNING << "Estimator type unspecified \t" << Endl; //zjh

   Log() << kHEADER << "Building Network. " << Endl;

   DeleteNetwork();
   InitANNBase();

   // set activation and input functions
   TActivationChooser aChooser;
   fActivation = aChooser.CreateActivation(fNeuronType);
   fIdentity   = aChooser.CreateActivation("linear");
   if      (fEstimator == kMSE) fOutput = aChooser.CreateActivation("linear");  //zjh
   else if (fEstimator == kCE)  fOutput = aChooser.CreateActivation("sigmoid"); //zjh
   TNeuronInputChooser iChooser;
   fInputCalculator = iChooser.CreateNeuronInput(fNeuronInputType);

   fNetwork = new TObjArray();
   fRegulatorIdx.clear();
   fRegulators.clear();
   BuildLayers( layout, fromFile );

   // cache input layer and output neurons for fast access
   fInputLayer = (TObjArray*)fNetwork->At(0);
   TObjArray* outputLayer = (TObjArray*)fNetwork->At(fNetwork->GetEntriesFast()-1);
   fOutputNeurons.clear();
   for (Int_t i = 0; i < outputLayer->GetEntries(); i++) {
      fOutputNeurons.push_back( (TNeuron*)outputLayer->At(i) );
   }

   if (weights == NULL) InitWeights();
   else                 ForceWeights(weights);
}

////////////////////////////////////////////////////////////////////////////////
/// build the network layers

void TMVA::MethodANNBase::BuildLayers( std::vector<Int_t>* layout, Bool_t fromFile )
{
   TObjArray* curLayer;
   TObjArray* prevLayer = NULL;

   Int_t numLayers = layout->size();

   for (Int_t i = 0; i < numLayers; i++) {
      curLayer = new TObjArray();
      BuildLayer(layout->at(i), curLayer, prevLayer, i, numLayers, fromFile);
      prevLayer = curLayer;
      fNetwork->Add(curLayer);
   }

   // cache pointers to synapses for fast access, the order matters
   for (Int_t i = 0; i < numLayers; i++) {
      TObjArray* layer = (TObjArray*)fNetwork->At(i);
      Int_t numNeurons = layer->GetEntriesFast();
      if (i != 0 && i != numLayers-1) fRegulators.push_back(0.); //zjh
      for (Int_t j = 0; j < numNeurons; j++) {
         if (i == 0) fRegulators.push_back(0.); //zjh
         TNeuron* neuron = (TNeuron*)layer->At(j);
         Int_t numSynapses = neuron->NumPostLinks();
         for (Int_t k = 0; k < numSynapses; k++) {
            TSynapse* synapse = neuron->PostLinkAt(k);
            fSynapses->Add(synapse);
            fRegulatorIdx.push_back(fRegulators.size()-1); //zjh
         }
      }
   }
}

////////////////////////////////////////////////////////////////////////////////
/// build a single layer with neurons and synapses connecting this
/// layer to the previous layer

void TMVA::MethodANNBase::BuildLayer( Int_t numNeurons, TObjArray* curLayer,
                                      TObjArray* prevLayer, Int_t layerIndex,
                                      Int_t numLayers, Bool_t fromFile )
{
   TNeuron* neuron;
   for (Int_t j = 0; j < numNeurons; j++) {
      if (fromFile && (layerIndex != numLayers-1) && (j == numNeurons-1)) {
         // when building from a weight file, the last neuron of each
         // non-output layer is the stored bias neuron
         neuron = new TNeuron();
         neuron->SetActivationEqn(fIdentity);
         neuron->SetBiasNeuron();
         neuron->ForceValue(1.0);
         curLayer->Add(neuron);
      }
      else {
         neuron = new TNeuron();
         neuron->SetInputCalculator(fInputCalculator);

         // input layer
         if (layerIndex == 0) {
            neuron->SetActivationEqn(fIdentity);
            neuron->SetInputNeuron();
         }
         else {
            // output layer
            if (layerIndex == numLayers-1) {
               neuron->SetOutputNeuron();
               neuron->SetActivationEqn(fOutput); //zjh
            }
            // hidden layers
            else neuron->SetActivationEqn(fActivation);
            AddPreLinks(neuron, prevLayer);
         }

         curLayer->Add(neuron);
      }
   }

   // add bias neuron (except to output layer)
   if (!fromFile) {
      if (layerIndex != numLayers-1) {
         neuron = new TNeuron();
         neuron->SetActivationEqn(fIdentity);
         neuron->SetBiasNeuron();
         neuron->ForceValue(1.0);
         curLayer->Add(neuron);
      }
   }
}

////////////////////////////////////////////////////////////////////////////////
/// add synapses connecting a neuron to its preceding layer

void TMVA::MethodANNBase::AddPreLinks( TNeuron* neuron, TObjArray* prevLayer )
{
   TSynapse* synapse;
   int numNeurons = prevLayer->GetEntriesFast();
   TNeuron* preNeuron;

   for (Int_t i = 0; i < numNeurons; i++) {
      preNeuron = (TNeuron*)prevLayer->At(i);
      synapse   = new TSynapse();
      synapse->SetPreNeuron(preNeuron);
      synapse->SetPostNeuron(neuron);
      preNeuron->AddPostLink(synapse);
      neuron->AddPreLink(synapse);
   }
}

////////////////////////////////////////////////////////////////////////////////
/// initialize the synapse weights randomly

void TMVA::MethodANNBase::InitWeights()
{
   PrintMessage("Initializing weights");

   // init synapse weights, drawn uniformly from [-2, 2)
   Int_t numSynapses = fSynapses->GetEntriesFast();
   TSynapse* synapse;
   for (Int_t i = 0; i < numSynapses; i++) {
      synapse = (TSynapse*)fSynapses->At(i);
      synapse->SetWeight(4.0*frgen->Rndm() - 2.0);
   }
}

////////////////////////////////////////////////////////////////////////////////
/// force the synapse weights

void TMVA::MethodANNBase::ForceWeights( std::vector<Double_t>* weights )
{
   PrintMessage("Forcing weights");

   Int_t numSynapses = fSynapses->GetEntriesFast();
   TSynapse* synapse;
   for (Int_t i = 0; i < numSynapses; i++) {
      synapse = (TSynapse*)fSynapses->At(i);
      synapse->SetWeight(weights->at(i));
   }
}

////////////////////////////////////////////////////////////////////////////////
/// force the value for each input neuron

void TMVA::MethodANNBase::ForceNetworkInputs( const Event* ev, Int_t ignoreIndex )
{
   Double_t x;
   TNeuron* neuron;

   for (UInt_t j = 0; j < GetNvar(); j++) {

      x = (j != (UInt_t)ignoreIndex) ? ev->GetValue(j) : 0;

      neuron = GetInputNeuron(j);
      neuron->ForceValue(x);
   }
}

////////////////////////////////////////////////////////////////////////////////
/// calculate input values to each neuron

void TMVA::MethodANNBase::ForceNetworkCalculations()
{
   TObjArray* curLayer;
   TNeuron* neuron;
   Int_t numLayers = fNetwork->GetEntriesFast();
   Int_t numNeurons;

   for (Int_t i = 0; i < numLayers; i++) {
      curLayer   = (TObjArray*)fNetwork->At(i);
      numNeurons = curLayer->GetEntriesFast();

      for (Int_t j = 0; j < numNeurons; j++) {
         neuron = (TNeuron*) curLayer->At(j);
         neuron->CalculateValue();
         neuron->CalculateActivationValue();
      }
   }
}

////////////////////////////////////////////////////////////////////////////////
/// print messages, turn off printing by setting verbose and debug flag appropriately

void TMVA::MethodANNBase::PrintMessage( TString message, Bool_t force ) const
{
   if (Verbose() || Debug() || force) Log() << kINFO << message << Endl;
}

////////////////////////////////////////////////////////////////////////////////
/// wait for keyboard input, for debugging

void TMVA::MethodANNBase::WaitForKeyboard()
{
   std::string dummy;
   Log() << kINFO << "***Type anything to continue (q to quit): ";
   std::getline(std::cin, dummy);
   if (dummy == "q" || dummy == "Q") {
      PrintMessage( "quit" );
      delete this;
      exit(0);
   }
}

////////////////////////////////////////////////////////////////////////////////
/// print network representation, for debugging

void TMVA::MethodANNBase::PrintNetwork() const
{
   if (!Debug()) return;

   Log() << kINFO << Endl;
   PrintMessage( "Printing network " );
   Log() << kINFO << "-------------------------------------------------------------------" << Endl;

   TObjArray* curLayer;
   Int_t numLayers = fNetwork->GetEntriesFast();

   for (Int_t i = 0; i < numLayers; i++) {

      curLayer = (TObjArray*)fNetwork->At(i);
      Int_t numNeurons = curLayer->GetEntriesFast();

      Log() << kINFO << "Layer #" << i << " (" << numNeurons << " neurons):" << Endl;
      PrintLayer( curLayer );
   }
}

////////////////////////////////////////////////////////////////////////////////
/// print a single layer, for debugging

void TMVA::MethodANNBase::PrintLayer( TObjArray* layer ) const
{
   Int_t numNeurons = layer->GetEntriesFast();
   TNeuron* neuron;

   for (Int_t j = 0; j < numNeurons; j++) {
      neuron = (TNeuron*) layer->At(j);
      Log() << kINFO << "\tNeuron #" << j << " (LinksIn: " << neuron->NumPreLinks()
            << " , LinksOut: " << neuron->NumPostLinks() << ")" << Endl;
      PrintNeuron( neuron );
   }
}

////////////////////////////////////////////////////////////////////////////////
/// print a neuron, for debugging

void TMVA::MethodANNBase::PrintNeuron( TNeuron* neuron ) const
{
   Log() << kINFO
         << "\t\tValue:\t"     << neuron->GetValue()
         << "\t\tActivation: " << neuron->GetActivationValue()
         << "\t\tDelta: "      << neuron->GetDelta() << Endl;
   Log() << kINFO << "\t\tActivationEquation:\t";
   neuron->PrintActivationEqn();
   Log() << kINFO << "\t\tLinksIn:" << Endl;
   neuron->PrintPreLinks();
   Log() << kINFO << "\t\tLinksOut:" << Endl;
   neuron->PrintPostLinks();
}

////////////////////////////////////////////////////////////////////////////////
/// get the mva value generated by the NN

Double_t TMVA::MethodANNBase::GetMvaValue( Double_t* err, Double_t* errUpper )
{
   TNeuron* neuron;

   TObjArray* inputLayer = (TObjArray*)fNetwork->At(0);

   const Event * ev = GetEvent();

   for (UInt_t i = 0; i < GetNvar(); i++) {
      neuron = (TNeuron*)inputLayer->At(i);
      neuron->ForceValue( ev->GetValue(i) );
   }
   ForceNetworkCalculations();

   // check the output of the network
   TObjArray* outputLayer = (TObjArray*)fNetwork->At( fNetwork->GetEntriesFast()-1 );
   neuron = (TNeuron*)outputLayer->At(0);

   // cannot determine error
   NoErrorCalc(err, errUpper);

   return neuron->GetActivationValue();
}
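
// For context (illustrative, not part of this file): in user code this entry
// point is typically reached through TMVA::Reader. A minimal sketch, assuming
// a weight file produced by a previous training; variable names and the file
// path are examples only.
//
//    #include "TMVA/Reader.h"
//
//    void EvaluateExample()
//    {
//       Float_t var1 = 0, var2 = 0;
//       TMVA::Reader reader( "!Color:!Silent" );
//       reader.AddVariable( "var1", &var1 );
//       reader.AddVariable( "var2", &var2 );
//       reader.BookMVA( "MLP", "weights/TMVAClassification_MLP.weights.xml" );
//
//       var1 = 0.5; var2 = -1.2;
//       Double_t mva = reader.EvaluateMVA( "MLP" ); // ends up in GetMvaValue()
//    }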

////////////////////////////////////////////////////////////////////////////////
/// get the regression value generated by the NN

const std::vector<Float_t> &TMVA::MethodANNBase::GetRegressionValues()
{
   TNeuron* neuron;

   TObjArray* inputLayer = (TObjArray*)fNetwork->At(0);

   const Event * ev = GetEvent();

   for (UInt_t i = 0; i < GetNvar(); i++) {
      neuron = (TNeuron*)inputLayer->At(i);
      neuron->ForceValue( ev->GetValue(i) );
   }
   ForceNetworkCalculations();

   // check the output of the network
   TObjArray* outputLayer = (TObjArray*)fNetwork->At( fNetwork->GetEntriesFast()-1 );

   if (fRegressionReturnVal == NULL) fRegressionReturnVal = new std::vector<Float_t>();
   fRegressionReturnVal->clear();

   Event * evT = new Event(*ev);
   UInt_t ntgts = outputLayer->GetEntriesFast();
   for (UInt_t itgt = 0; itgt < ntgts; itgt++) {
      evT->SetTarget(itgt, ((TNeuron*)outputLayer->At(itgt))->GetActivationValue());
   }

   const Event* evT2 = GetTransformationHandler().InverseTransform( evT );
   for (UInt_t itgt = 0; itgt < ntgts; itgt++) {
      fRegressionReturnVal->push_back( evT2->GetTarget(itgt) );
   }

   delete evT;

   return *fRegressionReturnVal;
}

////////////////////////////////////////////////////////////////////////////////
/// get the multiclass classification values generated by the NN

const std::vector<Float_t> &TMVA::MethodANNBase::GetMulticlassValues()
{
   TNeuron* neuron;

   TObjArray* inputLayer = (TObjArray*)fNetwork->At(0);

   const Event * ev = GetEvent();

   for (UInt_t i = 0; i < GetNvar(); i++) {
      neuron = (TNeuron*)inputLayer->At(i);
      neuron->ForceValue( ev->GetValue(i) );
   }
   ForceNetworkCalculations();

   // check the output of the network

   if (fMulticlassReturnVal == NULL) fMulticlassReturnVal = new std::vector<Float_t>();
   fMulticlassReturnVal->clear();
   std::vector<Float_t> temp;

   UInt_t nClasses = DataInfo().GetNClasses();
   for (UInt_t icls = 0; icls < nClasses; icls++) {
      temp.push_back( GetOutputNeuron( icls )->GetActivationValue() );
   }

   for (UInt_t iClass = 0; iClass < nClasses; iClass++) {
      Double_t norm = 0.0;
      for (UInt_t j = 0; j < nClasses; j++) {
         if (iClass != j)
            norm += exp(temp[j] - temp[iClass]);
      }
      (*fMulticlassReturnVal).push_back(1.0/(1.0+norm));
   }

   return *fMulticlassReturnVal;
}
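
// Note (illustrative): the normalisation loop above is algebraically the
// softmax function. With raw output activations t_k, each returned value is
//
//    y_i = 1 / (1 + sum_{j != i} exp(t_j - t_i)) = exp(t_i) / sum_j exp(t_j)
//
// so the nClasses outputs are positive and sum to one; using the differences
// t_j - t_i in the exponent is a numerically safer way to evaluate the ratio
// than exponentiating the raw activations directly.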

////////////////////////////////////////////////////////////////////////////////
/// create XML description of ANN classifier

void TMVA::MethodANNBase::AddWeightsXMLTo( void* parent ) const
{
   Int_t numLayers = fNetwork->GetEntriesFast();
   void* wght      = gTools().xmlengine().NewChild(parent, 0, "Weights");
   void* xmlLayout = gTools().xmlengine().NewChild(wght, 0, "Layout");
   gTools().xmlengine().NewAttr(xmlLayout, 0, "NLayers", gTools().StringFromInt(fNetwork->GetEntriesFast()) );
   TString weights = "";
   for (Int_t i = 0; i < numLayers; i++) {
      TObjArray* layer = (TObjArray*)fNetwork->At(i);
      Int_t numNeurons = layer->GetEntriesFast();
      void* layerxml = gTools().xmlengine().NewChild(xmlLayout, 0, "Layer");
      gTools().xmlengine().NewAttr(layerxml, 0, "Index",    gTools().StringFromInt(i) );
      gTools().xmlengine().NewAttr(layerxml, 0, "NNeurons", gTools().StringFromInt(numNeurons) );
      for (Int_t j = 0; j < numNeurons; j++) {
         TNeuron* neuron = (TNeuron*)layer->At(j);
         Int_t numSynapses = neuron->NumPostLinks();
         void* neuronxml = gTools().AddChild(layerxml, "Neuron");
         gTools().AddAttr(neuronxml, "NSynapses", gTools().StringFromInt(numSynapses) );
         if (numSynapses == 0) continue;
         std::stringstream s("");
         s.precision( 16 );
         for (Int_t k = 0; k < numSynapses; k++) {
            TSynapse* synapse = neuron->PostLinkAt(k);
            s << std::scientific << synapse->GetWeight() << " ";
         }
         gTools().AddRawLine( neuronxml, s.str().c_str() );
      }
   }

   // if inverse hessian exists, write inverse hessian to weight file
   if ( fInvHessian.GetNcols() > 0 ) {
      void* xmlInvHessian = gTools().xmlengine().NewChild(wght, 0, "InverseHessian");

      // get the matrix dimensions
      Int_t nElements = fInvHessian.GetNoElements();
      Int_t nRows     = fInvHessian.GetNrows();
      Int_t nCols     = fInvHessian.GetNcols();
      gTools().xmlengine().NewAttr(xmlInvHessian, 0, "NElements", gTools().StringFromInt(nElements) );
      gTools().xmlengine().NewAttr(xmlInvHessian, 0, "NRows",     gTools().StringFromInt(nRows) );
      gTools().xmlengine().NewAttr(xmlInvHessian, 0, "NCols",     gTools().StringFromInt(nCols) );

      // copy the matrix elements into an array
      Double_t* elements = new Double_t[nElements+10];
      fInvHessian.GetMatrix2Array( elements );

      // store the matrix elements row-wise
      Int_t index = 0;
      for (Int_t row = 0; row < nRows; ++row) {
         void* xmlRow = gTools().xmlengine().NewChild(xmlInvHessian, 0, "Row");
         gTools().xmlengine().NewAttr(xmlRow, 0, "Index", gTools().StringFromInt(row) );

         // create the rows
         std::stringstream s("");
         s.precision( 16 );
         for (Int_t col = 0; col < nCols; ++col) {
            s << std::scientific << (*(elements+index)) << " ";
            ++index;
         }
         gTools().xmlengine().AddRawLine( xmlRow, s.str().c_str() );
      }
      delete[] elements;
   }
}
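
// Sketch (illustrative) of the XML structure written above; attribute values
// and weights are made-up placeholders:
//
//    <Weights>
//      <Layout NLayers="3">
//        <Layer Index="0" NNeurons="5">
//          <Neuron NSynapses="4">1.2500000000000000e-01 ... </Neuron>
//          ...
//        </Layer>
//        ...
//      </Layout>
//      <InverseHessian NElements="..." NRows="..." NCols="...">
//        <Row Index="0"> ... </Row>   <!-- written only if fInvHessian is filled -->
//      </InverseHessian>
//    </Weights>
//
// ReadWeightsFromXML() below parses exactly this layout.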

////////////////////////////////////////////////////////////////////////////////
/// read MLP from xml weight file

void TMVA::MethodANNBase::ReadWeightsFromXML( void* wghtnode )
{
   // build the layout first
   Bool_t fromFile = kTRUE;
   std::vector<Int_t>* layout = new std::vector<Int_t>();

   void* xmlLayout = NULL;
   xmlLayout = gTools().GetChild(wghtnode, "Layout");
   if ( !xmlLayout )
      xmlLayout = wghtnode;

   UInt_t nLayers;
   gTools().ReadAttr( xmlLayout, "NLayers", nLayers );
   layout->resize( nLayers );

   void* ch = gTools().xmlengine().GetChild(xmlLayout);
   UInt_t index;
   UInt_t nNeurons;
   while (ch) {
      gTools().ReadAttr( ch, "Index",    index );
      gTools().ReadAttr( ch, "NNeurons", nNeurons );
      layout->at(index) = nNeurons;
      ch = gTools().GetNextChild(ch);
   }

   BuildNetwork( layout, NULL, fromFile );
   // use 'slow' (exact) TanH if processing an old weight file to ensure 100% compatible results;
   // otherwise use the new default, the 'fast tanh' approximation
   if (GetTrainingTMVAVersionCode() < TMVA_VERSION(4,2,1)) {
      TActivationTanh* act = dynamic_cast<TActivationTanh*>( fActivation );
      if (act) act->SetSlow();
   }

   // fill the weights of the synapses
   UInt_t nSyn;
   Float_t weight;
   ch = gTools().xmlengine().GetChild(xmlLayout);
   UInt_t iLayer = 0;
   while (ch) { // layers
      TObjArray* layer = (TObjArray*)fNetwork->At(iLayer);
      gTools().ReadAttr( ch, "Index",    index );
      gTools().ReadAttr( ch, "NNeurons", nNeurons );

      void* nodeN = gTools().GetChild(ch);
      UInt_t iNeuron = 0;
      while (nodeN) { // neurons
         TNeuron *neuron = (TNeuron*)layer->At(iNeuron);
         gTools().ReadAttr( nodeN, "NSynapses", nSyn );
         if ( nSyn > 0 ) {
            const char* content = gTools().GetContent(nodeN);
            std::stringstream s(content);
            for (UInt_t iSyn = 0; iSyn < nSyn; iSyn++) { // synapses

               TSynapse* synapse = neuron->PostLinkAt(iSyn);
               s >> weight;
               //Log() << kWARNING << neuron << " " << weight << Endl;
               synapse->SetWeight(weight);
            }
         }
         nodeN = gTools().GetNextChild(nodeN);
         iNeuron++;
      }
      ch = gTools().GetNextChild(ch);
      iLayer++;
   }

   delete layout;

   void* xmlInvHessian = NULL;
   xmlInvHessian = gTools().GetChild(wghtnode, "InverseHessian");
   if ( !xmlInvHessian )
      // no inverse hessian available
      return;

   fUseRegulator = kTRUE;

   Int_t nElements = 0;
   Int_t nRows     = 0;
   Int_t nCols     = 0;
   gTools().ReadAttr( xmlInvHessian, "NElements", nElements );
   gTools().ReadAttr( xmlInvHessian, "NRows",     nRows );
   gTools().ReadAttr( xmlInvHessian, "NCols",     nCols );

   // adjust the matrix dimensions
   fInvHessian.ResizeTo( nRows, nCols );

   // prepare an array to read in the values
   Double_t* elements;
   if (nElements > std::numeric_limits<int>::max()-100) {
      Log() << kFATAL << "you tried to read a hessian matrix with " << nElements
            << " elements, --> too large, guess s.th. went wrong reading from the weight file" << Endl;
      return;
   } else {
      elements = new Double_t[nElements+10];
   }

   void* xmlRow = gTools().xmlengine().GetChild(xmlInvHessian);
   Int_t row = 0;
   index = 0;
   while (xmlRow) { // rows
      gTools().ReadAttr( xmlRow, "Index", row );

      const char* content = gTools().xmlengine().GetNodeContent(xmlRow);

      std::stringstream s(content);
      for (Int_t iCol = 0; iCol < nCols; iCol++) { // columns
         s >> (*(elements+index));
         ++index;
      }
      xmlRow = gTools().xmlengine().GetNext(xmlRow);
      ++row;
   }

   fInvHessian.SetMatrixArray( elements );

   delete[] elements;
}

////////////////////////////////////////////////////////////////////////////////
/// destroy/clear the network then read it back in from the weights file

void TMVA::MethodANNBase::ReadWeightsFromStream( std::istream & istr )
{
   // delete network so we can reconstruct network from scratch
   TString dummy;

   // synapse weights
   Double_t weight;
   std::vector<Double_t>* weights = new std::vector<Double_t>();
   istr >> dummy;
   while (istr >> dummy >> weight) weights->push_back(weight); // use w/ slower write-out

   ForceWeights(weights);

   delete weights;
}

////////////////////////////////////////////////////////////////////////////////
/// compute ranking of input variables by summing function of weights

const TMVA::Ranking* TMVA::MethodANNBase::CreateRanking()
{
   // create the ranking object
   fRanking = new Ranking( GetName(), "Importance" );

   TNeuron*  neuron;
   TSynapse* synapse;
   Double_t  importance, avgVal;
   TString   varName;

   for (UInt_t ivar = 0; ivar < GetNvar(); ivar++) {

      neuron = GetInputNeuron(ivar);
      Int_t numSynapses = neuron->NumPostLinks();
      importance = 0;
      varName = GetInputVar(ivar); // fix this line

      // figure out average value of variable i
      Double_t meanS, meanB, rmsS, rmsB, xmin, xmax;
      Statistics( TMVA::Types::kTraining, varName,
                  meanS, meanB, rmsS, rmsB, xmin, xmax );

      avgVal = (TMath::Abs(meanS) + TMath::Abs(meanB))/2.0;
      double meanrms = (TMath::Abs(rmsS) + TMath::Abs(rmsB))/2.;
      if (avgVal < meanrms) avgVal = meanrms;
      if (IsNormalised()) avgVal = 0.5*(1 + gTools().NormVariable( avgVal, GetXmin( ivar ), GetXmax( ivar )));

      for (Int_t j = 0; j < numSynapses; j++) {
         synapse = neuron->PostLinkAt(j);
         importance += synapse->GetWeight() * synapse->GetWeight();
      }

      importance *= avgVal * avgVal;

      fRanking->AddRank( Rank( varName, importance ) );
   }

   return fRanking;
}
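
// In formula form (illustrative): for input variable i with post-synaptic
// weights w_ij, the importance computed above is
//
//    I_i = xbar_i^2 * sum_j w_ij^2,
//
// where xbar_i = max( (|mean_S| + |mean_B|)/2, (|rms_S| + |rms_B|)/2 ),
// optionally mapped to [0,1] when the inputs are normalised.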

////////////////////////////////////////////////////////////////////////////////
/// create histograms of the weight matrices, for monitoring

void TMVA::MethodANNBase::CreateWeightMonitoringHists( const TString& bulkname,
                                                       std::vector<TH1*>* hv ) const
{
   TH2F* hist;
   Int_t numLayers = fNetwork->GetEntriesFast();

   for (Int_t i = 0; i < numLayers-1; i++) {

      TObjArray* layer1 = (TObjArray*)fNetwork->At(i);
      TObjArray* layer2 = (TObjArray*)fNetwork->At(i+1);
      Int_t numNeurons1 = layer1->GetEntriesFast();
      Int_t numNeurons2 = layer2->GetEntriesFast();

      TString name = Form("%s%i%i", bulkname.Data(), i, i+1);
      hist = new TH2F(name + "", name + "",
                      numNeurons1, 0, numNeurons1, numNeurons2, 0, numNeurons2);

      for (Int_t j = 0; j < numNeurons1; j++) {

         TNeuron* neuron = (TNeuron*)layer1->At(j);
         Int_t numSynapses = neuron->NumPostLinks();

         for (Int_t k = 0; k < numSynapses; k++) {

            TSynapse* synapse = neuron->PostLinkAt(k);
            hist->SetBinContent(j+1, k+1, synapse->GetWeight());
         }
      }

      if (hv) hv->push_back( hist );
      else {
         hist->Write();
         delete hist;
      }
   }
}

////////////////////////////////////////////////////////////////////////////////
/// write histograms to file

void TMVA::MethodANNBase::WriteMonitoringHistosToFile() const
{
   PrintMessage(Form("Write special histos to file: %s", BaseDir()->GetPath()), kTRUE);

   // histograms containing weights for architecture plotting (used in macro "network.cxx")
   CreateWeightMonitoringHists( "weights_hist" );

   // now save all the epoch-wise monitoring information
#if __cplusplus > 199711L
   static std::atomic<int> epochMonitoringDirectoryNumber{0};
#else
   static int epochMonitoringDirectoryNumber = 0;
#endif
   int epochVal = epochMonitoringDirectoryNumber++;
   TDirectory* epochdir = NULL;
   if ( epochVal == 0 )
      epochdir = BaseDir()->mkdir( "EpochMonitoring" );
   else
      epochdir = BaseDir()->mkdir( Form("EpochMonitoring_%4d", epochVal) );

   epochdir->cd();
   for (std::vector<TH1*>::const_iterator it = fEpochMonHistS.begin(); it != fEpochMonHistS.end(); it++) {
      (*it)->Write();
      delete (*it);
   }
   for (std::vector<TH1*>::const_iterator it = fEpochMonHistB.begin(); it != fEpochMonHistB.end(); it++) {
      (*it)->Write();
      delete (*it);
   }
   for (std::vector<TH1*>::const_iterator it = fEpochMonHistW.begin(); it != fEpochMonHistW.end(); it++) {
      (*it)->Write();
      delete (*it);
   }
   BaseDir()->cd();
}

////////////////////////////////////////////////////////////////////////////////
/// write specific classifier response

void TMVA::MethodANNBase::MakeClassSpecific( std::ostream& fout, const TString& className ) const
{
   Int_t numLayers = fNetwork->GetEntries();

   fout << std::endl;
   fout << "   double ActivationFnc(double x) const;" << std::endl;
   fout << "   double OutputActivationFnc(double x) const;" << std::endl; //zjh
   fout << std::endl;
   fout << "   int fLayers;" << std::endl;
   fout << "   int fLayerSize[" << numLayers << "];" << std::endl;
   int numNodesFrom = -1;
   for (Int_t lIdx = 0; lIdx < numLayers; lIdx++) {
      int numNodesTo = ((TObjArray*)fNetwork->At(lIdx))->GetEntries();
      if (numNodesFrom < 0) { numNodesFrom = numNodesTo; continue; }
      fout << "   double fWeightMatrix" << lIdx-1 << "to" << lIdx << "[" << numNodesTo << "][" << numNodesFrom << "];";
      fout << "   // weight matrix from layer " << lIdx-1 << " to " << lIdx << std::endl;
      numNodesFrom = numNodesTo;
   }
   fout << std::endl;
   fout << "   double * fWeights[" << numLayers << "];" << std::endl;
   fout << "};" << std::endl;

   fout << std::endl;

   fout << "inline void " << className << "::Initialize()" << std::endl;
   fout << "{" << std::endl;
   fout << "   // build network structure" << std::endl;
   fout << "   fLayers = " << numLayers << ";" << std::endl;
   for (Int_t lIdx = 0; lIdx < numLayers; lIdx++) {
      TObjArray* layer = (TObjArray*)fNetwork->At(lIdx);
      int numNodes = layer->GetEntries();
      fout << "   fLayerSize[" << lIdx << "] = " << numNodes << "; fWeights[" << lIdx << "] = new double[" << numNodes << "]; " << std::endl;
   }

   for (Int_t i = 0; i < numLayers-1; i++) {
      fout << "   // weight matrix from layer " << i << " to " << i+1 << std::endl;
      TObjArray* layer = (TObjArray*)fNetwork->At(i);
      Int_t numNeurons = layer->GetEntriesFast();
      for (Int_t j = 0; j < numNeurons; j++) {
         TNeuron* neuron = (TNeuron*)layer->At(j);
         Int_t numSynapses = neuron->NumPostLinks();
         for (Int_t k = 0; k < numSynapses; k++) {
            TSynapse* synapse = neuron->PostLinkAt(k);
            fout << "   fWeightMatrix" << i << "to" << i+1 << "[" << k << "][" << j << "] = " << synapse->GetWeight() << ";" << std::endl;
         }
      }
   }

   fout << "}" << std::endl;
   fout << std::endl;

   // writing of the GetMvaValue__ method
   fout << "inline double " << className << "::GetMvaValue__( const std::vector<double>& inputValues ) const" << std::endl;
   fout << "{" << std::endl;
   fout << "   if (inputValues.size() != (unsigned int)fLayerSize[0]-1) {" << std::endl;
   fout << "      std::cout << \"Input vector needs to be of size \" << fLayerSize[0]-1 << std::endl;" << std::endl;
   fout << "      return 0;" << std::endl;
   fout << "   }" << std::endl;
   fout << std::endl;
   fout << "   for (int l=0; l<fLayers; l++)" << std::endl;
   fout << "      for (int i=0; i<fLayerSize[l]; i++) fWeights[l][i]=0;" << std::endl;
   fout << std::endl;
   fout << "   for (int l=0; l<fLayers-1; l++)" << std::endl;
   fout << "      fWeights[l][fLayerSize[l]-1]=1;" << std::endl;
   fout << std::endl;
   fout << "   for (int i=0; i<fLayerSize[0]-1; i++)" << std::endl;
   fout << "      fWeights[0][i]=inputValues[i];" << std::endl;
   fout << std::endl;
   for (Int_t i = 0; i < numLayers-1; i++) {
      fout << "   // layer " << i << " to " << i+1 << std::endl;
      if (i+1 == numLayers-1) {
         fout << "   for (int o=0; o<fLayerSize[" << i+1 << "]; o++) {" << std::endl;
      }
      else {
         fout << "   for (int o=0; o<fLayerSize[" << i+1 << "]-1; o++) {" << std::endl;
      }
      fout << "      for (int i=0; i<fLayerSize[" << i << "]; i++) {" << std::endl;
      fout << "         double inputVal = fWeightMatrix" << i << "to" << i+1 << "[o][i] * fWeights[" << i << "][i];" << std::endl;

      if ( fNeuronInputType == "sum") {
         fout << "         fWeights[" << i+1 << "][o] += inputVal;" << std::endl;
      }
      else if ( fNeuronInputType == "sqsum") {
         fout << "         fWeights[" << i+1 << "][o] += inputVal*inputVal;" << std::endl;
      }
      else { // fNeuronInputType == TNeuronInputChooser::kAbsSum
         fout << "         fWeights[" << i+1 << "][o] += fabs(inputVal);" << std::endl;
      }
      fout << "      }" << std::endl;
      if (i+1 != numLayers-1) // in the last layer no activation function is applied
         fout << "      fWeights[" << i+1 << "][o] = ActivationFnc(fWeights[" << i+1 << "][o]);" << std::endl;
      else
         fout << "      fWeights[" << i+1 << "][o] = OutputActivationFnc(fWeights[" << i+1 << "][o]);" << std::endl; //zjh
      fout << "   }" << std::endl;
   }
   fout << std::endl;
   fout << "   return fWeights[" << numLayers-1 << "][0];" << std::endl;
   fout << "}" << std::endl;

   fout << std::endl;
   TString fncName = className + "::ActivationFnc";
   fActivation->MakeFunction(fout, fncName);
   fncName = className + "::OutputActivationFnc"; //zjh
   fOutput->MakeFunction(fout, fncName); //zjh

   fout << "   " << std::endl;
   fout << "// Clean up" << std::endl;
   fout << "inline void " << className << "::Clear() " << std::endl;
   fout << "{" << std::endl;
   fout << "   // clean up the arrays" << std::endl;
   fout << "   for (int lIdx = 0; lIdx < " << numLayers << "; lIdx++) {" << std::endl;
   fout << "      delete[] fWeights[lIdx];" << std::endl;
   fout << "   }" << std::endl;
   fout << "}" << std::endl;
}
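
// Usage sketch (illustrative, not part of this file): the code emitted above
// is embedded in a standalone class written out by the MakeClass machinery of
// MethodBase, conventionally named "Read<MethodTitle>". Assuming a generated
// file "TMVAClassification_MLP.class.C", it could be used roughly like this:
//
//    // #include "TMVAClassification_MLP.class.C"  // generated file (name assumed)
//    //
//    // std::vector<std::string> names = { "var1", "var2" };
//    // ReadMLP mlp( names );                      // generated constructor
//    // std::vector<double> input = { 0.5, -1.2 }; // one value per variable
//    // double mva = mlp.GetMvaValue( input );     // calls GetMvaValue__() above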

////////////////////////////////////////////////////////////////////////////////
/// who the hell makes such strange Debug flags that even use "global pointers"..

Bool_t TMVA::MethodANNBase::Debug() const
{
   return fgDEBUG;
}