// @(#)root/tmva $Id$
// Author: Andreas Hoecker, Peter Speckmayer, Matt Jachowski, Jan Therhaag, Jiahang Zhong

/**********************************************************************************
 * Project: TMVA - a Root-integrated toolkit for multivariate data analysis      *
 * Package: TMVA                                                                  *
 * Class  : MethodANNBase                                                         *
 * Web    : http://tmva.sourceforge.net                                           *
 *                                                                                *
 * Description:                                                                   *
 *      Artificial neural network base class for the discrimination of signal    *
 *      from background.                                                          *
 *                                                                                *
 * Authors (alphabetical):                                                        *
 *      Krzysztof Danielowski <danielow@cern.ch>         - IFJ & AGH, Poland      *
 *      Andreas Hoecker       <Andreas.Hocker@cern.ch>   - CERN, Switzerland      *
 *      Matt Jachowski        <jachowski@stanford.edu>   - Stanford University    *
 *      Kamil Kraszewski      <kalq@cern.ch>             - IFJ & UJ, Poland       *
 *      Maciej Kruk           <mkruk@cern.ch>            - IFJ & AGH, Poland      *
 *      Peter Speckmayer      <peter.speckmayer@cern.ch> - CERN, Switzerland      *
 *      Joerg Stelzer         <stelzer@cern.ch>          - DESY, Germany          *
 *      Jan Therhaag          <Jan.Therhaag@cern.ch>     - U of Bonn, Germany     *
 *      Jiahang Zhong         <Jiahang.Zhong@cern.ch>    - Academia Sinica,Taipei *
 *                                                                                *
 * Copyright (c) 2005-2011:                                                       *
 *      CERN, Switzerland                                                         *
 *      U. of Bonn, Germany                                                       *
 *                                                                                *
 * Redistribution and use in source and binary forms, with or without            *
 * modification, are permitted according to the terms listed in LICENSE          *
 * (http://tmva.sourceforge.net/LICENSE)                                          *
 **********************************************************************************/

/*! \class TMVA::MethodANNBase
\ingroup TMVA

Base class for all TMVA methods using artificial neural networks.

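A derived ANN method (e.g. TMVA::MethodMLP) is typically booked through the
Factory, passing the options declared in MethodANNBase::DeclareOptions() in
the option string. A minimal sketch, assuming the usual Factory/DataLoader
workflow (the `factory` and `dataloader` objects are illustrative):

~~~ {.cpp}
// book an MLP; NCycles, HiddenLayers, NeuronType and RandomSeed are
// options handled by this base class
factory->BookMethod( dataloader, TMVA::Types::kMLP, "MLP",
                     "NCycles=600:HiddenLayers=N,N-1:NeuronType=tanh:RandomSeed=1" );
~~~
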
*/

#include "TMVA/MethodBase.h"

#include "TMVA/Configurable.h"
#include "TMVA/DataSetInfo.h"
#include "TMVA/MethodANNBase.h"
#include "TMVA/MsgLogger.h"
#include "TMVA/TNeuron.h"
#include "TMVA/TSynapse.h"
#include "TMVA/TActivationChooser.h"
#include "TMVA/TActivationTanh.h"
#include "TMVA/Types.h"
#include "TMVA/Tools.h"
#include "TMVA/TNeuronInputChooser.h"
#include "TMVA/Ranking.h"
#include "TMVA/Version.h"

#include "TString.h"
#include "TTree.h"
#include "TDirectory.h"
#include "Riostream.h"
#include "TRandom3.h"
#include "TH2F.h"
#include "TH1.h"
#include "TMath.h"
#include "TMatrixT.h"

#include <vector>
#include <cstdlib>
#include <stdexcept>
#include <sstream>
#include <limits>
#if __cplusplus > 199711L
#include <atomic>
#endif


using std::vector;

ClassImp(TMVA::MethodANNBase);

////////////////////////////////////////////////////////////////////////////////
/// standard constructor
/// Note: the neuron input function is selectable via an option, but only
/// the input function "sum" leads to weight convergence -- otherwise the
/// weights go to NaN and lead to an ABORT.

TMVA::MethodANNBase::MethodANNBase( const TString& jobName,
                                    Types::EMVA methodType,
                                    const TString& methodTitle,
                                    DataSetInfo& theData,
                                    const TString& theOption )
   : TMVA::MethodBase( jobName, methodType, methodTitle, theData, theOption )
   , fEstimator(kMSE)
   , fUseRegulator(kFALSE)
   , fRandomSeed(0)
{
   InitANNBase();

   DeclareOptions();
}

////////////////////////////////////////////////////////////////////////////////
/// construct the Method from the weight file

TMVA::MethodANNBase::MethodANNBase( Types::EMVA methodType,
                                    DataSetInfo& theData,
                                    const TString& theWeightFile )
   : TMVA::MethodBase( methodType, theData, theWeightFile )
   , fEstimator(kMSE)
   , fUseRegulator(kFALSE)
   , fRandomSeed(0)
{
   InitANNBase();

   DeclareOptions();
}

////////////////////////////////////////////////////////////////////////////////
/// define the options (their key words) that can be set in the option string
/// here the options valid for ALL MVA methods are declared.
///
/// known options:
///
///  - NCycles=xx              : the number of training cycles
///  - Normalize=kTRUE,kFALSE  : if normalised input variables should be used
///  - HiddenLayers="N-1,N-2"  : the specification of the hidden layers
///  - NeuronType=sigmoid,tanh,radial,linear : the type of activation function
///    used at the neuron

void TMVA::MethodANNBase::DeclareOptions()
{
   DeclareOptionRef( fNcycles    = 500,       "NCycles",      "Number of training cycles" );
   DeclareOptionRef( fLayerSpec  = "N,N-1",   "HiddenLayers", "Specification of hidden layer architecture" );
   DeclareOptionRef( fNeuronType = "sigmoid", "NeuronType",   "Neuron activation function type" );
   DeclareOptionRef( fRandomSeed = 1, "RandomSeed", "Random seed for initial synapse weights (0 means unique seed for each run; default value '1')");

   DeclareOptionRef(fEstimatorS="MSE", "EstimatorType",
                    "MSE (Mean Square Estimator) for Gaussian Likelihood or CE(Cross-Entropy) for Bernoulli Likelihood" ); //zjh
   AddPreDefVal(TString("MSE"));  //zjh
   AddPreDefVal(TString("CE"));   //zjh


   TActivationChooser aChooser;
   std::vector<TString>* names = aChooser.GetAllActivationNames();
   Int_t nTypes = names->size();
   for (Int_t i = 0; i < nTypes; i++)
      AddPreDefVal(names->at(i));
   delete names;

   DeclareOptionRef(fNeuronInputType="sum", "NeuronInputType", "Neuron input function type");
   TNeuronInputChooser iChooser;
   names = iChooser.GetAllNeuronInputNames();
   nTypes = names->size();
   for (Int_t i = 0; i < nTypes; i++) AddPreDefVal(names->at(i));
   delete names;
}


////////////////////////////////////////////////////////////////////////////////
/// process the options: set the estimator (MSE for regression/multiclass,
/// CE for two-class classification) and build the network from the layout
/// string

void TMVA::MethodANNBase::ProcessOptions()
{
   if ( DoRegression() || DoMulticlass()) fEstimatorS = "MSE";  //zjh
   else                                   fEstimatorS = "CE" ;  //hhv
   if      (fEstimatorS == "MSE" ) fEstimator = kMSE;
   else if (fEstimatorS == "CE")   fEstimator = kCE;            //zjh
   std::vector<Int_t>* layout = ParseLayoutString(fLayerSpec);
   BuildNetwork(layout);
   delete layout;
}

////////////////////////////////////////////////////////////////////////////////
/// parse layout specification string and return a vector, each entry
/// containing the number of neurons to go in each successive layer
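///
/// For example (classification, assuming four input variables), the
/// specification "N,N-1" yields { 4, 4, 3, 1 }: the input layer, one hidden
/// layer with N = 4 neurons, one with N-1 = 3, and a single output node.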

std::vector<Int_t>* TMVA::MethodANNBase::ParseLayoutString(TString layerSpec)
{
   std::vector<Int_t>* layout = new std::vector<Int_t>();
   layout->push_back((Int_t)GetNvar());
   while(layerSpec.Length()>0) {
      TString sToAdd="";
      if (layerSpec.First(',')<0) {
         sToAdd = layerSpec;
         layerSpec = "";
      }
      else {
         sToAdd = layerSpec(0,layerSpec.First(','));
         layerSpec = layerSpec(layerSpec.First(',')+1,layerSpec.Length());
      }
      int nNodes = 0;
      if (sToAdd.BeginsWith("n") || sToAdd.BeginsWith("N")) { sToAdd.Remove(0,1); nNodes = GetNvar(); }
      nNodes += atoi(sToAdd);
      layout->push_back(nNodes);
   }
   if( DoRegression() )
      layout->push_back( DataInfo().GetNTargets() );  // one output node for each target
   else if( DoMulticlass() )
      layout->push_back( DataInfo().GetNClasses() );  // one output node for each class
   else
      layout->push_back(1);  // one output node (for signal/background classification)

   return layout;
}

////////////////////////////////////////////////////////////////////////////////
/// initialize ANNBase object

void TMVA::MethodANNBase::InitANNBase()
{
   fNetwork         = NULL;
   frgen            = NULL;
   fActivation      = NULL;
   fOutput          = NULL; //zjh
   fIdentity        = NULL;
   fInputCalculator = NULL;
   fSynapses        = NULL;
   fEstimatorHistTrain = NULL;
   fEstimatorHistTest  = NULL;

   // reset monitoring histogram vectors
   fEpochMonHistS.clear();
   fEpochMonHistB.clear();
   fEpochMonHistW.clear();

   // these will be set in BuildNetwork()
   fInputLayer = NULL;
   fOutputNeurons.clear();

   frgen = new TRandom3(fRandomSeed);

   fSynapses = new TObjArray();
}

////////////////////////////////////////////////////////////////////////////////
/// destructor

TMVA::MethodANNBase::~MethodANNBase()
{
   DeleteNetwork();
}

////////////////////////////////////////////////////////////////////////////////
/// delete/clear network

void TMVA::MethodANNBase::DeleteNetwork()
{
   if (fNetwork != NULL) {
      TObjArray *layer;
      Int_t numLayers = fNetwork->GetEntriesFast();
      for (Int_t i = 0; i < numLayers; i++) {
         layer = (TObjArray*)fNetwork->At(i);
         DeleteNetworkLayer(layer);
      }
      delete fNetwork;
   }

   if (frgen != NULL)            delete frgen;
   if (fActivation != NULL)      delete fActivation;
   if (fOutput != NULL)          delete fOutput;  //zjh
   if (fIdentity != NULL)        delete fIdentity;
   if (fInputCalculator != NULL) delete fInputCalculator;
   if (fSynapses != NULL)        delete fSynapses;

   fNetwork         = NULL;
   frgen            = NULL;
   fActivation      = NULL;
   fOutput          = NULL; //zjh
   fIdentity        = NULL;
   fInputCalculator = NULL;
   fSynapses        = NULL;
}

////////////////////////////////////////////////////////////////////////////////
/// delete a network layer

void TMVA::MethodANNBase::DeleteNetworkLayer( TObjArray*& layer )
{
   TNeuron* neuron;
   Int_t numNeurons = layer->GetEntriesFast();
   for (Int_t i = 0; i < numNeurons; i++) {
      neuron = (TNeuron*)layer->At(i);
      neuron->DeletePreLinks();
      delete neuron;
   }
   delete layer;
}

////////////////////////////////////////////////////////////////////////////////
/// build network given a layout (number of neurons in each layer)
/// and optional weights array

void TMVA::MethodANNBase::BuildNetwork( std::vector<Int_t>* layout, std::vector<Double_t>* weights, Bool_t fromFile )
{
   if      (fEstimatorS == "MSE") fEstimator = kMSE;  //zjh
   else if (fEstimatorS == "CE")  fEstimator = kCE;   //zjh
   else Log()<<kWARNING<<"fEstimator="<<fEstimator<<"\tfEstimatorS="<<fEstimatorS<<Endl;
   if (fEstimator!=kMSE && fEstimator!=kCE) Log()<<kWARNING<<"Estimator type unspecified \t"<<Endl; //zjh


   Log() << kHEADER << "Building Network. " << Endl;

   DeleteNetwork();
   InitANNBase();

   // set activation and input functions
   TActivationChooser aChooser;
   fActivation = aChooser.CreateActivation(fNeuronType);
   fIdentity   = aChooser.CreateActivation("linear");
   if      (fEstimator==kMSE) fOutput = aChooser.CreateActivation("linear");  //zjh
   else if (fEstimator==kCE)  fOutput = aChooser.CreateActivation("sigmoid"); //zjh
   TNeuronInputChooser iChooser;
   fInputCalculator = iChooser.CreateNeuronInput(fNeuronInputType);

   fNetwork = new TObjArray();
   fRegulatorIdx.clear();
   fRegulators.clear();
   BuildLayers( layout, fromFile );

   // cache input layer and output neuron for fast access
   fInputLayer = (TObjArray*)fNetwork->At(0);
   TObjArray* outputLayer = (TObjArray*)fNetwork->At(fNetwork->GetEntriesFast()-1);
   fOutputNeurons.clear();
   for (Int_t i = 0; i < outputLayer->GetEntries(); i++) {
      fOutputNeurons.push_back( (TNeuron*)outputLayer->At(i) );
   }

   if (weights == NULL) InitWeights();
   else                 ForceWeights(weights);
}

////////////////////////////////////////////////////////////////////////////////
/// build the network layers

void TMVA::MethodANNBase::BuildLayers( std::vector<Int_t>* layout, Bool_t fromFile )
{
   TObjArray* curLayer;
   TObjArray* prevLayer = NULL;

   Int_t numLayers = layout->size();

   for (Int_t i = 0; i < numLayers; i++) {
      curLayer = new TObjArray();
      BuildLayer(layout->at(i), curLayer, prevLayer, i, numLayers, fromFile);
      prevLayer = curLayer;
      fNetwork->Add(curLayer);
   }

   // cache pointers to synapses for fast access, the order matters
   for (Int_t i = 0; i < numLayers; i++) {
      TObjArray* layer = (TObjArray*)fNetwork->At(i);
      Int_t numNeurons = layer->GetEntriesFast();
      if (i!=0 && i!=numLayers-1) fRegulators.push_back(0.);  //zjh
      for (Int_t j = 0; j < numNeurons; j++) {
         if (i==0) fRegulators.push_back(0.);  //zjh
         TNeuron* neuron = (TNeuron*)layer->At(j);
         Int_t numSynapses = neuron->NumPostLinks();
         for (Int_t k = 0; k < numSynapses; k++) {
            TSynapse* synapse = neuron->PostLinkAt(k);
            fSynapses->Add(synapse);
            fRegulatorIdx.push_back(fRegulators.size()-1);  //zjh
         }
      }
   }
}

////////////////////////////////////////////////////////////////////////////////
/// build a single layer with neurons and synapses connecting this
/// layer to the previous layer

void TMVA::MethodANNBase::BuildLayer( Int_t numNeurons, TObjArray* curLayer,
                                      TObjArray* prevLayer, Int_t layerIndex,
                                      Int_t numLayers, Bool_t fromFile )
{
   TNeuron* neuron;
   for (Int_t j = 0; j < numNeurons; j++) {
      if (fromFile && (layerIndex != numLayers-1) && (j==numNeurons-1)){
         neuron = new TNeuron();
         neuron->SetActivationEqn(fIdentity);
         neuron->SetBiasNeuron();
         neuron->ForceValue(1.0);
         curLayer->Add(neuron);
      }
      else {
         neuron = new TNeuron();
         neuron->SetInputCalculator(fInputCalculator);

         // input layer
         if (layerIndex == 0) {
            neuron->SetActivationEqn(fIdentity);
            neuron->SetInputNeuron();
         }
         else {
            // output layer
            if (layerIndex == numLayers-1) {
               neuron->SetOutputNeuron();
               neuron->SetActivationEqn(fOutput);  //zjh
            }
            // hidden layers
            else neuron->SetActivationEqn(fActivation);
            AddPreLinks(neuron, prevLayer);
         }

         curLayer->Add(neuron);
      }
   }

   // add bias neuron (except to output layer)
   if(!fromFile){
      if (layerIndex != numLayers-1) {
         neuron = new TNeuron();
         neuron->SetActivationEqn(fIdentity);
         neuron->SetBiasNeuron();
         neuron->ForceValue(1.0);
         curLayer->Add(neuron);
      }
   }
}

////////////////////////////////////////////////////////////////////////////////
/// add synapses connecting a neuron to its preceding layer

void TMVA::MethodANNBase::AddPreLinks( TNeuron* neuron, TObjArray* prevLayer )
{
   TSynapse* synapse;
   int numNeurons = prevLayer->GetEntriesFast();
   TNeuron* preNeuron;

   for (Int_t i = 0; i < numNeurons; i++) {
      preNeuron = (TNeuron*)prevLayer->At(i);
      synapse = new TSynapse();
      synapse->SetPreNeuron(preNeuron);
      synapse->SetPostNeuron(neuron);
      preNeuron->AddPostLink(synapse);
      neuron->AddPreLink(synapse);
   }
}

////////////////////////////////////////////////////////////////////////////////
/// initialize the synapse weights randomly

void TMVA::MethodANNBase::InitWeights()
{
   PrintMessage("Initializing weights");

   // init synapse weights
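   // each weight is drawn uniformly between -2 and 2, via 4*Rndm()-2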
   Int_t numSynapses = fSynapses->GetEntriesFast();
   TSynapse* synapse;
   for (Int_t i = 0; i < numSynapses; i++) {
      synapse = (TSynapse*)fSynapses->At(i);
      synapse->SetWeight(4.0*frgen->Rndm() - 2.0);
   }
}

////////////////////////////////////////////////////////////////////////////////
/// force the synapse weights

void TMVA::MethodANNBase::ForceWeights(std::vector<Double_t>* weights)
{
   PrintMessage("Forcing weights");

   Int_t numSynapses = fSynapses->GetEntriesFast();
   TSynapse* synapse;
   for (Int_t i = 0; i < numSynapses; i++) {
      synapse = (TSynapse*)fSynapses->At(i);
      synapse->SetWeight(weights->at(i));
   }
}

////////////////////////////////////////////////////////////////////////////////
/// force the value for each input neuron

void TMVA::MethodANNBase::ForceNetworkInputs( const Event* ev, Int_t ignoreIndex )
{
   Double_t x;
   TNeuron* neuron;

   for (UInt_t j = 0; j < GetNvar(); j++) {

      x = (j != (UInt_t)ignoreIndex)?ev->GetValue(j):0;

      neuron = GetInputNeuron(j);
      neuron->ForceValue(x);
   }
}

////////////////////////////////////////////////////////////////////////////////
/// calculate input values to each neuron

void TMVA::MethodANNBase::ForceNetworkCalculations()
{
   TObjArray* curLayer;
   TNeuron* neuron;
   Int_t numLayers = fNetwork->GetEntriesFast();
   Int_t numNeurons;

   for (Int_t i = 0; i < numLayers; i++) {
      curLayer = (TObjArray*)fNetwork->At(i);
      numNeurons = curLayer->GetEntriesFast();

      for (Int_t j = 0; j < numNeurons; j++) {
         neuron = (TNeuron*) curLayer->At(j);
         neuron->CalculateValue();
         neuron->CalculateActivationValue();
      }
   }
}

////////////////////////////////////////////////////////////////////////////////
/// print messages, turn off printing by setting verbose and debug flag appropriately

void TMVA::MethodANNBase::PrintMessage( TString message, Bool_t force ) const
{
   if (Verbose() || Debug() || force) Log() << kINFO << message << Endl;
}

////////////////////////////////////////////////////////////////////////////////
/// wait for keyboard input, for debugging

void TMVA::MethodANNBase::WaitForKeyboard()
{
   std::string dummy;
   Log() << kINFO << "***Type anything to continue (q to quit): ";
   std::getline(std::cin, dummy);
   if (dummy == "q" || dummy == "Q") {
      PrintMessage( "quit" );
      delete this;
      exit(0);
   }
}

////////////////////////////////////////////////////////////////////////////////
/// print network representation, for debugging

void TMVA::MethodANNBase::PrintNetwork() const
{
   if (!Debug()) return;

   Log() << kINFO << Endl;
   PrintMessage( "Printing network " );
   Log() << kINFO << "-------------------------------------------------------------------" << Endl;

   TObjArray* curLayer;
   Int_t numLayers = fNetwork->GetEntriesFast();

   for (Int_t i = 0; i < numLayers; i++) {

      curLayer = (TObjArray*)fNetwork->At(i);
      Int_t numNeurons = curLayer->GetEntriesFast();

      Log() << kINFO << "Layer #" << i << " (" << numNeurons << " neurons):" << Endl;
      PrintLayer( curLayer );
   }
}

////////////////////////////////////////////////////////////////////////////////
/// print a single layer, for debugging

void TMVA::MethodANNBase::PrintLayer( TObjArray* layer ) const
{
   Int_t numNeurons = layer->GetEntriesFast();
   TNeuron* neuron;

   for (Int_t j = 0; j < numNeurons; j++) {
      neuron = (TNeuron*) layer->At(j);
      Log() << kINFO << "\tNeuron #" << j << " (LinksIn: " << neuron->NumPreLinks()
            << " , LinksOut: " << neuron->NumPostLinks() << ")" << Endl;
      PrintNeuron( neuron );
   }
}

////////////////////////////////////////////////////////////////////////////////
/// print a neuron, for debugging

void TMVA::MethodANNBase::PrintNeuron( TNeuron* neuron ) const
{
   Log() << kINFO
         << "\t\tValue:\t"     << neuron->GetValue()
         << "\t\tActivation: " << neuron->GetActivationValue()
         << "\t\tDelta: "      << neuron->GetDelta() << Endl;
   Log() << kINFO << "\t\tActivationEquation:\t";
   neuron->PrintActivationEqn();
   Log() << kINFO << "\t\tLinksIn:" << Endl;
   neuron->PrintPreLinks();
   Log() << kINFO << "\t\tLinksOut:" << Endl;
   neuron->PrintPostLinks();
}

////////////////////////////////////////////////////////////////////////////////
/// get the mva value generated by the NN

Double_t TMVA::MethodANNBase::GetMvaValue( Double_t* err, Double_t* errUpper )
{
   TNeuron* neuron;

   TObjArray* inputLayer = (TObjArray*)fNetwork->At(0);

   const Event * ev = GetEvent();

   for (UInt_t i = 0; i < GetNvar(); i++) {
      neuron = (TNeuron*)inputLayer->At(i);
      neuron->ForceValue( ev->GetValue(i) );
   }
   ForceNetworkCalculations();

   // check the output of the network
   TObjArray* outputLayer = (TObjArray*)fNetwork->At( fNetwork->GetEntriesFast()-1 );
   neuron = (TNeuron*)outputLayer->At(0);

   // cannot determine error
   NoErrorCalc(err, errUpper);

   return neuron->GetActivationValue();
}

////////////////////////////////////////////////////////////////////////////////
/// get the regression value generated by the NN

const std::vector<Float_t> &TMVA::MethodANNBase::GetRegressionValues()
{
   TNeuron* neuron;

   TObjArray* inputLayer = (TObjArray*)fNetwork->At(0);

   const Event * ev = GetEvent();

   for (UInt_t i = 0; i < GetNvar(); i++) {
      neuron = (TNeuron*)inputLayer->At(i);
      neuron->ForceValue( ev->GetValue(i) );
   }
   ForceNetworkCalculations();

   // check the output of the network
   TObjArray* outputLayer = (TObjArray*)fNetwork->At( fNetwork->GetEntriesFast()-1 );

   if (fRegressionReturnVal == NULL) fRegressionReturnVal = new std::vector<Float_t>();
   fRegressionReturnVal->clear();

   Event * evT = new Event(*ev);
   UInt_t ntgts = outputLayer->GetEntriesFast();
   for (UInt_t itgt = 0; itgt < ntgts; itgt++) {
      evT->SetTarget(itgt,((TNeuron*)outputLayer->At(itgt))->GetActivationValue());
   }

   const Event* evT2 = GetTransformationHandler().InverseTransform( evT );
   for (UInt_t itgt = 0; itgt < ntgts; itgt++) {
      fRegressionReturnVal->push_back( evT2->GetTarget(itgt) );
   }

   delete evT;

   return *fRegressionReturnVal;
}

////////////////////////////////////////////////////////////////////////////////
/// get the multiclass classification values generated by the NN

const std::vector<Float_t> &TMVA::MethodANNBase::GetMulticlassValues()
{
   TNeuron* neuron;

   TObjArray* inputLayer = (TObjArray*)fNetwork->At(0);

   const Event * ev = GetEvent();

   for (UInt_t i = 0; i < GetNvar(); i++) {
      neuron = (TNeuron*)inputLayer->At(i);
      neuron->ForceValue( ev->GetValue(i) );
   }
   ForceNetworkCalculations();

   // check the output of the network

   if (fMulticlassReturnVal == NULL) fMulticlassReturnVal = new std::vector<Float_t>();
   fMulticlassReturnVal->clear();
   std::vector<Float_t> temp;

   UInt_t nClasses = DataInfo().GetNClasses();
   for (UInt_t icls = 0; icls < nClasses; icls++) {
      temp.push_back(GetOutputNeuron( icls )->GetActivationValue() );
   }

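   // normalise the raw network outputs to class "probabilities"; this is
   // a numerically stable softmax:
   //   1/(1 + sum_{j!=i} exp(t_j - t_i)) = exp(t_i) / sum_j exp(t_j)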
   for(UInt_t iClass=0; iClass<nClasses; iClass++){
      Double_t norm = 0.0;
      for(UInt_t j=0;j<nClasses;j++){
         if(iClass!=j)
            norm += exp(temp[j]-temp[iClass]);
      }
      (*fMulticlassReturnVal).push_back(1.0/(1.0+norm));
   }

   return *fMulticlassReturnVal;
}


////////////////////////////////////////////////////////////////////////////////
/// create XML description of ANN classifier
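///
/// the layout and the synapse weights are written under a <Weights> node:
/// a <Layout> block with one <Layer> per layer and one <Neuron> per neuron
/// (holding its outgoing weights in scientific notation), followed, if an
/// inverse Hessian was computed (regulator fit), by an <InverseHessian>
/// block with one <Row> per matrix row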

void TMVA::MethodANNBase::AddWeightsXMLTo( void* parent ) const
{
   Int_t numLayers = fNetwork->GetEntriesFast();
   void* wght = gTools().xmlengine().NewChild(parent, 0, "Weights");
   void* xmlLayout = gTools().xmlengine().NewChild(wght, 0, "Layout");
   gTools().xmlengine().NewAttr(xmlLayout, 0, "NLayers", gTools().StringFromInt(fNetwork->GetEntriesFast()) );
   TString weights = "";
   for (Int_t i = 0; i < numLayers; i++) {
      TObjArray* layer = (TObjArray*)fNetwork->At(i);
      Int_t numNeurons = layer->GetEntriesFast();
      void* layerxml = gTools().xmlengine().NewChild(xmlLayout, 0, "Layer");
      gTools().xmlengine().NewAttr(layerxml, 0, "Index",    gTools().StringFromInt(i) );
      gTools().xmlengine().NewAttr(layerxml, 0, "NNeurons", gTools().StringFromInt(numNeurons) );
      for (Int_t j = 0; j < numNeurons; j++) {
         TNeuron* neuron = (TNeuron*)layer->At(j);
         Int_t numSynapses = neuron->NumPostLinks();
         void* neuronxml = gTools().AddChild(layerxml, "Neuron");
         gTools().AddAttr(neuronxml, "NSynapses", gTools().StringFromInt(numSynapses) );
         if(numSynapses==0) continue;
         std::stringstream s("");
         s.precision( 16 );
         for (Int_t k = 0; k < numSynapses; k++) {
            TSynapse* synapse = neuron->PostLinkAt(k);
            s << std::scientific << synapse->GetWeight() << " ";
         }
         gTools().AddRawLine( neuronxml, s.str().c_str() );
      }
   }

   // if inverse hessian exists, write inverse hessian to weight file
   if( fInvHessian.GetNcols()>0 ){
      void* xmlInvHessian = gTools().xmlengine().NewChild(wght, 0, "InverseHessian");

      // get the matrix dimensions
      Int_t nElements = fInvHessian.GetNoElements();
      Int_t nRows     = fInvHessian.GetNrows();
      Int_t nCols     = fInvHessian.GetNcols();
      gTools().xmlengine().NewAttr(xmlInvHessian, 0, "NElements", gTools().StringFromInt(nElements) );
      gTools().xmlengine().NewAttr(xmlInvHessian, 0, "NRows",     gTools().StringFromInt(nRows) );
      gTools().xmlengine().NewAttr(xmlInvHessian, 0, "NCols",     gTools().StringFromInt(nCols) );

      // read in the matrix elements
      Double_t* elements = new Double_t[nElements+10];
      fInvHessian.GetMatrix2Array( elements );

      // store the matrix elements row-wise
      Int_t index = 0;
      for( Int_t row = 0; row < nRows; ++row ){
         void* xmlRow = gTools().xmlengine().NewChild(xmlInvHessian, 0, "Row");
         gTools().xmlengine().NewAttr(xmlRow, 0, "Index", gTools().StringFromInt(row) );

         // create the rows
         std::stringstream s("");
         s.precision( 16 );
         for( Int_t col = 0; col < nCols; ++col ){
            s << std::scientific << (*(elements+index)) << " ";
            ++index;
         }
         gTools().xmlengine().AddRawLine( xmlRow, s.str().c_str() );
      }
      delete[] elements;
   }
}


////////////////////////////////////////////////////////////////////////////////
/// read MLP from xml weight file

void TMVA::MethodANNBase::ReadWeightsFromXML( void* wghtnode )
{
   // build the layout first
   Bool_t fromFile = kTRUE;
   std::vector<Int_t>* layout = new std::vector<Int_t>();

   void* xmlLayout = NULL;
   xmlLayout = gTools().GetChild(wghtnode, "Layout");
   if( !xmlLayout )
      xmlLayout = wghtnode;

   UInt_t nLayers;
   gTools().ReadAttr( xmlLayout, "NLayers", nLayers );
   layout->resize( nLayers );

   void* ch = gTools().xmlengine().GetChild(xmlLayout);
   UInt_t index;
   UInt_t nNeurons;
   while (ch) {
      gTools().ReadAttr( ch, "Index",    index    );
      gTools().ReadAttr( ch, "NNeurons", nNeurons );
      layout->at(index) = nNeurons;
      ch = gTools().GetNextChild(ch);
   }

   BuildNetwork( layout, NULL, fromFile );
   // use 'slow' (exact) TanH if processing old weight file to ensure 100% compatible results
   // otherwise use the new default, the 'fast tanh' approximation
   if (GetTrainingTMVAVersionCode() < TMVA_VERSION(4,2,1) && fActivation->GetExpression().Contains("tanh")){
      TActivationTanh* act = dynamic_cast<TActivationTanh*>( fActivation );
      if (act) act->SetSlow();
   }

   // fill the weights of the synapses
   UInt_t nSyn;
   Float_t weight;
   ch = gTools().xmlengine().GetChild(xmlLayout);
   UInt_t iLayer = 0;
   while (ch) {  // layers
      TObjArray* layer = (TObjArray*)fNetwork->At(iLayer);
      gTools().ReadAttr( ch, "Index",    index    );
      gTools().ReadAttr( ch, "NNeurons", nNeurons );

      void* nodeN = gTools().GetChild(ch);
      UInt_t iNeuron = 0;
      while( nodeN ){  // neurons
         TNeuron *neuron = (TNeuron*)layer->At(iNeuron);
         gTools().ReadAttr( nodeN, "NSynapses", nSyn );
         if( nSyn > 0 ){
            const char* content = gTools().GetContent(nodeN);
            std::stringstream s(content);
            for (UInt_t iSyn = 0; iSyn<nSyn; iSyn++) {  // synapses

               TSynapse* synapse = neuron->PostLinkAt(iSyn);
               s >> weight;
               //Log() << kWARNING << neuron << " " << weight << Endl;
               synapse->SetWeight(weight);
            }
         }
         nodeN = gTools().GetNextChild(nodeN);
         iNeuron++;
      }
      ch = gTools().GetNextChild(ch);
      iLayer++;
   }

   delete layout;

   void* xmlInvHessian = NULL;
   xmlInvHessian = gTools().GetChild(wghtnode, "InverseHessian");
   if( !xmlInvHessian )
      // no inverse hessian available
      return;

   fUseRegulator = kTRUE;

   Int_t nElements = 0;
   Int_t nRows     = 0;
   Int_t nCols     = 0;
   gTools().ReadAttr( xmlInvHessian, "NElements", nElements );
   gTools().ReadAttr( xmlInvHessian, "NRows",     nRows );
   gTools().ReadAttr( xmlInvHessian, "NCols",     nCols );

   // adjust the matrix dimensions
   fInvHessian.ResizeTo( nRows, nCols );

   // prepare an array to read in the values
   Double_t* elements;
   if (nElements > std::numeric_limits<int>::max()-100){
      Log() << kFATAL << "you tried to read a Hessian matrix with " << nElements << " elements -- too large, something likely went wrong reading the weight file" << Endl;
      return;
   } else {
      elements = new Double_t[nElements+10];
   }

   void* xmlRow = gTools().xmlengine().GetChild(xmlInvHessian);
   Int_t row = 0;
   index = 0;
   while (xmlRow) {  // rows
      gTools().ReadAttr( xmlRow, "Index", row );

      const char* content = gTools().xmlengine().GetNodeContent(xmlRow);

      std::stringstream s(content);
      for (Int_t iCol = 0; iCol<nCols; iCol++) {  // columns
         s >> (*(elements+index));
         ++index;
      }
      xmlRow = gTools().xmlengine().GetNext(xmlRow);
      ++row;
   }

   fInvHessian.SetMatrixArray( elements );

   delete[] elements;
}

////////////////////////////////////////////////////////////////////////////////
/// destroy/clear the network then read it back in from the weights file

void TMVA::MethodANNBase::ReadWeightsFromStream( std::istream & istr)
{
   // delete network so we can reconstruct network from scratch
   TString dummy;

   // synapse weights
   Double_t weight;
   std::vector<Double_t>* weights = new std::vector<Double_t>();
   istr >> dummy;
   while (istr >> dummy >> weight) weights->push_back(weight);  // use w/ slower write-out

   ForceWeights(weights);


   delete weights;
}

////////////////////////////////////////////////////////////////////////////////
/// compute ranking of input variables by summing function of weights
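///
/// as implemented below, the importance of input variable i is estimated as
///   importance_i = avg_i^2 * sum_j w_ij^2
/// where w_ij are the weights of the synapses leaving input neuron i and
/// avg_i is a regularised (and optionally normalised) average absolute
/// value of the variable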

const TMVA::Ranking* TMVA::MethodANNBase::CreateRanking()
{
   // create the ranking object
   fRanking = new Ranking( GetName(), "Importance" );

   TNeuron* neuron;
   TSynapse* synapse;
   Double_t importance, avgVal;
   TString varName;

   for (UInt_t ivar = 0; ivar < GetNvar(); ivar++) {

      neuron = GetInputNeuron(ivar);
      Int_t numSynapses = neuron->NumPostLinks();
      importance = 0;
      varName = GetInputVar(ivar); // fix this line

      // figure out average value of variable i
      Double_t meanS, meanB, rmsS, rmsB, xmin, xmax;
      Statistics( TMVA::Types::kTraining, varName,
                  meanS, meanB, rmsS, rmsB, xmin, xmax );

      avgVal = (TMath::Abs(meanS) + TMath::Abs(meanB))/2.0;
      double meanrms = (TMath::Abs(rmsS) + TMath::Abs(rmsB))/2.;
      if (avgVal<meanrms) avgVal = meanrms;
      if (IsNormalised()) avgVal = 0.5*(1 + gTools().NormVariable( avgVal, GetXmin( ivar ), GetXmax( ivar )));

      for (Int_t j = 0; j < numSynapses; j++) {
         synapse = neuron->PostLinkAt(j);
         importance += synapse->GetWeight() * synapse->GetWeight();
      }

      importance *= avgVal * avgVal;

      fRanking->AddRank( Rank( varName, importance ) );
   }

   return fRanking;
}

////////////////////////////////////////////////////////////////////////////////
/// create histograms of the synapse weights connecting two consecutive layers

void TMVA::MethodANNBase::CreateWeightMonitoringHists( const TString& bulkname,
                                                       std::vector<TH1*>* hv ) const
{
   TH2F* hist;
   Int_t numLayers = fNetwork->GetEntriesFast();

   for (Int_t i = 0; i < numLayers-1; i++) {

      TObjArray* layer1 = (TObjArray*)fNetwork->At(i);
      TObjArray* layer2 = (TObjArray*)fNetwork->At(i+1);
      Int_t numNeurons1 = layer1->GetEntriesFast();
      Int_t numNeurons2 = layer2->GetEntriesFast();

      TString name = Form("%s%i%i", bulkname.Data(), i, i+1);
      hist = new TH2F(name + "", name + "",
                      numNeurons1, 0, numNeurons1, numNeurons2, 0, numNeurons2);

      for (Int_t j = 0; j < numNeurons1; j++) {

         TNeuron* neuron = (TNeuron*)layer1->At(j);
         Int_t numSynapses = neuron->NumPostLinks();

         for (Int_t k = 0; k < numSynapses; k++) {

            TSynapse* synapse = neuron->PostLinkAt(k);
            hist->SetBinContent(j+1, k+1, synapse->GetWeight());
         }
      }

      if (hv) hv->push_back( hist );
      else {
         hist->Write();
         delete hist;
      }
   }
}

////////////////////////////////////////////////////////////////////////////////
/// write histograms to file

void TMVA::MethodANNBase::WriteMonitoringHistosToFile() const
{
   PrintMessage(Form("Write special histos to file: %s", BaseDir()->GetPath()), kTRUE);

   if (fEstimatorHistTrain) fEstimatorHistTrain->Write();
   if (fEstimatorHistTest ) fEstimatorHistTest ->Write();

   // histograms containing weights for architecture plotting (used in macro "network.cxx")
   CreateWeightMonitoringHists( "weights_hist" );

   // now save all the epoch-wise monitoring information
#if __cplusplus > 199711L
   static std::atomic<int> epochMonitoringDirectoryNumber{0};
#else
   static int epochMonitoringDirectoryNumber = 0;
#endif
   int epochVal = epochMonitoringDirectoryNumber++;
   TDirectory* epochdir = NULL;
   if( epochVal == 0 )
      epochdir = BaseDir()->mkdir( "EpochMonitoring" );
   else
      epochdir = BaseDir()->mkdir( Form("EpochMonitoring_%4d",epochVal) );

   epochdir->cd();
   for (std::vector<TH1*>::const_iterator it = fEpochMonHistS.begin(); it != fEpochMonHistS.end(); ++it) {
      (*it)->Write();
      delete (*it);
   }
   for (std::vector<TH1*>::const_iterator it = fEpochMonHistB.begin(); it != fEpochMonHistB.end(); ++it) {
      (*it)->Write();
      delete (*it);
   }
   for (std::vector<TH1*>::const_iterator it = fEpochMonHistW.begin(); it != fEpochMonHistW.end(); ++it) {
      (*it)->Write();
      delete (*it);
   }
   BaseDir()->cd();
}

////////////////////////////////////////////////////////////////////////////////
/// write specific classifier response
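///
/// the generated standalone class stores one weight matrix per layer
/// transition, fWeightMatrix<l>to<l+1>[nTo][nFrom]; Initialize() fills the
/// matrices with the trained weights and GetMvaValue__() evaluates the
/// network with the activation functions written out below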

void TMVA::MethodANNBase::MakeClassSpecific( std::ostream& fout, const TString& className ) const
{
   Int_t numLayers = fNetwork->GetEntries();

   fout << std::endl;
   fout << "   double ActivationFnc(double x) const;" << std::endl;
   fout << "   double OutputActivationFnc(double x) const;" << std::endl; //zjh
   fout << std::endl;
   int numNodesFrom = -1;
   for (Int_t lIdx = 0; lIdx < numLayers; lIdx++) {
      int numNodesTo = ((TObjArray*)fNetwork->At(lIdx))->GetEntries();
      if (numNodesFrom<0) { numNodesFrom=numNodesTo; continue; }
      fout << "   double fWeightMatrix" << lIdx-1 << "to" << lIdx << "[" << numNodesTo << "][" << numNodesFrom << "];";
      fout << "   // weight matrix from layer " << lIdx-1 << " to " << lIdx << std::endl;
      numNodesFrom = numNodesTo;
   }
   fout << std::endl;
   fout << "};" << std::endl;

   fout << std::endl;

   fout << "inline void " << className << "::Initialize()" << std::endl;
   fout << "{" << std::endl;
   fout << "   // build network structure" << std::endl;

   for (Int_t i = 0; i < numLayers-1; i++) {
      fout << "   // weight matrix from layer " << i << " to " << i+1 << std::endl;
      TObjArray* layer = (TObjArray*)fNetwork->At(i);
      Int_t numNeurons = layer->GetEntriesFast();
      for (Int_t j = 0; j < numNeurons; j++) {
         TNeuron* neuron = (TNeuron*)layer->At(j);
         Int_t numSynapses = neuron->NumPostLinks();
         for (Int_t k = 0; k < numSynapses; k++) {
            TSynapse* synapse = neuron->PostLinkAt(k);
            fout << "   fWeightMatrix" << i << "to" << i+1 << "[" << k << "][" << j << "] = " << synapse->GetWeight() << ";" << std::endl;
         }
      }
   }

   fout << "}" << std::endl;
   fout << std::endl;

   // writing of the GetMvaValue__ method
   fout << "inline double " << className << "::GetMvaValue__( const std::vector<double>& inputValues ) const" << std::endl;
   fout << "{" << std::endl;
   fout << "   if (inputValues.size() != (unsigned int)" << ((TObjArray *)fNetwork->At(0))->GetEntries() - 1 << ") {"
        << std::endl;
   fout << "      std::cout << \"Input vector needs to be of size \" << "
        << ((TObjArray *)fNetwork->At(0))->GetEntries() - 1 << " << std::endl;" << std::endl;
   fout << "      return 0;" << std::endl;
   fout << "   }" << std::endl;
   fout << std::endl;
   for (Int_t lIdx = 1; lIdx < numLayers; lIdx++) {
      TObjArray *layer = (TObjArray *)fNetwork->At(lIdx);
      int numNodes = layer->GetEntries();
      fout << "   std::array<double, " << numNodes << "> fWeights" << lIdx << " {{}};" << std::endl;
   }
   for (Int_t lIdx = 1; lIdx < numLayers - 1; lIdx++) {
      fout << "   fWeights" << lIdx << ".back() = 1.;" << std::endl;
   }
   fout << std::endl;
   for (Int_t i = 0; i < numLayers - 1; i++) {
      fout << "   // layer " << i << " to " << i + 1 << std::endl;
      if (i + 1 == numLayers - 1) {
         fout << "   for (int o=0; o<" << ((TObjArray *)fNetwork->At(i + 1))->GetEntries() << "; o++) {" << std::endl;
      } else {
         fout << "   for (int o=0; o<" << ((TObjArray *)fNetwork->At(i + 1))->GetEntries() - 1 << "; o++) {"
              << std::endl;
      }
      if (0 == i) {
         fout << "      std::array<double, " << ((TObjArray *)fNetwork->At(i))->GetEntries()
              << "> buffer; // no need to initialise" << std::endl;
         fout << "      for (int i = 0; i<" << ((TObjArray *)fNetwork->At(i))->GetEntries() << " - 1; i++) {"
              << std::endl;
         fout << "         buffer[i] = fWeightMatrix" << i << "to" << i + 1 << "[o][i] * inputValues[i];" << std::endl;
         fout << "      } // loop over i" << std::endl;
         fout << "      buffer.back() = fWeightMatrix" << i << "to" << i + 1 << "[o]["
              << ((TObjArray *)fNetwork->At(i))->GetEntries() - 1 << "];" << std::endl;
      } else {
         fout << "      std::array<double, " << ((TObjArray *)fNetwork->At(i))->GetEntries()
              << "> buffer; // no need to initialise" << std::endl;
         fout << "      for (int i=0; i<" << ((TObjArray *)fNetwork->At(i))->GetEntries() << "; i++) {" << std::endl;
         fout << "         buffer[i] = fWeightMatrix" << i << "to" << i + 1 << "[o][i] * fWeights" << i << "[i];"
              << std::endl;
         fout << "      } // loop over i" << std::endl;
      }
      fout << "      for (int i=0; i<" << ((TObjArray *)fNetwork->At(i))->GetEntries() << "; i++) {" << std::endl;
      if (fNeuronInputType == "sum") {
         fout << "         fWeights" << i + 1 << "[o] += buffer[i];" << std::endl;
      } else if (fNeuronInputType == "sqsum") {
         fout << "         fWeights" << i + 1 << "[o] += buffer[i]*buffer[i];" << std::endl;
      } else { // fNeuronInputType == TNeuronInputChooser::kAbsSum
         fout << "         fWeights" << i + 1 << "[o] += fabs(buffer[i]);" << std::endl;
      }
      fout << "      } // loop over i" << std::endl;
      fout << "   } // loop over o" << std::endl;
      if (i + 1 == numLayers - 1) {
         fout << "   for (int o=0; o<" << ((TObjArray *)fNetwork->At(i + 1))->GetEntries() << "; o++) {" << std::endl;
      } else {
         fout << "   for (int o=0; o<" << ((TObjArray *)fNetwork->At(i + 1))->GetEntries() - 1 << "; o++) {"
              << std::endl;
      }
      if (i+1 != numLayers-1) // hidden layers use ActivationFnc; the output layer uses OutputActivationFnc
         fout << "      fWeights" << i + 1 << "[o] = ActivationFnc(fWeights" << i + 1 << "[o]);" << std::endl;
      else
         fout << "      fWeights" << i + 1 << "[o] = OutputActivationFnc(fWeights" << i + 1 << "[o]);"
              << std::endl; // zjh
      fout << "   } // loop over o" << std::endl;
   }
   fout << std::endl;
   fout << "   return fWeights" << numLayers - 1 << "[0];" << std::endl;
   fout << "}" << std::endl;

   fout << std::endl;
   TString fncName = className+"::ActivationFnc";
   fActivation->MakeFunction(fout, fncName);
   fncName = className+"::OutputActivationFnc";  //zjh
   fOutput->MakeFunction(fout, fncName);  //zjh

   fout << std::endl;
   fout << "// Clean up" << std::endl;
   fout << "inline void " << className << "::Clear()" << std::endl;
   fout << "{" << std::endl;
   fout << "}" << std::endl;
}

////////////////////////////////////////////////////////////////////////////////
/// who the hell makes such strange Debug flags that even use "global pointers"..

Bool_t TMVA::MethodANNBase::Debug() const
{
   return fgDEBUG;
}