MethodANNBase.cxx
// @(#)root/tmva $Id$
// Author: Andreas Hoecker, Peter Speckmayer, Matt Jachowski, Jan Therhaag, Jiahang Zhong

/**********************************************************************************
 * Project: TMVA - a Root-integrated toolkit for multivariate data analysis      *
 * Package: TMVA                                                                  *
 * Class  : MethodANNBase                                                         *
 *                                                                                *
 *                                                                                *
 * Description:                                                                   *
 *      Artificial neural network base class for the discrimination of signal    *
 *      from background.                                                          *
 *                                                                                *
 * Authors (alphabetical):                                                        *
 *      Krzysztof Danielowski <danielow@cern.ch>       - IFJ & AGH, Poland        *
 *      Andreas Hoecker       <Andreas.Hocker@cern.ch> - CERN, Switzerland        *
 *      Matt Jachowski        <jachowski@stanford.edu> - Stanford University, USA *
 *      Kamil Kraszewski      <kalq@cern.ch>           - IFJ & UJ, Poland         *
 *      Maciej Kruk           <mkruk@cern.ch>          - IFJ & AGH, Poland        *
 *      Peter Speckmayer      <peter.speckmayer@cern.ch> - CERN, Switzerland      *
 *      Joerg Stelzer         <stelzer@cern.ch>        - DESY, Germany            *
 *      Jan Therhaag          <Jan.Therhaag@cern.ch>   - U of Bonn, Germany       *
 *      Jiahang Zhong         <Jiahang.Zhong@cern.ch>  - Academia Sinica, Taipei  *
 *                                                                                *
 * Copyright (c) 2005-2011:                                                       *
 *      CERN, Switzerland                                                         *
 *      U. of Bonn, Germany                                                       *
 *                                                                                *
 * Redistribution and use in source and binary forms, with or without            *
 * modification, are permitted according to the terms listed in LICENSE          *
 * (see tmva/doc/LICENSE)                                                        *
 **********************************************************************************/

/*! \class TMVA::MethodANNBase
\ingroup TMVA

Base class for all TMVA methods using artificial neural networks.

*/
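/*!
Concrete ANN implementations (e.g. the MLP) derive from this class and are
normally booked through the TMVA factory. A minimal booking sketch -- it
assumes a TFile* `outputFile` and a TMVA::DataLoader* `dataloader` have been
prepared, and the option values are illustrative only:

\code
TMVA::Factory factory( "TMVAClassification", outputFile,
                       "AnalysisType=Classification" );
factory.BookMethod( dataloader, TMVA::Types::kMLP, "MLP",
                    "NCycles=600:HiddenLayers=N+1,N:NeuronType=tanh" );
factory.TrainAllMethods();
\endcode
*/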

#include "TMVA/MethodBase.h"

#include "TMVA/Configurable.h"
#include "TMVA/DataSetInfo.h"
#include "TMVA/MethodANNBase.h"
#include "TMVA/MsgLogger.h"
#include "TMVA/TNeuron.h"
#include "TMVA/TSynapse.h"
#include "TMVA/TActivationChooser.h"
#include "TMVA/TActivationTanh.h"
#include "TMVA/Types.h"
#include "TMVA/Tools.h"
#include "TMVA/TNeuronInputChooser.h"
#include "TMVA/Ranking.h"
#include "TMVA/Version.h"

#include "TString.h"
#include "TDirectory.h"
#include "TRandom3.h"
#include "TH2F.h"
#include "TH1.h"
#include "TMath.h"
#include "TMatrixT.h"

#include <iostream>
#include <vector>
#include <cstdlib>
#include <stdexcept>
#include <atomic>
#include <string>    // std::string (keyboard input)
#include <sstream>   // std::stringstream (weight I/O)
#include <limits>    // std::numeric_limits (inverse-hessian size check)


using std::vector;

ClassImp(TMVA::MethodANNBase);

////////////////////////////////////////////////////////////////////////////////
/// standard constructor
/// Note: right now the neuron input function is selectable as an option, but
/// only the input function "sum" leads to weight convergence -- with the other
/// choices the weights run to nan and lead to an ABORT.

TMVA::MethodANNBase::MethodANNBase( const TString& jobName,
                                    Types::EMVA methodType,
                                    const TString& methodTitle,
                                    DataSetInfo& theData,
                                    const TString& theOption )
: TMVA::MethodBase( jobName, methodType, methodTitle, theData, theOption)
   , fEstimator(kMSE)
   , fUseRegulator(kFALSE)
   , fRandomSeed(0)
{
   InitANNBase();

   DeclareOptions();
}

////////////////////////////////////////////////////////////////////////////////
/// construct the Method from the weight file

TMVA::MethodANNBase::MethodANNBase( Types::EMVA methodType,
                                    DataSetInfo& theData,
                                    const TString& theWeightFile)
   : TMVA::MethodBase( methodType, theData, theWeightFile)
   , fEstimator(kMSE)
   , fUseRegulator(kFALSE)
   , fRandomSeed(0)
{
   InitANNBase();

   DeclareOptions();
}

////////////////////////////////////////////////////////////////////////////////
/// define the options (their key words) that can be set in the option string.
/// Here the options valid for ALL MVA methods are declared.
///
/// Known options:
///
///  - NCycles=xx              : the number of training cycles
///  - Normalize=kTRUE,kFALSE  : whether normalised input variables should be used
///  - HiddenLayers="N-1,N-2"  : the specification of the hidden layers
///  - NeuronType=sigmoid,tanh,radial,linear : the type of activation function
///    used at the neuron
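///
/// Several options are combined into one colon-separated booking string, e.g.
/// (values are illustrative only):
/// \code
/// "NCycles=600:HiddenLayers=N+1,N:NeuronType=tanh:EstimatorType=CE:RandomSeed=1"
/// \endcode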

void TMVA::MethodANNBase::DeclareOptions()
{
   DeclareOptionRef( fNcycles    = 500,       "NCycles",      "Number of training cycles" );
   DeclareOptionRef( fLayerSpec  = "N,N-1",   "HiddenLayers", "Specification of hidden layer architecture" );
   DeclareOptionRef( fNeuronType = "sigmoid", "NeuronType",   "Neuron activation function type" );
   DeclareOptionRef( fRandomSeed = 1, "RandomSeed", "Random seed for initial synapse weights (0 means unique seed for each run; default value '1')");

   DeclareOptionRef(fEstimatorS="MSE", "EstimatorType",
                    "MSE (Mean Square Estimator) for Gaussian Likelihood or CE(Cross-Entropy) for Bernoulli Likelihood" ); //zjh
   AddPreDefVal(TString("MSE"));  //zjh
   AddPreDefVal(TString("CE"));   //zjh


   TActivationChooser aChooser;
   std::vector<TString>* names = aChooser.GetAllActivationNames();
   Int_t nTypes = names->size();
   for (Int_t i = 0; i < nTypes; i++)
      AddPreDefVal(names->at(i));
   delete names;

   DeclareOptionRef(fNeuronInputType="sum", "NeuronInputType","Neuron input function type");
   TNeuronInputChooser iChooser;
   names = iChooser.GetAllNeuronInputNames();
   nTypes = names->size();
   for (Int_t i = 0; i < nTypes; i++) AddPreDefVal(names->at(i));
   delete names;
}


////////////////////////////////////////////////////////////////////////////////
/// process the options: choose the estimator type and build the network
/// from the layout specification

void TMVA::MethodANNBase::ProcessOptions()
{
   if ( DoRegression() || DoMulticlass()) fEstimatorS = "MSE";  //zjh
   else fEstimatorS = "CE" ;                                    //hhv
   if      (fEstimatorS == "MSE" ) fEstimator = kMSE;
   else if (fEstimatorS == "CE")   fEstimator = kCE;            //zjh
   std::vector<Int_t>* layout = ParseLayoutString(fLayerSpec);
   BuildNetwork(layout);
   delete layout;
}

////////////////////////////////////////////////////////////////////////////////
/// parse layout specification string and return a vector, each entry
/// containing the number of neurons to go in each successive layer
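///
/// For example, with four input variables the default specification "N,N-1"
/// expands to the layout {4, 4, 3, 1} for two-class classification: the input
/// layer, two hidden layers, and a single output node (an illustrative case).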

std::vector<Int_t>* TMVA::MethodANNBase::ParseLayoutString(TString layerSpec)
{
   std::vector<Int_t>* layout = new std::vector<Int_t>();
   layout->push_back((Int_t)GetNvar());
   while(layerSpec.Length()>0) {
      TString sToAdd="";
      if (layerSpec.First(',')<0) {
         sToAdd = layerSpec;
         layerSpec = "";
      }
      else {
         sToAdd = layerSpec(0,layerSpec.First(','));
         layerSpec = layerSpec(layerSpec.First(',')+1,layerSpec.Length());
      }
      int nNodes = 0;
      if (sToAdd.BeginsWith("n") || sToAdd.BeginsWith("N")) { sToAdd.Remove(0,1); nNodes = GetNvar(); }
      nNodes += atoi(sToAdd);
      layout->push_back(nNodes);
   }
   if( DoRegression() )
      layout->push_back( DataInfo().GetNTargets() );  // one output node for each target
   else if( DoMulticlass() )
      layout->push_back( DataInfo().GetNClasses() );  // one output node for each class
   else
      layout->push_back(1);  // one output node (for signal/background classification)

   return layout;
}

////////////////////////////////////////////////////////////////////////////////
/// initialize ANNBase object

void TMVA::MethodANNBase::InitANNBase()
{
   fNetwork         = NULL;
   frgen            = NULL;
   fActivation      = NULL;
   fOutput          = NULL; //zjh
   fIdentity        = NULL;
   fInputCalculator = NULL;
   fSynapses        = NULL;
   fEstimatorHistTrain = NULL;
   fEstimatorHistTest  = NULL;

   // reset monitoring histogram vectors
   fEpochMonHistS.clear();
   fEpochMonHistB.clear();
   fEpochMonHistW.clear();

   // these will be set in BuildNetwork()
   fInputLayer = NULL;
   fOutputNeurons.clear();

   frgen = new TRandom3(fRandomSeed);

   fSynapses = new TObjArray();
}

////////////////////////////////////////////////////////////////////////////////
/// destructor

TMVA::MethodANNBase::~MethodANNBase()
{
   DeleteNetwork();
}

////////////////////////////////////////////////////////////////////////////////
/// delete/clear network

void TMVA::MethodANNBase::DeleteNetwork()
{
   if (fNetwork != NULL) {
      TObjArray *layer;
      Int_t numLayers = fNetwork->GetEntriesFast();
      for (Int_t i = 0; i < numLayers; i++) {
         layer = (TObjArray*)fNetwork->At(i);
         DeleteNetworkLayer(layer);
      }
      delete fNetwork;
   }

   if (frgen != NULL)            delete frgen;
   if (fActivation != NULL)      delete fActivation;
   if (fOutput != NULL)          delete fOutput;  //zjh
   if (fIdentity != NULL)        delete fIdentity;
   if (fInputCalculator != NULL) delete fInputCalculator;
   if (fSynapses != NULL)        delete fSynapses;

   fNetwork         = NULL;
   frgen            = NULL;
   fActivation      = NULL;
   fOutput          = NULL; //zjh
   fIdentity        = NULL;
   fInputCalculator = NULL;
   fSynapses        = NULL;
}

////////////////////////////////////////////////////////////////////////////////
/// delete a network layer

void TMVA::MethodANNBase::DeleteNetworkLayer( TObjArray*& layer )
{
   TNeuron* neuron;
   Int_t numNeurons = layer->GetEntriesFast();
   for (Int_t i = 0; i < numNeurons; i++) {
      neuron = (TNeuron*)layer->At(i);
      neuron->DeletePreLinks();
      delete neuron;
   }
   delete layer;
}

////////////////////////////////////////////////////////////////////////////////
/// build network given a layout (number of neurons in each layer)
/// and optional weights array

void TMVA::MethodANNBase::BuildNetwork( std::vector<Int_t>* layout, std::vector<Double_t>* weights, Bool_t fromFile )
{
   if      (fEstimatorS == "MSE") fEstimator = kMSE;  //zjh
   else if (fEstimatorS == "CE")  fEstimator = kCE;   //zjh
   else Log()<<kWARNING<<"fEstimator="<<fEstimator<<"\tfEstimatorS="<<fEstimatorS<<Endl;
   if (fEstimator!=kMSE && fEstimator!=kCE) Log()<<kWARNING<<"Estimator type unspecified \t"<<Endl; //zjh


   Log() << kHEADER << "Building Network. " << Endl;

   DeleteNetwork();
   InitANNBase();

   // set activation and input functions
   TActivationChooser aChooser;
   fActivation = aChooser.CreateActivation(fNeuronType);
   fIdentity   = aChooser.CreateActivation("linear");
   if      (fEstimator==kMSE) fOutput = aChooser.CreateActivation("linear");  //zjh
   else if (fEstimator==kCE)  fOutput = aChooser.CreateActivation("sigmoid"); //zjh
   TNeuronInputChooser iChooser;
   fInputCalculator = iChooser.CreateNeuronInput(fNeuronInputType);

   fNetwork = new TObjArray();
   fRegulatorIdx.clear();
   fRegulators.clear();
   BuildLayers( layout, fromFile );

   // cache input layer and output neuron for fast access
   fInputLayer = (TObjArray*)fNetwork->At(0);
   TObjArray* outputLayer = (TObjArray*)fNetwork->At(fNetwork->GetEntriesFast()-1);
   fOutputNeurons.clear();
   for (Int_t i = 0; i < outputLayer->GetEntries(); i++) {
      fOutputNeurons.push_back( (TNeuron*)outputLayer->At(i) );
   }

   if (weights == NULL) InitWeights();
   else                 ForceWeights(weights);
}

////////////////////////////////////////////////////////////////////////////////
/// build the network layers

void TMVA::MethodANNBase::BuildLayers( std::vector<Int_t>* layout, Bool_t fromFile )
{
   TObjArray* curLayer;
   TObjArray* prevLayer = NULL;

   Int_t numLayers = layout->size();

   for (Int_t i = 0; i < numLayers; i++) {
      curLayer = new TObjArray();
      BuildLayer(layout->at(i), curLayer, prevLayer, i, numLayers, fromFile);
      prevLayer = curLayer;
      fNetwork->Add(curLayer);
   }

   // cache pointers to synapses for fast access, the order matters
   for (Int_t i = 0; i < numLayers; i++) {
      TObjArray* layer = (TObjArray*)fNetwork->At(i);
      Int_t numNeurons = layer->GetEntriesFast();
      if (i!=0 && i!=numLayers-1) fRegulators.push_back(0.);  //zjh
      for (Int_t j = 0; j < numNeurons; j++) {
         if (i==0) fRegulators.push_back(0.);  //zjh
         TNeuron* neuron = (TNeuron*)layer->At(j);
         Int_t numSynapses = neuron->NumPostLinks();
         for (Int_t k = 0; k < numSynapses; k++) {
            TSynapse* synapse = neuron->PostLinkAt(k);
            fSynapses->Add(synapse);
            fRegulatorIdx.push_back(fRegulators.size()-1);  //zjh
         }
      }
   }
}

////////////////////////////////////////////////////////////////////////////////
/// build a single layer with neurons and synapses connecting this
/// layer to the previous layer

void TMVA::MethodANNBase::BuildLayer( Int_t numNeurons, TObjArray* curLayer,
                                      TObjArray* prevLayer, Int_t layerIndex,
                                      Int_t numLayers, Bool_t fromFile )
{
   TNeuron* neuron;
   for (Int_t j = 0; j < numNeurons; j++) {
      if (fromFile && (layerIndex != numLayers-1) && (j==numNeurons-1)){
         neuron = new TNeuron();
         neuron->SetActivationEqn(fIdentity);
         neuron->SetBiasNeuron();
         neuron->ForceValue(1.0);
         curLayer->Add(neuron);
      }
      else {
         neuron = new TNeuron();
         neuron->SetInputCalculator(fInputCalculator);

         // input layer
         if (layerIndex == 0) {
            neuron->SetActivationEqn(fIdentity);
            neuron->SetInputNeuron();
         }
         else {
            // output layer
            if (layerIndex == numLayers-1) {
               neuron->SetOutputNeuron();
               neuron->SetActivationEqn(fOutput);  //zjh
            }
            // hidden layers
            else neuron->SetActivationEqn(fActivation);
            AddPreLinks(neuron, prevLayer);
         }

         curLayer->Add(neuron);
      }
   }

   // add bias neuron (except to output layer)
   if(!fromFile){
      if (layerIndex != numLayers-1) {
         neuron = new TNeuron();
         neuron->SetActivationEqn(fIdentity);
         neuron->SetBiasNeuron();
         neuron->ForceValue(1.0);
         curLayer->Add(neuron);
      }
   }
}

////////////////////////////////////////////////////////////////////////////////
/// add synapses connecting a neuron to its preceding layer

void TMVA::MethodANNBase::AddPreLinks(TNeuron* neuron, TObjArray* prevLayer)
{
   TSynapse* synapse;
   int numNeurons = prevLayer->GetEntriesFast();
   TNeuron* preNeuron;

   for (Int_t i = 0; i < numNeurons; i++) {
      preNeuron = (TNeuron*)prevLayer->At(i);
      synapse = new TSynapse();
      synapse->SetPreNeuron(preNeuron);
      synapse->SetPostNeuron(neuron);
      preNeuron->AddPostLink(synapse);
      neuron->AddPreLink(synapse);
   }
}

////////////////////////////////////////////////////////////////////////////////
/// initialize the synapse weights randomly

void TMVA::MethodANNBase::InitWeights()
{
   PrintMessage("Initializing weights");

   // init synapse weights, uniformly distributed in [-2, 2)
   Int_t numSynapses = fSynapses->GetEntriesFast();
   TSynapse* synapse;
   for (Int_t i = 0; i < numSynapses; i++) {
      synapse = (TSynapse*)fSynapses->At(i);
      synapse->SetWeight(4.0*frgen->Rndm() - 2.0);
   }
}

////////////////////////////////////////////////////////////////////////////////
/// force the synapse weights

void TMVA::MethodANNBase::ForceWeights(std::vector<Double_t>* weights)
{
   PrintMessage("Forcing weights");

   Int_t numSynapses = fSynapses->GetEntriesFast();
   TSynapse* synapse;
   for (Int_t i = 0; i < numSynapses; i++) {
      synapse = (TSynapse*)fSynapses->At(i);
      synapse->SetWeight(weights->at(i));
   }
}

////////////////////////////////////////////////////////////////////////////////
/// force the value of each input neuron to the corresponding event value

void TMVA::MethodANNBase::ForceNetworkInputs( const Event* ev, Int_t ignoreIndex )
{
   Double_t x;
   TNeuron* neuron;

   // const Event* ev = GetEvent();
   for (UInt_t j = 0; j < GetNvar(); j++) {

      x = (j != (UInt_t)ignoreIndex)?ev->GetValue(j):0;

      neuron = GetInputNeuron(j);
      neuron->ForceValue(x);
   }
}

////////////////////////////////////////////////////////////////////////////////
/// calculate input values to each neuron

void TMVA::MethodANNBase::ForceNetworkCalculations()
{
   TObjArray* curLayer;
   TNeuron* neuron;
   Int_t numLayers = fNetwork->GetEntriesFast();
   Int_t numNeurons;

   for (Int_t i = 0; i < numLayers; i++) {
      curLayer = (TObjArray*)fNetwork->At(i);
      numNeurons = curLayer->GetEntriesFast();

      for (Int_t j = 0; j < numNeurons; j++) {
         neuron = (TNeuron*) curLayer->At(j);
         neuron->CalculateValue();
         neuron->CalculateActivationValue();
      }
   }
}

////////////////////////////////////////////////////////////////////////////////
/// print messages; printing is suppressed unless the verbose or debug flag is set

void TMVA::MethodANNBase::PrintMessage( TString message, Bool_t force ) const
{
   if (Verbose() || Debug() || force) Log() << kINFO << message << Endl;
}

////////////////////////////////////////////////////////////////////////////////
/// wait for keyboard input, for debugging

void TMVA::MethodANNBase::WaitForKeyboard()
{
   std::string dummy;
   Log() << kINFO << "***Type anything to continue (q to quit): ";
   std::getline(std::cin, dummy);
   if (dummy == "q" || dummy == "Q") {
      PrintMessage( "quit" );
      delete this;
      exit(0);
   }
}

////////////////////////////////////////////////////////////////////////////////
/// print network representation, for debugging

void TMVA::MethodANNBase::PrintNetwork() const
{
   if (!Debug()) return;

   Log() << kINFO << Endl;
   PrintMessage( "Printing network " );
   Log() << kINFO << "-------------------------------------------------------------------" << Endl;

   TObjArray* curLayer;
   Int_t numLayers = fNetwork->GetEntriesFast();

   for (Int_t i = 0; i < numLayers; i++) {

      curLayer = (TObjArray*)fNetwork->At(i);
      Int_t numNeurons = curLayer->GetEntriesFast();

      Log() << kINFO << "Layer #" << i << " (" << numNeurons << " neurons):" << Endl;
      PrintLayer( curLayer );
   }
}

////////////////////////////////////////////////////////////////////////////////
/// print a single layer, for debugging

void TMVA::MethodANNBase::PrintLayer( TObjArray* layer ) const
{
   Int_t numNeurons = layer->GetEntriesFast();
   TNeuron* neuron;

   for (Int_t j = 0; j < numNeurons; j++) {
      neuron = (TNeuron*) layer->At(j);
      Log() << kINFO << "\tNeuron #" << j << " (LinksIn: " << neuron->NumPreLinks()
            << " , LinksOut: " << neuron->NumPostLinks() << ")" << Endl;
      PrintNeuron( neuron );
   }
}

////////////////////////////////////////////////////////////////////////////////
/// print a neuron, for debugging

void TMVA::MethodANNBase::PrintNeuron( TNeuron* neuron ) const
{
   Log() << kINFO
         << "\t\tValue:\t"     << neuron->GetValue()
         << "\t\tActivation: " << neuron->GetActivationValue()
         << "\t\tDelta: "      << neuron->GetDelta() << Endl;
   Log() << kINFO << "\t\tActivationEquation:\t";
   neuron->PrintActivationEqn();
   Log() << kINFO << "\t\tLinksIn:" << Endl;
   neuron->PrintPreLinks();
   Log() << kINFO << "\t\tLinksOut:" << Endl;
   neuron->PrintPostLinks();
}

////////////////////////////////////////////////////////////////////////////////
/// get the mva value generated by the NN
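///
/// In application the value is usually obtained through the TMVA::Reader
/// interface rather than by calling this method directly. A minimal sketch --
/// the variable names and the weight-file path are illustrative only:
/// \code
/// TMVA::Reader reader;
/// Float_t var1, var2;
/// reader.AddVariable( "var1", &var1 );
/// reader.AddVariable( "var2", &var2 );
/// reader.BookMVA( "MLP", "dataset/weights/TMVAClassification_MLP.weights.xml" );
/// var1 = 0.5; var2 = -1.2;                    // fill with the event's values
/// Double_t mva = reader.EvaluateMVA( "MLP" ); // dispatches to GetMvaValue()
/// \endcode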

Double_t TMVA::MethodANNBase::GetMvaValue( Double_t* err, Double_t* errUpper )
{
   TNeuron* neuron;

   TObjArray* inputLayer = (TObjArray*)fNetwork->At(0);

   const Event * ev = GetEvent();

   for (UInt_t i = 0; i < GetNvar(); i++) {
      neuron = (TNeuron*)inputLayer->At(i);
      neuron->ForceValue( ev->GetValue(i) );
   }
   ForceNetworkCalculations();

   // check the output of the network
   TObjArray* outputLayer = (TObjArray*)fNetwork->At( fNetwork->GetEntriesFast()-1 );
   neuron = (TNeuron*)outputLayer->At(0);

   // cannot determine error
   NoErrorCalc(err, errUpper);

   return neuron->GetActivationValue();
}

////////////////////////////////////////////////////////////////////////////////
/// get the regression value generated by the NN

const std::vector<Float_t> &TMVA::MethodANNBase::GetRegressionValues()
{
   TNeuron* neuron;

   TObjArray* inputLayer = (TObjArray*)fNetwork->At(0);

   const Event * ev = GetEvent();

   for (UInt_t i = 0; i < GetNvar(); i++) {
      neuron = (TNeuron*)inputLayer->At(i);
      neuron->ForceValue( ev->GetValue(i) );
   }
   ForceNetworkCalculations();

   // check the output of the network
   TObjArray* outputLayer = (TObjArray*)fNetwork->At( fNetwork->GetEntriesFast()-1 );

   if (fRegressionReturnVal == NULL) fRegressionReturnVal = new std::vector<Float_t>();
   fRegressionReturnVal->clear();

   Event * evT = new Event(*ev);
   UInt_t ntgts = outputLayer->GetEntriesFast();
   for (UInt_t itgt = 0; itgt < ntgts; itgt++) {
      evT->SetTarget(itgt,((TNeuron*)outputLayer->At(itgt))->GetActivationValue());
   }

   const Event* evT2 = GetTransformationHandler().InverseTransform( evT );
   for (UInt_t itgt = 0; itgt < ntgts; itgt++) {
      fRegressionReturnVal->push_back( evT2->GetTarget(itgt) );
   }

   delete evT;

   return *fRegressionReturnVal;
}

////////////////////////////////////////////////////////////////////////////////
/// get the multiclass classification values generated by the NN

const std::vector<Float_t> &TMVA::MethodANNBase::GetMulticlassValues()
{
   TNeuron* neuron;

   TObjArray* inputLayer = (TObjArray*)fNetwork->At(0);

   const Event * ev = GetEvent();

   for (UInt_t i = 0; i < GetNvar(); i++) {
      neuron = (TNeuron*)inputLayer->At(i);
      neuron->ForceValue( ev->GetValue(i) );
   }
   ForceNetworkCalculations();

   // check the output of the network

   if (fMulticlassReturnVal == NULL) fMulticlassReturnVal = new std::vector<Float_t>();
   fMulticlassReturnVal->clear();
   std::vector<Float_t> temp;

   UInt_t nClasses = DataInfo().GetNClasses();
   for (UInt_t icls = 0; icls < nClasses; icls++) {
      temp.push_back(GetOutputNeuron( icls )->GetActivationValue() );
   }

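   // the loop below applies a softmax to the raw output-node activations:
   // 1/(1 + sum_{j != i} exp(t_j - t_i)) is algebraically identical to
   // exp(t_i) / sum_j exp(t_j), written so that the exponentials stay bounded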
   for(UInt_t iClass=0; iClass<nClasses; iClass++){
      Double_t norm = 0.0;
      for(UInt_t j=0;j<nClasses;j++){
         if(iClass!=j)
            norm+=exp(temp[j]-temp[iClass]);
      }
      (*fMulticlassReturnVal).push_back(1.0/(1.0+norm));
   }

   return *fMulticlassReturnVal;
}


////////////////////////////////////////////////////////////////////////////////
/// create XML description of ANN classifier
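///
/// The emitted structure looks roughly as follows (a sketch; attribute values
/// are illustrative, and the InverseHessian block is only written when an
/// inverse hessian is available):
/// \code
/// <Weights>
///   <Layout NLayers="3">
///     <Layer Index="0" NNeurons="5">
///       <Neuron NSynapses="4"> 1.23e-01 -4.56e-02 ... </Neuron>
///       ...
///     </Layer>
///     ...
///   </Layout>
///   <InverseHessian NElements="..." NRows="..." NCols="...">
///     <Row Index="0"> ... </Row>
///   </InverseHessian>
/// </Weights>
/// \endcode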

void TMVA::MethodANNBase::AddWeightsXMLTo( void* parent ) const
{
   Int_t numLayers = fNetwork->GetEntriesFast();
   void* wght = gTools().xmlengine().NewChild(parent, nullptr, "Weights");
   void* xmlLayout = gTools().xmlengine().NewChild(wght, nullptr, "Layout");
   gTools().xmlengine().NewAttr(xmlLayout, nullptr, "NLayers", gTools().StringFromInt(fNetwork->GetEntriesFast()) );
   TString weights = "";
   for (Int_t i = 0; i < numLayers; i++) {
      TObjArray* layer = (TObjArray*)fNetwork->At(i);
      Int_t numNeurons = layer->GetEntriesFast();
      void* layerxml = gTools().xmlengine().NewChild(xmlLayout, nullptr, "Layer");
      gTools().xmlengine().NewAttr(layerxml, nullptr, "Index",    gTools().StringFromInt(i) );
      gTools().xmlengine().NewAttr(layerxml, nullptr, "NNeurons", gTools().StringFromInt(numNeurons) );
      for (Int_t j = 0; j < numNeurons; j++) {
         TNeuron* neuron = (TNeuron*)layer->At(j);
         Int_t numSynapses = neuron->NumPostLinks();
         void* neuronxml = gTools().AddChild(layerxml, "Neuron");
         gTools().AddAttr(neuronxml, "NSynapses", gTools().StringFromInt(numSynapses) );
         if(numSynapses==0) continue;
         std::stringstream s("");
         s.precision( 16 );
         for (Int_t k = 0; k < numSynapses; k++) {
            TSynapse* synapse = neuron->PostLinkAt(k);
            s << std::scientific << synapse->GetWeight() << " ";
         }
         gTools().AddRawLine( neuronxml, s.str().c_str() );
      }
   }

   // if inverse hessian exists, write inverse hessian to weight file
   if( fInvHessian.GetNcols()>0 ){
      void* xmlInvHessian = gTools().xmlengine().NewChild(wght, nullptr, "InverseHessian");

      // get the matrix dimensions
      Int_t nElements = fInvHessian.GetNoElements();
      Int_t nRows     = fInvHessian.GetNrows();
      Int_t nCols     = fInvHessian.GetNcols();
      gTools().xmlengine().NewAttr(xmlInvHessian, nullptr, "NElements", gTools().StringFromInt(nElements) );
      gTools().xmlengine().NewAttr(xmlInvHessian, nullptr, "NRows",     gTools().StringFromInt(nRows) );
      gTools().xmlengine().NewAttr(xmlInvHessian, nullptr, "NCols",     gTools().StringFromInt(nCols) );

      // copy the matrix elements into a flat array
      Double_t* elements = new Double_t[nElements+10];
      fInvHessian.GetMatrix2Array( elements );

      // store the matrix elements row-wise
      Int_t index = 0;
      for( Int_t row = 0; row < nRows; ++row ){
         void* xmlRow = gTools().xmlengine().NewChild(xmlInvHessian, nullptr, "Row");
         gTools().xmlengine().NewAttr(xmlRow, nullptr, "Index", gTools().StringFromInt(row) );

         // create the rows
         std::stringstream s("");
         s.precision( 16 );
         for( Int_t col = 0; col < nCols; ++col ){
            s << std::scientific << (*(elements+index)) << " ";
            ++index;
         }
         gTools().xmlengine().AddRawLine( xmlRow, s.str().c_str() );
      }
      delete[] elements;
   }
}


////////////////////////////////////////////////////////////////////////////////
/// read MLP from xml weight file

void TMVA::MethodANNBase::ReadWeightsFromXML( void* wghtnode )
{
   // build the layout first
   Bool_t fromFile = kTRUE;
   std::vector<Int_t>* layout = new std::vector<Int_t>();

   void* xmlLayout = NULL;
   xmlLayout = gTools().GetChild(wghtnode, "Layout");
   if( !xmlLayout )
      xmlLayout = wghtnode;

   UInt_t nLayers;
   gTools().ReadAttr( xmlLayout, "NLayers", nLayers );
   layout->resize( nLayers );

   void* ch = gTools().xmlengine().GetChild(xmlLayout);
   UInt_t index;
   UInt_t nNeurons;
   while (ch) {
      gTools().ReadAttr( ch, "Index",    index    );
      gTools().ReadAttr( ch, "NNeurons", nNeurons );
      layout->at(index) = nNeurons;
      ch = gTools().GetNextChild(ch);
   }

   BuildNetwork( layout, NULL, fromFile );
   // use the 'slow' (exact) tanh when processing an old weight file to ensure
   // 100% compatible results; otherwise use the new default, the 'fast tanh' approximation
   if (GetTrainingTMVAVersionCode() < TMVA_VERSION(4,2,1) && fActivation->GetExpression().Contains("tanh")){
      TActivationTanh* act = dynamic_cast<TActivationTanh*>( fActivation );
      if (act) act->SetSlow();
   }

   // fill the weights of the synapses
   UInt_t nSyn;
   Float_t weight;
   ch = gTools().xmlengine().GetChild(xmlLayout);
   UInt_t iLayer = 0;
   while (ch) {  // layers
      TObjArray* layer = (TObjArray*)fNetwork->At(iLayer);
      gTools().ReadAttr( ch, "Index",    index    );
      gTools().ReadAttr( ch, "NNeurons", nNeurons );

      void* nodeN = gTools().GetChild(ch);
      UInt_t iNeuron = 0;
      while( nodeN ){ // neurons
         TNeuron *neuron = (TNeuron*)layer->At(iNeuron);
         gTools().ReadAttr( nodeN, "NSynapses", nSyn );
         if( nSyn > 0 ){
            const char* content = gTools().GetContent(nodeN);
            std::stringstream s(content);
            for (UInt_t iSyn = 0; iSyn<nSyn; iSyn++) { // synapses

               TSynapse* synapse = neuron->PostLinkAt(iSyn);
               s >> weight;
               //Log() << kWARNING << neuron << " " << weight << Endl;
               synapse->SetWeight(weight);
            }
         }
         nodeN = gTools().GetNextChild(nodeN);
         iNeuron++;
      }
      ch = gTools().GetNextChild(ch);
      iLayer++;
   }

   delete layout;

   void* xmlInvHessian = NULL;
   xmlInvHessian = gTools().GetChild(wghtnode, "InverseHessian");
   if( !xmlInvHessian )
      // no inverse hessian available
      return;

   fUseRegulator = kTRUE;

   Int_t nElements = 0;
   Int_t nRows     = 0;
   Int_t nCols     = 0;
   gTools().ReadAttr( xmlInvHessian, "NElements", nElements );
   gTools().ReadAttr( xmlInvHessian, "NRows",     nRows );
   gTools().ReadAttr( xmlInvHessian, "NCols",     nCols );

   // adjust the matrix dimensions
   fInvHessian.ResizeTo( nRows, nCols );

   // prepare an array to read in the values
   Double_t* elements;
   if (nElements > std::numeric_limits<int>::max()-100){
      Log() << kFATAL << "you tried to read a hessian matrix with " << nElements << " elements, --> too large, something probably went wrong reading the weight file" << Endl;
      return;
   } else {
      elements = new Double_t[nElements+10];
   }

   void* xmlRow = gTools().xmlengine().GetChild(xmlInvHessian);
   Int_t row = 0;
   index = 0;
   while (xmlRow) { // rows
      gTools().ReadAttr( xmlRow, "Index", row );

      const char* content = gTools().xmlengine().GetNodeContent(xmlRow);

      std::stringstream s(content);
      for (Int_t iCol = 0; iCol<nCols; iCol++) { // columns
         s >> (*(elements+index));
         ++index;
      }
      xmlRow = gTools().xmlengine().GetNext(xmlRow);
      ++row;
   }

   fInvHessian.SetMatrixArray( elements );

   delete[] elements;
}

////////////////////////////////////////////////////////////////////////////////
/// destroy/clear the network then read it back in from the weights file

void TMVA::MethodANNBase::ReadWeightsFromStream( std::istream & istr)
{
   // delete network so we can reconstruct network from scratch

   TString dummy;

   // synapse weights
   Double_t weight;
   std::vector<Double_t>* weights = new std::vector<Double_t>();
   istr >> dummy;
   while (istr >> dummy >> weight) weights->push_back(weight); // use w/ slower write-out

   ForceWeights(weights);


   delete weights;
}

////////////////////////////////////////////////////////////////////////////////
/// compute the ranking of the input variables by summing a function of the
/// synapse weights
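///
/// The importance assigned to input variable \f$i\f$ is
/// \f$ I_i = \bar{x}_i^2 \sum_j w_{ij}^2 \f$, where \f$\bar{x}_i\f$ is a scale
/// estimate of the variable (the mean magnitude, bounded from below by the
/// average RMS) and the sum runs over all synapses leaving the corresponding
/// input neuron.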

const TMVA::Ranking* TMVA::MethodANNBase::CreateRanking()
{
   // create the ranking object
   fRanking = new Ranking( GetName(), "Importance" );

   TNeuron*  neuron;
   TSynapse* synapse;
   Double_t  importance, avgVal;
   TString varName;

   for (UInt_t ivar = 0; ivar < GetNvar(); ivar++) {

      neuron = GetInputNeuron(ivar);
      Int_t numSynapses = neuron->NumPostLinks();
      importance = 0;
      varName = GetInputVar(ivar); // fix this line

      // figure out average value of variable i
      Double_t meanS, meanB, rmsS, rmsB, xmin, xmax;
      Statistics( TMVA::Types::kTraining, varName,
                  meanS, meanB, rmsS, rmsB, xmin, xmax );

      avgVal = (TMath::Abs(meanS) + TMath::Abs(meanB))/2.0;
      double meanrms = (TMath::Abs(rmsS) + TMath::Abs(rmsB))/2.;
      if (avgVal<meanrms) avgVal = meanrms;
      if (IsNormalised()) avgVal = 0.5*(1 + gTools().NormVariable( avgVal, GetXmin( ivar ), GetXmax( ivar )));

      for (Int_t j = 0; j < numSynapses; j++) {
         synapse = neuron->PostLinkAt(j);
         importance += synapse->GetWeight() * synapse->GetWeight();
      }

      importance *= avgVal * avgVal;

      fRanking->AddRank( Rank( varName, importance ) );
   }

   return fRanking;
}

////////////////////////////////////////////////////////////////////////////////
/// fill 2D histograms of the synapse weights between each pair of
/// consecutive layers

void TMVA::MethodANNBase::CreateWeightMonitoringHists( const TString& bulkname,
                                                       std::vector<TH1*>* hv ) const
{
   TH2F* hist;
   Int_t numLayers = fNetwork->GetEntriesFast();

   for (Int_t i = 0; i < numLayers-1; i++) {

      TObjArray* layer1 = (TObjArray*)fNetwork->At(i);
      TObjArray* layer2 = (TObjArray*)fNetwork->At(i+1);
      Int_t numNeurons1 = layer1->GetEntriesFast();
      Int_t numNeurons2 = layer2->GetEntriesFast();

      TString name = TString::Format("%s%i%i", bulkname.Data(), i, i+1);
      hist = new TH2F(name.Data(), name.Data(),
                      numNeurons1, 0, numNeurons1, numNeurons2, 0, numNeurons2);

      for (Int_t j = 0; j < numNeurons1; j++) {

         TNeuron* neuron = (TNeuron*)layer1->At(j);
         Int_t numSynapses = neuron->NumPostLinks();

         for (Int_t k = 0; k < numSynapses; k++) {

            TSynapse* synapse = neuron->PostLinkAt(k);
            hist->SetBinContent(j+1, k+1, synapse->GetWeight());

         }
      }

      if (hv) hv->push_back( hist );
      else {
         hist->Write();
         delete hist;
      }
   }
}

////////////////////////////////////////////////////////////////////////////////
/// write histograms to file

void TMVA::MethodANNBase::WriteMonitoringHistosToFile() const
{
   PrintMessage(TString::Format("Write special histos to file: %s", BaseDir()->GetPath()).Data(), kTRUE);

   if (fEstimatorHistTrain) fEstimatorHistTrain->Write();
   if (fEstimatorHistTest ) fEstimatorHistTest ->Write();

   // histograms containing weights for architecture plotting (used in macro "network.cxx")
   CreateWeightMonitoringHists( "weights_hist" );

   // now save all the epoch-wise monitoring information
   static std::atomic<int> epochMonitoringDirectoryNumber{0};
   int epochVal = epochMonitoringDirectoryNumber++;
   TDirectory* epochdir = nullptr;
   if( epochVal == 0 )
      epochdir = BaseDir()->mkdir( "EpochMonitoring" );
   else
      epochdir = BaseDir()->mkdir( TString::Format("EpochMonitoring_%4d",epochVal).Data() );

   epochdir->cd();
   for (std::vector<TH1*>::const_iterator it = fEpochMonHistS.begin(); it != fEpochMonHistS.end(); ++it) {
      (*it)->Write();
      delete (*it);
   }
   for (std::vector<TH1*>::const_iterator it = fEpochMonHistB.begin(); it != fEpochMonHistB.end(); ++it) {
      (*it)->Write();
      delete (*it);
   }
   for (std::vector<TH1*>::const_iterator it = fEpochMonHistW.begin(); it != fEpochMonHistW.end(); ++it) {
      (*it)->Write();
      delete (*it);
   }
   BaseDir()->cd();
}

////////////////////////////////////////////////////////////////////////////////
/// write specific classifier response

void TMVA::MethodANNBase::MakeClassSpecific( std::ostream& fout, const TString& className ) const
{
   Int_t numLayers = fNetwork->GetEntries();

   fout << std::endl;
   fout << "   double ActivationFnc(double x) const;" << std::endl;
   fout << "   double OutputActivationFnc(double x) const;" << std::endl; //zjh
   fout << std::endl;
   int numNodesFrom = -1;
   for (Int_t lIdx = 0; lIdx < numLayers; lIdx++) {
      int numNodesTo = ((TObjArray*)fNetwork->At(lIdx))->GetEntries();
      if (numNodesFrom<0) { numNodesFrom=numNodesTo; continue; }
      fout << "   double fWeightMatrix" << lIdx-1 << "to" << lIdx << "[" << numNodesTo << "][" << numNodesFrom << "];";
      fout << "   // weight matrix from layer " << lIdx-1 << " to " << lIdx << std::endl;
      numNodesFrom = numNodesTo;
   }
   fout << std::endl;
   fout << "};" << std::endl;

   fout << std::endl;

   fout << "inline void " << className << "::Initialize()" << std::endl;
   fout << "{" << std::endl;
   fout << "   // build network structure" << std::endl;

   for (Int_t i = 0; i < numLayers-1; i++) {
      fout << "   // weight matrix from layer " << i << " to " << i+1 << std::endl;
      TObjArray* layer = (TObjArray*)fNetwork->At(i);
      Int_t numNeurons = layer->GetEntriesFast();
      for (Int_t j = 0; j < numNeurons; j++) {
         TNeuron* neuron = (TNeuron*)layer->At(j);
         Int_t numSynapses = neuron->NumPostLinks();
         for (Int_t k = 0; k < numSynapses; k++) {
            TSynapse* synapse = neuron->PostLinkAt(k);
            fout << "   fWeightMatrix" << i << "to" << i+1 << "[" << k << "][" << j << "] = " << synapse->GetWeight() << ";" << std::endl;
         }
      }
   }

   fout << "}" << std::endl;
   fout << std::endl;

   // writing of the GetMvaValue__ method
   fout << "inline double " << className << "::GetMvaValue__( const std::vector<double>& inputValues ) const" << std::endl;
   fout << "{" << std::endl;
   fout << "   if (inputValues.size() != (unsigned int)" << ((TObjArray *)fNetwork->At(0))->GetEntries() - 1 << ") {"
        << std::endl;
   fout << "      std::cout << \"Input vector needs to be of size \" << "
        << ((TObjArray *)fNetwork->At(0))->GetEntries() - 1 << " << std::endl;" << std::endl;
   fout << "      return 0;" << std::endl;
   fout << "   }" << std::endl;
   fout << std::endl;
   for (Int_t lIdx = 1; lIdx < numLayers; lIdx++) {
      TObjArray *layer = (TObjArray *)fNetwork->At(lIdx);
      int numNodes = layer->GetEntries();
      fout << "   std::array<double, " << numNodes << "> fWeights" << lIdx << " {{}};" << std::endl;
   }
   for (Int_t lIdx = 1; lIdx < numLayers - 1; lIdx++) {
      fout << "   fWeights" << lIdx << ".back() = 1.;" << std::endl;
   }
   fout << std::endl;
   for (Int_t i = 0; i < numLayers - 1; i++) {
      fout << "   // layer " << i << " to " << i + 1 << std::endl;
      if (i + 1 == numLayers - 1) {
         fout << "   for (int o=0; o<" << ((TObjArray *)fNetwork->At(i + 1))->GetEntries() << "; o++) {" << std::endl;
      } else {
         fout << "   for (int o=0; o<" << ((TObjArray *)fNetwork->At(i + 1))->GetEntries() - 1 << "; o++) {"
              << std::endl;
      }
      if (0 == i) {
         fout << "      std::array<double, " << ((TObjArray *)fNetwork->At(i))->GetEntries()
              << "> buffer; // no need to initialise" << std::endl;
         fout << "      for (int i = 0; i<" << ((TObjArray *)fNetwork->At(i))->GetEntries() << " - 1; i++) {"
              << std::endl;
         fout << "         buffer[i] = fWeightMatrix" << i << "to" << i + 1 << "[o][i] * inputValues[i];" << std::endl;
         fout << "      } // loop over i" << std::endl;
         fout << "      buffer.back() = fWeightMatrix" << i << "to" << i + 1 << "[o]["
              << ((TObjArray *)fNetwork->At(i))->GetEntries() - 1 << "];" << std::endl;
      } else {
         fout << "      std::array<double, " << ((TObjArray *)fNetwork->At(i))->GetEntries()
              << "> buffer; // no need to initialise" << std::endl;
         fout << "      for (int i=0; i<" << ((TObjArray *)fNetwork->At(i))->GetEntries() << "; i++) {" << std::endl;
         fout << "         buffer[i] = fWeightMatrix" << i << "to" << i + 1 << "[o][i] * fWeights" << i << "[i];"
              << std::endl;
         fout << "      } // loop over i" << std::endl;
      }
      fout << "      for (int i=0; i<" << ((TObjArray *)fNetwork->At(i))->GetEntries() << "; i++) {" << std::endl;
      if (fNeuronInputType == "sum") {
         fout << "         fWeights" << i + 1 << "[o] += buffer[i];" << std::endl;
      } else if (fNeuronInputType == "sqsum") {
         fout << "         fWeights" << i + 1 << "[o] += buffer[i]*buffer[i];" << std::endl;
      } else { // fNeuronInputType == TNeuronInputChooser::kAbsSum
         fout << "         fWeights" << i + 1 << "[o] += fabs(buffer[i]);" << std::endl;
      }
      fout << "      } // loop over i" << std::endl;
      fout << "   } // loop over o" << std::endl;
      if (i + 1 == numLayers - 1) {
         fout << "   for (int o=0; o<" << ((TObjArray *)fNetwork->At(i + 1))->GetEntries() << "; o++) {" << std::endl;
      } else {
         fout << "   for (int o=0; o<" << ((TObjArray *)fNetwork->At(i + 1))->GetEntries() - 1 << "; o++) {"
              << std::endl;
      }
      if (i+1 != numLayers-1) // in the last layer no activation function is applied
         fout << "      fWeights" << i + 1 << "[o] = ActivationFnc(fWeights" << i + 1 << "[o]);" << std::endl;
      else
         fout << "      fWeights" << i + 1 << "[o] = OutputActivationFnc(fWeights" << i + 1 << "[o]);"
              << std::endl; //zjh
      fout << "   } // loop over o" << std::endl;
   }
   fout << std::endl;
   fout << "   return fWeights" << numLayers - 1 << "[0];" << std::endl;
   fout << "}" << std::endl;

   fout << std::endl;
   TString fncName = className+"::ActivationFnc";
   fActivation->MakeFunction(fout, fncName);
   fncName = className+"::OutputActivationFnc";  //zjh
   fOutput->MakeFunction(fout, fncName); //zjh

   fout << std::endl;
   fout << "// Clean up" << std::endl;
   fout << "inline void " << className << "::Clear()" << std::endl;
   fout << "{" << std::endl;
   fout << "}" << std::endl;
}

////////////////////////////////////////////////////////////////////////////////
/// return the (static) debug flag

Bool_t TMVA::MethodANNBase::Debug() const
{
   return fgDEBUG;
}