MethodANNBase.cxx

// @(#)root/tmva $Id$
// Author: Andreas Hoecker, Peter Speckmayer, Matt Jachowski, Jan Therhaag, Jiahang Zhong

/**********************************************************************************
 * Project: TMVA - a Root-integrated toolkit for multivariate data analysis      *
 * Package: TMVA                                                                  *
 * Class  : MethodANNBase                                                         *
 *                                                                                *
 * Description:                                                                   *
 *      Artificial neural network base class for the discrimination of signal    *
 *      from background.                                                          *
 *                                                                                *
 * Authors (alphabetical):                                                        *
 *      Krzysztof Danielowski <danielow@cern.ch>       - IFJ & AGH, Poland        *
 *      Andreas Hoecker       <Andreas.Hocker@cern.ch> - CERN, Switzerland        *
 *      Matt Jachowski        <jachowski@stanford.edu> - Stanford University, USA *
 *      Kamil Kraszewski      <kalq@cern.ch>           - IFJ & UJ, Poland         *
 *      Maciej Kruk           <mkruk@cern.ch>          - IFJ & AGH, Poland        *
 *      Peter Speckmayer      <peter.speckmayer@cern.ch> - CERN, Switzerland      *
 *      Joerg Stelzer         <stelzer@cern.ch>        - DESY, Germany            *
 *      Jan Therhaag          <Jan.Therhaag@cern.ch>   - U of Bonn, Germany       *
 *      Jiahang Zhong         <Jiahang.Zhong@cern.ch>  - Academia Sinica, Taipei  *
 *                                                                                *
 * Copyright (c) 2005-2011:                                                       *
 *      CERN, Switzerland                                                         *
 *      U. of Bonn, Germany                                                       *
 *                                                                                *
 * Redistribution and use in source and binary forms, with or without            *
 * modification, are permitted according to the terms listed in LICENSE          *
 * (see tmva/doc/LICENSE)                                                         *
 **********************************************************************************/

/*! \class TMVA::MethodANNBase
\ingroup TMVA

Base class for all TMVA methods using artificial neural networks.

*/

#include "TMVA/MethodBase.h"

#include "TMVA/Configurable.h"
#include "TMVA/DataSetInfo.h"
#include "TMVA/MethodANNBase.h"
#include "TMVA/MsgLogger.h"
#include "TMVA/TNeuron.h"
#include "TMVA/TSynapse.h"
#include "TMVA/TActivationChooser.h"
#include "TMVA/TActivationTanh.h"
#include "TMVA/Types.h"
#include "TMVA/Tools.h"
#include "TMVA/TNeuronInputChooser.h"
#include "TMVA/Ranking.h"
#include "TMVA/Version.h"

#include "TString.h"
#include "TDirectory.h"
#include "TRandom3.h"
#include "TH2F.h"
#include "TH1.h"
#include "TMath.h"
#include "TMatrixT.h"

#include <iostream>
#include <vector>
#include <cstdlib>
#include <stdexcept>
#include <atomic>
#include <limits>
#include <sstream>

using std::vector;

////////////////////////////////////////////////////////////////////////////////
/// standard constructor
/// Note: Right now it is an option to choose the neuron input function,
/// but only the input function "sum" leads to weight convergence --
/// otherwise the weights go to nan and lead to an ABORT.

TMVA::MethodANNBase::MethodANNBase( const TString& jobName,
                                    Types::EMVA methodType,
                                    const TString& methodTitle,
                                    DataSetInfo& theData,
                                    const TString& theOption )
   : TMVA::MethodBase( jobName, methodType, methodTitle, theData, theOption )
   , fEstimator(kMSE)
   , fUseRegulator(kFALSE)
   , fRandomSeed(0)
{
   InitANNBase();

   DeclareOptions();
}

////////////////////////////////////////////////////////////////////////////////
/// construct the Method from the weight file

TMVA::MethodANNBase::MethodANNBase( Types::EMVA methodType,
                                    DataSetInfo& theData,
                                    const TString& theWeightFile)
   : TMVA::MethodBase( methodType, theData, theWeightFile)
   , fEstimator(kMSE)
   , fUseRegulator(kFALSE)
   , fRandomSeed(0)
{
   InitANNBase();

   DeclareOptions();
}

////////////////////////////////////////////////////////////////////////////////
/// define the options (their key words) that can be set in the option string
/// here the options valid for ALL MVA methods are declared.
///
/// known options:
///
///  - NCycles=xx             : the number of training cycles
///  - Normalize=kTRUE,kFALSE : if normalised input variables should be used
///  - HiddenLayers="N-1,N-2" : the specification of the hidden layers
///  - NeuronType=sigmoid,tanh,radial,linear : the type of activation function
///    used at the neuron

void TMVA::MethodANNBase::DeclareOptions()
{
   DeclareOptionRef( fNcycles    = 500,       "NCycles",      "Number of training cycles" );
   DeclareOptionRef( fLayerSpec  = "N,N-1",   "HiddenLayers", "Specification of hidden layer architecture" );
   DeclareOptionRef( fNeuronType = "sigmoid", "NeuronType",   "Neuron activation function type" );
   DeclareOptionRef( fRandomSeed = 1, "RandomSeed", "Random seed for initial synapse weights (0 means unique seed for each run; default value '1')");

   DeclareOptionRef(fEstimatorS="MSE", "EstimatorType",
                    "MSE (Mean Square Estimator) for Gaussian Likelihood or CE(Cross-Entropy) for Bernoulli Likelihood" ); //zjh
   AddPreDefVal(TString("MSE"));  //zjh
   AddPreDefVal(TString("CE"));   //zjh

   TActivationChooser aChooser;
   std::vector<TString>* names = aChooser.GetAllActivationNames();
   Int_t nTypes = names->size();
   for (Int_t i = 0; i < nTypes; i++)
      AddPreDefVal(names->at(i));
   delete names;

   DeclareOptionRef(fNeuronInputType="sum", "NeuronInputType","Neuron input function type");
   TNeuronInputChooser iChooser;
   names = iChooser.GetAllNeuronInputNames();
   nTypes = names->size();
   for (Int_t i = 0; i < nTypes; i++) AddPreDefVal(names->at(i));
   delete names;
}

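// Usage sketch (added illustration, not part of the original file): these
// options are normally supplied through the booking string of a concrete
// method such as MLP, e.g.
//
//    factory->BookMethod( dataloader, TMVA::Types::kMLP, "MLP",
//                         "NCycles=600:HiddenLayers=N+1,N:NeuronType=tanh:"
//                         "EstimatorType=CE:RandomSeed=1" );
//
// where "factory" and "dataloader" are assumed to be an existing
// TMVA::Factory and TMVA::DataLoader set up by the caller.
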
////////////////////////////////////////////////////////////////////////////////
/// do nothing specific at this moment

void TMVA::MethodANNBase::ProcessOptions()
{
   if      ( DoRegression() || DoMulticlass()) fEstimatorS = "MSE";  //zjh
   else    fEstimatorS = "CE" ;                                      //hhv
   if      (fEstimatorS == "MSE" ) fEstimator = kMSE;
   else if (fEstimatorS == "CE")   fEstimator = kCE;                 //zjh
   std::vector<Int_t>* layout = ParseLayoutString(fLayerSpec);
   BuildNetwork(layout);
   delete layout;
}

////////////////////////////////////////////////////////////////////////////////
/// parse layout specification string and return a vector, each entry
/// containing the number of neurons to go in each successive layer

std::vector<Int_t>* TMVA::MethodANNBase::ParseLayoutString(TString layerSpec)
{
   std::vector<Int_t>* layout = new std::vector<Int_t>();
   layout->push_back((Int_t)GetNvar());
   while(layerSpec.Length()>0) {
      TString sToAdd="";
      if (layerSpec.First(',')<0) {
         sToAdd = layerSpec;
         layerSpec = "";
      }
      else {
         sToAdd = layerSpec(0,layerSpec.First(','));
         layerSpec = layerSpec(layerSpec.First(',')+1,layerSpec.Length());
      }
      int nNodes = 0;
      if (sToAdd.BeginsWith("n") || sToAdd.BeginsWith("N")) { sToAdd.Remove(0,1); nNodes = GetNvar(); }
      nNodes += atoi(sToAdd);
      layout->push_back(nNodes);
   }
   if( DoRegression() )
      layout->push_back( DataInfo().GetNTargets() );  // one output node for each target
   else if( DoMulticlass() )
      layout->push_back( DataInfo().GetNClasses() );  // one output node for each class
   else
      layout->push_back(1);  // one output node (for signal/background classification)

   return layout;
}

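// Worked example (added for clarity): with 4 input variables, the default
// specification "N,N-1" in two-class mode parses to the layout
// {4, 4, 3, 1} -- the input layer, a hidden layer with N=4 neurons, a
// hidden layer with N-1=3 neurons, and a single output neuron. Plain
// numbers are also allowed, e.g. "10,5" gives {4, 10, 5, 1}.
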
////////////////////////////////////////////////////////////////////////////////
/// initialize ANNBase object

void TMVA::MethodANNBase::InitANNBase()
{
   fNetwork         = NULL;
   frgen            = NULL;
   fActivation      = NULL;
   fOutput          = NULL;  //zjh
   fIdentity        = NULL;
   fInputCalculator = NULL;
   fSynapses        = NULL;
   fEstimatorHistTrain = NULL;
   fEstimatorHistTest  = NULL;

   // reset monitoring histogram vectors
   fEpochMonHistS.clear();
   fEpochMonHistB.clear();
   fEpochMonHistW.clear();

   // these will be set in BuildNetwork()
   fInputLayer = NULL;
   fOutputNeurons.clear();

   frgen = new TRandom3(fRandomSeed);

   fSynapses = new TObjArray();
}

////////////////////////////////////////////////////////////////////////////////
/// destructor

TMVA::MethodANNBase::~MethodANNBase()
{
   DeleteNetwork();
}

////////////////////////////////////////////////////////////////////////////////
/// delete/clear network

void TMVA::MethodANNBase::DeleteNetwork()
{
   if (fNetwork != NULL) {
      TObjArray* layer;
      Int_t numLayers = fNetwork->GetEntriesFast();
      for (Int_t i = 0; i < numLayers; i++) {
         layer = (TObjArray*)fNetwork->At(i);
         DeleteNetworkLayer(layer);
      }
      delete fNetwork;
   }

   if (frgen != NULL)            delete frgen;
   if (fActivation != NULL)      delete fActivation;
   if (fOutput != NULL)          delete fOutput;  //zjh
   if (fIdentity != NULL)        delete fIdentity;
   if (fInputCalculator != NULL) delete fInputCalculator;
   if (fSynapses != NULL)        delete fSynapses;

   fNetwork         = NULL;
   frgen            = NULL;
   fActivation      = NULL;
   fOutput          = NULL;  //zjh
   fIdentity        = NULL;
   fInputCalculator = NULL;
   fSynapses        = NULL;
}

////////////////////////////////////////////////////////////////////////////////
/// delete a network layer

void TMVA::MethodANNBase::DeleteNetworkLayer( TObjArray*& layer )
{
   TNeuron* neuron;
   Int_t numNeurons = layer->GetEntriesFast();
   for (Int_t i = 0; i < numNeurons; i++) {
      neuron = (TNeuron*)layer->At(i);
      neuron->DeletePreLinks();
      delete neuron;
   }
   delete layer;
}

////////////////////////////////////////////////////////////////////////////////
/// build network given a layout (number of neurons in each layer)
/// and optional weights array

void TMVA::MethodANNBase::BuildNetwork( std::vector<Int_t>* layout, std::vector<Double_t>* weights, Bool_t fromFile )
{
   if      (fEstimatorS == "MSE") fEstimator = kMSE;  //zjh
   else if (fEstimatorS == "CE")  fEstimator = kCE;   //zjh
   else Log()<<kWARNING<<"fEstimator="<<fEstimator<<"\tfEstimatorS="<<fEstimatorS<<Endl;
   if (fEstimator!=kMSE && fEstimator!=kCE) Log()<<kWARNING<<"Estimator type unspecified \t"<<Endl;  //zjh

   Log() << kHEADER << "Building Network. " << Endl;

   DeleteNetwork();
   InitANNBase();

   // set activation and input functions
   TActivationChooser aChooser;
   fActivation = aChooser.CreateActivation(fNeuronType);
   fIdentity   = aChooser.CreateActivation("linear");
   if      (fEstimator==kMSE) fOutput = aChooser.CreateActivation("linear");   //zjh
   else if (fEstimator==kCE)  fOutput = aChooser.CreateActivation("sigmoid");  //zjh
   TNeuronInputChooser iChooser;
   fInputCalculator = iChooser.CreateNeuronInput(fNeuronInputType);

   fNetwork = new TObjArray();
   fRegulatorIdx.clear();
   fRegulators.clear();
   BuildLayers( layout, fromFile );

   // cache input layer and output neuron for fast access
   fInputLayer = (TObjArray*)fNetwork->At(0);
   TObjArray* outputLayer = (TObjArray*)fNetwork->At(fNetwork->GetEntriesFast()-1);
   fOutputNeurons.clear();
   for (Int_t i = 0; i < outputLayer->GetEntries(); i++) {
      fOutputNeurons.push_back( (TNeuron*)outputLayer->At(i) );
   }

   if (weights == NULL) InitWeights();
   else                 ForceWeights(weights);
}

////////////////////////////////////////////////////////////////////////////////
/// build the network layers

void TMVA::MethodANNBase::BuildLayers( std::vector<Int_t>* layout, Bool_t fromFile )
{
   TObjArray* curLayer;
   TObjArray* prevLayer = NULL;

   Int_t numLayers = layout->size();

   for (Int_t i = 0; i < numLayers; i++) {
      curLayer = new TObjArray();
      BuildLayer(layout->at(i), curLayer, prevLayer, i, numLayers, fromFile);
      prevLayer = curLayer;
      fNetwork->Add(curLayer);
   }

   // cache pointers to synapses for fast access, the order matters
   for (Int_t i = 0; i < numLayers; i++) {
      TObjArray* layer = (TObjArray*)fNetwork->At(i);
      Int_t numNeurons = layer->GetEntriesFast();
      if (i!=0 && i!=numLayers-1) fRegulators.push_back(0.);  //zjh
      for (Int_t j = 0; j < numNeurons; j++) {
         if (i==0) fRegulators.push_back(0.);  //zjh
         TNeuron* neuron = (TNeuron*)layer->At(j);
         Int_t numSynapses = neuron->NumPostLinks();
         for (Int_t k = 0; k < numSynapses; k++) {
            TSynapse* synapse = neuron->PostLinkAt(k);
            fSynapses->Add(synapse);
            fRegulatorIdx.push_back(fRegulators.size()-1);  //zjh
         }
      }
   }
}

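// Note on the regulator bookkeeping above (added comment): one regulator
// strength appears to be allocated per input neuron and one per hidden
// layer, and fRegulatorIdx maps every synapse -- in the same order in which
// it is appended to fSynapses -- to the regulator of the layer or input
// neuron it originates from. This mapping is used by the Bayesian
// regularisation machinery (cf. the UseRegulator option of MethodMLP).
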
////////////////////////////////////////////////////////////////////////////////
/// build a single layer with neurons and synapses connecting this
/// layer to the previous layer

void TMVA::MethodANNBase::BuildLayer( Int_t numNeurons, TObjArray* curLayer,
                                      TObjArray* prevLayer, Int_t layerIndex,
                                      Int_t numLayers, Bool_t fromFile )
{
   TNeuron* neuron;
   for (Int_t j = 0; j < numNeurons; j++) {
      if (fromFile && (layerIndex != numLayers-1) && (j==numNeurons-1)){
         neuron = new TNeuron();
         neuron->SetActivationEqn(fIdentity);
         neuron->SetBiasNeuron();
         neuron->ForceValue(1.0);
         curLayer->Add(neuron);
      }
      else {
         neuron = new TNeuron();
         neuron->SetInputCalculator(fInputCalculator);

         // input layer
         if (layerIndex == 0) {
            neuron->SetActivationEqn(fIdentity);
            neuron->SetInputNeuron();
         }
         else {
            // output layer
            if (layerIndex == numLayers-1) {
               neuron->SetOutputNeuron();
               neuron->SetActivationEqn(fOutput);  //zjh
            }
            // hidden layers
            else neuron->SetActivationEqn(fActivation);
            AddPreLinks(neuron, prevLayer);
         }

         curLayer->Add(neuron);
      }
   }

   // add bias neuron (except to output layer)
   if(!fromFile){
      if (layerIndex != numLayers-1) {
         neuron = new TNeuron();
         neuron->SetActivationEqn(fIdentity);
         neuron->SetBiasNeuron();
         neuron->ForceValue(1.0);
         curLayer->Add(neuron);
      }
   }
}

////////////////////////////////////////////////////////////////////////////////
/// add synapses connecting a neuron to its preceding layer

void TMVA::MethodANNBase::AddPreLinks(TNeuron* neuron, TObjArray* prevLayer)
{
   TSynapse* synapse;
   int numNeurons = prevLayer->GetEntriesFast();
   TNeuron* preNeuron;

   for (Int_t i = 0; i < numNeurons; i++) {
      preNeuron = (TNeuron*)prevLayer->At(i);
      synapse = new TSynapse();
      synapse->SetPreNeuron(preNeuron);
      synapse->SetPostNeuron(neuron);
      preNeuron->AddPostLink(synapse);
      neuron->AddPreLink(synapse);
   }
}

////////////////////////////////////////////////////////////////////////////////
/// initialize the synapse weights randomly

void TMVA::MethodANNBase::InitWeights()
{
   PrintMessage("Initializing weights");

   // init synapse weights, drawn uniformly from [-2, 2)
   Int_t numSynapses = fSynapses->GetEntriesFast();
   TSynapse* synapse;
   for (Int_t i = 0; i < numSynapses; i++) {
      synapse = (TSynapse*)fSynapses->At(i);
      synapse->SetWeight(4.0*frgen->Rndm() - 2.0);
   }
}

////////////////////////////////////////////////////////////////////////////////
/// force the synapse weights

void TMVA::MethodANNBase::ForceWeights(std::vector<Double_t>* weights)
{
   PrintMessage("Forcing weights");

   Int_t numSynapses = fSynapses->GetEntriesFast();
   TSynapse* synapse;
   for (Int_t i = 0; i < numSynapses; i++) {
      synapse = (TSynapse*)fSynapses->At(i);
      synapse->SetWeight(weights->at(i));
   }
}

////////////////////////////////////////////////////////////////////////////////
/// force the input values of the input neurons,
/// i.e. set the value of each input neuron from the event

void TMVA::MethodANNBase::ForceNetworkInputs( const Event* ev, Int_t ignoreIndex )
{
   Double_t x;
   TNeuron* neuron;

   for (UInt_t j = 0; j < GetNvar(); j++) {

      x = (j != (UInt_t)ignoreIndex)?ev->GetValue(j):0;

      neuron = GetInputNeuron(j);
      neuron->ForceValue(x);
   }
}

////////////////////////////////////////////////////////////////////////////////
/// calculate input values to each neuron

void TMVA::MethodANNBase::ForceNetworkCalculations()
{
   TObjArray* curLayer;
   TNeuron* neuron;
   Int_t numLayers = fNetwork->GetEntriesFast();
   Int_t numNeurons;

   for (Int_t i = 0; i < numLayers; i++) {
      curLayer = (TObjArray*)fNetwork->At(i);
      numNeurons = curLayer->GetEntriesFast();

      for (Int_t j = 0; j < numNeurons; j++) {
         neuron = (TNeuron*) curLayer->At(j);
         neuron->CalculateValue();
         neuron->CalculateActivationValue();
      }
   }
}

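// For reference (added comment): with the default "sum" input function this
// is the usual feed-forward pass -- each neuron receives v = sum_i w_i * a_i
// over its incoming synapses and outputs a = f(v), where f is the configured
// activation function (the identity for input and bias neurons).
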
////////////////////////////////////////////////////////////////////////////////
/// print messages, turn off printing by setting verbose and debug flag appropriately

void TMVA::MethodANNBase::PrintMessage( TString message, Bool_t force ) const
{
   if (Verbose() || Debug() || force) Log() << kINFO << message << Endl;
}

////////////////////////////////////////////////////////////////////////////////
/// wait for keyboard input, for debugging

void TMVA::MethodANNBase::WaitForKeyboard()
{
   std::string dummy;
   Log() << kINFO << "***Type anything to continue (q to quit): ";
   std::getline(std::cin, dummy);
   if (dummy == "q" || dummy == "Q") {
      PrintMessage( "quit" );
      delete this;
      exit(0);
   }
}

////////////////////////////////////////////////////////////////////////////////
/// print network representation, for debugging

void TMVA::MethodANNBase::PrintNetwork() const
{
   if (!Debug()) return;

   Log() << kINFO << Endl;
   PrintMessage( "Printing network " );
   Log() << kINFO << "-------------------------------------------------------------------" << Endl;

   TObjArray* curLayer;
   Int_t numLayers = fNetwork->GetEntriesFast();

   for (Int_t i = 0; i < numLayers; i++) {

      curLayer = (TObjArray*)fNetwork->At(i);
      Int_t numNeurons = curLayer->GetEntriesFast();

      Log() << kINFO << "Layer #" << i << " (" << numNeurons << " neurons):" << Endl;
      PrintLayer( curLayer );
   }
}

////////////////////////////////////////////////////////////////////////////////
/// print a single layer, for debugging

void TMVA::MethodANNBase::PrintLayer( TObjArray* layer ) const
{
   Int_t numNeurons = layer->GetEntriesFast();
   TNeuron* neuron;

   for (Int_t j = 0; j < numNeurons; j++) {
      neuron = (TNeuron*) layer->At(j);
      Log() << kINFO << "\tNeuron #" << j << " (LinksIn: " << neuron->NumPreLinks()
            << " , LinksOut: " << neuron->NumPostLinks() << ")" << Endl;
      PrintNeuron( neuron );
   }
}

////////////////////////////////////////////////////////////////////////////////
/// print a neuron, for debugging

void TMVA::MethodANNBase::PrintNeuron( TNeuron* neuron ) const
{
   Log() << kINFO
         << "\t\tValue:\t"     << neuron->GetValue()
         << "\t\tActivation: " << neuron->GetActivationValue()
         << "\t\tDelta: "      << neuron->GetDelta() << Endl;
   Log() << kINFO << "\t\tActivationEquation:\t";
   neuron->PrintActivationEqn();
   Log() << kINFO << "\t\tLinksIn:" << Endl;
   neuron->PrintPreLinks();
   Log() << kINFO << "\t\tLinksOut:" << Endl;
   neuron->PrintPostLinks();
}

////////////////////////////////////////////////////////////////////////////////
/// get the mva value generated by the NN

Double_t TMVA::MethodANNBase::GetMvaValue( Double_t* err, Double_t* errUpper )
{
   TNeuron* neuron;

   TObjArray* inputLayer = (TObjArray*)fNetwork->At(0);

   const Event * ev = GetEvent();

   for (UInt_t i = 0; i < GetNvar(); i++) {
      neuron = (TNeuron*)inputLayer->At(i);
      neuron->ForceValue( ev->GetValue(i) );
   }
   ForceNetworkCalculations();

   // check the output of the network
   TObjArray* outputLayer = (TObjArray*)fNetwork->At( fNetwork->GetEntriesFast()-1 );
   neuron = (TNeuron*)outputLayer->At(0);

   // cannot determine error
   NoErrorCalc(err, errUpper);

   return neuron->GetActivationValue();
}

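// Application sketch (added illustration; the weight-file path and method
// label are hypothetical): at application time this method is usually
// reached through a TMVA::Reader booked with the weight file of a derived
// method, e.g.
//
//    TMVA::Reader reader;
//    Float_t var1;                      // one float per input variable
//    reader.AddVariable( "var1", &var1 );
//    reader.BookMVA( "MLP", "weights/Job_MLP.weights.xml" );
//    double mva = reader.EvaluateMVA( "MLP" );
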
////////////////////////////////////////////////////////////////////////////////
/// get the regression value generated by the NN

const std::vector<Float_t> &TMVA::MethodANNBase::GetRegressionValues()
{
   TNeuron* neuron;

   TObjArray* inputLayer = (TObjArray*)fNetwork->At(0);

   const Event * ev = GetEvent();

   for (UInt_t i = 0; i < GetNvar(); i++) {
      neuron = (TNeuron*)inputLayer->At(i);
      neuron->ForceValue( ev->GetValue(i) );
   }
   ForceNetworkCalculations();

   // check the output of the network
   TObjArray* outputLayer = (TObjArray*)fNetwork->At( fNetwork->GetEntriesFast()-1 );

   if (fRegressionReturnVal == NULL) fRegressionReturnVal = new std::vector<Float_t>();
   fRegressionReturnVal->clear();

   Event * evT = new Event(*ev);
   UInt_t ntgts = outputLayer->GetEntriesFast();
   for (UInt_t itgt = 0; itgt < ntgts; itgt++) {
      evT->SetTarget(itgt,((TNeuron*)outputLayer->At(itgt))->GetActivationValue());
   }

   const Event* evT2 = GetTransformationHandler().InverseTransform( evT );
   for (UInt_t itgt = 0; itgt < ntgts; itgt++) {
      fRegressionReturnVal->push_back( evT2->GetTarget(itgt) );
   }

   delete evT;

   return *fRegressionReturnVal;
}

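// Note (added comment): the raw network outputs live in the internally
// transformed target space; the InverseTransform call above maps them back
// to the original target scale before the values are returned.
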
////////////////////////////////////////////////////////////////////////////////
/// get the multiclass classification values generated by the NN

const std::vector<Float_t> &TMVA::MethodANNBase::GetMulticlassValues()
{
   TNeuron* neuron;

   TObjArray* inputLayer = (TObjArray*)fNetwork->At(0);

   const Event * ev = GetEvent();

   for (UInt_t i = 0; i < GetNvar(); i++) {
      neuron = (TNeuron*)inputLayer->At(i);
      neuron->ForceValue( ev->GetValue(i) );
   }
   ForceNetworkCalculations();

   // check the output of the network

   if (fMulticlassReturnVal == NULL) fMulticlassReturnVal = new std::vector<Float_t>();
   fMulticlassReturnVal->clear();
   std::vector<Float_t> temp;

   UInt_t nClasses = DataInfo().GetNClasses();
   for (UInt_t icls = 0; icls < nClasses; icls++) {
      temp.push_back(GetOutputNeuron( icls )->GetActivationValue() );
   }

   for(UInt_t iClass=0; iClass<nClasses; iClass++){
      Double_t norm = 0.0;
      for(UInt_t j=0;j<nClasses;j++){
         if(iClass!=j)
            norm+=exp(temp[j]-temp[iClass]);
      }
      (*fMulticlassReturnVal).push_back(1.0/(1.0+norm));
   }

   return *fMulticlassReturnVal;
}

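// Mathematical note (added for clarity): the normalisation above is the
// softmax function in a numerically convenient form,
//
//    1 / (1 + sum_{j != i} exp(a_j - a_i)) = exp(a_i) / sum_j exp(a_j),
//
// so the returned per-class values are positive and sum to one.
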
////////////////////////////////////////////////////////////////////////////////
/// create XML description of ANN classifier

void TMVA::MethodANNBase::AddWeightsXMLTo( void* parent ) const
{
   Int_t numLayers = fNetwork->GetEntriesFast();
   void* wght = gTools().xmlengine().NewChild(parent, nullptr, "Weights");
   void* xmlLayout = gTools().xmlengine().NewChild(wght, nullptr, "Layout");
   gTools().xmlengine().NewAttr(xmlLayout, nullptr, "NLayers", gTools().StringFromInt(fNetwork->GetEntriesFast()) );
   TString weights = "";
   for (Int_t i = 0; i < numLayers; i++) {
      TObjArray* layer = (TObjArray*)fNetwork->At(i);
      Int_t numNeurons = layer->GetEntriesFast();
      void* layerxml = gTools().xmlengine().NewChild(xmlLayout, nullptr, "Layer");
      gTools().xmlengine().NewAttr(layerxml, nullptr, "Index",    gTools().StringFromInt(i) );
      gTools().xmlengine().NewAttr(layerxml, nullptr, "NNeurons", gTools().StringFromInt(numNeurons) );
      for (Int_t j = 0; j < numNeurons; j++) {
         TNeuron* neuron = (TNeuron*)layer->At(j);
         Int_t numSynapses = neuron->NumPostLinks();
         void* neuronxml = gTools().AddChild(layerxml, "Neuron");
         gTools().AddAttr(neuronxml, "NSynapses", gTools().StringFromInt(numSynapses) );
         if(numSynapses==0) continue;
         std::stringstream s("");
         s.precision( 16 );
         for (Int_t k = 0; k < numSynapses; k++) {
            TSynapse* synapse = neuron->PostLinkAt(k);
            s << std::scientific << synapse->GetWeight() << " ";
         }
         gTools().AddRawLine( neuronxml, s.str().c_str() );
      }
   }

   // if inverse hessian exists, write inverse hessian to weight file
   if( fInvHessian.GetNcols()>0 ){
      void* xmlInvHessian = gTools().xmlengine().NewChild(wght, nullptr, "InverseHessian");

      // get the matrix dimensions
      Int_t nElements = fInvHessian.GetNoElements();
      Int_t nRows     = fInvHessian.GetNrows();
      Int_t nCols     = fInvHessian.GetNcols();
      gTools().xmlengine().NewAttr(xmlInvHessian, nullptr, "NElements", gTools().StringFromInt(nElements) );
      gTools().xmlengine().NewAttr(xmlInvHessian, nullptr, "NRows",     gTools().StringFromInt(nRows) );
      gTools().xmlengine().NewAttr(xmlInvHessian, nullptr, "NCols",     gTools().StringFromInt(nCols) );

      // copy the matrix elements into a flat array
      Double_t* elements = new Double_t[nElements+10];
      fInvHessian.GetMatrix2Array( elements );

      // store the matrix elements row-wise
      Int_t index = 0;
      for( Int_t row = 0; row < nRows; ++row ){
         void* xmlRow = gTools().xmlengine().NewChild(xmlInvHessian, nullptr, "Row");
         gTools().xmlengine().NewAttr(xmlRow, nullptr, "Index", gTools().StringFromInt(row) );

         // create the rows
         std::stringstream s("");
         s.precision( 16 );
         for( Int_t col = 0; col < nCols; ++col ){
            s << std::scientific << (*(elements+index)) << " ";
            ++index;
         }
         gTools().xmlengine().AddRawLine( xmlRow, s.str().c_str() );
      }
      delete[] elements;
   }
}

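// The XML fragment written above has schematically the following shape
// (added illustration with made-up numbers; the <InverseHessian> block is
// only present when a regulator was used):
//
//    <Weights>
//      <Layout NLayers="3">
//        <Layer Index="0" NNeurons="5">
//          <Neuron NSynapses="4"> 1.23e-01 -4.56e-02 ... </Neuron>
//          ...
//        </Layer>
//        ...
//      </Layout>
//      <InverseHessian NElements="..." NRows="..." NCols="...">
//        <Row Index="0"> ... </Row>
//        ...
//      </InverseHessian>
//    </Weights>
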
////////////////////////////////////////////////////////////////////////////////
/// read MLP from xml weight file

void TMVA::MethodANNBase::ReadWeightsFromXML( void* wghtnode )
{
   // build the layout first
   Bool_t fromFile = kTRUE;
   std::vector<Int_t>* layout = new std::vector<Int_t>();

   void* xmlLayout = NULL;
   xmlLayout = gTools().GetChild(wghtnode, "Layout");
   if( !xmlLayout )
      Log() << kFATAL << "The weight file does not contain the layout information" << Endl;

   UInt_t nLayers;
   gTools().ReadAttr( xmlLayout, "NLayers", nLayers );
   layout->resize( nLayers );

   void* ch = gTools().xmlengine().GetChild(xmlLayout);
   UInt_t index;
   UInt_t nNeurons;
   while (ch) {
      gTools().ReadAttr( ch, "Index",    index    );
      gTools().ReadAttr( ch, "NNeurons", nNeurons );
      layout->at(index) = nNeurons;
      ch = gTools().GetNextChild(ch);
   }

   BuildNetwork( layout, NULL, fromFile );
   // use 'slow' (exact) TanH if processing old weight file to ensure 100% compatible results
   // otherwise use the new default, the 'fast tanh' approximation
   if (GetTrainingTMVAVersionCode() < TMVA_VERSION(4,2,1) && fActivation->GetExpression().Contains("tanh")){
      TActivationTanh* act = dynamic_cast<TActivationTanh*>( fActivation );
      if (act) act->SetSlow();
   }

   // fill the weights of the synapses
   UInt_t nSyn;
   Float_t weight;
   ch = gTools().xmlengine().GetChild(xmlLayout);
   UInt_t iLayer = 0;
   while (ch) {  // layers
      TObjArray* layer = (TObjArray*)fNetwork->At(iLayer);
      gTools().ReadAttr( ch, "Index",    index    );
      gTools().ReadAttr( ch, "NNeurons", nNeurons );

      void* nodeN = gTools().GetChild(ch);
      UInt_t iNeuron = 0;
      while( nodeN ){  // neurons
         TNeuron *neuron = (TNeuron*)layer->At(iNeuron);
         gTools().ReadAttr( nodeN, "NSynapses", nSyn );
         if( nSyn > 0 ){
            const char* content = gTools().GetContent(nodeN);
            std::stringstream s(content);
            for (UInt_t iSyn = 0; iSyn<nSyn; iSyn++) {  // synapses

               TSynapse* synapse = neuron->PostLinkAt(iSyn);
               s >> weight;
               //Log() << kWARNING << neuron << " " << weight <<  Endl;
               synapse->SetWeight(weight);
            }
         }
         nodeN = gTools().GetNextChild(nodeN);
         iNeuron++;
      }
      ch = gTools().GetNextChild(ch);
      iLayer++;
   }

   delete layout;

   void* xmlInvHessian = NULL;
   xmlInvHessian = gTools().GetChild(wghtnode, "InverseHessian");
   if( !xmlInvHessian )
      // no inverse hessian available
      return;

   fUseRegulator = kTRUE;

   Int_t nElements = 0;
   Int_t nRows = 0;
   Int_t nCols = 0;
   gTools().ReadAttr( xmlInvHessian, "NElements", nElements );
   gTools().ReadAttr( xmlInvHessian, "NRows", nRows );
   gTools().ReadAttr( xmlInvHessian, "NCols", nCols );

   // adjust the matrix dimensions
   fInvHessian.ResizeTo( nRows, nCols );

   // prepare an array to read in the values
   Double_t* elements;
   if (nElements > std::numeric_limits<int>::max()-100){
      Log() << kFATAL << "you tried to read a hessian matrix with " << nElements << " elements, --> too large, guess s.th. went wrong reading from the weight file" << Endl;
      return;
   } else {
      elements = new Double_t[nElements+10];
   }

   void* xmlRow = gTools().xmlengine().GetChild(xmlInvHessian);
   Int_t row = 0;
   index = 0;
   while (xmlRow) {  // rows
      gTools().ReadAttr( xmlRow, "Index", row );

      const char* content = gTools().xmlengine().GetNodeContent(xmlRow);

      std::stringstream s(content);
      for (Int_t iCol = 0; iCol<nCols; iCol++) {  // columns
         s >> (*(elements+index));
         ++index;
      }
      xmlRow = gTools().xmlengine().GetNext(xmlRow);
      ++row;
   }

   fInvHessian.SetMatrixArray( elements );

   delete[] elements;
}

////////////////////////////////////////////////////////////////////////////////
/// destroy/clear the network then read it back in from the weights file

void TMVA::MethodANNBase::ReadWeightsFromStream( std::istream & istr)
{
   // delete network so we can reconstruct network from scratch

   TString dummy;

   // synapse weights
   Double_t weight;
   std::vector<Double_t>* weights = new std::vector<Double_t>();
   istr >> dummy;
   while (istr >> dummy >> weight) weights->push_back(weight);  // use w/ slower write-out

   ForceWeights(weights);

   delete weights;
}

////////////////////////////////////////////////////////////////////////////////
/// compute ranking of input variables by summing function of weights

const TMVA::Ranking* TMVA::MethodANNBase::CreateRanking()
{
   // create the ranking object
   fRanking = new Ranking( GetName(), "Importance" );

   TNeuron*  neuron;
   TSynapse* synapse;
   Double_t  importance, avgVal;
   TString   varName;

   for (UInt_t ivar = 0; ivar < GetNvar(); ivar++) {

      neuron = GetInputNeuron(ivar);
      Int_t numSynapses = neuron->NumPostLinks();
      importance = 0;
      varName = GetInputVar(ivar);  // fix this line

      // figure out average value of variable i
      Double_t meanS, meanB, rmsS, rmsB, xmin, xmax;
      Statistics( TMVA::Types::kTraining, varName,
                  meanS, meanB, rmsS, rmsB, xmin, xmax );

      avgVal = (TMath::Abs(meanS) + TMath::Abs(meanB))/2.0;
      double meanrms = (TMath::Abs(rmsS) + TMath::Abs(rmsB))/2.;
      if (avgVal<meanrms) avgVal = meanrms;  // protection against zero mean
      if (IsNormalised()) avgVal = 0.5*(1 + gTools().NormVariable( avgVal, GetXmin( ivar ), GetXmax( ivar )));

      for (Int_t j = 0; j < numSynapses; j++) {
         synapse = neuron->PostLinkAt(j);
         importance += synapse->GetWeight() * synapse->GetWeight();
      }

      importance *= avgVal * avgVal;

      fRanking->AddRank( Rank( varName, importance ) );
   }

   return fRanking;
}

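// In formula form (added for clarity): the importance assigned to input
// variable i is
//
//    I_i = <|x_i|>^2 * sum_j w_ij^2 ,
//
// where the sum runs over all synapses leaving input neuron i and <|x_i|>
// is the average absolute value of the variable, floored by its mean RMS
// and rescaled if the inputs are normalised.
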
////////////////////////////////////////////////////////////////////////////////

void TMVA::MethodANNBase::CreateWeightMonitoringHists( const TString& bulkname,
                                                       std::vector<TH1*>* hv ) const
{
   TH2F* hist;
   Int_t numLayers = fNetwork->GetEntriesFast();

   for (Int_t i = 0; i < numLayers-1; i++) {

      TObjArray* layer1 = (TObjArray*)fNetwork->At(i);
      TObjArray* layer2 = (TObjArray*)fNetwork->At(i+1);
      Int_t numNeurons1 = layer1->GetEntriesFast();
      Int_t numNeurons2 = layer2->GetEntriesFast();

      TString name = TString::Format("%s%i%i", bulkname.Data(), i, i+1);
      hist = new TH2F(name.Data(), name.Data(),
                      numNeurons1, 0, numNeurons1, numNeurons2, 0, numNeurons2);

      for (Int_t j = 0; j < numNeurons1; j++) {

         TNeuron* neuron = (TNeuron*)layer1->At(j);
         Int_t numSynapses = neuron->NumPostLinks();

         for (Int_t k = 0; k < numSynapses; k++) {

            TSynapse* synapse = neuron->PostLinkAt(k);
            hist->SetBinContent(j+1, k+1, synapse->GetWeight());
         }
      }

      if (hv) hv->push_back( hist );
      else {
         hist->Write();
         delete hist;
      }
   }
}

////////////////////////////////////////////////////////////////////////////////
/// write histograms to file

void TMVA::MethodANNBase::WriteMonitoringHistosToFile() const
{
   PrintMessage(TString::Format("Write special histos to file: %s", BaseDir()->GetPath()).Data(), kTRUE);

   if (fEstimatorHistTrain) fEstimatorHistTrain->Write();
   if (fEstimatorHistTest ) fEstimatorHistTest ->Write();

   // histograms containing weights for architecture plotting (used in macro "network.cxx")
   CreateWeightMonitoringHists( "weights_hist" );

   // now save all the epoch-wise monitoring information
   static std::atomic<int> epochMonitoringDirectoryNumber{0};
   int epochVal = epochMonitoringDirectoryNumber++;
   TDirectory* epochdir = nullptr;
   if( epochVal == 0 )
      epochdir = BaseDir()->mkdir( "EpochMonitoring" );
   else
      epochdir = BaseDir()->mkdir( TString::Format("EpochMonitoring_%4d",epochVal).Data() );

   epochdir->cd();
   for (std::vector<TH1*>::const_iterator it = fEpochMonHistS.begin(); it != fEpochMonHistS.end(); ++it) {
      (*it)->Write();
      delete (*it);
   }
   for (std::vector<TH1*>::const_iterator it = fEpochMonHistB.begin(); it != fEpochMonHistB.end(); ++it) {
      (*it)->Write();
      delete (*it);
   }
   for (std::vector<TH1*>::const_iterator it = fEpochMonHistW.begin(); it != fEpochMonHistW.end(); ++it) {
      (*it)->Write();
      delete (*it);
   }
   BaseDir()->cd();
}

////////////////////////////////////////////////////////////////////////////////
/// write specific classifier response

void TMVA::MethodANNBase::MakeClassSpecific( std::ostream& fout, const TString& className ) const
{
   Int_t numLayers = fNetwork->GetEntries();

   fout << std::endl;
   fout << "   double ActivationFnc(double x) const;" << std::endl;
   fout << "   double OutputActivationFnc(double x) const;" << std::endl;  //zjh
   fout << std::endl;
   int numNodesFrom = -1;
   for (Int_t lIdx = 0; lIdx < numLayers; lIdx++) {
      int numNodesTo = ((TObjArray*)fNetwork->At(lIdx))->GetEntries();
      if (numNodesFrom<0) { numNodesFrom=numNodesTo; continue; }
      fout << "   double fWeightMatrix" << lIdx-1 << "to" << lIdx << "[" << numNodesTo << "][" << numNodesFrom << "];";
      fout << "   // weight matrix from layer " << lIdx-1 << " to " << lIdx << std::endl;
      numNodesFrom = numNodesTo;
   }
   fout << std::endl;
   fout << "};" << std::endl;

   fout << std::endl;

   fout << "inline void " << className << "::Initialize()" << std::endl;
   fout << "{" << std::endl;
   fout << "   // build network structure" << std::endl;

   for (Int_t i = 0; i < numLayers-1; i++) {
      fout << "   // weight matrix from layer " << i << " to " << i+1 << std::endl;
      TObjArray* layer = (TObjArray*)fNetwork->At(i);
      Int_t numNeurons = layer->GetEntriesFast();
      for (Int_t j = 0; j < numNeurons; j++) {
         TNeuron* neuron = (TNeuron*)layer->At(j);
         Int_t numSynapses = neuron->NumPostLinks();
         for (Int_t k = 0; k < numSynapses; k++) {
            TSynapse* synapse = neuron->PostLinkAt(k);
            fout << "   fWeightMatrix" << i << "to" << i+1 << "[" << k << "][" << j << "] = " << synapse->GetWeight() << ";" << std::endl;
         }
      }
   }

   fout << "}" << std::endl;
   fout << std::endl;

   // writing of the GetMvaValue__ method
   fout << "inline double " << className << "::GetMvaValue__( const std::vector<double>& inputValues ) const" << std::endl;
   fout << "{" << std::endl;
   fout << "   if (inputValues.size() != (unsigned int)" << ((TObjArray *)fNetwork->At(0))->GetEntries() - 1 << ") {"
        << std::endl;
   fout << "      std::cout << \"Input vector needs to be of size \" << "
        << ((TObjArray *)fNetwork->At(0))->GetEntries() - 1 << " << std::endl;" << std::endl;
   fout << "      return 0;" << std::endl;
   fout << "   }" << std::endl;
   fout << std::endl;
   for (Int_t lIdx = 1; lIdx < numLayers; lIdx++) {
      TObjArray *layer = (TObjArray *)fNetwork->At(lIdx);
      int numNodes = layer->GetEntries();
      fout << "   std::array<double, " << numNodes << "> fWeights" << lIdx << " {{}};" << std::endl;
   }
   for (Int_t lIdx = 1; lIdx < numLayers - 1; lIdx++) {
      fout << "   fWeights" << lIdx << ".back() = 1.;" << std::endl;
   }
   fout << std::endl;
   for (Int_t i = 0; i < numLayers - 1; i++) {
      fout << "   // layer " << i << " to " << i + 1 << std::endl;
      if (i + 1 == numLayers - 1) {
         fout << "   for (int o=0; o<" << ((TObjArray *)fNetwork->At(i + 1))->GetEntries() << "; o++) {" << std::endl;
      } else {
         fout << "   for (int o=0; o<" << ((TObjArray *)fNetwork->At(i + 1))->GetEntries() - 1 << "; o++) {"
              << std::endl;
      }
      if (0 == i) {
         fout << "      std::array<double, " << ((TObjArray *)fNetwork->At(i))->GetEntries()
              << "> buffer; // no need to initialise" << std::endl;
         fout << "      for (int i = 0; i<" << ((TObjArray *)fNetwork->At(i))->GetEntries() << " - 1; i++) {"
              << std::endl;
         fout << "         buffer[i] = fWeightMatrix" << i << "to" << i + 1 << "[o][i] * inputValues[i];" << std::endl;
         fout << "      } // loop over i" << std::endl;
         fout << "      buffer.back() = fWeightMatrix" << i << "to" << i + 1 << "[o]["
              << ((TObjArray *)fNetwork->At(i))->GetEntries() - 1 << "];" << std::endl;
      } else {
         fout << "      std::array<double, " << ((TObjArray *)fNetwork->At(i))->GetEntries()
              << "> buffer; // no need to initialise" << std::endl;
         fout << "      for (int i=0; i<" << ((TObjArray *)fNetwork->At(i))->GetEntries() << "; i++) {" << std::endl;
         fout << "         buffer[i] = fWeightMatrix" << i << "to" << i + 1 << "[o][i] * fWeights" << i << "[i];"
              << std::endl;
         fout << "      } // loop over i" << std::endl;
      }
      fout << "      for (int i=0; i<" << ((TObjArray *)fNetwork->At(i))->GetEntries() << "; i++) {" << std::endl;
      if (fNeuronInputType == "sum") {
         fout << "         fWeights" << i + 1 << "[o] += buffer[i];" << std::endl;
      } else if (fNeuronInputType == "sqsum") {
         fout << "         fWeights" << i + 1 << "[o] += buffer[i]*buffer[i];" << std::endl;
      } else { // fNeuronInputType == TNeuronInputChooser::kAbsSum
         fout << "         fWeights" << i + 1 << "[o] += fabs(buffer[i]);" << std::endl;
      }
      fout << "      } // loop over i" << std::endl;
      fout << "   } // loop over o" << std::endl;
      if (i + 1 == numLayers - 1) {
         fout << "   for (int o=0; o<" << ((TObjArray *)fNetwork->At(i + 1))->GetEntries() << "; o++) {" << std::endl;
      } else {
         fout << "   for (int o=0; o<" << ((TObjArray *)fNetwork->At(i + 1))->GetEntries() - 1 << "; o++) {"
              << std::endl;
      }
      if (i+1 != numLayers-1) // hidden layers use the activation function
         fout << "      fWeights" << i + 1 << "[o] = ActivationFnc(fWeights" << i + 1 << "[o]);" << std::endl;
      else                    // the output layer uses the dedicated output activation function
         fout << "      fWeights" << i + 1 << "[o] = OutputActivationFnc(fWeights" << i + 1 << "[o]);"
              << std::endl;  // zjh
      fout << "   } // loop over o" << std::endl;
   }
   fout << std::endl;
   fout << "   return fWeights" << numLayers - 1 << "[0];" << std::endl;
   fout << "}" << std::endl;

   fout << std::endl;
   TString fncName = className+"::ActivationFnc";
   fActivation->MakeFunction(fout, fncName);
   fncName = className+"::OutputActivationFnc";  //zjh
   fOutput->MakeFunction(fout, fncName);  //zjh

   fout << std::endl;
   fout << "// Clean up" << std::endl;
   fout << "inline void " << className << "::Clear()" << std::endl;
   fout << "{" << std::endl;
   fout << "}" << std::endl;
}

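// Usage sketch for the emitted standalone class (added illustration; the
// class name, e.g. "ReadMLP", and the file it lives in depend on the booked
// method, and the interface shown is an assumption based on the usual TMVA
// standalone-class layout):
//
//    std::vector<std::string> inputNames = { "var1", "var2", "var3", "var4" };
//    ReadMLP mlp( inputNames );                  // hypothetical generated class
//    std::vector<double> input = { 0.1, 0.4, -1.2, 3.3 };
//    double response = mlp.GetMvaValue( input ); // wraps GetMvaValue__ shown above
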
////////////////////////////////////////////////////////////////////////////////
/// who the hell makes such strange Debug flags that even use "global pointers"..

Bool_t TMVA::MethodANNBase::Debug() const
{
   return fgDEBUG;
}