MethodCFMlpANN.cxx
// @(#)root/tmva $Id$
// Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss

/**********************************************************************************
 * Project: TMVA - a Root-integrated toolkit for multivariate Data analysis      *
 * Package: TMVA                                                                  *
 * Class  : TMVA::MethodCFMlpANN                                                  *
 * Web    : http://tmva.sourceforge.net                                           *
 *                                                                                *
 * Description:                                                                   *
 *      Implementation (see header for description)                              *
 *                                                                                *
 * Authors (alphabetical):                                                        *
 *      Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland              *
 *      Xavier Prudent  <prudent@lapp.in2p3.fr>  - LAPP, France                   *
 *      Helge Voss      <Helge.Voss@cern.ch>     - MPI-K Heidelberg, Germany      *
 *      Kai Voss        <Kai.Voss@cern.ch>       - U. of Victoria, Canada         *
 *                                                                                *
 * Copyright (c) 2005:                                                            *
 *      CERN, Switzerland                                                         *
 *      U. of Victoria, Canada                                                    *
 *      MPI-K Heidelberg, Germany                                                 *
 *      LAPP, Annecy, France                                                      *
 *                                                                                *
 * Redistribution and use in source and binary forms, with or without            *
 * modification, are permitted according to the terms listed in LICENSE          *
 * (http://tmva.sourceforge.net/LICENSE)                                          *
 **********************************************************************************/

/*! \class TMVA::MethodCFMlpANN
\ingroup TMVA

Interface to the Clermont-Ferrand artificial neural network.

The CFMlpANN belongs to the class of multilayer perceptrons (MLP), which are
feed-forward networks according to the following propagation schema:

\image html tmva_mlp.png Schema for artificial neural network.

The input layer contains as many neurons as input variables used in the MVA.
The output layer contains two neurons for the signal and background
event classes. In between the input and output layers are a variable number
of <i>k</i> hidden layers with arbitrary numbers of neurons. (While the
structure of the input and output layers is determined by the problem, the
hidden layers can be configured by the user through the option string
of the method booking.)

As indicated in the sketch, all neuron inputs to a layer are linear
combinations of the neuron outputs of the previous layer. The transfer
from input to output within a neuron is performed by means of an "activation
function". In general, the activation function of a neuron can be
zero (deactivated), one (linear), or non-linear. The above example uses
a sigmoid activation function. The transfer function of the output layer
is usually linear. As a consequence, an ANN without hidden layers should
give the same discrimination power as a linear discriminant analysis (Fisher).
With one hidden layer, the ANN computes a linear combination of
sigmoid functions.

The learning method used by the CFMlpANN is only stochastic.
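
Example of booking this classifier via the Factory (a minimal sketch; the
`factory` and `dataloader` objects and the option values are assumptions,
set up as in the standard TMVA classification examples and not defined here):

~~~{.cpp}
factory->BookMethod( dataloader, TMVA::Types::kCFMlpANN, "CFMlpANN",
                     "!H:!V:NCycles=2000:HiddenLayers=N+1,N" );
~~~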
*/


#include "TMVA/MethodCFMlpANN.h"

#include "TMVA/ClassifierFactory.h"
#include "TMVA/Configurable.h"
#include "TMVA/DataSet.h"
#include "TMVA/DataSetInfo.h"
#include "TMVA/IMethod.h"
#include "TMVA/MethodBase.h"
#include "TMVA/MethodCFMlpANN_def.h"
#include "TMVA/MsgLogger.h"
#include "TMVA/Tools.h"
#include "TMVA/Types.h"

#include "TMatrix.h"
#include "Riostream.h"
#include "TMath.h"

#include <cstdlib>
#include <iostream>
#include <string>



REGISTER_METHOD(CFMlpANN)

using std::stringstream;
using std::make_pair;
using std::atoi;

ClassImp(TMVA::MethodCFMlpANN);


////////////////////////////////////////////////////////////////////////////////
/// standard constructor
///
/// option string: "n_training_cycles:n_hidden_layers"
///
/// default is: n_training_cycles = 3000, n_layers = 4
///
/// * note that the number of hidden layers in the NN is:
///   n_hidden_layers = n_layers - 2
///
/// * since there is one input and one output layer. The number of
///   nodes (neurons) is predefined to be:
///
///   n_nodes[i] = nvars + 1 - i (where i=1..n_layers)
///
///   with nvars being the number of variables used in the NN.
///
/// Hence, the default case is:
///
///     n_neurons(layer 1 (input)) : nvars
///     n_neurons(layer 2 (hidden)): nvars-1
///     n_neurons(layer 3 (hidden)): nvars-1
///     n_neurons(layer 4 (out))   : 2
///
/// This artificial neural network usually needs a relatively large
/// number of cycles to converge (8000 and more). Overtraining can
/// be efficiently tested by comparing the signal and background
/// output of the NN for the events that were used for training and
/// an independent data sample (with equal properties). If the separation
/// performance is significantly better for the training sample, the
/// NN is fitting statistical fluctuations, and is hence overtrained. In
/// this case, the number of cycles should be reduced, or the size
/// of the training sample increased.

TMVA::MethodCFMlpANN::MethodCFMlpANN( const TString& jobName,
                                      const TString& methodTitle,
                                      DataSetInfo& theData,
                                      const TString& theOption ) :
   TMVA::MethodBase( jobName, Types::kCFMlpANN, methodTitle, theData, theOption),
   fData(0),
   fClass(0),
   fNlayers(0),
   fNcycles(0),
   fNodes(0),
   fYNN(0),
   MethodCFMlpANN_nsel(0)
{
}

////////////////////////////////////////////////////////////////////////////////
/// constructor from weight file

TMVA::MethodCFMlpANN::MethodCFMlpANN( DataSetInfo& theData,
                                      const TString& theWeightFile ):
   TMVA::MethodBase( Types::kCFMlpANN, theData, theWeightFile),
   fData(0),
   fClass(0),
   fNlayers(0),
   fNcycles(0),
   fNodes(0),
   fYNN(0),
   MethodCFMlpANN_nsel(0)
{
}

////////////////////////////////////////////////////////////////////////////////
/// CFMlpANN can handle classification with 2 classes

Bool_t TMVA::MethodCFMlpANN::HasAnalysisType( Types::EAnalysisType type, UInt_t numberClasses, UInt_t /*numberTargets*/ )
{
   if (type == Types::kClassification && numberClasses == 2) return kTRUE;
   return kFALSE;
}

////////////////////////////////////////////////////////////////////////////////
/// define the options (their key words) that can be set in the option string
/// known options:
///  - NCycles=xx              : the number of training cycles
///  - HiddenLayers="N-1,N-2"  : the specification of the hidden layers

void TMVA::MethodCFMlpANN::DeclareOptions()
{
   DeclareOptionRef( fNcycles  =3000,    "NCycles",      "Number of training cycles" );
   DeclareOptionRef( fLayerSpec="N,N-1", "HiddenLayers", "Specification of hidden layer architecture" );
}

////////////////////////////////////////////////////////////////////////////////
/// decode the options in the option string

void TMVA::MethodCFMlpANN::ProcessOptions()
{
   fNodes = new Int_t[20]; // number of nodes per layer (maximum 20 layers)
   fNlayers = 2;
   Int_t currentHiddenLayer = 1;
   TString layerSpec(fLayerSpec);
   while (layerSpec.Length()>0) {
      TString sToAdd = "";
      if (layerSpec.First(',')<0) {
         sToAdd = layerSpec;
         layerSpec = "";
      }
      else {
         sToAdd = layerSpec(0,layerSpec.First(','));
         layerSpec = layerSpec(layerSpec.First(',')+1,layerSpec.Length());
      }
      Int_t nNodes = 0;
      if (sToAdd.BeginsWith("N") || sToAdd.BeginsWith("n")) { sToAdd.Remove(0,1); nNodes = GetNvar(); }
      nNodes += atoi(sToAdd);
      fNodes[currentHiddenLayer++] = nNodes;
      fNlayers++;
   }
   fNodes[0]          = GetNvar(); // number of input nodes
   fNodes[fNlayers-1] = 2;         // number of output nodes
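   // for illustration: with 4 input variables, the default specification "N,N-1"
   // expands to the architecture 4 (input) : 4 : 3 : 2 (output)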

   if (IgnoreEventsWithNegWeightsInTraining()) {
      Log() << kFATAL << "Mechanism to ignore events with negative weights in training not yet available for method: "
            << GetMethodTypeName()
            << " --> please remove \"IgnoreNegWeightsInTraining\" option from booking string."
            << Endl;
   }

   Log() << kINFO << "Use configuration (nodes per layer): in=";
   for (Int_t i=0; i<fNlayers-1; i++) Log() << kINFO << fNodes[i] << ":";
   Log() << kINFO << fNodes[fNlayers-1] << "=out" << Endl;

   // some info
   Log() << "Use " << fNcycles << " training cycles" << Endl;

   Int_t nEvtTrain = Data()->GetNTrainingEvents();

   // note that one variable is type
   if (nEvtTrain>0) {

      // Data LUT
      fData  = new TMatrix( nEvtTrain, GetNvar() );
      fClass = new std::vector<Int_t>( nEvtTrain );

      // ---- fill LUTs

      UInt_t ivar;
      for (Int_t ievt=0; ievt<nEvtTrain; ievt++) {
         const Event * ev = GetEvent(ievt);

         // identify signal and background events
         (*fClass)[ievt] = DataInfo().IsSignal(ev) ? 1 : 2;

         // use normalized input Data
         for (ivar=0; ivar<GetNvar(); ivar++) {
            (*fData)( ievt, ivar ) = ev->GetValue(ivar);
         }
      }

      //Log() << kVERBOSE << Data()->GetNEvtSigTrain() << " Signal and "
      //      << Data()->GetNEvtBkgdTrain() << " background" << " events in trainingTree" << Endl;
   }

}

////////////////////////////////////////////////////////////////////////////////
/// default initialisation called by all constructors

void TMVA::MethodCFMlpANN::Init( void )
{
   // CFMlpANN prefers normalised input variables
   SetNormalised( kTRUE );

   // initialize dimensions
   MethodCFMlpANN_nsel = 0;
}

////////////////////////////////////////////////////////////////////////////////
/// destructor

TMVA::MethodCFMlpANN::~MethodCFMlpANN( void )
{
   delete fData;
   delete fClass;
   delete[] fNodes;

   if (fYNN!=0) {
      for (Int_t i=0; i<fNlayers; i++) delete[] fYNN[i];
      delete[] fYNN;
      fYNN=0;
   }
}

////////////////////////////////////////////////////////////////////////////////
/// training of the Clermont-Ferrand NN classifier

void TMVA::MethodCFMlpANN::Train( void )
{
   Double_t dumDat(0);
   Int_t ntrain(Data()->GetNTrainingEvents());
   Int_t ntest(0);
   Int_t nvar(GetNvar());
   Int_t nlayers(fNlayers);
   Int_t *nodes = new Int_t[nlayers];
   Int_t ncycles(fNcycles);

   for (Int_t i=0; i<nlayers; i++) nodes[i] = fNodes[i]; // full copy of class member

   if (fYNN != 0) {
      for (Int_t i=0; i<fNlayers; i++) delete[] fYNN[i];
      delete[] fYNN;
      fYNN = 0;
   }
   fYNN = new Double_t*[nlayers];
   for (Int_t layer=0; layer<nlayers; layer++)
      fYNN[layer] = new Double_t[fNodes[layer]];

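   // Train_nn (inherited from MethodCFMlpANN_Utils, presumably the f2c-translated
   // Clermont-Ferrand routine) runs the stochastic training on the buffered data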
   // please check
#ifndef R__WIN32
   Train_nn( &dumDat, &dumDat, &ntrain, &ntest, &nvar, &nlayers, nodes, &ncycles );
#else
   Log() << kWARNING << "<Train> sorry CFMlpANN does not run on Windows" << Endl;
#endif

   delete [] nodes;

   ExitFromTraining();
}

////////////////////////////////////////////////////////////////////////////////
/// returns CFMlpANN output (normalised within [0,1])

Double_t TMVA::MethodCFMlpANN::GetMvaValue( Double_t* err, Double_t* errUpper )
{
   Bool_t isOK = kTRUE;

   const Event* ev = GetEvent();

   // copy of input variables
   std::vector<Double_t> inputVec( GetNvar() );
   for (UInt_t ivar=0; ivar<GetNvar(); ivar++) inputVec[ivar] = ev->GetValue(ivar);

   Double_t myMVA = EvalANN( inputVec, isOK );
   if (!isOK) Log() << kFATAL << "EvalANN returns (!isOK) for event " << Endl;

   // cannot determine error
   NoErrorCalc(err, errUpper);

   return myMVA;
}

////////////////////////////////////////////////////////////////////////////////
/// evaluates NN value as function of input variables

Double_t TMVA::MethodCFMlpANN::EvalANN( std::vector<Double_t>& inVar, Bool_t& isOK )
{
   // hard copy of input variables (necessary because they are updated later)
   Double_t* xeev = new Double_t[GetNvar()];
   for (UInt_t ivar=0; ivar<GetNvar(); ivar++) xeev[ivar] = inVar[ivar];

   // ---- now apply the weights: get NN output
   isOK = kTRUE;
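   // clip each input to the range seen in training and rescale it linearly,
   // x' = (x - (xmax+xmin)/2) / ((xmax-xmin)/2), so that it lies within [-1,1]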
   for (UInt_t jvar=0; jvar<GetNvar(); jvar++) {

      if (fVarn_1.xmax[jvar] < xeev[jvar]) xeev[jvar] = fVarn_1.xmax[jvar];
      if (fVarn_1.xmin[jvar] > xeev[jvar]) xeev[jvar] = fVarn_1.xmin[jvar];
      if (fVarn_1.xmax[jvar] == fVarn_1.xmin[jvar]) {
         isOK = kFALSE;
         xeev[jvar] = 0;
      }
      else {
         xeev[jvar] = xeev[jvar] - ((fVarn_1.xmax[jvar] + fVarn_1.xmin[jvar])/2);
         xeev[jvar] = xeev[jvar] / ((fVarn_1.xmax[jvar] - fVarn_1.xmin[jvar])/2);
      }
   }

   NN_ava( xeev );

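   // the network output is tanh-like in [-1,1]; map it to [0,1] for the MVA value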
   Double_t retval = 0.5*(1.0 + fYNN[fParam_1.layerm-1][0]);

   delete [] xeev;

   return retval;
}

////////////////////////////////////////////////////////////////////////////////
/// auxiliary functions

void TMVA::MethodCFMlpANN::NN_ava( Double_t* xeev )
{
   for (Int_t ivar=0; ivar<fNeur_1.neuron[0]; ivar++) fYNN[0][ivar] = xeev[ivar];
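
   // forward propagation: for every neuron j of layer l the activation is
   //   y_j(l) = f_l( ww(l,j) + sum_k w(l,j,k) * y_k(l-1) )
   // with ww holding the bias terms and w the inter-layer weights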

   for (Int_t layer=1; layer<fParam_1.layerm; layer++) {
      for (Int_t j=1; j<=fNeur_1.neuron[layer]; j++) {

         Double_t x = Ww_ref(fNeur_1.ww, layer+1,j); // init with the bias layer

         for (Int_t k=1; k<=fNeur_1.neuron[layer-1]; k++) { // neurons of originating layer
            x += fYNN[layer-1][k-1]*W_ref(fNeur_1.w, layer+1, j, k);
         }
         fYNN[layer][j-1] = NN_fonc( layer, x );
      }
   }
}

////////////////////////////////////////////////////////////////////////////////
/// activation function

Double_t TMVA::MethodCFMlpANN::NN_fonc( Int_t i, Double_t u ) const
{
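   // symmetric sigmoid with a per-layer "temperature":
   //   f(u) = (1 - exp(-u/T_i)) / (1 + exp(-u/T_i)) = tanh(u/(2*T_i)),
   // cut off to +-1 for very large |u/T_i| to avoid overflow in the exponential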
   Double_t f(0);

   if      (u/fDel_1.temp[i] >  170) f = +1;
   else if (u/fDel_1.temp[i] < -170) f = -1;
   else {
      Double_t yy = TMath::Exp(-u/fDel_1.temp[i]);
      f = (1 - yy)/(1 + yy);
   }

   return f;
}

////////////////////////////////////////////////////////////////////////////////
/// read back the weights resulting from the training from file (stream)

void TMVA::MethodCFMlpANN::ReadWeightsFromStream( std::istream& istr )
{
   TString var;

   // read number of variables and classes
   UInt_t nva(0), lclass(0);
   istr >> nva >> lclass;

   if (GetNvar() != nva) // wrong file
      Log() << kFATAL << "<ReadWeightsFromFile> mismatch in number of variables" << Endl;

   // number of output classes must be 2
   if (lclass != 2) // wrong file
      Log() << kFATAL << "<ReadWeightsFromFile> mismatch in number of classes" << Endl;

   // check that we are not at the end of the file
   if (istr.eof( ))
      Log() << kFATAL << "<ReadWeightsFromStream> reached EOF prematurely " << Endl;

   // read extrema of input variables
   for (UInt_t ivar=0; ivar<GetNvar(); ivar++)
      istr >> fVarn_1.xmax[ivar] >> fVarn_1.xmin[ivar];

   // read number of layers (sum of: input + output + hidden)
   istr >> fParam_1.layerm;

   if (fYNN != 0) {
      for (Int_t i=0; i<fNlayers; i++) delete[] fYNN[i];
      delete[] fYNN;
      fYNN = 0;
   }
   fYNN = new Double_t*[fParam_1.layerm];
   for (Int_t layer=0; layer<fParam_1.layerm; layer++) {
      // read number of neurons for each layer
      // coverity[tainted_data_argument]
      istr >> fNeur_1.neuron[layer];
      fYNN[layer] = new Double_t[fNeur_1.neuron[layer]];
   }

   // to read dummy lines
   const Int_t nchar( 100 );
   char* dumchar = new char[nchar];

   // read weights
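   // (the weight file stores at most 10 values per text line, Fortran-style,
   // which is why the loop below walks the neurons of each layer in chunks of 10)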
   for (Int_t layer=1; layer<=fParam_1.layerm-1; layer++) {

      Int_t nq = fNeur_1.neuron[layer]/10;
      Int_t nr = fNeur_1.neuron[layer] - nq*10;

      Int_t kk(0);
      if (nr==0) kk = nq;
      else       kk = nq+1;

      for (Int_t k=1; k<=kk; k++) {
         Int_t jmin = 10*k - 9;
         Int_t jmax = 10*k;
         if (fNeur_1.neuron[layer]<jmax) jmax = fNeur_1.neuron[layer];
         for (Int_t j=jmin; j<=jmax; j++) {
            istr >> Ww_ref(fNeur_1.ww, layer+1, j);
         }
         for (Int_t i=1; i<=fNeur_1.neuron[layer-1]; i++) {
            for (Int_t j=jmin; j<=jmax; j++) {
               istr >> W_ref(fNeur_1.w, layer+1, j, i);
            }
         }
         // skip two empty lines
         istr.getline( dumchar, nchar );
      }
   }

   for (Int_t layer=0; layer<fParam_1.layerm; layer++) {

      // skip 2 empty lines
      istr.getline( dumchar, nchar );
      istr.getline( dumchar, nchar );

      istr >> fDel_1.temp[layer];
   }

   // sanity check
   if ((Int_t)GetNvar() != fNeur_1.neuron[0]) {
      Log() << kFATAL << "<ReadWeightsFromFile> mismatch in zeroth layer:"
            << GetNvar() << " " << fNeur_1.neuron[0] << Endl;
   }

   fNlayers = fParam_1.layerm;
   delete[] dumchar;
}

////////////////////////////////////////////////////////////////////////////////
/// data interface function

Int_t TMVA::MethodCFMlpANN::DataInterface( Double_t* /*tout2*/, Double_t* /*tin2*/,
                                           Int_t* /* icode*/, Int_t* /*flag*/,
                                           Int_t* /*nalire*/, Int_t* nvar,
                                           Double_t* xpg, Int_t* iclass, Int_t* ikend )
{
   // icode and ikend are dummies needed to match f2c mlpl3 functions
   *ikend = 0;


   // sanity checks
   if (0 == xpg) {
      Log() << kFATAL << "ERROR in MethodCFMlpANN_DataInterface zero pointer xpg" << Endl;
   }
   if (*nvar != (Int_t)this->GetNvar()) {
      Log() << kFATAL << "ERROR in MethodCFMlpANN_DataInterface mismatch in num of variables: "
            << *nvar << " " << this->GetNvar() << Endl;
   }

   // fill variables
   *iclass = (int)this->GetClass( MethodCFMlpANN_nsel );
   for (UInt_t ivar=0; ivar<this->GetNvar(); ivar++)
      xpg[ivar] = (double)this->GetData( MethodCFMlpANN_nsel, ivar );

   ++MethodCFMlpANN_nsel;

   return 0;
}

////////////////////////////////////////////////////////////////////////////////
/// write weights to xml file
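///
/// The XML written below has roughly the following layout (a sketch; element
/// names are taken from the code): a "Weights" node with attributes NVars,
/// NClasses and NLayers, holding a "VarMinMax" child (min/max per variable),
/// an "NNeurons" child (neurons per layer), one "Layer&lt;i&gt;" child per hidden
/// and output layer whose "Neuron&lt;j&gt;" children carry the bias followed by the
/// incoming weights, and finally a "LayerTemp" child with the layer temperatures.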

void TMVA::MethodCFMlpANN::AddWeightsXMLTo( void* parent ) const
{
   void *wght = gTools().AddChild(parent, "Weights");
   gTools().AddAttr(wght,"NVars",fParam_1.nvar);
   gTools().AddAttr(wght,"NClasses",fParam_1.lclass);
   gTools().AddAttr(wght,"NLayers",fParam_1.layerm);
   void* minmaxnode = gTools().AddChild(wght, "VarMinMax");
   stringstream s;
   s.precision( 16 );
   for (Int_t ivar=0; ivar<fParam_1.nvar; ivar++)
      s << std::scientific << fVarn_1.xmin[ivar] << " " << fVarn_1.xmax[ivar] << " ";
   gTools().AddRawLine( minmaxnode, s.str().c_str() );
   void* neurons = gTools().AddChild(wght, "NNeurons");
   stringstream n;
   n.precision( 16 );
   for (Int_t layer=0; layer<fParam_1.layerm; layer++)
      n << std::scientific << fNeur_1.neuron[layer] << " ";
   gTools().AddRawLine( neurons, n.str().c_str() );
   for (Int_t layer=1; layer<fParam_1.layerm; layer++) {
      void* layernode = gTools().AddChild(wght, "Layer"+gTools().StringFromInt(layer));
      gTools().AddAttr(layernode,"NNeurons",fNeur_1.neuron[layer]);
      void* neuronnode=NULL;
      for (Int_t neuron=0; neuron<fNeur_1.neuron[layer]; neuron++) {
         neuronnode = gTools().AddChild(layernode,"Neuron"+gTools().StringFromInt(neuron));
         stringstream weights;
         weights.precision( 16 );
         weights << std::scientific << Ww_ref(fNeur_1.ww, layer+1, neuron+1);
         for (Int_t i=0; i<fNeur_1.neuron[layer-1]; i++) {
            weights << " " << std::scientific << W_ref(fNeur_1.w, layer+1, neuron+1, i+1);
         }
         gTools().AddRawLine( neuronnode, weights.str().c_str() );
      }
   }
   void* tempnode = gTools().AddChild(wght, "LayerTemp");
   stringstream temp;
   temp.precision( 16 );
   for (Int_t layer=0; layer<fParam_1.layerm; layer++) {
      temp << std::scientific << fDel_1.temp[layer] << " ";
   }
   gTools().AddRawLine(tempnode, temp.str().c_str() );
}
////////////////////////////////////////////////////////////////////////////////
/// read weights from xml file

void TMVA::MethodCFMlpANN::ReadWeightsFromXML( void* wghtnode )
{
   gTools().ReadAttr( wghtnode, "NLayers",fParam_1.layerm );
   void* minmaxnode = gTools().GetChild(wghtnode);
   const char* minmaxcontent = gTools().GetContent(minmaxnode);
   stringstream content(minmaxcontent);
   for (UInt_t ivar=0; ivar<GetNvar(); ivar++)
      content >> fVarn_1.xmin[ivar] >> fVarn_1.xmax[ivar];
   if (fYNN != 0) {
      for (Int_t i=0; i<fNlayers; i++) delete[] fYNN[i];
      delete[] fYNN;
      fYNN = 0;
   }
   fYNN = new Double_t*[fParam_1.layerm];
   void *layernode=gTools().GetNextChild(minmaxnode);
   const char* neuronscontent = gTools().GetContent(layernode);
   stringstream ncontent(neuronscontent);
   for (Int_t layer=0; layer<fParam_1.layerm; layer++) {
      // read number of neurons for each layer;
      // coverity[tainted_data_argument]
      ncontent >> fNeur_1.neuron[layer];
      fYNN[layer] = new Double_t[fNeur_1.neuron[layer]];
   }
   for (Int_t layer=1; layer<fParam_1.layerm; layer++) {
      layernode=gTools().GetNextChild(layernode);
      void* neuronnode=NULL;
      neuronnode = gTools().GetChild(layernode);
      for (Int_t neuron=0; neuron<fNeur_1.neuron[layer]; neuron++) {
         const char* neuronweights = gTools().GetContent(neuronnode);
         stringstream weights(neuronweights);
         weights >> Ww_ref(fNeur_1.ww, layer+1, neuron+1);
         for (Int_t i=0; i<fNeur_1.neuron[layer-1]; i++) {
            weights >> W_ref(fNeur_1.w, layer+1, neuron+1, i+1);
         }
         neuronnode=gTools().GetNextChild(neuronnode);
      }
   }
   void* tempnode=gTools().GetNextChild(layernode);
   const char* temp = gTools().GetContent(tempnode);
   stringstream t(temp);
   for (Int_t layer=0; layer<fParam_1.layerm; layer++) {
      t >> fDel_1.temp[layer];
   }
   fNlayers = fParam_1.layerm;
}

////////////////////////////////////////////////////////////////////////////////
/// write the weights of the neural net

void TMVA::MethodCFMlpANN::PrintWeights( std::ostream & o ) const
{
   // write number of variables and classes
   o << "Number of vars " << fParam_1.nvar << std::endl;
   o << "Output nodes " << fParam_1.lclass << std::endl;

   // write extrema of input variables
   for (Int_t ivar=0; ivar<fParam_1.nvar; ivar++)
      o << "Var " << ivar << " [" << fVarn_1.xmin[ivar] << " - " << fVarn_1.xmax[ivar] << "]" << std::endl;

   // write number of layers (sum of: input + output + hidden)
   o << "Number of layers " << fParam_1.layerm << std::endl;

   o << "Nodes per layer ";
   for (Int_t layer=0; layer<fParam_1.layerm; layer++)
      // write number of neurons for each layer
      o << fNeur_1.neuron[layer] << " ";
   o << std::endl;

   // write weights
   for (Int_t layer=1; layer<=fParam_1.layerm-1; layer++) {

      Int_t nq = fNeur_1.neuron[layer]/10;
      Int_t nr = fNeur_1.neuron[layer] - nq*10;

      Int_t kk(0);
      if (nr==0) kk = nq;
      else       kk = nq+1;

      for (Int_t k=1; k<=kk; k++) {
         Int_t jmin = 10*k - 9;
         Int_t jmax = 10*k;
         Int_t i, j;
         if (fNeur_1.neuron[layer]<jmax) jmax = fNeur_1.neuron[layer];
         for (j=jmin; j<=jmax; j++) {

            //o << fNeur_1.ww[j*max_nLayers_ + layer - 6] << " ";
            o << Ww_ref(fNeur_1.ww, layer+1, j) << " ";

         }
         o << std::endl;
         //for (i=1; i<=fNeur_1.neuron[layer-1]; i++) {
         for (i=1; i<=fNeur_1.neuron[layer-1]; i++) {
            for (j=jmin; j<=jmax; j++) {
               // o << fNeur_1.w[(i*max_nNodes_ + j)*max_nLayers_ + layer - 186] << " ";
               o << W_ref(fNeur_1.w, layer+1, j, i) << " ";
            }
            o << std::endl;
         }

         // skip two empty lines
         o << std::endl;
      }
   }
   for (Int_t layer=0; layer<fParam_1.layerm; layer++) {
      o << "Del.temp in layer " << layer << " : " << fDel_1.temp[layer] << std::endl;
   }
}

////////////////////////////////////////////////////////////////////////////////

void TMVA::MethodCFMlpANN::MakeClassSpecific( std::ostream& fout, const TString& className ) const
{
   // write specific classifier response
   fout << "   // not implemented for class: \"" << className << "\"" << std::endl;
   fout << "};" << std::endl;
}

////////////////////////////////////////////////////////////////////////////////
/// write specific classifier response for header

void TMVA::MethodCFMlpANN::MakeClassSpecificHeader( std::ostream& , const TString& ) const
{
}

////////////////////////////////////////////////////////////////////////////////
/// get help message text
///
/// typical length of text line:
///         "|--------------------------------------------------------------|"

void TMVA::MethodCFMlpANN::GetHelpMessage() const
{
   Log() << Endl;
   Log() << gTools().Color("bold") << "--- Short description:" << gTools().Color("reset") << Endl;
   Log() << Endl;
   Log() << "<None>" << Endl;
   Log() << Endl;
   Log() << gTools().Color("bold") << "--- Performance optimisation:" << gTools().Color("reset") << Endl;
   Log() << Endl;
   Log() << "<None>" << Endl;
   Log() << Endl;
   Log() << gTools().Color("bold") << "--- Performance tuning via configuration options:" << gTools().Color("reset") << Endl;
   Log() << Endl;
   Log() << "<None>" << Endl;
}