ROOT 6.18/05 Reference Guide
MethodDNN.cxx
1// @(#)root/tmva $Id$
2// Author: Peter Speckmayer
3
4/**********************************************************************************
5 * Project: TMVA - a Root-integrated toolkit for multivariate data analysis *
6 * Package: TMVA *
7 * Class : MethodDNN *
8 * Web : http://tmva.sourceforge.net *
9 * *
10 * Description: *
11 * A neural network implementation *
12 * *
13 * Authors (alphabetical): *
14 * Simon Pfreundschuh <s.pfreundschuh@gmail.com> - CERN, Switzerland *
15 * Peter Speckmayer <peter.speckmayer@gmx.ch> - CERN, Switzerland *
16 * *
17 * Copyright (c) 2005-2015: *
18 * CERN, Switzerland *
19 * U. of Victoria, Canada *
20 * MPI-K Heidelberg, Germany *
21 * U. of Bonn, Germany *
22 * *
23 * Redistribution and use in source and binary forms, with or without *
24 * modification, are permitted according to the terms listed in LICENSE *
25 * (http://tmva.sourceforge.net/LICENSE) *
26 **********************************************************************************/
27
28/*! \class TMVA::MethodDNN
29\ingroup TMVA
30Deep Neural Network Implementation.
31*/
32
33#include "TMVA/MethodDNN.h"
34
35#include "TString.h"
36#include "TTree.h"
37#include "TFile.h"
38#include "TFormula.h"
39
40#include "TMVA/ClassifierFactory.h"
41#include "TMVA/Configurable.h"
42#include "TMVA/IMethod.h"
43#include "TMVA/MsgLogger.h"
44#include "TMVA/MethodBase.h"
45#include "TMVA/Timer.h"
46#include "TMVA/Types.h"
47#include "TMVA/Tools.h"
48#include "TMVA/Config.h"
49#include "TMVA/Ranking.h"
50
51#include "TMVA/DNN/Net.h"
53
54#include "TMVA/NeuralNet.h"
55#include "TMVA/Monitoring.h"
56
57#include <algorithm>
58#include <iostream>
59#include <string>
60#include <iomanip>
61
62REGISTER_METHOD(DNN)
63
64ClassImp(TMVA::MethodDNN);
65
66namespace TMVA
67{
68 using namespace DNN;
69
70 ////////////////////////////////////////////////////////////////////////////////
71 /// standard constructor
72
73 TMVA::MethodDNN::MethodDNN(const TString &jobName, const TString &methodTitle, DataSetInfo &theData,
74 const TString &theOption)
75 : MethodBase(jobName, Types::kDNN, methodTitle, theData, theOption), fWeightInitialization(), fOutputFunction(),
76 fLayoutString(), fErrorStrategy(), fTrainingStrategyString(), fWeightInitializationString(),
77 fArchitectureString(), fTrainingSettings(), fResume(false), fSettings()
78 {
79}
80
81////////////////////////////////////////////////////////////////////////////////
82/// constructor from a weight file
83
84TMVA::MethodDNN::MethodDNN(DataSetInfo& theData,
85 const TString& theWeightFile)
86 : MethodBase( Types::kDNN, theData, theWeightFile),
87 fWeightInitialization(), fOutputFunction(), fLayoutString(), fErrorStrategy(),
88 fTrainingStrategyString(), fWeightInitializationString(), fArchitectureString(),
89 fTrainingSettings(), fResume(false), fSettings()
90{
91 fWeightInitialization = DNN::EInitialization::kGauss;
92 fOutputFunction = DNN::EOutputFunction::kSigmoid;
93}
94
95////////////////////////////////////////////////////////////////////////////////
96/// destructor
97
99{
102}
103
104////////////////////////////////////////////////////////////////////////////////
105 /// The DNN can handle binary classification (two classes), multiclass
106 /// classification, and regression with one regression target
107
108Bool_t TMVA::MethodDNN::HasAnalysisType(Types::EAnalysisType type,
109                                        UInt_t numberClasses,
110 UInt_t /*numberTargets*/ )
111{
112 if (type == Types::kClassification && numberClasses == 2 ) return kTRUE;
113 if (type == Types::kMulticlass ) return kTRUE;
114 if (type == Types::kRegression ) return kTRUE;
115
116 return kFALSE;
117}
118
119////////////////////////////////////////////////////////////////////////////////
120/// default initializations
121
122void TMVA::MethodDNN::Init() {}
123
124////////////////////////////////////////////////////////////////////////////////
125/// Options to be set in the option string:
126///
127/// - LearningRate <float> DNN learning rate parameter.
128/// - DecayRate <float> Decay rate for learning parameter.
129/// - TestRate <int> Period of validation set error computation.
130 /// - BatchSize <int> Number of events per batch.
131///
132/// - ValidationSize <string> How many events to use for validation. "0.2"
133/// or "20%" indicates that a fifth of the
134/// training data should be used. "100"
135/// indicates that 100 events should be used.
136
137void TMVA::MethodDNN::DeclareOptions()
138{
139
140 DeclareOptionRef(fLayoutString="SOFTSIGN|(N+100)*2,LINEAR",
141 "Layout",
142 "Layout of the network.");
143
144 DeclareOptionRef(fValidationSize = "20%", "ValidationSize",
145 "Part of the training data to use for "
146 "validation. Specify as 0.2 or 20% to use a "
147 "fifth of the data set as validation set. "
148 "Specify as 100 to use exactly 100 events. "
149 "(Default: 20%)");
150
151 DeclareOptionRef(fErrorStrategy="CROSSENTROPY",
152 "ErrorStrategy",
153 "Loss function: Mean squared error (regression)"
154 " or cross entropy (binary classification).");
155 AddPreDefVal(TString("CROSSENTROPY"));
156 AddPreDefVal(TString("SUMOFSQUARES"));
157 AddPreDefVal(TString("MUTUALEXCLUSIVE"));
158
159 DeclareOptionRef(fWeightInitializationString="XAVIER",
160 "WeightInitialization",
161 "Weight initialization strategy");
162 AddPreDefVal(TString("XAVIER"));
163 AddPreDefVal(TString("XAVIERUNIFORM"));
164
165 DeclareOptionRef(fArchitectureString = "CPU", "Architecture", "Which architecture to perform the training on.");
166 AddPreDefVal(TString("STANDARD"));
167 AddPreDefVal(TString("CPU"));
168 AddPreDefVal(TString("GPU"));
169 AddPreDefVal(TString("OPENCL"));
170
171 DeclareOptionRef(
172 fTrainingStrategyString = "LearningRate=1e-1,"
173 "Momentum=0.3,"
174 "Repetitions=3,"
175 "ConvergenceSteps=50,"
176 "BatchSize=30,"
177 "TestRepetitions=7,"
178 "WeightDecay=0.0,"
179 "Renormalize=L2,"
180 "DropConfig=0.0,"
181 "DropRepetitions=5|LearningRate=1e-4,"
182 "Momentum=0.3,"
183 "Repetitions=3,"
184 "ConvergenceSteps=50,"
185 "BatchSize=20,"
186 "TestRepetitions=7,"
187 "WeightDecay=0.001,"
188 "Renormalize=L2,"
189 "DropConfig=0.0+0.5+0.5,"
190 "DropRepetitions=5,"
191 "Multithreading=True",
192 "TrainingStrategy",
193 "Defines the training strategies.");
194}
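// ---------------------------------------------------------------------------
// Illustrative example (not part of the original source): the options declared
// above are passed as a single configuration string when booking the method
// through the TMVA Factory. `factory` and `dataloader` are assumed to have
// been created elsewhere, and the concrete values are only a sketch.
//
//    factory->BookMethod(dataloader, TMVA::Types::kDNN, "DNN",
//       "Layout=TANH|(N+30)*2,TANH|(N+30),LINEAR:"
//       "ErrorStrategy=CROSSENTROPY:WeightInitialization=XAVIER:"
//       "Architecture=CPU:ValidationSize=20%:"
//       "TrainingStrategy=LearningRate=1e-2,Momentum=0.9,ConvergenceSteps=20,"
//       "BatchSize=100,TestRepetitions=1,Multithreading=True");
// ---------------------------------------------------------------------------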
195
196////////////////////////////////////////////////////////////////////////////////
197/// parse layout specification string and return a vector, each entry
198/// containing the number of neurons to go in each successive layer
199
200auto TMVA::MethodDNN::ParseLayoutString(TString layoutString)
201   -> LayoutVector_t
202{
203 LayoutVector_t layout;
204 const TString layerDelimiter(",");
205 const TString subDelimiter("|");
206
207 const size_t inputSize = GetNvar();
208
209 TObjArray* layerStrings = layoutString.Tokenize(layerDelimiter);
210 TIter nextLayer (layerStrings);
211 TObjString* layerString = (TObjString*)nextLayer ();
212
213 for (; layerString != nullptr; layerString = (TObjString*) nextLayer()) {
214 int numNodes = 0;
215      EActivationFunction activationFunction = EActivationFunction::kTanh;
216
217 TObjArray* subStrings = layerString->GetString().Tokenize(subDelimiter);
218 TIter nextToken (subStrings);
219 TObjString* token = (TObjString *) nextToken();
220 int idxToken = 0;
221 for (; token != nullptr; token = (TObjString *) nextToken()) {
222 switch (idxToken)
223 {
224 case 0:
225 {
226 TString strActFnc (token->GetString ());
227 if (strActFnc == "RELU") {
228 activationFunction = DNN::EActivationFunction::kRelu;
229 } else if (strActFnc == "TANH") {
230 activationFunction = DNN::EActivationFunction::kTanh;
231 } else if (strActFnc == "SYMMRELU") {
232 activationFunction = DNN::EActivationFunction::kSymmRelu;
233 } else if (strActFnc == "SOFTSIGN") {
234 activationFunction = DNN::EActivationFunction::kSoftSign;
235 } else if (strActFnc == "SIGMOID") {
236 activationFunction = DNN::EActivationFunction::kSigmoid;
237 } else if (strActFnc == "LINEAR") {
238 activationFunction = DNN::EActivationFunction::kIdentity;
239 } else if (strActFnc == "GAUSS") {
240 activationFunction = DNN::EActivationFunction::kGauss;
241 }
242 }
243 break;
244 case 1: // number of nodes
245 {
246 TString strNumNodes (token->GetString ());
247 TString strN ("x");
248 strNumNodes.ReplaceAll ("N", strN);
249 strNumNodes.ReplaceAll ("n", strN);
250 TFormula fml ("tmp",strNumNodes);
251 numNodes = fml.Eval (inputSize);
252 }
253 break;
254 }
255 ++idxToken;
256 }
257 layout.push_back(std::make_pair(numNodes, activationFunction));
258 }
259 return layout;
260}
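// ---------------------------------------------------------------------------
// Illustrative example (not part of the original source): with 10 input
// variables (so N = 10), the default layout string "SOFTSIGN|(N+100)*2,LINEAR"
// is parsed into
//
//    { {220, EActivationFunction::kSoftSign},   // hidden layer, (10+100)*2 nodes
//      {  0, EActivationFunction::kIdentity} }  // last entry; its width is later
//                                               // replaced by the output size
//                                               // in ProcessOptions()
// ---------------------------------------------------------------------------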
261
262////////////////////////////////////////////////////////////////////////////////
263/// parse key value pairs in blocks -> return vector of blocks with map of key value pairs
264
265auto TMVA::MethodDNN::ParseKeyValueString(TString parseString,
266                                          TString blockDelim,
267 TString tokenDelim)
268 -> KeyValueVector_t
269{
270 KeyValueVector_t blockKeyValues;
271 const TString keyValueDelim ("=");
272
273 TObjArray* blockStrings = parseString.Tokenize (blockDelim);
274 TIter nextBlock (blockStrings);
275 TObjString* blockString = (TObjString *) nextBlock();
276
277 for (; blockString != nullptr; blockString = (TObjString *) nextBlock())
278 {
279 blockKeyValues.push_back (std::map<TString,TString>());
280 std::map<TString,TString>& currentBlock = blockKeyValues.back ();
281
282 TObjArray* subStrings = blockString->GetString ().Tokenize (tokenDelim);
283 TIter nextToken (subStrings);
284 TObjString* token = (TObjString*)nextToken ();
285
286 for (; token != nullptr; token = (TObjString *)nextToken())
287 {
288 TString strKeyValue (token->GetString ());
289 int delimPos = strKeyValue.First (keyValueDelim.Data ());
290 if (delimPos <= 0)
291 continue;
292
293 TString strKey = TString (strKeyValue (0, delimPos));
294 strKey.ToUpper();
295 TString strValue = TString (strKeyValue (delimPos+1, strKeyValue.Length ()));
296
297      strKey = strKey.Strip (TString::kBoth, ' ');
298      strValue = strValue.Strip (TString::kBoth, ' ');
299
300 currentBlock.insert (std::make_pair (strKey, strValue));
301 }
302 }
303 return blockKeyValues;
304}
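// ---------------------------------------------------------------------------
// Illustrative example (not part of the original source): a training-strategy
// string such as "LearningRate=1e-1,BatchSize=30|LearningRate=1e-4,BatchSize=20"
// is split at '|' into blocks and at ',' into key=value tokens, giving a
// KeyValueVector_t with two maps (keys are upper-cased, values kept verbatim):
//
//    { {"LEARNINGRATE", "1e-1"}, {"BATCHSIZE", "30"} }
//    { {"LEARNINGRATE", "1e-4"}, {"BATCHSIZE", "20"} }
// ---------------------------------------------------------------------------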
305
306////////////////////////////////////////////////////////////////////////////////
307
308TString fetchValue (const std::map<TString, TString>& keyValueMap, TString key)
309{
310 key.ToUpper ();
311 std::map<TString, TString>::const_iterator it = keyValueMap.find (key);
312 if (it == keyValueMap.end()) {
313 return TString ("");
314 }
315 return it->second;
316}
317
318////////////////////////////////////////////////////////////////////////////////
319
320template <typename T>
321T fetchValue(const std::map<TString,TString>& keyValueMap,
322 TString key,
323 T defaultValue);
324
325////////////////////////////////////////////////////////////////////////////////
326
327template <>
328int fetchValue(const std::map<TString,TString>& keyValueMap,
329 TString key,
330 int defaultValue)
331{
332 TString value (fetchValue (keyValueMap, key));
333 if (value == "") {
334 return defaultValue;
335 }
336 return value.Atoi ();
337}
338
339////////////////////////////////////////////////////////////////////////////////
340
341template <>
342double fetchValue (const std::map<TString,TString>& keyValueMap,
343 TString key, double defaultValue)
344{
345 TString value (fetchValue (keyValueMap, key));
346 if (value == "") {
347 return defaultValue;
348 }
349 return value.Atof ();
350}
351
352////////////////////////////////////////////////////////////////////////////////
353
354template <>
355TString fetchValue (const std::map<TString,TString>& keyValueMap,
356 TString key, TString defaultValue)
357{
358 TString value (fetchValue (keyValueMap, key));
359 if (value == "") {
360 return defaultValue;
361 }
362 return value;
363}
364
365////////////////////////////////////////////////////////////////////////////////
366
367template <>
368bool fetchValue (const std::map<TString,TString>& keyValueMap,
369 TString key, bool defaultValue)
370{
371 TString value (fetchValue (keyValueMap, key));
372 if (value == "") {
373 return defaultValue;
374 }
375 value.ToUpper ();
376 if (value == "TRUE" || value == "T" || value == "1") {
377 return true;
378 }
379 return false;
380}
381
382////////////////////////////////////////////////////////////////////////////////
383
384template <>
385std::vector<double> fetchValue(const std::map<TString, TString> & keyValueMap,
386 TString key,
387 std::vector<double> defaultValue)
388{
389 TString parseString (fetchValue (keyValueMap, key));
390 if (parseString == "") {
391 return defaultValue;
392 }
393 parseString.ToUpper ();
394 std::vector<double> values;
395
396 const TString tokenDelim ("+");
397 TObjArray* tokenStrings = parseString.Tokenize (tokenDelim);
398 TIter nextToken (tokenStrings);
399 TObjString* tokenString = (TObjString*)nextToken ();
400 for (; tokenString != NULL; tokenString = (TObjString*)nextToken ()) {
401 std::stringstream sstr;
402 double currentValue;
403 sstr << tokenString->GetString ().Data ();
404 sstr >> currentValue;
405 values.push_back (currentValue);
406 }
407 return values;
408}
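// Illustrative example (not part of the original source): for the DropConfig
// key, a value of "0.0+0.5+0.5" is split at '+' by this specialization and
// returned as the vector {0.0, 0.5, 0.5}, i.e. one dropout fraction per layer.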
409
410////////////////////////////////////////////////////////////////////////////////
411
412void TMVA::MethodDNN::ProcessOptions()
413{
414 if (IgnoreEventsWithNegWeightsInTraining()) {
415 Log() << kINFO
416            << "Will ignore events with negative weights in training!"
417 << Endl;
418 }
419
420 if (fArchitectureString == "STANDARD") {
421 Log() << kERROR << "The STANDARD architecture has been deprecated. "
422                      "Please use Architecture=CPU or Architecture=GPU. "
423 "See the TMVA Users' Guide for instructions if you "
424 "encounter problems."
425 << Endl;
426 Log() << kFATAL << "The STANDARD architecture has been deprecated. "
427                      "Please use Architecture=CPU or Architecture=GPU. "
428 "See the TMVA Users' Guide for instructions if you "
429 "encounter problems."
430 << Endl;
431 }
432
433 if (fArchitectureString == "OPENCL") {
434 Log() << kERROR << "The OPENCL architecture has not been implemented yet. "
435                      "Please use Architecture=CPU or Architecture=GPU for the "
436 "time being. See the TMVA Users' Guide for instructions "
437 "if you encounter problems."
438 << Endl;
439 Log() << kFATAL << "The OPENCL architecture has not been implemented yet. "
440                      "Please use Architecture=CPU or Architecture=GPU for the "
441 "time being. See the TMVA Users' Guide for instructions "
442 "if you encounter problems."
443 << Endl;
444 }
445
446 if (fArchitectureString == "GPU") {
447#ifndef DNNCUDA // Included only if DNNCUDA flag is _not_ set.
448 Log() << kERROR << "CUDA backend not enabled. Please make sure "
449 "you have CUDA installed and it was successfully "
450 "detected by CMAKE."
451 << Endl;
452 Log() << kFATAL << "CUDA backend not enabled. Please make sure "
453 "you have CUDA installed and it was successfully "
454 "detected by CMAKE."
455 << Endl;
456#endif // DNNCUDA
457 }
458
459 if (fArchitectureString == "CPU") {
460#ifndef DNNCPU // Included only if DNNCPU flag is _not_ set.
461 Log() << kERROR << "Multi-core CPU backend not enabled. Please make sure "
462 "you have a BLAS implementation and it was successfully "
463                      "detected by CMake, and that the imt CMake flag is set."
464 << Endl;
465 Log() << kFATAL << "Multi-core CPU backend not enabled. Please make sure "
466 "you have a BLAS implementation and it was successfully "
467                      "detected by CMake, and that the imt CMake flag is set."
468 << Endl;
469#endif // DNNCPU
470 }
471
472 //
473 // Set network structure.
474 //
475
476 fLayout = TMVA::MethodDNN::ParseLayoutString (fLayoutString);
477 size_t inputSize = GetNVariables ();
478 size_t outputSize = 1;
479 if (fAnalysisType == Types::kRegression && GetNTargets() != 0) {
480 outputSize = GetNTargets();
481 } else if (fAnalysisType == Types::kMulticlass && DataInfo().GetNClasses() >= 2) {
482 outputSize = DataInfo().GetNClasses();
483 }
484
485 fNet.SetBatchSize(1);
486 fNet.SetInputWidth(inputSize);
487
488 auto itLayout = std::begin (fLayout);
489 auto itLayoutEnd = std::end (fLayout)-1;
490 for ( ; itLayout != itLayoutEnd; ++itLayout) {
491 fNet.AddLayer((*itLayout).first, (*itLayout).second);
492 }
493 fNet.AddLayer(outputSize, EActivationFunction::kIdentity);
494
495 //
496 // Loss function and output.
497 //
498
499 fOutputFunction = EOutputFunction::kSigmoid;
500 if (fAnalysisType == Types::kClassification)
501 {
502 if (fErrorStrategy == "SUMOFSQUARES") {
503 fNet.SetLossFunction(ELossFunction::kMeanSquaredError);
504 }
505 if (fErrorStrategy == "CROSSENTROPY") {
506 fNet.SetLossFunction(ELossFunction::kCrossEntropy);
507 }
508 fOutputFunction = EOutputFunction::kSigmoid;
509 } else if (fAnalysisType == Types::kRegression) {
510 if (fErrorStrategy != "SUMOFSQUARES") {
511 Log () << kWARNING << "For regression only SUMOFSQUARES is a valid "
512 << " neural net error function. Setting error function to "
513 << " SUMOFSQUARES now." << Endl;
514 }
515 fNet.SetLossFunction(ELossFunction::kMeanSquaredError);
516 fOutputFunction = EOutputFunction::kIdentity;
517 } else if (fAnalysisType == Types::kMulticlass) {
518 if (fErrorStrategy == "SUMOFSQUARES") {
519 fNet.SetLossFunction(ELossFunction::kMeanSquaredError);
520 }
521 if (fErrorStrategy == "CROSSENTROPY") {
522 fNet.SetLossFunction(ELossFunction::kCrossEntropy);
523 }
524 if (fErrorStrategy == "MUTUALEXCLUSIVE") {
525 fNet.SetLossFunction(ELossFunction::kSoftmaxCrossEntropy);
526 }
527 fOutputFunction = EOutputFunction::kSoftmax;
528 }
529
530 //
531 // Initialization
532 //
533
534 if (fWeightInitializationString == "XAVIER") {
535 fWeightInitialization = DNN::EInitialization::kGauss;
536 }
537 else if (fWeightInitializationString == "XAVIERUNIFORM") {
538 fWeightInitialization = DNN::EInitialization::kUniform;
539 }
540 else {
541 fWeightInitialization = DNN::EInitialization::kGauss;
542 }
543
544 //
545 // Training settings.
546 //
547
548 // Force validation of the ValidationSize option
549 GetNumValidationSamples();
550
551 KeyValueVector_t strategyKeyValues = ParseKeyValueString(fTrainingStrategyString,
552 TString ("|"),
553 TString (","));
554
555 std::cout << "Parsed Training DNN string " << fTrainingStrategyString << std::endl;
556   std::cout << "String has size " << strategyKeyValues.size() << std::endl;
557 for (auto& block : strategyKeyValues) {
558 TTrainingSettings settings;
559
560 settings.convergenceSteps = fetchValue(block, "ConvergenceSteps", 100);
561 settings.batchSize = fetchValue(block, "BatchSize", 30);
562 settings.testInterval = fetchValue(block, "TestRepetitions", 7);
563 settings.weightDecay = fetchValue(block, "WeightDecay", 0.0);
564 settings.learningRate = fetchValue(block, "LearningRate", 1e-5);
565 settings.momentum = fetchValue(block, "Momentum", 0.3);
566 settings.dropoutProbabilities = fetchValue(block, "DropConfig",
567 std::vector<Double_t>());
568
569 TString regularization = fetchValue(block, "Regularization",
570 TString ("NONE"));
571      if (regularization == "L1") {
572         settings.regularization = DNN::ERegularization::kL1;
573      } else if (regularization == "L2") {
574         settings.regularization = DNN::ERegularization::kL2;
575      } else {
576         settings.regularization = DNN::ERegularization::kNone;
577      }
578
579 TString strMultithreading = fetchValue(block, "Multithreading",
580 TString ("True"));
581 if (strMultithreading.BeginsWith ("T")) {
582 settings.multithreading = true;
583 } else {
584 settings.multithreading = false;
585 }
586
587 fTrainingSettings.push_back(settings);
588 }
589}
590
591////////////////////////////////////////////////////////////////////////////////
592/// Validation of the ValidationSize option. Allowed formats are 20%, 0.2 and
593/// 100 etc.
594/// - 20% and 0.2 selects 20% of the training set as validation data.
595/// - 100 selects 100 events as the validation data.
596///
597/// @return number of samples in validation set
598///
599
600UInt_t TMVA::MethodDNN::GetNumValidationSamples()
601{
602 Int_t nValidationSamples = 0;
603 UInt_t trainingSetSize = GetEventCollection(Types::kTraining).size();
604
605 // Parsing + Validation
606 // --------------------
607 if (fValidationSize.EndsWith("%")) {
608 // Relative spec. format 20%
609 TString intValStr = TString(fValidationSize.Strip(TString::kTrailing, '%'));
610
611 if (intValStr.IsFloat()) {
612 Double_t valSizeAsDouble = fValidationSize.Atof() / 100.0;
613 nValidationSamples = GetEventCollection(Types::kTraining).size() * valSizeAsDouble;
614 } else {
615 Log() << kFATAL << "Cannot parse number \"" << fValidationSize
616 << "\". Expected string like \"20%\" or \"20.0%\"." << Endl;
617 }
618 } else if (fValidationSize.IsFloat()) {
619 Double_t valSizeAsDouble = fValidationSize.Atof();
620
621 if (valSizeAsDouble < 1.0) {
622 // Relative spec. format 0.2
623 nValidationSamples = GetEventCollection(Types::kTraining).size() * valSizeAsDouble;
624 } else {
625 // Absolute spec format 100 or 100.0
626 nValidationSamples = valSizeAsDouble;
627 }
628 } else {
629 Log() << kFATAL << "Cannot parse number \"" << fValidationSize << "\". Expected string like \"0.2\" or \"100\"."
630 << Endl;
631 }
632
633 // Value validation
634 // ----------------
635 if (nValidationSamples < 0) {
636 Log() << kFATAL << "Validation size \"" << fValidationSize << "\" is negative." << Endl;
637 }
638
639 if (nValidationSamples == 0) {
640 Log() << kFATAL << "Validation size \"" << fValidationSize << "\" is zero." << Endl;
641 }
642
643 if (nValidationSamples >= (Int_t)trainingSetSize) {
644 Log() << kFATAL << "Validation size \"" << fValidationSize
645 << "\" is larger than or equal in size to training set (size=\"" << trainingSetSize << "\")." << Endl;
646 }
647
648 return nValidationSamples;
649}
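// Illustrative example (not part of the original source): with 10000 events in
// the training collection, ValidationSize="20%" and ValidationSize="0.2" both
// give 2000 validation events, while ValidationSize="100" gives exactly 100;
// zero, negative, or >= 10000 values are rejected with a fatal error.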
650
651////////////////////////////////////////////////////////////////////////////////
652
653void TMVA::MethodDNN::Train()
654{
655 if (fInteractive && fInteractive->NotInitialized()){
656 std::vector<TString> titles = {"Error on training set", "Error on test set"};
657 fInteractive->Init(titles);
658 // JsMVA progress bar maximum (100%)
659 fIPyMaxIter = 100;
660 }
661
662 for (TTrainingSettings & settings : fTrainingSettings) {
663 size_t nValidationSamples = GetNumValidationSamples();
664 size_t nTrainingSamples = GetEventCollection(Types::kTraining).size() - nValidationSamples;
665 size_t nTestSamples = nValidationSamples;
666
667 if (nTrainingSamples < settings.batchSize or
668 nValidationSamples < settings.batchSize or
669 nTestSamples < settings.batchSize) {
670 Log() << kFATAL << "Number of samples in the datasets are train: "
671 << nTrainingSamples << " valid: " << nValidationSamples
672 << " test: " << nTestSamples << ". "
673 << "One of these is smaller than the batch size of "
674 << settings.batchSize << ". Please increase the batch"
675 << " size to be at least the same size as the smallest"
676 << " of these values." << Endl;
677 }
678 }
679
680 if (fArchitectureString == "GPU") {
681 TrainGpu();
682 if (!fExitFromTraining) fIPyMaxIter = fIPyCurrentIter;
683 ExitFromTraining();
684 return;
685   } else if (fArchitectureString == "OPENCL") {
686 Log() << kFATAL << "OpenCL backend not yet supported." << Endl;
687 return;
688 } else if (fArchitectureString == "CPU") {
689 TrainCpu();
690 if (!fExitFromTraining) fIPyMaxIter = fIPyCurrentIter;
691 ExitFromTraining();
692 return;
693 }
694
695   Log() << kINFO << "Using Standard Implementation." << Endl;
696
697 std::vector<Pattern> trainPattern;
698 std::vector<Pattern> testPattern;
699
700 size_t nValidationSamples = GetNumValidationSamples();
701 size_t nTrainingSamples = GetEventCollection(Types::kTraining).size() - nValidationSamples;
702
703 const std::vector<TMVA::Event *> &allData = GetEventCollection(Types::kTraining);
704 const std::vector<TMVA::Event *> eventCollectionTraining{allData.begin(), allData.begin() + nTrainingSamples};
705 const std::vector<TMVA::Event *> eventCollectionTesting{allData.begin() + nTrainingSamples, allData.end()};
706
707 for (auto &event : eventCollectionTraining) {
708 const std::vector<Float_t>& values = event->GetValues();
709 if (fAnalysisType == Types::kClassification) {
710 double outputValue = event->GetClass () == 0 ? 0.9 : 0.1;
711 trainPattern.push_back(Pattern (values.begin(),
712 values.end(),
713 outputValue,
714 event->GetWeight()));
715 trainPattern.back().addInput(1.0);
716 } else if (fAnalysisType == Types::kMulticlass) {
717 std::vector<Float_t> oneHot(DataInfo().GetNClasses(), 0.0);
718 oneHot[event->GetClass()] = 1.0;
719 trainPattern.push_back(Pattern (values.begin(), values.end(),
720 oneHot.cbegin(), oneHot.cend(),
721 event->GetWeight()));
722 trainPattern.back().addInput(1.0);
723 } else {
724 const std::vector<Float_t>& targets = event->GetTargets ();
725 trainPattern.push_back(Pattern(values.begin(),
726 values.end(),
727 targets.begin(),
728 targets.end(),
729 event->GetWeight ()));
730 trainPattern.back ().addInput (1.0); // bias node
731 }
732 }
733
734 for (auto &event : eventCollectionTesting) {
735 const std::vector<Float_t>& values = event->GetValues();
736 if (fAnalysisType == Types::kClassification) {
737 double outputValue = event->GetClass () == 0 ? 0.9 : 0.1;
738 testPattern.push_back(Pattern (values.begin(),
739 values.end(),
740 outputValue,
741 event->GetWeight()));
742 testPattern.back().addInput(1.0);
743 } else if (fAnalysisType == Types::kMulticlass) {
744 std::vector<Float_t> oneHot(DataInfo().GetNClasses(), 0.0);
745 oneHot[event->GetClass()] = 1.0;
746 testPattern.push_back(Pattern (values.begin(), values.end(),
747 oneHot.cbegin(), oneHot.cend(),
748 event->GetWeight()));
749 testPattern.back().addInput(1.0);
750 } else {
751 const std::vector<Float_t>& targets = event->GetTargets ();
752 testPattern.push_back(Pattern(values.begin(),
753 values.end(),
754 targets.begin(),
755 targets.end(),
756 event->GetWeight ()));
757 testPattern.back ().addInput (1.0); // bias node
758 }
759 }
760
761 TMVA::DNN::Net net;
762 std::vector<double> weights;
763
764 net.SetIpythonInteractive(fInteractive, &fExitFromTraining, &fIPyMaxIter, &fIPyCurrentIter);
765
766 net.setInputSize(fNet.GetInputWidth() + 1);
767 net.setOutputSize(fNet.GetOutputWidth() + 1);
768
769 for (size_t i = 0; i < fNet.GetDepth(); i++) {
770 EActivationFunction f = fNet.GetLayer(i).GetActivationFunction();
771 EnumFunction g = EnumFunction::LINEAR;
772 switch(f) {
773 case EActivationFunction::kIdentity: g = EnumFunction::LINEAR; break;
774 case EActivationFunction::kRelu: g = EnumFunction::RELU; break;
775 case EActivationFunction::kSigmoid: g = EnumFunction::SIGMOID; break;
776 case EActivationFunction::kTanh: g = EnumFunction::TANH; break;
777 case EActivationFunction::kSymmRelu: g = EnumFunction::SYMMRELU; break;
778 case EActivationFunction::kSoftSign: g = EnumFunction::SOFTSIGN; break;
779 case EActivationFunction::kGauss: g = EnumFunction::GAUSS; break;
780 }
781 if (i < fNet.GetDepth() - 1) {
782 net.addLayer(Layer(fNet.GetLayer(i).GetWidth(), g));
783 } else {
784 ModeOutputValues h = ModeOutputValues::DIRECT;
785 switch(fOutputFunction) {
786 case EOutputFunction::kIdentity: h = ModeOutputValues::DIRECT; break;
787 case EOutputFunction::kSigmoid: h = ModeOutputValues::SIGMOID; break;
788 case EOutputFunction::kSoftmax: h = ModeOutputValues::SOFTMAX; break;
789 }
790 net.addLayer(Layer(fNet.GetLayer(i).GetWidth(), g, h));
791 }
792 }
793
794 switch(fNet.GetLossFunction()) {
795 case ELossFunction::kMeanSquaredError:
796 net.setErrorFunction(ModeErrorFunction::SUMOFSQUARES);
797 break;
798   case ELossFunction::kCrossEntropy:
799      net.setErrorFunction(ModeErrorFunction::CROSSENTROPY);
800 break;
801 case ELossFunction::kSoftmaxCrossEntropy:
802 net.setErrorFunction(ModeErrorFunction::CROSSENTROPY_MUTUALEXCLUSIVE);
803 break;
804 }
805
806   switch(fWeightInitialization) {
807   case DNN::EInitialization::kGauss:
808 net.initializeWeights(WeightInitializationStrategy::XAVIER,
809 std::back_inserter(weights));
810      break;
811   case DNN::EInitialization::kUniform:
812      net.initializeWeights(WeightInitializationStrategy::XAVIERUNIFORM,
813 std::back_inserter(weights));
814 break;
815 default:
816 net.initializeWeights(WeightInitializationStrategy::XAVIER,
817 std::back_inserter(weights));
818 break;
819 }
820
821 int idxSetting = 0;
822 for (auto s : fTrainingSettings) {
823
824      EnumRegularization r = EnumRegularization::NONE;
825      switch(s.regularization) {
826      case ERegularization::kNone: r = EnumRegularization::NONE; break;
827 case ERegularization::kL1: r = EnumRegularization::L1; break;
828 case ERegularization::kL2: r = EnumRegularization::L2; break;
829 }
830
831 Settings * settings = new Settings(TString(), s.convergenceSteps, s.batchSize,
832 s.testInterval, s.weightDecay, r,
833 MinimizerType::fSteepest, s.learningRate,
834 s.momentum, 1, s.multithreading);
835 std::shared_ptr<Settings> ptrSettings(settings);
836 ptrSettings->setMonitoring (0);
837 Log() << kINFO
838 << "Training with learning rate = " << ptrSettings->learningRate ()
839 << ", momentum = " << ptrSettings->momentum ()
840 << ", repetitions = " << ptrSettings->repetitions ()
841 << Endl;
842
843      ptrSettings->setProgressLimits ((idxSetting)*100.0/(fTrainingSettings.size ()),
844                                      (idxSetting+1)*100.0/(fTrainingSettings.size ()));
845
846 const std::vector<double>& dropConfig = ptrSettings->dropFractions ();
847 if (!dropConfig.empty ()) {
848 Log () << kINFO << "Drop configuration" << Endl
849 << " drop repetitions = " << ptrSettings->dropRepetitions()
850 << Endl;
851 }
852
853 int idx = 0;
854 for (auto f : dropConfig) {
855 Log () << kINFO << " Layer " << idx << " = " << f << Endl;
856 ++idx;
857 }
858 Log () << kINFO << Endl;
859
860 DNN::Steepest minimizer(ptrSettings->learningRate(),
861 ptrSettings->momentum(),
862 ptrSettings->repetitions());
863 net.train(weights, trainPattern, testPattern, minimizer, *ptrSettings.get());
864 ptrSettings.reset();
865 Log () << kINFO << Endl;
866 idxSetting++;
867 }
868 size_t weightIndex = 0;
869 for (size_t l = 0; l < fNet.GetDepth(); l++) {
870 auto & layerWeights = fNet.GetLayer(l).GetWeights();
871 for (Int_t j = 0; j < layerWeights.GetNcols(); j++) {
872 for (Int_t i = 0; i < layerWeights.GetNrows(); i++) {
873 layerWeights(i,j) = weights[weightIndex];
874 weightIndex++;
875 }
876 }
877 auto & layerBiases = fNet.GetLayer(l).GetBiases();
878 if (l == 0) {
879 for (Int_t i = 0; i < layerBiases.GetNrows(); i++) {
880 layerBiases(i,0) = weights[weightIndex];
881 weightIndex++;
882 }
883 } else {
884 for (Int_t i = 0; i < layerBiases.GetNrows(); i++) {
885 layerBiases(i,0) = 0.0;
886 }
887 }
888 }
889 if (!fExitFromTraining) fIPyMaxIter = fIPyCurrentIter;
890 ExitFromTraining();
891}
892
893////////////////////////////////////////////////////////////////////////////////
894
895void TMVA::MethodDNN::TrainGpu()
896{
897
898#ifdef DNNCUDA // Included only if DNNCUDA flag is set.
899 Log() << kINFO << "Start of neural network training on GPU." << Endl << Endl;
900
901 size_t nValidationSamples = GetNumValidationSamples();
902 size_t nTrainingSamples = GetEventCollection(Types::kTraining).size() - nValidationSamples;
903 size_t nTestSamples = nValidationSamples;
904
905 Log() << kDEBUG << "Using " << nValidationSamples << " validation samples." << Endl;
906   Log() << kDEBUG << "Using " << nTestSamples << " test samples." << Endl;
907
908 size_t trainingPhase = 1;
909 fNet.Initialize(fWeightInitialization);
910 for (TTrainingSettings & settings : fTrainingSettings) {
911
912 if (fInteractive){
913 fInteractive->ClearGraphs();
914 }
915
916 TNet<TCuda<>> net(settings.batchSize, fNet);
917 net.SetWeightDecay(settings.weightDecay);
918 net.SetRegularization(settings.regularization);
919
920      // Need to convert dropout probabilities to conventions used
921 // by backend implementation.
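      // (Assumed convention: DropConfig specifies the fraction of nodes to drop
      // per layer, while the backend stores the probability of keeping a node,
      // hence the p -> 1 - p conversion below.)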
922 std::vector<Double_t> dropoutVector(settings.dropoutProbabilities);
923 for (auto & p : dropoutVector) {
924 p = 1.0 - p;
925 }
926 net.SetDropoutProbabilities(dropoutVector);
927
928      net.InitializeGradients();
929      auto testNet = net.CreateClone(settings.batchSize);
930
931 Log() << kINFO << "Training phase " << trainingPhase << " of "
932 << fTrainingSettings.size() << ":" << Endl;
933 trainingPhase++;
934
935 using DataLoader_t = TDataLoader<TMVAInput_t, TCuda<>>;
936
937 // Split training data into training and validation set
938 const std::vector<Event *> &allData = GetEventCollection(Types::kTraining);
939 const std::vector<Event *> trainingInputData =
940 std::vector<Event *>(allData.begin(), allData.begin() + nTrainingSamples);
941 const std::vector<Event *> testInputData =
942 std::vector<Event *>(allData.begin() + nTrainingSamples, allData.end());
943
944 if (trainingInputData.size() != nTrainingSamples) {
945 Log() << kFATAL << "Inconsistent training sample size" << Endl;
946 }
947 if (testInputData.size() != nTestSamples) {
948 Log() << kFATAL << "Inconsistent test sample size" << Endl;
949 }
950
951 size_t nThreads = 1;
952 TMVAInput_t trainingTuple = std::tie(trainingInputData, DataInfo());
953 TMVAInput_t testTuple = std::tie(testInputData, DataInfo());
954 DataLoader_t trainingData(trainingTuple, nTrainingSamples,
955 net.GetBatchSize(), net.GetInputWidth(),
956 net.GetOutputWidth(), nThreads);
957 DataLoader_t testData(testTuple, nTestSamples, testNet.GetBatchSize(),
958 net.GetInputWidth(), net.GetOutputWidth(),
959 nThreads);
960 DNN::TGradientDescent<TCuda<>> minimizer(settings.learningRate,
961 settings.convergenceSteps,
962 settings.testInterval);
963
964 std::vector<TNet<TCuda<>>> nets{};
965 std::vector<TBatch<TCuda<>>> batches{};
966 nets.reserve(nThreads);
967 for (size_t i = 0; i < nThreads; i++) {
968 nets.push_back(net);
969 for (size_t j = 0; j < net.GetDepth(); j++)
970 {
971 auto &masterLayer = net.GetLayer(j);
972 auto &layer = nets.back().GetLayer(j);
973 TCuda<>::Copy(layer.GetWeights(),
974 masterLayer.GetWeights());
975 TCuda<>::Copy(layer.GetBiases(),
976 masterLayer.GetBiases());
977 }
978 }
979
980 bool converged = false;
981 size_t stepCount = 0;
982 size_t batchesInEpoch = nTrainingSamples / net.GetBatchSize();
983
984 std::chrono::time_point<std::chrono::system_clock> start, end;
985 start = std::chrono::system_clock::now();
986
987 if (!fInteractive) {
988 Log() << std::setw(10) << "Epoch" << " | "
989 << std::setw(12) << "Train Err."
990 << std::setw(12) << "Test Err."
991 << std::setw(12) << "GFLOP/s"
992 << std::setw(12) << "Conv. Steps" << Endl;
993 std::string separator(62, '-');
994 Log() << separator << Endl;
995 }
996
997 while (!converged)
998 {
999 stepCount++;
1000
1001 // Perform minimization steps for a full epoch.
1002 trainingData.Shuffle();
1003 for (size_t i = 0; i < batchesInEpoch; i += nThreads) {
1004 batches.clear();
1005 for (size_t j = 0; j < nThreads; j++) {
1006 batches.reserve(nThreads);
1007 batches.push_back(trainingData.GetBatch());
1008 }
1009 if (settings.momentum > 0.0) {
1010 minimizer.StepMomentum(net, nets, batches, settings.momentum);
1011 } else {
1012 minimizer.Step(net, nets, batches);
1013 }
1014 }
1015
1016 if ((stepCount % minimizer.GetTestInterval()) == 0) {
1017
1018 // Compute test error.
1019 Double_t testError = 0.0;
1020 for (auto batch : testData) {
1021 auto inputMatrix = batch.GetInput();
1022 auto outputMatrix = batch.GetOutput();
1023 testError += testNet.Loss(inputMatrix, outputMatrix);
1024 }
1025 testError /= (Double_t) (nTestSamples / settings.batchSize);
1026
1027 end = std::chrono::system_clock::now();
1028
1029 // Compute training error.
1030 Double_t trainingError = 0.0;
1031 for (auto batch : trainingData) {
1032 auto inputMatrix = batch.GetInput();
1033 auto outputMatrix = batch.GetOutput();
1034 trainingError += net.Loss(inputMatrix, outputMatrix);
1035 }
1036 trainingError /= (Double_t) (nTrainingSamples / settings.batchSize);
1037
1038 // Compute numerical throughput.
1039 std::chrono::duration<double> elapsed_seconds = end - start;
1040 double seconds = elapsed_seconds.count();
1041 double nFlops = (double) (settings.testInterval * batchesInEpoch);
1042 nFlops *= net.GetNFlops() * 1e-9;
1043
1044 converged = minimizer.HasConverged(testError);
1045 start = std::chrono::system_clock::now();
1046
1047 if (fInteractive) {
1048 fInteractive->AddPoint(stepCount, trainingError, testError);
1049 fIPyCurrentIter = 100.0 * minimizer.GetConvergenceCount()
1050 / minimizer.GetConvergenceSteps ();
1051 if (fExitFromTraining) break;
1052 } else {
1053 Log() << std::setw(10) << stepCount << " | "
1054 << std::setw(12) << trainingError
1055 << std::setw(12) << testError
1056 << std::setw(12) << nFlops / seconds
1057 << std::setw(12) << minimizer.GetConvergenceCount() << Endl;
1058 if (converged) {
1059 Log() << Endl;
1060 }
1061 }
1062 }
1063 }
1064 for (size_t l = 0; l < net.GetDepth(); l++) {
1065 fNet.GetLayer(l).GetWeights() = (TMatrixT<Scalar_t>) net.GetLayer(l).GetWeights();
1066 fNet.GetLayer(l).GetBiases() = (TMatrixT<Scalar_t>) net.GetLayer(l).GetBiases();
1067 }
1068 }
1069
1070#else // DNNCUDA flag not set.
1071
1072 Log() << kFATAL << "CUDA backend not enabled. Please make sure "
1073 "you have CUDA installed and it was successfully "
1074 "detected by CMAKE." << Endl;
1075#endif // DNNCUDA
1076}
1077
1078////////////////////////////////////////////////////////////////////////////////
1079
1080void TMVA::MethodDNN::TrainCpu()
1081{
1082
1083#ifdef DNNCPU // Included only if DNNCPU flag is set.
1084 Log() << kINFO << "Start of neural network training on CPU." << Endl << Endl;
1085
1086 size_t nValidationSamples = GetNumValidationSamples();
1087 size_t nTrainingSamples = GetEventCollection(Types::kTraining).size() - nValidationSamples;
1088 size_t nTestSamples = nValidationSamples;
1089
1090 Log() << kDEBUG << "Using " << nValidationSamples << " validation samples." << Endl;
1091   Log() << kDEBUG << "Using " << nTestSamples << " test samples." << Endl;
1092
1093 fNet.Initialize(fWeightInitialization);
1094
1095 size_t trainingPhase = 1;
1096 for (TTrainingSettings & settings : fTrainingSettings) {
1097
1098 if (fInteractive){
1099 fInteractive->ClearGraphs();
1100 }
1101
1102 Log() << "Training phase " << trainingPhase << " of "
1103 << fTrainingSettings.size() << ":" << Endl;
1104 trainingPhase++;
1105
1106 TNet<TCpu<>> net(settings.batchSize, fNet);
1107 net.SetWeightDecay(settings.weightDecay);
1108 net.SetRegularization(settings.regularization);
1109      // Need to convert dropout probabilities to conventions used
1110 // by backend implementation.
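      // (Assumed convention: DropConfig specifies the fraction of nodes to drop
      // per layer, while the backend stores the probability of keeping a node,
      // hence the p -> 1 - p conversion below.)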
1111 std::vector<Double_t> dropoutVector(settings.dropoutProbabilities);
1112 for (auto & p : dropoutVector) {
1113 p = 1.0 - p;
1114 }
1115 net.SetDropoutProbabilities(dropoutVector);
1116 net.InitializeGradients();
1117 auto testNet = net.CreateClone(settings.batchSize);
1118
1119 using DataLoader_t = TDataLoader<TMVAInput_t, TCpu<>>;
1120
1121 // Split training data into training and validation set
1122 const std::vector<Event *> &allData = GetEventCollection(Types::kTraining);
1123 const std::vector<Event *> trainingInputData =
1124 std::vector<Event *>(allData.begin(), allData.begin() + nTrainingSamples);
1125 const std::vector<Event *> testInputData =
1126 std::vector<Event *>(allData.begin() + nTrainingSamples, allData.end());
1127
1128 if (trainingInputData.size() != nTrainingSamples) {
1129 Log() << kFATAL << "Inconsistent training sample size" << Endl;
1130 }
1131 if (testInputData.size() != nTestSamples) {
1132 Log() << kFATAL << "Inconsistent test sample size" << Endl;
1133 }
1134
1135 size_t nThreads = 1;
1136 TMVAInput_t trainingTuple = std::tie(trainingInputData, DataInfo());
1137 TMVAInput_t testTuple = std::tie(testInputData, DataInfo());
1138 DataLoader_t trainingData(trainingTuple, nTrainingSamples,
1139 net.GetBatchSize(), net.GetInputWidth(),
1140 net.GetOutputWidth(), nThreads);
1141 DataLoader_t testData(testTuple, nTestSamples, testNet.GetBatchSize(),
1142 net.GetInputWidth(), net.GetOutputWidth(),
1143 nThreads);
1144 DNN::TGradientDescent<TCpu<>> minimizer(settings.learningRate,
1145 settings.convergenceSteps,
1146 settings.testInterval);
1147
1148 std::vector<TNet<TCpu<>>> nets{};
1149 std::vector<TBatch<TCpu<>>> batches{};
1150 nets.reserve(nThreads);
1151 for (size_t i = 0; i < nThreads; i++) {
1152 nets.push_back(net);
1153 for (size_t j = 0; j < net.GetDepth(); j++)
1154 {
1155 auto &masterLayer = net.GetLayer(j);
1156 auto &layer = nets.back().GetLayer(j);
1157 TCpu<>::Copy(layer.GetWeights(),
1158 masterLayer.GetWeights());
1159 TCpu<>::Copy(layer.GetBiases(),
1160 masterLayer.GetBiases());
1161 }
1162 }
1163
1164 bool converged = false;
1165 size_t stepCount = 0;
1166 size_t batchesInEpoch = nTrainingSamples / net.GetBatchSize();
1167
1168 std::chrono::time_point<std::chrono::system_clock> start, end;
1169 start = std::chrono::system_clock::now();
1170
1171 if (!fInteractive) {
1172 Log() << std::setw(10) << "Epoch" << " | "
1173 << std::setw(12) << "Train Err."
1174 << std::setw(12) << "Test Err."
1175 << std::setw(12) << "GFLOP/s"
1176 << std::setw(12) << "Conv. Steps" << Endl;
1177 std::string separator(62, '-');
1178 Log() << separator << Endl;
1179 }
1180
1181 while (!converged)
1182 {
1183 stepCount++;
1184 // Perform minimization steps for a full epoch.
1185 trainingData.Shuffle();
1186 for (size_t i = 0; i < batchesInEpoch; i += nThreads) {
1187 batches.clear();
1188 for (size_t j = 0; j < nThreads; j++) {
1189 batches.reserve(nThreads);
1190 batches.push_back(trainingData.GetBatch());
1191 }
1192 if (settings.momentum > 0.0) {
1193 minimizer.StepMomentum(net, nets, batches, settings.momentum);
1194 } else {
1195 minimizer.Step(net, nets, batches);
1196 }
1197 }
1198
1199 if ((stepCount % minimizer.GetTestInterval()) == 0) {
1200
1201 // Compute test error.
1202 Double_t testError = 0.0;
1203 for (auto batch : testData) {
1204 auto inputMatrix = batch.GetInput();
1205 auto outputMatrix = batch.GetOutput();
1206 auto weightMatrix = batch.GetWeights();
1207 testError += testNet.Loss(inputMatrix, outputMatrix, weightMatrix);
1208 }
1209 testError /= (Double_t) (nTestSamples / settings.batchSize);
1210
1211 end = std::chrono::system_clock::now();
1212
1213 // Compute training error.
1214 Double_t trainingError = 0.0;
1215 for (auto batch : trainingData) {
1216 auto inputMatrix = batch.GetInput();
1217 auto outputMatrix = batch.GetOutput();
1218 auto weightMatrix = batch.GetWeights();
1219 trainingError += net.Loss(inputMatrix, outputMatrix, weightMatrix);
1220 }
1221 trainingError /= (Double_t) (nTrainingSamples / settings.batchSize);
1222
1223 if (fInteractive){
1224 fInteractive->AddPoint(stepCount, trainingError, testError);
1225 fIPyCurrentIter = 100*(double)minimizer.GetConvergenceCount() /(double)settings.convergenceSteps;
1226 if (fExitFromTraining) break;
1227 }
1228
1229 // Compute numerical throughput.
1230 std::chrono::duration<double> elapsed_seconds = end - start;
1231 double seconds = elapsed_seconds.count();
1232 double nFlops = (double) (settings.testInterval * batchesInEpoch);
1233 nFlops *= net.GetNFlops() * 1e-9;
1234
1235 converged = minimizer.HasConverged(testError);
1236 start = std::chrono::system_clock::now();
1237
1238 if (fInteractive) {
1239 fInteractive->AddPoint(stepCount, trainingError, testError);
1240 fIPyCurrentIter = 100.0 * minimizer.GetConvergenceCount()
1241 / minimizer.GetConvergenceSteps ();
1242 if (fExitFromTraining) break;
1243 } else {
1244 Log() << std::setw(10) << stepCount << " | "
1245 << std::setw(12) << trainingError
1246 << std::setw(12) << testError
1247 << std::setw(12) << nFlops / seconds
1248 << std::setw(12) << minimizer.GetConvergenceCount() << Endl;
1249 if (converged) {
1250 Log() << Endl;
1251 }
1252 }
1253 }
1254 }
1255
1256
1257 for (size_t l = 0; l < net.GetDepth(); l++) {
1258 auto & layer = fNet.GetLayer(l);
1259 layer.GetWeights() = (TMatrixT<Scalar_t>) net.GetLayer(l).GetWeights();
1260 layer.GetBiases() = (TMatrixT<Scalar_t>) net.GetLayer(l).GetBiases();
1261 }
1262 }
1263
1264#else // DNNCPU flag not set.
1265 Log() << kFATAL << "Multi-core CPU backend not enabled. Please make sure "
1266 "you have a BLAS implementation and it was successfully "
1267                    "detected by CMake, and that the imt CMake flag is set." << Endl;
1268#endif // DNNCPU
1269}
1270
1271////////////////////////////////////////////////////////////////////////////////
1272
1273Double_t TMVA::MethodDNN::GetMvaValue(Double_t * /*errLower*/, Double_t * /*errUpper*/)
1274{
1275 size_t nVariables = GetEvent()->GetNVariables();
1276 Matrix_t X(1, nVariables);
1277 Matrix_t YHat(1, 1);
1278
1279 const std::vector<Float_t>& inputValues = GetEvent()->GetValues();
1280 for (size_t i = 0; i < nVariables; i++) {
1281 X(0,i) = inputValues[i];
1282 }
1283
1284 fNet.Prediction(YHat, X, fOutputFunction);
1285 return YHat(0,0);
1286}
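// ---------------------------------------------------------------------------
// Illustrative application-side sketch (not part of the original source): the
// classifier response computed above is usually obtained through a
// TMVA::Reader once training has written a weight file. The variable names and
// the weight-file path below are assumptions for the sketch.
//
//    TMVA::Reader reader("!Color:!Silent");
//    Float_t var1 = 0, var2 = 0;
//    reader.AddVariable("var1", &var1);
//    reader.AddVariable("var2", &var2);
//    reader.BookMVA("DNN", "dataset/weights/TMVAClassification_DNN.weights.xml");
//    var1 = 0.5; var2 = -1.2;                        // fill with event values
//    Double_t mvaValue = reader.EvaluateMVA("DNN");  // internally calls GetMvaValue()
// ---------------------------------------------------------------------------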
1287
1288////////////////////////////////////////////////////////////////////////////////
1289
1290const std::vector<Float_t> & TMVA::MethodDNN::GetRegressionValues()
1291{
1292 size_t nVariables = GetEvent()->GetNVariables();
1293 Matrix_t X(1, nVariables);
1294
1295 const Event *ev = GetEvent();
1296 const std::vector<Float_t>& inputValues = ev->GetValues();
1297 for (size_t i = 0; i < nVariables; i++) {
1298 X(0,i) = inputValues[i];
1299 }
1300
1301 size_t nTargets = std::max(1u, ev->GetNTargets());
1302 Matrix_t YHat(1, nTargets);
1303 std::vector<Float_t> output(nTargets);
1304 auto net = fNet.CreateClone(1);
1305 net.Prediction(YHat, X, fOutputFunction);
1306
1307 for (size_t i = 0; i < nTargets; i++)
1308 output[i] = YHat(0, i);
1309
1310 if (fRegressionReturnVal == NULL) {
1311 fRegressionReturnVal = new std::vector<Float_t>();
1312 }
1313 fRegressionReturnVal->clear();
1314
1315 Event * evT = new Event(*ev);
1316 for (size_t i = 0; i < nTargets; ++i) {
1317 evT->SetTarget(i, output[i]);
1318 }
1319
1320 const Event* evT2 = GetTransformationHandler().InverseTransform(evT);
1321 for (size_t i = 0; i < nTargets; ++i) {
1322 fRegressionReturnVal->push_back(evT2->GetTarget(i));
1323 }
1324 delete evT;
1325 return *fRegressionReturnVal;
1326}
1327
1328const std::vector<Float_t> & TMVA::MethodDNN::GetMulticlassValues()
1329{
1330 size_t nVariables = GetEvent()->GetNVariables();
1331 Matrix_t X(1, nVariables);
1332 Matrix_t YHat(1, DataInfo().GetNClasses());
1333 if (fMulticlassReturnVal == NULL) {
1334 fMulticlassReturnVal = new std::vector<Float_t>(DataInfo().GetNClasses());
1335 }
1336
1337 const std::vector<Float_t>& inputValues = GetEvent()->GetValues();
1338 for (size_t i = 0; i < nVariables; i++) {
1339 X(0,i) = inputValues[i];
1340 }
1341
1342 fNet.Prediction(YHat, X, fOutputFunction);
1343 for (size_t i = 0; i < (size_t) YHat.GetNcols(); i++) {
1344 (*fMulticlassReturnVal)[i] = YHat(0, i);
1345 }
1346 return *fMulticlassReturnVal;
1347}
1348
1349////////////////////////////////////////////////////////////////////////////////
1350
1351void TMVA::MethodDNN::AddWeightsXMLTo( void* parent ) const
1352{
1353 void* nn = gTools().xmlengine().NewChild(parent, 0, "Weights");
1354 Int_t inputWidth = fNet.GetInputWidth();
1355 Int_t depth = fNet.GetDepth();
1356 char lossFunction = static_cast<char>(fNet.GetLossFunction());
1357 gTools().xmlengine().NewAttr(nn, 0, "InputWidth",
1358 gTools().StringFromInt(inputWidth));
1359 gTools().xmlengine().NewAttr(nn, 0, "Depth", gTools().StringFromInt(depth));
1360 gTools().xmlengine().NewAttr(nn, 0, "LossFunction", TString(lossFunction));
1361 gTools().xmlengine().NewAttr(nn, 0, "OutputFunction",
1362 TString(static_cast<char>(fOutputFunction)));
1363
1364 for (Int_t i = 0; i < depth; i++) {
1365 const auto& layer = fNet.GetLayer(i);
1366 auto layerxml = gTools().xmlengine().NewChild(nn, 0, "Layer");
1367 int activationFunction = static_cast<int>(layer.GetActivationFunction());
1368 gTools().xmlengine().NewAttr(layerxml, 0, "ActivationFunction",
1369 TString::Itoa(activationFunction, 10));
1370 WriteMatrixXML(layerxml, "Weights", layer.GetWeights());
1371 WriteMatrixXML(layerxml, "Biases", layer.GetBiases());
1372 }
1373}
1374
1375////////////////////////////////////////////////////////////////////////////////
1376
1377void TMVA::MethodDNN::ReadWeightsFromXML(void* rootXML)
1378{
1379 auto netXML = gTools().GetChild(rootXML, "Weights");
1380 if (!netXML){
1381 netXML = rootXML;
1382 }
1383
1384 fNet.Clear();
1385 fNet.SetBatchSize(1);
1386
1387 size_t inputWidth, depth;
1388 gTools().ReadAttr(netXML, "InputWidth", inputWidth);
1389 gTools().ReadAttr(netXML, "Depth", depth);
1390 char lossFunctionChar;
1391 gTools().ReadAttr(netXML, "LossFunction", lossFunctionChar);
1392 char outputFunctionChar;
1393 gTools().ReadAttr(netXML, "OutputFunction", outputFunctionChar);
1394
1395 fNet.SetInputWidth(inputWidth);
1396 fNet.SetLossFunction(static_cast<ELossFunction>(lossFunctionChar));
1397 fOutputFunction = static_cast<EOutputFunction>(outputFunctionChar);
1398
1399 size_t previousWidth = inputWidth;
1400 auto layerXML = gTools().xmlengine().GetChild(netXML, "Layer");
1401 for (size_t i = 0; i < depth; i++) {
1402 TString fString;
1403      EActivationFunction f;
1404
1405 // Read activation function.
1406 gTools().ReadAttr(layerXML, "ActivationFunction", fString);
1407 f = static_cast<EActivationFunction>(fString.Atoi());
1408
1409 // Read number of neurons.
1410 size_t width;
1411 auto matrixXML = gTools().GetChild(layerXML, "Weights");
1412 gTools().ReadAttr(matrixXML, "rows", width);
1413
1414 fNet.AddLayer(width, f);
1415 TMatrixT<Double_t> weights(width, previousWidth);
1416 TMatrixT<Double_t> biases(width, 1);
1417 ReadMatrixXML(layerXML, "Weights", weights);
1418 ReadMatrixXML(layerXML, "Biases", biases);
1419 fNet.GetLayer(i).GetWeights() = weights;
1420 fNet.GetLayer(i).GetBiases() = biases;
1421
1422 layerXML = gTools().GetNextChild(layerXML);
1423 previousWidth = width;
1424 }
1425}
1426
1427////////////////////////////////////////////////////////////////////////////////
1428
1429void TMVA::MethodDNN::ReadWeightsFromStream( std::istream & /*istr*/)
1430{
1431}
1432
1433////////////////////////////////////////////////////////////////////////////////
1434
1435const TMVA::Ranking* TMVA::MethodDNN::CreateRanking()
1436{
1437 fRanking = new Ranking( GetName(), "Importance" );
1438 for (UInt_t ivar=0; ivar<GetNvar(); ivar++) {
1439 fRanking->AddRank( Rank( GetInputLabel(ivar), 1.0));
1440 }
1441 return fRanking;
1442}
1443
1444////////////////////////////////////////////////////////////////////////////////
1445
1446void TMVA::MethodDNN::MakeClassSpecific( std::ostream& /*fout*/,
1447 const TString& /*className*/ ) const
1448{
1449}
1450
1451////////////////////////////////////////////////////////////////////////////////
1452
1453void TMVA::MethodDNN::GetHelpMessage() const
1454{
1455 // get help message text
1456 //
1457 // typical length of text line:
1458 // "|--------------------------------------------------------------|"
1459 TString col = gConfig().WriteOptionsReference() ? TString() : gTools().Color("bold");
1460 TString colres = gConfig().WriteOptionsReference() ? TString() : gTools().Color("reset");
1461
1462 Log() << Endl;
1463 Log() << col << "--- Short description:" << colres << Endl;
1464 Log() << Endl;
1465 Log() << "The DNN neural network is a feedforward" << Endl;
1466 Log() << "multilayer perceptron implementation. The DNN has a user-" << Endl;
1467 Log() << "defined hidden layer architecture, where the number of input (output)" << Endl;
1468 Log() << "nodes is determined by the input variables (output classes, i.e., " << Endl;
1469 Log() << "signal and one background, regression or multiclass). " << Endl;
1470 Log() << Endl;
1471 Log() << col << "--- Performance optimisation:" << colres << Endl;
1472 Log() << Endl;
1473
1474 const char* txt = "The DNN supports various options to improve performance in terms of training speed and \n \
1475reduction of overfitting: \n \
1476\n \
1477 - different training settings can be stacked, such that the initial training \n\
1478 is done with a large learning rate and a large drop out fraction whilst \n \
1479 in a later stage learning rate and drop out can be reduced. \n \
1480 - drop out \n \
1481 [recommended: \n \
1482 initial training stage: 0.0 for the first layer, 0.5 for later layers. \n \
1483 later training stage: 0.1 or 0.0 for all layers \n \
1484 final training stage: 0.0] \n \
1485 Drop out is a technique where at each training cycle a fraction of arbitrary \n \
1486 nodes is disabled. This reduces co-adaptation of weights and thus reduces overfitting. \n \
1487 - L1 and L2 regularization are available \n \
1488 - Minibatches \n \
1489 [recommended 10 - 150] \n \
1490 Arbitrary mini-batch sizes can be chosen. \n \
1491 - Multithreading \n \
1492 [recommended: True] \n \
1493 Multithreading can be turned on. The minibatches are distributed to the available \n \
1494 cores. The algorithm is lock-free (\"Hogwild!\"-style) for each cycle. \n \
1495 \n \
1496 Options: \n \
1497 \"Layout\": \n \
1498 - example: \"TANH|(N+30)*2,TANH|(N+30),LINEAR\" \n \
1499 - meaning: \n \
1500 . two hidden layers (separated by \",\") \n \
1501 . the activation function is TANH (other options: RELU, SOFTSIGN, LINEAR) \n \
1502 . the activation function for the output layer is LINEAR \n \
1503 . the first hidden layer has (N+30)*2 nodes where N is the number of input neurons \n \
1504 . the second hidden layer has N+30 nodes, where N is the number of input neurons \n \
1505 . the number of nodes in the output layer is determined by the number of output nodes \n \
1506 and can therefore not be chosen freely. \n \
1507 \n \
1508 \"ErrorStrategy\": \n \
1509 - SUMOFSQUARES \n \
1510 The error of the neural net is determined by a sum-of-squares error function \n \
1511 For regression, this is the only possible choice. \n \
1512 - CROSSENTROPY \n \
1513 The error of the neural net is determined by a cross entropy function. The \n \
1514 output values are automatically (internally) transformed into probabilities \n \
1515 using a sigmoid function. \n \
1516 For signal/background classification this is the default choice. \n \
1517 For multiclass using cross entropy, more than one or no output classes \n \
1518 can be equally true or false (e.g. Event 0: A and B are true, Event 1: \n \
1519 A and C are true, Event 2: C is true, ...) \n \
1520 - MUTUALEXCLUSIVE \n \
1521 In multiclass settings, exactly one of the output classes can be true (e.g. either A or B or C) \n \
1522 \n \
1523 \"WeightInitialization\" \n \
1524 - XAVIER \n \
1525 [recommended] \n \
1526 \"Xavier Glorot & Yoshua Bengio\"-style of initializing the weights. The weights are chosen randomly \n \
1527 such that the variance of the values of the nodes is preserved for each layer. \n \
1528 - XAVIERUNIFORM \n \
1529 The same as XAVIER, but with uniformly distributed weights instead of gaussian weights \n \
1530 - LAYERSIZE \n \
1531 Random values scaled by the layer size \n \
1532 \n \
1533 \"TrainingStrategy\" \n \
1534 - example: \"LearningRate=1e-1,Momentum=0.3,ConvergenceSteps=50,BatchSize=30,TestRepetitions=7,WeightDecay=0.0,Renormalize=L2,DropConfig=0.0,DropRepetitions=5|LearningRate=1e-4,Momentum=0.3,ConvergenceSteps=50,BatchSize=20,TestRepetitions=7,WeightDecay=0.001,Renormalize=L2,DropConfig=0.0,DropRepetitions=5\" \n \
1535 - explanation: two stacked training settings separated by \"|\" \n \
1536 . first training setting: \"LearningRate=1e-1,Momentum=0.3,ConvergenceSteps=50,BatchSize=30,TestRepetitions=7,WeightDecay=0.0,Renormalize=L2,DropConfig=0.0,DropRepetitions=5\" \n \
1537 . second training setting : \"LearningRate=1e-4,Momentum=0.3,ConvergenceSteps=50,BatchSize=20,TestRepetitions=7,WeightDecay=0.001,Renormalize=L2,DropConfig=0.0,DropRepetitions=5\" \n \
1538 . LearningRate : \n \
1539 - recommended for classification: 0.1 initially, 1e-4 later \n \
1540 - recommended for regression: 1e-4 and less \n \
1541 . Momentum : \n \
1542 preserve a fraction of the momentum for the next training batch [fraction = 0.0 - 1.0] \n \
1543 . Repetitions : \n \
1544 train \"Repetitions\" repetitions with the same minibatch before switching to the next one \n \
1545 . ConvergenceSteps : \n \
1546 Assume that convergence is reached after \"ConvergenceSteps\" cycles where no improvement \n \
1547 of the error on the test samples has been found. (Mind that only at each \"TestRepetitions\" \n \
1548 cycle the test samples are evaluated and thus the convergence is checked) \n \
1549 . BatchSize \n \
1550 Size of the mini-batches. \n \
1551 . TestRepetitions \n \
1552 Perform testing the neural net on the test samples each \"TestRepetitions\" cycle \n \
1553 . WeightDecay \n \
1554 If \"Renormalize\" is set to L1 or L2, \"WeightDecay\" provides the renormalization factor \n \
1555 . Renormalize \n \
1556 NONE, L1 (|w|) or L2 (w^2) \n \
1557 . DropConfig \n \
1558 Drop a fraction of arbitrary nodes of each of the layers according to the values given \n \
1559 in the DropConfig. \n \
1560 [example: DropConfig=0.0+0.5+0.3 \n \
1561 meaning: drop no nodes in layer 0 (input layer), half of the nodes in layer 1 and 30% of the nodes \n \
1562 in layer 2 \n \
1563 recommended: leave all the nodes turned on for the input layer (layer 0) \n \
1564 turn off half of the nodes in later layers for the initial training; leave all nodes \n \
1565 turned on (0.0) in later training stages] \n \
1566 . DropRepetitions \n \
1567 Each \"DropRepetitions\" cycle the configuration of which nodes are dropped is changed \n \
1568 [recommended : 1] \n \
1569 . Multithreading \n \
1570 turn on multithreading [recommended: True] \n \
1571 \n";
1572 Log () << txt << Endl;
1573}
1574
1575} // namespace TMVA
std::vector< Float_t > & GetValues()
Definition: Event.h:95
Float_t GetTarget(UInt_t itgt) const
Definition: Event.h:103
Deep Neural Network Implementation.
Definition: MethodDNN.h:73
virtual Bool_t HasAnalysisType(Types::EAnalysisType type, UInt_t numberClasses, UInt_t numberTargets)
virtual const std::vector< Float_t > & GetMulticlassValues()
Definition: MethodDNN.cxx:1328
UInt_t GetNumValidationSamples()
void ReadWeightsFromXML(void *wghtnode)
Definition: MethodDNN.cxx:1377
std::vector< std::map< TString, TString > > KeyValueVector_t
Definition: MethodDNN.h:83
typename Architecture_t::Matrix_t Matrix_t
Definition: MethodDNN.h:78
void ReadWeightsFromStream(std::istream &i)
Definition: MethodDNN.cxx:1429
LayoutVector_t ParseLayoutString(TString layerSpec)
void MakeClassSpecific(std::ostream &, const TString &) const
Definition: MethodDNN.cxx:1446
MethodDNN(const TString &jobName, const TString &methodTitle, DataSetInfo &theData, const TString &theOption)
void ProcessOptions()
Definition: MethodDNN.cxx:412
virtual ~MethodDNN()
DNN::EInitialization fWeightInitialization
Definition: MethodDNN.h:108
void DeclareOptions()
virtual Double_t GetMvaValue(Double_t *err=0, Double_t *errUpper=0)
Definition: MethodDNN.cxx:1273
const Ranking * CreateRanking()
Definition: MethodDNN.cxx:1435
KeyValueVector_t ParseKeyValueString(TString parseString, TString blockDelim, TString tokenDelim)
DNN::EOutputFunction fOutputFunction
Definition: MethodDNN.h:109
void AddWeightsXMLTo(void *parent) const
Definition: MethodDNN.cxx:1351
void GetHelpMessage() const
Definition: MethodDNN.cxx:1453
virtual const std::vector< Float_t > & GetRegressionValues()
Definition: MethodDNN.cxx:1290
Ranking for variables in method (implementation)
Definition: Ranking.h:48
void * GetNextChild(void *prevchild, const char *childname=0)
XML helpers.
Definition: Tools.cxx:1174
const TString & Color(const TString &)
human readable color strings
Definition: Tools.cxx:840
void * GetChild(void *parent, const char *childname=0)
get child node
Definition: Tools.cxx:1162
TXMLEngine & xmlengine()
Definition: Tools.h:270
void ReadAttr(void *node, const char *, T &value)
read attribute from xml
Definition: Tools.h:337
EAnalysisType
Definition: Types.h:127
@ kMulticlass
Definition: Types.h:130
@ kClassification
Definition: Types.h:128
@ kRegression
Definition: Types.h:129
@ kTraining
Definition: Types.h:144
TMatrixT.
Definition: TMatrixT.h:39
An array of TObjects.
Definition: TObjArray.h:37
Collectable string class.
Definition: TObjString.h:28
const TString & GetString() const
Definition: TObjString.h:46
Basic string class.
Definition: TString.h:131
Int_t Atoi() const
Return integer value of string.
Definition: TString.cxx:1921
TSubString Strip(EStripType s=kTrailing, char c=' ') const
Return a substring of self stripped at beginning and/or end.
Definition: TString.cxx:1106
Double_t Atof() const
Return floating-point value contained in string.
Definition: TString.cxx:1987
Bool_t IsFloat() const
Returns kTRUE if string contains a floating point or integer number.
Definition: TString.cxx:1791
const char * Data() const
Definition: TString.h:364
@ kTrailing
Definition: TString.h:262
@ kBoth
Definition: TString.h:262
void ToUpper()
Change string to upper case.
Definition: TString.cxx:1138
TObjArray * Tokenize(const TString &delim) const
This function is used to isolate sequential tokens in a TString.
Definition: TString.cxx:2197
Bool_t BeginsWith(const char *s, ECaseCompare cmp=kExact) const
Definition: TString.h:610
static TString Itoa(Int_t value, Int_t base)
Converts an Int_t to a TString with respect to the base specified (2-36).
Definition: TString.cxx:2025
XMLNodePointer_t GetChild(XMLNodePointer_t xmlnode, Bool_t realnode=kTRUE)
returns first child of xmlnode
XMLAttrPointer_t NewAttr(XMLNodePointer_t xmlnode, XMLNsPointer_t, const char *name, const char *value)
creates new attribute for xmlnode, namespaces are not supported for attributes
Definition: TXMLEngine.cxx:580
XMLNodePointer_t NewChild(XMLNodePointer_t parent, XMLNsPointer_t ns, const char *name, const char *content=0)
create new child element for parent node
Definition: TXMLEngine.cxx:709
std::string GetName(const std::string &scope_name)
Definition: Cppyy.cxx:146
double T(double x)
Definition: ChebyshevPol.h:34
static constexpr double s
EOutputFunction
Enum that represents output functions.
Definition: Functions.h:44
EnumRegularization
Definition: NeuralNet.h:174
auto regularization(const typename Architecture_t::Matrix_t &A, ERegularization R) -> decltype(Architecture_t::L1Regularization(A))
Evaluate the regularization functional for a given weight matrix.
Definition: Functions.h:216
EActivationFunction
Enum that represents layer activation functions.
Definition: Functions.h:32
ELossFunction
Enum that represents objective functions for the net, i.e.
Definition: Functions.h:55
@ fSteepest
SGD.
Definition: NeuralNet.h:323
ModeOutputValues
Definition: NeuralNet.h:180
std::tuple< const std::vector< Event * > &, const DataSetInfo & > TMVAInput_t
Definition: DataLoader.h:40
create variable transformations
Config & gConfig()
Tools & gTools()
TString fetchValue(const std::map< TString, TString > &keyValueMap, TString key)
Definition: MethodDNN.cxx:308
MsgLogger & Endl(MsgLogger &ml)
Definition: MsgLogger.h:158
Double_t Log(Double_t x)
Definition: TMath.h:748
DNN::ERegularization regularization
Definition: MethodDNN.h:90
std::vector< Double_t > dropoutProbabilities
Definition: MethodDNN.h:94
auto * l
Definition: textangle.C:4
static void output(int code)
Definition: gifencode.c:226