   : MethodBase(jobName, Types::kDNN, methodTitle, theData, theOption),
     fWeightInitialization(), fOutputFunction(), fLayoutString(), fErrorStrategy(),
     fTrainingStrategyString(), fWeightInitializationString(), fArchitectureString(),
     fTrainingSettings(), fResume(false), fSettings()
     fWeightInitialization(), fOutputFunction(), fLayoutString(), fErrorStrategy(),
     fTrainingStrategyString(), fWeightInitializationString(), fArchitectureString(),
     fTrainingSettings(), fResume(false), fSettings()
   fWeightInitialization = DNN::EInitialization::kGauss;
   fOutputFunction = DNN::EOutputFunction::kSigmoid;
         << "MethodDNN is deprecated and will be removed in a future ROOT version. "
            "Please use MethodDL (TMVA::kDL) instead."
   DeclareOptionRef(fLayoutString="SOFTSIGN|(N+100)*2,LINEAR",
                    "Layout",
                    "Layout of the network.");

   DeclareOptionRef(fValidationSize = "20%", "ValidationSize",
                    "Part of the training data to use for "
                    "validation. Specify as 0.2 or 20% to use a "
                    "fifth of the data set as validation set. "
                    "Specify as 100 to use exactly 100 events.");

   DeclareOptionRef(fErrorStrategy="CROSSENTROPY", "ErrorStrategy",
                    "Loss function: Mean squared error (regression)"
                    " or cross entropy (binary classification).");
   AddPreDefVal(TString("CROSSENTROPY"));
   AddPreDefVal(TString("SUMOFSQUARES"));
   AddPreDefVal(TString("MUTUALEXCLUSIVE"));

   DeclareOptionRef(fWeightInitializationString="XAVIER",
                    "WeightInitialization",
                    "Weight initialization strategy");
   AddPreDefVal(TString("XAVIER"));
   AddPreDefVal(TString("XAVIERUNIFORM"));

   DeclareOptionRef(fArchitectureString = "CPU", "Architecture",
                    "Which architecture to perform the training on.");
   AddPreDefVal(TString("STANDARD"));
   AddPreDefVal(TString("CPU"));
   AddPreDefVal(TString("GPU"));
   AddPreDefVal(TString("OPENCL"));

   DeclareOptionRef(fTrainingStrategyString = "LearningRate=1e-1,"
                                              "ConvergenceSteps=50,"
                                              "DropRepetitions=5|LearningRate=1e-4,"
                                              "ConvergenceSteps=50,"
                                              "DropConfig=0.0+0.5+0.5,"
                                              "Multithreading=True",
                    "TrainingStrategy",
                    "Defines the training strategies.");
   const size_t inputSize = GetNvar();
   EActivationFunction activationFunction = EActivationFunction::kTanh;
   numNodes = fml.Eval(inputSize);
   layout.push_back(std::make_pair(numNodes, activationFunction));
   std::map<TString, TString>::const_iterator it = keyValueMap.find(key);
   return value.Atoi();

double fetchValue(const std::map<TString, TString> &keyValueMap,
                  TString key, double defaultValue)
   return value.Atof();

bool fetchValue(const std::map<TString, TString> &keyValueMap,
                TString key, bool defaultValue)

std::vector<double> fetchValue(const std::map<TString, TString> &keyValueMap,
                               TString key, std::vector<double> defaultValue)
   std::vector<double> values;
   std::stringstream sstr;
   if (IgnoreEventsWithNegWeightsInTraining()) {
      Log() << kINFO << "Will ignore negative events in training!" << Endl;
   }

   if (fArchitectureString == "STANDARD") {
      Log() << kERROR << "The STANDARD architecture has been deprecated. "
                         "Please use Architecture=CPU or Architecture=GPU. "
                         "See the TMVA Users' Guide for instructions if you "
                         "encounter problems."
            << Endl;
      Log() << kFATAL << "The STANDARD architecture has been deprecated. "
                         "Please use Architecture=CPU or Architecture=GPU. "
                         "See the TMVA Users' Guide for instructions if you "
                         "encounter problems."
            << Endl;
   }
   if (fArchitectureString == "OPENCL") {
      Log() << kERROR << "The OPENCL architecture has not been implemented yet. "
                         "Please use Architecture=CPU or Architecture=GPU for the "
                         "time being. See the TMVA Users' Guide for instructions "
                         "if you encounter problems."
            << Endl;
      Log() << kFATAL << "The OPENCL architecture has not been implemented yet. "
                         "Please use Architecture=CPU or Architecture=GPU for the "
                         "time being. See the TMVA Users' Guide for instructions "
                         "if you encounter problems."
            << Endl;
   }
   if (fArchitectureString == "GPU") {
      Log() << kERROR << "CUDA backend not enabled. Please make sure "
                         "you have CUDA installed and it was successfully "
                         "detected by CMAKE."
            << Endl;
      Log() << kFATAL << "CUDA backend not enabled. Please make sure "
                         "you have CUDA installed and it was successfully "
                         "detected by CMAKE."
            << Endl;
   }
   if (fArchitectureString == "CPU") {
      Log() << kERROR << "Multi-core CPU backend not enabled. Please make sure "
                         "you have a BLAS implementation and it was successfully "
                         "detected by CMake, and that the imt CMake flag is set."
            << Endl;
      Log() << kFATAL << "Multi-core CPU backend not enabled. Please make sure "
                         "you have a BLAS implementation and it was successfully "
                         "detected by CMake, and that the imt CMake flag is set."
            << Endl;
   }
   size_t inputSize = GetNVariables();
   size_t outputSize = 1;
   if (fAnalysisType == Types::kRegression)
      outputSize = GetNTargets();
   else if (fAnalysisType == Types::kMulticlass)
      outputSize = DataInfo().GetNClasses();

   fNet.SetBatchSize(1);
   fNet.SetInputWidth(inputSize);

   auto itLayout    = std::begin(fLayout);
   auto itLayoutEnd = std::end(fLayout) - 1;
   for (; itLayout != itLayoutEnd; ++itLayout) {
      fNet.AddLayer((*itLayout).first, (*itLayout).second);
   }
   fNet.AddLayer(outputSize, EActivationFunction::kIdentity);

   fOutputFunction = EOutputFunction::kSigmoid;
   // Classification
   if (fErrorStrategy == "SUMOFSQUARES") {
      fNet.SetLossFunction(ELossFunction::kMeanSquaredError);
   }
   if (fErrorStrategy == "CROSSENTROPY") {
      fNet.SetLossFunction(ELossFunction::kCrossEntropy);
   }
   fOutputFunction = EOutputFunction::kSigmoid;

   // Regression
   if (fErrorStrategy != "SUMOFSQUARES") {
      Log() << kWARNING << "For regression only SUMOFSQUARES is a valid "
            << "neural net error function. Setting error function to "
            << "SUMOFSQUARES now." << Endl;
   }
   fNet.SetLossFunction(ELossFunction::kMeanSquaredError);
   fOutputFunction = EOutputFunction::kIdentity;

   // Multiclass
   if (fErrorStrategy == "SUMOFSQUARES") {
      fNet.SetLossFunction(ELossFunction::kMeanSquaredError);
   }
   if (fErrorStrategy == "CROSSENTROPY") {
      fNet.SetLossFunction(ELossFunction::kCrossEntropy);
   }
   if (fErrorStrategy == "MUTUALEXCLUSIVE") {
      fNet.SetLossFunction(ELossFunction::kSoftmaxCrossEntropy);
   }
   fOutputFunction = EOutputFunction::kSoftmax;
   if (fWeightInitializationString == "XAVIER") {
      fWeightInitialization = DNN::EInitialization::kGauss;
   }
   else if (fWeightInitializationString == "XAVIERUNIFORM") {
      fWeightInitialization = DNN::EInitialization::kUniform;
   }
   GetNumValidationSamples();

   std::cout << "Parsed Training DNN string " << fTrainingStrategyString << std::endl;
   std::vector<Double_t>());
   fTrainingSettings.push_back(settings);
   if (fValidationSize.EndsWith("%")) {
      Log() << kFATAL << "Cannot parse number \"" << fValidationSize
            << "\". Expected string like \"20%\" or \"20.0%\"." << Endl;
   } else if (fValidationSize.IsFloat()) {
      Log() << kFATAL << "Cannot parse number \"" << fValidationSize
            << "\". Expected string like \"0.2\" or \"100\"." << Endl;
   }

   Log() << kFATAL << "Validation size \"" << fValidationSize << "\" is negative." << Endl;
   Log() << kFATAL << "Validation size \"" << fValidationSize << "\" is zero." << Endl;
   Log() << kFATAL << "Validation size \"" << fValidationSize
         << "\" is larger than or equal in size to training set (size=\""
         << trainingSetSize << "\")." << Endl;
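// ----------------------------------------------------------------------------
// Editor's sketch (hypothetical helper, not part of MethodDNN.cxx): the
// parsing rule implemented by GetNumValidationSamples() above, shown in
// isolation. A trailing '%' or a value below 1 denotes a fraction of the
// training set; a value of 1 or more is an absolute number of events.
#include "TString.h"
#include <cstddef>

std::size_t ParseValidationSize(const TString &spec, std::size_t trainingSetSize)
{
   if (spec.EndsWith("%")) {
      TString number(spec.Strip(TString::kTrailing, '%'));
      if (!number.IsFloat()) return 0;                      // caller reports kFATAL
      return static_cast<std::size_t>(trainingSetSize * number.Atof() / 100.0);
   }
   if (spec.IsFloat()) {
      const double value = spec.Atof();
      if (value < 1.0)                                      // e.g. "0.2"
         return static_cast<std::size_t>(trainingSetSize * value);
      return static_cast<std::size_t>(value);               // e.g. "100"
   }
   return 0;                                                // caller reports kFATAL
}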
   if (fInteractive && fInteractive->NotInitialized()) {
      std::vector<TString> titles = {"Error on training set", "Error on test set"};
      fInteractive->Init(titles);
   }
   Log() << kFATAL << "Number of samples in the datasets are train: "
         << "One of these is smaller than the batch size of "
         << settings.batchSize << ". Please increase the batch"
         << " size to be at least the same size as the smallest"
         << " of these values." << Endl;
   if (fArchitectureString == "GPU") {
      TrainGpu();
      if (!fExitFromTraining) fIPyMaxIter = fIPyCurrentIter;
      return;
   } else if (fArchitectureString == "OpenCL") {
      Log() << kFATAL << "OpenCL backend not yet supported." << Endl;
      return;
   } else if (fArchitectureString == "CPU") {
      TrainCpu();
      if (!fExitFromTraining) fIPyMaxIter = fIPyCurrentIter;
      return;
   }

   Log() << kINFO << "Using Standard Implementation.";
   const std::vector<TMVA::Event *> &allData = GetEventCollection(Types::kTraining);

   // Training patterns
   const std::vector<Float_t> &values = event->GetValues();
   // Binary classification: class 0 gets target 0.9, other classes get 0.1
   double outputValue = event->GetClass() == 0 ? 0.9 : 0.1;
   event->GetWeight()));
   // Multiclass: one-hot encoded target vector
   std::vector<Float_t> oneHot(DataInfo().GetNClasses(), 0.0);
   oneHot[event->GetClass()] = 1.0;
   event->GetWeight()));
   // Regression: use the event targets directly
   const std::vector<Float_t> &targets = event->GetTargets();
   event->GetWeight()));

   // Test patterns (same encodings)
   const std::vector<Float_t> &values = event->GetValues();
   double outputValue = event->GetClass() == 0 ? 0.9 : 0.1;
   event->GetWeight()));
   std::vector<Float_t> oneHot(DataInfo().GetNClasses(), 0.0);
   oneHot[event->GetClass()] = 1.0;
   event->GetWeight()));
   const std::vector<Float_t> &targets = event->GetTargets();
   event->GetWeight()));
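// ----------------------------------------------------------------------------
// Editor's sketch (hypothetical helpers, not part of MethodDNN.cxx): the
// target encodings used when the Pattern vectors above are filled. Binary
// classification uses soft targets 0.9 (class 0) / 0.1 (other classes),
// multiclass uses a one-hot vector, and regression copies the event targets.
#include <vector>

std::vector<float> EncodeBinaryTarget(unsigned int eventClass)
{
   // Class 0 -> 0.9, any other class -> 0.1.
   return {eventClass == 0 ? 0.9f : 0.1f};
}

std::vector<float> EncodeOneHotTarget(unsigned int eventClass, unsigned int nClasses)
{
   std::vector<float> oneHot(nClasses, 0.0f);
   oneHot[eventClass] = 1.0f;   // only the true class is set to one
   return oneHot;
}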
   std::vector<double> weights;
   net.SetIpythonInteractive(fInteractive, &fExitFromTraining, &fIPyMaxIter, &fIPyCurrentIter);
   net.setInputSize(fNet.GetInputWidth() + 1);
   net.setOutputSize(fNet.GetOutputWidth() + 1);

   for (size_t i = 0; i < fNet.GetDepth(); i++) {
      // Map the layer activation function onto the legacy EnumFunction type.
      switch (fNet.GetLayer(i).GetActivationFunction()) {
      case EActivationFunction::kIdentity: g = EnumFunction::LINEAR;   break;
      case EActivationFunction::kRelu:     g = EnumFunction::RELU;     break;
      case EActivationFunction::kSigmoid:  g = EnumFunction::SIGMOID;  break;
      case EActivationFunction::kTanh:     g = EnumFunction::TANH;     break;
      case EActivationFunction::kFastTanh: g = EnumFunction::TANH;     break;
      case EActivationFunction::kSymmRelu: g = EnumFunction::SYMMRELU; break;
      case EActivationFunction::kSoftSign: g = EnumFunction::SOFTSIGN; break;
      case EActivationFunction::kGauss:    g = EnumFunction::GAUSS;    break;
      }
      if (i < fNet.GetDepth() - 1) {
         net.addLayer(Layer(fNet.GetLayer(i).GetWidth(), g));
      } else {
         // Last layer: also map the output function onto ModeOutputValues.
         switch (fOutputFunction) {
         case EOutputFunction::kIdentity: h = ModeOutputValues::DIRECT;  break;
         case EOutputFunction::kSigmoid:  h = ModeOutputValues::SIGMOID; break;
         case EOutputFunction::kSoftmax:  h = ModeOutputValues::SOFTMAX; break;
         }
         net.addLayer(Layer(fNet.GetLayer(i).GetWidth(), g, h));
      }
   }
   switch (fNet.GetLossFunction()) {
   case ELossFunction::kMeanSquaredError:
      net.setErrorFunction(ModeErrorFunction::SUMOFSQUARES);
      break;
   case ELossFunction::kCrossEntropy:
      net.setErrorFunction(ModeErrorFunction::CROSSENTROPY);
      break;
   case ELossFunction::kSoftmaxCrossEntropy:
      net.setErrorFunction(ModeErrorFunction::CROSSENTROPY_MUTUALEXCLUSIVE);
      break;
   }
   switch (fWeightInitialization) {
   case EInitialization::kGauss:
      net.initializeWeights(WeightInitializationStrategy::XAVIER,
                            std::back_inserter(weights));
      break;
   case EInitialization::kUniform:
      net.initializeWeights(WeightInitializationStrategy::XAVIERUNIFORM,
                            std::back_inserter(weights));
      break;
   default:
      net.initializeWeights(WeightInitializationStrategy::XAVIER,
                            std::back_inserter(weights));
      break;
   }
   for (auto s : fTrainingSettings) {
      switch (s.regularization) {
      case ERegularization::kNone: r = EnumRegularization::NONE; break;
      case ERegularization::kL1:   r = EnumRegularization::L1;   break;
      case ERegularization::kL2:   r = EnumRegularization::L2;   break;
      }
      s.testInterval, s.weightDecay, r,
      MinimizerType::fSteepest, s.learningRate,
      s.momentum, 1, s.multithreading);
      << "Training with learning rate = " << ptrSettings->learningRate()
      << ", repetitions = " << ptrSettings->repetitions()

      Log() << kINFO << "Drop configuration" << Endl
            << " drop repetitions = " << ptrSettings->dropRepetitions()

      Log() << kINFO << " Layer " << idx << " = " << f << Endl;
      Log() << kINFO << Endl;

      Log() << kINFO << Endl;
      for (size_t l = 0; l < fNet.GetDepth(); l++) {
   if (!fExitFromTraining) fIPyMaxIter = fIPyCurrentIter;
   Log() << kINFO << "Start of neural network training on GPU." << Endl << Endl;

   Log() << kDEBUG << "Using " << nTestSamples << " test samples." << Endl;

   fNet.Initialize(fWeightInitialization);

   fInteractive->ClearGraphs();

      net.InitializeGradients();
      Log() << kINFO << "Training phase " << trainingPhase << " of "
            << fTrainingSettings.size() << ":" << Endl;

      Log() << kFATAL << "Inconsistent training sample size" << Endl;
      Log() << kFATAL << "Inconsistent test sample size" << Endl;

      net.GetBatchSize(), net.GetInputWidth(),
      net.GetInputWidth(), net.GetOutputWidth(),

      std::vector<TNet<TCuda<>>> nets{};
      std::vector<TBatch<TCuda<>>> batches{};
      for (size_t i = 0; i < nThreads; i++) {
         for (size_t j = 0; j < net.GetDepth(); j++)
      std::chrono::time_point<std::chrono::system_clock> start, end;
      start = std::chrono::system_clock::now();

      if (!fInteractive) {
         Log() << std::setw(10) << "Epoch" << " | "
               << std::setw(12) << "Train Err."
               << std::setw(12) << "Test Err."
               << std::setw(12) << "GFLOP/s"
               << std::setw(12) << "Conv. Steps" << Endl;
         std::string separator(62, '-');
         Log() << separator << Endl;
      }
         auto inputMatrix  = batch.GetInput();
         auto outputMatrix = batch.GetOutput();

         end = std::chrono::system_clock::now();

         auto inputMatrix  = batch.GetInput();
         auto outputMatrix = batch.GetOutput();

         start = std::chrono::system_clock::now();

         if (fExitFromTraining) break;

         Log() << std::setw(10) << stepCount << " | "
               << std::setw(12) << nFlops / seconds

      for (size_t l = 0; l < net.GetDepth(); l++) {

   Log() << kFATAL << "CUDA backend not enabled. Please make sure "
                      "you have CUDA installed and it was successfully "
                      "detected by CMAKE." << Endl;
   Log() << kINFO << "Start of neural network training on CPU." << Endl << Endl;

   Log() << kDEBUG << "Using " << nTestSamples << " test samples." << Endl;

   fNet.Initialize(fWeightInitialization);

   fInteractive->ClearGraphs();

      Log() << kINFO << "Training phase " << trainingPhase << " of "
            << fTrainingSettings.size() << ":" << Endl;

      net.InitializeGradients();
      const std::vector<Event *> &allData = GetEventCollection(Types::kTraining);

      Log() << kFATAL << "Inconsistent training sample size" << Endl;
      Log() << kFATAL << "Inconsistent test sample size" << Endl;

      net.GetBatchSize(), net.GetInputWidth(),
      net.GetInputWidth(), net.GetOutputWidth(),

      std::vector<TNet<TCpu<>>> nets{};
      std::vector<TBatch<TCpu<>>> batches{};
      for (size_t i = 0; i < nThreads; i++) {
         for (size_t j = 0; j < net.GetDepth(); j++)
      std::chrono::time_point<std::chrono::system_clock> start, end;
      start = std::chrono::system_clock::now();

      if (!fInteractive) {
         Log() << std::setw(10) << "Epoch" << " | "
               << std::setw(12) << "Train Err."
               << std::setw(12) << "Test Err."
               << std::setw(12) << "GFLOP/s"
               << std::setw(12) << "Conv. Steps" << Endl;
         std::string separator(62, '-');
         Log() << separator << Endl;
      }
         auto inputMatrix  = batch.GetInput();
         auto outputMatrix = batch.GetOutput();
         auto weightMatrix = batch.GetWeights();

         end = std::chrono::system_clock::now();

         auto inputMatrix  = batch.GetInput();
         auto outputMatrix = batch.GetOutput();
         auto weightMatrix = batch.GetWeights();

         if (fExitFromTraining) break;

         start = std::chrono::system_clock::now();

         if (fExitFromTraining) break;

         Log() << std::setw(10) << stepCount << " | "
               << std::setw(12) << nFlops / seconds

      for (size_t l = 0; l < net.GetDepth(); l++) {
         auto &layer = fNet.GetLayer(l);

   Log() << kFATAL << "Multi-core CPU backend not enabled. Please make sure "
                      "you have a BLAS implementation and it was successfully "
                      "detected by CMake, and that the imt CMake flag is set." << Endl;
   size_t nVariables = GetEvent()->GetNVariables();

   const std::vector<Float_t> &inputValues = GetEvent()->GetValues();

   fNet.Prediction(YHat, X, fOutputFunction);

   size_t nVariables = GetEvent()->GetNVariables();

   size_t nTargets = std::max(1u, ev->GetNTargets());

   auto net = fNet.CreateClone(1);
   net.Prediction(YHat, X, fOutputFunction);
   for (size_t i = 0; i < nTargets; i++)

   if (fRegressionReturnVal == NULL) {
      fRegressionReturnVal = new std::vector<Float_t>();
   }
   fRegressionReturnVal->clear();

   for (size_t i = 0; i < nTargets; ++i) {

   const Event *evT2 = GetTransformationHandler().InverseTransform(evT);
   for (size_t i = 0; i < nTargets; ++i) {
      fRegressionReturnVal->push_back(evT2->GetTarget(i));
   }

   return *fRegressionReturnVal;
   size_t nVariables = GetEvent()->GetNVariables();

   if (fMulticlassReturnVal == NULL) {
      fMulticlassReturnVal = new std::vector<Float_t>(DataInfo().GetNClasses());
   }

   const std::vector<Float_t> &inputValues = GetEvent()->GetValues();

   fNet.Prediction(YHat, X, fOutputFunction);
   for (size_t i = 0; i < (size_t) YHat.GetNcols(); i++) {
      (*fMulticlassReturnVal)[i] = YHat(0, i);
   }

   return *fMulticlassReturnVal;
   Int_t inputWidth = fNet.GetInputWidth();

   char lossFunction = static_cast<char>(fNet.GetLossFunction());

   gTools().StringFromInt(inputWidth));
   TString(static_cast<char>(fOutputFunction)));

   const auto &layer = fNet.GetLayer(i);
   int activationFunction = static_cast<int>(layer.GetActivationFunction());
   fNet.SetBatchSize(1);

   size_t inputWidth, depth;

   fNet.SetInputWidth(inputWidth);

   for (size_t i = 0; i < depth; i++) {
      ReadMatrixXML(layerXML, "Weights", weights);
      fNet.GetLayer(i).GetWeights() = weights;
      fNet.GetLayer(i).GetBiases()  = biases;
   fRanking = new Ranking(GetName(), "Importance");
   fRanking->AddRank(Rank(GetInputLabel(ivar), 1.0));
   Log() << col << "--- Short description:" << colres << Endl;
   Log() << "The DNN is a feedforward" << Endl;
   Log() << "multilayer perceptron implementation. The DNN has a user-" << Endl;
   Log() << "defined hidden layer architecture, where the number of input (output)" << Endl;
   Log() << "nodes is determined by the input variables (output classes, i.e." << Endl;
   Log() << "signal and background for classification, regression targets, or multiclass classes)." << Endl;
   Log() << col << "--- Performance optimisation:" << colres << Endl;

   const char* txt =
"The DNN supports various options to improve performance in terms of training speed and \n \
1500reduction of overfitting: \n \
1502 - different training settings can be stacked. Such that the initial training \n\
1503 is done with a large learning rate and a large drop out fraction whilst \n \
1504 in a later stage learning rate and drop out can be reduced. \n \
1507 initial training stage: 0.0 for the first layer, 0.5 for later layers. \n \
1508 later training stage: 0.1 or 0.0 for all layers \n \
1509 final training stage: 0.0] \n \
1510 Drop out is a technique where a at each training cycle a fraction of arbitrary \n \
1511 nodes is disabled. This reduces co-adaptation of weights and thus reduces overfitting. \n \
1512 - L1 and L2 regularization are available \n \
1514 [recommended 10 - 150] \n \
1515 Arbitrary mini-batch sizes can be chosen. \n \
1516 - Multithreading \n \
1517 [recommended: True] \n \
1518 Multithreading can be turned on. The minibatches are distributed to the available \n \
1519 cores. The algorithm is lock-free (\"Hogwild!\"-style) for each cycle. \n \
1523 - example: \"TANH|(N+30)*2,TANH|(N+30),LINEAR\" \n \
1525 . two hidden layers (separated by \",\") \n \
1526 . the activation function is TANH (other options: RELU, SOFTSIGN, LINEAR) \n \
1527 . the activation function for the output layer is LINEAR \n \
1528 . the first hidden layer has (N+30)*2 nodes where N is the number of input neurons \n \
1529 . the second hidden layer has N+30 nodes, where N is the number of input neurons \n \
1530 . the number of nodes in the output layer is determined by the number of output nodes \n \
1531 and can therefore not be chosen freely. \n \
1533 \"ErrorStrategy\": \n \
1535 The error of the neural net is determined by a sum-of-squares error function \n \
1536 For regression, this is the only possible choice. \n \
1538 The error of the neural net is determined by a cross entropy function. The \n \
1539 output values are automatically (internally) transformed into probabilities \n \
1540 using a sigmoid function. \n \
1541 For signal/background classification this is the default choice. \n \
1542 For multiclass using cross entropy more than one or no output classes \n \
1543 can be equally true or false (e.g. Event 0: A and B are true, Event 1: \n \
1544 A and C is true, Event 2: C is true, ...) \n \
1545 - MUTUALEXCLUSIVE \n \
1546 In multiclass settings, exactly one of the output classes can be true (e.g. either A or B or C) \n \
1548 \"WeightInitialization\" \n \
1551 \"Xavier Glorot & Yoshua Bengio\"-style of initializing the weights. The weights are chosen randomly \n \
1552 such that the variance of the values of the nodes is preserved for each layer. \n \
1553 - XAVIERUNIFORM \n \
1554 The same as XAVIER, but with uniformly distributed weights instead of gaussian weights \n \
1556 Random values scaled by the layer size \n \
1558 \"TrainingStrategy\" \n \
1559 - example: \"LearningRate=1e-1,Momentum=0.3,ConvergenceSteps=50,BatchSize=30,TestRepetitions=7,WeightDecay=0.0,Renormalize=L2,DropConfig=0.0,DropRepetitions=5|LearningRate=1e-4,Momentum=0.3,ConvergenceSteps=50,BatchSize=20,TestRepetitions=7,WeightDecay=0.001,Renormalize=L2,DropFraction=0.0,DropRepetitions=5\" \n \
1560 - explanation: two stacked training settings separated by \"|\" \n \
1561 . first training setting: \"LearningRate=1e-1,Momentum=0.3,ConvergenceSteps=50,BatchSize=30,TestRepetitions=7,WeightDecay=0.0,Renormalize=L2,DropConfig=0.0,DropRepetitions=5\" \n \
1562 . second training setting : \"LearningRate=1e-4,Momentum=0.3,ConvergenceSteps=50,BatchSize=20,TestRepetitions=7,WeightDecay=0.001,Renormalize=L2,DropFractions=0.0,DropRepetitions=5\" \n \
1563 . LearningRate : \n \
1564 - recommended for classification: 0.1 initially, 1e-4 later \n \
1565 - recommended for regression: 1e-4 and less \n \
1567 preserve a fraction of the momentum for the next training batch [fraction = 0.0 - 1.0] \n \
1568 . Repetitions : \n \
1569 train \"Repetitions\" repetitions with the same minibatch before switching to the next one \n \
1570 . ConvergenceSteps : \n \
1571 Assume that convergence is reached after \"ConvergenceSteps\" cycles where no improvement \n \
1572 of the error on the test samples has been found. (Mind that only at each \"TestRepetitions\" \n \
1573 cycle the test samples are evaluated and thus the convergence is checked) \n \
1575 Size of the mini-batches. \n \
1576 . TestRepetitions \n \
1577 Perform testing the neural net on the test samples each \"TestRepetitions\" cycle \n \
1579 If \"Renormalize\" is set to L1 or L2, \"WeightDecay\" provides the renormalization factor \n \
1581 NONE, L1 (|w|) or L2 (w^2) \n \
1583 Drop a fraction of arbitrary nodes of each of the layers according to the values given \n \
1584 in the DropConfig. \n \
1585 [example: DropConfig=0.0+0.5+0.3 \n \
1586 meaning: drop no nodes in layer 0 (input layer), half of the nodes in layer 1 and 30% of the nodes \n \
1588 recommended: leave all the nodes turned on for the input layer (layer 0) \n \
1589 turn off half of the nodes in later layers for the initial training; leave all nodes \n \
1590 turned on (0.0) in later training stages] \n \
1591 . DropRepetitions \n \
1592 Each \"DropRepetitions\" cycle the configuration of which nodes are dropped is changed \n \
1593 [recommended : 1] \n \
1594 . Multithreading \n \
1595 turn on multithreading [recommended: True] \n \
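// ----------------------------------------------------------------------------
// Editor's usage sketch (hypothetical, not part of MethodDNN.cxx): applying a
// trained DNN with TMVA::Reader, complementing the booking options described
// in the help text above. The variable names and the weight-file path are
// illustrative only.
#include "TMVA/Reader.h"

float EvaluateOneEvent(float x1, float x2)
{
   static TMVA::Reader reader("!Color:!Silent");
   static float var1 = 0.f, var2 = 0.f;
   static bool booked = false;
   if (!booked) {
      // Variables must be registered in the same order as during training.
      reader.AddVariable("var1", &var1);
      reader.AddVariable("var2", &var2);
      reader.BookMVA("DNN", "dataset/weights/TMVAClassification_DNN.weights.xml");
      booked = true;
   }
   var1 = x1;
   var2 = x2;
   return reader.EvaluateMVA("DNN");   // DNN response for this event
}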