fWeightInitialization(), fOutputFunction(), fLayoutString(), fErrorStrategy(),
fTrainingStrategyString(), fWeightInitializationString(), fArchitectureString(),
fTrainingSettings(), fResume(false), fSettings()
132 "Layou of the network.");
136 "Loss function: Mean squared error (regression)" 137 " or cross entropy (binary classifcation).");
142 "WeightInitialization",
143 "Weight initialization strategy");
149 "Which architecture to perfrom the training on.");
159 "ConvergenceSteps=50," 165 "DropRepetitions=5|LearningRate=1e-4," 168 "ConvergenceSteps=50," 173 "DropConfig=0.0+0.5+0.5," 175 "Multithreading=True",
177 "Defines the training strategies.");
const TString layerDelimiter(",");
const TString subDelimiter("|");
const size_t inputSize = GetNvar();
TObjArray* layerStrings = layoutString.Tokenize(layerDelimiter);
TIter nextLayer (layerStrings);
for (; layerString != nullptr; layerString = (TObjString*) nextLayer()) {
TIter nextToken (subStrings);
for (; token != nullptr; token = (TObjString *) nextToken()) {
if (strActFnc == "RELU") {
} else if (strActFnc == "TANH") {
} else if (strActFnc == "SYMMRELU") {
} else if (strActFnc == "SOFTSIGN") {
} else if (strActFnc == "SIGMOID") {
} else if (strActFnc == "LINEAR") {
} else if (strActFnc == "GAUSS") {
strNumNodes.ReplaceAll ("N", strN);
strNumNodes.ReplaceAll ("n", strN);
numNodes = fml.Eval (inputSize);
layout.push_back(std::make_pair(numNodes, activationFunction));
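// Illustration (not part of the original file): with 10 input variables the
// layout string "TANH|(N+30)*2,TANH|(N+30),LINEAR" describes two TANH hidden
// layers of (10+30)*2 = 80 and 10+30 = 40 nodes plus a LINEAR output layer
// whose width is fixed later from the number of outputs; each comma-separated
// token is parsed here into one (numNodes, activationFunction) pair.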
const TString keyValueDelim ("=");
TObjArray* blockStrings = parseString.Tokenize (blockDelim);
TIter nextBlock (blockStrings);
for (; blockString != nullptr; blockString = (TObjString *) nextBlock())
blockKeyValues.push_back (std::map<TString,TString>());
std::map<TString,TString>& currentBlock = blockKeyValues.back ();
TIter nextToken (subStrings);
for (; token != nullptr; token = (TObjString *) nextToken())
int delimPos = strKeyValue.First (keyValueDelim.Data ());
TString strValue = TString (strKeyValue (delimPos+1, strKeyValue.Length ()));
currentBlock.insert (std::make_pair (strKey, strValue));
return blockKeyValues;
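// Illustration (not part of the original file): for a training-strategy string
// such as "LearningRate=1e-1,Momentum=0.3|LearningRate=1e-4,Momentum=0.3",
// ParseKeyValueString with blockDelim "|" and tokenDelim "," returns two
// key/value blocks, one per "|"-separated setting; each block later fills one
// TTrainingSettings entry.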
std::map<TString, TString>::const_iterator it = keyValueMap.find (key);
if (it == keyValueMap.end()) {
template <typename T>
T fetchValue(const std::map<TString,TString>& keyValueMap,
TString value (fetchValue (keyValueMap, key));
return value.Atoi ();
double fetchValue (const std::map<TString,TString>& keyValueMap,
                   TString key, double defaultValue)
TString value (fetchValue (keyValueMap, key));
return value.Atof ();
TString value (fetchValue (keyValueMap, key));
bool fetchValue (const std::map<TString,TString>& keyValueMap,
                 TString key, bool defaultValue)
TString value (fetchValue (keyValueMap, key));
if (value == "TRUE" || value == "T" || value == "1") {
std::vector<double> fetchValue(const std::map<TString, TString> & keyValueMap,
                               std::vector<double> defaultValue)
TString parseString (fetchValue (keyValueMap, key));
if (parseString == "") {
std::vector<double> values;
const TString tokenDelim ("+");
TIter nextToken (tokenStrings);
for (; tokenString != NULL; tokenString = (TObjString*) nextToken ()) {
std::stringstream sstr;
sstr >> currentValue;
values.push_back (currentValue);
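// Sketch (hypothetical calls, not part of the original file): given one parsed
// settings block, the fetchValue overloads above are used like
//    double lr = fetchValue(block, "LearningRate", 1e-5);
//    int    bs = fetchValue(block, "BatchSize", 32);
//    bool   mt = fetchValue(block, "Multithreading", false);
//    std::vector<double> drop = fetchValue(block, "DropConfig", std::vector<double>());
// and each call returns the supplied default when the key is absent or empty.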
<< "Will ignore negative events in training!"
size_t outputSize = 1;
} else if (DataInfo().GetNClasses() > 2) {
auto itLayout = std::begin (fLayout);
auto itLayoutEnd = std::end (fLayout)-1;
for ( ; itLayout != itLayoutEnd; ++itLayout) {
Log () << kWARNING << "For regression only SUMOFSQUARES is a valid "
       << " neural net error function. Setting error function to "
       << " SUMOFSQUARES now." << Endl;
for (auto& block : strategyKeyValues) {
std::vector<Double_t>());
if (regularization == "L1") {
} else if (regularization == "L2") {
std::vector<TString> titles = {"Error on training set", "Error on test set"};
Log() << kFATAL << "OpenCL backend not yet supported." << Endl;
Log() << kINFO << "Using Standard Implementation.";
std::vector<Pattern> trainPattern;
std::vector<Pattern> testPattern;
for (auto &event : eventCollectionTraining) {
   const std::vector<Float_t>& values = event->GetValues();
   double outputValue = event->GetClass () == 0 ? 0.9 : 0.1;
   trainPattern.push_back(Pattern (values.begin(),
                                   event->GetWeight()));
   trainPattern.back().addInput(1.0);
   std::vector<Float_t> oneHot(DataInfo().GetNClasses(), 0.0);
   oneHot[event->GetClass()] = 1.0;
   trainPattern.push_back(Pattern (values.begin(), values.end(),
                                   oneHot.cbegin(), oneHot.cend(),
                                   event->GetWeight()));
   trainPattern.back().addInput(1.0);
   const std::vector<Float_t>& targets = event->GetTargets ();
   trainPattern.push_back(Pattern(values.begin(),
                                  event->GetWeight ()));
   trainPattern.back ().addInput (1.0);
for (auto &event : eventCollectionTesting) {
   const std::vector<Float_t>& values = event->GetValues();
   double outputValue = event->GetClass () == 0 ? 0.9 : 0.1;
   testPattern.push_back(Pattern (values.begin(),
                                  event->GetWeight()));
   testPattern.back().addInput(1.0);
   std::vector<Float_t> oneHot(DataInfo().GetNClasses(), 0.0);
   oneHot[event->GetClass()] = 1.0;
   testPattern.push_back(Pattern (values.begin(), values.end(),
                                  oneHot.cbegin(), oneHot.cend(),
                                  event->GetWeight()));
   testPattern.back().addInput(1.0);
   const std::vector<Float_t>& targets = event->GetTargets ();
   testPattern.push_back(Pattern(values.begin(),
                                 event->GetWeight ()));
   testPattern.back ().addInput (1.0);
std::vector<double> weights;
case EActivationFunction::kRelu:     g = EnumFunction::RELU;     break;
case EActivationFunction::kSymmRelu: g = EnumFunction::SYMMRELU; break;
case EActivationFunction::kSoftSign: g = EnumFunction::SOFTSIGN; break;
case ELossFunction::kMeanSquaredError:
case ELossFunction::kSoftmaxCrossEntropy:
std::back_inserter(weights));
std::back_inserter(weights));
std::back_inserter(weights));
switch(s.regularization) {
case ERegularization::kL1: r = EnumRegularization::L1; break;
case ERegularization::kL2: r = EnumRegularization::L2; break;
s.testInterval, s.weightDecay, r,
s.momentum, 1, s.multithreading);
std::shared_ptr<Settings> ptrSettings(settings);
ptrSettings->setMonitoring (0);
<< "Training with learning rate = " << ptrSettings->learningRate ()
<< ", momentum = " << ptrSettings->momentum ()
<< ", repetitions = " << ptrSettings->repetitions ()
ptrSettings->setProgressLimits ((idxSetting)*100.0/(fSettings.size ()),
                                (idxSetting+1)*100.0/(fSettings.size ()));
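// Worked example (illustrative): with two stacked training settings,
// fSettings.size() == 2, so setting 0 reports its progress in the 0-50% window
// and setting 1 in the 50-100% window (idxSetting*100/2 to (idxSetting+1)*100/2).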
const std::vector<double>& dropConfig = ptrSettings->dropFractions ();
if (!dropConfig.empty ()) {
   << " drop repetitions = " << ptrSettings->dropRepetitions()
for (auto f : dropConfig) {
ptrSettings->momentum(),
ptrSettings->repetitions());
net.train(weights, trainPattern, testPattern, minimizer, *ptrSettings.get());
size_t weightIndex = 0;
for (Int_t j = 0; j < layerWeights.GetNcols(); j++) {
   for (Int_t i = 0; i < layerWeights.GetNrows(); i++) {
      layerWeights(i,j) = weights[weightIndex];
for (Int_t i = 0; i < layerBiases.GetNrows(); i++) {
   layerBiases(i,0) = weights[weightIndex];
for (Int_t i = 0; i < layerBiases.GetNrows(); i++) {
   layerBiases(i,0) = 0.0;
#ifdef DNNCUDA // Included only if DNNCUDA flag is set.
Log() << kINFO << "Start of neural network training on GPU." << Endl;
size_t trainingPhase = 1;
std::vector<Double_t> dropoutVector(settings.dropoutProbabilities);
for (auto & p : dropoutVector) {
auto testNet = net.CreateClone(settings.batchSize);
Log() << kINFO << "Training phase " << trainingPhase << " of "
      << fTrainingSettings.size() << ":" << Endl;
testNet.GetBatchSize(),
settings.convergenceSteps,
settings.testInterval);
std::vector<TNet<TCuda<>>> nets{};
std::vector<TBatch<TCuda<>>> batches{};
nets.reserve(nThreads);
for (size_t i = 0; i < nThreads; i++) {
for (size_t j = 0; j < net.GetDepth(); j++)
auto &masterLayer = net.GetLayer(j);
auto &layer = nets.back().GetLayer(j);
masterLayer.GetWeights());
masterLayer.GetBiases());
bool converged = false;
size_t stepCount = 0;
size_t batchesInEpoch = nTrainingSamples / net.GetBatchSize();
std::chrono::time_point<std::chrono::system_clock> start, end;
start = std::chrono::system_clock::now();
Log() << std::setw(10) << "Epoch" << " | "
      << std::setw(12) << "Train Err."
      << std::setw(12) << "Test Err."
      << std::setw(12) << "GFLOP/s"
      << std::setw(12) << "Conv. Steps" << Endl;
trainingData.Shuffle();
for (size_t i = 0; i < batchesInEpoch; i += nThreads) {
   for (size_t j = 0; j < nThreads; j++) {
      batches.reserve(nThreads);
      batches.push_back(trainingData.GetBatch());
   if (settings.momentum > 0.0) {
      minimizer.StepMomentum(net, nets, batches, settings.momentum);
      minimizer.Step(net, nets, batches);
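// The batches collected above are distributed to the worker nets in 'nets';
// StepMomentum/Step then apply the resulting gradients to the master net
// 'net', one parallel update per group of nThreads batches.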
if ((stepCount % minimizer.GetTestInterval()) == 0) {
   for (auto batch : testData) {
      auto inputMatrix = batch.GetInput();
      auto outputMatrix = batch.GetOutput();
      testError += testNet.Loss(inputMatrix, outputMatrix);
   testError /= (Double_t) (nTestSamples / settings.batchSize);
end = std::chrono::system_clock::now();
for (auto batch : trainingData) {
   auto inputMatrix = batch.GetInput();
   auto outputMatrix = batch.GetOutput();
   trainingError += net.Loss(inputMatrix, outputMatrix);
trainingError /= (Double_t) (nTrainingSamples / settings.batchSize);
std::chrono::duration<double> elapsed_seconds = end - start;
double seconds = elapsed_seconds.count();
double nFlops = (double) (settings.testInterval * batchesInEpoch);
converged = minimizer.HasConverged(testError);
start = std::chrono::system_clock::now();
/ minimizer.GetConvergenceSteps ();
Log() << std::setw(10) << stepCount << " | "
      << std::setw(12) << trainingError
      << std::setw(12) << testError
      << std::setw(12) << nFlops / seconds
      << std::setw(12) << minimizer.GetConvergenceCount() << Endl;
#else // DNNCUDA flag not set.
Log() << kFATAL << "CUDA backend not enabled. Please make sure "
      "you have CUDA installed and it was successfully "
      "detected by CMake." << Endl;
#ifdef DNNCPU // Included only if DNNCPU flag is set.
Log() << kINFO << "Start of neural network training on CPU." << Endl << Endl;
size_t trainingPhase = 1;
Log() << "Training phase " << trainingPhase << " of "
      << fTrainingSettings.size() << ":" << Endl;
std::vector<Double_t> dropoutVector(settings.dropoutProbabilities);
for (auto & p : dropoutVector) {
auto testNet = net.CreateClone(settings.batchSize);
testNet.GetBatchSize(),
settings.convergenceSteps,
settings.testInterval);
std::vector<TNet<TCpu<>>> nets{};
std::vector<TBatch<TCpu<>>> batches{};
nets.reserve(nThreads);
for (size_t i = 0; i < nThreads; i++) {
for (size_t j = 0; j < net.GetDepth(); j++)
auto &masterLayer = net.GetLayer(j);
auto &layer = nets.back().GetLayer(j);
masterLayer.GetWeights());
masterLayer.GetBiases());
bool converged = false;
size_t stepCount = 0;
size_t batchesInEpoch = nTrainingSamples / net.GetBatchSize();
std::chrono::time_point<std::chrono::system_clock> start, end;
start = std::chrono::system_clock::now();
Log() << std::setw(10) << "Epoch" << " | "
      << std::setw(12) << "Train Err."
      << std::setw(12) << "Test Err."
      << std::setw(12) << "GFLOP/s"
      << std::setw(12) << "Conv. Steps" << Endl;
trainingData.Shuffle();
for (size_t i = 0; i < batchesInEpoch; i += nThreads) {
   for (size_t j = 0; j < nThreads; j++) {
      batches.reserve(nThreads);
      batches.push_back(trainingData.GetBatch());
   if (settings.momentum > 0.0) {
      minimizer.StepMomentum(net, nets, batches, settings.momentum);
      minimizer.Step(net, nets, batches);
if ((stepCount % minimizer.GetTestInterval()) == 0) {
   for (auto batch : testData) {
      auto inputMatrix = batch.GetInput();
      auto outputMatrix = batch.GetOutput();
      testError += testNet.Loss(inputMatrix, outputMatrix);
   testError /= (Double_t) (nTestSamples / settings.batchSize);
end = std::chrono::system_clock::now();
for (auto batch : trainingData) {
   auto inputMatrix = batch.GetInput();
   auto outputMatrix = batch.GetOutput();
   trainingError += net.Loss(inputMatrix, outputMatrix);
trainingError /= (Double_t) (nTrainingSamples / settings.batchSize);
fIPyCurrentIter = 100*(double)minimizer.GetConvergenceCount() / (double)settings.convergenceSteps;
std::chrono::duration<double> elapsed_seconds = end - start;
double seconds = elapsed_seconds.count();
double nFlops = (double) (settings.testInterval * batchesInEpoch);
converged = minimizer.HasConverged(testError);
start = std::chrono::system_clock::now();
/ minimizer.GetConvergenceSteps ();
Log() << std::setw(10) << stepCount << " | "
      << std::setw(12) << trainingError
      << std::setw(12) << testError
      << std::setw(12) << nFlops / seconds
      << std::setw(12) << minimizer.GetConvergenceCount() << Endl;
#else // DNNCPU flag not set.
Log() << kFATAL << "Multi-core CPU backend not enabled. Please make sure "
      "you have a BLAS implementation, that it was successfully "
      "detected by CMake, and that the imt CMake flag is set." << Endl;
for (size_t i = 0; i < nVariables; i++) {
   X(0,i) = inputValues[i];
const std::vector<Float_t>& inputValues = ev->GetValues();
for (size_t i = 0; i < nVariables; i++) {
   X(0,i) = inputValues[i];
size_t nTargets = std::max(1u, ev->GetNTargets());
std::vector<Float_t> output(nTargets);
for (size_t i = 0; i < nTargets; i++)
   output[i] = YHat(0, i);
for (size_t i = 0; i < nTargets; ++i) {
for (size_t i = 0; i < nTargets; ++i) {
for (size_t i = 0; i < nVariables; i++) {
   X(0,i) = inputValues[i];
for (size_t i = 0; i < (size_t) YHat.GetNcols(); i++) {
   (*fMulticlassReturnVal)[i] = YHat(0, i);
gTools().StringFromInt(inputWidth));
for (Int_t i = 0; i < depth; i++) {
int activationFunction = static_cast<int>(layer.GetActivationFunction());
size_t inputWidth, depth;
char lossFunctionChar;
char outputFunctionChar;
size_t previousWidth = inputWidth;
for (size_t i = 0; i < depth; i++) {
previousWidth = width;
Log() << col << "--- Short description:" << colres << Endl;
Log() << "The DNN neural network is a feedforward" << Endl;
Log() << "multilayer perceptron implementation. The DNN has a user-" << Endl;
Log() << "defined hidden layer architecture, where the number of input (output)" << Endl;
Log() << "nodes is determined by the input variables (output classes, i.e., " << Endl;
Log() << "signal and one background, regression or multiclass). " << Endl;
Log() << col << "--- Performance optimisation:" << colres << Endl;
const char* txt =
   "The DNN supports various options to improve performance in terms of training speed and \n \
reduction of overfitting: \n \
   - different training settings can be stacked, such that the initial training \n\
     is done with a large learning rate and a large drop out fraction whilst \n \
     in a later stage learning rate and drop out can be reduced. \n \
     initial training stage: 0.0 for the first layer, 0.5 for later layers. \n \
     later training stage: 0.1 or 0.0 for all layers \n \
     final training stage: 0.0] \n \
     Drop out is a technique where at each training cycle a fraction of arbitrary \n \
     nodes is disabled. This reduces co-adaptation of weights and thus reduces overfitting. \n \
   - L1 and L2 regularization are available \n \
     [recommended 10 - 150] \n \
     Arbitrary mini-batch sizes can be chosen. \n \
   - Multithreading \n \
     [recommended: True] \n \
     Multithreading can be turned on. The minibatches are distributed to the available \n \
     cores. The algorithm is lock-free (\"Hogwild!\"-style) for each cycle. \n \
   - example: \"TANH|(N+30)*2,TANH|(N+30),LINEAR\" \n \
     . two hidden layers (separated by \",\") \n \
     . the activation function is TANH (other options: RELU, SOFTSIGN, LINEAR) \n \
     . the activation function for the output layer is LINEAR \n \
     . the first hidden layer has (N+30)*2 nodes where N is the number of input neurons \n \
     . the second hidden layer has N+30 nodes, where N is the number of input neurons \n \
     . the number of nodes in the output layer is determined by the number of output nodes \n \
       and can therefore not be chosen freely. \n \
   \"ErrorStrategy\": \n \
     The error of the neural net is determined by a sum-of-squares error function. \n \
     For regression, this is the only possible choice. \n \
     The error of the neural net is determined by a cross entropy function. The \n \
     output values are automatically (internally) transformed into probabilities \n \
     using a sigmoid function. \n \
     For signal/background classification this is the default choice. \n \
     For multiclass using cross entropy more than one or no output classes \n \
     can be equally true or false (e.g. Event 0: A and B are true, Event 1: \n \
     A and C are true, Event 2: C is true, ...) \n \
   - MUTUALEXCLUSIVE \n \
     In multiclass settings, exactly one of the output classes can be true (e.g. either A or B or C) \n \
   \"WeightInitialization\" \n \
     \"Xavier Glorot & Yoshua Bengio\"-style of initializing the weights. The weights are chosen randomly \n \
     such that the variance of the values of the nodes is preserved for each layer. \n \
   - XAVIERUNIFORM \n \
     The same as XAVIER, but with uniformly distributed weights instead of gaussian weights \n \
     Random values scaled by the layer size \n \
   \"TrainingStrategy\" \n \
   - example: \"LearningRate=1e-1,Momentum=0.3,ConvergenceSteps=50,BatchSize=30,TestRepetitions=7,WeightDecay=0.0,Renormalize=L2,DropConfig=0.0,DropRepetitions=5|LearningRate=1e-4,Momentum=0.3,ConvergenceSteps=50,BatchSize=20,TestRepetitions=7,WeightDecay=0.001,Renormalize=L2,DropFraction=0.0,DropRepetitions=5\" \n \
   - explanation: two stacked training settings separated by \"|\" \n \
     . first training setting: \"LearningRate=1e-1,Momentum=0.3,ConvergenceSteps=50,BatchSize=30,TestRepetitions=7,WeightDecay=0.0,Renormalize=L2,DropConfig=0.0,DropRepetitions=5\" \n \
     . second training setting: \"LearningRate=1e-4,Momentum=0.3,ConvergenceSteps=50,BatchSize=20,TestRepetitions=7,WeightDecay=0.001,Renormalize=L2,DropFractions=0.0,DropRepetitions=5\" \n \
     . LearningRate: \n \
       - recommended for classification: 0.1 initially, 1e-4 later \n \
       - recommended for regression: 1e-4 and less \n \
       preserve a fraction of the momentum for the next training batch [fraction = 0.0 - 1.0] \n \
     . Repetitions: \n \
       train \"Repetitions\" repetitions with the same minibatch before switching to the next one \n \
     . ConvergenceSteps: \n \
       Assume that convergence is reached after \"ConvergenceSteps\" cycles where no improvement \n \
       of the error on the test samples has been found. (Mind that only at each \"TestRepetitions\" \n \
       cycle the test samples are evaluated and thus the convergence is checked) \n \
       Size of the mini-batches. \n \
     . TestRepetitions \n \
       Test the neural net on the test samples each \"TestRepetitions\" cycle \n \
       If \"Renormalize\" is set to L1 or L2, \"WeightDecay\" provides the renormalization factor \n \
       NONE, L1 (|w|) or L2 (w^2) \n \
       Drop a fraction of arbitrary nodes of each of the layers according to the values given \n \
       in the DropConfig. \n \
       [example: DropConfig=0.0+0.5+0.3 \n \
        meaning: drop no nodes in layer 0 (input layer), half of the nodes in layer 1 and 30% of the nodes \n \
       recommended: leave all the nodes turned on for the input layer (layer 0) \n \
                    turn off half of the nodes in later layers for the initial training; leave all nodes \n \
                    turned on (0.0) in later training stages] \n \
     . DropRepetitions \n \
       Each \"DropRepetitions\" cycle the configuration of which nodes are dropped is changed \n \
       [recommended: 1] \n \
     . Multithreading \n \
       turn on multithreading [recommended: True] \n \
";
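// Usage sketch (not part of this file; file names, tree names, variable names
// and the method title are hypothetical): booking MethodDNN through the TMVA
// Factory with the option grammar documented above (Layout, ErrorStrategy,
// WeightInitialization, Architecture, TrainingStrategy).
#include "TFile.h"
#include "TTree.h"
#include "TMVA/Factory.h"
#include "TMVA/DataLoader.h"
#include "TMVA/Types.h"

void BookDNNExample()
{
   TFile *input  = TFile::Open("input.root");            // hypothetical input file
   TFile *output = TFile::Open("TMVA_DNN.root", "RECREATE");

   TMVA::Factory factory("TMVAClassification", output,
                         "!V:!Silent:AnalysisType=Classification");
   TMVA::DataLoader loader("dataset");
   loader.AddVariable("var1");
   loader.AddVariable("var2");
   loader.AddSignalTree((TTree*) input->Get("TreeS"));
   loader.AddBackgroundTree((TTree*) input->Get("TreeB"));
   loader.PrepareTrainingAndTestTree("", "SplitMode=Random:NormMode=NumEvents");

   // Two stacked training settings separated by "|", as described in the help text.
   factory.BookMethod(&loader, TMVA::Types::kDNN, "DNN",
      "Layout=TANH|(N+30)*2,TANH|(N+30),LINEAR:"
      "ErrorStrategy=CROSSENTROPY:"
      "WeightInitialization=XAVIERUNIFORM:"
      "Architecture=CPU:"
      "TrainingStrategy=LearningRate=1e-1,Momentum=0.3,ConvergenceSteps=50,"
      "BatchSize=30,TestRepetitions=7,WeightDecay=0.0,DropConfig=0.0+0.5+0.5,"
      "DropRepetitions=5|LearningRate=1e-4,Momentum=0.3,ConvergenceSteps=50,"
      "BatchSize=20,TestRepetitions=7,WeightDecay=0.001,DropConfig=0.0,"
      "DropRepetitions=5");

   factory.TrainAllMethods();
}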