NeuralNet.cxx
#include "TMVA/NeuralNet.h"

#include "TMVA/MethodDNN.h"

namespace TMVA
{
namespace DNN
{

std::shared_ptr<std::function<double(double)>> Gauss = std::make_shared<std::function<double(double)>> ([](double value){ const double s = 6.0; return exp (-std::pow (value*s, 2.0)); });
std::shared_ptr<std::function<double(double)>> GaussComplement = std::make_shared<std::function<double(double)>> ([](double value){ const double s = 6.0; return 1.0 - exp (-std::pow (value*s, 2.0)); });
std::shared_ptr<std::function<double(double)>> InvGauss = std::make_shared<std::function<double(double)>> ([](double value){ const double s = 6.0; return -2.0 * value * s*s * (*Gauss.get ()) (value); });
std::shared_ptr<std::function<double(double)>> InvGaussComplement = std::make_shared<std::function<double(double)>> ([](double value){ const double s = 6.0; return +2.0 * value * s*s * (*GaussComplement.get ()) (value); });
std::shared_ptr<std::function<double(double)>> InvLinear = std::make_shared<std::function<double(double)>> ([](double /*value*/){ return 1.0; });
std::shared_ptr<std::function<double(double)>> InvReLU = std::make_shared<std::function<double(double)>> ([](double value){ const double margin = 0.0; return value > margin ? 1.0 : 0.0; });
std::shared_ptr<std::function<double(double)>> InvSigmoid = std::make_shared<std::function<double(double)>> ([](double value){ double s = (*Sigmoid.get ()) (value); return s*(1.0-s); });
std::shared_ptr<std::function<double(double)>> InvSoftPlus = std::make_shared<std::function<double(double)>> ([](double value){ return 1.0 / (1.0 + std::exp (-value)); });
std::shared_ptr<std::function<double(double)>> InvSoftSign = std::make_shared<std::function<double(double)>> ([](double value){ return std::pow (1.0 - fabs (value), 2.0); });
std::shared_ptr<std::function<double(double)>> InvSymmReLU = std::make_shared<std::function<double(double)>> ([](double value){ const double margin = 0.3; return value > margin ? 1.0 : value < -margin ? 1.0 : 0.0; });
std::shared_ptr<std::function<double(double)>> InvTanh = std::make_shared<std::function<double(double)>> ([](double value){ return 1.0 - std::pow (value, 2.0); });
std::shared_ptr<std::function<double(double)>> InvTanhShift = std::make_shared<std::function<double(double)>> ([](double value){ return 0.3 + (1.0 - std::pow (value, 2.0)); });
std::shared_ptr<std::function<double(double)>> Linear = std::make_shared<std::function<double(double)>> ([](double value){ return value; });
std::shared_ptr<std::function<double(double)>> ReLU = std::make_shared<std::function<double(double)>> ([](double value){ const double margin = 0.0; return value > margin ? value-margin : 0.0; });
std::shared_ptr<std::function<double(double)>> Sigmoid = std::make_shared<std::function<double(double)>> ([](double value){ value = std::max (-100.0, std::min (100.0, value)); return 1.0/(1.0 + std::exp (-value)); });
std::shared_ptr<std::function<double(double)>> SoftPlus = std::make_shared<std::function<double(double)>> ([](double value){ return std::log (1.0 + std::exp (value)); });
std::shared_ptr<std::function<double(double)>> ZeroFnc = std::make_shared<std::function<double(double)>> ([](double /*value*/){ return 0.0; });
std::shared_ptr<std::function<double(double)>> Tanh = std::make_shared<std::function<double(double)>> ([](double value){ return tanh (value); });
std::shared_ptr<std::function<double(double)>> SymmReLU = std::make_shared<std::function<double(double)>> ([](double value){ const double margin = 0.3; return value > margin ? value-margin : value < -margin ? value+margin : 0.0; });
std::shared_ptr<std::function<double(double)>> TanhShift = std::make_shared<std::function<double(double)>> ([](double value){ return tanh (value-0.3); });
std::shared_ptr<std::function<double(double)>> SoftSign = std::make_shared<std::function<double(double)>> ([](double value){ return value / (1.0 + fabs (value)); });
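
// Note (hedged): the Inv* objects are the derivatives of the matching
// activation functions. Several appear to be written in terms of the
// activation *output* y rather than the raw input x (e.g. InvTanh computes
// 1 - y*y, which equals d/dx tanh(x) when y = tanh(x)), so callers are
// presumably expected to pass the already-activated node value.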

double gaussDouble (double mean, double sigma)
{
   static std::default_random_engine generator;
   std::normal_distribution<double> distribution (mean, sigma);
   return distribution (generator);
}

double uniformDouble (double minValue, double maxValue)
{
   static std::default_random_engine generator;
   std::uniform_real_distribution<double> distribution (minValue, maxValue);
   return distribution (generator);
}

int randomInt (int maxValue)
{
   static std::default_random_engine generator;
   std::uniform_int_distribution<int> distribution (0, maxValue-1);
   return distribution (generator);
}

double studenttDouble (double distributionParameter)
{
   static std::default_random_engine generator;
   std::student_t_distribution<double> distribution (distributionParameter);
   return distribution (generator);
}
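
// Note (hedged usage sketch): each helper above keeps its own function-local
// static engine, default-seeded, so a given helper produces the same sequence
// on every program run and is not safe to call concurrently without external
// synchronisation. For example:
//
//    double w = TMVA::DNN::gaussDouble (0.0, 0.1); // N(0, 0.1) weight init
//    int idx  = TMVA::DNN::randomInt (10);         // uniform in [0, 9]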

LayerData::LayerData (size_t inputSize)
   : m_hasDropOut (false)
   , m_isInputLayer (true)
   , m_hasWeights (false)
   , m_hasGradients (false)
   , m_eModeOutput (ModeOutputValues::DIRECT)
{
   m_size = inputSize;
   m_deltas.assign (m_size, 0);
}

LayerData::LayerData (const_iterator_type itInputBegin, const_iterator_type itInputEnd, ModeOutputValues eModeOutput)
   : m_hasDropOut (false)
   , m_isInputLayer (true)
   , m_hasWeights (false)
   , m_hasGradients (false)
   , m_eModeOutput (eModeOutput)
{
   m_itInputBegin = itInputBegin;
   m_itInputEnd = itInputEnd;
   m_size = std::distance (itInputBegin, itInputEnd);
   m_deltas.assign (m_size, 0);
}

LayerData::LayerData (size_t _size,
                      const_iterator_type itWeightBegin,
                      iterator_type itGradientBegin,
                      std::shared_ptr<std::function<double(double)>> _activationFunction,
                      std::shared_ptr<std::function<double(double)>> _inverseActivationFunction,
                      ModeOutputValues eModeOutput)
   : m_size (_size)
   , m_hasDropOut (false)
   , m_itConstWeightBegin (itWeightBegin)
   , m_itGradientBegin (itGradientBegin)
   , m_activationFunction (_activationFunction)
   , m_inverseActivationFunction (_inverseActivationFunction)
   , m_isInputLayer (false)
   , m_hasWeights (true)
   , m_hasGradients (true)
   , m_eModeOutput (eModeOutput)
{
   m_values.assign (_size, 0);
   m_deltas.assign (_size, 0);
   m_valueGradients.assign (_size, 0);
}

LayerData::LayerData (size_t _size, const_iterator_type itWeightBegin,
                      std::shared_ptr<std::function<double(double)>> _activationFunction,
                      ModeOutputValues eModeOutput)
   : m_size (_size)
   , m_hasDropOut (false)
   , m_itConstWeightBegin (itWeightBegin)
   , m_activationFunction (_activationFunction)
   , m_inverseActivationFunction ()
   , m_isInputLayer (false)
   , m_hasWeights (true)
   , m_hasGradients (false)
   , m_eModeOutput (eModeOutput)
{
   m_values.assign (_size, 0);
}

LayerData::container_type LayerData::computeProbabilities () const
{
   container_type probabilitiesContainer;
   if (isFlagSet (ModeOutputValues::SIGMOID, m_eModeOutput))
   {
      std::transform (begin (m_values), end (m_values), std::back_inserter (probabilitiesContainer), (*Sigmoid.get ()));
   }
   else if (isFlagSet (ModeOutputValues::SOFTMAX, m_eModeOutput))
   {
      double sum = 0;
      probabilitiesContainer = m_values;
      std::for_each (begin (probabilitiesContainer), end (probabilitiesContainer), [&sum](double& p){ p = std::exp (p); sum += p; });
      if (sum != 0)
         std::for_each (begin (probabilitiesContainer), end (probabilitiesContainer), [sum](double& p){ p /= sum; });
   }
   else
   {
      probabilitiesContainer.assign (begin (m_values), end (m_values));
   }
   return probabilitiesContainer;
}
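
// Note (hedged): the SOFTMAX branch exponentiates the raw node values
// directly, so very large activations can overflow std::exp. The usual
// stabilisation computes p_i = exp(v_i - m) / sum_k exp(v_k - m) with
// m = max_k v_k, which yields identical probabilities; the clamping inside
// Sigmoid serves a similar purpose for the SIGMOID branch.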

Layer::Layer (size_t _numNodes, EnumFunction _activationFunction, ModeOutputValues eModeOutputValues)
   : m_numNodes (_numNodes)
   , m_eModeOutputValues (eModeOutputValues)
   , m_activationFunctionType (_activationFunction)
{
   for (size_t iNode = 0; iNode < _numNodes; ++iNode)
   {
      auto actFnc = Linear;
      auto invActFnc = InvLinear;
      switch (_activationFunction)
      {
      case EnumFunction::ZERO:
         actFnc = ZeroFnc;
         invActFnc = ZeroFnc;
         break;
      case EnumFunction::LINEAR:
         actFnc = Linear;
         invActFnc = InvLinear;
         break;
      case EnumFunction::TANH:
         actFnc = Tanh;
         invActFnc = InvTanh;
         break;
      case EnumFunction::RELU:
         actFnc = ReLU;
         invActFnc = InvReLU;
         break;
      case EnumFunction::SYMMRELU:
         actFnc = SymmReLU;
         invActFnc = InvSymmReLU;
         break;
      case EnumFunction::TANHSHIFT:
         actFnc = TanhShift;
         invActFnc = InvTanhShift;
         break;
      case EnumFunction::SOFTSIGN:
         actFnc = SoftSign;
         invActFnc = InvSoftSign;
         break;
      case EnumFunction::SIGMOID:
         actFnc = Sigmoid;
         invActFnc = InvSigmoid;
         break;
      case EnumFunction::GAUSS:
         actFnc = Gauss;
         invActFnc = InvGauss;
         break;
      case EnumFunction::GAUSSCOMPLEMENT:
         actFnc = GaussComplement;
         invActFnc = InvGaussComplement;
         break;
      }
      m_activationFunction = actFnc;
      m_inverseActivationFunction = invActFnc;
   }
}
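
// Hedged construction sketch (enumerator spellings as declared in
// TMVA/NeuralNet.h):
//
//    using namespace TMVA::DNN;
//    Layer hidden (20, EnumFunction::TANH);                             // 20 tanh nodes
//    Layer output (1, EnumFunction::LINEAR, ModeOutputValues::SIGMOID); // probability output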

Settings::Settings (TString name,
                    size_t _convergenceSteps, size_t _batchSize, size_t _testRepetitions,
                    double _factorWeightDecay, EnumRegularization eRegularization,
                    MinimizerType _eMinimizerType, double _learningRate,
                    double _momentum, int _repetitions, bool _useMultithreading)
   : m_timer (100, name)
   , m_minProgress (0)
   , m_maxProgress (100)
   , m_convergenceSteps (_convergenceSteps)
   , m_batchSize (_batchSize)
   , m_testRepetitions (_testRepetitions)
   , m_factorWeightDecay (_factorWeightDecay)
   , count_E (0)
   , count_dE (0)
   , count_mb_E (0)
   , count_mb_dE (0)
   , m_regularization (eRegularization)
   , fLearningRate (_learningRate)
   , fMomentum (_momentum)
   , fRepetitions (_repetitions)
   , fMinimizerType (_eMinimizerType)
   , m_convergenceCount (0)
   , m_maxConvergenceCount (0)
   , m_minError (1e10)
   , m_useMultithreading (_useMultithreading)
   , fMonitoring (NULL)
{
}
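
// Note: per the declaration in NeuralNet.h, this constructor carries
// defaults of _convergenceSteps=15, _batchSize=10, _testRepetitions=7,
// _factorWeightDecay=1e-5, _learningRate=1e-5, _momentum=0.3,
// _repetitions=3, and multithreading enabled.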

Settings::~Settings ()
{
}

/** \brief action to be done when the training cycle is started (e.g. update some monitoring output)
 *
 */
void ClassificationSettings::startTrainCycle ()
{
   if (fMonitoring)
   {
      create ("ROC", 100, 0, 1, 100, 0, 1);
      create ("Significance", 100, 0, 1, 100, 0, 3);
      create ("OutputSig", 100, 0, 1);
      create ("OutputBkg", 100, 0, 1);
      fMonitoring->ProcessEvents ();
   }
}

/** \brief action to be done when the training cycle is ended (e.g. update some monitoring output)
 *
 */
void ClassificationSettings::endTrainCycle (double /*error*/)
{
   if (fMonitoring) fMonitoring->ProcessEvents ();
}

/** \brief action to be done after the computation of a test sample (e.g. update some monitoring output)
 *
 */
void ClassificationSettings::testSample (double /*error*/, double output, double target, double weight)
{
   m_output.push_back (output);
   m_targets.push_back (target);
   m_weights.push_back (weight);
}

/** \brief action to be done when the test cycle is started (e.g. update some monitoring output)
 *
 */
void ClassificationSettings::startTestCycle ()
{
   m_output.clear ();
   m_targets.clear ();
   m_weights.clear ();
}

/** \brief action to be done when the test cycle is ended (e.g. update some monitoring output)
 *
 */
void ClassificationSettings::endTestCycle ()
{
   if (m_output.empty ())
      return;
   double minVal = *std::min_element (begin (m_output), end (m_output));
   double maxVal = *std::max_element (begin (m_output), end (m_output));
   const size_t numBinsROC = 1000;
   const size_t numBinsData = 100;

   std::vector<double> truePositives (numBinsROC+1, 0);
   std::vector<double> falsePositives (numBinsROC+1, 0);
   std::vector<double> trueNegatives (numBinsROC+1, 0);
   std::vector<double> falseNegatives (numBinsROC+1, 0);

   std::vector<double> x (numBinsData, 0);
   std::vector<double> datSig (numBinsData+1, 0);
   std::vector<double> datBkg (numBinsData+1, 0);

   double binSizeROC = (maxVal - minVal)/(double)numBinsROC;
   double binSizeData = (maxVal - minVal)/(double)numBinsData;

   double sumWeightsSig = 0.0;
   double sumWeightsBkg = 0.0;

   for (size_t b = 0; b < numBinsData; ++b)
   {
      double binData = minVal + b*binSizeData;
      x.at (b) = binData;
   }

   if (fabs (binSizeROC) < 0.0001)
      return;

   for (size_t i = 0, iEnd = m_output.size (); i < iEnd; ++i)
   {
      double val = m_output.at (i);
      double truth = m_targets.at (i);
      double weight = m_weights.at (i);

      bool isSignal = (truth > 0.5 ? true : false);

      if (m_sumOfSigWeights != 0 && m_sumOfBkgWeights != 0)
      {
         if (isSignal)
            weight *= m_sumOfSigWeights;
         else
            weight *= m_sumOfBkgWeights;
      }

      size_t binROC = (val-minVal)/binSizeROC;
      size_t binData = (val-minVal)/binSizeData;

      if (isSignal)
      {
         for (size_t n = 0; n <= binROC; ++n)
         {
            truePositives.at (n) += weight;
         }
         for (size_t n = binROC+1; n < numBinsROC; ++n)
         {
            falseNegatives.at (n) += weight;
         }

         datSig.at (binData) += weight;
         sumWeightsSig += weight;
      }
      else
      {
         for (size_t n = 0; n <= binROC; ++n)
         {
            falsePositives.at (n) += weight;
         }
         for (size_t n = binROC+1; n < numBinsROC; ++n)
         {
            trueNegatives.at (n) += weight;
         }

         datBkg.at (binData) += weight;
         sumWeightsBkg += weight;
      }
   }

   std::vector<double> sigEff;
   std::vector<double> backRej;

   double bestSignificance = 0;
   double bestCutSignificance = 0;

   double numEventsScaleFactor = 1.0;
   if (m_scaleToNumEvents > 0)
   {
      size_t numEvents = m_output.size ();
      numEventsScaleFactor = double (m_scaleToNumEvents)/double (numEvents);
   }

   clear ("ROC");
   clear ("Significance");

   for (size_t i = 0; i < numBinsROC; ++i)
   {
      double tp = truePositives.at (i) * numEventsScaleFactor;
      double fp = falsePositives.at (i) * numEventsScaleFactor;
      double tn = trueNegatives.at (i) * numEventsScaleFactor;
      double fn = falseNegatives.at (i) * numEventsScaleFactor;

      double seff = (tp+fn == 0.0 ? 1.0 : (tp / (tp+fn)));
      double brej = (tn+fp == 0.0 ? 0.0 : (tn / (tn+fp)));

      sigEff.push_back (seff);
      backRej.push_back (brej);

      // m_histROC->Fill (seff, brej);
      addPoint ("ROC", seff, brej); // x, y

      double currentCut = (i * binSizeROC)+minVal;

      double sig = tp;
      double bkg = fp;
      double significance = sig / sqrt (sig + bkg);
      if (significance > bestSignificance)
      {
         bestSignificance = significance;
         bestCutSignificance = currentCut;
      }

      addPoint ("Significance", currentCut, significance);
      // m_histSignificance->Fill (currentCut, significance);
   }

   m_significances.push_back (bestSignificance);
   static size_t testCycle = 0;

   clear ("OutputSig");
   clear ("OutputBkg");
   for (size_t i = 0; i < numBinsData; ++i)
   {
      addPoint ("OutputSig", x.at (i), datSig.at (i)/sumWeightsSig);
      addPoint ("OutputBkg", x.at (i), datBkg.at (i)/sumWeightsBkg);
      // m_histOutputSignal->Fill (x.at (i), datSig.at (i)/sumWeightsSig);
      // m_histOutputBackground->Fill (x.at (i), datBkg.at (i)/sumWeightsBkg);
   }

   ++testCycle;

   if (fMonitoring)
   {
      plot ("ROC", "", 2, kRed);
      plot ("Significance", "", 3, kRed);
      plot ("OutputSig", "", 4, kRed);
      plot ("OutputBkg", "same", 4, kBlue);
      fMonitoring->ProcessEvents ();
   }

   m_cutValue = bestCutSignificance;
}
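
// Note (hedged summary of the monitoring above): each ROC point is
// (signal efficiency, background rejection) for one cut on the network
// output, the significance is the usual counting estimate S / sqrt(S + B),
// and the cut value that maximises it is kept in m_cutValue.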

/** \brief check for convergence
 *
 */
bool Settings::hasConverged (double testError)
{
   // std::cout << "check convergence; minError " << m_minError << " current " << testError
   //           << " current convergence count " << m_convergenceCount << std::endl;
   if (testError < m_minError*0.999)
   {
      m_convergenceCount = 0;
      m_minError = testError;
   }
   else
   {
      ++m_convergenceCount;
      m_maxConvergenceCount = std::max (m_convergenceCount, m_maxConvergenceCount);
   }

   if (m_convergenceCount >= convergenceSteps () || testError <= 0)
      return true;

   return false;
}
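
// Note (hedged): a test error counts as an improvement only if it beats the
// best error so far by at least 0.1% (the 0.999 factor); otherwise a counter
// grows, and training is deemed converged after convergenceSteps ()
// consecutive checks without such an improvement (or once the error is <= 0).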

/** \brief set the weight sums to be scaled to (preparations for monitoring output)
 *
 */
void ClassificationSettings::setWeightSums (double sumOfSigWeights, double sumOfBkgWeights)
{
   m_sumOfSigWeights = sumOfSigWeights;
   m_sumOfBkgWeights = sumOfBkgWeights;
}

/** \brief preparation for monitoring output
 *
 */
void ClassificationSettings::setResultComputation (
   std::string _fileNameNetConfig,
   std::string _fileNameResult,
   std::vector<Pattern>* _resultPatternContainer)
{
   m_pResultPatternContainer = _resultPatternContainer;
   m_fileNameResult = _fileNameResult;
   m_fileNameNetConfig = _fileNameNetConfig;
}

/** \brief compute the number of weights in the net, counting layers from trainingStartLayer onwards
 *
 */
size_t Net::numWeights (size_t trainingStartLayer) const
{
   size_t num (0);
   size_t index (0);
   size_t prevNodes (inputSize ());
   for (auto& layer : m_layers)
   {
      if (index >= trainingStartLayer)
         num += layer.numWeights (prevNodes);
      prevNodes = layer.numNodes ();
      ++index;
   }
   return num;
}
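
// Note (hedged): for a fully connected layer, layer.numWeights (prevNodes)
// presumably amounts to prevNodes * numNodes for that layer, so a 3-8-1 net
// would report 3*8 + 8*1 = 32 weights (plus biases, if Layer counts them).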

size_t Net::numNodes (size_t trainingStartLayer) const
{
   size_t num (0);
   size_t index (0);
   for (auto& layer : m_layers)
   {
      if (index >= trainingStartLayer)
         num += layer.numNodes ();
      ++index;
   }
   return num;
}

/** \brief prepare the drop-out container given the provided drop-fractions
 *
 */
void Net::fillDropContainer (DropContainer& dropContainer, double dropFraction, size_t _numNodes) const
{
   size_t numDrops = dropFraction * _numNodes;
   if (numDrops >= _numNodes) // maintain at least one node
      numDrops = _numNodes - 1;
   // add the markers for the nodes which are enabled
   dropContainer.insert (end (dropContainer), _numNodes-numDrops, true);
   // add the markers for the disabled nodes
   dropContainer.insert (end (dropContainer), numDrops, false);
   // shuffle enabled and disabled markers; a static engine keeps the
   // permutation varying across calls (a freshly default-constructed engine
   // would reproduce the identical shuffle on every call)
   static std::default_random_engine generator;
   std::shuffle (end (dropContainer)-_numNodes, end (dropContainer), generator);
}
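
// Note (hedged): DropContainer is a std::vector<char> of per-node keep/drop
// flags (true = the node stays active); each call appends one block of
// _numNodes flags, so consumers presumably read it layer by layer.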
587
588 }; // namespace DNN
589}; // namespace TMVA
590