NeuralNet.cxx
#include "TMVA/NeuralNet.h"


namespace TMVA
{
   namespace DNN
   {

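      /** \brief random value drawn from a normal (Gaussian) distribution with the given mean and sigma
       *
       */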
      double gaussDouble (double mean, double sigma)
      {
         static std::default_random_engine generator;
         std::normal_distribution<double> distribution (mean, sigma);
         return distribution (generator);
      }

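      /** \brief random value drawn uniformly from the interval [minValue, maxValue)
       *
       */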
      double uniformDouble (double minValue, double maxValue)
      {
         static std::default_random_engine generator;
         std::uniform_real_distribution<double> distribution (minValue, maxValue);
         return distribution (generator);
      }

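      /** \brief random integer drawn uniformly from {0, ..., maxValue-1}
       *
       */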
      int randomInt (int maxValue)
      {
         static std::default_random_engine generator;
         std::uniform_int_distribution<int> distribution (0, maxValue-1);
         return distribution (generator);
      }

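      /** \brief random value drawn from a Student t distribution with "distributionParameter" degrees of freedom
       *
       */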
      double studenttDouble (double distributionParameter)
      {
         static std::default_random_engine generator;
         std::student_t_distribution<double> distribution (distributionParameter);
         return distribution (generator);
      }

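      /** \brief c'tor of LayerData for an input layer of the given size
       *
       */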
      LayerData::LayerData (size_t inputSize)
         : m_hasDropOut (false)
         , m_isInputLayer (true)
         , m_hasWeights (false)
         , m_hasGradients (false)
         , m_eModeOutput (ModeOutputValues::DIRECT)
      {
         m_size = inputSize;
         m_deltas.assign (m_size, 0);
      }

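      /** \brief c'tor of LayerData for an input layer taking its values from the iterator range [itInputBegin, itInputEnd)
       *
       */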
      LayerData::LayerData (const_iterator_type itInputBegin, const_iterator_type itInputEnd, ModeOutputValues eModeOutput)
         : m_hasDropOut (false)
         , m_isInputLayer (true)
         , m_hasWeights (false)
         , m_hasGradients (false)
         , m_eModeOutput (eModeOutput)
      {
         m_itInputBegin = itInputBegin;
         m_itInputEnd = itInputEnd;
         m_size = std::distance (itInputBegin, itInputEnd);
         m_deltas.assign (m_size, 0);
      }
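      /** \brief c'tor of LayerData for a layer with weights and gradients (training mode)
       *
       */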
      LayerData::LayerData (size_t _size,
                            const_iterator_type itWeightBegin,
                            iterator_type itGradientBegin,
                            std::shared_ptr<std::function<double(double)>> _activationFunction,
                            std::shared_ptr<std::function<double(double)>> _inverseActivationFunction,
                            ModeOutputValues eModeOutput)
         : m_size (_size)
         , m_hasDropOut (false)
         , m_itConstWeightBegin (itWeightBegin)
         , m_itGradientBegin (itGradientBegin)
         , m_activationFunction (_activationFunction)
         , m_inverseActivationFunction (_inverseActivationFunction)
         , m_isInputLayer (false)
         , m_hasWeights (true)
         , m_hasGradients (true)
         , m_eModeOutput (eModeOutput)
      {
         m_values.assign (_size, 0);
         m_deltas.assign (_size, 0);
         m_valueGradients.assign (_size, 0);
      }
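      /** \brief c'tor of LayerData for a layer with weights but without gradients (no training)
       *
       */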
      LayerData::LayerData (size_t _size, const_iterator_type itWeightBegin,
                            std::shared_ptr<std::function<double(double)>> _activationFunction,
                            ModeOutputValues eModeOutput)
         : m_size (_size)
         , m_hasDropOut (false)
         , m_itConstWeightBegin (itWeightBegin)
         , m_activationFunction (_activationFunction)
         , m_inverseActivationFunction ()
         , m_isInputLayer (false)
         , m_hasWeights (true)
         , m_hasGradients (false)
         , m_eModeOutput (eModeOutput)
      {
         m_values.assign (_size, 0);
      }
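      /** \brief compute the probabilities from the node values
       *
       * SIGMOID applies the sigmoid to each node value; SOFTMAX normalizes the
       * exponentiated values, p_i = exp(v_i) / sum_j exp(v_j); otherwise the
       * values are returned unchanged.
       */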
      LayerData::container_type LayerData::computeProbabilities () const
      {
         container_type probabilitiesContainer;
         if (isFlagSet (ModeOutputValues::SIGMOID, m_eModeOutput))
         {
            std::transform (begin (m_values), end (m_values), std::back_inserter (probabilitiesContainer), (*Sigmoid.get ()));
         }
         else if (isFlagSet (ModeOutputValues::SOFTMAX, m_eModeOutput))
         {
            double sum = 0;
            probabilitiesContainer = m_values;
            std::for_each (begin (probabilitiesContainer), end (probabilitiesContainer), [&sum](double& p){ p = std::exp (p); sum += p; });
            if (sum != 0)
               std::for_each (begin (probabilitiesContainer), end (probabilitiesContainer), [sum](double& p){ p /= sum; });
         }
         else
         {
            probabilitiesContainer.assign (begin (m_values), end (m_values));
         }
         return probabilitiesContainer;
      }
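      /** \brief c'tor for defining a Layer: selects the activation function and its inverse for the nodes
       *
       */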
      Layer::Layer (size_t _numNodes, EnumFunction _activationFunction, ModeOutputValues eModeOutputValues)
         : m_numNodes (_numNodes)
         , m_eModeOutputValues (eModeOutputValues)
         , m_activationFunctionType (_activationFunction)
      {
         for (size_t iNode = 0; iNode < _numNodes; ++iNode)
         {
            auto actFnc = Linear;
            auto invActFnc = InvLinear;
            switch (_activationFunction)
            {
            case EnumFunction::ZERO:
               actFnc = ZeroFnc;
               invActFnc = ZeroFnc;
               break;
            case EnumFunction::LINEAR:
               actFnc = Linear;
               invActFnc = InvLinear;
               break;
            case EnumFunction::TANH:
               actFnc = Tanh;
               invActFnc = InvTanh;
               break;
            case EnumFunction::RELU:
               actFnc = ReLU;
               invActFnc = InvReLU;
               break;
            case EnumFunction::SYMMRELU:
               actFnc = SymmReLU;
               invActFnc = InvSymmReLU;
               break;
            case EnumFunction::TANHSHIFT:
               actFnc = TanhShift;
               invActFnc = InvTanhShift;
               break;
            case EnumFunction::SOFTSIGN:
               actFnc = SoftSign;
               invActFnc = InvSoftSign;
               break;
            case EnumFunction::SIGMOID:
               actFnc = Sigmoid;
               invActFnc = InvSigmoid;
               break;
            case EnumFunction::GAUSS:
               actFnc = Gauss;
               invActFnc = InvGauss;
               break;
            case EnumFunction::GAUSSCOMPLEMENT:
               actFnc = GaussComplement;
               invActFnc = InvGaussComplement;
               break;
            }
            m_activationFunction = actFnc;
            m_inverseActivationFunction = invActFnc;
         }
      }
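      /** \brief c'tor of Settings: keeps the training-steering parameters (convergence steps, batch size, learning rate, momentum, regularization, minimizer type, ...)
       *
       */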
      Settings::Settings (TString name,
                          size_t _convergenceSteps, size_t _batchSize, size_t _testRepetitions,
                          double _factorWeightDecay, EnumRegularization eRegularization,
                          MinimizerType _eMinimizerType, double _learningRate,
                          double _momentum, int _repetitions, bool _useMultithreading)
         : m_timer (100, name)
         , m_minProgress (0)
         , m_maxProgress (100)
         , m_convergenceSteps (_convergenceSteps)
         , m_batchSize (_batchSize)
         , m_testRepetitions (_testRepetitions)
         , m_factorWeightDecay (_factorWeightDecay)
         , count_E (0)
         , count_dE (0)
         , count_mb_E (0)
         , count_mb_dE (0)
         , m_regularization (eRegularization)
         , fLearningRate (_learningRate)
         , fMomentum (_momentum)
         , fRepetitions (_repetitions)
         , fMinimizerType (_eMinimizerType)
         , m_convergenceCount (0)
         , m_maxConvergenceCount (0)
         , m_minError (1e10)
         , m_useMultithreading (_useMultithreading)
         , fMonitoring (NULL)
      {
      }

      Settings::~Settings ()
      {
      }
      /** \brief action to be done when the training cycle is started (e.g. update some monitoring output)
       *
       */
      void ClassificationSettings::startTrainCycle ()
      {
         if (fMonitoring)
         {
            create ("ROC", 100, 0, 1, 100, 0, 1);
            create ("Significance", 100, 0, 1, 100, 0, 3);
            create ("OutputSig", 100, 0, 1);
            create ("OutputBkg", 100, 0, 1);
            fMonitoring->ProcessEvents ();
         }
      }

      /** \brief action to be done when the training cycle is ended (e.g. update some monitoring output)
       *
       */
      void ClassificationSettings::endTrainCycle (double /*error*/)
      {
         if (fMonitoring) fMonitoring->ProcessEvents ();
      }

      /** \brief action to be done after the computation of a test sample (e.g. update some monitoring output)
       *
       */
      void ClassificationSettings::testSample (double /*error*/, double output, double target, double weight)
      {
         m_output.push_back (output);
         m_targets.push_back (target);
         m_weights.push_back (weight);
      }

      /** \brief action to be done when the test cycle is started (e.g. update some monitoring output)
       *
       */
      void ClassificationSettings::startTestCycle ()
      {
         m_output.clear ();
         m_targets.clear ();
         m_weights.clear ();
      }

      /** \brief action to be done when the test cycle is ended (e.g. update some monitoring output)
       *
       */
      void ClassificationSettings::endTestCycle ()
      {
         if (m_output.empty ())
            return;
         double minVal = *std::min_element (begin (m_output), end (m_output));
         double maxVal = *std::max_element (begin (m_output), end (m_output));
         const size_t numBinsROC = 1000;
         const size_t numBinsData = 100;

         std::vector<double> truePositives (numBinsROC+1, 0);
         std::vector<double> falsePositives (numBinsROC+1, 0);
         std::vector<double> trueNegatives (numBinsROC+1, 0);
         std::vector<double> falseNegatives (numBinsROC+1, 0);

         std::vector<double> x (numBinsData, 0);
         std::vector<double> datSig (numBinsData+1, 0);
         std::vector<double> datBkg (numBinsData+1, 0);

         double binSizeROC = (maxVal - minVal)/(double)numBinsROC;
         double binSizeData = (maxVal - minVal)/(double)numBinsData;

         double sumWeightsSig = 0.0;
         double sumWeightsBkg = 0.0;

         for (size_t b = 0; b < numBinsData; ++b)
         {
            double binData = minVal + b*binSizeData;
            x.at (b) = binData;
         }

         if (fabs (binSizeROC) < 0.0001)
            return;

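         // loop over all stored test-sample outputs: accumulate the event weights in
         // cumulative bins for the ROC curve and in the output distributions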
         for (size_t i = 0, iEnd = m_output.size (); i < iEnd; ++i)
         {
            double val = m_output.at (i);
            double truth = m_targets.at (i);
            double weight = m_weights.at (i);

            bool isSignal = (truth > 0.5 ? true : false);

            if (m_sumOfSigWeights != 0 && m_sumOfBkgWeights != 0)
            {
               if (isSignal)
                  weight *= m_sumOfSigWeights;
               else
                  weight *= m_sumOfBkgWeights;
            }

            size_t binROC = (val-minVal)/binSizeROC;
            size_t binData = (val-minVal)/binSizeData;

            if (isSignal)
            {
               for (size_t n = 0; n <= binROC; ++n)
               {
                  truePositives.at (n) += weight;
               }
               for (size_t n = binROC+1; n < numBinsROC; ++n)
               {
                  falseNegatives.at (n) += weight;
               }

               datSig.at (binData) += weight;
               sumWeightsSig += weight;
            }
            else
            {
               for (size_t n = 0; n <= binROC; ++n)
               {
                  falsePositives.at (n) += weight;
               }
               for (size_t n = binROC+1; n < numBinsROC; ++n)
               {
                  trueNegatives.at (n) += weight;
               }

               datBkg.at (binData) += weight;
               sumWeightsBkg += weight;
            }
         }

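         // scan the ROC bins: compute the signal efficiency and background rejection
         // for each cut value and track the cut with the best significance sig/sqrt(sig+bkg)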
         std::vector<double> sigEff;
         std::vector<double> backRej;

         double bestSignificance = 0;
         double bestCutSignificance = 0;

         double numEventsScaleFactor = 1.0;
         if (m_scaleToNumEvents > 0)
         {
            size_t numEvents = m_output.size ();
            numEventsScaleFactor = double (m_scaleToNumEvents)/double (numEvents);
         }

         clear ("ROC");
         clear ("Significance");

         for (size_t i = 0; i < numBinsROC; ++i)
         {
            double tp = truePositives.at (i) * numEventsScaleFactor;
            double fp = falsePositives.at (i) * numEventsScaleFactor;
            double tn = trueNegatives.at (i) * numEventsScaleFactor;
            double fn = falseNegatives.at (i) * numEventsScaleFactor;

            double seff = (tp+fn == 0.0 ? 1.0 : (tp / (tp+fn)));
            double brej = (tn+fp == 0.0 ? 0.0 : (tn / (tn+fp)));

            sigEff.push_back (seff);
            backRej.push_back (brej);

            // m_histROC->Fill (seff, brej);
            addPoint ("ROC", seff, brej); // x, y

            double currentCut = (i * binSizeROC)+minVal;

            double sig = tp;
            double bkg = fp;
            double significance = sig / sqrt (sig + bkg);
            if (significance > bestSignificance)
            {
               bestSignificance = significance;
               bestCutSignificance = currentCut;
            }

            addPoint ("Significance", currentCut, significance);
            // m_histSignificance->Fill (currentCut, significance);
         }

         m_significances.push_back (bestSignificance);
         static size_t testCycle = 0;

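         // fill the weight-normalized signal and background output distributions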
         clear ("OutputSig");
         clear ("OutputBkg");
         for (size_t i = 0; i < numBinsData; ++i)
         {
            addPoint ("OutputSig", x.at (i), datSig.at (i)/sumWeightsSig);
            addPoint ("OutputBkg", x.at (i), datBkg.at (i)/sumWeightsBkg);
            // m_histOutputSignal->Fill (x.at (i), datSig.at (1)/sumWeightsSig);
            // m_histOutputBackground->Fill (x.at (i), datBkg.at (1)/sumWeightsBkg);
         }

         ++testCycle;

         if (fMonitoring)
         {
            plot ("ROC", "", 2, kRed);
            plot ("Significance", "", 3, kRed);
            plot ("OutputSig", "", 4, kRed);
            plot ("OutputBkg", "same", 4, kBlue);
            fMonitoring->ProcessEvents ();
         }

         m_cutValue = bestCutSignificance;
      }

      /** \brief check for convergence
       *
       */
      bool Settings::hasConverged (double testError)
      {
         // std::cout << "check convergence; minError " << m_minError << " current " << testError
         //           << " current convergence count " << m_convergenceCount << std::endl;
         if (testError < m_minError*0.999) // require at least 0.1% improvement to reset the counter
         {
            m_convergenceCount = 0;
            m_minError = testError;
         }
         else
         {
            ++m_convergenceCount;
            m_maxConvergenceCount = std::max (m_convergenceCount, m_maxConvergenceCount);
         }

         if (m_convergenceCount >= convergenceSteps () || testError <= 0)
            return true;

         return false;
      }

      /** \brief set the weight sums to be scaled to (preparations for monitoring output)
       *
       */
      void ClassificationSettings::setWeightSums (double sumOfSigWeights, double sumOfBkgWeights)
      {
         m_sumOfSigWeights = sumOfSigWeights;
         m_sumOfBkgWeights = sumOfBkgWeights;
      }

      /** \brief preparation for monitoring output
       *
       */
      void ClassificationSettings::setResultComputation (
         std::string _fileNameNetConfig,
         std::string _fileNameResult,
         std::vector<Pattern>* _resultPatternContainer)
      {
         m_pResultPatternContainer = _resultPatternContainer;
         m_fileNameResult = _fileNameResult;
         m_fileNameNetConfig = _fileNameNetConfig;
      }

      /** \brief compute the number of weights given the size of the input layer
       *
       */
      size_t Net::numWeights (size_t trainingStartLayer) const
      {
         size_t num (0);
         size_t index (0);
         size_t prevNodes (inputSize ());
         for (auto& layer : m_layers)
         {
            if (index >= trainingStartLayer)
               num += layer.numWeights (prevNodes);
            prevNodes = layer.numNodes ();
            ++index;
         }
         return num;
      }

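      /** \brief compute the number of nodes in the layers, counting from layer "trainingStartLayer" on
       *
       */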
      size_t Net::numNodes (size_t trainingStartLayer) const
      {
         size_t num (0);
         size_t index (0);
         for (auto& layer : m_layers)
         {
            if (index >= trainingStartLayer)
               num += layer.numNodes ();
            ++index;
         }
         return num;
      }

      /** \brief prepare the drop-out container given the provided drop-fractions
       *
       */
      void Net::fillDropContainer (DropContainer& dropContainer, double dropFraction, size_t _numNodes) const
      {
         size_t numDrops = dropFraction * _numNodes;
         if (numDrops >= _numNodes) // maintain at least one node
            numDrops = _numNodes - 1;
         dropContainer.insert (end (dropContainer), _numNodes-numDrops, true); // add the markers for the nodes which are enabled
         dropContainer.insert (end (dropContainer), numDrops, false);          // add the markers for the disabled nodes
         // shuffle enabled and disabled markers
         std::random_shuffle (end (dropContainer)-_numNodes, end (dropContainer));
      }

   } // namespace DNN
} // namespace TMVA