ROOT Reference Guide
TMVA::DNN::TGradientDescent< Architecture_t > Class Template Reference

template<typename Architecture_t>
class TMVA::DNN::TGradientDescent< Architecture_t >

Definition at line 55 of file Minimizers.h.

Public Types

using Matrix_t = typename Architecture_t::Matrix_t
 
using Scalar_t = typename Architecture_t::Scalar_t
 

Public Member Functions

 TGradientDescent ()
 
 TGradientDescent (Scalar_t learningRate, size_t convergenceSteps, size_t testInterval)
 
size_t GetConvergenceCount () const
 
size_t GetConvergenceSteps () const
 
Scalar_t GetTestError () const
 
size_t GetTestInterval () const
 
Scalar_t GetTrainingError () const
 
bool HasConverged ()
 Increases the minimization step counter by the test error evaluation period and uses the current internal value of the test error to determine if the minimization has converged.
 
bool HasConverged (Scalar_t testError)
 Increases the minimization step counter by the test error evaluation period and uses the provided test error value to determine if the minimization has converged.
 
void Reset ()
 Reset minimizer object to default state.
 
void SetBatchSize (Scalar_t rate)
 
void SetConvergenceSteps (size_t steps)
 
void SetLearningRate (Scalar_t rate)
 
void SetTestInterval (size_t interval)
 
template<typename Net_t >
void Step (Net_t &master, std::vector< Net_t > &nets, std::vector< TBatch< Architecture_t > > &batches)
 Perform multiple optimization steps simultaneously.
 
template<typename Net_t >
void Step (Net_t &net, Matrix_t &input, const Matrix_t &output, const Matrix_t &weights)
 Perform a single optimization step on a given batch.
 
template<typename Net_t >
Scalar_t StepLoss (Net_t &net, Matrix_t &input, const Matrix_t &output, const Matrix_t &weights)
 Same as Step(...) but also evaluates the loss on the given training data.
 
template<typename Net_t >
auto StepLoss (Net_t &net, Matrix_t &input, const Matrix_t &output, const Matrix_t &weights) -> Scalar_t
 
template<typename Net_t >
void StepMomentum (Net_t &master, std::vector< Net_t > &nets, std::vector< TBatch< Architecture_t > > &batches, Scalar_t momentum)
 Same as the Step(...) method for multiple batches but uses momentum.
 
template<typename Net_t >
void StepNesterov (Net_t &master, std::vector< Net_t > &nets, std::vector< TBatch< Architecture_t > > &batches, Scalar_t momentum)
 Same as the Step(...) method for multiple batches but uses Nesterov momentum.
 
template<typename Net_t >
void StepReducedWeights (Net_t &net, Matrix_t &input, const Matrix_t &output)
 Does not evaluate the loss and therefore does not trigger a possible synchronization with the device.
 
template<typename Net_t >
Scalar_t StepReducedWeightsLoss (Net_t &net, Matrix_t &input, const Matrix_t &output, const Matrix_t &weights)
 Similar to StepReducedWeights(...) but also evaluates the loss.
 
template<typename Net_t >
auto StepReducedWeightsLoss (Net_t &net, Matrix_t &input, const Matrix_t &output, const Matrix_t &weights) -> Scalar_t
 
template<typename Data_t , typename Net_t >
auto Train (const Data_t &trainingData, size_t nTrainingSamples, const Data_t &testData, size_t nTestSamples, Net_t &net, size_t nThreads) -> Scalar_t
 
template<typename Data_t , typename Net_t >
Scalar_t Train (const Data_t &TrainingDataIn, size_t nTrainingSamples, const Data_t &TestDataIn, size_t nTestSamples, Net_t &net, size_t nThreads=1)
 Train the given net using the given training input data (events), training output data (labels), test input data (events), and test output data (labels).
 
template<typename Data_t , typename Net_t >
auto TrainMomentum (const Data_t &trainingData, size_t nTrainingSamples, const Data_t &testData, size_t nTestSamples, Net_t &net, Scalar_t momentum, size_t nThreads) -> Scalar_t
 
template<typename Data_t , typename Net_t >
Scalar_t TrainMomentum (const Data_t &TrainingDataIn, size_t nTrainingSamples, const Data_t &TestDataIn, size_t nTestSamples, Net_t &net, Scalar_t momentum, size_t nThreads=1)
 Same as Train(...) but uses the given momentum.
 

Private Attributes

size_t fBatchSize
 Batch size to use for the training.
 
size_t fConvergenceCount
 Current number of training epochs without a considerable decrease in the test error.
 
size_t fConvergenceSteps
 Number of training epochs without a considerable decrease in the test error required for convergence.
 
Scalar_t fLearningRate
 Learning rate \(\alpha\).
 
Scalar_t fMinimumError
 The minimum loss achieved on the training set during the current training session.
 
size_t fStepCount
 Number of steps performed in the current training session.
 
Scalar_t fTestError
 Holds the most recently computed test loss.
 
size_t fTestInterval
 Interval for the computation of the test error.
 
Scalar_t fTrainingError
 Holds the most recently computed training loss.
 

#include <TMVA/DNN/Minimizers.h>

Member Typedef Documentation

◆ Matrix_t

template<typename Architecture_t >
using TMVA::DNN::TGradientDescent< Architecture_t >::Matrix_t = typename Architecture_t::Matrix_t

Definition at line 59 of file Minimizers.h.

◆ Scalar_t

template<typename Architecture_t >
using TMVA::DNN::TGradientDescent< Architecture_t >::Scalar_t = typename Architecture_t::Scalar_t

Definition at line 58 of file Minimizers.h.

Constructor & Destructor Documentation

◆ TGradientDescent() [1/2]

template<typename Architecture_t >
TMVA::DNN::TGradientDescent< Architecture_t >::TGradientDescent ( )

Definition at line 175 of file Minimizers.h.

◆ TGradientDescent() [2/2]

template<typename Architecture_t >
TMVA::DNN::TGradientDescent< Architecture_t >::TGradientDescent ( Scalar_t  learningRate,
size_t  convergenceSteps,
size_t  testInterval 
)

Definition at line 185 of file Minimizers.h.
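
A minimal construction sketch, assuming the CPU backend TMVA::DNN::TCpu<Double_t> as the architecture type (any architecture type works the same way); the numeric values are purely illustrative:

#include "TMVA/DNN/Architectures/Cpu.h"
#include "TMVA/DNN/Minimizers.h"

using Architecture_t = TMVA::DNN::TCpu<Double_t>;
using Minimizer_t    = TMVA::DNN::TGradientDescent<Architecture_t>;

// Learning rate 0.001, convergence after 10 test intervals without improvement,
// test error evaluated every 5 steps.
Minimizer_t minimizer(0.001, 10, 5);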

Member Function Documentation

◆ GetConvergenceCount()

template<typename Architecture_t >
size_t TMVA::DNN::TGradientDescent< Architecture_t >::GetConvergenceCount ( ) const
inline

Definition at line 159 of file Minimizers.h.

◆ GetConvergenceSteps()

template<typename Architecture_t >
size_t TMVA::DNN::TGradientDescent< Architecture_t >::GetConvergenceSteps ( ) const
inline

Definition at line 160 of file Minimizers.h.

◆ GetTestError()

template<typename Architecture_t >
Scalar_t TMVA::DNN::TGradientDescent< Architecture_t >::GetTestError ( ) const
inline

Definition at line 162 of file Minimizers.h.

◆ GetTestInterval()

template<typename Architecture_t >
size_t TMVA::DNN::TGradientDescent< Architecture_t >::GetTestInterval ( ) const
inline

Definition at line 163 of file Minimizers.h.

◆ GetTrainingError()

template<typename Architecture_t >
Scalar_t TMVA::DNN::TGradientDescent< Architecture_t >::GetTrainingError ( ) const
inline

Definition at line 161 of file Minimizers.h.

◆ HasConverged() [1/2]

template<typename Architecture_t >
bool TMVA::DNN::TGradientDescent< Architecture_t >::HasConverged ( )
inline

Increases the minimization step counter by the test error evaluation period and uses the current internal value of the test error to determine if the minimization has converged.

Definition at line 667 of file Minimizers.h.

◆ HasConverged() [2/2]

template<typename Architecture_t >
bool TMVA::DNN::TGradientDescent< Architecture_t >::HasConverged ( Scalar_t  testError)
inline

Increases the minimization step counter by the test error evaluation period and uses the provided test error value to determine if the minimization has converged.

Definition at line 681 of file Minimizers.h.
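
A sketch of the intended usage pattern, assuming a minimizer and net set up as above; the batch matrices and the test-error evaluation are hypothetical placeholders:

bool converged = false;
minimizer.Reset();
while (!converged) {
   // Perform one test interval's worth of single-batch optimization steps.
   for (size_t i = 0; i < minimizer.GetTestInterval(); ++i) {
      minimizer.Step(net, batchInput, batchOutput, batchWeights);   // hypothetical batch matrices
   }
   // Evaluate the loss on the test set (only indicated here).
   Double_t testError = /* loss of net on the test sample */ 0.0;
   converged = minimizer.HasConverged(testError);   // also advances the internal step counter
}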

◆ Reset()

template<typename Architecture_t >
void TMVA::DNN::TGradientDescent< Architecture_t >::Reset ( )
inline

Reset minimizer object to default state.

Definition at line 81 of file Minimizers.h.

◆ SetBatchSize()

template<typename Architecture_t >
void TMVA::DNN::TGradientDescent< Architecture_t >::SetBatchSize ( Scalar_t  rate)
inline

Definition at line 168 of file Minimizers.h.

◆ SetConvergenceSteps()

template<typename Architecture_t >
void TMVA::DNN::TGradientDescent< Architecture_t >::SetConvergenceSteps ( size_t  steps)
inline

Definition at line 165 of file Minimizers.h.

◆ SetLearningRate()

template<typename Architecture_t >
void TMVA::DNN::TGradientDescent< Architecture_t >::SetLearningRate ( Scalar_t  rate)
inline

Definition at line 167 of file Minimizers.h.

◆ SetTestInterval()

template<typename Architecture_t >
void TMVA::DNN::TGradientDescent< Architecture_t >::SetTestInterval ( size_t  interval)
inline

Definition at line 166 of file Minimizers.h.

◆ Step() [1/2]

template<typename Architecture_t >
template<typename Net_t >
void TMVA::DNN::TGradientDescent< Architecture_t >::Step ( Net_t &  master,
std::vector< Net_t > &  nets,
std::vector< TBatch< Architecture_t > > &  batches 
)
inline

Perform multiple optimization steps simultaneously.

Performs the backprop algorithm on the input batches given in batches on the neural networks given in nets. The forward and backward propagation steps are executed in an interleaving manner in order to exploit potential batch-level parallelism for asynchronous device calls.

Definition at line 374 of file Minimizers.h.
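
A hedged sketch of the multi-batch variant, assuming master is the reference network of type Net_t, nets holds per-thread copies of it, and batches has been filled from a data loader (all names are illustrative):

std::vector<Net_t> nets(nThreads, master);                  // one working copy per thread
std::vector<TMVA::DNN::TBatch<Architecture_t>> batches;     // one batch per working copy
// ... fill `batches`, e.g. from a data loader ...

minimizer.Step(master, nets, batches);   // interleaved forward/backward passes over all batches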

◆ Step() [2/2]

template<typename Architecture_t >
template<typename Net_t >
void TMVA::DNN::TGradientDescent< Architecture_t >::Step ( Net_t &  net,
Matrix_t &  input,
const Matrix_t &  output,
const Matrix_t &  weights 
)
inline

Perform a single optimization step on a given batch.

Propagates the input matrix forward through the net, evaluates the loss and propagates the gradients backward through the net. The computed gradients are scaled by the learning rate \(\alpha\) and subtracted from the weights and bias values of each layer.

Definition at line 331 of file Minimizers.h.
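
Expressed as a formula, each step applies the plain gradient-descent update to every weight and bias matrix \(W\) of the net: \( W \to W - \alpha \frac{\partial L}{\partial W} \).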

◆ StepLoss() [1/2]

template<typename Architecture_t >
template<typename Net_t >
Scalar_t TMVA::DNN::TGradientDescent< Architecture_t >::StepLoss ( Net_t &  net,
Matrix_t &  input,
const Matrix_t &  output,
const Matrix_t &  weights 
)

Same as Step(...) but also evaluates the loss on the given training data.

Note that this requires synchronization between host and device.
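
A minimal usage sketch (same hypothetical setup as for Step above):

auto batchLoss = minimizer.StepLoss(net, batchInput, batchOutput, batchWeights);   // forces host/device synchronization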

◆ StepLoss() [2/2]

template<typename Architecture_t >
template<typename Net_t >
auto TMVA::DNN::TGradientDescent< Architecture_t >::StepLoss ( Net_t &  net,
Matrix_t &  input,
const Matrix_t &  output,
const Matrix_t &  weights 
) -> Scalar_t
inline

Definition at line 352 of file Minimizers.h.

◆ StepMomentum()

template<typename Architecture_t >
template<typename Net_t >
void TMVA::DNN::TGradientDescent< Architecture_t >::StepMomentum ( Net_t &  master,
std::vector< Net_t > &  nets,
std::vector< TBatch< Architecture_t > > &  batches,
Scalar_t  momentum 
)
inline

Same as the Step(...) method for multiple batches but uses momentum.

Definition at line 438 of file Minimizers.h.
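
For reference, the textbook momentum update this corresponds to keeps a velocity term \(v\) per weight matrix (the exact bookkeeping in the implementation may differ): \( v \to \mu v - \alpha \frac{\partial L}{\partial W}, \quad W \to W + v \), where \(\mu\) is the momentum parameter.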

◆ StepNesterov()

template<typename Architecture_t >
template<typename Net_t >
void TMVA::DNN::TGradientDescent< Architecture_t >::StepNesterov ( Net_t &  master,
std::vector< Net_t > &  nets,
std::vector< TBatch< Architecture_t > > &  batches,
Scalar_t  momentum 
)
inline

Same as the Step(...) method for multiple batches but uses Nesterov momentum.

Definition at line 528 of file Minimizers.h.
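
The textbook Nesterov variant evaluates the gradient at the look-ahead point \(W + \mu v\) before applying the same update (again, implementation details may differ): \( v \to \mu v - \alpha \left.\frac{\partial L}{\partial W}\right|_{W + \mu v}, \quad W \to W + v \).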

◆ StepReducedWeights()

template<typename Architecture_t >
template<typename Net_t >
void TMVA::DNN::TGradientDescent< Architecture_t >::StepReducedWeights ( Net_t &  net,
Matrix_t &  input,
const Matrix_t &  output 
)
inline

Does not evaluate the loss and therefore does not trigger a possible synchronization with the device.

Trains the weights of each layer, but only the bias terms of the first layer for compatibility with the previous implementation.

Definition at line 617 of file Minimizers.h.

◆ StepReducedWeightsLoss() [1/2]

template<typename Architecture_t >
template<typename Net_t >
Scalar_t TMVA::DNN::TGradientDescent< Architecture_t >::StepReducedWeightsLoss ( Net_t &  net,
Matrix_t &  input,
const Matrix_t &  output,
const Matrix_t &  weights 
)

Similar to StepReducedWeights(...) but also evaluates the loss.

May trigger synchronization with the device.

◆ StepReducedWeightsLoss() [2/2]

template<typename Architecture_t >
template<typename Net_t >
auto TMVA::DNN::TGradientDescent< Architecture_t >::StepReducedWeightsLoss ( Net_t &  net,
Matrix_t &  input,
const Matrix_t &  output,
const Matrix_t &  weights 
) -> Scalar_t
inline

Definition at line 642 of file Minimizers.h.

◆ Train() [1/2]

template<typename Architecture_t >
template<typename Data_t , typename Net_t >
auto TMVA::DNN::TGradientDescent< Architecture_t >::Train ( const Data_t &  trainingData,
size_t  nTrainingSamples,
const Data_t &  testData,
size_t  nTestSamples,
Net_t &  net,
size_t  nThreads 
) -> Scalar_t

Definition at line 196 of file Minimizers.h.

◆ Train() [2/2]

template<typename Architecture_t >
template<typename Data_t , typename Net_t >
Scalar_t TMVA::DNN::TGradientDescent< Architecture_t >::Train ( const Data_t &  TrainingDataIn,
size_t  nTrainingSamples,
const Data_t &  TestDataIn,
size_t  nTestSamples,
Net_t &  net,
size_t  nThreads = 1 
)

Train the given net using the given training input data (events), training output data (labels), test input data (events), and test output data (labels).
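
A hedged usage sketch, assuming trainData and testData are of a type Data_t for which a data loader exists for the chosen architecture; the sample counts and thread count are illustrative:

// Returns the final test error; runs the training with 4 worker threads.
auto finalTestError = minimizer.Train(trainData, nTrainSamples,
                                      testData,  nTestSamples,
                                      net, /*nThreads=*/4);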

◆ TrainMomentum() [1/2]

template<typename Architecture_t >
template<typename Data_t , typename Net_t >
auto TMVA::DNN::TGradientDescent< Architecture_t >::TrainMomentum ( const Data_t &  trainingData,
size_t  nTrainingSamples,
const Data_t &  testData,
size_t  nTestSamples,
Net_t &  net,
Scalar_t  momentum,
size_t  nThreads 
) -> Scalar_t

Definition at line 259 of file Minimizers.h.

◆ TrainMomentum() [2/2]

template<typename Architecture_t >
template<typename Data_t , typename Net_t >
Scalar_t TMVA::DNN::TGradientDescent< Architecture_t >::TrainMomentum ( const Data_t &  TrainingDataIn,
size_t  nTrainingSamples,
const Data_t &  TestDataIn,
size_t  nTestSamples,
Net_t &  net,
Scalar_t  momentum,
size_t  nThreads = 1 
)

Same as Train(...) but uses the given momentum.

Member Data Documentation

◆ fBatchSize

template<typename Architecture_t >
size_t TMVA::DNN::TGradientDescent< Architecture_t >::fBatchSize
private

Batch size to use for the training.

Definition at line 62 of file Minimizers.h.

◆ fConvergenceCount

template<typename Architecture_t >
size_t TMVA::DNN::TGradientDescent< Architecture_t >::fConvergenceCount
private

Current number of training epochs without a considerable decrease in the test error.

Definition at line 66 of file Minimizers.h.

◆ fConvergenceSteps

template<typename Architecture_t >
size_t TMVA::DNN::TGradientDescent< Architecture_t >::fConvergenceSteps
private

Number of training epochs without a considerable decrease in the test error required for convergence.

Definition at line 64 of file Minimizers.h.

◆ fLearningRate

template<typename Architecture_t >
Scalar_t TMVA::DNN::TGradientDescent< Architecture_t >::fLearningRate
private

Learning rate \(\alpha\).

Definition at line 71 of file Minimizers.h.

◆ fMinimumError

template<typename Architecture_t >
Scalar_t TMVA::DNN::TGradientDescent< Architecture_t >::fMinimumError
private

The minimum loss achieved on the training set during the current training session.

Definition at line 72 of file Minimizers.h.

◆ fStepCount

template<typename Architecture_t >
size_t TMVA::DNN::TGradientDescent< Architecture_t >::fStepCount
private

Number of steps performed in the current training session.

Definition at line 63 of file Minimizers.h.

◆ fTestError

template<typename Architecture_t >
Scalar_t TMVA::DNN::TGradientDescent< Architecture_t >::fTestError
private

Holds the most recently computed test loss.

Definition at line 70 of file Minimizers.h.

◆ fTestInterval

template<typename Architecture_t >
size_t TMVA::DNN::TGradientDescent< Architecture_t >::fTestInterval
private

Interval for the computation of the test error.

Definition at line 68 of file Minimizers.h.

◆ fTrainingError

template<typename Architecture_t >
Scalar_t TMVA::DNN::TGradientDescent< Architecture_t >::fTrainingError
private

Holds the most recently computed training loss.

Definition at line 69 of file Minimizers.h.

  • tmva/tmva/inc/TMVA/DNN/Minimizers.h