Logo ROOT   6.14/05
Reference Guide
List of all members | Public Types | Public Member Functions | Private Attributes | List of all members
TMVA::DNN::TGradientDescent< Architecture_t > Class Template Reference

template<typename Architecture_t>
class TMVA::DNN::TGradientDescent< Architecture_t >

Definition at line 54 of file Minimizers.h.

Public Types

using Matrix_t = typename Architecture_t::Matrix_t
 
using Scalar_t = typename Architecture_t::Scalar_t
 

Public Member Functions

 TGradientDescent ()
 
 TGradientDescent (Scalar_t learningRate, size_t convergenceSteps, size_t testInterval)
 
size_t GetConvergenceCount () const
 
size_t GetConvergenceSteps () const
 
Scalar_t GetTestError () const
 
size_t GetTestInterval () const
 
Scalar_t GetTrainingError () const
 
bool HasConverged ()
 Increases the minimization step counter by the test error evaluation period and uses the current internal value of the test error to determine if the minimization has converged. More...
 
bool HasConverged (Scalar_t testError)
 Increases the minimization step counter by the test error evaluation period and uses the provided test error value to determine if the minimization has converged. More...
 
void Reset ()
 Reset minimizer object to default state. More...
 
void SetBatchSize (Scalar_t rate)
 
void SetConvergenceSteps (size_t steps)
 
void SetLearningRate (Scalar_t rate)
 
void SetTestInterval (size_t interval)
 
template<typename Net_t >
void Step (Net_t &net, Matrix_t &input, const Matrix_t &output, const Matrix_t &weights)
 Perform a single optimization step on a given batch. More...
 
template<typename Net_t >
void Step (Net_t &master, std::vector< Net_t > &nets, std::vector< TBatch< Architecture_t >> &batches)
 Perform multiple optimization steps simultaneously. More...
 
template<typename Net_t >
Scalar_t StepLoss (Net_t &net, Matrix_t &input, const Matrix_t &output, const Matrix_t &weights)
 Same as Step(...) but also evaluate the loss on the given training data. More...
 
template<typename Net_t >
auto StepLoss (Net_t &net, Matrix_t &input, const Matrix_t &output, const Matrix_t &weights) -> Scalar_t
 
template<typename Net_t >
void StepMomentum (Net_t &master, std::vector< Net_t > &nets, std::vector< TBatch< Architecture_t >> &batches, Scalar_t momentum)
 Same as the Step(...) method for multiple batches but uses momentum. More...
 
template<typename Net_t >
void StepNesterov (Net_t &master, std::vector< Net_t > &nets, std::vector< TBatch< Architecture_t >> &batches, Scalar_t momentum)
 Same as the Step(...) method for multiple batches but uses Nesterov momentum. More...
 
template<typename Net_t >
void StepReducedWeights (Net_t &net, Matrix_t &input, const Matrix_t &output)
 Does not evaluate the loss and therefore not trigger a possible synchronization with the device. More...
 
template<typename Net_t >
Scalar_t StepReducedWeightsLoss (Net_t &net, Matrix_t &input, const Matrix_t &output, const Matrix_t &weights)
 Similar to StepReducedWeights(...) but also evaluates the loss. More...
 
template<typename Net_t >
auto StepReducedWeightsLoss (Net_t &net, Matrix_t &input, const Matrix_t &output, const Matrix_t &weights) -> Scalar_t
 
template<typename Data_t , typename Net_t >
Scalar_t Train (const Data_t &TrainingDataIn, size_t nTrainingSamples, const Data_t &TestDataIn, size_t nTestSamples, Net_t &net, size_t nThreads=1)
 Train the given net using the given training input data (events), training output data (labels), test input data (events), test output data (labels). More...
 
template<typename Data_t , typename Net_t >
auto Train (const Data_t &trainingData, size_t nTrainingSamples, const Data_t &testData, size_t nTestSamples, Net_t &net, size_t nThreads) -> Scalar_t
 
template<typename Data_t , typename Net_t >
Scalar_t TrainMomentum (const Data_t &TrainingDataIn, size_t nTrainingSamples, const Data_t &TestDataIn, size_t nTestSamples, Net_t &net, Scalar_t momentum, size_t nThreads=1)
 Same as Train(...) but uses the given momentum. More...
 
template<typename Data_t , typename Net_t >
auto TrainMomentum (const Data_t &trainingData, size_t nTrainingSamples, const Data_t &testData, size_t nTestSamples, Net_t &net, Scalar_t momentum, size_t nThreads) -> Scalar_t
 

Private Attributes

size_t fBatchSize
 Batch size to use for the training. More...
 
size_t fConvergenceCount
 Current number of training epochs without considerable decrease in the test error. More...
 
size_t fConvergenceSteps
 Number of training epochs without considerable decrease in the test error required for convergence. More...
 
Scalar_t fLearningRate
 Learning rate \(\alpha\). More...
 
Scalar_t fMinimumError
 The minimum loss achieved on the training set. More...
 
size_t fStepCount
 Number of steps performed in the current training session. More...
 
Scalar_t fTestError
 Holds the most recently computed test loss. More...
 
size_t fTestInterval
 Interval for the computation of the test error. More...
 
Scalar_t fTrainingError
 Holds the most recently computed training loss. More...
 

#include <TMVA/DNN/Minimizers.h>

Member Typedef Documentation

◆ Matrix_t

template<typename Architecture_t >
using TMVA::DNN::TGradientDescent< Architecture_t >::Matrix_t = typename Architecture_t::Matrix_t

Definition at line 58 of file Minimizers.h.

◆ Scalar_t

template<typename Architecture_t >
using TMVA::DNN::TGradientDescent< Architecture_t >::Scalar_t = typename Architecture_t::Scalar_t

Definition at line 57 of file Minimizers.h.

Constructor & Destructor Documentation

◆ TGradientDescent() [1/2]

template<typename Architecture_t >
TMVA::DNN::TGradientDescent< Architecture_t >::TGradientDescent ( )

Definition at line 175 of file Minimizers.h.

◆ TGradientDescent() [2/2]

template<typename Architecture_t >
TMVA::DNN::TGradientDescent< Architecture_t >::TGradientDescent ( Scalar_t  learningRate,
size_t  convergenceSteps,
size_t  testInterval 
)

Definition at line 184 of file Minimizers.h.

Member Function Documentation

◆ GetConvergenceCount()

template<typename Architecture_t >
size_t TMVA::DNN::TGradientDescent< Architecture_t >::GetConvergenceCount ( ) const
inline

Definition at line 159 of file Minimizers.h.

◆ GetConvergenceSteps()

template<typename Architecture_t >
size_t TMVA::DNN::TGradientDescent< Architecture_t >::GetConvergenceSteps ( ) const
inline

Definition at line 160 of file Minimizers.h.

◆ GetTestError()

template<typename Architecture_t >
Scalar_t TMVA::DNN::TGradientDescent< Architecture_t >::GetTestError ( ) const
inline

Definition at line 162 of file Minimizers.h.

◆ GetTestInterval()

template<typename Architecture_t >
size_t TMVA::DNN::TGradientDescent< Architecture_t >::GetTestInterval ( ) const
inline

Definition at line 163 of file Minimizers.h.

◆ GetTrainingError()

template<typename Architecture_t >
Scalar_t TMVA::DNN::TGradientDescent< Architecture_t >::GetTrainingError ( ) const
inline

Definition at line 161 of file Minimizers.h.

◆ HasConverged() [1/2]

template<typename Architecture_t >
bool TMVA::DNN::TGradientDescent< Architecture_t >::HasConverged ( )
inline

Increases the minimization step counter by the test error evaluation period and uses the current internal value of the test error to determine if the minimization has converged.

Definition at line 665 of file Minimizers.h.

◆ HasConverged() [2/2]

template<typename Architecture_t >
bool TMVA::DNN::TGradientDescent< Architecture_t >::HasConverged ( Scalar_t  testError)
inline

Increases the minimization step counter by the test error evaluation period and uses the provided test error value to determine if the minimization has converged.

Definition at line 679 of file Minimizers.h.

◆ Reset()

template<typename Architecture_t >
void TMVA::DNN::TGradientDescent< Architecture_t >::Reset ( void  )
inline

Reset minimizer object to default state.

Definition at line 81 of file Minimizers.h.

◆ SetBatchSize()

template<typename Architecture_t >
void TMVA::DNN::TGradientDescent< Architecture_t >::SetBatchSize ( Scalar_t  rate)
inline

Definition at line 168 of file Minimizers.h.

◆ SetConvergenceSteps()

template<typename Architecture_t >
void TMVA::DNN::TGradientDescent< Architecture_t >::SetConvergenceSteps ( size_t  steps)
inline

Definition at line 165 of file Minimizers.h.

◆ SetLearningRate()

template<typename Architecture_t >
void TMVA::DNN::TGradientDescent< Architecture_t >::SetLearningRate ( Scalar_t  rate)
inline

Definition at line 167 of file Minimizers.h.

◆ SetTestInterval()

template<typename Architecture_t >
void TMVA::DNN::TGradientDescent< Architecture_t >::SetTestInterval ( size_t  interval)
inline

Definition at line 166 of file Minimizers.h.

◆ Step() [1/2]

template<typename Architecture_t >
template<typename Net_t >
void TMVA::DNN::TGradientDescent< Architecture_t >::Step ( Net_t &  net,
Matrix_t input,
const Matrix_t output,
const Matrix_t weights 
)
inline

Perform a single optimization step on a given batch.

Propagates the input matrix forward through the net, evaluates the loss and propagates the gradients backward through the net. The computed gradients are scaled by the learning rate \(\alpha\) and subtracted from the weights and bias values of each layer.

Definition at line 329 of file Minimizers.h.

◆ Step() [2/2]

template<typename Architecture_t >
template<typename Net_t >
void TMVA::DNN::TGradientDescent< Architecture_t >::Step ( Net_t &  master,
std::vector< Net_t > &  nets,
std::vector< TBatch< Architecture_t >> &  batches 
)
inline

Perform multiple optimization steps simultaneously.

Performs the backprop algorithm on the input batches given in batches on the neural networks given in nets. The forward and backward propagation steps are executed in an interleaving manner in order to exploit potential batch-level parallelism for asynchronous device calls.

Definition at line 372 of file Minimizers.h.

◆ StepLoss() [1/2]

template<typename Architecture_t >
template<typename Net_t >
Scalar_t TMVA::DNN::TGradientDescent< Architecture_t >::StepLoss ( Net_t &  net,
Matrix_t input,
const Matrix_t output,
const Matrix_t weights 
)

Same as Step(...) but also evaluate the loss on the given training data.

Note that this requires synchronization between host and device.

◆ StepLoss() [2/2]

template<typename Architecture_t >
template<typename Net_t >
auto TMVA::DNN::TGradientDescent< Architecture_t >::StepLoss ( Net_t &  net,
Matrix_t input,
const Matrix_t output,
const Matrix_t weights 
) -> Scalar_t
inline

Definition at line 350 of file Minimizers.h.

◆ StepMomentum()

template<typename Architecture_t >
template<typename Net_t >
void TMVA::DNN::TGradientDescent< Architecture_t >::StepMomentum ( Net_t &  master,
std::vector< Net_t > &  nets,
std::vector< TBatch< Architecture_t >> &  batches,
Scalar_t  momentum 
)
inline

Same as the Step(...) method for multiple batches but uses momentum.

Definition at line 436 of file Minimizers.h.

◆ StepNesterov()

template<typename Architecture_t >
template<typename Net_t >
void TMVA::DNN::TGradientDescent< Architecture_t >::StepNesterov ( Net_t &  master,
std::vector< Net_t > &  nets,
std::vector< TBatch< Architecture_t >> &  batches,
Scalar_t  momentum 
)
inline

Same as the Step(...) method for multiple batches but uses Nesterov momentum.

Definition at line 526 of file Minimizers.h.

◆ StepReducedWeights()

template<typename Architecture_t >
template<typename Net_t >
void TMVA::DNN::TGradientDescent< Architecture_t >::StepReducedWeights ( Net_t &  net,
Matrix_t input,
const Matrix_t output 
)
inline

Does not evaluate the loss and therefore not trigger a possible synchronization with the device.

Trains the weights of each layer, but only the bias terms of the first layer for compatibility with the previous implementation.

Definition at line 615 of file Minimizers.h.

◆ StepReducedWeightsLoss() [1/2]

template<typename Architecture_t >
template<typename Net_t >
Scalar_t TMVA::DNN::TGradientDescent< Architecture_t >::StepReducedWeightsLoss ( Net_t &  net,
Matrix_t input,
const Matrix_t output,
const Matrix_t weights 
)

Similar to StepReducedWeights(...) but also evaluates the loss.

May trigger synchronization with the device.

◆ StepReducedWeightsLoss() [2/2]

template<typename Architecture_t >
template<typename Net_t >
auto TMVA::DNN::TGradientDescent< Architecture_t >::StepReducedWeightsLoss ( Net_t &  net,
Matrix_t input,
const Matrix_t output,
const Matrix_t weights 
) -> Scalar_t
inline

Definition at line 640 of file Minimizers.h.

◆ Train() [1/2]

template<typename Architecture_t >
template<typename Data_t , typename Net_t >
Scalar_t TMVA::DNN::TGradientDescent< Architecture_t >::Train ( const Data_t &  TrainingDataIn,
size_t  nTrainingSamples,
const Data_t &  TestDataIn,
size_t  nTestSamples,
Net_t &  net,
size_t  nThreads = 1 
)

Train the given net using the given training input data (events), training output data (labels), test input data (events), test output data (labels).

◆ Train() [2/2]

template<typename Architecture_t >
template<typename Data_t , typename Net_t >
auto TMVA::DNN::TGradientDescent< Architecture_t >::Train ( const Data_t &  trainingData,
size_t  nTrainingSamples,
const Data_t &  testData,
size_t  nTestSamples,
Net_t &  net,
size_t  nThreads 
) -> Scalar_t

Definition at line 194 of file Minimizers.h.

◆ TrainMomentum() [1/2]

template<typename Architecture_t >
template<typename Data_t , typename Net_t >
Scalar_t TMVA::DNN::TGradientDescent< Architecture_t >::TrainMomentum ( const Data_t &  TrainingDataIn,
size_t  nTrainingSamples,
const Data_t &  TestDataIn,
size_t  nTestSamples,
Net_t &  net,
Scalar_t  momentum,
size_t  nThreads = 1 
)

Same as Train(...) but uses the given momentum.

◆ TrainMomentum() [2/2]

template<typename Architecture_t >
template<typename Data_t , typename Net_t >
auto TMVA::DNN::TGradientDescent< Architecture_t >::TrainMomentum ( const Data_t &  trainingData,
size_t  nTrainingSamples,
const Data_t &  testData,
size_t  nTestSamples,
Net_t &  net,
Scalar_t  momentum,
size_t  nThreads 
) -> Scalar_t

Definition at line 257 of file Minimizers.h.

Member Data Documentation

◆ fBatchSize

template<typename Architecture_t >
size_t TMVA::DNN::TGradientDescent< Architecture_t >::fBatchSize
private

Batch size to use for the training.

Definition at line 61 of file Minimizers.h.

◆ fConvergenceCount

template<typename Architecture_t >
size_t TMVA::DNN::TGradientDescent< Architecture_t >::fConvergenceCount
private

Current number of training epochs without considerable decrease in the test error.

Definition at line 65 of file Minimizers.h.

◆ fConvergenceSteps

template<typename Architecture_t >
size_t TMVA::DNN::TGradientDescent< Architecture_t >::fConvergenceSteps
private

Number of training epochs without considerable decrease in the test error required for convergence.

Definition at line 63 of file Minimizers.h.

◆ fLearningRate

template<typename Architecture_t >
Scalar_t TMVA::DNN::TGradientDescent< Architecture_t >::fLearningRate
private

Learning rate \(\alpha\).

Definition at line 70 of file Minimizers.h.

◆ fMinimumError

template<typename Architecture_t >
Scalar_t TMVA::DNN::TGradientDescent< Architecture_t >::fMinimumError
private

The minimum loss achieved on the training set during the current training session.

Definition at line 71 of file Minimizers.h.

◆ fStepCount

template<typename Architecture_t >
size_t TMVA::DNN::TGradientDescent< Architecture_t >::fStepCount
private

Number of steps performed in the current training session.

Definition at line 62 of file Minimizers.h.

◆ fTestError

template<typename Architecture_t >
Scalar_t TMVA::DNN::TGradientDescent< Architecture_t >::fTestError
private

Holds the most recently computed test loss.

Definition at line 69 of file Minimizers.h.

◆ fTestInterval

template<typename Architecture_t >
size_t TMVA::DNN::TGradientDescent< Architecture_t >::fTestInterval
private

Interval for the computation of the test error.

Definition at line 67 of file Minimizers.h.

◆ fTrainingError

template<typename Architecture_t >
Scalar_t TMVA::DNN::TGradientDescent< Architecture_t >::fTrainingError
private

Holds the most recently computed training loss.

Definition at line 68 of file Minimizers.h.

Libraries for TMVA::DNN::TGradientDescent< Architecture_t >:
[legend]

The documentation for this class was generated from the following file: