Logo ROOT  
Reference Guide
 
Loading...
Searching...
No Matches
TMVA::DNN::TAdadelta< Architecture_t, Layer_t, DeepNet_t > Class Template Reference

template<typename Architecture_t, typename Layer_t = VGeneralLayer<Architecture_t>, typename DeepNet_t = TDeepNet<Architecture_t, Layer_t>>
class TMVA::DNN::TAdadelta< Architecture_t, Layer_t, DeepNet_t >

Adadelta Optimizer class.

This class represents the Adadelta Optimizer, which adapts per-parameter learning rates using exponentially decaying accumulations of past squared gradients and past squared updates.

Definition at line 45 of file Adadelta.h.

Public Types

using Matrix_t = typename Architecture_t::Matrix_t
 
using Scalar_t = typename Architecture_t::Scalar_t
 
- Public Types inherited from TMVA::DNN::VOptimizer< Architecture_t, Layer_t, DeepNet_t >
using Matrix_t = typename Architecture_t::Matrix_t
 
using Scalar_t = typename Architecture_t::Scalar_t
 

Public Member Functions

 TAdadelta (DeepNet_t &deepNet, Scalar_t learningRate=1.0, Scalar_t rho=0.95, Scalar_t epsilon=1e-8)
 Constructor.
 
 ~TAdadelta ()=default
 Destructor.
 
Scalar_t GetEpsilon () const
 
std::vector< std::vector< Matrix_t > > & GetPastSquaredBiasGradients ()
 
std::vector< Matrix_t > & GetPastSquaredBiasGradientsAt (size_t i)
 
std::vector< std::vector< Matrix_t > > & GetPastSquaredBiasUpdates ()
 
std::vector< Matrix_t > & GetPastSquaredBiasUpdatesAt (size_t i)
 
std::vector< std::vector< Matrix_t > > & GetPastSquaredWeightGradients ()
 
std::vector< Matrix_t > & GetPastSquaredWeightGradientsAt (size_t i)
 
std::vector< std::vector< Matrix_t > > & GetPastSquaredWeightUpdates ()
 
std::vector< Matrix_t > & GetPastSquaredWeightUpdatesAt (size_t i)
 
Scalar_t GetRho () const
 Getters.
 
- Public Member Functions inherited from TMVA::DNN::VOptimizer< Architecture_t, Layer_t, DeepNet_t >
 VOptimizer (Scalar_t learningRate, DeepNet_t &deepNet)
 Constructor.
 
virtual ~VOptimizer ()=default
 Virtual Destructor.
 
size_t GetGlobalStep () const
 
Layer_t * GetLayerAt (size_t i)
 
std::vector< Layer_t * > & GetLayers ()
 
Scalar_t GetLearningRate () const
 Getters.
 
void IncrementGlobalStep ()
 Increments the global step.
 
void SetLearningRate (size_t learningRate)
 Setters.
 
void Step ()
 Performs one step of optimization.
 

Protected Member Functions

void UpdateBiases (size_t layerIndex, std::vector< Matrix_t > &biases, const std::vector< Matrix_t > &biasGradients)
 Update the biases, given the current bias gradients.
 
void UpdateWeights (size_t layerIndex, std::vector< Matrix_t > &weights, const std::vector< Matrix_t > &weightGradients)
 Update the weights, given the current weight gradients.
 

Protected Attributes

Scalar_t fEpsilon
 The smoothing term used to avoid division by zero.
 
std::vector< std::vector< Matrix_t > > fPastSquaredBiasGradients
 The accumulation of the square of the past bias gradients associated with the deep net.
 
std::vector< std::vector< Matrix_t > > fPastSquaredBiasUpdates
 The accumulation of the square of the past bias updates associated with the deep net.
 
std::vector< std::vector< Matrix_t > > fPastSquaredWeightGradients
 The accumulation of the square of the past weight gradients associated with the deep net.
 
std::vector< std::vector< Matrix_t > > fPastSquaredWeightUpdates
 The accumulation of the square of the past weight updates associated with the deep net.
 
Scalar_t fRho
 The Rho constant (exponential decay rate of the accumulated squared gradients and updates) used by the optimizer.
 
std::vector< std::vector< Matrix_t > > fWorkBiasTensor1
 working tensor used to keep a temporary copy of bias or bias gradients
 
std::vector< std::vector< Matrix_t > > fWorkBiasTensor2
 working tensor used to keep a temporary copy of bias or bias gradients
 
std::vector< std::vector< Matrix_t > > fWorkWeightTensor1
 working tensor used to keep a temporary copy of weights or weight gradients
 
std::vector< std::vector< Matrix_t > > fWorkWeightTensor2
 working tensor used to keep a temporary copy of weights or weight gradients
 
- Protected Attributes inherited from TMVA::DNN::VOptimizer< Architecture_t, Layer_t, DeepNet_t >
DeepNet_t & fDeepNet
 The reference to the deep net.
 
size_t fGlobalStep
 The current global step count during training.
 
Scalar_t fLearningRate
 The learning rate used for training.
 

#include <TMVA/DNN/Adadelta.h>

Inheritance diagram for TMVA::DNN::TAdadelta< Architecture_t, Layer_t, DeepNet_t >:
[legend]

Member Typedef Documentation

◆ Matrix_t

template<typename Architecture_t , typename Layer_t = VGeneralLayer<Architecture_t>, typename DeepNet_t = TDeepNet<Architecture_t, Layer_t>>
using TMVA::DNN::TAdadelta< Architecture_t, Layer_t, DeepNet_t >::Matrix_t = typename Architecture_t::Matrix_t

Definition at line 47 of file Adadelta.h.

◆ Scalar_t

template<typename Architecture_t , typename Layer_t = VGeneralLayer<Architecture_t>, typename DeepNet_t = TDeepNet<Architecture_t, Layer_t>>
using TMVA::DNN::TAdadelta< Architecture_t, Layer_t, DeepNet_t >::Scalar_t = typename Architecture_t::Scalar_t

Definition at line 48 of file Adadelta.h.

Constructor & Destructor Documentation

◆ TAdadelta()

template<typename Architecture_t , typename Layer_t , typename DeepNet_t >
TMVA::DNN::TAdadelta< Architecture_t, Layer_t, DeepNet_t >::TAdadelta ( DeepNet_t &  deepNet,
Scalar_t  learningRate = 1.0,
Scalar_t  rho = 0.95,
Scalar_t  epsilon = 1e-8 
)

Constructor.

Definition at line 102 of file Adadelta.h.

◆ ~TAdadelta()

template<typename Architecture_t , typename Layer_t = VGeneralLayer<Architecture_t>, typename DeepNet_t = TDeepNet<Architecture_t, Layer_t>>
TMVA::DNN::TAdadelta< Architecture_t, Layer_t, DeepNet_t >::~TAdadelta ( )
default

Destructor.

Member Function Documentation

◆ GetEpsilon()

template<typename Architecture_t , typename Layer_t = VGeneralLayer<Architecture_t>, typename DeepNet_t = TDeepNet<Architecture_t, Layer_t>>
Scalar_t TMVA::DNN::TAdadelta< Architecture_t, Layer_t, DeepNet_t >::GetEpsilon ( ) const
inline

Definition at line 82 of file Adadelta.h.

◆ GetPastSquaredBiasGradients()

template<typename Architecture_t , typename Layer_t = VGeneralLayer<Architecture_t>, typename DeepNet_t = TDeepNet<Architecture_t, Layer_t>>
std::vector< std::vector< Matrix_t > > & TMVA::DNN::TAdadelta< Architecture_t, Layer_t, DeepNet_t >::GetPastSquaredBiasGradients ( )
inline

Definition at line 87 of file Adadelta.h.

◆ GetPastSquaredBiasGradientsAt()

template<typename Architecture_t , typename Layer_t = VGeneralLayer<Architecture_t>, typename DeepNet_t = TDeepNet<Architecture_t, Layer_t>>
std::vector< Matrix_t > & TMVA::DNN::TAdadelta< Architecture_t, Layer_t, DeepNet_t >::GetPastSquaredBiasGradientsAt ( size_t  i)
inline

Definition at line 88 of file Adadelta.h.

◆ GetPastSquaredBiasUpdates()

template<typename Architecture_t , typename Layer_t = VGeneralLayer<Architecture_t>, typename DeepNet_t = TDeepNet<Architecture_t, Layer_t>>
std::vector< std::vector< Matrix_t > > & TMVA::DNN::TAdadelta< Architecture_t, Layer_t, DeepNet_t >::GetPastSquaredBiasUpdates ( )
inline

Definition at line 93 of file Adadelta.h.

◆ GetPastSquaredBiasUpdatesAt()

template<typename Architecture_t , typename Layer_t = VGeneralLayer<Architecture_t>, typename DeepNet_t = TDeepNet<Architecture_t, Layer_t>>
std::vector< Matrix_t > & TMVA::DNN::TAdadelta< Architecture_t, Layer_t, DeepNet_t >::GetPastSquaredBiasUpdatesAt ( size_t  i)
inline

Definition at line 94 of file Adadelta.h.

◆ GetPastSquaredWeightGradients()

template<typename Architecture_t , typename Layer_t = VGeneralLayer<Architecture_t>, typename DeepNet_t = TDeepNet<Architecture_t, Layer_t>>
std::vector< std::vector< Matrix_t > > & TMVA::DNN::TAdadelta< Architecture_t, Layer_t, DeepNet_t >::GetPastSquaredWeightGradients ( )
inline

Definition at line 84 of file Adadelta.h.

◆ GetPastSquaredWeightGradientsAt()

template<typename Architecture_t , typename Layer_t = VGeneralLayer<Architecture_t>, typename DeepNet_t = TDeepNet<Architecture_t, Layer_t>>
std::vector< Matrix_t > & TMVA::DNN::TAdadelta< Architecture_t, Layer_t, DeepNet_t >::GetPastSquaredWeightGradientsAt ( size_t  i)
inline

Definition at line 85 of file Adadelta.h.

◆ GetPastSquaredWeightUpdates()

template<typename Architecture_t , typename Layer_t = VGeneralLayer<Architecture_t>, typename DeepNet_t = TDeepNet<Architecture_t, Layer_t>>
std::vector< std::vector< Matrix_t > > & TMVA::DNN::TAdadelta< Architecture_t, Layer_t, DeepNet_t >::GetPastSquaredWeightUpdates ( )
inline

Definition at line 90 of file Adadelta.h.

◆ GetPastSquaredWeightUpdatesAt()

template<typename Architecture_t , typename Layer_t = VGeneralLayer<Architecture_t>, typename DeepNet_t = TDeepNet<Architecture_t, Layer_t>>
std::vector< Matrix_t > & TMVA::DNN::TAdadelta< Architecture_t, Layer_t, DeepNet_t >::GetPastSquaredWeightUpdatesAt ( size_t  i)
inline

Definition at line 91 of file Adadelta.h.

◆ GetRho()

template<typename Architecture_t , typename Layer_t = VGeneralLayer<Architecture_t>, typename DeepNet_t = TDeepNet<Architecture_t, Layer_t>>
Scalar_t TMVA::DNN::TAdadelta< Architecture_t, Layer_t, DeepNet_t >::GetRho ( ) const
inline

Getters.

Definition at line 81 of file Adadelta.h.

◆ UpdateBiases()

template<typename Architecture_t , typename Layer_t , typename DeepNet_t >
auto TMVA::DNN::TAdadelta< Architecture_t, Layer_t, DeepNet_t >::UpdateBiases ( size_t  layerIndex,
std::vector< Matrix_t > &  biases,
const std::vector< Matrix_t > &  biasGradients 
)
protectedvirtual

Update the biases, given the current bias gradients.

Implements TMVA::DNN::VOptimizer< Architecture_t, Layer_t, DeepNet_t >.

Definition at line 206 of file Adadelta.h.

◆ UpdateWeights()

template<typename Architecture_t , typename Layer_t , typename DeepNet_t >
auto TMVA::DNN::TAdadelta< Architecture_t, Layer_t, DeepNet_t >::UpdateWeights ( size_t  layerIndex,
std::vector< Matrix_t > &  weights,
const std::vector< Matrix_t > &  weightGradients 
)
protectedvirtual

Update the weights, given the current weight gradients.

Implements TMVA::DNN::VOptimizer< Architecture_t, Layer_t, DeepNet_t >.

Definition at line 147 of file Adadelta.h.

Member Data Documentation

◆ fEpsilon

template<typename Architecture_t , typename Layer_t = VGeneralLayer<Architecture_t>, typename DeepNet_t = TDeepNet<Architecture_t, Layer_t>>
Scalar_t TMVA::DNN::TAdadelta< Architecture_t, Layer_t, DeepNet_t >::fEpsilon
protected

The smoothing term used to avoid division by zero.

Definition at line 52 of file Adadelta.h.

◆ fPastSquaredBiasGradients

template<typename Architecture_t , typename Layer_t = VGeneralLayer<Architecture_t>, typename DeepNet_t = TDeepNet<Architecture_t, Layer_t>>
std::vector<std::vector<Matrix_t> > TMVA::DNN::TAdadelta< Architecture_t, Layer_t, DeepNet_t >::fPastSquaredBiasGradients
protected

The accumulation of the square of the past bias gradients associated with the deep net.

Definition at line 55 of file Adadelta.h.

◆ fPastSquaredBiasUpdates

template<typename Architecture_t , typename Layer_t = VGeneralLayer<Architecture_t>, typename DeepNet_t = TDeepNet<Architecture_t, Layer_t>>
std::vector<std::vector<Matrix_t> > TMVA::DNN::TAdadelta< Architecture_t, Layer_t, DeepNet_t >::fPastSquaredBiasUpdates
protected

The accumulation of the square of the past bias updates associated with the deep net.

Definition at line 60 of file Adadelta.h.

◆ fPastSquaredWeightGradients

template<typename Architecture_t , typename Layer_t = VGeneralLayer<Architecture_t>, typename DeepNet_t = TDeepNet<Architecture_t, Layer_t>>
std::vector<std::vector<Matrix_t> > TMVA::DNN::TAdadelta< Architecture_t, Layer_t, DeepNet_t >::fPastSquaredWeightGradients
protected

The accumulation of the square of the past weight gradients associated with the deep net.

Definition at line 53 of file Adadelta.h.

◆ fPastSquaredWeightUpdates

template<typename Architecture_t , typename Layer_t = VGeneralLayer<Architecture_t>, typename DeepNet_t = TDeepNet<Architecture_t, Layer_t>>
std::vector<std::vector<Matrix_t> > TMVA::DNN::TAdadelta< Architecture_t, Layer_t, DeepNet_t >::fPastSquaredWeightUpdates
protected

The accumulation of the square of the past weight updates associated with the deep net.

Definition at line 58 of file Adadelta.h.

◆ fRho

template<typename Architecture_t , typename Layer_t = VGeneralLayer<Architecture_t>, typename DeepNet_t = TDeepNet<Architecture_t, Layer_t>>
Scalar_t TMVA::DNN::TAdadelta< Architecture_t, Layer_t, DeepNet_t >::fRho
protected

The Rho constant (exponential decay rate of the accumulated squared gradients and updates) used by the optimizer.

Definition at line 51 of file Adadelta.h.

◆ fWorkBiasTensor1

template<typename Architecture_t , typename Layer_t = VGeneralLayer<Architecture_t>, typename DeepNet_t = TDeepNet<Architecture_t, Layer_t>>
std::vector<std::vector<Matrix_t> > TMVA::DNN::TAdadelta< Architecture_t, Layer_t, DeepNet_t >::fWorkBiasTensor1
protected

working tensor used to keep a temporary copy of bias or bias gradients

Definition at line 63 of file Adadelta.h.

◆ fWorkBiasTensor2

template<typename Architecture_t , typename Layer_t = VGeneralLayer<Architecture_t>, typename DeepNet_t = TDeepNet<Architecture_t, Layer_t>>
std::vector<std::vector<Matrix_t> > TMVA::DNN::TAdadelta< Architecture_t, Layer_t, DeepNet_t >::fWorkBiasTensor2
protected

working tensor used to keep a temporary copy of bias or bias gradients

Definition at line 65 of file Adadelta.h.

◆ fWorkWeightTensor1

template<typename Architecture_t , typename Layer_t = VGeneralLayer<Architecture_t>, typename DeepNet_t = TDeepNet<Architecture_t, Layer_t>>
std::vector<std::vector<Matrix_t> > TMVA::DNN::TAdadelta< Architecture_t, Layer_t, DeepNet_t >::fWorkWeightTensor1
protected

working tensor used to keep a temporary copy of weights or weight gradients

Definition at line 62 of file Adadelta.h.

◆ fWorkWeightTensor2

template<typename Architecture_t , typename Layer_t = VGeneralLayer<Architecture_t>, typename DeepNet_t = TDeepNet<Architecture_t, Layer_t>>
std::vector<std::vector<Matrix_t> > TMVA::DNN::TAdadelta< Architecture_t, Layer_t, DeepNet_t >::fWorkWeightTensor2
protected

working tensor used to keep a temporary copy of weights or weight gradients

Definition at line 64 of file Adadelta.h.

  • tmva/tmva/inc/TMVA/DNN/Adadelta.h