#ifndef TMVA_DNN_ADAGRAD
#define TMVA_DNN_ADAGRAD
42template <
typename Architecture_t,
typename Layer_t = VGeneralLayer<Architecture_t>,
43 typename DeepNet_t = TDeepNet<Architecture_t, Layer_t>>
46 using Matrix_t =
typename Architecture_t::Matrix_t;
47 using Scalar_t =
typename Architecture_t::Scalar_t;
52 std::vector<std::vector<Matrix_t>>
54 std::vector<std::vector<Matrix_t>>
58 void UpdateWeights(
size_t layerIndex, std::vector<Matrix_t> &weights,
const std::vector<Matrix_t> &weightGradients);
61 void UpdateBiases(
size_t layerIndex, std::vector<Matrix_t> &biases,
const std::vector<Matrix_t> &biasGradients);
84template <
typename Architecture_t,
typename Layer_t,
typename DeepNet_t>
86 :
VOptimizer<Architecture_t, Layer_t, DeepNet_t>(learningRate, deepNet), fEpsilon(
epsilon)
88 std::vector<Layer_t *> &layers = deepNet.
GetLayers();
89 const size_t layersNSlices = layers.size();
93 for (
size_t i = 0; i < layersNSlices; i++) {
94 const size_t weightsNSlices = (layers[i]->GetWeights()).size();
96 for (
size_t j = 0; j < weightsNSlices; j++) {
97 Matrix_t ¤tWeights = layers[i]->GetWeightsAt(j);
98 const size_t weightsNRows = currentWeights.GetNrows();
99 const size_t weightsNCols = currentWeights.GetNcols();
105 const size_t biasesNSlices = (layers[i]->GetBiases()).size();
107 for (
size_t j = 0; j < biasesNSlices; j++) {
108 Matrix_t ¤tBiases = layers[i]->GetBiasesAt(j);
109 const size_t biasesNRows = currentBiases.GetNrows();
110 const size_t biasesNCols = currentBiases.GetNcols();
119template <
typename Architecture_t,
typename Layer_t,
typename DeepNet_t>
121 const std::vector<Matrix_t> &weightGradients) ->
void
123 std::vector<Matrix_t> ¤tLayerPastSquaredWeightGradients = this->GetPastSquaredWeightGradientsAt(layerIndex);
125 for (
size_t k = 0; k < currentLayerPastSquaredWeightGradients.size(); k++) {
128 Matrix_t currentSquaredWeightGradients(weightGradients[k].GetNrows(), weightGradients[k].GetNcols());
130 Architecture_t::SquareElementWise(currentSquaredWeightGradients);
131 Architecture_t::ScaleAdd(currentLayerPastSquaredWeightGradients[k], currentSquaredWeightGradients, 1.0);
136 for (
size_t i = 0; i < weights.size(); i++) {
137 Matrix_t currentWeightUpdates(weights[i].GetNrows(), weights[i].GetNcols());
139 Architecture_t::ConstAdd(currentWeightUpdates, this->GetEpsilon());
140 Architecture_t::SqrtElementWise(currentWeightUpdates);
141 Architecture_t::ReciprocalElementWise(currentWeightUpdates);
142 Architecture_t::Hadamard(currentWeightUpdates, weightGradients[i]);
143 Architecture_t::ScaleAdd(weights[i], currentWeightUpdates, -this->GetLearningRate());
148template <
typename Architecture_t,
typename Layer_t,
typename DeepNet_t>
150 const std::vector<Matrix_t> &biasGradients) ->
void
152 std::vector<Matrix_t> ¤tLayerPastSquaredBiasGradients = this->GetPastSquaredBiasGradientsAt(layerIndex);
154 for (
size_t k = 0; k < currentLayerPastSquaredBiasGradients.size(); k++) {
157 Matrix_t currentSquaredBiasGradients(biasGradients[k].GetNrows(), biasGradients[k].GetNcols());
159 Architecture_t::SquareElementWise(currentSquaredBiasGradients);
160 Architecture_t::ScaleAdd(currentLayerPastSquaredBiasGradients[k], currentSquaredBiasGradients, 1.0);
165 for (
size_t i = 0; i < biases.size(); i++) {
166 Matrix_t currentBiasUpdates(biases[i].GetNrows(), biases[i].GetNcols());
168 Architecture_t::ConstAdd(currentBiasUpdates, this->GetEpsilon());
169 Architecture_t::SqrtElementWise(currentBiasUpdates);
170 Architecture_t::ReciprocalElementWise(currentBiasUpdates);
171 Architecture_t::Hadamard(currentBiasUpdates, biasGradients[i]);
172 Architecture_t::ScaleAdd(biases[i], currentBiasUpdates, -this->GetLearningRate());
void UpdateWeights(size_t layerIndex, std::vector< Matrix_t > &weights, const std::vector< Matrix_t > &weightGradients)
Update the weights, given the current weight gradients.
void UpdateBiases(size_t layerIndex, std::vector< Matrix_t > &biases, const std::vector< Matrix_t > &biasGradients)
Update the biases, given the current bias gradients.
std::vector< std::vector< Matrix_t > > & GetPastSquaredBiasGradients()
std::vector< std::vector< Matrix_t > > fPastSquaredBiasGradients
The sum of the square of the past bias gradients associated with the deep net.
Scalar_t GetEpsilon() const
Getters.
TAdagrad(DeepNet_t &deepNet, Scalar_t learningRate=0.01, Scalar_t epsilon=1e-8)
Constructor.
std::vector< std::vector< Matrix_t > > fPastSquaredWeightGradients
The sum of the square of the past weight gradients associated with the deep net.
typename Architecture_t::Matrix_t Matrix_t
typename Architecture_t::Scalar_t Scalar_t
Scalar_t fEpsilon
The smoothing term used to avoid division by zero.
std::vector< std::vector< Matrix_t > > & GetPastSquaredWeightGradients()
std::vector< Matrix_t > & GetPastSquaredBiasGradientsAt(size_t i)
~TAdagrad()=default
Destructor.
std::vector< Matrix_t > & GetPastSquaredWeightGradientsAt(size_t i)
std::vector< Layer_t * > & GetLayers()
void Copy(void *source, void *dest)
Abstract ClassifierFactory template that handles arbitrary types.