#ifndef TMVA_DNN_FUNCTIONS
#define TMVA_DNN_FUNCTIONS
/// Apply the given activation function to each value in the given tensor A.
template<typename Architecture_t>
inline void evaluate(typename Architecture_t::Tensor_t &A, EActivationFunction f)
/// Compute the first partial derivative of the activation function for the values
/// given in tensor A and write the results into B.
template<typename Architecture_t>
inline void evaluateDerivative(typename Architecture_t::Tensor_t &B, EActivationFunction f,
                               const typename Architecture_t::Tensor_t &A)
/// Matrix variant: wraps A in a Tensor_t and applies the activation function f.
template<typename Architecture_t>
inline void evaluateMatrix(typename Architecture_t::Matrix_t &A, EActivationFunction f)
{
   typename Architecture_t::Tensor_t t(A);
   evaluate<Architecture_t>(t, f);
}
/// Matrix variant: computes the activation derivatives for A and writes them into B.
template<typename Architecture_t>
inline void evaluateDerivativeMatrix(typename Architecture_t::Matrix_t &B, EActivationFunction f,
                                     const typename Architecture_t::Matrix_t &A)
{
   typename Architecture_t::Tensor_t t(B);
   evaluateDerivative<Architecture_t>(t, f, typename Architecture_t::Tensor_t(A));
}
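As an illustration of the kernels these helpers dispatch to, the following is a minimal, self-contained sketch of element-wise activation functions and their derivatives for a hypothetical backend. The names (ToyArch, Apply, Relu, ReluDerivative, ...) are stand-ins for illustration only, not the TMVA architecture API.

// --- illustrative example, not part of Functions.h ---
#include <cmath>
#include <cstddef>
#include <functional>
#include <vector>

struct ToyArch {
   using Scalar_t = double;
   using Tensor_t = std::vector<double>;   // flat element buffer

   // Apply f in place to every element of the tensor.
   static void Apply(Tensor_t &A, const std::function<double(double)> &f)
   {
      for (auto &x : A) x = f(x);
   }

   static void Relu(Tensor_t &A) { Apply(A, [](double x) { return x > 0.0 ? x : 0.0; }); }

   // Derivative of ReLU evaluated at the inputs A, written into B.
   static void ReluDerivative(Tensor_t &B, const Tensor_t &A)
   {
      for (std::size_t i = 0; i < A.size(); ++i) B[i] = A[i] > 0.0 ? 1.0 : 0.0;
   }

   static void Tanh(Tensor_t &A) { Apply(A, [](double x) { return std::tanh(x); }); }

   static void TanhDerivative(Tensor_t &B, const Tensor_t &A)
   {
      for (std::size_t i = 0; i < A.size(); ++i) {
         double t = std::tanh(A[i]);
         B[i] = 1.0 - t * t;
      }
   }
};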
/// Apply the given output function f to the activations X and write the results into A.
template<typename Architecture_t>
inline void evaluate(typename Architecture_t::Matrix_t &A, EOutputFunction f,
                     const typename Architecture_t::Matrix_t &X)
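For the output-function overload, the mapping from raw last-layer activations X to network predictions A might look as follows for sigmoid and row-wise softmax outputs. This is an illustrative sketch in which plain std::vector matrices stand in for Architecture_t::Matrix_t; A is assumed to have the same shape as X and rows are assumed non-empty.

// --- illustrative example, not part of Functions.h ---
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

using Matrix = std::vector<std::vector<double>>;   // [event][output unit]

// Sigmoid output: A(i,j) = 1 / (1 + exp(-X(i,j))).
void SigmoidOutput(Matrix &A, const Matrix &X)
{
   for (std::size_t i = 0; i < X.size(); ++i)
      for (std::size_t j = 0; j < X[i].size(); ++j)
         A[i][j] = 1.0 / (1.0 + std::exp(-X[i][j]));
}

// Row-wise softmax output, with the row maximum subtracted for numerical stability.
void SoftmaxOutput(Matrix &A, const Matrix &X)
{
   for (std::size_t i = 0; i < X.size(); ++i) {
      const double m = *std::max_element(X[i].begin(), X[i].end());
      double sum = 0.0;
      for (double x : X[i]) sum += std::exp(x - m);
      for (std::size_t j = 0; j < X[i].size(); ++j)
         A[i][j] = std::exp(X[i][j] - m) / sum;
   }
}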
/// Compute the value of the objective function f for the activations output of the
/// output layer, the truth values Y, and the event weights.
template <typename Architecture_t>
inline auto evaluate(ELossFunction f, const typename Architecture_t::Matrix_t &Y,
                     const typename Architecture_t::Matrix_t &output,
                     const typename Architecture_t::Matrix_t &weights)
   -> decltype(Architecture_t::CrossEntropy(Y, output, weights))
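The loss-function overload forwards to architecture kernels such as Architecture_t::CrossEntropy. Below is a hedged sketch of a weighted cross entropy of that kind, with truth values Y, predictions in (0,1), and a per-event weight vector; the types are stand-ins, not the TMVA kernel.

// --- illustrative example, not part of Functions.h ---
#include <cmath>
#include <cstddef>
#include <vector>

using Matrix = std::vector<std::vector<double>>;

// Weighted binary cross entropy over n events:
//   C = -(1/n) * sum_i w_i * sum_j [ y_ij * log(p_ij) + (1 - y_ij) * log(1 - p_ij) ]
// with predictions p = output in (0,1) and per-event weights w.
double CrossEntropy(const Matrix &Y, const Matrix &output, const std::vector<double> &weights)
{
   double sum = 0.0;
   const std::size_t n = Y.size();   // number of events (rows)
   for (std::size_t i = 0; i < n; ++i)
      for (std::size_t j = 0; j < Y[i].size(); ++j) {
         const double y = Y[i][j], p = output[i][j];
         sum += weights[i] * (y * std::log(p) + (1.0 - y) * std::log(1.0 - p));
      }
   return -sum / static_cast<double>(n);
}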
/// Compute the gradient of the given loss function f for the activations output of the
/// output layer and the truth values Y, and write the results into dY.
template <typename Architecture_t>
inline void evaluateGradients(typename Architecture_t::Matrix_t &dY, ELossFunction f,
                              const typename Architecture_t::Matrix_t &Y,
                              const typename Architecture_t::Matrix_t &output,
                              const typename Architecture_t::Matrix_t &weights)
{
   // ... dispatch on f; for the softmax cross-entropy case:
   Architecture_t::SoftmaxCrossEntropyGradients(dY, Y, output, weights);
   // ...
}
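For the softmax cross-entropy case above, the gradient of the loss with respect to the raw output scores of an event is softmax(output) - Y (when each truth row sums to one), scaled by the event weight and averaged over events. An illustrative stand-alone sketch with stand-in types:

// --- illustrative example, not part of Functions.h ---
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

using Matrix = std::vector<std::vector<double>>;

// dY(i,j) = w_i * (softmax(output_i)_j - Y(i,j)) / n, assuming each truth row sums to one.
void SoftmaxCrossEntropyGradients(Matrix &dY, const Matrix &Y, const Matrix &output,
                                  const std::vector<double> &weights)
{
   const std::size_t n = Y.size();
   for (std::size_t i = 0; i < n; ++i) {
      const double m = *std::max_element(output[i].begin(), output[i].end());
      double sum = 0.0;
      for (double x : output[i]) sum += std::exp(x - m);
      for (std::size_t j = 0; j < Y[i].size(); ++j)
         dY[i][j] = weights[i] * (std::exp(output[i][j] - m) / sum - Y[i][j]) / static_cast<double>(n);
   }
}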
/// Evaluate the regularization functional for a given weight matrix.
template<typename Architecture_t>
inline auto regularization(const typename Architecture_t::Matrix_t &A, ERegularization R)
   -> decltype(Architecture_t::L1Regularization(A))
{
   switch (R) {
   case ERegularization::kL1: return Architecture_t::L1Regularization(A);
   case ERegularization::kL2: return Architecture_t::L2Regularization(A);
   default:                   return 0.0;
   }
}
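The two functionals selected here are simply the sum of absolute weights (L1) and the sum of squared weights (L2). A small stand-alone sketch with plain matrices standing in for Architecture_t::Matrix_t:

// --- illustrative example, not part of Functions.h ---
#include <cmath>
#include <vector>

using Matrix = std::vector<std::vector<double>>;

// L1: sum of absolute weights.
double L1Regularization(const Matrix &W)
{
   double sum = 0.0;
   for (const auto &row : W)
      for (double w : row) sum += std::abs(w);
   return sum;
}

// L2: sum of squared weights.
double L2Regularization(const Matrix &W)
{
   double sum = 0.0;
   for (const auto &row : W)
      for (double w : row) sum += w * w;
   return sum;
}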
/// Add the regularization gradient corresponding to weight matrix W, to the matrix A.
template<typename Architecture_t>
void addRegularizationGradients(typename Architecture_t::Matrix_t &A,
                                const typename Architecture_t::Matrix_t &W,
                                typename Architecture_t::Scalar_t weightDecay,
                                ERegularization R)
{
   switch (R) {
   case ERegularization::kL1: Architecture_t::AddL1RegularizationGradients(A, W, weightDecay); break;
   case ERegularization::kL2: Architecture_t::AddL2RegularizationGradients(A, W, weightDecay); break;
   default: break;
   }
}
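Correspondingly, the regularization gradients added to the gradient matrix A are weightDecay * sign(W) for L1 and, under one common convention, 2 * weightDecay * W for L2 (the exact factor depends on how the L2 term is normalized). An illustrative sketch with stand-in types, not the TMVA kernels:

// --- illustrative example, not part of Functions.h ---
#include <cstddef>
#include <vector>

using Matrix = std::vector<std::vector<double>>;

// d/dW of weightDecay * sum|W| is weightDecay * sign(W).
void AddL1RegularizationGradients(Matrix &A, const Matrix &W, double weightDecay)
{
   for (std::size_t i = 0; i < W.size(); ++i)
      for (std::size_t j = 0; j < W[i].size(); ++j) {
         const double w = W[i][j];
         A[i][j] += weightDecay * (w > 0.0 ? 1.0 : (w < 0.0 ? -1.0 : 0.0));
      }
}

// d/dW of weightDecay * sum W^2 is 2 * weightDecay * W (the factor is a convention choice).
void AddL2RegularizationGradients(Matrix &A, const Matrix &W, double weightDecay)
{
   for (std::size_t i = 0; i < W.size(); ++i)
      for (std::size_t j = 0; j < W[i].size(); ++j)
         A[i][j] += 2.0 * weightDecay * W[i][j];
}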
/// Initialize the weight matrix A using the initialization method m.
template<typename Architecture_t>
inline void initialize(typename Architecture_t::Matrix_t &A, EInitialization m)
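A sketch of the kind of initialization such a helper might dispatch to, for example zero, uniform, or a Glorot/Xavier-style Gaussian whose width scales with the layer dimensions. The matrix type and the EInit enum are stand-ins, not the TMVA EInitialization.

// --- illustrative example, not part of Functions.h ---
#include <cmath>
#include <cstddef>
#include <random>
#include <vector>

using Matrix = std::vector<std::vector<double>>;

enum class EInit { kZero, kUniform, kGlorotNormal };

void Initialize(Matrix &A, EInit m)
{
   static std::mt19937 gen{42};
   const std::size_t nRows = A.size();
   const std::size_t nCols = nRows ? A[0].size() : 0;
   std::uniform_real_distribution<double> uni(-1.0, 1.0);
   // Glorot/Xavier: standard deviation scales with fan-in + fan-out of the layer.
   std::normal_distribution<double> glorot(0.0, std::sqrt(2.0 / static_cast<double>(nRows + nCols)));
   for (auto &row : A)
      for (double &a : row) {
         switch (m) {
         case EInit::kZero:         a = 0.0;         break;
         case EInit::kUniform:      a = uni(gen);    break;
         case EInit::kGlorotNormal: a = glorot(gen); break;
         }
      }
}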
void evaluateDerivativeMatrix(typename Architecture_t::Matrix_t &B, EActivationFunction f, const typename Architecture_t::Matrix_t &A)
EOptimizer
Enum representing the optimizer used for training.
void evaluateMatrix(typename Architecture_t::Matrix_t &A, EActivationFunction f)
void addRegularizationGradients(typename Architecture_t::Matrix_t &A, const typename Architecture_t::Matrix_t &W, typename Architecture_t::Scalar_t weightDecay, ERegularization R)
Add the regularization gradient corresponding to weight matrix W, to the matrix A.
EOutputFunction
Enum that represents output functions.
double weightDecay(double error, ItWeight itWeight, ItWeight itWeightEnd, double factorWeightDecay, EnumRegularization eRegularization)
Compute the weight decay for regularization (L1 or L2).
void evaluate(typename Architecture_t::Tensor_t &A, EActivationFunction f)
Apply the given activation function to each value in the given tensor A.
auto regularization(const typename Architecture_t::Matrix_t &A, ERegularization R) -> decltype(Architecture_t::L1Regularization(A))
Evaluate the regularization functional for a given weight matrix.
ERegularization
Enum representing the regularization type applied for a given layer.
EActivationFunction
Enum that represents layer activation functions.
ELossFunction
Enum that represents objective functions for the net, i.e. functions that take the output from the last layer in the net together with the truths and return the objective function value to be minimized during training.
void evaluateGradients(typename Architecture_t::Matrix_t &dY, ELossFunction f, const typename Architecture_t::Matrix_t &Y, const typename Architecture_t::Matrix_t &output, const typename Architecture_t::Matrix_t &weights)
Compute the gradient of the given loss function f for given activations output of the output layer and truth values Y, and write the results into dY.
void initialize(typename Architecture_t::Matrix_t &A, EInitialization m)
void evaluateDerivative(typename Architecture_t::Tensor_t &B, EActivationFunction f, const typename Architecture_t::Tensor_t &A)
Compute the first partial derivative of the activation function for the values given in tensor A and write the results into B.