template<typename Real_t>
class TMVA::DNN::TReference< Real_t >
The reference architecture class.
Class template that contains the reference implementation of the low-level interface for the DNN implementation. The reference implementation uses the TMatrixT class template to represent matrices.
- Template Parameters
-
Real_t | The floating point type used to represent scalars. |
Definition at line 37 of file Reference.h.
|
|
Low-level functions required for the forward propagation of activations through the network.
|
static void | MultiplyTranspose (TMatrixT< Scalar_t > &output, const TMatrixT< Scalar_t > &input, const TMatrixT< Scalar_t > &weights) |
| Matrix-multiply input with the transpose of weights and write the results into output . More...
|
|
static void | AddRowWise (TMatrixT< Scalar_t > &output, const TMatrixT< Scalar_t > &biases) |
| Add the vectors biases row-wise to the matrix output. More...
|
|
|
Low-level functions required for the backward propagation of activation gradients through the network.
|
static void | Backward (TMatrixT< Scalar_t > &activationGradientsBackward, TMatrixT< Scalar_t > &weightGradients, TMatrixT< Scalar_t > &biasGradients, TMatrixT< Scalar_t > &df, const TMatrixT< Scalar_t > &activationGradients, const TMatrixT< Scalar_t > &weights, const TMatrixT< Scalar_t > &activationBackward) |
| Perform the complete backward propagation step. More...
|
|
static void | ScaleAdd (TMatrixT< Scalar_t > &A, const TMatrixT< Scalar_t > &B, Scalar_t beta=1.0) |
| Adds the elements in matrix B scaled by beta to the elements in the matrix A. More...
|
|
static void | Copy (TMatrixT< Scalar_t > &A, const TMatrixT< Scalar_t > &B) |
|
|
For each activation function, the low-level interface contains two routines.
One that applies the activation function to a matrix and one that evaluates the derivatives of the activation function at the elements of a given matrix and writes the results into the result matrix.
|
static void | Identity (TMatrixT< Real_t > &B) |
|
static void | IdentityDerivative (TMatrixT< Real_t > &B, const TMatrixT< Real_t > &A) |
|
static void | Relu (TMatrixT< Real_t > &B) |
|
static void | ReluDerivative (TMatrixT< Real_t > &B, const TMatrixT< Real_t > &A) |
|
static void | Sigmoid (TMatrixT< Real_t > &B) |
|
static void | SigmoidDerivative (TMatrixT< Real_t > &B, const TMatrixT< Real_t > &A) |
|
static void | Tanh (TMatrixT< Real_t > &B) |
|
static void | TanhDerivative (TMatrixT< Real_t > &B, const TMatrixT< Real_t > &A) |
|
static void | SymmetricRelu (TMatrixT< Real_t > &B) |
|
static void | SymmetricReluDerivative (TMatrixT< Real_t > &B, const TMatrixT< Real_t > &A) |
|
static void | SoftSign (TMatrixT< Real_t > &B) |
|
static void | SoftSignDerivative (TMatrixT< Real_t > &B, const TMatrixT< Real_t > &A) |
|
static void | Gauss (TMatrixT< Real_t > &B) |
|
static void | GaussDerivative (TMatrixT< Real_t > &B, const TMatrixT< Real_t > &A) |
|
|
Loss functions compute a scalar value given the output of the network for a given training input and the expected network prediction Y that quantifies the quality of the prediction.
For each function also a routine that computes the gradients (suffixed by Gradients) must be provided to start the backpropagation algorithm.
|
static Real_t | MeanSquaredError (const TMatrixT< Real_t > &Y, const TMatrixT< Real_t > &output) |
|
static void | MeanSquaredErrorGradients (TMatrixT< Real_t > &dY, const TMatrixT< Real_t > &Y, const TMatrixT< Real_t > &output) |
|
static Real_t | CrossEntropy (const TMatrixT< Real_t > &Y, const TMatrixT< Real_t > &output) |
| Sigmoid transformation is implicitly applied, thus output should hold the linear activations of the last layer in the net. More...
|
|
static void | CrossEntropyGradients (TMatrixT< Real_t > &dY, const TMatrixT< Real_t > &Y, const TMatrixT< Real_t > &output) |
|
|
Output functions transform the activations output of the output layer in the network to a valid prediction YHat for the desired usage of the network, e.g.
the identity function for regression or the sigmoid transformation for two-class classification.
|
static void | Sigmoid (TMatrixT< Real_t > &YHat, const TMatrixT< Real_t > &) |
|
|
For each regularization type two functions are required, one named <Type>Regularization that evaluates the corresponding regularization functional for a given weight matrix and the Add<Type>RegularizationGradients , that adds the regularization component in the gradients to the provided matrix.
|
static Real_t | L1Regularization (const TMatrixT< Real_t > &W) |
|
static void | AddL1RegularizationGradients (TMatrixT< Real_t > &A, const TMatrixT< Real_t > &W, Real_t weightDecay) |
|
static Real_t | L2Regularization (const TMatrixT< Real_t > &W) |
|
static void | AddL2RegularizationGradients (TMatrixT< Real_t > &A, const TMatrixT< Real_t > &W, Real_t weightDecay) |
|
|
For each initialization method, one function in the low-level interface is provided.
The naming scheme is
Initialize<Type>
for a given initialization method Type.
|
static void | InitializeGauss (TMatrixT< Real_t > &A) |
|
static void | InitializeUniform (TMatrixT< Real_t > &A) |
|
static void | InitializeIdentity (TMatrixT< Real_t > &A) |
|
static void | InitializeZero (TMatrixT< Real_t > &A) |
|
|
static void | Dropout (TMatrixT< Real_t > &A, Real_t dropoutProbability) |
| Apply dropout with activation probability p to the given matrix A and scale the result by the reciprocal of p . More...
|
|