#ifndef TMVA_DNN_ARCHITECTURES_CPU
#define TMVA_DNN_ARCHITECTURES_CPU

// ... (includes elided)

template <typename AReal = Float_t>
class TCpu {
public:
   // ... (type aliases and earlier members elided)

   static void CreateWeightTensors(std::vector<Matrix_t> &newWeights,
                                   const std::vector<Matrix_t> &weights)
   {
      if (!newWeights.empty()) newWeights.clear();
      size_t n = weights.size();
      for (size_t i = 0; i < n; ++i)
         newWeights.emplace_back(weights[i].GetNrows(), weights[i].GetNcols());
   }
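   // Hypothetical usage sketch (not part of the original header): clone the
   // shape layout of an existing weight list into freshly allocated matrices.
   //    std::vector<Matrix_t> weights;
   //    weights.emplace_back(10, 5);          // a 10 x 5 weight matrix
   //    weights.emplace_back(1, 10);          // a 1 x 10 bias-like matrix
   //    std::vector<Matrix_t> copies;
   //    CreateWeightTensors(copies, weights); // copies now mirrors the shapes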
   /** Perform the complete backward propagation step. */
   static void Backward(Tensor_t &activationGradientsBackward,
                        Matrix_t &weightGradients, Matrix_t &biasGradients,
                        const Tensor_t &df,
                        const Tensor_t &activationGradients,
                        const Matrix_t &weights,
                        const Tensor_t &activationBackward);
   /** Copy a matrix, tensor, or vector of matrices from a different architecture. */
   template <typename AMatrix_t>
   static void CopyDiffArch(Matrix_t &B, const AMatrix_t &A);

   template <typename ATensor_t>
   static void CopyDiffArch(Tensor_t &A, const ATensor_t &B);

   template <typename AMatrix_t>
   static void CopyDiffArch(std::vector<Matrix_t> &A, const std::vector<AMatrix_t> &B);
   static void ActivationFunctionForward(Tensor_t &X, EActivationFunction activFunct,
                                         const ActivationDescriptor_t activationDescr,
                                         const double coef = 0.0, const Scalar_t alpha = 1,
                                         const Scalar_t beta = 0);
   /** Calculate how many neurons "fit" in the output layer, given the input as
       well as the layer's hyperparameters. */
   static size_t calculateDimension(size_t imgDim, size_t fltDim, size_t padding, size_t stride);
   /** Transform the matrix B into local-view format, suitable for convolution,
       and store it in matrix A. */
   static void Im2col(Matrix_t &A, const Matrix_t &B, size_t imgHeight, size_t imgWidth,
                      size_t fltHeight, size_t fltWidth, size_t strideRows, size_t strideCols,
                      size_t zeroPaddingHeight, size_t zeroPaddingWidth);
   static void Im2colIndices(std::vector<int> &V, const Matrix_t &B, size_t nLocalViews,
                             size_t imgHeight, size_t imgWidth, size_t fltHeight, size_t fltWidth,
                             size_t strideRows, size_t strideCols,
                             size_t zeroPaddingHeight, size_t zeroPaddingWidth);
   /** Rotate the weight matrix B and store the result in matrix A. */
   static void RotateWeights(Matrix_t &A, const Matrix_t &B, size_t filterDepth,
                             size_t filterHeight, size_t filterWidth, size_t numFilters);
   /** Perform the complete backward propagation step in a Convolutional Layer. */
   static void ConvLayerBackward(Tensor_t &activationGradientsBackward,
                                 Matrix_t &weightGradients, Matrix_t &biasGradients,
                                 Tensor_t &df, Tensor_t &activationGradients,
                                 const Matrix_t &weights, const Tensor_t &activationBackward,
                                 const Tensor_t &outputTensor, EActivationFunction activFunc,
                                 const ConvDescriptors_t &, ConvWorkspace_t &,
                                 size_t batchSize, size_t inputHeight, size_t inputWidth,
                                 size_t depth, size_t height, size_t width,
                                 size_t filterDepth, size_t filterHeight, size_t filterWidth,
                                 size_t nLocalViews);
   /** Utility function for calculating the activation gradients of the layer
       before the convolutional layer. */
   static void CalculateConvActivationGradients(Tensor_t &activationGradientsBackward,
                                                const Tensor_t &df,
                                                const Matrix_t &weights,
                                                size_t batchSize, size_t inputHeight,
                                                size_t inputWidth, size_t depth,
                                                size_t height, size_t width,
                                                size_t filterDepth, size_t filterHeight,
                                                size_t filterWidth);
   /** Utility function for calculating the weight gradients of the
       convolutional layer. */
   static void CalculateConvWeightGradients(Matrix_t &weightGradients,
                                            const Tensor_t &df,
                                            const Tensor_t &activations_backward,
                                            size_t batchSize, size_t inputHeight,
                                            size_t inputWidth, size_t depth,
                                            size_t height, size_t width,
                                            size_t filterDepth, size_t filterHeight,
                                            size_t filterWidth, size_t nLocalViews);
   /** Downsample the matrix C to the matrix A, using max operation, such that
       the winning indices are stored in the matrix B. */
   static void Downsample(Tensor_t &A, Tensor_t &B, const Tensor_t &C,
                          const PoolingDescriptors_t &, PoolingWorkspace_t &,
                          size_t imgHeight, size_t imgWidth, size_t fltHeight, size_t fltWidth,
                          size_t strideRows, size_t strideCols);

   /** Perform the complete backward propagation step in a Pooling Layer. */
   static void MaxPoolLayerBackward(Tensor_t &activationGradientsBackward,
                                    const Tensor_t &activationGradients,
                                    const Tensor_t &indexMatrix,
                                    const Tensor_t &, const Tensor_t &,
                                    const PoolingDescriptors_t &, PoolingWorkspace_t &,
                                    size_t imgHeight, size_t imgWidth, size_t fltHeight,
                                    size_t fltWidth, size_t strideRows, size_t strideCols,
                                    size_t nLocalViews);

   /** Backward pass for GRU networks; the full signature is given in the
       member reference below. */
   static Matrix_t &GRULayerBackward(/* ... gate activations, weights and gradients ... */
                                     bool resetGateAfter);

   // ... (remaining declarations elided)
};
template <typename AReal>
template <typename AMatrix_t>
void TCpu<AReal>::CopyDiffArch(TCpuMatrix<AReal> &B, const AMatrix_t &A)
{
   // ... (body elided)
}

template <typename AReal>
template <typename ATensor_t>
void TCpu<AReal>::CopyDiffArch(TCpuTensor<AReal> &A, const ATensor_t &B)
{
   // copy the tensor slice by slice
   for (size_t i = 0; i < A.GetFirstSize(); ++i) {
      // ... (per-slice copy elided)
   }
}

template <typename AReal>
template <typename AMatrix_t>
void TCpu<AReal>::CopyDiffArch(std::vector<TCpuMatrix<AReal>> &A, const std::vector<AMatrix_t> &B)
{
   for (size_t i = 0; i < A.size(); ++i) {
      CopyDiffArch(A[i], B[i]);
   }
}
template <typename AReal>
void TCpu<AReal>::PrintTensor(const TCpuTensor<AReal> &A, const std::string name, bool truncate)
{
   std::cout << name << " size = " << A.GetSize() << " shape = { ";
   auto shape = A.GetShape();
   for (size_t k = 0; k < shape.size() - 1; ++k)
      std::cout << shape[k] << " , ";
   std::cout << shape.back() << " } ";

   if (A.GetShape().size() == 2) {
      for (size_t i = 0; i < A.GetShape()[0]; ++i) {
         std::cout << "{ ";
         size_t n = A.GetShape()[1];
         if (truncate) n = std::min(n, size_t(10));
         for (size_t j = 0; j < n; ++j) {
            std::cout << A(i, j) << " ";
         }
         if (truncate && n < A.GetShape()[1]) std::cout << " ...... ";
         std::cout << " } " << std::endl;
      }
   } else if (A.GetShape().size() == 3) {
      for (size_t i = 0; i < A.GetFirstSize(); ++i) {
         std::cout << "{ ";
         for (size_t j = 0; j < A.GetHSize(); ++j) {
            std::cout << "{ ";
            size_t n = A.GetWSize();
            if (truncate) n = std::min(n, size_t(10));
            for (size_t k = 0; k < n; ++k) {
               std::cout << A(i, j, k) << " ";
            }
            if (truncate && n < A.GetWSize()) std::cout << " ...... ";
            std::cout << " } " << std::endl;
         }
         std::cout << " } " << std::endl;
      }
   }
   // ... (other ranks elided)
}

// ... (remainder of the file elided)

#endif // TMVA_DNN_ARCHITECTURES_CPU
Implementation of the CrossEntropy as separation criterion.
Generic Max Pooling Layer class.
Layer implementing Batch Normalization.
size_t GetBufferUseCount() const
size_t GetFirstSize() const
TCpuTensor< AFloat > At(size_t i)
The TCpu architecture class.
static void CalculateConvBiasGradients(Matrix_t &biasGradients, const Tensor_t &df, size_t batchSize, size_t depth, size_t nLocalViews)
Utility function for calculating the bias gradients of the convolutional layer.
static void Deflatten(Tensor_t &A, const Tensor_t &B)
Transforms each row of B to a matrix and stores it in the tensor A.
static void FastTanh(Tensor_t &B)
static void TransposeMultiply(Matrix_t &output, const Matrix_t &input, const Matrix_t &Weights, Scalar_t alpha=1.0, Scalar_t beta=0.)
Matrix multiplication of two matrices A and B^T (transposed) with the result being written into C.
static TRandom * fgRandomGen
static Tensor_t CreateTensor(DeviceBuffer_t buffer, size_t n, size_t c, size_t h, size_t w)
CNN::TCNNWorkspace< PoolingLayer_t > PoolingWorkspace_t
static Scalar_t L1Regularization(const Matrix_t &W)
static void MaxPoolLayerBackward(Tensor_t &activationGradientsBackward, const Tensor_t &activationGradients, const Tensor_t &indexMatrix, const Tensor_t &, const Tensor_t &, const PoolingDescriptors_t &, PoolingWorkspace_t &, size_t imgHeight, size_t imgWidth, size_t fltHeight, size_t fltWidth, size_t strideRows, size_t strideCols, size_t nLocalViews)
Perform the complete backward propagation step in a Pooling Layer.
static void ScaleAdd(Matrix_t &A, const Matrix_t &B, Scalar_t beta=1.0)
Adds the elements in matrix B, scaled by beta, to the elements in matrix A.
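The semantics are axpy-like; a standalone sketch of the same element-wise update on raw arrays (an illustration of the semantics, not ROOT's TCpuMatrix implementation):

#include <cstddef>

// A[i] += beta * B[i] for every element; beta defaults to 1.0 as in the signature above.
void scaleAddSketch(float *A, const float *B, std::size_t n, float beta = 1.0f)
{
   for (std::size_t i = 0; i < n; ++i)
      A[i] += beta * B[i];
}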
static void AddL1RegularizationGradients(Matrix_t &A, const Matrix_t &W, Scalar_t weightDecay)
static void InitializeLSTMTensors(GenLayer_t *)
static void AddRowWise(Tensor_t &output, const Matrix_t &biases)
static void ConstAdd(Matrix_t &A, Scalar_t beta)
Add the constant beta to all the elements of matrix A and write the result into A.
DummyDescriptor TensorDescriptor_t
CNN::TCNNDescriptors< PoolingLayer_t > PoolingDescriptors_t
static void SumColumns(Matrix_t &B, const Matrix_t &A, Scalar_t alpha=1.0, Scalar_t beta=0.)
Sum columns of (m x n) matrix A and write the results into the first m elements of B.
static void Sigmoid(Tensor_t &B)
static void ConvLayerForward(Tensor_t &output, Tensor_t &inputActivationFunc, const Tensor_t &input, const Matrix_t &weights, const Matrix_t &biases, const DNN::CNN::TConvParams &params, EActivationFunction activFunc, Tensor_t &, const ConvDescriptors_t &, ConvWorkspace_t &)
Forward propagation in the Convolutional layer.
static Tensor_t CreateTensor(DeviceBuffer_t buffer, size_t b, size_t t, size_t w)
static void DropoutBackward(Tensor_t &, TDescriptors *, TWorkspace *)
static Scalar_t Sum(const Matrix_t &A)
Compute the sum of all elements in A.
static void InitializeLSTMWorkspace(TWorkspace *&, TDescriptors *&, GenLayer_t *)
CNN::TCNNWorkspace< ConvLayer_t > ConvWorkspace_t
static void Sigmoid(Matrix_t &YHat, const Matrix_t &)
CNN::TCNNDescriptors< ConvLayer_t > ConvDescriptors_t
TCpuTensor< AReal > Tensor_t
static void SymmetricReluDerivative(Tensor_t &B, const Tensor_t &A)
static void InitializeBNormDescriptors(TDescriptors *&, BNormLayer_t *)
Initialize CNN data/operator descriptors.
static bool AlmostEquals(const Matrix_t &A, const Matrix_t &B, double epsilon=0.1)
Check two matrices for equality, taking floating point arithmetic errors into account.
static void Hadamard(Tensor_t &A, const Tensor_t &B)
In-place Hadamard (element-wise) product of matrices A and B with the result being written into A.
static void InitializeIdentity(Matrix_t &A)
static void ReleasePoolDescriptors(TDescriptors *&)
static void InitializePoolDropoutWorkspace(TWorkspace *&, TDescriptors *&, const DNN::CNN::TConvParams &, PoolingLayer_t *)
static void Im2colFast(Matrix_t &A, const Matrix_t &B, const std::vector< int > &V)
static void SqrtElementWise(Matrix_t &A)
Square root each element of the matrix A and write the result into A.
static void AddRowWise(Matrix_t &output, const Matrix_t &biases)
Add the vectors biases row-wise to the matrix output.
static void SoftmaxCrossEntropyGradients(Matrix_t &dY, const Matrix_t &Y, const Matrix_t &output, const Matrix_t &weights)
static void InitializeGRUDescriptors(TDescriptors *&, GenLayer_t *)
static void SymmetricRelu(Tensor_t &B)
static void PrintTensor(const Tensor_t &A, const std::string name="Cpu-tensor", bool truncate=false)
static TRandom & GetRandomGenerator()
static void MultiplyTranspose(Tensor_t &output, const Tensor_t &input, const Matrix_t &weights)
static void DropoutForward(Tensor_t &A, TDescriptors *descriptors, TWorkspace *workspace, Scalar_t p)
Apply dropout with activation probability p to the given tensor A and scale the result by the reciprocal of p.
static void FreePoolDropoutWorkspace(TWorkspace *&)
static Tensor_t CreateTensor(size_t b, size_t t, size_t w)
static void Softmax(Matrix_t &YHat, const Matrix_t &)
static void CalculateConvActivationGradients(Tensor_t &activationGradientsBackward, const Tensor_t &df, const Matrix_t &weights, size_t batchSize, size_t inputHeight, size_t inputWidth, size_t depth, size_t height, size_t width, size_t filterDepth, size_t filterHeight, size_t filterWidth)
Utility function for calculating the activation gradients of the layer before the convolutional layer.
static void TanhDerivative(Tensor_t &B, const Tensor_t &A)
static void InitializeGRUWorkspace(TWorkspace *&, TDescriptors *&, GenLayer_t *)
static void BatchNormLayerForwardTraining(int axis, const Tensor_t &x, Tensor_t &y, Matrix_t &gamma, Matrix_t &beta, Matrix_t &mean, Matrix_t &, Matrix_t &iVariance, Matrix_t &runningMeans, Matrix_t &runningVars, Scalar_t nTrainedBatches, Scalar_t momentum, Scalar_t epsilon, const TensorDescriptor_t &bnParDescriptor)
The inputs from each batch are normalized during training to have zero mean and unit variance, and they are then scaled by gamma and shifted by beta.
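A standalone sketch of the per-feature normalization performed during training, using the textbook batch-norm recipe; the running-average update convention shown here is an assumption and may differ in detail from ROOT's:

#include <cmath>
#include <cstddef>

// Normalize one feature column x[0..n) into y[0..n), then scale by gamma and
// shift by beta; running statistics are blended with the batch statistics.
void batchNormColumnSketch(const float *x, float *y, std::size_t n,
                           float gamma, float beta, float epsilon,
                           float &runningMean, float &runningVar, float momentum)
{
   float mean = 0.f, var = 0.f;
   for (std::size_t i = 0; i < n; ++i) mean += x[i];
   mean /= n;
   for (std::size_t i = 0; i < n; ++i) var += (x[i] - mean) * (x[i] - mean);
   var /= n;
   const float iStd = 1.f / std::sqrt(var + epsilon);
   for (std::size_t i = 0; i < n; ++i)
      y[i] = gamma * (x[i] - mean) * iStd + beta;
   runningMean = momentum * runningMean + (1.f - momentum) * mean;
   runningVar  = momentum * runningVar  + (1.f - momentum) * var;
}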
static void Multiply(Matrix_t &C, const Matrix_t &A, const Matrix_t &B)
Standard multiplication of two matrices A and B with the result being written into C.
DummyDescriptor ActivationDescriptor_t
static void Backward(Tensor_t &activationGradientsBackward, Matrix_t &weightGradients, Matrix_t &biasGradients, const Tensor_t &df, const Tensor_t &activationGradients, const Matrix_t &weights, const Tensor_t &activationBackward)
Perform the complete backward propagation step.
static void InitializeUniform(Matrix_t &A)
static void ActivationFunctionForward(Tensor_t &X, EActivationFunction activFunct, const ActivationDescriptor_t activationDescr, const double coef=0.0, const Scalar_t alpha=1, const Scalar_t beta=0)
static void SoftSignDerivative(Tensor_t &B, const Tensor_t &A)
static void AdamUpdateSecondMom(Matrix_t &A, const Matrix_t &B, Scalar_t beta)
static void Copy(Matrix_t &B, const Matrix_t &A)
static void ReleaseBNormDescriptors(TDescriptors *&)
static void SetRandomSeed(size_t seed)
static void FreeConvWorkspace(TWorkspace *&)
Only used for certain cudnn on-device memory.
static Matrix_t & LSTMLayerBackward(TCpuMatrix< Scalar_t > &state_gradients_backward, TCpuMatrix< Scalar_t > &cell_gradients_backward, TCpuMatrix< Scalar_t > &input_weight_gradients, TCpuMatrix< Scalar_t > &forget_weight_gradients, TCpuMatrix< Scalar_t > &candidate_weight_gradients, TCpuMatrix< Scalar_t > &output_weight_gradients, TCpuMatrix< Scalar_t > &input_state_weight_gradients, TCpuMatrix< Scalar_t > &forget_state_weight_gradients, TCpuMatrix< Scalar_t > &candidate_state_weight_gradients, TCpuMatrix< Scalar_t > &output_state_weight_gradients, TCpuMatrix< Scalar_t > &input_bias_gradients, TCpuMatrix< Scalar_t > &forget_bias_gradients, TCpuMatrix< Scalar_t > &candidate_bias_gradients, TCpuMatrix< Scalar_t > &output_bias_gradients, TCpuMatrix< Scalar_t > &di, TCpuMatrix< Scalar_t > &df, TCpuMatrix< Scalar_t > &dc, TCpuMatrix< Scalar_t > &dout, const TCpuMatrix< Scalar_t > &precStateActivations, const TCpuMatrix< Scalar_t > &precCellActivations, const TCpuMatrix< Scalar_t > &fInput, const TCpuMatrix< Scalar_t > &fForget, const TCpuMatrix< Scalar_t > &fCandidate, const TCpuMatrix< Scalar_t > &fOutput, const TCpuMatrix< Scalar_t > &weights_input, const TCpuMatrix< Scalar_t > &weights_forget, const TCpuMatrix< Scalar_t > &weights_candidate, const TCpuMatrix< Scalar_t > &weights_output, const TCpuMatrix< Scalar_t > &weights_input_state, const TCpuMatrix< Scalar_t > &weights_forget_state, const TCpuMatrix< Scalar_t > &weights_candidate_state, const TCpuMatrix< Scalar_t > &weights_output_state, const TCpuMatrix< Scalar_t > &input, TCpuMatrix< Scalar_t > &input_gradient, TCpuMatrix< Scalar_t > &cell_gradient, TCpuMatrix< Scalar_t > &cell_tanh)
Backward pass for LSTM Network.
static Scalar_t L2Regularization(const Matrix_t &W)
static void CreateWeightTensors(std::vector< Matrix_t > &newWeights, const std::vector< Matrix_t > &weights)
static void AddL2RegularizationGradients(Matrix_t &A, const Matrix_t &W, Scalar_t weightDecay)
static void InitializeGauss(Matrix_t &A)
static void Reshape(Matrix_t &A, const Matrix_t &B)
Transform the matrix B to a matrix A with different dimensions.
static void IdentityDerivative(Tensor_t &B, const Tensor_t &A)
static Matrix_t & RecurrentLayerBackward(Matrix_t &state_gradients_backward, Matrix_t &input_weight_gradients, Matrix_t &state_weight_gradients, Matrix_t &bias_gradients, Matrix_t &df, const Matrix_t &state, const Matrix_t &weights_input, const Matrix_t &weights_state, const Matrix_t &input, Matrix_t &input_gradient)
Backward pass for Recurrent Networks.
static void Rearrange(Tensor_t &out, const Tensor_t &in)
Rearrange data according to time: fill the B x T x D tensor out with the T x B x D tensor in.
static void MultiplyTranspose(Matrix_t &output, const Matrix_t &input, const Matrix_t &weights)
Matrix-multiply input with the transpose of weights and write the results into output.
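The dense-layer forward product in index form: output(i, j) = sum_k input(i, k) * weights(j, k). A naive row-major sketch of the same computation (illustration only, not ROOT's optimized implementation):

#include <cstddef>

// out = in * w^T, with in of size nRows x nIn and w of size nOut x nIn.
void multiplyTransposeSketch(float *out, const float *in, const float *w,
                             std::size_t nRows, std::size_t nIn, std::size_t nOut)
{
   for (std::size_t i = 0; i < nRows; ++i)
      for (std::size_t j = 0; j < nOut; ++j) {
         float s = 0.f;
         for (std::size_t k = 0; k < nIn; ++k)
            s += in[i * nIn + k] * w[j * nIn + k];
         out[i * nOut + j] = s;
      }
}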
static void CrossEntropyGradients(Matrix_t &dY, const Matrix_t &Y, const Matrix_t &output, const Matrix_t &weights)
static void InitializeGRUTensors(GenLayer_t *)
static void InitializeRNNDescriptors(TDescriptors *&, GenLayer_t *)
static Matrix_t & GRULayerBackward(TCpuMatrix< Scalar_t > &state_gradients_backward, TCpuMatrix< Scalar_t > &reset_weight_gradients, TCpuMatrix< Scalar_t > &update_weight_gradients, TCpuMatrix< Scalar_t > &candidate_weight_gradients, TCpuMatrix< Scalar_t > &reset_state_weight_gradients, TCpuMatrix< Scalar_t > &update_state_weight_gradients, TCpuMatrix< Scalar_t > &candidate_state_weight_gradients, TCpuMatrix< Scalar_t > &reset_bias_gradients, TCpuMatrix< Scalar_t > &update_bias_gradients, TCpuMatrix< Scalar_t > &candidate_bias_gradients, TCpuMatrix< Scalar_t > &dr, TCpuMatrix< Scalar_t > &du, TCpuMatrix< Scalar_t > &dc, const TCpuMatrix< Scalar_t > &precStateActivations, const TCpuMatrix< Scalar_t > &fReset, const TCpuMatrix< Scalar_t > &fUpdate, const TCpuMatrix< Scalar_t > &fCandidate, const TCpuMatrix< Scalar_t > &weights_reset, const TCpuMatrix< Scalar_t > &weights_update, const TCpuMatrix< Scalar_t > &weights_candidate, const TCpuMatrix< Scalar_t > &weights_reset_state, const TCpuMatrix< Scalar_t > &weights_update_state, const TCpuMatrix< Scalar_t > &weights_candidate_state, const TCpuMatrix< Scalar_t > &input, TCpuMatrix< Scalar_t > &input_gradient, bool resetGateAfter)
Backward pass for GRU Network.
static void RNNBackward(const Tensor_t &, const Matrix_t &, const Matrix_t &, const Tensor_t &, const Tensor_t &, const Matrix_t &, const Matrix_t &, const Tensor_t &, Tensor_t &, Matrix_t &, Matrix_t &, Tensor_t &, const RNNDescriptors_t &, RNNWorkspace_t &)
static void CalculateConvWeightGradients(Matrix_t &weightGradients, const Tensor_t &df, const Tensor_t &activations_backward, size_t batchSize, size_t inputHeight, size_t inputWidth, size_t depth, size_t height, size_t width, size_t filterDepth, size_t filterHeight, size_t filterWidth, size_t nLocalViews)
Utility function for calculating the weight gradients of the convolutional layer.
static size_t calculateDimension(size_t imgDim, size_t fltDim, size_t padding, size_t stride)
Calculate how many neurons "fit" in the output layer, given the input as well as the layer's hyperparameters.
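The underlying convolution arithmetic, as a sketch (the real method presumably also validates that the configuration divides evenly; that check is omitted here):

#include <cstddef>

// outputDim = (imgDim - fltDim + 2*padding) / stride + 1
std::size_t calculateDimensionSketch(std::size_t imgDim, std::size_t fltDim,
                                     std::size_t padding, std::size_t stride)
{
   return (imgDim - fltDim + 2 * padding) / stride + 1;
}

// Example: a 32-pixel axis with a 5-pixel filter, padding 2 and stride 1
// yields (32 - 5 + 4) / 1 + 1 = 32 output positions.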
static void BatchNormLayerBackward(int axis, const Tensor_t &x, const Tensor_t &dy, Tensor_t &dx, Matrix_t &gamma, Matrix_t &dgamma, Matrix_t &dbeta, const Matrix_t &mean, const Matrix_t &variance, const Matrix_t &iVariance, Scalar_t epsilon, const TensorDescriptor_t &)
static void InitializeConvWorkspace(TWorkspace *&, TDescriptors *&, const DNN::CNN::TConvParams &, ConvLayer_t *)
static void ConvLayerBackward(Tensor_t &activationGradientsBackward, Matrix_t &weightGradients, Matrix_t &biasGradients, Tensor_t &df, Tensor_t &activationGradients, const Matrix_t &weights, const Tensor_t &activationBackward, const Tensor_t &outputTensor, EActivationFunction activFunc, const ConvDescriptors_t &, ConvWorkspace_t &, size_t batchSize, size_t inputHeight, size_t inputWidth, size_t depth, size_t height, size_t width, size_t filterDepth, size_t filterHeight, size_t filterWidth, size_t nLocalViews)
Perform the complete backward propagation step in a Convolutional Layer.
static void InitializePoolDescriptors(TDescriptors *&, PoolingLayer_t *)
static void InitializeZero(Matrix_t &A)
static Tensor_t BatchNormLayerReshapeTensor(int axis, const Tensor_t &x)
static void PrepareInternals(Tensor_t &)
Dummy placeholder - preparation is currently only required for the CUDA architecture.
static void MeanSquaredErrorGradients(Matrix_t &dY, const Matrix_t &Y, const Matrix_t &output, const Matrix_t &weights)
static Scalar_t MeanSquaredError(const Matrix_t &Y, const Matrix_t &output, const Matrix_t &weights)
static void InitializeGlorotUniform(Matrix_t &A)
Sample from a uniform distribution in range [-lim, +lim] where lim = sqrt(6/(N_in + N_out)).
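The same bound in code form, as a standalone sketch using <random> (ROOT draws from its own TRandom generator instead):

#include <cmath>
#include <cstddef>
#include <random>

// Glorot/Xavier uniform: sample from [-lim, +lim] with lim = sqrt(6 / (nIn + nOut)).
double glorotUniformSample(std::size_t nIn, std::size_t nOut, std::mt19937 &gen)
{
   const double lim = std::sqrt(6.0 / static_cast<double>(nIn + nOut));
   std::uniform_real_distribution<double> dist(-lim, lim);
   return dist(gen);
}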
static void Relu(Tensor_t &B)
static void ActivationFunctionBackward(Tensor_t &dX, const Tensor_t &Y, const Tensor_t &dY, const Tensor_t &X, EActivationFunction activFunct, const ActivationDescriptor_t activationDescr, const Scalar_t alpha=1, const Scalar_t beta=0)
Computes the gradient of the activation function.
static void SquareElementWise(Matrix_t &A)
Square each element of the matrix A and write the result into A.
static void Im2colIndices(std::vector< int > &V, const Matrix_t &B, size_t nLocalViews, size_t imgHeight, size_t imgWidth, size_t fltHeight, size_t fltWidth, size_t strideRows, size_t strideCols, size_t zeroPaddingHeight, size_t zeroPaddingWidth)
static void Flatten(Tensor_t &A, const Tensor_t &B)
Flattens the tensor B such that each matrix is stretched into one row, resulting in the matrix A.
static void AddConvBiases(Matrix_t &output, const Matrix_t &biases)
Add the biases in the Convolutional Layer.
static void InitializeRNNTensors(GenLayer_t *)
static void Im2col(Matrix_t &A, const Matrix_t &B, size_t imgHeight, size_t imgWidth, size_t fltHeight, size_t fltWidth, size_t strideRows, size_t strideCols, size_t zeroPaddingHeight, size_t zeroPaddingWidth)
Transform the matrix B into local-view format, suitable for convolution, and store it in matrix A.
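A compact single-channel im2col sketch (row-major image, zero padding; the real method additionally handles filter depth and ROOT's matrix layout):

#include <cstddef>
#include <vector>

// Each output row is one flattened local view of the padded image.
std::vector<std::vector<float>>
im2colSketch(const std::vector<float> &img, std::size_t H, std::size_t W,
             std::size_t fH, std::size_t fW, std::size_t strideR, std::size_t strideC,
             std::size_t padH, std::size_t padW)
{
   std::vector<std::vector<float>> views;
   for (std::size_t r = 0; r + fH <= H + 2 * padH; r += strideR)
      for (std::size_t c = 0; c + fW <= W + 2 * padW; c += strideC) {
         std::vector<float> view;
         for (std::size_t i = 0; i < fH; ++i)
            for (std::size_t j = 0; j < fW; ++j) {
               // map back to unpadded coordinates; out-of-range reads are the zero padding
               const long y = static_cast<long>(r + i) - static_cast<long>(padH);
               const long x = static_cast<long>(c + j) - static_cast<long>(padW);
               const bool inside = y >= 0 && y < static_cast<long>(H) &&
                                   x >= 0 && x < static_cast<long>(W);
               view.push_back(inside ? img[static_cast<std::size_t>(y) * W +
                                           static_cast<std::size_t>(x)]
                                     : 0.f);
            }
         views.push_back(std::move(view));
      }
   return views;
}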
static void ReleaseConvDescriptors(TDescriptors *&)
Release CNN data/operator descriptors.
static void InitializeRNNWorkspace(TWorkspace *&, TDescriptors *&, GenLayer_t *)
static Tensor_t CreateTensor(size_t n, size_t c, size_t h, size_t w)
static void InitializeGlorotNormal(Matrix_t &A)
Truncated normal initialization (Glorot, also called Xavier normal). The values are sampled from a normal distribution with stddev = sqrt(2/(N_in + N_out)); values more than two standard deviations away are discarded.
static void InitializeLSTMDescriptors(TDescriptors *&, GenLayer_t *)
static void FreeRNNWorkspace(TWorkspace *&)
static void GaussDerivative(Tensor_t &B, const Tensor_t &A)
static void BatchNormLayerForwardInference(int axis, const Tensor_t &x, Matrix_t &gamma, Matrix_t &beta, Tensor_t &y, const Matrix_t &runningMeans, const Matrix_t &runningVars, Scalar_t epsilon, const TensorDescriptor_t &)
During inference the inputs are not normalized using the batch mean but the running averages computed during training.
static void AdamUpdateFirstMom(Matrix_t &A, const Matrix_t &B, Scalar_t beta)
static void DropoutForward(Matrix_t &A, Scalar_t p)
static void Downsample(Tensor_t &A, Tensor_t &B, const Tensor_t &C, const PoolingDescriptors_t &, PoolingWorkspace_t &, size_t imgHeight, size_t imgWidth, size_t fltHeight, size_t fltWidth, size_t strideRows, size_t strideCols)
Downsample the matrix C to the matrix A, using max operation, such that the winning indices are stored in the matrix B.
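A single-channel max-pool sketch that also records the winning indices, which is what MaxPoolLayerBackward later needs to route gradients (illustration only; ROOT stores the indices in a tensor rather than a std::vector):

#include <cstddef>
#include <vector>

void maxPoolSketch(const std::vector<float> &img, std::size_t H, std::size_t W,
                   std::size_t fH, std::size_t fW, std::size_t sR, std::size_t sC,
                   std::vector<float> &out, std::vector<std::size_t> &winners)
{
   for (std::size_t r = 0; r + fH <= H; r += sR)
      for (std::size_t c = 0; c + fW <= W; c += sC) {
         std::size_t best = r * W + c;
         for (std::size_t i = 0; i < fH; ++i)
            for (std::size_t j = 0; j < fW; ++j)
               if (img[(r + i) * W + (c + j)] > img[best])
                  best = (r + i) * W + (c + j);
         out.push_back(img[best]);
         winners.push_back(best); // consumed by the backward pass
      }
}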
static void InitializeConvDescriptors(TDescriptors *&, ConvLayer_t *)
static void ConstMult(Matrix_t &A, Scalar_t beta)
Multiply all the elements of matrix A by the constant beta and write the result into A.
static void SigmoidDerivative(Tensor_t &B, const Tensor_t &A)
static void RNNForward(const Tensor_t &, const Matrix_t &, const Matrix_t &, const Tensor_t &, Tensor_t &, Matrix_t &, Matrix_t &, const RNNDescriptors_t &, RNNWorkspace_t &, bool)
static void CopyDiffArch(Matrix_t &B, const AMatrix_t &A)
static Scalar_t SoftmaxCrossEntropy(const Matrix_t &Y, const Matrix_t &output, const Matrix_t &weights)
Softmax transformation is implicitly applied, thus output should hold the linear activations of the last layer in the net.
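A standalone, log-sum-exp-stabilized sketch of the loss for a single example (the per-event weights accepted by the real method are omitted):

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

// loss = -sum_k y_k * log softmax(z)_k, computed from the linear activations z.
double softmaxCrossEntropySketch(const std::vector<double> &z, const std::vector<double> &y)
{
   const double zMax = *std::max_element(z.begin(), z.end());
   double sumExp = 0.0;
   for (double zi : z) sumExp += std::exp(zi - zMax);
   const double logSumExp = zMax + std::log(sumExp);
   double loss = 0.0;
   for (std::size_t k = 0; k < z.size(); ++k)
      loss -= y[k] * (z[k] - logSumExp); // log softmax(z)_k = z_k - logSumExp
   return loss;
}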
static void InitializeZero(Tensor_t &A)
static void FastTanhDerivative(Tensor_t &B, const Tensor_t &A)
static void InitializeActivationDescriptor(ActivationDescriptor_t &, EActivationFunction, double=0.0)
TCpuMatrix< AReal > Matrix_t
static void ReleaseDescriptor(ActivationDescriptor_t &)
static void ReleaseRNNDescriptors(TDescriptors *&)
static TMVA::Experimental::MemoryLayout GetTensorLayout()
static void RotateWeights(Matrix_t &A, const Matrix_t &B, size_t filterDepth, size_t filterHeight, size_t filterWidth, size_t numFilters)
Rotates the matrix B, which represents the weights, and stores the result in the matrix A.
static void ReluDerivative(Tensor_t &B, const Tensor_t &A)
static void ReciprocalElementWise(Matrix_t &A)
Take the reciprocal of each element of the matrix A and write the result into A.
static void AdamUpdate(Matrix_t &A, const Matrix_t &M, const Matrix_t &V, Scalar_t alpha, Scalar_t eps)
Adam update of the matrix A using the first and second moment estimates M and V.
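The three AdamUpdate* members map onto the usual Adam step; a per-element standalone sketch (treating alpha as the already bias-corrected learning rate, which is an assumption about how the signature is meant to be used):

#include <cmath>
#include <cstddef>

// First moment:  m <- beta1*m + (1 - beta1)*g         (AdamUpdateFirstMom)
// Second moment: v <- beta2*v + (1 - beta2)*g*g       (AdamUpdateSecondMom)
// Parameter:     w <- w - alpha * m / (sqrt(v) + eps) (AdamUpdate)
void adamStepSketch(float *w, float *m, float *v, const float *g, std::size_t n,
                    float alpha, float beta1, float beta2, float eps)
{
   for (std::size_t i = 0; i < n; ++i) {
      m[i] = beta1 * m[i] + (1.f - beta1) * g[i];
      v[i] = beta2 * v[i] + (1.f - beta2) * g[i] * g[i];
      w[i] -= alpha * m[i] / (std::sqrt(v[i]) + eps);
   }
}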
Generic General Layer class.
std::size_t GetSize() const
const Shape_t & GetShape() const
This is the base class for the ROOT Random number generators.
std::shared_ptr< std::function< double(double)> > Tanh
double weightDecay(double error, ItWeight itWeight, ItWeight itWeightEnd, double factorWeightDecay, EnumRegularization eRegularization)
Compute the weight decay for regularization (L1 or L2).
EActivationFunction
Enum that represents layer activation functions.
std::shared_ptr< std::function< double(double)> > Gauss
std::shared_ptr< std::function< double(double)> > SoftSign
MemoryLayout
Memory layout type (copy from RTensor.hxx)