Generic Deep Neural Network class.
This class encapsulates the information for all types of Deep Neural Networks.
Architecture | The Architecture type that holds the architecture-specific data types. |
Public Types | |
using | Matrix_t = typename Architecture_t::Matrix_t |
using | Scalar_t = typename Architecture_t::Scalar_t |
using | Tensor_t = typename Architecture_t::Tensor_t |
Public Member Functions | |
TDeepNet () | |
Default Constructor. | |
TDeepNet (const TDeepNet &) | |
Copy-constructor. | |
TDeepNet (size_t BatchSize, size_t InputDepth, size_t InputHeight, size_t InputWidth, size_t BatchDepth, size_t BatchHeight, size_t BatchWidth, ELossFunction fJ, EInitialization fI=EInitialization::kZero, ERegularization fR=ERegularization::kNone, Scalar_t fWeightDecay=0.0, bool isTraining=false) | |
Constructor. | |
~TDeepNet () | |
Destructor. | |
TBasicGRULayer< Architecture_t > * | AddBasicGRULayer (size_t stateSize, size_t inputSize, size_t timeSteps, bool rememberState=false, bool returnSequence=false, bool resetGateAfter=false) |
Function for adding GRU Layer in the Deep Neural Network, with given parameters. | |
void | AddBasicGRULayer (TBasicGRULayer< Architecture_t > *basicGRULayer) |
Function for adding GRU Layer in the Deep Neural Network, when the layer is already created. | |
TBasicLSTMLayer< Architecture_t > * | AddBasicLSTMLayer (size_t stateSize, size_t inputSize, size_t timeSteps, bool rememberState=false, bool returnSequence=false) |
Function for adding LSTM Layer in the Deep Neural Network, with given parameters. | |
void | AddBasicLSTMLayer (TBasicLSTMLayer< Architecture_t > *basicLSTMLayer) |
Function for adding LSTM Layer in the Deep Neural Network, when the layer is already created. | |
TBasicRNNLayer< Architecture_t > * | AddBasicRNNLayer (size_t stateSize, size_t inputSize, size_t timeSteps, bool rememberState=false, bool returnSequence=false, EActivationFunction f=EActivationFunction::kTanh) |
Function for adding Recurrent Layer in the Deep Neural Network, with given parameters. | |
void | AddBasicRNNLayer (TBasicRNNLayer< Architecture_t > *basicRNNLayer) |
Function for adding Vanilla RNN when the layer is already created. | |
TBatchNormLayer< Architecture_t > * | AddBatchNormLayer (Scalar_t momentum=-1, Scalar_t epsilon=0.0001) |
Function for adding a Batch Normalization layer with given parameters. | |
TConvLayer< Architecture_t > * | AddConvLayer (size_t depth, size_t filterHeight, size_t filterWidth, size_t strideRows, size_t strideCols, size_t paddingHeight, size_t paddingWidth, EActivationFunction f, Scalar_t dropoutProbability=1.0) |
Function for adding Convolution layer in the Deep Neural Network, with a given depth, filter height and width, striding in rows and columns, the zero paddings, as well as the activation function and the dropout probability. | |
void | AddConvLayer (TConvLayer< Architecture_t > *convLayer) |
Function for adding Convolution Layer in the Deep Neural Network, when the layer is already created. | |
TDenseLayer< Architecture_t > * | AddDenseLayer (size_t width, EActivationFunction f, Scalar_t dropoutProbability=1.0) |
Function for adding Dense Connected Layer in the Deep Neural Network, with a given width, activation function and dropout probability. | |
void | AddDenseLayer (TDenseLayer< Architecture_t > *denseLayer) |
Function for adding Dense Layer in the Deep Neural Network, when the layer is already created. | |
void | AddMaxPoolLayer (CNN::TMaxPoolLayer< Architecture_t > *maxPoolLayer) |
Function for adding Max Pooling layer in the Deep Neural Network, when the layer is already created. | |
TMaxPoolLayer< Architecture_t > * | AddMaxPoolLayer (size_t frameHeight, size_t frameWidth, size_t strideRows, size_t strideCols, Scalar_t dropoutProbability=1.0) |
Function for adding Pooling layer in the Deep Neural Network, with a given filter height and width, striding in rows and columns as well as the dropout probability. | |
TReshapeLayer< Architecture_t > * | AddReshapeLayer (size_t depth, size_t height, size_t width, bool flattening) |
Function for adding Reshape Layer in the Deep Neural Network, with a given depth, height and width. | |
void | AddReshapeLayer (TReshapeLayer< Architecture_t > *reshapeLayer) |
Function for adding Reshape Layer in the Deep Neural Network, when the layer is already created. | |
void | Backward (const Tensor_t &input, const Matrix_t &groundTruth, const Matrix_t &weights) |
Function that executes the entire backward pass in the network. | |
void | Clear () |
Remove all layers from the network. | |
void | Forward (Tensor_t &input, bool applyDropout=false) |
Function that executes the entire forward pass in the network. | |
size_t | GetBatchDepth () const |
size_t | GetBatchHeight () const |
size_t | GetBatchSize () const |
Getters. | |
size_t | GetBatchWidth () const |
size_t | GetDepth () const |
EInitialization | GetInitialization () const |
size_t | GetInputDepth () const |
size_t | GetInputHeight () const |
size_t | GetInputWidth () const |
Layer_t * | GetLayerAt (size_t i) |
Get the layer in the vector of layers at position i. | |
const Layer_t * | GetLayerAt (size_t i) const |
std::vector< Layer_t * > & | GetLayers () |
const std::vector< Layer_t * > & | GetLayers () const |
ELossFunction | GetLossFunction () const |
size_t | GetOutputWidth () const |
ERegularization | GetRegularization () const |
Scalar_t | GetWeightDecay () const |
void | Initialize () |
DAE functions. | |
bool | IsTraining () const |
Scalar_t | Loss (const Matrix_t &groundTruth, const Matrix_t &weights, bool includeRegularization=true) const |
Function for evaluating the loss, based on the activations stored in the last layer. | |
Scalar_t | Loss (Tensor_t &input, const Matrix_t &groundTruth, const Matrix_t &weights, bool inTraining=false, bool includeRegularization=true) |
Function for evaluating the loss, based on the propagation of the given input. | |
void | Prediction (Matrix_t &predictions, EOutputFunction f) const |
Prediction based on activations stored in the last layer. | |
void | Prediction (Matrix_t &predictions, Tensor_t &input, EOutputFunction f) |
Prediction for the given inputs, based on what network learned. | |
void | Print () const |
Print the Deep Net Info. | |
Scalar_t | RegularizationTerm () const |
Function for computing the regularization term to be added to the loss function. | |
void | ResetTraining () |
Function that resets some training flags after looping over all the events, but not the weights. | |
void | SetBatchDepth (size_t batchDepth) |
void | SetBatchHeight (size_t batchHeight) |
void | SetBatchSize (size_t batchSize) |
Setters. | |
void | SetBatchWidth (size_t batchWidth) |
void | SetDropoutProbabilities (const std::vector< Double_t > &probabilities) |
void | SetInitialization (EInitialization I) |
void | SetInputDepth (size_t inputDepth) |
void | SetInputHeight (size_t inputHeight) |
void | SetInputWidth (size_t inputWidth) |
void | SetLossFunction (ELossFunction J) |
void | SetRegularization (ERegularization R) |
void | SetWeightDecay (Scalar_t weightDecay) |
void | Update (Scalar_t learningRate) |
Function that will update the weights and biases in the layers that contain weights and biases. | |
Private Member Functions | |
size_t | calculateDimension (int imgDim, int fltDim, int padding, int stride) |
bool | isInteger (Scalar_t x) const |
Private Attributes | |
size_t | fBatchDepth |
The depth of the batch used for training/testing. | |
size_t | fBatchHeight |
The height of the batch used for training/testing. | |
size_t | fBatchSize |
Batch size used for training and evaluation. | |
size_t | fBatchWidth |
The width of the batch used for training/testing. | |
EInitialization | fI |
The initialization method of the network. | |
size_t | fInputDepth |
The depth of the input. | |
size_t | fInputHeight |
The height of the input. | |
size_t | fInputWidth |
The width of the input. | |
bool | fIsTraining |
Is the network training? | |
ELossFunction | fJ |
The loss function of the network. | |
std::vector< Layer_t * > | fLayers |
The layers consisting the DeepNet. | |
ERegularization | fR |
The regularization used for the network. | |
Scalar_t | fWeightDecay |
The weight decay factor. | |
#include <TMVA/DNN/DeepNet.h>
using TMVA::DNN::TDeepNet< Architecture_t, Layer_t >::Matrix_t = typename Architecture_t::Matrix_t |
using TMVA::DNN::TDeepNet< Architecture_t, Layer_t >::Scalar_t = typename Architecture_t::Scalar_t |
using TMVA::DNN::TDeepNet< Architecture_t, Layer_t >::Tensor_t = typename Architecture_t::Tensor_t |
TMVA::DNN::TDeepNet< Architecture_t, Layer_t >::TDeepNet |
TMVA::DNN::TDeepNet< Architecture_t, Layer_t >::TDeepNet | ( | size_t | BatchSize, |
size_t | InputDepth, | ||
size_t | InputHeight, | ||
size_t | InputWidth, | ||
size_t | BatchDepth, | ||
size_t | BatchHeight, | ||
size_t | BatchWidth, | ||
ELossFunction | fJ, | ||
EInitialization | fI = EInitialization::kZero , |
||
ERegularization | fR = ERegularization::kNone , |
||
Scalar_t | fWeightDecay = 0.0 , |
||
bool | isTraining = false |
||
) |
TMVA::DNN::TDeepNet< Architecture_t, Layer_t >::TDeepNet | ( | const TDeepNet< Architecture_t, Layer_t > & | deepNet | ) |
TMVA::DNN::TDeepNet< Architecture_t, Layer_t >::~TDeepNet |
TBasicGRULayer< Architecture_t > * TMVA::DNN::TDeepNet< Architecture_t, Layer_t >::AddBasicGRULayer | ( | size_t | stateSize, |
size_t | inputSize, | ||
size_t | timeSteps, | ||
bool | rememberState = false , |
||
bool | returnSequence = false , |
||
bool | resetGateAfter = false |
||
) |
void TMVA::DNN::TDeepNet< Architecture_t, Layer_t >::AddBasicGRULayer | ( | TBasicGRULayer< Architecture_t > * | basicGRULayer | ) |
TBasicLSTMLayer< Architecture_t > * TMVA::DNN::TDeepNet< Architecture_t, Layer_t >::AddBasicLSTMLayer | ( | size_t | stateSize, |
size_t | inputSize, | ||
size_t | timeSteps, | ||
bool | rememberState = false , |
||
bool | returnSequence = false |
||
) |
void TMVA::DNN::TDeepNet< Architecture_t, Layer_t >::AddBasicLSTMLayer | ( | TBasicLSTMLayer< Architecture_t > * | basicLSTMLayer | ) |
TBasicRNNLayer< Architecture_t > * TMVA::DNN::TDeepNet< Architecture_t, Layer_t >::AddBasicRNNLayer | ( | size_t | stateSize, |
size_t | inputSize, | ||
size_t | timeSteps, | ||
bool | rememberState = false , |
||
bool | returnSequence = false , |
||
EActivationFunction | f = EActivationFunction::kTanh |
||
) |
void TMVA::DNN::TDeepNet< Architecture_t, Layer_t >::AddBasicRNNLayer | ( | TBasicRNNLayer< Architecture_t > * | basicRNNLayer | ) |
TBatchNormLayer< Architecture_t > * TMVA::DNN::TDeepNet< Architecture_t, Layer_t >::AddBatchNormLayer | ( | Scalar_t | momentum = -1 , |
Scalar_t | epsilon = 0.0001 |
||
) |
TConvLayer< Architecture_t > * TMVA::DNN::TDeepNet< Architecture_t, Layer_t >::AddConvLayer | ( | size_t | depth, |
size_t | filterHeight, | ||
size_t | filterWidth, | ||
size_t | strideRows, | ||
size_t | strideCols, | ||
size_t | paddingHeight, | ||
size_t | paddingWidth, | ||
EActivationFunction | f, | ||
Scalar_t | dropoutProbability = 1.0 |
||
) |
Function for adding Convolution layer in the Deep Neural Network, with a given depth, filter height and width, striding in rows and columns, the zero paddings, as well as the activation function and the dropout probability.
Based on these parameters, it calculates the width and height of the convolutional layer.
void TMVA::DNN::TDeepNet< Architecture_t, Layer_t >::AddConvLayer | ( | TConvLayer< Architecture_t > * | convLayer | ) |
TDenseLayer< Architecture_t > * TMVA::DNN::TDeepNet< Architecture_t, Layer_t >::AddDenseLayer | ( | size_t | width, |
EActivationFunction | f, | ||
Scalar_t | dropoutProbability = 1.0 |
||
) |
void TMVA::DNN::TDeepNet< Architecture_t, Layer_t >::AddDenseLayer | ( | TDenseLayer< Architecture_t > * | denseLayer | ) |
void TMVA::DNN::TDeepNet< Architecture_t, Layer_t >::AddMaxPoolLayer | ( | CNN::TMaxPoolLayer< Architecture_t > * | maxPoolLayer | ) |
TMaxPoolLayer< Architecture_t > * TMVA::DNN::TDeepNet< Architecture_t, Layer_t >::AddMaxPoolLayer | ( | size_t | frameHeight, |
size_t | frameWidth, | ||
size_t | strideRows, | ||
size_t | strideCols, | ||
Scalar_t | dropoutProbability = 1.0 |
||
) |
Function for adding Pooling layer in the Deep Neural Network, with a given filter height and width, striding in rows and columns as well as the dropout probability.
The depth is same as the previous layer depth. Based on these parameters, it calculates the width and height of the pooling layer.
TReshapeLayer< Architecture_t > * TMVA::DNN::TDeepNet< Architecture_t, Layer_t >::AddReshapeLayer | ( | size_t | depth, |
size_t | height, | ||
size_t | width, | ||
bool | flattening | ||
) |
void TMVA::DNN::TDeepNet< Architecture_t, Layer_t >::AddReshapeLayer | ( | TReshapeLayer< Architecture_t > * | reshapeLayer | ) |
auto TMVA::DNN::TDeepNet< Architecture_t, Layer_t >::Backward | ( | const Tensor_t & | input, |
const Matrix_t & | groundTruth, | ||
const Matrix_t & | weights | ||
) |
|
private |
|
inline |
auto TMVA::DNN::TDeepNet< Architecture_t, Layer_t >::Forward | ( | Tensor_t & | input, |
bool | applyDropout = false |
||
) |
|
inline |
|
inline |
|
inline |
|
inline |
|
inline |
|
inline |
|
inline |
|
inline |
|
inline |
|
inline |
|
inline |
|
inline |
|
inline |
|
inline |
|
inline |
|
inline |
|
inline |
auto TMVA::DNN::TDeepNet< Architecture_t, Layer_t >::Initialize |
|
inlineprivate |
|
inline |
auto TMVA::DNN::TDeepNet< Architecture_t, Layer_t >::Loss | ( | const Matrix_t & | groundTruth, |
const Matrix_t & | weights, | ||
bool | includeRegularization = true |
||
) | const |
auto TMVA::DNN::TDeepNet< Architecture_t, Layer_t >::Prediction | ( | Matrix_t & | predictions, |
EOutputFunction | f | ||
) | const |
auto TMVA::DNN::TDeepNet< Architecture_t, Layer_t >::Prediction | ( | Matrix_t & | predictions, |
Tensor_t & | input, | ||
EOutputFunction | f | ||
) |
auto TMVA::DNN::TDeepNet< Architecture_t, Layer_t >::Print |
auto TMVA::DNN::TDeepNet< Architecture_t, Layer_t >::RegularizationTerm |
auto TMVA::DNN::TDeepNet< Architecture_t, Layer_t >::ResetTraining |
|
inline |
|
inline |
|
inline |
|
inline |
void TMVA::DNN::TDeepNet< Architecture_t, Layer_t >::SetDropoutProbabilities | ( | const std::vector< Double_t > & | probabilities | ) |
|
inline |
|
inline |
|
inline |
|
inline |
|
inline |
|
inline |
|
inline |
auto TMVA::DNN::TDeepNet< Architecture_t, Layer_t >::Update | ( | Scalar_t | learningRate | ) |
|
private |
|
private |
|
private |
|
private |
|
private |
|
private |
|
private |
|
private |
|
private |
|
private |
|
private |
|
private |
|
private |