ConvLayer.h
// @(#)root/tmva/tmva/dnn:$Id$
// Author: Vladimir Ilievski

/**********************************************************************************
 * Project: TMVA - a Root-integrated toolkit for multivariate data analysis       *
 * Package: TMVA                                                                   *
 * Class  : TConvLayer                                                             *
 * Web    : http://tmva.sourceforge.net                                            *
 *                                                                                 *
 * Description:                                                                    *
 *      Convolutional Deep Neural Network Layer                                    *
 *                                                                                 *
 * Authors (alphabetical):                                                         *
 *      Vladimir Ilievski      <ilievski.vladimir@live.com>  - CERN, Switzerland   *
 *                                                                                 *
 * Copyright (c) 2005-2015:                                                        *
 *      CERN, Switzerland                                                          *
 *      U. of Victoria, Canada                                                     *
 *      MPI-K Heidelberg, Germany                                                  *
 *      U. of Bonn, Germany                                                        *
 *                                                                                 *
 * Redistribution and use in source and binary forms, with or without              *
 * modification, are permitted according to the terms listed in LICENSE            *
 * (http://tmva.sourceforge.net/LICENSE)                                           *
 **********************************************************************************/

#ifndef TMVA_CNN_CONVLAYER
#define TMVA_CNN_CONVLAYER

#include "TMatrix.h"

#include "TMVA/DNN/GeneralLayer.h"
#include "TMVA/DNN/Functions.h"
#include "TMVA/DNN/CNN/ContextHandles.h"

#include <vector>
#include <iostream>

namespace TMVA {
namespace DNN {
namespace CNN {

typedef struct TConvParams {

public:
   size_t batchSize; ///< Batch size used for training and evaluation

   size_t inputDepth;  ///< The depth of the previous layer or input.
   size_t inputHeight; ///< The height of the previous layer or input.
   size_t inputWidth;  ///< The width of the previous layer or input.

   size_t numberFilters; ///< The number of the filters, which is equal to the output's depth.
   size_t filterHeight;  ///< The height of the filter.
   size_t filterWidth;   ///< The width of the filter.

   size_t strideRows;    ///< The number of row pixels to slide the filter each step.
   size_t strideCols;    ///< The number of column pixels to slide the filter each step.
   size_t paddingHeight; ///< The number of zero layers added top and bottom of the input.
   size_t paddingWidth;  ///< The number of zero layers left and right of the input.

   TConvParams(size_t _batchSize, size_t _inputDepth, size_t _inputHeight, size_t _inputWidth, size_t _numberFilters,
               size_t _filterHeight, size_t _filterWidth, size_t _strideRows, size_t _strideCols,
               size_t _paddingHeight, size_t _paddingWidth)
      : batchSize(_batchSize), inputDepth(_inputDepth), inputHeight(_inputHeight), inputWidth(_inputWidth),
        numberFilters(_numberFilters), filterHeight(_filterHeight), filterWidth(_filterWidth),
        strideRows(_strideRows), strideCols(_strideCols), paddingHeight(_paddingHeight),
        paddingWidth(_paddingWidth)
   {}
} TConvParams;


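// Illustrative sketch (not part of the original header; all values are hypothetical
// examples): filling TConvParams for a 3-channel 32x32 input convolved with 16 filters
// of size 3x3, stride 1 and padding 1.
//
//    TConvParams params(/*batchSize*/ 32, /*inputDepth*/ 3, /*inputHeight*/ 32, /*inputWidth*/ 32,
//                       /*numberFilters*/ 16, /*filterHeight*/ 3, /*filterWidth*/ 3,
//                       /*strideRows*/ 1, /*strideCols*/ 1, /*paddingHeight*/ 1, /*paddingWidth*/ 1);
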
template <typename Architecture_t>
class TConvLayer : public VGeneralLayer<Architecture_t> {
public:
   using Tensor_t = typename Architecture_t::Tensor_t;
   using Matrix_t = typename Architecture_t::Matrix_t;
   using Scalar_t = typename Architecture_t::Scalar_t;

   using LayerDescriptor_t   = typename Architecture_t::ConvolutionDescriptor_t;
   using WeightsDescriptor_t = typename Architecture_t::FilterDescriptor_t;
   using HelperDescriptor_t  = typename Architecture_t::ActivationDescriptor_t;

   using AlgorithmForward_t  = typename Architecture_t::AlgorithmForward_t;  // Forward layer operation
   using AlgorithmBackward_t = typename Architecture_t::AlgorithmBackward_t; // Backward layer operation
   using AlgorithmHelper_t   = typename Architecture_t::AlgorithmHelper_t;   // Used for weight-gradient backward pass
   using ReduceTensorDescriptor_t = typename Architecture_t::ReduceTensorDescriptor_t; // Used for reduction of tensor (bias gradient)

   // FIXME: Add other cudnn types (algorithm preference etc.)
   using AlgorithmDataType_t = typename Architecture_t::AlgorithmDataType_t;

   /* Calculate the output dimension of the convolutional layer */
   static size_t calculateDimension(size_t imgDim, size_t fltDim, size_t padding, size_t stride);

   /* Calculate the number of pixels in a single receptive field */
   static size_t inline calculateNLocalViewPixels(size_t depth, size_t height, size_t width) { return depth * height * width; }

   /* Calculate the number of receptive fields in an image given the filter and image sizes */
   static size_t calculateNLocalViews(size_t inputHeight, size_t filterHeight, size_t paddingHeight, size_t strideRows,
                                      size_t inputWidth, size_t filterWidth, size_t paddingWidth, size_t strideCols);
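
   // Worked example (illustrative, not part of the original header): for a 32x32 input,
   // a 3x3 filter, padding 1 and stride 1, calculateDimension gives (32 - 3 + 2*1)/1 + 1 = 32
   // in each direction, so calculateNLocalViews returns 32 * 32 = 1024 receptive fields and
   // calculateNLocalViewPixels(/*depth*/ 3, 3, 3) returns 3 * 3 * 3 = 27 pixels per field.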

protected:
   size_t fFilterDepth;  ///< The depth of the filter.
   size_t fFilterHeight; ///< The height of the filter.
   size_t fFilterWidth;  ///< The width of the filter.

   size_t fStrideRows; ///< The number of row pixels to slide the filter each step.
   size_t fStrideCols; ///< The number of column pixels to slide the filter each step.

   size_t fNLocalViewPixels; ///< The number of pixels in one local image view.
   size_t fNLocalViews;      ///< The number of local views in one image.

   Scalar_t fDropoutProbability; ///< Probability that an input is active.

   TDescriptors *fDescriptors = nullptr; ///< Keeps the convolution, activations and filter descriptors
   TWorkspace   *fWorkspace   = nullptr; ///< Backend workspace used by the convolution operations

private:
   size_t fPaddingHeight; ///< The number of zero layers added top and bottom of the input.
   size_t fPaddingWidth;  ///< The number of zero layers left and right of the input.

   Tensor_t fInputActivation; ///< First output of this layer after conv, before activation.

   std::vector<int> fBackwardIndices; ///< Vector of indices used for a fast Im2Col in backward pass

   EActivationFunction fF; ///< Activation function of the layer.
   ERegularization fReg;   ///< The regularization method.
   Scalar_t fWeightDecay;  ///< The weight decay.

   Tensor_t fForwardTensor; ///< Cache tensor used for speeding-up the forward pass.

   void InitializeDescriptors();
   void ReleaseDescriptors();
   void InitializeWorkspace();
   void FreeWorkspace();

public:
   /*! Constructor. */
   TConvLayer(size_t BatchSize, size_t InputDepth, size_t InputHeight, size_t InputWidth, size_t Depth, EInitialization Init,
              size_t FilterHeight, size_t FilterWidth, size_t StrideRows, size_t StrideCols, size_t PaddingHeight,
              size_t PaddingWidth, Scalar_t DropoutProbability, EActivationFunction f, ERegularization Reg,
              Scalar_t WeightDecay);

   /*! Copy the conv layer provided as a pointer */
   TConvLayer(TConvLayer<Architecture_t> *layer);

   /*! Copy constructor. */
   TConvLayer(const TConvLayer &);

   /*! Destructor. */
   virtual ~TConvLayer();

   // virtual void Initialize();

   /*! Computes the activation of the layer for the given input. The input
    *  must be in 3D tensor form, with the different matrices corresponding to
    *  different events in the batch. Computes the activations as well as
    *  the first partial derivative of the activation function at those
    *  activations. */
   void Forward(Tensor_t &input, bool applyDropout = false);

   /*! Computes the weight, bias and activation gradients. Uses the precomputed
    *  first partial derivatives of the activation function computed during
    *  forward propagation and modifies them. Must only be called directly
    *  after the corresponding call to Forward(...). */
   void Backward(Tensor_t &gradients_backward, const Tensor_t &activations_backward);
   ////          Tensor_t &inp1, Tensor_t &inp2);

   /*! Prints the info about the layer. */
   void Print() const;

   /*! Writes the information and the weights of the layer into an XML node. */
   virtual void AddWeightsXMLTo(void *parent);

   /*! Reads the information and the weights of the layer from an XML node. */
   virtual void ReadWeightsFromXML(void *parent);

   /*! Getters */
   size_t GetFilterDepth() const { return fFilterDepth; }
   size_t GetFilterHeight() const { return fFilterHeight; }
   size_t GetFilterWidth() const { return fFilterWidth; }

   size_t GetStrideRows() const { return fStrideRows; }
   size_t GetStrideCols() const { return fStrideCols; }

   size_t GetPaddingHeight() const { return fPaddingHeight; }
   size_t GetPaddingWidth() const { return fPaddingWidth; }

   size_t GetNLocalViewPixels() const { return fNLocalViewPixels; }
   size_t GetNLocalViews() const { return fNLocalViews; }

   Scalar_t GetDropoutProbability() const { return fDropoutProbability; }

   const Tensor_t &GetInputActivation() const { return fInputActivation; }
   Tensor_t &GetInputActivation() { return fInputActivation; }

   Matrix_t &GetInputActivationAt(size_t i) { return fInputActivation[i]; }
   const Matrix_t &GetInputActivationAt(size_t i) const { return fInputActivation[i]; }

   const Tensor_t &GetForwardMatrices() const { return fForwardTensor; }
   Tensor_t &GetForwardMatrices() { return fForwardTensor; }

   EActivationFunction GetActivationFunction() const { return fF; }
   ERegularization GetRegularization() const { return fReg; }
   Scalar_t GetWeightDecay() const { return fWeightDecay; }

   // The following getters are used for testing
   TDescriptors *GetDescriptors() { return fDescriptors; }
   const TDescriptors *GetDescriptors() const { return fDescriptors; }

   TWorkspace *GetWorkspace() { return fWorkspace; }
   const TWorkspace *GetWorkspace() const { return fWorkspace; }
};


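// A minimal usage sketch (illustrative only; it assumes the TCpu<float> backend from
// TMVA/DNN/Architectures/Cpu.h and example hyper-parameters, none of which are part of
// this header):
//
//    using Arch_t = TMVA::DNN::TCpu<float>;
//    TMVA::DNN::CNN::TConvLayer<Arch_t> conv(
//        /*BatchSize*/ 32, /*InputDepth*/ 3, /*InputHeight*/ 32, /*InputWidth*/ 32,
//        /*Depth*/ 16, EInitialization::kGauss,
//        /*FilterHeight*/ 3, /*FilterWidth*/ 3, /*StrideRows*/ 1, /*StrideCols*/ 1,
//        /*PaddingHeight*/ 1, /*PaddingWidth*/ 1,
//        /*DropoutProbability*/ 1.0, EActivationFunction::kRelu,
//        ERegularization::kNone, /*WeightDecay*/ 0.0);
//
//    // Forward expects one matrix per event in the batch; Backward must follow the
//    // corresponding Forward call.
//    Arch_t::Tensor_t input(32, 3, 32 * 32);
//    conv.Forward(input, /*applyDropout*/ false);
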
//
//
//  Conv Layer Class - Implementation
//______________________________________________________________________________
template <typename Architecture_t>
TConvLayer<Architecture_t>::TConvLayer(size_t batchSize, size_t inputDepth, size_t inputHeight, size_t inputWidth,
                                       size_t depth, EInitialization init, size_t filterHeight, size_t filterWidth,
                                       size_t strideRows, size_t strideCols, size_t paddingHeight, size_t paddingWidth,
                                       Scalar_t dropoutProbability, EActivationFunction f, ERegularization reg,
                                       Scalar_t weightDecay)
   : VGeneralLayer<Architecture_t>(batchSize, inputDepth, inputHeight, inputWidth, depth,
                                   calculateDimension(inputHeight, filterHeight, paddingHeight, strideRows),
                                   calculateDimension(inputWidth, filterWidth, paddingWidth, strideCols),
                                   1, depth, calculateNLocalViewPixels(inputDepth, filterHeight, filterWidth),
                                   1, depth, 1, batchSize, depth,
                                   calculateNLocalViews(inputHeight, filterHeight, paddingHeight, strideRows,
                                                        inputWidth, filterWidth, paddingWidth, strideCols),
                                   init),
     fFilterDepth(inputDepth), fFilterHeight(filterHeight), fFilterWidth(filterWidth), fStrideRows(strideRows),
     fStrideCols(strideCols), fNLocalViewPixels(calculateNLocalViewPixels(inputDepth, filterHeight, filterWidth)),
     fNLocalViews(calculateNLocalViews(inputHeight, filterHeight, paddingHeight, strideRows,
                                       inputWidth, filterWidth, paddingWidth, strideCols)),
     fDropoutProbability(dropoutProbability), fPaddingHeight(paddingHeight), fPaddingWidth(paddingWidth),
     fInputActivation(), fF(f), fReg(reg), fWeightDecay(weightDecay)
{
   /** Each element in the vector is a `T_Matrix` representing an event, therefore `vec.size() == batchSize`.
    *  Cells in these matrices are distributed in the following manner:
    *  Each row represents a single feature map, therefore we have `nRows == depth`.
    *  Each column represents a single pixel in that feature map, therefore we have `nCols == nLocalViews`.
    **/
   fInputActivation = Tensor_t( batchSize, depth, fNLocalViews);              // create tensor (shape is B x C x LV)
   fForwardTensor   = Tensor_t( batchSize, fNLocalViews, fNLocalViewPixels ); // Im2Col cache used in the forward pass

   InitializeDescriptors();
   InitializeWorkspace();
}

//______________________________________________________________________________
template <typename Architecture_t>
TConvLayer<Architecture_t>::TConvLayer(TConvLayer<Architecture_t> *layer)
   : VGeneralLayer<Architecture_t>(layer), fFilterDepth(layer->GetFilterDepth()),
     fFilterHeight(layer->GetFilterHeight()), fFilterWidth(layer->GetFilterWidth()),
     fStrideRows(layer->GetStrideRows()), fStrideCols(layer->GetStrideCols()),
     fNLocalViewPixels(layer->GetNLocalViewPixels()), fNLocalViews(layer->GetNLocalViews()),
     fDropoutProbability(layer->GetDropoutProbability()), fPaddingHeight(layer->GetPaddingHeight()),
     fPaddingWidth(layer->GetPaddingWidth()),
     fInputActivation( layer->GetInputActivation().GetShape() ),
     fF(layer->GetActivationFunction()),
     fReg(layer->GetRegularization()), fWeightDecay(layer->GetWeightDecay()),
     fForwardTensor( layer->GetForwardMatrices().GetShape() )
{
   InitializeDescriptors();
   InitializeWorkspace();
}

//______________________________________________________________________________
template <typename Architecture_t>
TConvLayer<Architecture_t>::TConvLayer(const TConvLayer &convLayer)
   : VGeneralLayer<Architecture_t>(convLayer), fFilterDepth(convLayer.fFilterDepth),
     fFilterHeight(convLayer.fFilterHeight), fFilterWidth(convLayer.fFilterWidth), fStrideRows(convLayer.fStrideRows),
     fStrideCols(convLayer.fStrideCols), fNLocalViewPixels(convLayer.fNLocalViewPixels),
     fNLocalViews(convLayer.fNLocalViews), fDropoutProbability(convLayer.fDropoutProbability),
     fPaddingHeight(convLayer.fPaddingHeight), fPaddingWidth(convLayer.fPaddingWidth),
     fInputActivation( convLayer.GetInputActivation().GetShape() ),
     fF(convLayer.fF),
     fReg(convLayer.fReg), fWeightDecay(convLayer.fWeightDecay),
     fForwardTensor( convLayer.GetForwardMatrices().GetShape() )
{
   InitializeDescriptors();
   InitializeWorkspace();
}

//______________________________________________________________________________
// FIXME: Add function for cudaFree
template <typename Architecture_t>
TConvLayer<Architecture_t>::~TConvLayer()
{
   // std::cout << "!!!!Delete conv layer " << this->GetOutput().GetShape()[1] << " " << this->GetOutput().GetShape()[2] << " " << this->GetOutput().GetShape()[3] << std::endl;
   if (fDescriptors) {
      ReleaseDescriptors();
      delete fDescriptors;
   }

   if (fWorkspace) {
      FreeWorkspace();
      delete fWorkspace;
   }
}


//______________________________________________________________________________
template <typename Architecture_t>
auto TConvLayer<Architecture_t>::Forward(Tensor_t &input, bool /*applyDropout*/) -> void
{
   TConvParams params(this->GetBatchSize(), this->GetInputDepth(), this->GetInputHeight(), this->GetInputWidth(),
                      this->GetDepth(), this->GetFilterHeight(), this->GetFilterWidth(),
                      this->GetStrideRows(), this->GetStrideCols(), this->GetPaddingHeight(), this->GetPaddingWidth());

   // R__ASSERT( input.size() > 0);
   Architecture_t::ConvLayerForward(this->GetOutput(), this->GetInputActivation(), input, this->GetWeightsAt(0),
                                    this->GetBiasesAt(0), params, this->GetActivationFunction(),
                                    this->GetForwardMatrices(), (TCNNDescriptors<TConvLayer<Architecture_t>> &) (*fDescriptors),
                                    (TCNNWorkspace<TConvLayer<Architecture_t>> &) (*fWorkspace));
}

//______________________________________________________________________________
template <typename Architecture_t>
auto TConvLayer<Architecture_t>::Backward(Tensor_t &gradients_backward,
                                          const Tensor_t &activations_backward) -> void
//                                        Tensor_t & /*inp1*/, Tensor_t & /*inp2*/) -> void
{
   Architecture_t::ConvLayerBackward(
      gradients_backward, this->GetWeightGradientsAt(0), this->GetBiasGradientsAt(0), this->GetInputActivation(),
      this->GetActivationGradients(), this->GetWeightsAt(0), activations_backward, this->GetOutput(),
      this->GetActivationFunction(),
      (TCNNDescriptors<TConvLayer<Architecture_t>> &) (*fDescriptors),
      (TCNNWorkspace<TConvLayer<Architecture_t>> &) (*fWorkspace),
      this->GetBatchSize(), this->GetInputHeight(), this->GetInputWidth(), this->GetDepth(),
      this->GetHeight(), this->GetWidth(), this->GetFilterDepth(), this->GetFilterHeight(),
      this->GetFilterWidth(), this->GetNLocalViews());

   addRegularizationGradients<Architecture_t>(this->GetWeightGradientsAt(0), this->GetWeightsAt(0),
                                              this->GetWeightDecay(), this->GetRegularization());
}

//______________________________________________________________________________
template <typename Architecture_t>
auto TConvLayer<Architecture_t>::Print() const -> void
{
   std::cout << " CONV LAYER: \t";
   std::cout << "( W = " << this->GetWidth() << " , ";
   std::cout << " H = " << this->GetHeight() << " , ";
   std::cout << " D = " << this->GetDepth() << " ) ";

   std::cout << "\t Filter ( W = " << this->GetFilterWidth() << " , ";
   std::cout << " H = " << this->GetFilterHeight() << " ) ";
   // std::cout << "\t Local Views = " << this->GetNLocalViews() << " ";
   if (this->GetOutput().GetSize() > 0) {
      std::cout << "\tOutput = ( " << this->GetOutput().GetFirstSize() << " , "
                << this->GetOutput().GetCSize() << " , " << this->GetOutput().GetHSize() << " , " << this->GetOutput().GetWSize()
                << " ) ";
   }
   std::vector<std::string> activationNames = { "Identity", "Relu", "Sigmoid", "Tanh", "SymmRelu", "SoftSign", "Gauss" };
   std::cout << "\t Activation Function = ";
   std::cout << activationNames[ static_cast<int>(fF) ] << std::endl;
}

//______________________________________________________________________________
template <typename Architecture_t>
void TConvLayer<Architecture_t>::AddWeightsXMLTo(void *parent)
{
   auto layerxml = gTools().xmlengine().NewChild(parent, 0, "ConvLayer");

   gTools().xmlengine().NewAttr(layerxml, 0, "Depth", gTools().StringFromInt(this->GetDepth()));
   gTools().xmlengine().NewAttr(layerxml, 0, "FilterHeight", gTools().StringFromInt(this->GetFilterHeight()));
   gTools().xmlengine().NewAttr(layerxml, 0, "FilterWidth", gTools().StringFromInt(this->GetFilterWidth()));
   gTools().xmlengine().NewAttr(layerxml, 0, "StrideRows", gTools().StringFromInt(this->GetStrideRows()));
   gTools().xmlengine().NewAttr(layerxml, 0, "StrideCols", gTools().StringFromInt(this->GetStrideCols()));
   gTools().xmlengine().NewAttr(layerxml, 0, "PaddingHeight", gTools().StringFromInt(this->GetPaddingHeight()));
   gTools().xmlengine().NewAttr(layerxml, 0, "PaddingWidth", gTools().StringFromInt(this->GetPaddingWidth()));

   int activationFunction = static_cast<int>(this->GetActivationFunction());
   gTools().xmlengine().NewAttr(layerxml, 0, "ActivationFunction",
                                TString::Itoa(activationFunction, 10));

   // write weight and bias matrices
   this->WriteMatrixToXML(layerxml, "Weights", this->GetWeightsAt(0));
   this->WriteMatrixToXML(layerxml, "Biases", this->GetBiasesAt(0));
}

//______________________________________________________________________________
template <typename Architecture_t>
void TConvLayer<Architecture_t>::ReadWeightsFromXML(void *parent)
{
   // Read weights and biases.
   // The meta information is read beforehand because it is needed before creating the Conv layer.
   this->ReadMatrixXML(parent, "Weights", this->GetWeightsAt(0));
   this->ReadMatrixXML(parent, "Biases", this->GetBiasesAt(0));
}

template <typename Architecture_t>
size_t TConvLayer<Architecture_t>::calculateDimension(size_t imgDim, size_t fltDim, size_t padding, size_t stride)
{
   size_t temp = imgDim - fltDim + 2 * padding;
   if (temp % stride || temp + stride <= 0) {
      Fatal("calculateDimension", "Not compatible hyper parameters for layer - (imageDim, filterDim, padding, stride) "
            "%zu, %zu, %zu, %zu", imgDim, fltDim, padding, stride);
   }
   return temp / stride + 1;
}
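
// Illustrative check of the formula above (not part of the original header): with
// imgDim = 28, fltDim = 5, padding = 0 and stride = 1, temp = 28 - 5 + 0 = 23 and the
// returned output dimension is 23/1 + 1 = 24. An incompatible combination such as
// imgDim = 28, fltDim = 5, padding = 0, stride = 2 leaves a remainder (23 % 2 != 0)
// and triggers the Fatal() call.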

template <typename Architecture_t>
size_t TConvLayer<Architecture_t>::calculateNLocalViews(size_t inputHeight, size_t filterHeight, size_t paddingHeight,
                                                        size_t strideRows, size_t inputWidth, size_t filterWidth,
                                                        size_t paddingWidth, size_t strideCols)
{
   int height = calculateDimension(inputHeight, filterHeight, paddingHeight, strideRows);
   int width = calculateDimension(inputWidth, filterWidth, paddingWidth, strideCols);

   return height * width;
}

//______________________________________________________________________________
template <typename Architecture_t>
void TConvLayer<Architecture_t>::InitializeDescriptors() {
   Architecture_t::InitializeConvDescriptors(fDescriptors, this);
}

template <typename Architecture_t>
void TConvLayer<Architecture_t>::ReleaseDescriptors() {
   Architecture_t::ReleaseConvDescriptors(fDescriptors);
}

//______________________________________________________________________________
template <typename Architecture_t>
void TConvLayer<Architecture_t>::InitializeWorkspace() {
   TConvParams params(this->GetBatchSize(), this->GetInputDepth(), this->GetInputHeight(), this->GetInputWidth(),
                      this->GetDepth(), this->GetFilterHeight(), this->GetFilterWidth(),
                      this->GetStrideRows(), this->GetStrideCols(), this->GetPaddingHeight(), this->GetPaddingWidth());

   Architecture_t::InitializeConvWorkspace(fWorkspace, fDescriptors, params, this);
}

template <typename Architecture_t>
void TConvLayer<Architecture_t>::FreeWorkspace() {
   Architecture_t::FreeConvWorkspace(fWorkspace, this);
}

//______________________________________________________________________________

} // namespace CNN
} // namespace DNN
} // namespace TMVA

#endif