#ifndef TMVA_DNN_TENSORDATALOADER
#define TMVA_DNN_TENSORDATALOADER

#include <algorithm> // std::shuffle
#include <cassert>   // assert
#include <vector>
template <typename Architecture_t>
class TTensorBatch {
public:
   using Matrix_t = typename Architecture_t::Matrix_t;
   using Tensor_t = typename Architecture_t::Tensor_t;

   ... // data members, constructors and the GetInput/GetOutput/GetWeights accessors are documented below
};
template <typename Data_t, typename Architecture_t>
class TTensorDataLoader;

template <typename Data_t, typename Architecture_t>
class TTensorBatchIterator {
   ... // iterator interface (operator*, operator++, operator!=) documented below
};
template <typename Data_t, typename Architecture_t>
class TTensorDataLoader {
private:
   using HostBuffer_t = typename Architecture_t::HostBuffer_t;
   using DeviceBuffer_t = typename Architecture_t::DeviceBuffer_t;
   using Matrix_t = typename Architecture_t::Matrix_t;
   using Tensor_t = typename Architecture_t::Tensor_t;
   using Shape_t = typename Architecture_t::Tensor_t::Shape_t;

   ... // data members are documented below

public:
   /*! Constructor. */
   TTensorDataLoader(const Data_t &data, size_t nSamples, size_t batchSize, const Shape_t &inputLayout,
                     const Shape_t &batchLayout, size_t nOutputFeatures, size_t nStreams = 1);

   /*! Shuffle the order of the samples in the batch. */
   template <typename RNG>
   void Shuffle(RNG &rng);

   /*! Return the next batch from the training set. */
   TTensorBatch<Architecture_t> GetTensorBatch();

   ... // copy/move operations and the CopyTensorInput/Output/Weights helpers are documented below
};
template <typename Architecture_t>
TTensorBatch<Architecture_t>::TTensorBatch(Tensor_t &inputTensor, Matrix_t &outputMatrix, Matrix_t &weightMatrix)
   : fInputTensor(inputTensor), fOutputMatrix(outputMatrix), fWeightMatrix(weightMatrix)
{
   // Nothing to do here.
}
template <typename Data_t, typename Architecture_t>
TTensorDataLoader<Data_t, Architecture_t>::TTensorDataLoader(const Data_t &data, size_t nSamples, size_t batchSize,
                                                              const Shape_t &inputLayout, const Shape_t &batchLayout,
                                                              size_t nOutputFeatures, size_t nStreams)
   : fData(data), fNSamples(nSamples), fBatchSize(batchSize), fInputLayout(inputLayout), fBatchDepth(batchLayout[0]),
     fBatchHeight(batchLayout[1]), fBatchWidth(batchLayout[2]), fNOutputFeatures(nOutputFeatures), fBatchIndex(0),
     fNStreams(nStreams), fDeviceBuffers(), fHostBuffers(), fSampleIndices()
{
   // Body elided in this excerpt: it allocates the fNStreams host/device buffer pairs
   // and fills fSampleIndices with the indices 0 ... fNSamples - 1.
}
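// Illustrative construction sketch (not part of the original header): how a loader for
// flat (dense) events with nFeatures input variables could be set up. The function name
// and the concrete batch size are made up; Architecture_t is assumed to expose a Shape_t
// that behaves like std::vector<size_t> (as the CPU backends do), and the arrangement of
// the events inside Data_t is whatever the backend's CopyTensorInput/Output/Weights
// specializations expect.
template <typename Data_t, typename Architecture_t>
void ExampleConstructLoader(const Data_t &data, size_t nSamples, size_t nFeatures)
{
   size_t batchSize = 32; // illustrative

   // Flat events: inputLayout = {1, 1, nFeatures}; the matching batch layout is
   // {batchDepth = 1, batchHeight = batchSize, batchWidth = nFeatures}, which is the
   // shape GetTensorBatch() treats as the dense special case.
   TTensorDataLoader<Data_t, Architecture_t> loader(data, nSamples, batchSize, {1, 1, nFeatures},
                                                    {1, batchSize, nFeatures}, /*nOutputFeatures=*/1);
   (void)loader; // consumed in the epoch-loop sketch further below
}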
template <typename Data_t, typename Architecture_t>
TTensorBatch<Architecture_t> TTensorDataLoader<Data_t, Architecture_t>::GetTensorBatch()
{
   fBatchIndex %= (fNSamples / fBatchSize); // Cycle through the batches of the epoch.

   size_t inputTensorSize = fBatchDepth * fBatchHeight * fBatchWidth;
   size_t outputMatrixSize = fBatchSize * fNOutputFeatures;
   size_t weightMatrixSize = fBatchSize;

   size_t streamIndex = fBatchIndex % fNStreams;
   HostBuffer_t &hostBuffer = fHostBuffers[streamIndex];
   DeviceBuffer_t &deviceBuffer = fDeviceBuffers[streamIndex];

   // Both buffers are partitioned as [input | output | weights].
   HostBuffer_t inputHostBuffer = hostBuffer.GetSubBuffer(0, inputTensorSize);
   HostBuffer_t outputHostBuffer = hostBuffer.GetSubBuffer(inputTensorSize, outputMatrixSize);
   HostBuffer_t weightHostBuffer = hostBuffer.GetSubBuffer(inputTensorSize + outputMatrixSize, weightMatrixSize);

   DeviceBuffer_t inputDeviceBuffer = deviceBuffer.GetSubBuffer(0, inputTensorSize);
   DeviceBuffer_t outputDeviceBuffer = deviceBuffer.GetSubBuffer(inputTensorSize, outputMatrixSize);
   DeviceBuffer_t weightDeviceBuffer = deviceBuffer.GetSubBuffer(inputTensorSize + outputMatrixSize, weightMatrixSize);

   size_t sampleIndex = fBatchIndex * fBatchSize;
   IndexIterator_t sampleIndexIterator = fSampleIndices.begin() + sampleIndex;

   // Fill the host buffer from the dataset, then transfer everything to the device in one copy.
   CopyTensorInput(inputHostBuffer, sampleIndexIterator);
   CopyTensorOutput(outputHostBuffer, sampleIndexIterator);
   CopyTensorWeights(weightHostBuffer, sampleIndexIterator);

   deviceBuffer.CopyFrom(hostBuffer);

   assert(fInputLayout.size() == 3);
   Tensor_t inputTensor = Architecture_t::CreateTensor(inputDeviceBuffer, fBatchSize, fInputLayout[0],
                                                       fInputLayout[1], fInputLayout[2]);
   // Special case for flat (dense) input: reinterpret the buffer as a batchSize x nFeatures
   // matrix in column-major layout.
   if (fBatchDepth == 1 && fBatchHeight == fBatchSize && fInputLayout[0] == 1 && fInputLayout[1] == 1) {
      inputTensor = Tensor_t(inputDeviceBuffer, {fBatchSize, fInputLayout.back()}, Tensor_t::MemoryLayout::ColumnMajor);
   }

   Matrix_t outputMatrix(outputDeviceBuffer, fBatchSize, fNOutputFeatures);
   Matrix_t weightMatrix(weightDeviceBuffer, fBatchSize, 1);

   fBatchIndex++;
   return TTensorBatch<Architecture_t>(inputTensor, outputMatrix, weightMatrix);
}
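// Worked example (hypothetical numbers, not part of the original header): how a single
// host/device buffer is partitioned by GetTensorBatch for batchLayout = {1, 32, 10} and
// nOutputFeatures = 1.
inline void ExampleBufferPartition()
{
   size_t batchDepth = 1, batchHeight = 32, batchWidth = 10;
   size_t batchSize = 32, nOutputFeatures = 1;

   size_t inputTensorSize  = batchDepth * batchHeight * batchWidth; // 320 elements at offset 0
   size_t outputMatrixSize = batchSize * nOutputFeatures;           // 32 elements at offset 320
   size_t weightMatrixSize = batchSize;                             // 32 elements at offset 352

   // Each of the fNStreams buffers allocated by the constructor therefore holds
   // 320 + 32 + 32 = 384 elements, laid out as [input | output | weights].
   assert(inputTensorSize + outputMatrixSize + weightMatrixSize == 384);
}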
template <typename Data_t, typename Architecture_t>
template <typename RNG>
void TTensorDataLoader<Data_t, Architecture_t>::Shuffle(RNG &rng)
{
   // Indirect shuffle: only the sample indices are permuted, no input data is moved.
   std::shuffle(fSampleIndices.begin(), fSampleIndices.end(), rng);
}
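// Illustrative epoch loop (not part of the original header): shuffle the sample indices
// once per epoch, then fetch fNSamples / fBatchSize batches. The function name is made up;
// rng can be any generator accepted by std::shuffle (e.g. std::mt19937).
template <typename Data_t, typename Architecture_t, typename RNG>
void ExampleEpoch(TTensorDataLoader<Data_t, Architecture_t> &loader, RNG &rng, size_t nSamples, size_t batchSize)
{
   loader.Shuffle(rng);

   for (size_t i = 0; i < nSamples / batchSize; ++i) {
      TTensorBatch<Architecture_t> batch = loader.GetTensorBatch();
      auto &input   = batch.GetInput();   // Tensor_t backed by the device buffer
      auto &output  = batch.GetOutput();  // Matrix_t holding the ground truth
      auto &weights = batch.GetWeights(); // Matrix_t holding the per-event weights
      // ... forward/backward pass with the chosen backend would go here ...
      (void)input; (void)output; (void)weights;
   }
}
// The TTensorBatchIterator documented below provides the equivalent iterator-based traversal.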
TTensorBatchIterator(TTensorDataLoader< Data_t, Architecture_t > &tensorDataLoader, size_t index=0)
TTensorBatch< Architecture_t > operator*()
bool operator!=(const TTensorBatchIterator &other)
TTensorDataLoader< Data_t, Architecture_t > & fTensorDataLoader
TTensorBatchIterator operator++()
TTensorBatch & operator=(const TTensorBatch &)=default
Tensor_t fInputTensor
The input tensor batch, one matrix per input.
TTensorBatch(TTensorBatch &&)=default
TTensorBatch(Tensor_t &, Matrix_t &, Matrix_t &)
Matrix_t fWeightMatrix
The event/example weights.
TTensorBatch & operator=(TTensorBatch &&)=default
typename Architecture_t::Tensor_t Tensor_t
Matrix_t & GetWeights()
Return the matrix holding the event weights.
Matrix_t & GetOutput()
Return the matrix representing the output data.
TTensorBatch(const TTensorBatch &)=default
Matrix_t fOutputMatrix
The output matrix representing the ground truth.
Tensor_t & GetInput()
Return the tensor representing the input data.
typename Architecture_t::Matrix_t Matrix_t
typename Architecture_t::Tensor_t::Shape_t Shape_t
TTensorDataLoader(const TTensorDataLoader &)=default
void Shuffle(RNG &rng)
Shuffle the order of the samples in the batch.
size_t fNOutputFeatures
The number of outputs from the classifier/regressor.
std::vector< size_t > fSampleIndices
Ordering of the samples in the epoch.
std::vector< DeviceBuffer_t > fDeviceBuffers
The device buffers used to keep the input, output and weight data.
TTensorBatch< Architecture_t > GetTensorBatch()
Return the next batch from the training set.
TTensorDataLoader & operator=(const TTensorDataLoader &)=default
typename Architecture_t::DeviceBuffer_t DeviceBuffer_t
size_t fBatchWidth
The number of columns in each matrix.
void CopyTensorOutput(HostBuffer_t &buffer, IndexIterator_t begin)
Copy output matrix into the given host buffer.
size_t fBatchIndex
The index of the batch when there are multiple batches in parallel.
size_t fBatchHeight
The number of rows in each matrix.
std::vector< HostBuffer_t > fHostBuffers
The host buffers used to load the input, output and weight data.
TTensorDataLoader(const Data_t &data, size_t nSamples, size_t batchSize, const Shape_t &inputLayout, const Shape_t &batchLayout, size_t nOutputFeatures, size_t nStreams=1)
Constructor.
TTensorDataLoader(TTensorDataLoader &&)=default
size_t fBatchSize
The size of a batch.
void CopyTensorWeights(HostBuffer_t &buffer, IndexIterator_t begin)
Copy weight matrix into the given host buffer.
typename Architecture_t::Matrix_t Matrix_t
typename Architecture_t::HostBuffer_t HostBuffer_t
size_t fBatchDepth
The number of matrices in the tensor.
size_t fNStreams
Number of buffer pairs.
const Data_t & fData
The data that should be loaded in the batches.
TTensorDataLoader & operator=(TTensorDataLoader &&)=default
typename Architecture_t::Tensor_t Tensor_t
size_t fNSamples
The total number of samples in the dataset.
void CopyTensorInput(HostBuffer_t &buffer, IndexIterator_t begin)
Copy input tensor into the given host buffer.
DataSetInfo
Class that contains all the data information.
std::tuple< const std::vector< TMatrixT< Double_t > > &, const TMatrixT< Double_t > &, const TMatrixT< Double_t > & > TensorInput
typename std::vector< size_t >::iterator IndexIterator_t
std::tuple< const std::vector< Event * > &, const DataSetInfo & > TMVAInput_t
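The templates above compile only against a backend that supplies the operations they call. Inferred from this header alone (a summary, not an official TMVA interface specification), an Architecture_t must provide at least:
   HostBuffer_t and DeviceBuffer_t, each with GetSubBuffer(offset, size), plus DeviceBuffer_t::CopyFrom(hostBuffer)
   Matrix_t constructible from (DeviceBuffer_t, nRows, nCols)
   Tensor_t with a Shape_t type, a MemoryLayout enumeration and a (DeviceBuffer_t, shape, layout) constructor
   a static Architecture_t::CreateTensor(deviceBuffer, batchSize, depth, height, width)
In addition, CopyTensorInput, CopyTensorOutput and CopyTensorWeights must be specialized for each (Data_t, Architecture_t) combination that is actually used.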