ROOT Reference Guide
NeuralNet.h File Reference
#include <map>
#include <vector>
#include <iostream>
#include <fstream>
#include <algorithm>
#include <iterator>
#include <functional>
#include <tuple>
#include <cmath>
#include <cassert>
#include <random>
#include <thread>
#include <future>
#include <type_traits>
#include "Pattern.h"
#include "Monitoring.h"
#include "TApplication.h"
#include "Timer.h"
#include "TH1F.h"
#include "TH2F.h"
#include "TStyle.h"
#include <fenv.h>
#include "TMVA/NeuralNet.icc"
Include dependency graph for NeuralNet.h (graph not reproduced here).
Included-by graph: files that directly or indirectly include this file (graph not reproduced here).

Classes

class  TMVA::DNN::Batch
 The Batch class encapsulates one mini-batch. More...
 
class  TMVA::DNN::ClassificationSettings
 Settings for classification, used to distinguish between different function signatures. More...
 
class  TMVA::DNN::Layer
 Layer defines the layout of a layer. More...
 
class  TMVA::DNN::LayerData
 LayerData holds the data of one layer. More...
 
class  TMVA::DNN::MeanVariance
 
class  TMVA::DNN::Net
 neural net More...
 
class  TMVA::DNN::Settings
 Settings for the training of the neural net. More...
 
class  TMVA::DNN::Steepest
 Steepest Gradient Descent algorithm (SGD) More...
 

Namespaces

namespace  TMVA
 create variable transformations
 
namespace  TMVA::DNN
 

Typedefs

typedef std::vector< char > TMVA::DNN::DropContainer
 
typedef std::tuple< Settings &, Batch &, DropContainer & > TMVA::DNN::pass_through_type
 

Enumerations

enum class  TMVA::DNN::EnumFunction {
  TMVA::DNN::ZERO = '0' , TMVA::DNN::LINEAR = 'L' , TMVA::DNN::TANH = 'T' , TMVA::DNN::RELU = 'R' ,
  TMVA::DNN::SYMMRELU = 'r' , TMVA::DNN::TANHSHIFT = 't' , TMVA::DNN::SIGMOID = 's' , TMVA::DNN::SOFTSIGN = 'S' ,
  TMVA::DNN::GAUSS = 'G' , TMVA::DNN::GAUSSCOMPLEMENT = 'C'
}
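
The activation functions above are identified by single-character codes. As a point of reference, the sketch below gives the textbook formulas usually associated with a few of these names; the authoritative definitions live in TMVA/NeuralNet.icc, so treat these formulas as illustrative assumptions rather than the library code.

    // Standalone sketch: textbook formulas commonly associated with some of the
    // activation names above. Not the TMVA implementation; see TMVA/NeuralNet.icc
    // for the definitions actually used.
    #include <cmath>
    #include <cstdio>

    double linear   (double x) { return x; }                          // LINEAR
    double relu     (double x) { return x > 0.0 ? x : 0.0; }          // RELU
    double sigmoid  (double x) { return 1.0 / (1.0 + std::exp(-x)); } // SIGMOID
    double softsign (double x) { return x / (1.0 + std::fabs(x)); }   // SOFTSIGN
    double gauss    (double x) { return std::exp(-x * x); }           // GAUSS (assumed form)

    int main()
    {
       for (double x : {-2.0, 0.0, 2.0})
          std::printf("x=% .1f  tanh=% .3f  relu=% .3f  sigmoid=% .3f\n",
                      x, std::tanh(x), relu(x), sigmoid(x));
       return 0;
    }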
 
enum class  TMVA::DNN::EnumRegularization { TMVA::DNN::NONE , TMVA::DNN::L1 , TMVA::DNN::L2 , TMVA::DNN::L1MAX }
 
enum  TMVA::DNN::MinimizerType { TMVA::DNN::fSteepest }
 list all the minimizer types More...
 
enum class  TMVA::DNN::ModeErrorFunction { TMVA::DNN::SUMOFSQUARES = 'S' , TMVA::DNN::CROSSENTROPY = 'C' , TMVA::DNN::CROSSENTROPY_MUTUALEXCLUSIVE = 'M' }
 error functions to be chosen from More...
 
enum class  TMVA::DNN::ModeOutput { TMVA::DNN::FETCH }
 
enum class  TMVA::DNN::ModeOutputValues : int { TMVA::DNN::DIRECT = 0x01 , TMVA::DNN::SIGMOID = 0x02 , TMVA::DNN::SOFTMAX = 0x04 , TMVA::DNN::BATCHNORMALIZATION = 0x08 }
 
enum class  TMVA::DNN::WeightInitializationStrategy { TMVA::DNN::XAVIER , TMVA::DNN::TEST , TMVA::DNN::LAYERSIZE , TMVA::DNN::XAVIERUNIFORM }
 weight initialization strategies to be chosen from More...
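
XAVIER is commonly understood as drawing initial weights from a zero-mean Gaussian whose width shrinks with the fan-in of a node, and XAVIERUNIFORM as the uniform-range variant of the same idea. The sketch below illustrates that convention; the exact scaling used by TMVA is not shown on this page, so the 1/sqrt(fan-in) width here is an assumption.

    // Sketch of a Xavier-style Gaussian initialization for one layer.
    // Assumption: sigma = 1/sqrt(fanIn); TMVA's exact scaling may differ.
    #include <cmath>
    #include <random>
    #include <vector>

    std::vector<double> xavierInit(std::size_t fanIn, std::size_t fanOut, std::mt19937& gen)
    {
       std::normal_distribution<double> dist(0.0, 1.0 / std::sqrt(static_cast<double>(fanIn)));
       std::vector<double> weights(fanIn * fanOut); // one weight per connection
       for (double& w : weights)
          w = dist(gen);
       return weights;
    }

    int main()
    {
       std::mt19937 gen(42);
       auto w = xavierInit(100, 50, gen); // weights of a 100 -> 50 fully connected layer
       return w.size() == 100 * 50 ? 0 : 1;
    }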
 

Functions

template<typename ItValue , typename ItFunction >
void TMVA::DNN::applyFunctions (ItValue itValue, ItValue itValueEnd, ItFunction itFunction)
 
template<typename ItValue , typename ItFunction , typename ItInverseFunction , typename ItGradient >
void TMVA::DNN::applyFunctions (ItValue itValue, ItValue itValueEnd, ItFunction itFunction, ItInverseFunction itInverseFunction, ItGradient itGradient)
 
template<typename ItSource , typename ItWeight , typename ItTarget >
void TMVA::DNN::applyWeights (ItSource itSourceBegin, ItSource itSourceEnd, ItWeight itWeight, ItTarget itTargetBegin, ItTarget itTargetEnd)
 
template<typename ItSource , typename ItWeight , typename ItPrev >
void TMVA::DNN::applyWeightsBackwards (ItSource itCurrBegin, ItSource itCurrEnd, ItWeight itWeight, ItPrev itPrevBegin, ItPrev itPrevEnd)
 
template<typename LAYERDATA >
void TMVA::DNN::backward (LAYERDATA &prevLayerData, LAYERDATA &currLayerData)
 backward application of the weights (back-propagation of the error) More...
 
template<typename ItProbability , typename ItTruth , typename ItDelta , typename ItInvActFnc >
double TMVA::DNN::crossEntropy (ItProbability itProbabilityBegin, ItProbability itProbabilityEnd, ItTruth itTruthBegin, ItTruth, ItDelta itDelta, ItDelta itDeltaEnd, ItInvActFnc, double patternWeight)
 cross entropy error function More...
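
crossEntropy receives iterator ranges over the predicted probabilities, the corresponding truth values and the deltas to be filled, plus a per-pattern weight. The standalone sketch below shows only the error value of a weighted binary cross entropy; the delta and inverse-activation handling of the real function is omitted, and this is not the TMVA implementation.

    // Standalone sketch of a weighted binary cross-entropy sum over one pattern:
    // E = -w * sum_i [ t_i*ln(p_i) + (1 - t_i)*ln(1 - p_i) ].
    // Illustrates the error measure only; not TMVA::DNN::crossEntropy.
    #include <algorithm>
    #include <cmath>
    #include <cstdio>
    #include <vector>

    double crossEntropySketch(const std::vector<double>& prob,
                              const std::vector<double>& truth,
                              double patternWeight)
    {
       double error = 0.0;
       for (std::size_t i = 0; i < prob.size(); ++i) {
          const double p = std::clamp(prob[i], 1e-12, 1.0 - 1e-12); // keep the logs finite
          error -= truth[i] * std::log(p) + (1.0 - truth[i]) * std::log(1.0 - p);
       }
       return error * patternWeight;
    }

    int main()
    {
       std::printf("%f\n", crossEntropySketch({0.9, 0.2}, {1.0, 0.0}, 1.0));
       return 0;
    }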
 
template<typename LAYERDATA >
void TMVA::DNN::forward (const LAYERDATA &prevLayerData, LAYERDATA &currLayerData)
 apply the weights (and functions) in forward direction of the DNN More...
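
forward applies the weights connecting prevLayerData to currLayerData and then the activation functions; backward (above) propagates the deltas through the same weights in the opposite direction. The sketch below shows the forward step for one fully connected layer; the flat row-major weight layout and the tanh activation are assumptions for illustration, not the layout used by LayerData.

    // Standalone sketch of a fully connected forward step:
    //   curr_k = act( sum_j w(j,k) * prev_j ).
    // Row-major flat weight storage and tanh are assumptions; not TMVA::DNN::forward.
    #include <cmath>
    #include <vector>

    void forwardSketch(const std::vector<double>& prev,
                       const std::vector<double>& weights, // prev.size() * curr.size() entries
                       std::vector<double>& curr)
    {
       for (std::size_t k = 0; k < curr.size(); ++k) {
          double sum = 0.0;
          for (std::size_t j = 0; j < prev.size(); ++j)
             sum += weights[j * curr.size() + k] * prev[j];
          curr[k] = std::tanh(sum);
       }
    }

    int main()
    {
       std::vector<double> prev{0.5, -1.0};
       std::vector<double> weights{0.1, 0.2, 0.3, -0.4}; // 2 x 2 layer
       std::vector<double> curr(2, 0.0);
       forwardSketch(prev, weights, curr);
       return 0;
    }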
 
double TMVA::DNN::gaussDouble (double mean, double sigma)
 
template<typename T >
bool TMVA::DNN::isFlagSet (T flag, T value)
 
ModeOutputValues TMVA::DNN::operator& (ModeOutputValues lhs, ModeOutputValues rhs)
 
ModeOutputValues TMVA::DNN::operator&= (ModeOutputValues &lhs, ModeOutputValues rhs)
 
ModeOutputValues TMVA::DNN::operator| (ModeOutputValues lhs, ModeOutputValues rhs)
 
ModeOutputValues TMVA::DNN::operator|= (ModeOutputValues &lhs, ModeOutputValues rhs)
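
The bitwise operators above make ModeOutputValues usable as a set of flags, and isFlagSet tests whether a flag is present in a combined value. A short usage sketch (assumes a ROOT installation that provides TMVA/NeuralNet.h; the particular flag combination chosen is only for illustration):

    // Usage sketch: combining ModeOutputValues flags and querying them.
    // Requires ROOT with TMVA available.
    #include "TMVA/NeuralNet.h"
    #include <iostream>

    int main()
    {
       using TMVA::DNN::ModeOutputValues;

       // request sigmoid-transformed output together with batch normalization
       ModeOutputValues mode = ModeOutputValues::SIGMOID | ModeOutputValues::BATCHNORMALIZATION;

       bool wantsSigmoid = TMVA::DNN::isFlagSet(ModeOutputValues::SIGMOID, mode);
       bool wantsSoftmax = TMVA::DNN::isFlagSet(ModeOutputValues::SOFTMAX, mode);

       std::cout << "sigmoid: " << wantsSigmoid << "  softmax: " << wantsSoftmax << std::endl;
       return 0;
    }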
 
int TMVA::DNN::randomInt (int maxValue)
 
template<typename ItOutput , typename ItTruth , typename ItDelta , typename ItInvActFnc >
double TMVA::DNN::softMaxCrossEntropy (ItOutput itProbabilityBegin, ItOutput itProbabilityEnd, ItTruth itTruthBegin, ItTruth, ItDelta itDelta, ItDelta itDeltaEnd, ItInvActFnc, double patternWeight)
 soft-max cross-entropy error function (for mutually exclusive cross-entropy) More...
 
template<typename ItOutput , typename ItTruth , typename ItDelta , typename ItInvActFnc >
double TMVA::DNN::sumOfSquares (ItOutput itOutputBegin, ItOutput itOutputEnd, ItTruth itTruthBegin, ItTruth itTruthEnd, ItDelta itDelta, ItDelta itDeltaEnd, ItInvActFnc itInvActFnc, double patternWeight)
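
sumOfSquares computes a sum-of-squares error between the network output and the truth values for one pattern; judging from its delta and inverse-activation arguments it also prepares the back-propagation deltas, analogous to crossEntropy. The sketch below shows only the error term; the conventional factor 1/2 is an assumption, and this is not the TMVA implementation.

    // Standalone sketch of a weighted sum-of-squares error over one pattern:
    //   E = w/2 * sum_i (o_i - t_i)^2   (the 1/2 convention is an assumption).
    // Illustrates the error measure only; not TMVA::DNN::sumOfSquares.
    #include <cstdio>
    #include <vector>

    double sumOfSquaresSketch(const std::vector<double>& output,
                              const std::vector<double>& truth,
                              double patternWeight)
    {
       double error = 0.0;
       for (std::size_t i = 0; i < output.size(); ++i) {
          const double diff = output[i] - truth[i];
          error += diff * diff;
       }
       return 0.5 * error * patternWeight;
    }

    int main()
    {
       std::printf("%f\n", sumOfSquaresSketch({1.0, 0.0}, {1.0, 1.0}, 1.0));
       return 0;
    }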
 
double TMVA::DNN::uniformDouble (double minValue, double maxValue)
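
gaussDouble, uniformDouble and randomInt are small random-number helpers. The sketch below shows equivalent functionality written directly against <random>; the thread-local engine and the [0, maxValue) range of the integer helper are implementation assumptions, not a description of the library code.

    // Sketch of the random helpers in terms of <random>.
    // Thread-local engine and the [0, maxValue) convention are assumptions.
    #include <cstdio>
    #include <random>

    std::mt19937& engine()
    {
       thread_local std::mt19937 gen{std::random_device{}()};
       return gen;
    }

    double gaussDoubleSketch(double mean, double sigma)
    {
       return std::normal_distribution<double>(mean, sigma)(engine());
    }

    double uniformDoubleSketch(double minValue, double maxValue)
    {
       return std::uniform_real_distribution<double>(minValue, maxValue)(engine());
    }

    int randomIntSketch(int maxValue)
    {
       return std::uniform_int_distribution<int>(0, maxValue - 1)(engine());
    }

    int main()
    {
       std::printf("gauss=%f uniform=%f int=%d\n",
                   gaussDoubleSketch(0.0, 1.0), uniformDoubleSketch(0.0, 1.0), randomIntSketch(10));
       return 0;
    }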
 
template<typename LAYERDATA >
void TMVA::DNN::update (const LAYERDATA &prevLayerData, LAYERDATA &currLayerData, double factorWeightDecay, EnumRegularization regularization)
 update the node values More...
 
template<typename ItSource , typename ItDelta , typename ItTargetGradient , typename ItGradient >
void TMVA::DNN::update (ItSource itSource, ItSource itSourceEnd, ItDelta itTargetDeltaBegin, ItDelta itTargetDeltaEnd, ItTargetGradient itTargetGradientBegin, ItGradient itGradient)
 update the gradients More...
 
template<EnumRegularization Regularization, typename ItSource , typename ItDelta , typename ItTargetGradient , typename ItGradient , typename ItWeight >
void TMVA::DNN::update (ItSource itSource, ItSource itSourceEnd, ItDelta itTargetDeltaBegin, ItDelta itTargetDeltaEnd, ItTargetGradient itTargetGradientBegin, ItGradient itGradient, ItWeight itWeight, double weightDecay)
 update the gradients, using regularization More...
 
template<typename ItWeight >
double TMVA::DNN::weightDecay (double error, ItWeight itWeight, ItWeight itWeightEnd, double factorWeightDecay, EnumRegularization eRegularization)
 compute the weight decay for regularization (L1 or L2) More...
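
weightDecay adds the regularization penalty of the current weights to the error. The sketch below shows the textbook L1 and L2 penalty terms; how exactly factorWeightDecay scales them in TMVA is not shown on this page, so the 1/2 factor for L2 is an assumption.

    // Standalone sketch of L1/L2 weight decay added to an error value:
    //   L1: error + lambda * sum |w|      L2: error + lambda/2 * sum w^2
    // The precise scaling used by TMVA::DNN::weightDecay may differ.
    #include <cmath>
    #include <cstdio>
    #include <vector>

    enum class Regularization { NONE, L1, L2 };

    double weightDecaySketch(double error, const std::vector<double>& weights,
                             double factorWeightDecay, Regularization reg)
    {
       double penalty = 0.0;
       for (double w : weights) {
          if (reg == Regularization::L1)
             penalty += std::fabs(w);
          else if (reg == Regularization::L2)
             penalty += 0.5 * w * w;
       }
       return error + factorWeightDecay * penalty;
    }

    int main()
    {
       std::vector<double> w{0.5, -1.5, 2.0};
       std::printf("%f\n", weightDecaySketch(1.0, w, 1e-4, Regularization::L2));
       return 0;
    }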