library: libMLP
#include "TMultiLayerPerceptron.h"

TMultiLayerPerceptron


class description - header file - source file
viewCVS header - viewCVS source

class TMultiLayerPerceptron: public TObject

Inheritance Inherited Members Includes Libraries
Class Charts

Function Members (Methods)

Display options:
Show inherited
Show non-public
public:
TMultiLayerPerceptron()
TMultiLayerPerceptron(const char* layout, TTree* data = 0, const char* training = "Entry$%2==0", const char* test = "", TNeuron::ENeuronType type = TNeuron::kSigmoid, const char* extF = "", const char* extD = "")
TMultiLayerPerceptron(const char* layout, TTree* data, TEventList* training, TEventList* test, TNeuron::ENeuronType type = TNeuron::kSigmoid, const char* extF = "", const char* extD = "")
TMultiLayerPerceptron(const char* layout, const char* weight, TTree* data = 0, const char* training = "Entry$%2==0", const char* test = "", TNeuron::ENeuronType type = TNeuron::kSigmoid, const char* extF = "", const char* extD = "")
TMultiLayerPerceptron(const char* layout, const char* weight, TTree* data, TEventList* training, TEventList* test, TNeuron::ENeuronType type = TNeuron::kSigmoid, const char* extF = "", const char* extD = "")
virtual~TMultiLayerPerceptron()
voidTObject::AbstractMethod(const char* method) const
virtual voidTObject::AppendPad(Option_t* option = "")
virtual voidTObject::Browse(TBrowser* b)
static TClass*Class()
virtual const char*TObject::ClassName() const
virtual voidTObject::Clear(Option_t* = "")
virtual TObject*TObject::Clone(const char* newname = "") const
virtual Int_tTObject::Compare(const TObject* obj) const
voidComputeDEDw() const
virtual voidTObject::Copy(TObject& object) const
virtual voidTObject::Delete(Option_t* option = "")
virtual Int_tTObject::DistancetoPrimitive(Int_t px, Int_t py)
virtual voidDraw(Option_t* option = "")
virtual voidTObject::DrawClass() const
virtual TObject*TObject::DrawClone(Option_t* option = "") const
voidDrawResult(Int_t index = 0, Option_t* option = "test") const
virtual voidTObject::Dump() const
voidDumpWeights(Option_t* filename) const
virtual voidTObject::Error(const char* method, const char* msgfmt) const
Double_tEvaluate(Int_t index, Double_t* params) const
virtual voidTObject::Execute(const char* method, const char* params, Int_t* error = 0)
virtual voidTObject::Execute(TMethod* method, TObjArray* params, Int_t* error = 0)
virtual voidTObject::ExecuteEvent(Int_t event, Int_t px, Int_t py)
voidExport(Option_t* filename = "NNfunction", Option_t* language = "C++") const
virtual voidTObject::Fatal(const char* method, const char* msgfmt) const
virtual TObject*TObject::FindObject(const char* name) const
virtual TObject*TObject::FindObject(const TObject* obj) const
Double_tGetDelta() const
virtual Option_t*TObject::GetDrawOption() const
static Long_tTObject::GetDtorOnly()
Double_tGetEpsilon() const
Double_tGetError(Int_t event) const
Double_tGetError(TMultiLayerPerceptron::EDataSet set) const
Double_tGetEta() const
Double_tGetEtaDecay() const
virtual const char*TObject::GetIconName() const
virtual const char*TObject::GetName() const
virtual char*TObject::GetObjectInfo(Int_t px, Int_t py) const
static Bool_tTObject::GetObjectStat()
virtual Option_t*TObject::GetOption() const
Int_tGetReset() const
TStringGetStructure() const
Double_tGetTau() const
virtual const char*TObject::GetTitle() const
TNeuron::ENeuronTypeGetType() const
virtual UInt_tTObject::GetUniqueID() const
virtual Bool_tTObject::HandleTimer(TTimer* timer)
virtual ULong_tTObject::Hash() const
virtual voidTObject::Info(const char* method, const char* msgfmt) const
virtual Bool_tTObject::InheritsFrom(const char* classname) const
virtual Bool_tTObject::InheritsFrom(const TClass* cl) const
virtual voidTObject::Inspect() const
voidTObject::InvertBit(UInt_t f)
virtual TClass*IsA() const
virtual Bool_tTObject::IsEqual(const TObject* obj) const
virtual Bool_tTObject::IsFolder() const
Bool_tTObject::IsOnHeap() const
virtual Bool_tTObject::IsSortable() const
Bool_tTObject::IsZombie() const
voidLoadWeights(Option_t* filename = "")
virtual voidTObject::ls(Option_t* option = "") const
voidTObject::MayNotUse(const char* method) const
virtual Bool_tTObject::Notify()
static voidTObject::operator delete(void* ptr)
static voidTObject::operator delete(void* ptr, void* vp)
static voidTObject::operator delete[](void* ptr)
static voidTObject::operator delete[](void* ptr, void* vp)
void*TObject::operator new(size_t sz)
void*TObject::operator new(size_t sz, void* vp)
void*TObject::operator new[](size_t sz)
void*TObject::operator new[](size_t sz, void* vp)
TObject&TObject::operator=(const TObject& rhs)
virtual voidTObject::Paint(Option_t* option = "")
virtual voidTObject::Pop()
virtual voidTObject::Print(Option_t* option = "") const
voidRandomize() const
virtual Int_tTObject::Read(const char* name)
virtual voidTObject::RecursiveRemove(TObject* obj)
voidTObject::ResetBit(UInt_t f)
Double_tResult(Int_t event, Int_t index = 0) const
virtual voidTObject::SaveAs(const char* filename = "", Option_t* option = "") const
virtual voidTObject::SavePrimitive(ostream& out, Option_t* option = "")
voidTObject::SetBit(UInt_t f)
voidTObject::SetBit(UInt_t f, Bool_t set)
voidSetData(TTree*)
voidSetDelta(Double_t delta)
virtual voidTObject::SetDrawOption(Option_t* option = "")
static voidTObject::SetDtorOnly(void* obj)
voidSetEpsilon(Double_t eps)
voidSetEta(Double_t eta)
voidSetEtaDecay(Double_t ed)
voidSetEventWeight(const char*)
voidSetLearningMethod(TMultiLayerPerceptron::ELearningMethod method)
static voidTObject::SetObjectStat(Bool_t stat)
voidSetReset(Int_t reset)
voidSetTau(Double_t tau)
voidSetTestDataSet(TEventList* test)
voidSetTestDataSet(const char* test)
voidSetTrainingDataSet(TEventList* train)
voidSetTrainingDataSet(const char* train)
virtual voidTObject::SetUniqueID(UInt_t uid)
virtual voidShowMembers(TMemberInspector& insp, char* parent)
virtual voidStreamer(TBuffer& b)
voidStreamerNVirtual(TBuffer& b)
virtual voidTObject::SysError(const char* method, const char* msgfmt) const
Bool_tTObject::TestBit(UInt_t f) const
Int_tTObject::TestBits(UInt_t f) const
voidTrain(Int_t nEpoch, Option_t* option = "text")
virtual voidTObject::UseCurrentStyle()
virtual voidTObject::Warning(const char* method, const char* msgfmt) const
virtual Int_tTObject::Write(const char* name = "0", Int_t option = 0, Int_t bufsize = 0)
virtual Int_tTObject::Write(const char* name = "0", Int_t option = 0, Int_t bufsize = 0) const
protected:
voidAttachData()
voidBFGSDir(TMatrixD&, Double_t*)
voidBuildNetwork()
voidConjugateGradientsDir(Double_t*, Double_t)
Double_tDerivDir(Double_t*)
virtual voidTObject::DoError(int level, const char* location, const char* fmt, va_list va) const
boolGetBFGSH(TMatrixD&, TMatrixD&, TMatrixD&)
Double_tGetCrossEntropy() const
Double_tGetCrossEntropyBinary() const
voidGetEntry(Int_t) const
Double_tGetSumSquareError() const
Bool_tLineSearch(Double_t*, Double_t*)
voidTObject::MakeZombie()
voidMLP_Batch(Double_t*)
voidMLP_Stochastic(Double_t*)
voidSetGammaDelta(TMatrixD&, TMatrixD&, Double_t*)
voidSteepestDir(Double_t*)
private:
voidBuildFirstLayer(TString&)
voidBuildHiddenLayers(TString&)
voidBuildLastLayer(TString&, Int_t)
voidExpandStructure()
voidMLP_Line(Double_t*, Double_t*, Double_t)
voidShuffle(Int_t*, Int_t) const

Data Members

public:
enum ELearningMethod { kStochastic
kBatch
kSteepestDescent
kRibierePolak
kFletcherReeves
kBFGS
};
enum EDataSet { kTraining
kTest
};
enum TObject::EStatusBits { kCanDelete
kMustCleanup
kObjInCanvas
kIsReferenced
kHasUUID
kCannotPick
kNoContextMenu
kInvalidObject
};
enum TObject::[unnamed] { kIsOnHeap
kNotDeleted
kZombie
kBitMask
kSingleKey
kOverwrite
kWriteDelete
};
private:
TTree*fData! pointer to the tree used as datasource
Int_tfCurrentTree! index of the current tree in a chain
Double_tfCurrentTreeWeight! weight of the current tree in a chain
TObjArrayfNetworkCollection of all the neurons in the network
TObjArrayfFirstLayerCollection of the input neurons; subset of fNetwork
TObjArrayfLastLayerCollection of the output neurons; subset of fNetwork
TObjArrayfSynapsesCollection of all the synapses in the network
TStringfStructureString containing the network structure
TStringfWeightString containing the event weight
TNeuron::ENeuronTypefTypeType of hidden neurons
TNeuron::ENeuronTypefOutTypeType of output neurons
TStringfextFString containing the function name
TStringfextDString containing the derivative name
TEventList*fTraining! EventList defining the events in the training dataset
TEventList*fTest! EventList defining the events in the test dataset
TMultiLayerPerceptron::ELearningMethodfLearningMethod! The Learning Method
TTreeFormula*fEventWeight! formula representing the event weight
TTreeFormulaManager*fManager! TTreeFormulaManager for the weight and neurons
Double_tfEta! Eta - used in stochastic minimisation - Default=0.1
Double_tfEpsilon! Epsilon - used in stochastic minimisation - Default=0.
Double_tfDelta! Delta - used in stochastic minimisation - Default=0.
Double_tfEtaDecay! EtaDecay - Eta *= EtaDecay at each epoch - Default=1.
Double_tfTau! Tau - used in line search - Default=3.
Double_tfLastAlpha! internal parameter used in line search
Int_tfReset! number of epochs between two resets of the search direction to the steepest descent - Default=50
Bool_tfTrainingOwner! internal flag whether one has to delete fTraining or not
Bool_tfTestOwner! internal flag whether one has to delete fTest or not

Class Description


 TMultiLayerPerceptron

 This class describes a neural network.
 There are facilities to train the network and use the output.

 The input layer is made of inactive neurons (returning the
 optionally normalized input) and output neurons are linear.
 The type of hidden neurons is free, the default being sigmoids.
 (One should still try to pass normalized inputs, e.g. between [0.,1])

 The basic input is a TTree and two (training and test) TEventLists.
 Input and output neurons are assigned a value computed for each event
 with the same possibilities as for TTree::Draw().
 Events may be weighted individually or via TTree::SetWeight().
 6 learning methods are available: kStochastic, kBatch,
 kSteepestDescent, kRibierePolak, kFletcherReeves and kBFGS.

 This implementation, written by C. Delaere, is *inspired* from
 the mlpfit package from J.Schwindling et al. with some extensions:
   * the algorithms are globally the same
   * in TMultiLayerPerceptron, there is no limitation on the number of 
     layers/neurons, while MLPFIT was limited to 2 hidden layers
   * TMultiLayerPerceptron allows you to save the network in a ROOT file, and 
     provides more export functionalities
   * TMultiLayerPerceptron gives more flexibility regarding the normalization of 
     inputs/outputs
   * TMultiLayerPerceptron provides, thanks to Andrea Bocci, the possibility to 
     use cross-entropy errors, which allows one to train a network for pattern 
     classification based on Bayesian posterior probability.


Neural Networks are more and more used in various fields for data analysis and classification, both for research and commercial institutions. Some randomly chosen examples are:

More than 50% of neural networks are multilayer perceptrons. This implementation of multilayer perceptrons is inspired from the MLPfit package originally written by Jerome Schwindling. MLPfit remains one of the fastest tools for neural network studies, and this ROOT add-on will not try to compete on that. A clear and flexible Object Oriented implementation has been chosen over a faster but more difficult to maintain code. Nevertheless, the time penalty does not exceed a factor 2.

The multilayer perceptron is a simple feed-forward network with the following structure:

It is made of neurons characterized by a bias and weighted links between them (let's call those links synapses). The input neurons receive the inputs, normalize them and forward them to the first hidden layer.

Each neuron in any subsequent layer first computes a linear combination of the outputs of the previous layer. The output of the neuron is then function of that combination with f being linear for output neurons or a sigmoid for hidden layers. This is useful because of two theorems:

  1. A linear combination of sigmoids can approximate any continuous function.

  2. Trained with output = 1 for the signal and 0 for the background, the approximated function of inputs X is the probability of signal, knowing X.

The aim of all learning methods is to minimize the total error on a set of weighted examples. The error is defined as the sum in quadrature, divided by two, of the error on each individual output neuron.

In all methods implemented, one needs to compute the first derivative of that error with respect to the weights. Exploiting the well-known properties of the derivative, especially the derivative of compound functions, one can write:

This computation is called back-propagation of the errors. A loop over all examples is called an epoch.

Six learning methods are implemented.

Stochastic minimization: This is the most trivial learning method. This is the Robbins-Monro stochastic approximation applied to multilayer perceptrons. The weights are updated after each example according to the formula:

$w_{ij}(t+1) = w_{ij}(t) + \Delta w_{ij}(t)$

with

$\Delta w_{ij}(t) = -\eta \left( \frac{\partial e_p}{\partial w_{ij}} + \delta \right) + \epsilon \, \Delta w_{ij}(t-1)$

The parameters for this method are Eta, EtaDecay, Delta and Epsilon.

Steepest descent with fixed step size (batch learning): It is the same as the stochastic minimization, but the weights are updated after considering all the examples, with the total derivative dEdw. The parameters for this method are Eta, EtaDecay, Delta and Epsilon.

Steepest descent algorithm: Weights are set to the minimum along the line defined by the gradient. The only parameter for this method is Tau. Lower tau = higher precision = slower search. A value Tau = 3 seems reasonable.

Conjugate gradients with the Polak-Ribiere updating formula: Weights are set to the minimum along the line defined by the conjugate gradient. Parameters are Tau and Reset, which defines the epochs where the direction is reset to the steepest descent.

Conjugate gradients with the Fletcher-Reeves updating formula: Weights are set to the minimum along the line defined by the conjugate gradient. Parameters are Tau and Reset, which defines the epochs where the direction is reset to the steepest descent.

Broyden, Fletcher, Goldfarb, Shanno (BFGS) method: Implies the computation of an NxN matrix, but seems more powerful at least for networks with fewer than 300 weights. Parameters are Tau and Reset, which defines the epochs where the direction is reset to the steepest descent.

TMLP is built from 3 classes: TNeuron, TSynapse and TMultiLayerPerceptron. Only TMultiLayerPerceptron should be used explicitly by the user.

TMultiLayerPerceptron will take examples from a TTree given in the constructor. The network is described by a simple string: The input/output layers are defined by giving the expression for each neuron, separated by commas. Hidden layers are just described by the number of neurons. The layers are separated by semicolons. In addition, input/output layer formulas can be preceded by '@' (e.g "@out") if one wants to also normalize the data from the TTree. Input and outputs are taken from the TTree given as second argument. Expressions are evaluated as for TTree::Draw(); arrays are expanded into distinct neurons, one for each index. This can only be done for fixed-size arrays. One defines the training and test datasets by TEventLists.

Example: TMultiLayerPerceptron("x,y:10:5:f",inputTree);

Both the TTree and the TEventLists can be defined in the constructor, or later with the suited setter method. The lists used for training and test can be defined either explicitly, or via a string containing the formula to be used to define them, exactly as for a TCut.

The learning method is defined using the TMultiLayerPerceptron::SetLearningMethod() . Learning methods are :

xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

TMultiLayerPerceptron::kStochastic,
TMultiLayerPerceptron::kBatch,
TMultiLayerPerceptron::kSteepestDescent,
TMultiLayerPerceptron::kRibierePolak,
TMultiLayerPerceptron::kFletcherReeves,
TMultiLayerPerceptron::kBFGS

A weight can be assigned to events, either in the constructor or with TMultiLayerPerceptron::SetEventWeight(). In addition, the TTree weight

xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

is taken into account.

Finally, one starts the training with TMultiLayerPerceptron::Train(Int_t nepoch, Option_t* options). The first argument is the number of epochs while option is a string that can contain: "text" (simple text output), "graph" (evolving graphical training curves), "update=X" (step for the text/graph output update) or "+" (will skip the randomisation and start from the previous values). All combinations are available.

Example: net.Train(100,"text, graph, update=10").

When the neural net is trained, it can be used directly ( TMultiLayerPerceptron::Evaluate() ) or exported to a standalone C++ code ( TMultiLayerPerceptron::Export() ).

Finally, note that even if this implementation is inspired from the mlpfit code, the feature lists are not exactly matching:

In addition, the paw version of mlpfit had additional limitations on the number of neurons, hidden layers and inputs/outputs that does not apply to TMultiLayerPerceptron.
TMultiLayerPerceptron()
 Default constructor
TMultiLayerPerceptron(const char * layout, TTree * data, TEventList * training, TEventList * test, TNeuron::ENeuronType type, const char* extF, const char* extD)
 The network is described by a simple string:
 The input/output layers are defined by giving
 the branch names separated by commas.
 Hidden layers are just described by the number of neurons.
 The layers are separated by semicolons.
 Ex: "x,y:10:5:f"
 The output can be prepended by '@' if the variable has to be
 normalized.
 The output can be followed by '!' to use Softmax neurons for the
 output layer only.
 Ex: "x,y:10:5:c1,c2,c3!"
 Input and outputs are taken from the TTree given as second argument.
 training and test are the two TEventLists defining events
 to be used during the neural net training.
 Both the TTree and the TEventLists  can be defined in the constructor,
 or later with the suited setter method.
TMultiLayerPerceptron(const char * layout, const char * weight, TTree * data, TEventList * training, TEventList * test, TNeuron::ENeuronType type, const char* extF, const char* extD)
 The network is described by a simple string:
 The input/output layers are defined by giving
 the branch names separated by commas.
 Hidden layers are just described by the number of neurons.
 The layers are separated by semicolons.
 Ex: "x,y:10:5:f"
 The output can be prepended by '@' if the variable has to be
 normalized.
 The output can be followed by '!' to use Softmax neurons for the
 output layer only.
 Ex: "x,y:10:5:c1,c2,c3!"
 Input and outputs are taken from the TTree given as second argument.
 training and test are the two TEventLists defining events
 to be used during the neural net training.
 Both the TTree and the TEventLists  can be defined in the constructor,
 or later with the suited setter method.
TMultiLayerPerceptron(const char * layout, TTree * data, const char * training, const char * test, TNeuron::ENeuronType type, const char* extF, const char* extD)
 The network is described by a simple string:
 The input/output layers are defined by giving
 the branch names separated by commas.
 Hidden layers are just described by the number of neurons.
 The layers are separated by semicolons.
 Ex: "x,y:10:5:f"
 The output can be prepended by '@' if the variable has to be
 normalized.
 The output can be followed by '!' to use Softmax neurons for the
 output layer only.
 Ex: "x,y:10:5:c1,c2,c3!"
 Input and outputs are taken from the TTree given as second argument.
 training and test are two cuts (see TTreeFormula) defining events
 to be used during the neural net training and testing.
 Example: "Entry$%2", "(Entry$+1)%2".
 Both the TTree and the cut can be defined in the constructor,
 or later with the suited setter method.
TMultiLayerPerceptron(const char * layout, const char * weight, TTree * data, const char * training, const char * test, TNeuron::ENeuronType type, const char* extF, const char* extD)
 The network is described by a simple string:
 The input/output layers are defined by giving
 the branch names separated by commas.
 Hidden layers are just described by the number of neurons.
 The layers are separated by semicolons.
 Ex: "x,y:10:5:f"
 The output can be prepended by '@' if the variable has to be
 normalized.
 The output can be followed by '!' to use Softmax neurons for the
 output layer only.
 Ex: "x,y:10:5:c1,c2,c3!"
 Input and outputs are taken from the TTree given as second argument.
 training and test are two cuts (see TTreeFormula) defining events
 to be used during the neural net training and testing.
 Example: "Entry$%2", "(Entry$+1)%2".
 Both the TTree and the cut can be defined in the constructor,
 or later with the suited setter method.
~TMultiLayerPerceptron()
 Destructor
void SetData(TTree * data)
 Set the data source
void SetEventWeight(const char * branch)
 Set the event weight
void SetTrainingDataSet(TEventList* train)
 Sets the Training dataset.
 Those events will be used for the minimization
void SetTestDataSet(TEventList* test)
 Sets the Test dataset.
 Those events will not be used for the minimization but for control
void SetTrainingDataSet(const char * train)
 Sets the Training dataset.
 Those events will be used for the minimization.
 Note that the tree must be already defined.
void SetTestDataSet(const char * test)
 Sets the Test dataset.
 Those events will not be used for the minimization but for control.
 Note that the tree must be already defined.
void SetLearningMethod(TMultiLayerPerceptron::ELearningMethod method)
 Sets the learning method.
 Available methods are: kStochastic, kBatch,
 kSteepestDescent, kRibierePolak, kFletcherReeves and kBFGS.
 (look at the constructor for the complete description
 of learning methods and parameters)
void SetEta(Double_t eta)
 Sets Eta - used in stochastic minimisation
 (look at the constructor for the complete description
 of learning methods and parameters)
void SetEpsilon(Double_t eps)
 Sets Epsilon - used in stochastic minimisation
 (look at the constructor for the complete description
 of learning methods and parameters)
void SetDelta(Double_t delta)
 Sets Delta - used in stochastic minimisation
 (look at the constructor for the complete description
 of learning methods and parameters)
void SetEtaDecay(Double_t ed)
 Sets EtaDecay - Eta *= EtaDecay at each epoch
 (look at the constructor for the complete description
 of learning methods and parameters)
void SetTau(Double_t tau)
 Sets Tau - used in line search
 (look at the constructor for the complete description
 of learning methods and parameters)
void SetReset(Int_t reset)
 Sets number of epochs between two resets of the
 search direction to the steepest descent.
 (look at the constructor for the complete description
 of learning methods and parameters)
void GetEntry(Int_t entry)
 Load an entry into the network
void Train(Int_t nEpoch, Option_t * option)
 Train the network.
 nEpoch is the number of iterations.
 option can contain:
 - "text" (simple text output)
 - "graph" (evolving graphical training curves)
 - "update=X" (step for the text/graph output update)
 - "+" will skip the randomisation and start from the previous values.
 - "current" (draw in the current canvas)
 All combinations are available.
Double_t Result(Int_t event, Int_t index)
 Computes the output for a given event.
 Look at the output neuron designed by index.
Double_t GetError(Int_t event)
 Error on the output for a given event
Double_t GetError(TMultiLayerPerceptron::EDataSet set)
 Error on the whole dataset
Double_t GetSumSquareError()
 Error on the output for a given event
Double_t GetCrossEntropyBinary()
 Cross entropy error for sigmoid output neurons, for a given event
Double_t GetCrossEntropy()
 Cross entropy error for a softmax output neuron, for a given event
void ComputeDEDw()
 Compute the DEDw = sum on all training events of dedw for each weight
 normalized by the number of events.
void Randomize()
 Randomize the weights
void AttachData()
 Connects the TTree to Neurons in input and output
 layers. The formulas associated to each neuron are created
 and reported to the network formula manager.
 By default, the branch is not normalised since this would degrade
 performance for classification jobs.
 Normalisation can be requested by putting '@' in front of the formula.
void ExpandStructure()
 Expand the structure of the first layer
void BuildNetwork()
 Instantiates the network from the description
void BuildFirstLayer(TString & input)
 Instantiates the neurons in input
 Inputs are normalised and the type is set to kOff
 (simple forward of the formula value)
void BuildHiddenLayers(TString & hidden)
 Builds hidden layers.
void BuildLastLayer(TString & output, Int_t prev)
 Builds the output layer
 Neurons are linear combinations of input, by default.
 If the structure ends with "!", neurons are set up for classification,
 ie. with a sigmoid (1 neuron) or softmax (more neurons) activation function.
void DrawResult(Int_t index, Option_t * option)
 Draws the neural net output
 It produces an histogram with the output for the two datasets.
 Index is the number of the desired output neuron.
 "option" can contain:
 - test or train to select a dataset
 - comp to produce a X-Y comparison plot
 - nocanv to not create a new TCanvas for the plot
void DumpWeights(Option_t * filename)
 Dumps the weights to a text file.
 Set filename to "-" (default) to dump to the standard output
void LoadWeights(Option_t * filename)
 Loads the weights from a text file conforming to the format
 defined by DumpWeights.
Double_t Evaluate(Int_t index, Double_t *params)
 Returns the Neural Net for a given set of input parameters
 #parameters must equal #input neurons
void Export(Option_t * filename, Option_t * language)
 Exports the NN as a function for any non-ROOT-dependant code
 Supported languages are (so far): C++, FORTRAN and Python.
 This feature is also useful if you want to plot the NN as
 a function (TF1 or TF2).
void Shuffle(Int_t * index, Int_t n)
 Shuffle the Int_t index[n] in input.
 Input:
   index: the array to shuffle
   n: the size of the array
 Output:
   index: the shuffled indexes
 This method is used for stochastic training
void MLP_Stochastic(Double_t * buffer)
 One step for the stochastic method
 buffer should contain the previous dw vector and will be updated
void MLP_Batch(Double_t * buffer)
 One step for the batch (stochastic) method.
 DEDw should have been updated before calling this.
void MLP_Line(Double_t * origin, Double_t * dir, Double_t dist)
 Sets the weights to a point along a line
 Weights are set to [origin + (dist * dir)].
void SteepestDir(Double_t * dir)
 Sets the search direction to steepest descent.
bool LineSearch(Double_t * direction, Double_t * buffer)
 Search along the line defined by direction.
 buffer is not used but is updated with the new dw
 so that it can be used by a later stochastic step.
 It returns true if the line search fails.
void ConjugateGradientsDir(Double_t * dir, Double_t beta)
 Sets the search direction to conjugate gradient direction
 beta should be:
  ||g_{(t+1)}||^2 / ||g_{(t)}||^2                   (Fletcher-Reeves)
  g_{(t+1)} (g_{(t+1)}-g_{(t)}) / ||g_{(t)}||^2     (Ribiere-Polak)
bool GetBFGSH(TMatrixD & bfgsh, TMatrixD & gamma, TMatrixD & delta)
 Computes the hessian matrix using the BFGS update algorithm.
 from gamma (g_{(t+1)}-g_{(t)}) and delta (w_{(t+1)}-w_{(t)}).
 It returns true if such a direction could not be found
 (if gamma and delta are orthogonal).
void SetGammaDelta(TMatrixD & gamma, TMatrixD & delta, Double_t * buffer)
 Sets the gamma (g_{(t+1)}-g_{(t)}) and delta (w_{(t+1)}-w_{(t)}) vectors
 Gamma is computed here, so ComputeDEDw cannot have been called before,
 and delta is a direct translation of buffer into a TMatrixD.
Double_t DerivDir(Double_t * dir)
 scalar product between gradient and direction
 = derivative along direction
void BFGSDir(TMatrixD & bfgsh, Double_t * dir)
 Computes the direction for the BFGS algorithm as the product
 between the Hessian estimate (bfgsh) and the dir.
void Draw(Option_t * /*option*/)
 Draws the network structure.
 Neurons are depicted by a blue disk, and synapses by
 lines connecting neurons.
 The line width is proportional to the weight.
Double_t GetEta()
{ return fEta; }
Double_t GetEpsilon()
{ return fEpsilon; }
Double_t GetDelta()
{ return fDelta; }
Double_t GetEtaDecay()
{ return fEtaDecay; }
Double_t GetTau()
{ return fTau; }
Int_t GetReset()
{ return fReset; }
TString GetStructure()
{ return fStructure; }
TNeuron::ENeuronType GetType()
{ return fType; }

Author: Christophe.Delaere@cern.ch 20/07/03
Last update: root/mlp:$Name: $:$Id: TMultiLayerPerceptron.cxx,v 1.39 2006/11/27 17:10:33 brun Exp $
Copyright (C) 1995-2003, Rene Brun and Fons Rademakers. *


ROOT page - Class index - Class Hierarchy - Top of the page

This page has been automatically generated. If you have any comments or suggestions about the page layout send a mail to ROOT support, or contact the developers with any questions or problems regarding ROOT.