// Begin_Html
/*
Interface to the Clermont-Ferrand artificial neural network
<p>
The CFMlpANN belongs to the class of Multilayer Perceptrons (MLP), which are
feed-forward networks according to the following propagation schema:<br>
<center>
<img vspace=10 src="gif/tmva_mlp.gif" align="bottom" alt="Schema for artificial neural network">
</center>
The input layer contains as many neurons as input variables used in the MVA.
The output layer contains two neurons for the signal and background
event classes. In between the input and output layers are a variable number
of <i>k</i> hidden layers with arbitrary numbers of neurons. (While the
structure of the input and output layers is determined by the problem, the
hidden layers can be configured by the user through the option string
of the method booking.) <br>
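A minimal booking sketch using the options declared by this class; the
<tt>factory</tt> object is illustrative and not part of this file:
<pre>
   // book the Clermont-Ferrand ANN with 3000 training cycles and two
   // hidden layers of N-1 and N-2 nodes (N = number of input variables)
   factory->BookMethod( TMVA::Types::kCFMlpANN, "CFMlpANN",
                        "NCycles=3000:HiddenLayers=N-1,N-2" );
</pre>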
As indicated in the sketch, all neuron inputs to a layer are linear
combinations of the neuron output of the previous layer. The transfer
from input to output within a neuron is performed by means of an "activation
function". In general, the activation function of a neuron can be
zero (deactivated), one (linear), or non-linear. The above example uses
a sigmoid activation function. The transfer function of the output layer
is usually linear. As a consequence, an ANN without a hidden layer should
give the same discrimination power as a linear discriminant analysis (Fisher).
With one hidden layer, the ANN computes a linear combination of
sigmoid functions. <br>
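For reference, the activation implemented in <tt>NN_fonc</tt> below is the symmetric
sigmoid f(u) = (1 - e<sup>-u/T</sup>)/(1 + e<sup>-u/T</sup>) = tanh(u/(2T)), where
<i>T</i> is a per-layer temperature parameter (<tt>fTempNN</tt>). <br>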
The learning method used by the CFMlpANN is stochastic only.
*/
// End_Html
#include "TMatrix.h"
#include "TObjString.h"
#include "Riostream.h"
#include <string>
#include <vector>
#include "TMVA/MethodCFMlpANN.h"
#include "TMVA/MethodCFMlpANN_def.h"
ClassImp(TMVA::MethodCFMlpANN);
namespace TMVA {
Int_t MethodCFMlpANN_nsel = 0;
}
TMVA::MethodCFMlpANN* TMVA::MethodCFMlpANN::fgThis = 0;
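// standard constructor: initialises the method, declares and processes the
// options, and, if a training tree is present, caches the normalised event
// values in fData and the class labels (1 = signal, 2 = background) in fClass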
TMVA::MethodCFMlpANN::MethodCFMlpANN( TString jobName, TString methodTitle, DataSet& theData,
TString theOption, TDirectory* theTargetDir )
: TMVA::MethodBase( jobName, methodTitle, theData, theOption, theTargetDir )
, fNodes(0)
{
InitCFMlpANN();
DeclareOptions();
ParseOptions();
ProcessOptions();
if (HasTrainingTree()) {
Int_t nEvtTrain = Data().GetNEvtTrain();
fData = new TMatrix( nEvtTrain, GetNvar() );
fClass = new vector<Int_t>( nEvtTrain );
Event& event = Data().Event();
Int_t ivar;
for (Int_t ievt=0; ievt<nEvtTrain; ievt++) {
ReadTrainingEvent(ievt);
(*fClass)[ievt] = (event.IsSignal()?1:2);
for (ivar=0; ivar<GetNvar(); ivar++) {
(*fData)( ievt, ivar ) = GetEventValNormalized(ivar);
}
}
fLogger << kVERBOSE << Data().GetNEvtSigTrain() << " signal and "
<< Data().GetNEvtBkgdTrain() << " background" << " events in trainingTree" << Endl;
}
}
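// constructor used when instantiating the method from an existing weight file;
// only the default initialisation and the option declaration are performed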
TMVA::MethodCFMlpANN::MethodCFMlpANN( DataSet & theData,
TString theWeightFile,
TDirectory* theTargetDir )
: TMVA::MethodBase( theData, theWeightFile, theTargetDir )
, fNodes(0)
{
InitCFMlpANN();
DeclareOptions();
}
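// define the configurable options of this method:
//   NCycles      - number of training cycles (default 3000)
//   HiddenLayers - hidden layer specification (default "N-1,N-2")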
void TMVA::MethodCFMlpANN::DeclareOptions()
{
DeclareOptionRef(fNcycles=3000,"NCycles","Number of training cycles");
DeclareOptionRef(fLayerSpec="N-1,N-2","HiddenLayers","Specification of the hidden layers");
}
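// decode the option string: the comma-separated HiddenLayers specification is
// translated into the fNodes array, where a leading "N" (or "n") stands for
// the number of input variables; the input layer always has GetNvar() nodes
// and the output layer has 2 nodes (signal and background)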
void TMVA::MethodCFMlpANN::ProcessOptions()
{
MethodBase::ProcessOptions();
fNodes = new Int_t[100];
fNlayers = 2;
int currentHiddenLayer = 1;
TString layerSpec(fLayerSpec);
while(layerSpec.Length()>0) {
TString sToAdd = "";
if(layerSpec.First(',')<0) {
sToAdd = layerSpec;
layerSpec = "";
} else {
sToAdd = layerSpec(0,layerSpec.First(','));
layerSpec = layerSpec(layerSpec.First(',')+1,layerSpec.Length());
}
int nNodes = 0;
if(sToAdd.BeginsWith("N") || sToAdd.BeginsWith("n")) { sToAdd.Remove(0,1); nNodes = GetNvar(); }
nNodes += atoi(sToAdd);
fNodes[currentHiddenLayer++] = nNodes;
fNlayers++;
}
fNodes[0] = GetNvar();
fNodes[fNlayers-1] = 2;
fLogger << kINFO << "use " << fNcycles << " training cycles" << Endl;
fLogger << kINFO << "use configuration (nodes per layer): in=";
for (Int_t i=0; i<fNlayers-1; i++) fLogger << kINFO << fNodes[i] << ":";
fLogger << kINFO << fNodes[fNlayers-1] << "=out" << Endl;
}
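// default initialisation: set method name and type, reset the network
// pointers, and register this instance for the static data interface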
void TMVA::MethodCFMlpANN::InitCFMlpANN( void )
{
SetMethodName( "CFMlpANN" );
SetMethodType( TMVA::Types::kCFMlpANN );
SetTestvarName();
fNeuronNN = 0;
fWNN = 0;
fWwNN = 0;
fYNN = 0;
fTempNN = 0;
fXmaxNN = 0;
fXminNN = 0;
fgThis = this;
TMVA::MethodCFMlpANN_nsel = 0;
}
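// destructor: free the cached training data and the network arrays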
TMVA::MethodCFMlpANN::~MethodCFMlpANN( void )
{
delete fData;
delete fClass;
delete[] fNodes;
delete[] fNeuronNN;
delete[] fWNN;
delete[] fWwNN;
delete[] fYNN;
delete[] fTempNN;
delete[] fXmaxNN;
delete[] fXminNN;
}
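// training: pack the network configuration into temporaries and invoke the
// external CFMlpANN training routine Train_nn (not available on Windows)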
void TMVA::MethodCFMlpANN::Train( void )
{
if (!CheckSanity()) fLogger << kFATAL << "<Train> sanity check failed" << Endl;
Double_t* dumDat = 0;
Int_t* ntrain = new Int_t(Data().GetNEvtTrain());
Int_t* ntest = new Int_t(0);
Int_t* nvar = new Int_t(GetNvar());
Int_t* nlayers = new Int_t(fNlayers);
Int_t* nodes = new Int_t[*nlayers];
for (Int_t i=0; i<*nlayers; i++) nodes[i] = fNodes[i];
Int_t* ncycles = new Int_t(fNcycles);
#ifndef R__WIN32
Train_nn( dumDat, dumDat, ntrain, ntest, nvar, nlayers, nodes, ncycles );
#else
fLogger << kWARNING << "<Train> sorry CFMlpANN is not yet implemented on Windows" << Endl;
#endif
delete[] nodes;
delete ntrain;
delete ntest;
delete nvar;
delete ncycles;
delete nlayers;
}
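// returns the CFMlpANN output for the current event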
Double_t TMVA::MethodCFMlpANN::GetMvaValue()
{
Bool_t isOK = kTRUE;
static vector<Double_t> inputVec = vector<Double_t>( GetNvar() );
for (Int_t ivar=0; ivar<GetNvar(); ivar++)
inputVec[ivar] = GetEventValNormalized(ivar);
Double_t myMVA = EvalANN( &inputVec, isOK );
if (!isOK) fLogger << kFATAL << "<EvalANN> returns (!isOK) for event " << Endl;
return myMVA;
}
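// evaluate the network: clip each input to the [xmin,xmax] range seen during
// training, rescale it to [-1,1], run the forward pass and map the first
// output neuron from [-1,1] to [0,1]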
Double_t TMVA::MethodCFMlpANN::EvalANN( vector<Double_t>* inVar, Bool_t& isOK )
{
Double_t* xeev = new Double_t[GetNvar()];
for (Int_t ivar=0; ivar<GetNvar(); ivar++) xeev[ivar] = (*inVar)[ivar];
isOK = kTRUE;
for (Int_t jvar=0; jvar<GetNvar(); jvar++) {
if (fXmaxNN[jvar] < xeev[jvar] ) xeev[jvar] = fXmaxNN[jvar];
if (fXminNN[jvar] > xeev[jvar] ) xeev[jvar] = fXminNN[jvar];
if (fXmaxNN[jvar] == fXminNN[jvar]) {
isOK = kFALSE;
xeev[jvar] = 0;
}
else {
xeev[jvar] = xeev[jvar] - ((fXmaxNN[jvar] + fXminNN[jvar])/2);
xeev[jvar] = xeev[jvar] / ((fXmaxNN[jvar] - fXminNN[jvar])/2);
}
}
NN_ava( xeev );
delete [] xeev;
return 0.5*(1.0 + fYNN[fLayermNN-1][0]);
}
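// forward propagation: each neuron receives the weighted sum of the previous
// layer's outputs plus a bias (fWwNN) and applies the activation function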
void TMVA::MethodCFMlpANN::NN_ava( Double_t* xeev )
{
for (Int_t ivar=0; ivar<fNeuronNN[0]; ivar++) fYNN[0][ivar] = xeev[ivar];
for (Int_t layer=0; layer<fLayermNN-1; layer++) {
for (Int_t j=0; j<fNeuronNN[layer+1]; j++) {
Double_t x( 0 );
for (Int_t k=0; k<fNeuronNN[layer]; k++)
x = x + fYNN[layer][k]*fWNN[layer+1][j][k];
x = x + fWwNN[layer+1][j];
fYNN[layer+1][j] = NN_fonc( layer+1, x );
}
}
}
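// activation function of layer i: symmetric sigmoid
// f(u) = (1 - exp(-u/T))/(1 + exp(-u/T)), saturated at +-1 for |u/T| > 170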
Double_t TMVA::MethodCFMlpANN::NN_fonc( Int_t i, Double_t u ) const
{
Double_t f(0);
if (u/fTempNN[i] > 170) f = +1;
else if (u/fTempNN[i] < -170) f = -1;
else {
Double_t yy = exp(-u/fTempNN[i]);
f = (1 - yy)/(1 + yy);
}
return f;
}
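// write the network weights to the output stream, forwarding the contents of
// the Fortran-style common-block structures to WriteNNWeightsToStream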
void TMVA::MethodCFMlpANN::WriteWeightsToStream( std::ostream & o ) const
{
WriteNNWeightsToStream( o, fParam_1.nvar, fParam_1.lclass,
fVarn_1.xmax, fVarn_1.xmin,
fParam_1.layerm, neur_1.neuron,
neur_1.w, neur_1.ww, fDel_1.temp );
}
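// read back the weight file: variable ranges, network geometry, weights,
// biases and layer temperatures, allocating the corresponding arrays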
void TMVA::MethodCFMlpANN::ReadWeightsFromStream( istream & istr )
{
TString var;
Int_t nva(0), lclass(0);
istr >> nva >> lclass;
if (GetNvar() != nva) {
fLogger << kFATAL << "<ReadWeightsFromStream> mismatch in number of variables" << Endl;
}
else {
if (lclass != 2) {
fLogger << kFATAL << "<ReadWeightsFromStream> mismatch in number of classes" << Endl;
}
else {
if (istr.eof( )) {
fLogger << kFATAL << "<ReadWeightsFromStream> reached EOF prematurely " << Endl;
}
else {
fXmaxNN = new Double_t[GetNvar()];
fXminNN = new Double_t[GetNvar()];
for (Int_t ivar=0; ivar<GetNvar(); ivar++)
istr >> fXmaxNN[ivar] >> fXminNN[ivar];
istr >> fLayermNN;
fNeuronNN = new Int_t [fLayermNN];
fWNN = new Double_t**[fLayermNN];
fWwNN = new Double_t* [fLayermNN];
fYNN = new Double_t* [fLayermNN];
fTempNN = new Double_t [fLayermNN];
Int_t layer(0);
for (layer=0; layer<fLayermNN; layer++) {
istr >> fNeuronNN[layer];
Int_t neuN = fNeuronNN[layer];
fWNN [layer] = new Double_t*[neuN];
fWwNN[layer] = new Double_t [neuN];
fYNN [layer] = new Double_t [neuN];
if (layer > 0)
for (Int_t neu=0; neu<neuN; neu++)
fWNN[layer][neu] = new Double_t[fNeuronNN[layer-1]];
}
const Int_t nchar( 100 );
char* dumchar = new char[nchar];
for (layer=0; layer<fLayermNN-1; layer++) {
Int_t nq = fNeuronNN[layer+1]/10;
Int_t nr = fNeuronNN[layer+1] - nq*10;
Int_t kk(0);
if (nr==0) kk = nq;
else kk = nq+1;
for (Int_t k=0; k<kk; k++) {
Int_t jmin = 10*(k+1) - 10;
Int_t jmax = 10*(k+1) - 1;
if (fNeuronNN[layer+1]-1<jmax) jmax = fNeuronNN[layer+1]-1;
for (Int_t j=jmin; j<=jmax; j++) istr >> fWwNN[layer+1][j];
for (Int_t i=0; i<fNeuronNN[layer]; i++)
for (Int_t l=jmin; l<=jmax; l++) istr >> fWNN[layer+1][l][i];
istr.getline( dumchar, nchar );
}
}
for (layer=0; layer<fLayermNN; layer++) {
istr.getline( dumchar, nchar );
istr.getline( dumchar, nchar );
istr >> fTempNN[layer];
}
}
}
}
if (GetNvar() != fNeuronNN[0]) {
fLogger << kFATAL << "<ReadWeightsFromStream> mismatch in zeroth layer: "
<< GetNvar() << " " << fNeuronNN[0] << Endl;
}
}
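// data interface called during training to fetch one training event at a
// time: returns the class label and the normalised variable values of the
// event indexed by the MethodCFMlpANN_nsel counter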
Int_t TMVA::MethodCFMlpANN::DataInterface( Double_t* , Double_t* ,
Int_t* , Int_t* ,
Int_t* , Int_t* nvar,
Double_t* xpg, Int_t* iclass, Int_t* ikend )
{
*ikend = 0;
TMVA::MethodCFMlpANN* opt = TMVA::MethodCFMlpANN::This();
if (0 == xpg) {
fLogger << kFATAL << "ERROR in MethodCFMlpANN_DataInterface zero pointer xpg" << Endl;
}
if (*nvar != opt->GetNvar()) {
fLogger << kFATAL << "ERROR in MethodCFMlpANN_DataInterface mismatch in num of variables: "
<< *nvar << " " << opt->GetNvar() << Endl;
}
*iclass = (int)opt->GetClass( TMVA::MethodCFMlpANN_nsel );
for (Int_t ivar=0; ivar<opt->GetNvar(); ivar++)
xpg[ivar] = (double)opt->GetData( TMVA::MethodCFMlpANN_nsel, ivar );
++TMVA::MethodCFMlpANN_nsel;
return 0;
}
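// write the weights in the fixed text format expected by
// ReadWeightsFromStream; the w_ref/ww_ref macros map the flat Fortran-style
// arrays onto (layer, neuron) indices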
void TMVA::MethodCFMlpANN::WriteNNWeightsToStream( std::ostream & o, Int_t nva, Int_t lclass,
const Double_t* xmaxNN, const Double_t* xminNN,
Int_t layermNN, const Int_t* neuronNN,
const Double_t* wNN, const Double_t* wwNN,
const Double_t* tempNN ) const
{
#define w_ref(a_1,a_2,a_3) wNN [((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define ww_ref(a_1,a_2) wwNN[(a_2)*max_nLayers_ + a_1 - 7]
o << nva << " " << lclass << endl;
if (lclass != 2) {
fLogger << kFATAL << "<WriteNNWeightsToStream> mismatch in number of classes" << Endl;
}
else {
for (Int_t ivar=0; ivar<nva; ivar++)
o << xmaxNN[ivar] << " " << xminNN[ivar] << endl;
o << layermNN << endl;
Int_t layer(0);
for (layer=0; layer<layermNN; layer++) {
o << neuronNN[layer] << " ";
}
o << endl;
for (layer=1; layer<=layermNN-1; layer++) {
Int_t nq = neuronNN[layer]/10;
Int_t nr = neuronNN[layer] - nq*10;
Int_t kk(0);
if (nr==0) kk = nq;
else kk = nq+1;
for (Int_t k=1; k<=kk; k++) {
Int_t jmin = 10*k - 9;
Int_t jmax = 10*k;
Int_t i, j;
if (neuronNN[layer]<jmax) jmax = neuronNN[layer];
for (j=jmin; j<=jmax; j++) o << ww_ref(layer + 1, j) << " ";
o << endl;
for (i=1; i<=neuronNN[layer-1]; i++) {
for (j=jmin; j<=jmax; j++) o << w_ref(layer + 1, j, i) << " ";
o << endl;
}
o << endl << endl;
}
}
for (layer=0; layer<layermNN; layer++) {
o << endl << endl;
o << tempNN[layer] << endl;
}
}
}
#undef w_ref
#undef ww_ref