* *
**********************************************************************************/
// Begin_Html
/*
Interface to the Clermont-Ferrand artificial neural network
<p>
The CFMlpANN belongs to the class of Multilayer Perceptrons (MLP), which are
feed-forward networks according to the following propagation schema:<br>
<center>
<img vspace=10 src="gif/tmva_mlp.gif" align="bottom" alt="Schema for artificial neural network">
</center>
The input layer contains as many neurons as input variables used in the MVA.
The output layer contains two neurons for the signal and background
event classes. In between the input and output layers are a variable number
of <i>k</i> hidden layers with arbitrary numbers of neurons. (While the
structure of the input and output layers is determined by the problem, the
hidden layers can be configured by the user through the option string
of the method booking.) <br>
As indicated in the sketch, all neuron inputs to a layer are linear
combinations of the neuron output of the previous layer. The transfer
from input to output within a neuron is performed by means of an "activation
function". In general, the activation function of a neuron can be
zero (deactivated), one (linear), or non-linear. The above example uses
a sigmoid activation function. The transfer function of the output layer
is usually linear. As a consequence: an ANN without hidden layer should
give identical discrimination power as a linear discriminant analysis (Fisher).
In case of one hidden layer, the ANN computes a linear combination of
sigmoid functions. <br>
The learning method used by the CFMlpANN is only stochastic.
*/
// End_Html
#include "TMatrix.h"
#include "TObjString.h"
#include "Riostream.h"
#include <string>
#include "TMVA/MethodCFMlpANN.h"
#include "TMVA/MethodCFMlpANN_def.h"
#include "TMVA/Tools.h"
ClassImp(TMVA::MethodCFMlpANN)

namespace TMVA {
// Global event counter used by the Fortran/C training callback
// (DataInterface): index of the next training event to deliver.
// Reset in InitCFMlpANN, incremented once per delivered event.
Int_t MethodCFMlpANN_nsel = 0;
}

// Static this-pointer: gives the static Fortran/C callbacks
// (DataInterface, WriteNNWeightsToFile) access to the current
// method instance. Set in InitCFMlpANN.
TMVA::MethodCFMlpANN* TMVA::MethodCFMlpANN::fgThis = 0;
// Standard constructor: parses the option string, books the network layout
// (number of cycles and nodes per layer), and caches the normalised training
// events in fData (variables) and fClass (signal=1 / background=2).
TMVA::MethodCFMlpANN::MethodCFMlpANN( TString jobName, vector<TString>* theVariables,
TTree* theTree, TString theOption, TDirectory* theTargetDir )
: TMVA::MethodBase( jobName, theVariables, theTree, theOption, theTargetDir )
{
InitCFMlpANN();
// option string format: "n_cycles:n_nodes_hidden1:n_nodes_hidden2:..."
// fall back to a default configuration if the option string is empty
if (fOptions.Sizeof()<2) {
fOptions = "3000:N-1:N-2";
cout << "--- " << GetName() << ": problems with options; using default: "
<< fOptions << endl;
}
// first entry of "nodes" is the number of training cycles, the remaining
// entries are the hidden-layer node counts
vector<Int_t>* nodes = ParseOptionString( fOptions, fNvar, new vector<Int_t> );
if (nodes->size() < 1) {
cout << "--- " << GetName() << ": Error: wrong number of arguments"
<< " in options string: " << fOptions
<< " | required format is: n_cycles:n_layers" << endl;
exit(1);
}
fNcycles = (*nodes)[0];
// total layers = input layer + hidden layers + output layer
fNlayers = 2 + (nodes->size() - 1);
fNodes = new Int_t[fNlayers];
// input layer has one node per variable, output layer has two nodes
// (signal and background); hidden layers need at least 2 nodes each
fNodes[0] = fNvar;
fNodes[fNlayers-1] = 2;
for (Int_t i=1; i<fNlayers-1; i++) fNodes[i] = ((*nodes)[i] < 2) ? 2 : (*nodes)[i];
cout << "--- " << GetName() << ": use " << fNcycles << " training cycles" << endl;
cout << "--- " << GetName() << ": use configuration (nodes per layer): in:";
for (Int_t i=0; i<fNlayers; i++) cout << fNodes[i] << ":";
cout << "out" << endl;
// cache the training events (if a training tree is available)
if (0 != fTrainingTree) {
// the tree must contain exactly the input variables plus the "type" branch
if (fTrainingTree->GetListOfBranches()->GetEntries() - 1 != fNvar) {
cout << "--- " << GetName() << ": Error: mismatch in number of variables"
<< " --> exit(1)" << endl;
exit(1);
}
fNevt = fTrainingTree->GetEntries();
fData = new TMatrix( fNevt, fNvar );
fClass = new vector<Int_t>( fNevt );
// first pass: count signal ("type"==1) and background events
fNsig = 0;
fNbgd = 0;
for (Int_t ievt = 0; ievt < fNevt; ievt++)
if ((Int_t)TMVA::Tools::GetValue( fTrainingTree, ievt, "type" ) == 1)
++fNsig;
else
++fNbgd;
if (fNsig + fNbgd != fNevt) {
cout << "--- " << GetName() << ": Error: mismatch in number of events"
<< " --> exit(1)" << endl;
exit(1);
}
// second pass: store class index (1=signal, 2=background) and the
// variable values, normalised via __N__ to the [xmin,xmax] range
Int_t isig = 0, ibgd = 0, ivar;
for (Int_t ievt=0; ievt<fNevt; ievt++) {
if ((Int_t)TMVA::Tools::GetValue( fTrainingTree, ievt, "type" ) == 1) {
(*fClass)[ievt] = 1;
++isig;
}
else {
(*fClass)[ievt] = 2;
++ibgd;
}
for (ivar=0; ivar<fNvar; ivar++) {
Double_t x = TMVA::Tools::GetValue( fTrainingTree, ievt, (*fInputVars)[ivar] );
(*fData)( ievt, ivar ) = __N__( x, GetXminNorm( ivar ), GetXmaxNorm( ivar ) );
}
}
if (Verbose())
cout << "--- " << GetName() << " <verbose>: "
<< isig << " signal and " << ibgd << " background"
<< " events in trainingTree" << endl;
}
else {
// no training tree: nothing to cache
fNevt = 0;
fNsig = 0;
fNbgd = 0;
}
delete nodes;
}
// Constructor used when setting up the ANN from a previously written
// weight file (application phase); performs default initialisation only.
TMVA::MethodCFMlpANN::MethodCFMlpANN( vector<TString> *theVariables,
                                      TString theWeightFile,
                                      TDirectory* theTargetDir )
   : TMVA::MethodBase( theVariables, theWeightFile, theTargetDir )
{
   this->InitCFMlpANN();
}
// Default initialisation, shared by all constructors.
void TMVA::MethodCFMlpANN::InitCFMlpANN( void )
{
   // method identification
   fMethodName = "CFMlpANN";
   fMethod     = TMVA::Types::CFMlpANN;
   fTestvar    = fTestvarPrefix+GetMethodName();

   // the network structures are allocated later (constructor /
   // ReadWeightsFromFile); zero them so their state is well defined
   fNodes    = 0;
   fNeuronNN = 0;
   fWNN      = 0;
   fWwNN     = 0;
   fYNN      = 0;
   fTempNN   = 0;
   fXmaxNN   = 0;
   fXminNN   = 0;

   // no cached training events yet
   fNevt = 0;
   fNsig = 0;
   fNbgd = 0;

   // register this instance for the static Fortran/C callbacks and
   // reset the global event-delivery counter
   fgThis = this;
   TMVA::MethodCFMlpANN_nsel = 0;
}
// Destructor: releases the cached event data and all network structures.
TMVA::MethodCFMlpANN::~MethodCFMlpANN( void )
{
   delete fData;
   delete fClass;
   delete [] fNodes; // allocated with new[] in the constructor; was "delete fNodes" (UB)

   // deep-delete the per-layer structures allocated in ReadWeightsFromFile;
   // all of them are allocated together, and zero-initialised in
   // InitCFMlpANN, so a single null check tells us whether they exist
   if (fYNN != 0) {
      for (Int_t layer=0; layer<fLayermNN; layer++) {
         // connection weight matrices exist for all but the input layer
         if (layer > 0)
            for (Int_t neu=0; neu<fNeuronNN[layer]; neu++)
               delete [] fWNN[layer][neu];
         delete [] fWNN [layer];
         delete [] fWwNN[layer];
         delete [] fYNN [layer];
      }
   }
   delete [] fNeuronNN;
   delete [] fWNN;
   delete [] fWwNN;
   delete [] fYNN;
   delete [] fTempNN;
   delete [] fXmaxNN;
   delete [] fXminNN;
}
void TMVA::MethodCFMlpANN::Train( void )
{
if (!CheckSanity()) {
cout << "--- " << GetName() << ": Error in ::Train sanity check failed" << endl;
exit(1);
}
Double_t* dumDat = 0;
Int_t* ntrain = new Int_t(fNevt);
Int_t* ntest = new Int_t(0);
Int_t* nvar = new Int_t(fNvar);
Int_t* nlayers = new Int_t(fNlayers);
Int_t* nodes = new Int_t[*nlayers];
for (Int_t i=0; i<*nlayers; i++) nodes[i] = fNodes[i];
Int_t* ncycles = new Int_t(fNcycles);
#ifndef R__WIN32
Train_nn( dumDat, dumDat, ntrain, ntest, nvar, nlayers, nodes, ncycles );
#else
printf("Sorry Train_nn is not yet implemented on Windows\n");
#endif
delete nodes;
delete ntrain;
delete ntest;
delete nvar;
delete ncycles;
delete nlayers;
}
// Returns the CFMlpANN response for one event: the input variables are
// normalised via __N__ and fed to EvalANN, which maps the network output
// to [0,1]. Exits on evaluation failure (degenerate variable range).
Double_t TMVA::MethodCFMlpANN::GetMvaValue( TMVA::Event *e )
{
   Bool_t isOK = kTRUE;

   // stack vector instead of raw new/delete (leak-safe, same contents)
   vector<Double_t> inputVec( fNvar );
   for (Int_t ivar=0; ivar<fNvar; ivar++)
      inputVec[ivar] = __N__( e->GetData(ivar), GetXminNorm( ivar ), GetXmaxNorm( ivar ) );

   Double_t myMVA = EvalANN( &inputVec, isOK );
   if (!isOK) {
      cout << "--- " << GetName() << ": Problem in ::EvalANN (!isOK) for event " << e
           << " ==> exit(1)"
           << endl;
      exit(1);
   }
   return myMVA;
}
// Evaluates the network for the given input vector. The inputs are clamped
// to the [fXminNN, fXmaxNN] range seen at training time and rescaled to
// [-1,1] before forward propagation; isOK is set false if any variable has
// a degenerate (xmin == xmax) range. Returns the first output-layer neuron
// response mapped from [-1,1] to [0,1].
Double_t TMVA::MethodCFMlpANN::EvalANN( vector<Double_t>* inVar, Bool_t& isOK )
{
   // local working copy (clamped and rescaled in place); RAII instead of
   // the former raw new[]/delete[] pair
   vector<Double_t> xeev( *inVar );

   isOK = kTRUE;
   for (Int_t jvar=0; jvar<fNvar; jvar++) {
      // clamp to the training range
      if (fXmaxNN[jvar] < xeev[jvar] ) xeev[jvar] = fXmaxNN[jvar];
      if (fXminNN[jvar] > xeev[jvar] ) xeev[jvar] = fXminNN[jvar];
      if (fXmaxNN[jvar] == fXminNN[jvar]) {
         // degenerate range: the variable carries no information
         isOK = kFALSE;
         xeev[jvar] = 0;
      }
      else {
         // centre and rescale to [-1,1]
         xeev[jvar] = xeev[jvar] - ((fXmaxNN[jvar] + fXminNN[jvar])/2);
         xeev[jvar] = xeev[jvar] / ((fXmaxNN[jvar] - fXminNN[jvar])/2);
      }
   }

   NN_ava( &xeev[0] );

   // map the output neuron response from [-1,1] to [0,1]
   return 0.5*(1.0 + fYNN[fLayermNN-1][0]);
}
// Forward propagation: feeds xeev into the input layer and propagates
// layer by layer; each neuron's output is the activation (NN_fonc) of the
// weighted sum of the previous layer's outputs plus the neuron's bias.
void TMVA::MethodCFMlpANN::NN_ava( Double_t* xeev )
{
   // input layer simply copies the (normalised) event variables
   for (Int_t in=0; in<fNeuronNN[0]; in++) fYNN[0][in] = xeev[in];

   // hidden and output layers
   for (Int_t lay=1; lay<fLayermNN; lay++) {
      for (Int_t neu=0; neu<fNeuronNN[lay]; neu++) {
         Double_t sum( 0 );
         for (Int_t prev=0; prev<fNeuronNN[lay-1]; prev++)
            sum = sum + fYNN[lay-1][prev]*fWNN[lay][neu][prev];
         sum = sum + fWwNN[lay][neu]; // bias term added last, as before
         fYNN[lay][neu] = NN_fonc( lay, sum );
      }
   }
}
// Activation function of layer i: a sigmoid-shaped transfer
// (1-e^-x)/(1+e^-x) of the input scaled by the layer temperature,
// saturating at +/-1 for large |argument| to avoid exp() overflow.
Double_t TMVA::MethodCFMlpANN::NN_fonc( Int_t i, Double_t u ) const
{
   const Double_t arg = u/fTempNN[i];

   if (arg >  170) return +1; // saturate: exp(-arg) underflows
   if (arg < -170) return -1; // saturate: exp(-arg) overflows

   const Double_t ex = exp(-arg);
   return (1 - ex)/(1 + ex);
}
// Intentionally empty: the weights are written by the static
// WriteNNWeightsToFile, which the training code calls back directly.
void TMVA::MethodCFMlpANN::WriteWeightsToFile( void )
{
}
// Reads the network back from the weight file written by
// WriteNNWeightsToFile:
//   1) per-variable name + normalisation range (order must match booking)
//   2) number of variables and classes (sanity checks)
//   3) per-variable internal ranges, architecture (layers, neurons/layer)
//   4) biases and weights (groups of up to 10 neurons per line), and the
//      per-layer temperatures
// Exits on any inconsistency.
void TMVA::MethodCFMlpANN::ReadWeightsFromFile( void )
{
   TString fname = GetWeightFileName();
   cout << "--- " << GetName() << ": reading weight file: " << fname << endl;

   Bool_t isOK = kTRUE;
   ifstream fin( fname ); // stack stream: closed/destroyed automatically
   if (!fin.good( )) {
      cout << "--- " << GetName() << ": Error in ::ReadWeightsFromFile: "
           << "unable to open input file: " << fname << endl;
      isOK = kFALSE;
   }
   else {
      // variable names and normalisation ranges
      TString var;
      Double_t xmin, xmax;
      for (Int_t ivar=0; ivar<fNvar; ivar++) {
         fin >> var >> xmin >> xmax;
         if (var != (*fInputVars)[ivar]) {
            cout << "--- " << GetName() << ": Error while reading weight file; "
                 << "unknown variable: " << var << " at position: " << ivar << ". "
                 << "Expected variable: " << (*fInputVars)[ivar] << " ==> abort" << endl;
            exit(1);
         }
         this->SetXminNorm( ivar, xmin );
         this->SetXmaxNorm( ivar, xmax );
      }

      Int_t nva(0), lclass(0);
      fin >> nva >> lclass;
      if (fNvar != nva) {
         cout << "--- " << GetName() << ": Error in ::ReadWeightsFromFile: "
              << "mismatch in number of variables" << endl;
         // was silently ignored before, leading to a null fNeuronNN
         // dereference below
         isOK = kFALSE;
      }
      else if (lclass != 2) { // only two classes (signal/background) supported
         cout << "--- " << GetName() << ": Error in ::ReadWeightsFromFile: "
              << "mismatch in number of classes" << endl;
         isOK = kFALSE;
      }
      else if (fin.eof( )) {
         cout << "--- " << GetName() << ": Error in ::ReadWeightsFromFile: "
              << "EOF while reading input file: " << fname << endl;
         isOK = kFALSE;
      }
      else {
         // per-variable ranges used by EvalANN for clamping/rescaling
         fXmaxNN = new Double_t[fNvar];
         fXminNN = new Double_t[fNvar];
         for (Int_t ivar=0; ivar<fNvar; ivar++)
            fin >> fXmaxNN[ivar] >> fXminNN[ivar];

         // architecture: number of layers and neurons per layer
         fin >> fLayermNN;
         fNeuronNN = new Int_t [fLayermNN];
         fWNN      = new Double_t**[fLayermNN];
         fWwNN     = new Double_t* [fLayermNN];
         fYNN      = new Double_t* [fLayermNN];
         fTempNN   = new Double_t [fLayermNN];
         Int_t layer(0);
         for (layer=0; layer<fLayermNN; layer++) {
            fin >> fNeuronNN[layer];
            Int_t neuN = fNeuronNN[layer];
            fWNN [layer] = new Double_t*[neuN];
            fWwNN[layer] = new Double_t [neuN];
            fYNN [layer] = new Double_t [neuN];
            if (layer > 0)
               for (Int_t neu=0; neu<neuN; neu++)
                  fWNN[layer][neu] = new Double_t[fNeuronNN[layer-1]];
         }

         // weights/biases: written in groups of up to 10 target neurons
         const Int_t nchar( 100 );
         char* dumchar = new char[nchar];
         for (layer=0; layer<fLayermNN-1; layer++) {
            Int_t nq = fNeuronNN[layer+1]/10;
            Int_t nr = fNeuronNN[layer+1] - nq*10;
            Int_t kk(0);
            if (nr==0) kk = nq;
            else       kk = nq+1;
            for (Int_t k=0; k<kk; k++) {
               Int_t jmin = 10*(k+1) - 10;
               Int_t jmax = 10*(k+1) - 1;
               if (fNeuronNN[layer+1]-1<jmax) jmax = fNeuronNN[layer+1]-1;
               // biases of the target-layer neurons in this group
               for (Int_t j=jmin; j<=jmax; j++) fin >> fWwNN[layer+1][j];
               // connection weights from every source-layer neuron
               for (Int_t i=0; i<fNeuronNN[layer]; i++)
                  for (Int_t l=jmin; l<=jmax; l++) fin >> fWNN[layer+1][l][i];
               fin.getline( dumchar, nchar ); // skip the rest of the line
            }
         }

         // per-layer temperatures, separated by two layout lines each
         for (layer=0; layer<fLayermNN; layer++) {
            fin.getline( dumchar, nchar );
            fin.getline( dumchar, nchar );
            fin >> fTempNN[layer];
         }
         delete [] dumchar; // was leaked
      }
      fin.close( );
   }

   // bail out before touching fNeuronNN, which is only allocated on success
   if (!isOK) exit(1);

   if (fNvar != fNeuronNN[0]) {
      cout << "--- " << GetName() << ": Error in ::ReadWeightsFromFile: mismatch in zeroth layer:"
           << fNvar << " " << fNeuronNN[0] << " ==> exit(1)" << endl;
      exit(1);
   }
}
// Callback invoked by the Fortran/C training code: delivers the next cached
// training event (class index and variable values) through the static
// this-pointer, and advances the global event counter. Returns 0.
Int_t TMVA::MethodCFMlpANN::DataInterface( Double_t* , Double_t* ,
                                           Int_t* , Int_t* ,
                                           Int_t* , Int_t* nvar,
                                           Double_t* xpg, Int_t* iclass, Int_t* ikend )
{
   *ikend = 0; // flag: event successfully delivered

   // access the current method instance
   TMVA::MethodCFMlpANN* opt = TMVA::MethodCFMlpANN::This();

   // sanity: caller must provide the output buffer...
   if (0 == xpg) {
      cout << "*** ERROR in MethodCFMlpANN_DataInterface zero pointer xpg ==> exit(1)"
           << endl;
      exit(1);
   }
   // ...and agree on the number of variables
   if (*nvar != opt->GetNvar()) {
      cout << "*** ERROR in MethodCFMlpANN_DataInterface mismatch in num of variables: "
           << *nvar << " " << opt->GetNvar()
           << " ==> exit(1)"
           << endl;
      exit(1);
   }

   // copy class index and variable values of the current event
   const Int_t ievt = TMVA::MethodCFMlpANN_nsel;
   *iclass = (int)opt->GetClass( ievt );
   for (Int_t ivar=0; ivar<opt->GetNvar(); ivar++)
      xpg[ivar] = (double)opt->GetData( ievt, ivar );

   ++TMVA::MethodCFMlpANN_nsel; // next call delivers the next event

   return 0;
}
// Static routine called back from the training code once training is done:
// writes the normalisation ranges, architecture, biases, weights and layer
// temperatures to the weight file, in the format expected by
// ReadWeightsFromFile.
void TMVA::MethodCFMlpANN::WriteNNWeightsToFile( Int_t nva, Int_t lclass,
Double_t* xmaxNN, Double_t* xminNN,
Int_t layermNN, Int_t* neuronNN,
Double_t* wNN, Double_t* wwNN, Double_t* tempNN )
{
// accessors into the flat weight arrays passed from the training code;
// the constant offsets (187, 7) presumably compensate for the Fortran
// 1-based multi-dimensional array layout -- TODO confirm against the
// CFMlpANN Fortran source before changing anything here
#define w_ref(a_1,a_2,a_3) wNN [((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define ww_ref(a_1,a_2) wwNN[(a_2)*max_nLayers_ + a_1 - 7]
// access the current method instance through the static this-pointer
TMVA::MethodCFMlpANN* opt = TMVA::MethodCFMlpANN::This();
TString fname = opt->GetWeightFileName();
TString className = "TMVA::MethodCFMlpANN";
cout << "--- " << className << ": creating weight file: " << fname << endl;
Bool_t isOK = kTRUE;
ofstream* fout = new ofstream( fname );
if (!fout->good( )) {
cout << "--- " << className << ": Error in ::WriteNNWeightsToFile: "
<< "unable to open input file: " << fname << endl;
isOK = kFALSE;
}
else {
// header: one line per variable with name and normalisation range
for (Int_t ivar=0; ivar<opt->GetNvar(); ivar++) {
TString var = (*opt->GetInputVars())[ivar];
*fout << var << " " << opt->GetXminNorm( var ) << " " << opt->GetXmaxNorm( var ) << endl;
}
// number of variables and classes
*fout << nva << " " << lclass << endl;
if (lclass != 2) {
cout << "--- " << className << ": Error in ::WriteNNWeightsToFile: "
<< "mismatch in number of classes" << endl;
}
else {
if (fout->eof( )) {
cout << "--- " << className << ": Error in ::WriteNNWeightsToFile: "
<< "EOF while writing output file: " << fname << endl;
}
else {
// per-variable internal ranges (max first, then min)
for (Int_t ivar=0; ivar<nva; ivar++)
*fout << xmaxNN[ivar] << " " << xminNN[ivar] << endl;
// architecture: layer count, then neurons per layer on one line
*fout << layermNN << endl;;
Int_t layer(0);
for (layer=0; layer<layermNN; layer++) {
*fout << neuronNN[layer] << " ";
}
*fout << endl;
// weights/biases, written in groups of up to 10 neurons per line
// (1-based indexing here, matching the w_ref/ww_ref macros)
for (layer=1; layer<=layermNN-1; layer++) {
Int_t nq = neuronNN[layer]/10;
Int_t nr = neuronNN[layer] - nq*10;
Int_t kk(0);
if (nr==0) kk = nq;
else kk = nq+1;
for (Int_t k=1; k<=kk; k++) {
Int_t jmin = 10*k - 9;
Int_t jmax = 10*k;
Int_t i, j;
if (neuronNN[layer]<jmax) jmax = neuronNN[layer];
// biases of the target-layer neurons in this group
for (j=jmin; j<=jmax; j++) *fout << ww_ref(layer + 1, j) << " ";
*fout << endl;
// connection weights from every source-layer neuron
for (i=1; i<=neuronNN[layer-1]; i++) {
for (j=jmin; j<=jmax; j++) *fout << w_ref(layer + 1, j, i) << " ";
*fout << endl;
}
*fout << endl << endl;
}
}
// per-layer temperatures, each preceded by two blank lines
// (ReadWeightsFromFile skips them with getline)
for (layer=0; layer<layermNN; layer++) {
*fout << endl << endl;
*fout << tempNN[layer] << endl;
}
}
}
fout->close();
}
delete fout;
if (!isOK) exit(1);
}
// Writes method-specific monitoring histograms to the target file
// (this method currently only announces the destination directory).
void TMVA::MethodCFMlpANN::WriteHistosToFile( void )
{
cout << "--- " << GetName() << ": write " << GetName()
<< " special histos to file: " << fBaseDir->GetPath() << endl;
}
// (stray THtml-generated page footer, commented out so the file compiles)
// ROOT page - Class index - Class Hierarchy - Top of the page
// This page has been automatically generated. If you have any comments or suggestions about the page layout send a mail to ROOT support, or contact the developers with any questions or problems regarding ROOT.