// @(#)root/tmva $Id$    
// Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss 

/**********************************************************************************
 * Project: TMVA - a ROOT-integrated toolkit for multivariate data analysis      *
 * Package: TMVA                                                                  *
 * Class  : MethodCFMlpANN                                                        *
 * Web    : http://tmva.sourceforge.net                                           *
 *                                                                                *
 * Description:                                                                   *
 *      Interface for the Clermont-Ferrand artificial neural network.             *
 *      The ANN code has been translated from FORTRAN77 (f2c);                    *
 *      see files: MethodCFMlpANN_f2c_mlpl3.cpp                                   *
 *                 MethodCFMlpANN_f2c_datacc.cpp                                  *
 *                                                                                *
 *      --------------------------------------------------------------------      *
 *      Reference for the original FORTRAN version:                               *
 *           Authors  : J. Proriol and contributions from ALEPH-Clermont-Fd       *
 *                      Team members. Contact : gaypas@afal11.cern.ch             *
 *                                                                                *
 *           Copyright: Laboratoire de Physique Corpusculaire                     *
 *                      Universite Blaise Pascal, IN2P3/CNRS                      *
 *      --------------------------------------------------------------------      *
 *                                                                                *
 * Usage: options are given through Factory:                                      *
 *            factory->BookMethod( "MethodCFMlpANN", OptionsString );             *
 *                                                                                *
 *        where:                                                                  *
 *            TString OptionsString = "n_cycles:n_nodes_2:n_nodes_3:..."          *
 *                                                                                *
 *        i.e., the number of training cycles followed by the number of          *
 *        nodes in each hidden layer; "N" may be used for nvars (e.g. "N-1").     *
 *        The default is "3000:N-1:N-2", i.e. n_training_cycles = 3000 and        *
 *        n_layers = 4. Note that the number of hidden layers in the NN is        *
 *                                                                                *
 *            n_hidden_layers = n_layers - 2                                      *
 *                                                                                *
 *        since there is one input and one output layer. The number of            *
 *        nodes (neurons) is predefined to be                                     *
 *                                                                                *
 *           n_nodes[i] = nvars + 1 - i (for i = 1..n_layers-1), while the        *
 *           output layer (i = n_layers) is fixed to 2 nodes,                     *
 *                                                                                *
 *        with nvars being the number of variables used in the NN.                *
 *        Hence, the default case is: n_neurons(layer 1 (input)) : nvars          *
 *                                    n_neurons(layer 2 (hidden)): nvars-1        *
 *                                    n_neurons(layer 3 (hidden)): nvars-2        *
 *                                    n_neurons(layer 4 (out))   : 2              *
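 *                                                                                *
 *        For example, with nvars = 4 the default configuration therefore        *
 *        yields the layer sizes 4 : 3 : 2 : 2.                                   *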
 *                                                                                *
 *        This artificial neural network usually needs a relatively large         *
 *        number of cycles to converge (8000 and more). Overtraining can          *
 *        be efficiently tested by comparing the signal and background            *
 *        output of the NN for the events that were used for training with       *
 *        that for an independent data sample of equal properties. If the        *
 *        separation performance is significantly better for the training        *
 *        sample, the NN has fitted statistical fluctuations and is hence        *
 *        overtrained. In this case, the number of cycles should be reduced,     *
 *        or the size of the training sample increased.                          *
 *                                                                                *
 * Authors (alphabetical):                                                        *
 *      Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland              *
 *      Xavier Prudent  <prudent@lapp.in2p3.fr>  - LAPP, France                   *
 *      Helge Voss      <Helge.Voss@cern.ch>     - MPI-K Heidelberg, Germany      *
 *      Kai Voss        <Kai.Voss@cern.ch>       - U. of Victoria, Canada         *
 *                                                                                *
 * Copyright (c) 2005:                                                            *
 *      CERN, Switzerland                                                         * 
 *      U. of Victoria, Canada                                                    * 
 *      MPI-K Heidelberg, Germany                                                 * 
 *      LAPP, Annecy, France                                                      *
 *                                                                                *
 * Redistribution and use in source and binary forms, with or without             *
 * modification, are permitted according to the terms listed in LICENSE           *
 * (http://tmva.sourceforge.net/LICENSE)                                          *
 *                                                                                *
 **********************************************************************************/
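
// Usage sketch (illustrative only; the Factory interface and the enum
// TMVA::Types::kCFMlpANN are assumed from standard TMVA usage, and the
// option string follows the "n_cycles:layerspec" format described above):
//
//    TFile* outputFile = TFile::Open( "TMVA.root", "RECREATE" );
//    TMVA::Factory* factory = new TMVA::Factory( "TMVAClassification", outputFile );
//    // ... declare input variables, signal and background trees ...
//    factory->BookMethod( TMVA::Types::kCFMlpANN, "CFMlpANN", "3000:N-1:N-2" );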

#ifndef ROOT_TMVA_MethodCFMlpANN
#define ROOT_TMVA_MethodCFMlpANN

//////////////////////////////////////////////////////////////////////////
//                                                                      //
// MethodCFMlpANN                                                       //
//                                                                      //
// Interface for the Clermont-Ferrand artificial neural network         //
//                                                                      //
//////////////////////////////////////////////////////////////////////////

#include <iosfwd>

#ifndef ROOT_TMVA_MethodBase
#include "TMVA/MethodBase.h"
#endif
#ifndef ROOT_TMVA_MethodCFMlpANN_Utils
#include "TMVA/MethodCFMlpANN_Utils.h"
#endif
#ifndef ROOT_TMatrixF
#include "TMatrixF.h"
#endif

namespace TMVA {

   class MethodCFMlpANN : public MethodBase, MethodCFMlpANN_Utils {

   public:

      MethodCFMlpANN( const TString& jobName,
                      const TString& methodTitle, 
                      DataSetInfo& theData,
                      const TString& theOption = "3000:N-1:N-2",
                      TDirectory* theTargetDir = 0 );

      MethodCFMlpANN( DataSetInfo& theData, 
                      const TString& theWeightFile,  
                      TDirectory* theTargetDir = NULL );

      virtual ~MethodCFMlpANN( void );
    
      virtual Bool_t HasAnalysisType( Types::EAnalysisType type, UInt_t numberClasses, UInt_t /*numberTargets*/ );

      // training method
      void Train( void );

      using MethodBase::ReadWeightsFromStream;

      // write weights to file
      void AddWeightsXMLTo( void* parent ) const;

      // read weights from file
      void ReadWeightsFromStream( std::istream& istr );
      void ReadWeightsFromXML( void* wghtnode );
      // calculate the MVA value
      Double_t GetMvaValue( Double_t* err = 0, Double_t* errUpper = 0 );

      // data accessors for external functions
      Double_t GetData ( Int_t isel, Int_t ivar ) const { return (*fData)(isel, ivar); }
      Int_t    GetClass( Int_t ievt             ) const { return (*fClass)[ievt]; }

      // static pointer to this object (required for external functions)
      static MethodCFMlpANN* This( void );
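
      // Usage sketch (hypothetical call site): the f2c-translated routines are
      // plain functions, so they reach the event data through the static
      // This() pointer, e.g.
      //
      //    Double_t x = TMVA::MethodCFMlpANN::This()->GetData( ievt, ivar );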

      // ranking of input variables (not implemented; returns a null pointer)
      const Ranking* CreateRanking() { return 0; }

   protected:

      // make ROOT-independent C++ class for classifier response (classifier-specific implementation)
      void MakeClassSpecific( std::ostream&, const TString& ) const;

      // header and auxiliary classes
      void MakeClassSpecificHeader( std::ostream&, const TString& = "" ) const;

      // get help message text
      void GetHelpMessage() const;

      // data interface between the translated f2c routines and this class
      Int_t DataInterface( Double_t*, Double_t*, Int_t*, Int_t*, Int_t*, Int_t*,
                           Double_t*, Int_t*, Int_t* );
  
   private:

      void PrintWeights( std::ostream & o ) const;

      // the option handling methods
      void DeclareOptions();
      void ProcessOptions();
      
      static MethodCFMlpANN* fgThis; // static pointer to this object, returned by This()

      // look-up tables
      TMatrixF           *fData;  // the (event, variable) data matrix
      std::vector<Int_t> *fClass; // the event class (1=signal, 2=background)

      Int_t         fNlayers;   // number of layers (including input and output layers)
      Int_t         fNcycles;   // number of training cycles
      Int_t*        fNodes;     // number of nodes per layer

      // additional member variables for the standalone evaluation phase of the NN
      Double_t**    fYNN;       // weights
      TString       fLayerSpec; // the hidden layer specification string

      // auxiliary member functions
      Double_t EvalANN( std::vector<Double_t>&, Bool_t& isOK );
      void     NN_ava ( Double_t* );
      Double_t NN_fonc( Int_t, Double_t ) const;
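
      // Evaluation sketch (illustrative; the exact transfer function is
      // defined in the implementation files): for each layer l > 1 a node's
      // output is the transfer function applied to the weighted sum of the
      // previous layer's outputs,
      //
      //    y[l][j] = NN_fonc( l, sum_i( w[l][i][j] * y[l-1][i] ) )
      //
      // with NN_ava() performing this propagation through all layers and
      // EvalANN() wrapping it for a single event's input variable vector.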

      // default initialisation 
      void Init( void );

      ClassDef(MethodCFMlpANN,0) // Interface for the Clermont-Ferrand artificial neural network
   };

} // namespace TMVA

#endif