// @(#)root/tmva $Id$
// Author: Krzysztof Danielowski, Andreas Hoecker, Matt Jachowski, Kamil Kraszewski, Maciej Kruk, Peter Speckmayer, Joerg Stelzer, Eckhard v. Toerne, Jan Therhaag, Jiahang Zhong

/**********************************************************************************
 * Project: TMVA - a Root-integrated toolkit for multivariate data analysis       *
 * Package: TMVA                                                                  *
 * Class  : MethodMLP                                                             *
 * Web    : http://tmva.sourceforge.net                                           *
 *                                                                                *
 * Description:                                                                   *
 *      ANN Multilayer Perceptron class for the discrimination of signal          *
 *      from background. BFGS implementation based on TMultiLayerPerceptron       *
 *      class from ROOT (http://root.cern.ch).                                    *
 *                                                                                *
 * Authors (alphabetical):                                                        *
 *      Krzysztof Danielowski <danielow@cern.ch>       - IFJ & AGH, Poland        *
 *      Andreas Hoecker       <Andreas.Hocker@cern.ch> - CERN, Switzerland        *
 *      Matt Jachowski        <jachowski@stanford.edu> - Stanford University, USA *
 *      Kamil Kraszewski      <kalq@cern.ch>           - IFJ & UJ, Poland         *
 *      Maciej Kruk           <mkruk@cern.ch>          - IFJ & AGH, Poland        *
 *      Peter Speckmayer      <peter.speckmayer@cern.ch> - CERN, Switzerland      *
 *      Joerg Stelzer         <stelzer@cern.ch>        - DESY, Germany            *
 *      Jan Therhaag          <Jan.Therhaag@cern.ch>     - U of Bonn, Germany     *
 *      Eckhard v. Toerne     <evt@uni-bonn.de>          - U of Bonn, Germany     *
 *      Jiahang Zhong         <Jiahang.Zhong@cern.ch>  - Academia Sinica, Taipei  *
 *                                                                                *
 * Copyright (c) 2005-2011:                                                       *
 *      CERN, Switzerland                                                         *
 *      U. of Victoria, Canada                                                    *
 *      MPI-K Heidelberg, Germany                                                 *
 *      U. of Bonn, Germany                                                       *
 *                                                                                *
 * Redistribution and use in source and binary forms, with or without             *
 * modification, are permitted according to the terms listed in LICENSE           *
 * (http://tmva.sourceforge.net/LICENSE)                                          *
 **********************************************************************************/

//_______________________________________________________________________
//
// Multilayer Perceptron class built on MethodANNBase
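//
// Illustrative usage (a minimal sketch; the option values are examples only): the method
// is booked through the TMVA Factory with an option string composed of the options
// declared in DeclareOptions() below and in MethodANNBase, e.g.
//
//    factory->BookMethod( TMVA::Types::kMLP, "MLP",
//                         "!H:!V:NeuronType=tanh:HiddenLayers=N+5:TrainingMethod=BFGS:TestRate=10" );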
//_______________________________________________________________________

#include "TString.h"
#include <vector>
#include <cmath>
#include "TTree.h"
#include "Riostream.h"
#include "TFitter.h"
#include "TMatrixD.h"
#include "TMath.h"
#include "TFile.h"

#include "TMVA/ClassifierFactory.h"
#include "TMVA/Interval.h"
#include "TMVA/MethodMLP.h"
#include "TMVA/TNeuron.h"
#include "TMVA/TSynapse.h"
#include "TMVA/Timer.h"
#include "TMVA/Types.h"
#include "TMVA/Tools.h"
#include "TMVA/GeneticFitter.h"
#include "TMVA/Config.h"

#ifdef MethodMLP_UseMinuit__
TMVA::MethodMLP* TMVA::MethodMLP::fgThis = 0;
Bool_t MethodMLP_UseMinuit = kTRUE;
#endif

REGISTER_METHOD(MLP)

ClassImp(TMVA::MethodMLP)

using std::vector;

//______________________________________________________________________________
TMVA::MethodMLP::MethodMLP( const TString& jobName,
                            const TString& methodTitle,
                            DataSetInfo& theData,
                            const TString& theOption,
                            TDirectory* theTargetDir )
   : MethodANNBase( jobName, Types::kMLP, methodTitle, theData, theOption, theTargetDir ),
     fUseRegulator(false), fCalculateErrors(false),
     fPrior(0.0), fPriorDev(0), fUpdateLimit(0),
     fTrainingMethod(kBFGS), fTrainMethodS("BFGS"),
     fSamplingFraction(1.0), fSamplingEpoch(0.0), fSamplingWeight(0.0),
     fSamplingTraining(false), fSamplingTesting(false),
     fLastAlpha(0.0), fTau(0.),
     fResetStep(0), fLearnRate(0.0), fDecayRate(0.0),
     fBPMode(kSequential), fBpModeS("None"),
     fBatchSize(0), fTestRate(0), fEpochMon(false),
     fGA_nsteps(0), fGA_preCalc(0), fGA_SC_steps(0),
     fGA_SC_rate(0), fGA_SC_factor(0.0),
     fDeviationsFromTargets(0),
     fWeightRange     (1.0)
{
   // standard constructor
}

//______________________________________________________________________________
TMVA::MethodMLP::MethodMLP( DataSetInfo& theData,
                            const TString& theWeightFile,
                            TDirectory* theTargetDir )
   : MethodANNBase( Types::kMLP, theData, theWeightFile, theTargetDir ),
     fUseRegulator(false), fCalculateErrors(false),
     fPrior(0.0), fPriorDev(0), fUpdateLimit(0),
     fTrainingMethod(kBFGS), fTrainMethodS("BFGS"),
     fSamplingFraction(1.0), fSamplingEpoch(0.0), fSamplingWeight(0.0),
     fSamplingTraining(false), fSamplingTesting(false),
     fLastAlpha(0.0), fTau(0.),
     fResetStep(0), fLearnRate(0.0), fDecayRate(0.0),
     fBPMode(kSequential), fBpModeS("None"),
     fBatchSize(0), fTestRate(0), fEpochMon(false),
     fGA_nsteps(0), fGA_preCalc(0), fGA_SC_steps(0),
     fGA_SC_rate(0), fGA_SC_factor(0.0),
     fDeviationsFromTargets(0),
     fWeightRange     (1.0)
{
   // constructor from a weight file
}

//______________________________________________________________________________
TMVA::MethodMLP::~MethodMLP()
{
   // destructor
   // nothing to be done
}

//_______________________________________________________________________
Bool_t TMVA::MethodMLP::HasAnalysisType( Types::EAnalysisType type, UInt_t numberClasses, UInt_t /*numberTargets*/ )
{
   // MLP can handle classification with 2 classes, multiclass classification, and regression
   if (type == Types::kClassification && numberClasses == 2 ) return kTRUE;
   if (type == Types::kMulticlass ) return kTRUE;
   if (type == Types::kRegression ) return kTRUE;

   return kFALSE;
}

//______________________________________________________________________________
void TMVA::MethodMLP::Init()
{
   // default initializations

   // the minimum requirement to declare an event signal-like
   SetSignalReferenceCut( 0.5 );
#ifdef MethodMLP_UseMinuit__
   fgThis = this;
#endif
}

//_______________________________________________________________________
void TMVA::MethodMLP::DeclareOptions()
{
   // define the options (their key words) that can be set in the option string
   // known options:
   // TrainingMethod  <string>     Training method
   //    available values are:         BP   Back-Propagation <default>
   //                                  BFGS Broyden-Fletcher-Goldfarb-Shanno algorithm
   //                                  GA   Genetic Algorithm (takes a LONG time)
   //
   // LearningRate    <float>      NN learning rate parameter
   // DecayRate       <float>      Decay rate for learning parameter
   // TestRate        <int>        Test for overtraining performed at every #th epoch
   //
   // BPMode          <string>     Back-propagation learning mode
   //    available values are:         sequential <default>
   //                                  batch
   //
   // BatchSize       <int>        Batch size: number of events/batch, only set if in Batch Mode,
   //                                          -1 for BatchSize=number_of_events
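   //
   // as an illustration, a typical back-propagation configuration built from the options
   // declared below (using their default values) could read:
   //    "TrainingMethod=BP:LearningRate=0.02:DecayRate=0.01:BPMode=sequential:TestRate=10"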

   DeclareOptionRef(fTrainMethodS="BP", "TrainingMethod",
                    "Train with Back-Propagation (BP), BFGS Algorithm (BFGS), or Genetic Algorithm (GA - slower and worse)");
   AddPreDefVal(TString("BP"));
   AddPreDefVal(TString("GA"));
   AddPreDefVal(TString("BFGS"));

   DeclareOptionRef(fLearnRate=0.02,    "LearningRate",    "ANN learning rate parameter");
   DeclareOptionRef(fDecayRate=0.01,    "DecayRate",       "Decay rate for learning parameter");
   DeclareOptionRef(fTestRate =10,      "TestRate",        "Test for overtraining performed at every #th epoch");
   DeclareOptionRef(fEpochMon = kFALSE, "EpochMonitoring", "Provide epoch-wise monitoring plots according to TestRate (caution: causes big ROOT output file!)" );

   DeclareOptionRef(fSamplingFraction=1.0, "Sampling","Only a fraction 'Sampling' of (randomly selected) events is used for training in each epoch");
   DeclareOptionRef(fSamplingEpoch=1.0,    "SamplingEpoch","Sampling is used for the first 'SamplingEpoch' epochs; afterwards, all events are used for training");
   DeclareOptionRef(fSamplingWeight=1.0,   "SamplingImportance","The sampling weights of events in epochs which were successful are multiplied by SamplingImportance, otherwise they are divided by it");

   DeclareOptionRef(fSamplingTraining=kTRUE,    "SamplingTraining","The training sample is sampled");
   DeclareOptionRef(fSamplingTesting= kFALSE,    "SamplingTesting" ,"The testing sample is sampled");

   DeclareOptionRef(fResetStep=50,   "ResetStep",    "How often BFGS should reset history");
   DeclareOptionRef(fTau      =3.0,  "Tau",          "LineSearch \"step size\"");

   DeclareOptionRef(fBpModeS="sequential", "BPMode",
                    "Back-propagation learning mode: sequential or batch");
   AddPreDefVal(TString("sequential"));
   AddPreDefVal(TString("batch"));

   DeclareOptionRef(fBatchSize=-1, "BatchSize",
                    "Batch size: number of events/batch, only set if in Batch Mode, -1 for BatchSize=number_of_events");

   DeclareOptionRef(fImprovement=1e-30, "ConvergenceImprove",
                    "Minimum improvement which counts as improvement (<0 means automatic convergence check is turned off)");

   DeclareOptionRef(fSteps=-1, "ConvergenceTests",
                    "Number of steps (without improvement) required for convergence (<0 means automatic convergence check is turned off)");

   DeclareOptionRef(fUseRegulator=kFALSE, "UseRegulator",
                    "Use regulator to avoid over-training");   //zjh
   DeclareOptionRef(fUpdateLimit=10000, "UpdateLimit",
                    "Maximum number of regulator updates");   //zjh
   DeclareOptionRef(fCalculateErrors=kFALSE, "CalculateErrors",
                    "Calculates inverse Hessian matrix at the end of the training to be able to calculate the uncertainties of an MVA value");   //zjh

   DeclareOptionRef(fWeightRange=1.0, "WeightRange",
                    "Use only events within this fraction of the total event weight for the estimator calculation, starting from the events with the smallest deviation from the desired value");

}

//_______________________________________________________________________
void TMVA::MethodMLP::ProcessOptions()
{
   // process user options
   MethodANNBase::ProcessOptions();


   if (IgnoreEventsWithNegWeightsInTraining()) {
      Log() << kINFO
            << "Will ignore negative events in training!"
            << Endl;
   }


   if      (fTrainMethodS == "BP"  ) fTrainingMethod = kBP;
   else if (fTrainMethodS == "BFGS") fTrainingMethod = kBFGS;
   else if (fTrainMethodS == "GA"  ) fTrainingMethod = kGA;

   if      (fBpModeS == "sequential") fBPMode = kSequential;
   else if (fBpModeS == "batch")      fBPMode = kBatch;

   //   InitializeLearningRates();

   if (fBPMode == kBatch) {
      Data()->SetCurrentType(Types::kTraining);
      Int_t numEvents = Data()->GetNEvents();
      if (fBatchSize < 1 || fBatchSize > numEvents) fBatchSize = numEvents;
   }
}

//______________________________________________________________________________
void TMVA::MethodMLP::InitializeLearningRates()
{
   // initialize learning rates of synapses, used only by backpropagation
   Log() << kDEBUG << "Initialize learning rates" << Endl;
   TSynapse *synapse;
   Int_t numSynapses = fSynapses->GetEntriesFast();
   for (Int_t i = 0; i < numSynapses; i++) {
      synapse = (TSynapse*)fSynapses->At(i);
      synapse->SetLearningRate(fLearnRate);
   }
}

//______________________________________________________________________________
Double_t TMVA::MethodMLP::CalculateEstimator( Types::ETreeType treeType, Int_t iEpoch )
{
   // calculate the estimator that training is attempting to minimize
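   //
   // the quantities accumulated below are, per event with weight w:
   //   regression          : d = sum_t ( y_t - target_t )^2
   //   multiclass (CE)     : d = -log( exp(y_cls) / sum_c exp(y_c) )
   //   classification (MSE): d = ( y - desired )^2
   //   classification (CE) : d = -2*( desired*log(y) + (1-desired)*log(1-y) )
   // and the returned estimator is sum_i w_i*d_i / sum_i w_i (restricted to the events
   // with the smallest deviations if WeightRange < 1)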

   // sanity check
   if (treeType!=Types::kTraining && treeType!=Types::kTesting) {
      Log() << kFATAL << "<CalculateEstimator> fatal error: wrong tree type: " << treeType << Endl;
   }

   Types::ETreeType saveType = Data()->GetCurrentType();
   Data()->SetCurrentType(treeType);

   // if epochs are counted create monitoring histograms (only available for classification)
   TString type  = (treeType == Types::kTraining ? "train" : "test");
   TString name  = Form("convergencetest___mlp_%s_epoch_%04i", type.Data(), iEpoch);
   TString nameB = name + "_B";
   TString nameS = name + "_S";
   Int_t   nbin  = 100;
   Float_t limit = 2;
   TH1*    histS = 0;
   TH1*    histB = 0;
   if (fEpochMon && iEpoch >= 0 && !DoRegression()) {
      histS = new TH1F( nameS, nameS, nbin, -limit, limit );
      histB = new TH1F( nameB, nameB, nbin, -limit, limit );
   }

   Double_t estimator = 0;

   // loop over all training events
   Int_t  nEvents  = GetNEvents();
   UInt_t nClasses = DataInfo().GetNClasses();
   UInt_t nTgts = DataInfo().GetNTargets();


   Float_t sumOfWeights = 0.f;
   if( fWeightRange < 1.f ){
      fDeviationsFromTargets = new std::vector<std::pair<Float_t,Float_t> >;
      fDeviationsFromTargets->reserve(nEvents); // entries are added with push_back below
   }

   for (Int_t i = 0; i < nEvents; i++) {

      const Event* ev = GetEvent(i);

      if ((ev->GetWeight() < 0) && IgnoreEventsWithNegWeightsInTraining()
          &&  (saveType == Types::kTraining)){
         continue;
      }

       Double_t     w  = ev->GetWeight();

      ForceNetworkInputs( ev );
      ForceNetworkCalculations();

      Double_t d = 0, v = 0;
      if (DoRegression()) {
         for (UInt_t itgt = 0; itgt < nTgts; itgt++) {
            v = GetOutputNeuron( itgt )->GetActivationValue();
            Double_t targetValue = ev->GetTarget( itgt );
            Double_t dt = v - targetValue;
            d += (dt*dt);
         }
         estimator += d*w;
      } else if (DoMulticlass() ) {
         UInt_t cls = ev->GetClass();
         if (fEstimator==kCE){
            Double_t norm(0);
            for (UInt_t icls = 0; icls < nClasses; icls++) {
	       Float_t activationValue = GetOutputNeuron( icls )->GetActivationValue();
               norm += exp( activationValue );
               if(icls==cls)
                  d = exp( activationValue );
            }
            d = -TMath::Log(d/norm);
         }
         else{
            for (UInt_t icls = 0; icls < nClasses; icls++) {
               Double_t desired = (icls==cls) ? 1.0 : 0.0;
               v = GetOutputNeuron( icls )->GetActivationValue();
               d = (desired-v)*(desired-v);
            }
         }
         estimator += d*w; //zjh
      } else {
         Double_t desired =  DataInfo().IsSignal(ev)?1.:0.;
         v = GetOutputNeuron()->GetActivationValue();
         if (fEstimator==kMSE) d = (desired-v)*(desired-v);                         //zjh
         else if (fEstimator==kCE) d = -2*(desired*TMath::Log(v)+(1-desired)*TMath::Log(1-v));     //zjh
         estimator += d*w; //zjh
      }

      if( fDeviationsFromTargets )
	 fDeviationsFromTargets->push_back(std::pair<Float_t,Float_t>(d,w));

      sumOfWeights += w;


      // fill monitoring histograms
      if      (DataInfo().IsSignal(ev) && histS != 0) histS->Fill( float(v), float(w) );
      else if (histB != 0)                            histB->Fill( float(v), float(w) );
   }


   if( fDeviationsFromTargets ) {
      std::sort(fDeviationsFromTargets->begin(),fDeviationsFromTargets->end());

      estimator = 0.f;

      Float_t weightRangeCut = fWeightRange*sumOfWeights;
      Float_t weightSum      = 0.f;
      for(std::vector<std::pair<Float_t,Float_t> >::iterator itDev = fDeviationsFromTargets->begin(), itDevEnd = fDeviationsFromTargets->end(); itDev != itDevEnd; ++itDev ){
         Float_t deviation = (*itDev).first;
         Float_t devWeight = (*itDev).second;
         weightSum += devWeight; // add the weight of this event
         if( weightSum <= weightRangeCut ) { // if within the region defined by fWeightRange
            estimator += devWeight*deviation;
         }
      }

      sumOfWeights = weightRangeCut;
      delete fDeviationsFromTargets;
      fDeviationsFromTargets = 0;
   }

   if (histS != 0) fEpochMonHistS.push_back( histS );
   if (histB != 0) fEpochMonHistB.push_back( histB );

   //if      (DoRegression()) estimator = TMath::Sqrt(estimator/Float_t(nEvents));
   //else if (DoMulticlass()) estimator = TMath::Sqrt(estimator/Float_t(nEvents));
   //else                     estimator = estimator*0.5/Float_t(nEvents);
   // normalise by the sum of weights (identical for regression, multiclass and classification)
   estimator = estimator/Float_t(sumOfWeights);


   //if (fUseRegulator) estimator+=fPrior/Float_t(nEvents);  //zjh

   Data()->SetCurrentType( saveType );

   // provide epoch-wise monitoring
   if (fEpochMon && iEpoch >= 0 && !DoRegression() && treeType == Types::kTraining) {
      CreateWeightMonitoringHists( Form("epochmonitoring___epoch_%04i_weights_hist", iEpoch), &fEpochMonHistW );
   }

   return estimator;
}

//______________________________________________________________________________
void TMVA::MethodMLP::Train(Int_t nEpochs)
{
   if (fNetwork == 0) {
      //Log() << kERROR <<"ANN Network is not initialized, doing it now!"<< Endl;
      Log() << kFATAL <<"ANN Network is not initialized, doing it now!"<< Endl;
      SetAnalysisType(GetAnalysisType());
   }
   Log() << kDEBUG << "reinitalize learning rates" << Endl;
   InitializeLearningRates();
   PrintMessage("Training Network");

   Int_t nEvents=GetNEvents();
   Int_t nSynapses=fSynapses->GetEntriesFast();
   if (nSynapses>nEvents)
      Log()<<kWARNING<<"ANN too complicated: #events="<<nEvents<<"\t#synapses="<<nSynapses<<Endl;

#ifdef MethodMLP_UseMinuit__
   if (MethodMLP_UseMinuit) MinuitMinimize();
#else
   if (fTrainingMethod == kGA)        GeneticMinimize();
   else if (fTrainingMethod == kBFGS) BFGSMinimize(nEpochs);
   else                               BackPropagationMinimize(nEpochs);
#endif

   float trainE = CalculateEstimator( Types::kTraining, 0 ) ; // estimator for training sample  //zjh
   float testE  = CalculateEstimator( Types::kTesting,  0 ) ; // estimator for test sample //zjh
   if (fUseRegulator){
      Log()<<kINFO<<"Finalizing handling of Regulator terms, trainE="<<trainE<<" testE="<<testE<<Endl;
      UpdateRegulators();
      Log()<<kINFO<<"Done with handling of Regulator terms"<<Endl;
   }

   if( fCalculateErrors || fUseRegulator )
   {
      Int_t numSynapses=fSynapses->GetEntriesFast();
      fInvHessian.ResizeTo(numSynapses,numSynapses);
      GetApproxInvHessian( fInvHessian ,false);
   }
}

//______________________________________________________________________________
void TMVA::MethodMLP::BFGSMinimize( Int_t nEpochs )
{
   // train network with BFGS algorithm
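   //
   // sketch of one iteration of the epoch loop below:
   //   1. SetGammaDelta : Gamma = change of the gradient (dE/dw) since the last step,
   //                      Delta = the last weight step (taken from 'buffer')
   //   2. GetHessian    : BFGS update of the approximate inverse Hessian
   //   3. SetDir        : search direction Dir = -Hessian * dE/dw (or SteepestDir after a reset)
   //   4. LineSearch    : one-dimensional minimisation along Dir, updates the synapse weights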

   Timer timer( (fSteps>0?100:nEpochs), GetName() );

   // create histograms for overtraining monitoring
   Int_t nbinTest = Int_t(nEpochs/fTestRate);
   fEstimatorHistTrain = new TH1F( "estimatorHistTrain", "training estimator",
                                   nbinTest, Int_t(fTestRate/2), nbinTest*fTestRate+Int_t(fTestRate/2) );
   fEstimatorHistTest  = new TH1F( "estimatorHistTest", "test estimator",
                                   nbinTest, Int_t(fTestRate/2), nbinTest*fTestRate+Int_t(fTestRate/2) );

   Int_t nSynapses = fSynapses->GetEntriesFast();
   Int_t nWeights  = nSynapses;

   for (Int_t i=0;i<nSynapses;i++) {
      TSynapse* synapse = (TSynapse*)fSynapses->At(i);
      synapse->SetDEDw(0.0);
   }

   std::vector<Double_t> buffer( nWeights );
   for (Int_t i=0;i<nWeights;i++) buffer[i] = 0.;

   TMatrixD Dir     ( nWeights, 1 );
   TMatrixD Hessian ( nWeights, nWeights );
   TMatrixD Gamma   ( nWeights, 1 );
   TMatrixD Delta   ( nWeights, 1 );
   Int_t        RegUpdateCD=0;                  //zjh
   Int_t        RegUpdateTimes=0;               //zjh
   Double_t     AccuError=0;

   Double_t trainE = -1;
   Double_t testE  = -1;

   fLastAlpha = 0.;

   if(fSamplingTraining || fSamplingTesting)
      Data()->InitSampling(1.0,1.0,fRandomSeed); // initialize sampling to initialize the random generator with the given seed

   if (fSteps > 0) Log() << kINFO << "Inaccurate progress timing for MLP... " << Endl;
   timer.DrawProgressBar( 0 );

   // start training cycles (epochs)
   for (Int_t i = 0; i < nEpochs; i++) {
      if (Float_t(i)/nEpochs < fSamplingEpoch) {
         if ((i+1)%fTestRate == 0 || (i == 0)) {
            if (fSamplingTraining) {
               Data()->SetCurrentType( Types::kTraining );
               Data()->InitSampling(fSamplingFraction,fSamplingWeight);
               Data()->CreateSampling();
            }
            if (fSamplingTesting) {
               Data()->SetCurrentType( Types::kTesting );
               Data()->InitSampling(fSamplingFraction,fSamplingWeight);
               Data()->CreateSampling();
            }
         }
      }
      else {
         Data()->SetCurrentType( Types::kTraining );
         Data()->InitSampling(1.0,1.0);
         Data()->SetCurrentType( Types::kTesting );
         Data()->InitSampling(1.0,1.0);
      }
      Data()->SetCurrentType( Types::kTraining );

      //zjh
      if (fUseRegulator) {
         UpdatePriors();
         RegUpdateCD++;
      }
      //zjh

      SetGammaDelta( Gamma, Delta, buffer );

      if (i % fResetStep == 0 && i<0.5*nEpochs) { //zjh
         SteepestDir( Dir );
         Hessian.UnitMatrix();
         RegUpdateCD=0;    //zjh
      }
      else {
         if (GetHessian( Hessian, Gamma, Delta )) {
            SteepestDir( Dir );
            Hessian.UnitMatrix();
            RegUpdateCD=0;    //zjh
         }
         else SetDir( Hessian, Dir );
      }

      Double_t dError=0;  //zjh
      if (DerivDir( Dir ) > 0) {
         SteepestDir( Dir );
         Hessian.UnitMatrix();
         RegUpdateCD=0;    //zjh
      }
      if (LineSearch( Dir, buffer, &dError )) { //zjh
         Hessian.UnitMatrix();
         SteepestDir( Dir );
         RegUpdateCD=0;    //zjh
         if (LineSearch(Dir, buffer, &dError)) {  //zjh
            i = nEpochs;
            Log() << kFATAL << "Line search failed! Huge troubles somewhere..." << Endl;
         }
      }

      //zjh+
      if (dError<0) Log()<<kWARNING<<"\nnegative dError=" <<dError<<Endl;
      AccuError+=dError;

      if ( fUseRegulator && RegUpdateTimes<fUpdateLimit && RegUpdateCD>=5 && fabs(dError)<0.1*AccuError) {
         Log()<<kDEBUG<<"\n\nUpdate regulators "<<RegUpdateTimes<<" on epoch "<<i<<"\tdError="<<dError<<Endl;
         UpdateRegulators();
         Hessian.UnitMatrix();
         RegUpdateCD=0;
         RegUpdateTimes++;
         AccuError=0;
      }
      //zjh-

      // monitor convergence of training and control sample
      if ((i+1)%fTestRate == 0) {
         //trainE = CalculateEstimator( Types::kTraining, i ) - fPrior/Float_t(GetNEvents()); // estimator for training sample  //zjh
         //testE  = CalculateEstimator( Types::kTesting,  i ) - fPrior/Float_t(GetNEvents()); // estimator for test sample //zjh
         trainE = CalculateEstimator( Types::kTraining, i ) ; // estimator for training sample  //zjh
         testE  = CalculateEstimator( Types::kTesting,  i ) ; // estimator for test sample //zjh
         fEstimatorHistTrain->Fill( i+1, trainE );
         fEstimatorHistTest ->Fill( i+1, testE );

         Bool_t success = kFALSE;
         if ((testE < GetCurrentValue()) || (GetCurrentValue()<1e-100)) {
            success = kTRUE;
         }
         Data()->EventResult( success );

         SetCurrentValue( testE );
         if (HasConverged()) {
            if (Float_t(i)/nEpochs < fSamplingEpoch) {
               Int_t newEpoch = Int_t(fSamplingEpoch*nEpochs);
               i = newEpoch;
               ResetConvergenceCounter();
            }
            else break;
         }
      }

      // draw progress
      TString convText = Form( "<D^2> (train/test/epoch): %.4g/%.4g/%d", trainE, testE,i  ); //zjh
      if (fSteps > 0) {
         Float_t progress = 0;
         if (Float_t(i)/nEpochs < fSamplingEpoch)
//            progress = Progress()*fSamplingEpoch*fSamplingFraction*100;
            progress = Progress()*fSamplingFraction*100*fSamplingEpoch;
         else
	 {
//            progress = 100.0*(fSamplingEpoch*fSamplingFraction+(1.0-fSamplingFraction*fSamplingEpoch)*Progress());
            progress = 100.0*(fSamplingFraction*fSamplingEpoch+(1.0-fSamplingEpoch)*Progress());
	 }
         Float_t progress2= 100.0*RegUpdateTimes/fUpdateLimit; //zjh
         if (progress2>progress) progress=progress2; //zjh
         timer.DrawProgressBar( Int_t(progress), convText );
      }
      else {
         Int_t progress=Int_t(nEpochs*RegUpdateTimes/Float_t(fUpdateLimit)); //zjh
         if (progress<i) progress=i; //zjh
         timer.DrawProgressBar( progress, convText ); //zjh
      }

      // some verbose output
      if (fgPRINT_SEQ) {
         PrintNetwork();
         WaitForKeyboard();
      }
   }
}

//______________________________________________________________________________
void TMVA::MethodMLP::SetGammaDelta( TMatrixD &Gamma, TMatrixD &Delta, std::vector<Double_t> &buffer )
{
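   // Gamma: change of the gradient, g_(k+1) - g_k, obtained by re-evaluating dE/dw below
   // Delta: the last weight step w_(k+1) - w_k, taken from 'buffer' (filled in LineSearch)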
   Int_t nWeights = fSynapses->GetEntriesFast();

   Int_t IDX = 0;
   Int_t nSynapses = fSynapses->GetEntriesFast();
   for (Int_t i=0;i<nSynapses;i++) {
      TSynapse *synapse = (TSynapse*)fSynapses->At(i);
      Gamma[IDX++][0] = -synapse->GetDEDw();
   }

   for (Int_t i=0;i<nWeights;i++) Delta[i][0] = buffer[i];

   ComputeDEDw();

   IDX = 0;
   for (Int_t i=0;i<nSynapses;i++)
      {
         TSynapse *synapse = (TSynapse*)fSynapses->At(i);
         Gamma[IDX++][0] += synapse->GetDEDw();
      }
}

//______________________________________________________________________________
void TMVA::MethodMLP::ComputeDEDw()
{
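   // compute dE/dw for every synapse, averaged over the accepted training events;
   // if the regulator is used, the derivative of the prior term (fPriorDev) is added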
   Int_t nSynapses = fSynapses->GetEntriesFast();
   for (Int_t i=0;i<nSynapses;i++) {
      TSynapse *synapse = (TSynapse*)fSynapses->At(i);
      synapse->SetDEDw( 0.0 );
   }

   Int_t nEvents = GetNEvents();
   Int_t nPosEvents = nEvents;
   for (Int_t i=0;i<nEvents;i++) {

      const Event* ev = GetEvent(i);
       if ((ev->GetWeight() < 0) && IgnoreEventsWithNegWeightsInTraining()
          &&  (Data()->GetCurrentType() == Types::kTraining)){
         --nPosEvents;
         continue;
      }

      SimulateEvent( ev );

      for (Int_t j=0;j<nSynapses;j++) {
         TSynapse *synapse = (TSynapse*)fSynapses->At(j);
         synapse->SetDEDw( synapse->GetDEDw() + synapse->GetDelta() );
      }
   }

   for (Int_t i=0;i<nSynapses;i++) {
      TSynapse *synapse = (TSynapse*)fSynapses->At(i);
      Double_t DEDw=synapse->GetDEDw();     //zjh
      if (fUseRegulator) DEDw+=fPriorDev[i]; //zjh
      synapse->SetDEDw( DEDw / nPosEvents );   //zjh
   }
}

//______________________________________________________________________________
void TMVA::MethodMLP::SimulateEvent( const Event* ev )
{
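   // propagate a single event through the network, set the output-neuron error for the
   // chosen estimator, and compute the per-synapse deltas (the event's contribution to dE/dw)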
   Double_t eventWeight = ev->GetWeight();

   ForceNetworkInputs( ev );
   ForceNetworkCalculations();

   if (DoRegression()) {
      UInt_t ntgt = DataInfo().GetNTargets();
      for (UInt_t itgt = 0; itgt < ntgt; itgt++) {
         Double_t desired     = ev->GetTarget(itgt);
         Double_t error = ( GetOutputNeuron( itgt )->GetActivationValue() - desired )*eventWeight;
         GetOutputNeuron( itgt )->SetError(error);
      }
   } else if (DoMulticlass()) {
      UInt_t nClasses = DataInfo().GetNClasses();
      UInt_t cls      = ev->GetClass();
      for (UInt_t icls = 0; icls < nClasses; icls++) {
         Double_t desired  = ( cls==icls ? 1.0 : 0.0 );
         Double_t error    = ( GetOutputNeuron( icls )->GetActivationValue() - desired )*eventWeight;
         GetOutputNeuron( icls )->SetError(error);
      }
   } else {
      Double_t desired     = GetDesiredOutput( ev );
      Double_t error=-1;				//zjh
      if (fEstimator==kMSE) error = ( GetOutputNeuron()->GetActivationValue() - desired )*eventWeight;       //zjh
      else if (fEstimator==kCE) error = -eventWeight/(GetOutputNeuron()->GetActivationValue() -1 + desired);  //zjh
      GetOutputNeuron()->SetError(error);
   }

   CalculateNeuronDeltas();
   for (Int_t j=0;j<fSynapses->GetEntriesFast();j++) {
      TSynapse *synapse = (TSynapse*)fSynapses->At(j);
      synapse->InitDelta();
      synapse->CalculateDelta();
   }
}

//______________________________________________________________________________
void TMVA::MethodMLP::SteepestDir( TMatrixD &Dir )
{
   Int_t IDX = 0;
   Int_t nSynapses = fSynapses->GetEntriesFast();

   for (Int_t i=0;i<nSynapses;i++) {
      TSynapse *synapse = (TSynapse*)fSynapses->At(i);
      Dir[IDX++][0] = -synapse->GetDEDw();
   }
}

//______________________________________________________________________________
Bool_t TMVA::MethodMLP::GetHessian( TMatrixD &Hessian, TMatrixD &Gamma, TMatrixD &Delta )
{
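   // BFGS update of the approximate inverse Hessian, written out from the matrix algebra below:
   //   H <- H + [ (1 + Gamma^T H Gamma / Gamma^T Delta) * Delta Delta^T
   //              - Delta (Gamma^T H) - (H Gamma) Delta^T ] / (Gamma^T Delta)
   // returns kTRUE if Gamma^T Delta == 0, in which case the caller falls back to steepest descent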
   TMatrixD gd(Gamma, TMatrixD::kTransposeMult, Delta);
   if ((Double_t) gd[0][0] == 0.) return kTRUE;
   TMatrixD aHg(Hessian, TMatrixD::kMult, Gamma);
   TMatrixD tmp(Gamma,   TMatrixD::kTransposeMult, Hessian);
   TMatrixD gHg(Gamma,   TMatrixD::kTransposeMult, aHg);
   Double_t a = 1 / (Double_t) gd[0][0];
   Double_t f = 1 + ((Double_t)gHg[0][0]*a);
   TMatrixD res(TMatrixD(Delta, TMatrixD::kMult, TMatrixD(TMatrixD::kTransposed,Delta)));
   res *= f;
   res -= (TMatrixD(Delta, TMatrixD::kMult, tmp) + TMatrixD(aHg, TMatrixD::kMult,
                                                            TMatrixD(TMatrixD::kTransposed,Delta)));
   res *= a;
   Hessian += res;

   return kFALSE;
}

//______________________________________________________________________________
void TMVA::MethodMLP::SetDir( TMatrixD &Hessian, TMatrixD &dir )
{
   Int_t IDX = 0;
   Int_t nSynapses = fSynapses->GetEntriesFast();
   TMatrixD DEDw(nSynapses, 1);

   for (Int_t i=0;i<nSynapses;i++) {
      TSynapse *synapse = (TSynapse*)fSynapses->At(i);
      DEDw[IDX++][0] = synapse->GetDEDw();
   }

   dir = Hessian * DEDw;
   for (Int_t i=0;i<IDX;i++) dir[i][0] = -dir[i][0];
}

//______________________________________________________________________________
Double_t TMVA::MethodMLP::DerivDir( TMatrixD &Dir )
{
   Int_t IDX = 0;
   Int_t nSynapses = fSynapses->GetEntriesFast();
   Double_t Result = 0.0;

   for (Int_t i=0;i<nSynapses;i++) {
      TSynapse *synapse = (TSynapse*)fSynapses->At(i);
      Result += Dir[IDX++][0] * synapse->GetDEDw();
   }
   return Result;
}

//______________________________________________________________________________
Bool_t TMVA::MethodMLP::LineSearch(TMatrixD &Dir, std::vector<Double_t> &buffer, Double_t* dError)
{
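   // one-dimensional minimisation of the error along 'Dir':
   //  - bracket a minimum by scaling the trial step alpha with fTau, starting from fLastAlpha
   //  - estimate the position of the minimum from the bracketing triplet (alpha1,alpha2,alpha3)
   //    by parabolic interpolation and store it in fLastAlpha
   //  - write the resulting weight change into 'buffer'; the return value is kTRUE only on failure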
   Int_t IDX = 0;
   Int_t nSynapses = fSynapses->GetEntriesFast();
   Int_t nWeights = nSynapses;

   std::vector<Double_t> Origin(nWeights);
   for (Int_t i=0;i<nSynapses;i++) {
      TSynapse *synapse = (TSynapse*)fSynapses->At(i);
      Origin[i] = synapse->GetWeight();
   }

   Double_t err1 = GetError();
   Double_t errOrigin=err1;  	//zjh
   Double_t alpha1 = 0.;
   Double_t alpha2 = fLastAlpha;


   if      (alpha2 < 0.01) alpha2 = 0.01;
   else if (alpha2 > 2.0)  alpha2 = 2.0;
   Double_t alpha_original = alpha2;
   Double_t alpha3 = alpha2;

   SetDirWeights( Origin, Dir, alpha2 );
   Double_t err2 = GetError();
   //Double_t err2 = err1;
   Double_t err3 = err2;
   Bool_t bingo = kFALSE;


   if (err1 > err2) {
      for (Int_t i=0;i<100;i++)  {
         alpha3 *= fTau;
         SetDirWeights(Origin, Dir, alpha3);
         err3 = GetError();
         if (err3 > err2) {
            bingo = kTRUE;
            break;
         }
         alpha1 = alpha2;
         err1 = err2;
         alpha2 = alpha3;
         err2 = err3;
      }
      if (!bingo) {
         SetDirWeights(Origin, Dir, 0.);
         return kTRUE;
      }
   }
   else {
      for (Int_t i=0;i<100;i++) {
         alpha2 /= fTau;
         if (i==50) {
            Log() << kWARNING << "linesearch, starting to investigate direction opposite of steepestDIR" << Endl;
            alpha2 = -alpha_original;
         }
         SetDirWeights(Origin, Dir, alpha2);
         err2 = GetError();
         if (err1 > err2) {
            bingo = kTRUE;
            break;
         }
         alpha3 = alpha2;
         err3 = err2;
      }
      if (!bingo) {
         SetDirWeights(Origin, Dir, 0.);
         Log() << kWARNING << "linesearch, failed even in opposite direction of steepestDIR" << Endl;
         fLastAlpha = 0.05;
         return kTRUE;
      }
   }

   if (alpha1>0 && alpha2>0 && alpha3 > 0) {
      fLastAlpha = 0.5 * (alpha1 + alpha3 -
                          (err3 - err1) / ((err3 - err2) / ( alpha3 - alpha2 )
                                           - ( err2 - err1 ) / (alpha2 - alpha1 )));
   }
   else {
      fLastAlpha = alpha2;
   }

   fLastAlpha = fLastAlpha < 10000 ? fLastAlpha : 10000;

   SetDirWeights(Origin, Dir, fLastAlpha);

   // leaving these lines uncommented is a heavy price to pay for only a warning message
   // (which shouldn't appear anyway)
   // --> about 15% of time is spent in the final GetError().
   //
   Double_t finalError = GetError();
   if (finalError > err1) {
      Log() << kWARNING << "Line search increased error! Something is wrong."
            << "fLastAlpha=" << fLastAlpha << "al123=" << alpha1 << " "
            << alpha2 << " " << alpha3 << " err1="<< err1 << " errfinal=" << finalError << Endl;
   }

   for (Int_t i=0;i<nSynapses;i++) {
      TSynapse *synapse = (TSynapse*)fSynapses->At(i);
      buffer[IDX] = synapse->GetWeight() - Origin[IDX];
      IDX++;
   }

   if (dError) (*dError)=(errOrigin-finalError)/finalError; //zjh

   return kFALSE;
}

//______________________________________________________________________________
void TMVA::MethodMLP::SetDirWeights( std::vector<Double_t> &Origin, TMatrixD &Dir, Double_t alpha )
{
   Int_t IDX = 0;
   Int_t nSynapses = fSynapses->GetEntriesFast();

   for (Int_t i=0;i<nSynapses;i++) {
      TSynapse *synapse = (TSynapse*)fSynapses->At(i);
      synapse->SetWeight( Origin[IDX] + Dir[IDX][0] * alpha );
      IDX++;
   }
   if (fUseRegulator) UpdatePriors();	//zjh
}


//______________________________________________________________________________
Double_t TMVA::MethodMLP::GetError()
{
   Int_t nEvents = GetNEvents();
   UInt_t ntgts = GetNTargets();
   Double_t Result = 0.;

   for (Int_t i=0;i<nEvents;i++) {
      const Event* ev = GetEvent(i);

       if ((ev->GetWeight() < 0) && IgnoreEventsWithNegWeightsInTraining()
          &&  (Data()->GetCurrentType() == Types::kTraining)){
         continue;
      }
      SimulateEvent( ev );

      Double_t error = 0.;
      if (DoRegression()) {
         for (UInt_t itgt = 0; itgt < ntgts; itgt++) {
            error += GetMSEErr( ev, itgt );	//zjh
         }
      } else if ( DoMulticlass() ){
         for( UInt_t icls = 0, iclsEnd = DataInfo().GetNClasses(); icls < iclsEnd; icls++ ){
            error += GetMSEErr( ev, icls );
         }
      } else {
         if (fEstimator==kMSE) error = GetMSEErr( ev );  //zjh
         else if (fEstimator==kCE) error= GetCEErr( ev ); //zjh
      }
      Result += error * ev->GetWeight();
   }
   if (fUseRegulator) Result+=fPrior;  //zjh
   if (Result<0) Log()<<kWARNING<<"\nNegative Error!!! :"<<Result-fPrior<<"+"<<fPrior<<Endl;
   return Result;
}

//______________________________________________________________________________
Double_t TMVA::MethodMLP::GetMSEErr( const Event* ev, UInt_t index )
{
   Double_t error = 0;
   Double_t output = GetOutputNeuron( index )->GetActivationValue();
   Double_t target = 0;
   if      (DoRegression()) target = ev->GetTarget( index );
   else if (DoMulticlass()) target = (ev->GetClass() == index ? 1.0 : 0.0 );
   else                     target = GetDesiredOutput( ev );

   error = 0.5*(output-target)*(output-target); //zjh

   return error;

}

//______________________________________________________________________________
Double_t TMVA::MethodMLP::GetCEErr( const Event* ev, UInt_t index )  //zjh
{
   Double_t error = 0;
   Double_t output = GetOutputNeuron( index )->GetActivationValue();
   Double_t target = 0;
   if      (DoRegression()) target = ev->GetTarget( index );
   else if (DoMulticlass()) target = (ev->GetClass() == index ? 1.0 : 0.0 );
   else                     target = GetDesiredOutput( ev );

   error = -(target*TMath::Log(output)+(1-target)*TMath::Log(1-output));

   return error;
}

//______________________________________________________________________________
void TMVA::MethodMLP::BackPropagationMinimize(Int_t nEpochs)
{
   // minimize estimator / train network with backpropagation algorithm
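   //
   // per epoch: (re)sample the training/testing events if requested, run TrainOneEpoch()
   // (sequential or batch back-propagation over all events), decay the learning rates, and
   // every fTestRate epochs compare the training and testing estimators to monitor overtraining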

   //    Timer timer( nEpochs, GetName() );
   Timer timer( (fSteps>0?100:nEpochs), GetName() );
   Int_t lateEpoch = (Int_t)(nEpochs*0.95) - 1;

   // create histograms for overtraining monitoring
   Int_t nbinTest = Int_t(nEpochs/fTestRate);
   fEstimatorHistTrain = new TH1F( "estimatorHistTrain", "training estimator",
                                   nbinTest, Int_t(fTestRate/2), nbinTest*fTestRate+Int_t(fTestRate/2) );
   fEstimatorHistTest  = new TH1F( "estimatorHistTest", "test estimator",
                                   nbinTest, Int_t(fTestRate/2), nbinTest*fTestRate+Int_t(fTestRate/2) );

   if(fSamplingTraining || fSamplingTesting)
      Data()->InitSampling(1.0,1.0,fRandomSeed); // initialize sampling to initialize the random generator with the given seed

   if (fSteps > 0) Log() << kINFO << "Inaccurate progress timing for MLP... " << Endl;
   timer.DrawProgressBar(0);

   // estimators
   Double_t trainE = -1;
   Double_t testE  = -1;

   // start training cycles (epochs)
   for (Int_t i = 0; i < nEpochs; i++) {

      if (Float_t(i)/nEpochs < fSamplingEpoch) {
         if ((i+1)%fTestRate == 0 || (i == 0)) {
            if (fSamplingTraining) {
               Data()->SetCurrentType( Types::kTraining );
               Data()->InitSampling(fSamplingFraction,fSamplingWeight);
               Data()->CreateSampling();
            }
            if (fSamplingTesting) {
               Data()->SetCurrentType( Types::kTesting );
               Data()->InitSampling(fSamplingFraction,fSamplingWeight);
               Data()->CreateSampling();
            }
         }
      }
      else {
         Data()->SetCurrentType( Types::kTraining );
         Data()->InitSampling(1.0,1.0);
         Data()->SetCurrentType( Types::kTesting );
         Data()->InitSampling(1.0,1.0);
      }
      Data()->SetCurrentType( Types::kTraining );

      TrainOneEpoch();
      DecaySynapseWeights(i >= lateEpoch);

      // monitor convergence of training and control sample
      if ((i+1)%fTestRate == 0) {
         trainE = CalculateEstimator( Types::kTraining, i ); // estimator for training sample
         testE  = CalculateEstimator( Types::kTesting,  i );  // estimator for test sample
         fEstimatorHistTrain->Fill( i+1, trainE );
         fEstimatorHistTest ->Fill( i+1, testE );

         Bool_t success = kFALSE;
         if ((testE < GetCurrentValue()) || (GetCurrentValue()<1e-100)) {
            success = kTRUE;
         }
         Data()->EventResult( success );

         SetCurrentValue( testE );
         if (HasConverged()) {
            if (Float_t(i)/nEpochs < fSamplingEpoch) {
               Int_t newEpoch = Int_t(fSamplingEpoch*nEpochs);
               i = newEpoch;
               ResetConvergenceCounter();
            }
            else {
               if (lateEpoch > i) lateEpoch = i;
               else                break;
            }
         }
      }

      // draw progress bar (add convergence value)
      TString convText = Form( "<D^2> (train/test): %.4g/%.4g", trainE, testE );
      if (fSteps > 0) {
         Float_t progress = 0;
         if (Float_t(i)/nEpochs < fSamplingEpoch)
            progress = Progress()*fSamplingEpoch*fSamplingFraction*100;
         else
            progress = 100*(fSamplingEpoch*fSamplingFraction+(1.0-fSamplingFraction*fSamplingEpoch)*Progress());

         timer.DrawProgressBar( Int_t(progress), convText );
      }
      else {
        timer.DrawProgressBar( i, convText );
      }
   }
}

//______________________________________________________________________________
void TMVA::MethodMLP::TrainOneEpoch()
{
   // train network over a single epoch/cycle of events

   Int_t nEvents = Data()->GetNEvents();

   // randomize the order events will be presented, important for sequential mode
   Int_t* index = new Int_t[nEvents];
   for (Int_t i = 0; i < nEvents; i++) index[i] = i;
   Shuffle(index, nEvents);

   // loop over all training events
   for (Int_t i = 0; i < nEvents; i++) {

      const Event * ev = GetEvent(index[i]);
      if ((ev->GetWeight() < 0) && IgnoreEventsWithNegWeightsInTraining()
          &&  (Data()->GetCurrentType() == Types::kTraining)){
         continue;
      }

      TrainOneEvent(index[i]);

      // do adjustments if in batch mode
      if (fBPMode == kBatch && (i+1)%fBatchSize == 0) {
         AdjustSynapseWeights();
         if (fgPRINT_BATCH) {
            PrintNetwork();
            WaitForKeyboard();
         }
      }

      // debug in sequential mode
      if (fgPRINT_SEQ) {
         PrintNetwork();
         WaitForKeyboard();
      }
   }

   delete[] index;
}

//______________________________________________________________________________
void TMVA::MethodMLP::Shuffle(Int_t* index, Int_t n)
{
   // Input:
   //   index: the array to shuffle
   //   n: the size of the array
   // Output:
   //   index: the shuffled indexes
   // This method is used for sequential training

   Int_t j, k;
   Int_t a = n - 1;
   for (Int_t i = 0; i < n; i++) {
      j = (Int_t) (frgen->Rndm() * a);
      if (j<n){ // address the 'worries' of coverity
	k = index[j];
	index[j] = index[i];
	index[i] = k;
      }
   }
}

//______________________________________________________________________________
void TMVA::MethodMLP::DecaySynapseWeights(Bool_t lateEpoch)
{
   // decay synapse weights
   // in last 10 epochs, lower learning rate even more to find a good minimum

   TSynapse* synapse;
   Int_t numSynapses = fSynapses->GetEntriesFast();
   for (Int_t i = 0; i < numSynapses; i++) {
      synapse = (TSynapse*)fSynapses->At(i);
      if (lateEpoch) synapse->DecayLearningRate(TMath::Sqrt(fDecayRate)); // In order to lower the learning rate even more, we need to apply sqrt instead of square.
      else           synapse->DecayLearningRate(fDecayRate);
   }
}

//______________________________________________________________________________
void TMVA::MethodMLP::TrainOneEventFast(Int_t ievt, Float_t*& branchVar, Int_t& type)
{
   // fast per-event training

   GetEvent(ievt);

   // as soon as we know how to get event weights, get that here

   // note: the normalization of event weights will affect the choice
   // of learning rate, one will have to experiment to get the right value.
   // in general, if the "average" event weight is 1, the learning rate
   // should be good if set around 0.02 (a good value if all event weights are 1)
   Double_t eventWeight = 1.0;

   // get the desired output of this event
   Double_t desired;
   if (type == 0) desired = fOutput->GetMin();  // background //zjh
   else           desired = fOutput->GetMax();  // signal     //zjh

   // force the value for each input neuron
   Double_t x;
   TNeuron* neuron;

   for (UInt_t j = 0; j < GetNvar(); j++) {
      x = branchVar[j];
      if (IsNormalised()) x = gTools().NormVariable( x, GetXmin( j ), GetXmax( j ) );
      neuron = GetInputNeuron(j);
      neuron->ForceValue(x);
   }

   ForceNetworkCalculations();
   UpdateNetwork(desired, eventWeight);
}

//______________________________________________________________________________
void TMVA::MethodMLP::TrainOneEvent(Int_t ievt)
{
   // train network over a single event
   // this uses the new event model

   // note: the normalization of event weights will affect the choice
   // of learning rate, one will have to experiment to get the right value.
   // in general, if the "average" event weight is 1, the learning rate
   // should be good if set around 0.02 (a good value if all event weights are 1)

   const Event * ev = GetEvent(ievt);
   Double_t eventWeight = ev->GetWeight();
   ForceNetworkInputs( ev );
   ForceNetworkCalculations();
   if      (DoRegression()) UpdateNetwork( ev->GetTargets(),       eventWeight );
   else if (DoMulticlass()) UpdateNetwork( *DataInfo().GetTargetsForMulticlass( ev ), eventWeight );
   else                     UpdateNetwork( GetDesiredOutput( ev ), eventWeight );
}

//______________________________________________________________________________
Double_t TMVA::MethodMLP::GetDesiredOutput( const Event* ev )
{
   // get the desired output of this event
   return DataInfo().IsSignal(ev)?fOutput->GetMax():fOutput->GetMin(); //zjh
}


//______________________________________________________________________________
void TMVA::MethodMLP::UpdateNetwork(Double_t desired, Double_t eventWeight)
{
   // update the network based on how closely
   // the output matched the desired output
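   // the error assigned to the output neuron (scaled by the event weight below) is
   //   MSE: y - desired
   //   CE : -1/(y - 1 + desired), i.e. -1/y for desired=1 and 1/(1-y) for desired=0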
   Double_t error = 0.;
   if      (fEstimator==kMSE) error = GetOutputNeuron()->GetActivationValue() - desired;            //zjh
   else if (fEstimator==kCE)  error = -1./(GetOutputNeuron()->GetActivationValue() - 1 + desired);  //zjh
   else    Log() << kFATAL << "Estimator type unspecified!!" << Endl;                               //zjh
   error *= eventWeight;
   GetOutputNeuron()->SetError(error);
   CalculateNeuronDeltas();
   UpdateSynapses();
}

//______________________________________________________________________________
void TMVA::MethodMLP::UpdateNetwork(const std::vector<Float_t>& desired, Double_t eventWeight)
{
   // update the network based on how closely
   // the output matched the desired output
   for (UInt_t i = 0, iEnd = desired.size(); i < iEnd; ++i) {
      Double_t error = GetOutputNeuron( i )->GetActivationValue() - desired.at(i);
      error *= eventWeight;
      GetOutputNeuron( i )->SetError(error);
   }
   CalculateNeuronDeltas();
   UpdateSynapses();
}


//______________________________________________________________________________
void TMVA::MethodMLP::CalculateNeuronDeltas()
{
   // have each neuron calculate its delta by backpropagation

   TNeuron* neuron;
   Int_t    numNeurons;
   Int_t    numLayers = fNetwork->GetEntriesFast();
   TObjArray* curLayer;

   // step backwards through the network (backpropagation)
   // deltas calculated starting at output layer
   for (Int_t i = numLayers-1; i >= 0; i--) {
      curLayer = (TObjArray*)fNetwork->At(i);
      numNeurons = curLayer->GetEntriesFast();

      for (Int_t j = 0; j < numNeurons; j++) {
         neuron = (TNeuron*) curLayer->At(j);
         neuron->CalculateDelta();
      }
   }
}

//______________________________________________________________________________
void TMVA::MethodMLP::GeneticMinimize()
{
   // create genetics class similar to GeneticCut
   // give it vector of parameter ranges (parameters = weights)
   // link fitness function of this class to ComputeEstimator
   // instantiate GA (see MethodCuts)
   // run it
   // then this should exist for GA, Minuit and random sampling

   PrintMessage("Minimizing Estimator with GA");

   // define GA parameters
   fGA_preCalc   = 1;
   fGA_SC_steps  = 10;
   fGA_SC_rate   = 5;
   fGA_SC_factor = 0.95;
   fGA_nsteps    = 30;

   // ranges
   std::vector<Interval*> ranges;

   Int_t numWeights = fSynapses->GetEntriesFast();
   for (Int_t ivar=0; ivar< numWeights; ivar++) {
      ranges.push_back( new Interval( 0, GetXmax(ivar) - GetXmin(ivar) ));
   }

   FitterBase *gf = new GeneticFitter( *this, Log().GetPrintedSource(), ranges, GetOptions() );
   gf->Run();

   Double_t estimator = CalculateEstimator();
   Log() << kINFO << "GA: estimator after optimization: " << estimator << Endl;
}

//______________________________________________________________________________
Double_t TMVA::MethodMLP::EstimatorFunction( std::vector<Double_t>& parameters)
{
   // interface to the estimate
   return ComputeEstimator( parameters );
}

//______________________________________________________________________________
Double_t TMVA::MethodMLP::ComputeEstimator( std::vector<Double_t>& parameters)
{
   // this function is called by GeneticANN for GA optimization

   TSynapse* synapse;
   Int_t numSynapses = fSynapses->GetEntriesFast();

   for (Int_t i = 0; i < numSynapses; i++) {
      synapse = (TSynapse*)fSynapses->At(i);
      synapse->SetWeight(parameters.at(i));
   }
   if (fUseRegulator) UpdatePriors(); //zjh

   Double_t estimator = CalculateEstimator();

   return estimator;
}

//______________________________________________________________________________
void TMVA::MethodMLP::UpdateSynapses()
{
   // update synapse error fields and adjust the weights (if in sequential mode)

   TNeuron* neuron;
   Int_t numNeurons;
   TObjArray* curLayer;
   Int_t numLayers = fNetwork->GetEntriesFast();

   for (Int_t i = 0; i < numLayers; i++) {
      curLayer = (TObjArray*)fNetwork->At(i);
      numNeurons = curLayer->GetEntriesFast();

      for (Int_t j = 0; j < numNeurons; j++) {
         neuron = (TNeuron*) curLayer->At(j);
         if (fBPMode == kBatch) neuron->UpdateSynapsesBatch();
         else                neuron->UpdateSynapsesSequential();
      }
   }
}

//______________________________________________________________________________
void TMVA::MethodMLP::AdjustSynapseWeights()
{
   // just adjust the synapse weights (should be called in batch mode)

   TNeuron* neuron;
   Int_t numNeurons;
   TObjArray* curLayer;
   Int_t numLayers = fNetwork->GetEntriesFast();

   for (Int_t i = numLayers-1; i >= 0; i--) {
      curLayer = (TObjArray*)fNetwork->At(i);
      numNeurons = curLayer->GetEntriesFast();

      for (Int_t j = 0; j < numNeurons; j++) {
         neuron = (TNeuron*) curLayer->At(j);
         neuron->AdjustSynapseWeights();
      }
   }
}

//_______________________________________________________________________
void TMVA::MethodMLP::UpdatePriors()  //zjh
{
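   // quadratic weight-decay prior used when the regulator is enabled:
   //   fPrior    = 1/2 * sum_i alpha_i * w_i^2   with alpha_i = fRegulators[fRegulatorIdx[i]]
   //   fPriorDev = the derivative of this prior w.r.t. each weight, alpha_i * w_i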
   fPrior=0;
   fPriorDev.clear();
   Int_t nSynapses = fSynapses->GetEntriesFast();
   for (Int_t i=0;i<nSynapses;i++) {
      TSynapse* synapse = (TSynapse*)fSynapses->At(i);
      fPrior+=0.5*fRegulators[fRegulatorIdx[i]]*(synapse->GetWeight())*(synapse->GetWeight());
      fPriorDev.push_back(fRegulators[fRegulatorIdx[i]]*(synapse->GetWeight()));
   }
}

//_______________________________________________________________________
void TMVA::MethodMLP::UpdateRegulators()  //zjh
{
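   // re-estimate the regulator strengths alpha from the approximate inverse Hessian:
   // for each regulator class,  alpha = variance * nW / (sum of w^2 + variance * trace),
   // where nW is the number of weights in that class, 'trace' the sum of the corresponding
   // diagonal elements of H^-1, and 'variance' is estimated from the training estimator (MSE case)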
   TMatrixD InvH(0,0);
   GetApproxInvHessian(InvH);
   Int_t numSynapses=fSynapses->GetEntriesFast();
   Int_t numRegulators=fRegulators.size();
   Float_t gamma=0,
      variance=1.;    // Gaussian noise
   std::vector<Int_t> nWDP(numRegulators);
   std::vector<Double_t> trace(numRegulators),weightSum(numRegulators);
   for (int i=0;i<numSynapses;i++) {
      TSynapse* synapses = (TSynapse*)fSynapses->At(i);
      Int_t idx=fRegulatorIdx[i];
      nWDP[idx]++;
      trace[idx]+=InvH[i][i];
      gamma+=1-fRegulators[idx]*InvH[i][i];
      weightSum[idx]+=(synapses->GetWeight())*(synapses->GetWeight());
   }
   if (fEstimator==kMSE) {
      if (GetNEvents()>gamma) variance=CalculateEstimator( Types::kTraining, 0 )/(1-(gamma/GetNEvents()));
      else variance=CalculateEstimator( Types::kTraining, 0 );
   }

   //Log() << kDEBUG << Endl;
   for (int i=0;i<numRegulators;i++)
      {
         //fRegulators[i]=variance*(nWDP[i]-fRegulators[i]*trace[i])/weightSum[i];
         fRegulators[i]=variance*nWDP[i]/(weightSum[i]+variance*trace[i]);
         if (fRegulators[i]<0) fRegulators[i]=0;
         Log()<<kDEBUG<<"R"<<i<<":"<<fRegulators[i]<<"\t";
      }
   float trainE = CalculateEstimator( Types::kTraining, 0 ) ; // estimator for training sample  //zjh
   float testE  = CalculateEstimator( Types::kTesting,  0 ) ; // estimator for test sample //zjh

   Log()<<kDEBUG<<"\n"<<"trainE:"<<trainE<<"\ttestE:"<<testE<<"\tvariance:"<<variance<<"\tgamma:"<<gamma<<Endl;

}

//_______________________________________________________________________
void TMVA::MethodMLP::GetApproxInvHessian(TMatrixD& InvHessian, bool regulate)  //zjh
{
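   // build an outer-product approximation of the Hessian from per-event synapse sensitivities
   // (H ~= sum over events of sens * sens^T), add a small diagonal term (the regulators, or
   // 1e-6) to keep it positive definite, and invert it in place; the name 'InvHessian' refers
   // to the matrix after the final Invert()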
   Int_t numSynapses=fSynapses->GetEntriesFast();
   InvHessian.ResizeTo( numSynapses, numSynapses );
   InvHessian=0;
   TMatrixD sens(numSynapses,1);
   TMatrixD sensT(1,numSynapses);
   Int_t nEvents = GetNEvents();
   for (Int_t i=0;i<nEvents;i++) {
      GetEvent(i);
      double outputValue=GetMvaValue(); // force calculation
      GetOutputNeuron()->SetError(1./fOutput->EvalDerivative(GetOutputNeuron()->GetValue()));
      CalculateNeuronDeltas();
      for (Int_t j = 0; j < numSynapses; j++){
         TSynapse* synapses = (TSynapse*)fSynapses->At(j);
         synapses->InitDelta();
         synapses->CalculateDelta();
         sens[j][0]=sensT[0][j]=synapses->GetDelta();
      }
      if (fEstimator==kMSE ) InvHessian+=sens*sensT;
      else if (fEstimator==kCE) InvHessian+=(outputValue*(1-outputValue))*sens*sensT;
   }

   // TVectorD eValue(numSynapses);
   if (regulate) {
      for (Int_t i = 0; i < numSynapses; i++){
         InvHessian[i][i]+=fRegulators[fRegulatorIdx[i]];
      }
   }
   else {
      for (Int_t i = 0; i < numSynapses; i++){
         InvHessian[i][i]+=1e-6; //to avoid precision problem that will destroy the pos-def
      }
   }

   InvHessian.Invert();

}

//_______________________________________________________________________
Double_t TMVA::MethodMLP::GetMvaValue( Double_t* errLower, Double_t* errUpper )
{
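   // when errors are requested, the variance of the network output is estimated by error
   // propagation through the approximate inverse Hessian,  sigma^2 = sens^T * H^-1 * sens,
   // and the band is obtained by evaluating the output function at median +/- sigma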
   Double_t MvaValue = MethodANNBase::GetMvaValue(); // contains back propagation

   // no Hessian (old training file) or no error requested
   if (!fCalculateErrors || errLower==0 || errUpper==0)
      return MvaValue;

   Double_t MvaUpper,MvaLower,median,variance;
   Int_t numSynapses=fSynapses->GetEntriesFast();
   if (fInvHessian.GetNcols()!=numSynapses) {
      Log() << kWARNING << "inconsistent dimension " << fInvHessian.GetNcols() << " vs " << numSynapses << Endl;
   }
   TMatrixD sens(numSynapses,1);
   TMatrixD sensT(1,numSynapses);
   GetOutputNeuron()->SetError(1./fOutput->EvalDerivative(GetOutputNeuron()->GetValue()));
   //GetOutputNeuron()->SetError(1.);
   CalculateNeuronDeltas();
   for (Int_t i = 0; i < numSynapses; i++){
      TSynapse* synapses = (TSynapse*)fSynapses->At(i);
      synapses->InitDelta();
      synapses->CalculateDelta();
      sensT[0][i]=synapses->GetDelta();
   }
   sens.Transpose(sensT);
   TMatrixD sig=sensT*fInvHessian*sens;
   variance=sig[0][0];
   median=GetOutputNeuron()->GetValue();

   if (variance<0) {
     Log()<<kWARNING<<"Negative variance!!! median=" << median << "\tvariance(sigma^2)=" << variance <<Endl;
     variance=0;
   }
   variance=sqrt(variance);

   //upper
   MvaUpper=fOutput->Eval(median+variance);
   if(errUpper)
      *errUpper=MvaUpper-MvaValue;

   //lower
   MvaLower=fOutput->Eval(median-variance);
   if(errLower)
      *errLower=MvaValue-MvaLower;

   return MvaValue;
}


#ifdef MethodMLP_UseMinuit__

//______________________________________________________________________________
void TMVA::MethodMLP::MinuitMinimize()
{
   // minimize using Minuit
   fNumberOfWeights = fSynapses->GetEntriesFast();

   TFitter* tfitter = new TFitter( fNumberOfWeights );

   // minuit-specific settings
   Double_t args[10];

   // output level
   args[0] = 2; // put to 0 for results only, or to -1 for no garbage
   tfitter->ExecuteCommand( "SET PRINTOUT", args, 1 );
   tfitter->ExecuteCommand( "SET NOWARNINGS", args, 0 );

   double w[54];

   // init parameters
   for (Int_t ipar=0; ipar < fNumberOfWeights; ipar++) {
      TString parName = Form("w%i", ipar);
      tfitter->SetParameter( ipar,
                             parName, w[ipar], 0.1, 0, 0 );
   }

   // define the FCN function
   tfitter->SetFCN( &IFCN );

   // define fit strategy
   args[0] = 2;
   tfitter->ExecuteCommand( "SET STRATEGY", args, 1 );

   // now do the fit !
   args[0] = 1e-04;
   tfitter->ExecuteCommand( "MIGRAD", args, 1 );

   Bool_t doBetter     = kFALSE;
   Bool_t doEvenBetter = kFALSE;
   if (doBetter) {
      args[0] = 1e-04;
      tfitter->ExecuteCommand( "IMPROVE", args, 1 );

      if (doEvenBetter) {
         args[0] = 500;
         tfitter->ExecuteCommand( "MINOS", args, 1 );
      }
   }
}

//_______________________________________________________________________
void TMVA::MethodMLP::IFCN( Int_t& npars, Double_t* grad, Double_t &f, Double_t* fitPars, Int_t iflag )
{
   // Evaluate the minimisation function ----------------------------------------------------
   //
   //  Input parameters:
   //    npars:   number of currently variable parameters
   //             CAUTION: this is not (necessarily) the dimension of the fitPars vector !
   //    fitPars: array of (constant and variable) parameters
   //    iflag:   indicates what is to be calculated (see example below)
   //    grad:    array of gradients
   //
   //  Output parameters:
   //    f:       the calculated function value.
   //    grad:    the (optional) vector of first derivatives).
   // ---------------------------------------------------------------------------------------
   ((MethodMLP*)GetThisPtr())->FCN( npars, grad, f, fitPars, iflag );
}

TTHREAD_TLS(Int_t) nc   = 0;
TTHREAD_TLS(double) minf = 1000000;

void TMVA::MethodMLP::FCN( Int_t& npars, Double_t* grad, Double_t &f, Double_t* fitPars, Int_t iflag )
{
   // first update the weights
   for (Int_t ipar=0; ipar<fNumberOfWeights; ipar++) {
      TSynapse* synapse = (TSynapse*)fSynapses->At(ipar);
      synapse->SetWeight(fitPars[ipar]);
   }

   // now compute the estimator
   f = CalculateEstimator();

   nc++;
   if (f < minf) minf = f;
   for (Int_t ipar=0; ipar<fNumberOfWeights; ipar++) Log() << kDEBUG << fitPars[ipar] << " ";
   Log() << kDEBUG << Endl;
   Log() << kDEBUG << "***** New estimator: " << f << "  min: " << minf << " --> ncalls: " << nc << Endl;
}

//_______________________________________________________________________
TMVA::MethodMLP* TMVA::MethodMLP::GetThisPtr()
{
   // global "this" pointer to be used in minuit
   return fgThis;
}

#endif


//_______________________________________________________________________
void TMVA::MethodMLP::MakeClassSpecific( std::ostream& fout, const TString& className ) const
{
   // write specific classifier response
   MethodANNBase::MakeClassSpecific(fout, className);
}

//_______________________________________________________________________
void TMVA::MethodMLP::GetHelpMessage() const
{
   // get help message text
   //
   // typical length of text line:
   //         "|--------------------------------------------------------------|"
   TString col    = gConfig().WriteOptionsReference() ? TString() : gTools().Color("bold");
   TString colres = gConfig().WriteOptionsReference() ? TString() : gTools().Color("reset");

   Log() << Endl;
   Log() << col << "--- Short description:" << colres << Endl;
   Log() << Endl;
   Log() << "The MLP artificial neural network (ANN) is a traditional feed-" << Endl;
   Log() << "forward multilayer perceptron implementation. The MLP has a user-" << Endl;
   Log() << "defined hidden layer architecture, while the number of input (output)" << Endl;
   Log() << "nodes is determined by the input variables (output classes, i.e., " << Endl;
   Log() << "signal and one background). " << Endl;
   Log() << Endl;
   Log() << col << "--- Performance optimisation:" << colres << Endl;
   Log() << Endl;
   Log() << "Neural networks are stable and performing for a large variety of " << Endl;
   Log() << "linear and non-linear classification problems. However, in contrast" << Endl;
   Log() << "to (e.g.) boosted decision trees, the user is advised to reduce the " << Endl;
   Log() << "number of input variables that have only little discrimination power. " << Endl;
   Log() << "" << Endl;
   Log() << "In the tests we have carried out so far, the MLP and ROOT networks" << Endl;
   Log() << "(TMlpANN, interfaced via TMVA) performed equally well, with however" << Endl;
   Log() << "a clear speed advantage for the MLP. The Clermont-Ferrand neural " << Endl;
   Log() << "net (CFMlpANN) exhibited worse classification performance in these" << Endl;
   Log() << "tests, which is partly due to the slow convergence of its training" << Endl;
   Log() << "(at least 10k training cycles are required to achieve approximately" << Endl;
   Log() << "competitive results)." << Endl;
   Log() << Endl;
   Log() << col << "Overtraining: " << colres
         << "only the TMlpANN performs an explicit separation of the" << Endl;
   Log() << "full training sample into independent training and validation samples." << Endl;
   Log() << "We have found that in most high-energy physics applications the " << Endl;
   Log() << "available degrees of freedom (training events) are sufficient to " << Endl;
   Log() << "constrain the weights of the relatively simple architectures required" << Endl;
   Log() << "to achieve good performance. Hence no overtraining should occur, and " << Endl;
   Log() << "the use of validation samples would only reduce the available training" << Endl;
   Log() << "information. However, if the performance on the training sample is " << Endl;
   Log() << "found to be significantly better than the one found with the inde-" << Endl;
   Log() << "pendent test sample, caution is needed. The results for these samples " << Endl;
   Log() << "are printed to standard output at the end of each training job." << Endl;
   Log() << Endl;
   Log() << col << "--- Performance tuning via configuration options:" << colres << Endl;
   Log() << Endl;
   Log() << "The hidden layer architecture for all ANNs is defined by the option" << Endl;
   Log() << "\"HiddenLayers=N+1,N,...\", where the first hidden layer has N+1" << Endl;
   Log() << "neurons and the second N neurons (and so on), and where N is the number  " << Endl;
   Log() << "of input variables. Excessive numbers of hidden layers should be avoided," << Endl;
   Log() << "in favour of more neurons in the first hidden layer." << Endl;
   Log() << "" << Endl;
   Log() << "The number of cycles should be above 500. As said, if the number of" << Endl;
   Log() << "adjustable weights is small compared to the training sample size," << Endl;
   Log() << "using a large number of training samples should not lead to overtraining." << Endl;
}
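
// Illustrative booking string (kept as a comment, not part of the class): an
// option set consistent with the advice printed above. The factory pointer name
// is hypothetical and the exact BookMethod signature depends on the ROOT/TMVA
// version in use.
//
//    factory->BookMethod( TMVA::Types::kMLP, "MLP",
//                         "HiddenLayers=N+1,N:NCycles=600" );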
