// @(#)root/tmva $Id: MethodLD.cxx 38609 2011-03-24 16:06:32Z evt $
// Author: Krzysztof Danielowski, Kamil Kraszewski, Maciej Kruk, Jan Therhaag

/**********************************************************************************
 * Project: TMVA - a Root-integrated toolkit for multivariate data analysis       *
 * Package: TMVA                                                                  *
 * Class  : MethodLD                                                              *
 * Web    : http://tmva.sourceforge.net                                           *
 *                                                                                *
 * Description:                                                                   *
 *      Linear Discriminant - Simple Linear Regression and Classification         *
 *                                                                                *
 * Authors (alphabetical):                                                        *
 *      Krzysztof Danielowski   <danielow@cern.ch>      - IFJ PAN & AGH, Poland   *
 *      Kamil Kraszewski        <kalq@cern.ch>          - IFJ PAN & UJ, Poland    *
 *      Maciej Kruk             <mkruk@cern.ch>         - IFJ PAN & AGH, Poland   *
 *      Jan Therhaag            <therhaag@physik.uni-bonn.de> - Uni Bonn, Germany *
 *                                                                                *
 * Copyright (c) 2005-2011:                                                       *
 *      CERN, Switzerland                                                         *
 *      PAN, Poland                                                               *
 *      U. of Bonn, Germany                                                       *
 *                                                                                *
 * Redistribution and use in source and binary forms, with or without             *
 * modification, are permitted according to the terms listed in LICENSE           *
 * (http://tmva.sourceforge.net/LICENSE)                                          *
 *                                                                                *
 **********************************************************************************/

#include <iomanip>

#include "TMath.h"
#include "Riostream.h"
#include "TMatrix.h"
#include "TMatrixD.h"

#include "TMVA/VariableTransformBase.h"
#include "TMVA/MethodLD.h"
#include "TMVA/Tools.h"
#include "TMVA/Ranking.h"
#include "TMVA/Types.h"
#include "TMVA/PDF.h"
#include "TMVA/ClassifierFactory.h"

REGISTER_METHOD(LD)

ClassImp(TMVA::MethodLD)

//_______________________________________________________________________
TMVA::MethodLD::MethodLD( const TString& jobName,
                          const TString& methodTitle,
                          DataSetInfo& dsi,
                          const TString& theOption,
                          TDirectory* theTargetDir ) :
   MethodBase( jobName, Types::kLD, methodTitle, dsi, theOption, theTargetDir ),
   fNRegOut   ( 0 ),
   fSumMatx   ( 0 ),
   fSumValMatx( 0 ),
   fCoeffMatx ( 0 ),
   fLDCoeff   ( 0 )
{
   // standard constructor for the LD
}

//_______________________________________________________________________
TMVA::MethodLD::MethodLD( DataSetInfo& theData, const TString& theWeightFile, TDirectory* theTargetDir )
   : MethodBase( Types::kLD, theData, theWeightFile, theTargetDir ),
     fNRegOut   ( 0 ),
     fSumMatx   ( 0 ),
     fSumValMatx( 0 ),
     fCoeffMatx ( 0 ),
     fLDCoeff   ( 0 )
{
   // constructor from weight file
}

//_______________________________________________________________________
void TMVA::MethodLD::Init( void )
{
   // default initialization called by all constructors

   if (DataInfo().GetNTargets() != 0) fNRegOut = DataInfo().GetNTargets();
   else                               fNRegOut = 1;

   fLDCoeff = new vector< vector< Double_t >* >(fNRegOut);
   for (Int_t iout = 0; iout<fNRegOut; iout++) (*fLDCoeff)[iout] = new std::vector<Double_t>( GetNvar()+1 );

   // the minimum requirement to declare an event signal-like
   SetSignalReferenceCut( 0.0 );
}

//_______________________________________________________________________
TMVA::MethodLD::~MethodLD( void )
{
   // destructor
   if (fSumMatx)    { delete fSumMatx;    fSumMatx    = 0; }
   if (fSumValMatx) { delete fSumValMatx; fSumValMatx = 0; }
   if (fCoeffMatx)  { delete fCoeffMatx;  fCoeffMatx  = 0; }
   if (fLDCoeff) {
      for (vector< vector< Double_t >* >::iterator vi=fLDCoeff->begin(); vi!=fLDCoeff->end(); vi++)
         if (*vi) { delete *vi; *vi = 0; }
      delete fLDCoeff; fLDCoeff = 0;
   }
}

//_______________________________________________________________________
Bool_t TMVA::MethodLD::HasAnalysisType( Types::EAnalysisType type, UInt_t numberClasses, UInt_t numberTargets )
{
   // LD can handle classification with 2 classes and regression with one regression-target
   if      (type == Types::kClassification && numberClasses == 2) return kTRUE;
   else if (type == Types::kRegression     && numberTargets == 1) {
     Log() << "regression with " << numberTargets << " targets.";
     return kTRUE;
   }
   else return kFALSE;
}


//_______________________________________________________________________
void TMVA::MethodLD::Train( void )
{
   // compute fSumMatx
   GetSum();

   // compute fSumValMatx
   GetSumVal();

   // compute fCoeffMatx and fLDCoeff
   GetLDCoeff();

   // nice output
   PrintCoefficients();
}
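
// Usage sketch (illustrative, not part of this file): the training sequence
// above is normally triggered by booking LD through the standard TMVA Factory.
// The file name and option strings below are assumptions for the example.
//
//    TFile* outputFile = TFile::Open( "TMVA.root", "RECREATE" );
//    TMVA::Factory factory( "TMVAClassification", outputFile, "AnalysisType=Classification" );
//    // ... declare variables and input trees via AddVariable/AddSignalTree/AddBackgroundTree ...
//    factory.BookMethod( TMVA::Types::kLD, "LD", "!H:!V" );
//    factory.TrainAllMethods();   // eventually calls MethodLD::Train()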

//_______________________________________________________________________
Double_t TMVA::MethodLD::GetMvaValue( Double_t* err, Double_t* errUpper )
{
   // Returns the MVA classification output
   const Event* ev = GetEvent();

   if (fRegressionReturnVal == NULL) fRegressionReturnVal = new vector< Float_t >();
   fRegressionReturnVal->resize( fNRegOut );
   
   for (Int_t iout = 0; iout<fNRegOut; iout++) {
      (*fRegressionReturnVal)[iout] = (*(*fLDCoeff)[iout])[0] ;

      int icoeff=0;
      for (std::vector<Float_t>::const_iterator it = ev->GetValues().begin();it!=ev->GetValues().end();++it){
         (*fRegressionReturnVal)[iout] += (*(*fLDCoeff)[iout])[++icoeff] * (*it);
      }
   }

   // cannot determine error
   NoErrorCalc(err, errUpper);

   return (*fRegressionReturnVal)[0];
}

//_______________________________________________________________________
const std::vector< Float_t >& TMVA::MethodLD::GetRegressionValues()
{
   // Calculates the regression output
   const Event* ev = GetEvent();

   if (fRegressionReturnVal == NULL) fRegressionReturnVal = new vector< Float_t >();
   fRegressionReturnVal->resize( fNRegOut );
   
   for (Int_t iout = 0; iout<fNRegOut; iout++) {
      (*fRegressionReturnVal)[iout] = (*(*fLDCoeff)[iout])[0] ;

      int icoeff = 0;      
      for (std::vector<Float_t>::const_iterator it = ev->GetValues().begin();it!=ev->GetValues().end();++it){
         (*fRegressionReturnVal)[iout] += (*(*fLDCoeff)[iout])[++icoeff] * (*it);
      }
   }

   // perform inverse transformation 
   Event* evT = new Event(*ev);
   for (Int_t iout = 0; iout<fNRegOut; iout++) evT->SetTarget(iout,(*fRegressionReturnVal)[iout]);

   const Event* evT2 = GetTransformationHandler().InverseTransform( evT );
   fRegressionReturnVal->clear();
   for (Int_t iout = 0; iout<fNRegOut; iout++) fRegressionReturnVal->push_back(evT2->GetTarget(iout));

   delete evT;
   return (*fRegressionReturnVal);
}
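
// Application sketch (illustrative): at analysis time these regression values
// are normally retrieved through the TMVA::Reader; the variable names and the
// weight-file path below are assumptions for the example.
//
//    TMVA::Reader reader( "!Color:!Silent" );
//    Float_t var1, var2;
//    reader.AddVariable( "var1", &var1 );
//    reader.AddVariable( "var2", &var2 );
//    reader.BookMVA( "LD", "weights/TMVARegression_LD.weights.xml" );
//    var1 = 1.0; var2 = 2.0;   // fill with the current event's values
//    Float_t target = reader.EvaluateRegression( "LD" )[0];   // calls GetRegressionValues()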

//_______________________________________________________________________
void TMVA::MethodLD::InitMatrices( void )
{
   // Initialization method; creates global matrices and vectors

   fSumMatx    = new TMatrixD( GetNvar()+1, GetNvar()+1 );
   fSumValMatx = new TMatrixD( GetNvar()+1, fNRegOut );
   fCoeffMatx  = new TMatrixD( GetNvar()+1, fNRegOut );
}
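
// Note: the "+1" in all dimensions accounts for the constant (offset) term of
// the linear model, i.e. each event is implicitly extended by a leading
// coordinate that is always 1.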

//_______________________________________________________________________
void TMVA::MethodLD::GetSum( void )
{
   // Calculates the matrix transposed(X)*W*X, with W being the diagonal weight
   // matrix and X the coordinate values
   const UInt_t nvar = DataInfo().GetNVariables();

   for (UInt_t ivar = 0; ivar<=nvar; ivar++)
      for (UInt_t jvar = 0; jvar<=nvar; jvar++) (*fSumMatx)( ivar, jvar ) = 0;

   // compute sample means
   Long64_t nevts = Data()->GetNEvents();
   for (Int_t ievt=0; ievt<nevts; ievt++) {
      const Event * ev = GetEvent(ievt);
      Double_t weight = ev->GetWeight();

      if (IgnoreEventsWithNegWeightsInTraining() && weight <= 0) continue;

      // Sum of weights
      (*fSumMatx)( 0, 0 ) += weight;

      // Sum of coordinates
      for (UInt_t ivar=0; ivar<nvar; ivar++) {
         (*fSumMatx)( ivar+1, 0 ) += ev->GetValue( ivar ) * weight;
         (*fSumMatx)( 0, ivar+1 ) += ev->GetValue( ivar ) * weight;
      }

      // Sum of products of coordinates
      for (UInt_t ivar=0; ivar<nvar; ivar++)
         for (UInt_t jvar=0; jvar<nvar; jvar++)
            (*fSumMatx)( ivar+1, jvar+1 ) += ev->GetValue( ivar ) * ev->GetValue( jvar ) * weight;
   }
}
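
// In matrix form, writing each event as the augmented row vector
// x~ = (1, x_1, ..., x_nvar) and W for the diagonal matrix of event weights,
// the loop above accumulates
//
//    fSumMatx = X~^T * W * X~ ,
//
// so that entry (0,0) holds the sum of weights, the first row/column the
// weighted sums of the single variables, and the inner block the weighted
// sums of the products x_i * x_j.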

//_______________________________________________________________________
void TMVA::MethodLD::GetSumVal( void )
{
   // Calculates the vector transposed(X)*W*Y, with Y being the target vector
   const UInt_t nvar = DataInfo().GetNVariables();

   for (Int_t ivar = 0; ivar<fNRegOut; ivar++)
      for (UInt_t jvar = 0; jvar<=nvar; jvar++)
         (*fSumValMatx)(jvar,ivar) = 0;

   // Sum of coordinates multiplied by values
   for (Int_t ievt=0; ievt<Data()->GetNEvents(); ievt++) {

      // retrieve the event
      const Event* ev = GetEvent(ievt);
      Double_t weight = ev->GetWeight();

      // in case events with negative weights are to be ignored
      if (IgnoreEventsWithNegWeightsInTraining() && weight <= 0) continue; 

      for (Int_t ivar=0; ivar<fNRegOut; ivar++) {

         Double_t val = weight;

         if (!DoRegression())
            val *= DataInfo().IsSignal(ev);
         else //for regression
            val *= ev->GetTarget( ivar ); 

         (*fSumValMatx)( 0,ivar ) += val; 
         for (UInt_t jvar=0; jvar<nvar; jvar++) 
            (*fSumValMatx)(jvar+1,ivar ) += ev->GetValue(jvar) * val;
      }
   }
}
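
// Correspondingly, this routine accumulates
//
//    fSumValMatx = X~^T * W * Y ,
//
// where the per-event "target" y is the signal indicator (1 for signal,
// 0 for background) in classification mode, and the regression target(s)
// in regression mode.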

//_______________________________________________________________________
void TMVA::MethodLD::GetLDCoeff( void )
{
   // Calculates the coefficients used for classification/regression
   const UInt_t nvar = DataInfo().GetNVariables();

   for (Int_t ivar = 0; ivar<fNRegOut; ivar++){
      TMatrixD invSum( *fSumMatx );
      const Double_t det = TMath::Abs( invSum.Determinant() );
      if (det < 10E-24) {
         Log() << kWARNING << "<GetCoeff> matrix is almost singular with determinant="
               << det
               << "; did you use variables that are linear combinations or highly correlated?"
               << Endl;
      }
      if (det < 10E-120) {
         Log() << kFATAL << "<GetCoeff> matrix is singular with determinant="
               << det
               << "; did you use variables that are linear combinations?"
               << Endl;
      }
      invSum.Invert();

      if (fCoeffMatx) delete fCoeffMatx; // do not leak the matrix from a previous output
      fCoeffMatx = new TMatrixD( invSum * (*fSumValMatx));
      for (UInt_t jvar = 0; jvar<nvar+1; jvar++) {
         (*(*fLDCoeff)[ivar])[jvar] = (*fCoeffMatx)(jvar, ivar );
      }
      if (!DoRegression()) {
         (*(*fLDCoeff)[ivar])[0] = 0.0;
         for (UInt_t jvar = 1; jvar<nvar+1; jvar++)
            (*(*fLDCoeff)[ivar])[0] += (*fCoeffMatx)(jvar,ivar)*(*fSumMatx)(0,jvar)/(*fSumMatx)( 0, 0 );
         (*(*fLDCoeff)[ivar])[0] /= -2.0;
      }
      
   }
}
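
// Together, GetSum() and GetSumVal() provide the weighted least-squares normal
// equations solved here:
//
//    (X~^T W X~) * c = X~^T W Y   =>   c = (X~^T W X~)^{-1} * X~^T W Y .
//
// For a 0/1 class label this direction is (up to offset and scale) the Fisher
// discriminant. In classification mode the offset is then redefined as
// c_0 = -1/2 * sum_j c_j * <x_j>, with <x_j> the weighted sample mean
// fSumMatx(0,j)/fSumMatx(0,0), so that the decision boundary sits at LD = 0,
// consistent with the reference cut set in Init().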

//_______________________________________________________________________
void  TMVA::MethodLD::ReadWeightsFromStream( istream& istr )
{
   // read LD coefficients from weight file
   for (Int_t iout=0; iout<fNRegOut; iout++)
      for (UInt_t icoeff=0; icoeff<GetNvar()+1; icoeff++)
         istr >> (*(*fLDCoeff)[iout])[icoeff];
}
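
// The stream is thus expected to hold fNRegOut*(GetNvar()+1) whitespace-separated
// values, ordered output-by-output with the offset coefficient first for each
// output, presumably the order used when the text weight file was written.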

//_______________________________________________________________________
void TMVA::MethodLD::AddWeightsXMLTo( void* parent ) const 
{
   // create XML description for LD classification and regression 
   // (for arbitrary number of output classes/targets)

   void* wght = gTools().AddChild(parent, "Weights");
   gTools().AddAttr( wght, "NOut",   fNRegOut    );
   gTools().AddAttr( wght, "NCoeff", GetNvar()+1 );
   for (Int_t iout=0; iout<fNRegOut; iout++) {
      for (UInt_t icoeff=0; icoeff<GetNvar()+1; icoeff++) {
         void* coeffxml = gTools().AddChild( wght, "Coefficient" );
         gTools().AddAttr( coeffxml, "IndexOut",   iout   );
         gTools().AddAttr( coeffxml, "IndexCoeff", icoeff );
         gTools().AddAttr( coeffxml, "Value",      (*(*fLDCoeff)[iout])[icoeff] );
      }
   }
}
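
// The resulting XML fragment has the following shape (coefficient values are
// illustrative):
//
//    <Weights NOut="1" NCoeff="3">
//      <Coefficient IndexOut="0" IndexCoeff="0" Value="-1.234"/>
//      <Coefficient IndexOut="0" IndexCoeff="1" Value="0.567"/>
//      <Coefficient IndexOut="0" IndexCoeff="2" Value="0.089"/>
//    </Weights>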
  
//_______________________________________________________________________
void TMVA::MethodLD::ReadWeightsFromXML( void* wghtnode ) 
{
   // read coefficients from xml weight file
   UInt_t ncoeff;
   gTools().ReadAttr( wghtnode, "NOut",   fNRegOut );
   gTools().ReadAttr( wghtnode, "NCoeff", ncoeff   );
   
   // sanity checks
   if (ncoeff != GetNvar()+1) Log() << kFATAL << "Mismatch in number of output variables/coefficients: " 
                                      << ncoeff << " != " << GetNvar()+1 << Endl;

   // create vector with coefficients (double vector due to arbitrary output dimension)
   if (fLDCoeff) { 
      for (vector< vector< Double_t >* >::iterator vi=fLDCoeff->begin(); vi!=fLDCoeff->end(); vi++)
         if (*vi) { delete *vi; *vi = 0; }
      delete fLDCoeff; fLDCoeff = 0;
   }
   fLDCoeff = new vector< vector< Double_t >* >(fNRegOut);
   for (Int_t ivar = 0; ivar<fNRegOut; ivar++) (*fLDCoeff)[ivar] = new std::vector<Double_t>( ncoeff );

   void* ch = gTools().GetChild(wghtnode);
   Double_t coeff;
   Int_t iout, icoeff;
   while (ch) {
      gTools().ReadAttr( ch, "IndexOut",   iout   );
      gTools().ReadAttr( ch, "IndexCoeff", icoeff );
      gTools().ReadAttr( ch, "Value",      coeff  );

      (*(*fLDCoeff)[iout])[icoeff] = coeff;

      ch = gTools().GetNextChild(ch);
   }
}

//_______________________________________________________________________
void TMVA::MethodLD::MakeClassSpecific( std::ostream& fout, const TString& className ) const
{
   // write LD-specific classifier response
   fout << "   std::vector<double> fLDCoefficients;" << endl;
   fout << "};" << endl;
   fout << "" << endl;
   fout << "inline void " << className << "::Initialize() " << endl;
   fout << "{" << endl;
   for (UInt_t ivar=0; ivar<GetNvar()+1; ivar++) {
      Int_t dp = fout.precision();
      fout << "   fLDCoefficients.push_back( "
           << std::setprecision(12) << (*(*fLDCoeff)[0])[ivar]
           << std::setprecision(dp) << " );" << endl;
   }
   fout << endl;
   fout << "   // sanity check" << endl;
   fout << "   if (fLDCoefficients.size() != fNvars+1) {" << endl;
   fout << "      std::cout << \"Problem in class \\\"\" << fClassName << \"\\\"::Initialize: mismatch in number of input values\"" << endl;
   fout << "                << fLDCoefficients.size() << \" != \" << fNvars+1 << std::endl;" << endl;
   fout << "      fStatusIsClean = false;" << endl;
   fout << "   }         " << endl;
   fout << "}" << endl;
   fout << endl;
   fout << "inline double " << className << "::GetMvaValue__( const std::vector<double>& inputValues ) const" << endl;
   fout << "{" << endl;
   fout << "   double retval = fLDCoefficients[0];" << endl;
   fout << "   for (size_t ivar = 1; ivar < fNvars+1; ivar++) {" << endl;
   fout << "      retval += fLDCoefficients[ivar]*inputValues[ivar-1];" << endl;
   fout << "   }" << endl;
   fout << endl;
   fout << "   return retval;" << endl;
   fout << "}" << endl;
   fout << endl;
   fout << "// Clean up" << endl;
   fout << "inline void " << className << "::Clear() " << endl;
   fout << "{" << endl;
   fout << "   // clear coefficients" << endl;
   fout << "   fLDCoefficients.clear(); " << endl;
   fout << "}" << endl;
}
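
// The code emitted above becomes part of the standalone response class that
// MethodBase writes to weights/<JobName>_LD.class.C. A typical standalone
// application, assuming the conventional generated class name ReadLD and
// illustrative variable names, looks like:
//
//    #include "weights/TMVAClassification_LD.class.C"
//
//    std::vector<std::string> inputVars;
//    inputVars.push_back( "var1" );
//    inputVars.push_back( "var2" );
//    ReadLD ld( inputVars );                  // checks names against the training setup
//    std::vector<double> values;
//    values.push_back( 0.5 );
//    values.push_back( -1.2 );
//    double mva = ld.GetMvaValue( values );   // evaluates the linear response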
//_______________________________________________________________________
const TMVA::Ranking* TMVA::MethodLD::CreateRanking()
{
   // computes ranking of input variables

   // create the ranking object
   fRanking = new Ranking( GetName(), "Discr. power" );

   for (UInt_t ivar=0; ivar<GetNvar(); ivar++) {
      fRanking->AddRank( Rank( GetInputLabel(ivar), TMath::Abs((* (*fLDCoeff)[0])[ivar+1] )) );
   }

   return fRanking;
}

//_______________________________________________________________________
void TMVA::MethodLD::DeclareOptions()
{
   // MethodLD options
   AddPreDefVal(TString("LD"));
}

//_______________________________________________________________________
void TMVA::MethodLD::ProcessOptions()
{
   // this is the preparation for training
   if (HasTrainingTree()) InitMatrices();
}

//_______________________________________________________________________
void TMVA::MethodLD::PrintCoefficients( void ) 
{
   // Display the classification/regression coefficients for each variable
   Log() << kINFO << "Results for LD coefficients:" << Endl;

   if (GetTransformationHandler().GetTransformationList().GetSize() != 0) {
      Log() << kINFO << "NOTE: The coefficients must be applied to TRANFORMED variables" << Endl;
      Log() << kINFO << "      List of the transformation: " << Endl;
      TListIter trIt(&GetTransformationHandler().GetTransformationList());
      while (VariableTransformBase *trf = (VariableTransformBase*) trIt() ) {
         Log() << kINFO << "  -- " << trf->GetName() << Endl;
      }
   }
   std::vector<TString>  vars;
   std::vector<Double_t> coeffs;
   for (UInt_t ivar=0; ivar<GetNvar(); ivar++) {
      vars  .push_back( GetInputLabel(ivar) );
      coeffs.push_back( (* (*fLDCoeff)[0])[ivar+1] );
   }
   vars  .push_back( "(offset)" );
   coeffs.push_back((* (*fLDCoeff)[0])[0] );
   TMVA::gTools().FormattedOutput( coeffs, vars, "Variable" , "Coefficient", Log() );
   if (IsNormalised()) {
      Log() << kINFO << "NOTE: You have chosen to use the \"Normalise\" booking option. Hence, the" << Endl;
      Log() << kINFO << "      coefficients must be applied to NORMALISED (') variables as follows:" << Endl;
      Int_t maxL = 0;
      for (UInt_t ivar=0; ivar<GetNvar(); ivar++) if (GetInputLabel(ivar).Length() > maxL) maxL = GetInputLabel(ivar).Length();

      // Print normalisation expression (see Tools.cxx): "2*(x - xmin)/(xmax - xmin) - 1.0"
      for (UInt_t ivar=0; ivar<GetNvar(); ivar++) {
         Log() << kINFO 
                 << setw(maxL+9) << TString("[") + GetInputLabel(ivar) + "]' = 2*(" 
                 << setw(maxL+2) << TString("[") + GetInputLabel(ivar) + "]"
                 << setw(3) << (GetXmin(ivar) > 0 ? " - " : " + ")
                 << setw(6) << TMath::Abs(GetXmin(ivar)) << setw(3) << ")/"
                 << setw(6) << (GetXmax(ivar) -  GetXmin(ivar) )
                 << setw(3) << " - 1"
                 << Endl;
      }
      Log() << kINFO << "The TMVA Reader will properly account for this normalisation, but if the" << Endl;
      Log() << kINFO << "LD classifier is applied outside the Reader, the transformation must be" << Endl;
      Log() << kINFO << "implemented -- or the \"Normalise\" option is removed and LD retrained." << Endl;
      Log() << kINFO << Endl;
   }
}
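
// Worked example of the printed normalisation: for a variable with xmin = 0
// and xmax = 100, an input value x = 25 maps to
//
//    x' = 2*(25 - 0)/(100 - 0) - 1 = -0.5 ,
//
// and it is x' (not x) that must be multiplied by the printed coefficient.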

//_______________________________________________________________________
void TMVA::MethodLD::GetHelpMessage() const
{
   // get help message text
   //
   // typical length of text line: 
   //         "|--------------------------------------------------------------|"
   Log() << Endl;
   Log() << gTools().Color("bold") << "--- Short description:" << gTools().Color("reset") << Endl;
   Log() << Endl;
   Log() << "Linear discriminants select events by distinguishing the mean " << Endl;
   Log() << "values of the signal and background distributions in a trans- " << Endl;
   Log() << "formed variable space where linear correlations are removed." << Endl;
   Log() << "The LD implementation here is equivalent to the \"Fisher\" discriminant" << Endl;
   Log() << "for classification, but also provides linear regression." << Endl;
   Log() << Endl;
   Log() << "   (More precisely: the \"linear discriminator\" determines" << Endl;
   Log() << "    an axis in the (correlated) hyperspace of the input " << Endl;
   Log() << "    variables such that, when projecting the output classes " << Endl;
   Log() << "    (signal and background) upon this axis, they are pushed " << Endl;
   Log() << "    as far as possible away from each other, while events" << Endl;
   Log() << "    of a same class are confined in a close vicinity. The  " << Endl;
   Log() << "    linearity property of this classifier is reflected in the " << Endl;
   Log() << "    metric with which \"far apart\" and \"close vicinity\" are " << Endl;
   Log() << "    determined: the covariance matrix of the discriminating" << Endl;
   Log() << "    variable space.)" << Endl;
   Log() << Endl;
   Log() << gTools().Color("bold") << "--- Performance optimisation:" << gTools().Color("reset") << Endl;
   Log() << Endl;
   Log() << "Optimal performance for the linear discriminant is obtained for " << Endl;
   Log() << "linearly correlated Gaussian-distributed variables. Any deviation" << Endl;
   Log() << "from this ideal reduces the achievable separation power. In " << Endl;
   Log() << "particular, no discrimination at all is achieved for a variable" << Endl;
   Log() << "that has the same sample mean for signal and background, even if " << Endl;
   Log() << "the shapes of the distributions are very different. Thus, the linear " << Endl;
   Log() << "discriminant often benefits from a suitable transformation of the " << Endl;
   Log() << "input variables. For example, if a variable x in [-1,1] has a " << Endl;
   Log() << "a parabolic signal distributions, and a uniform background" << Endl;
   Log() << "distributions, their mean value is zero in both cases, leading " << Endl;
   Log() << "to no separation. The simple transformation x -> |x| renders this " << Endl;
   Log() << "variable powerful for the use in a linear discriminant." << Endl;
   Log() << Endl;
   Log() << gTools().Color("bold") << "--- Performance tuning via configuration options:" << gTools().Color("reset") << Endl;
   Log() << Endl;
   Log() << "<None>" << Endl;
}