// @(#)root/tmva $Id: MethodBDT.cxx 31458 2009-11-30 13:58:20Z stelzer $
// Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss

/**********************************************************************************
 * Project: TMVA - a Root-integrated toolkit for multivariate data analysis       *
 * Package: TMVA                                                                  *
 * Class  : MethodBDT (BDT = Boosted Decision Trees)                              *
 * Web    : http://tmva.sourceforge.net                                           *
 *                                                                                *
 * Description:                                                                   *
 *      Analysis of Boosted Decision Trees                                        *
 *                                                                                *
 * Authors (alphabetical):                                                        *
 *      Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland              *
 *      Helge Voss      <Helge.Voss@cern.ch>     - MPI-K Heidelberg, Germany      *
 *      Kai Voss        <Kai.Voss@cern.ch>       - U. of Victoria, Canada         *
 *      Doug Schouten   <dschoute@sfu.ca>        - Simon Fraser U., Canada        *
 *      Jan Therhaag    <jan.therhaag@cern.ch>   - U. of Bonn, Germany            *
 *                                                                                *
 * Copyright (c) 2005:                                                            *
 *      CERN, Switzerland                                                         *
 *      U. of Victoria, Canada                                                    *
 *      MPI-K Heidelberg, Germany                                                 *
 *                                                                                *
 * Redistribution and use in source and binary forms, with or without             *
 * modification, are permitted according to the terms listed in LICENSE           *
 * (http://tmva.sourceforge.net/LICENSE)                                          *
 **********************************************************************************/

//_______________________________________________________________________
//
// Analysis of Boosted Decision Trees
//
// Boosted decision trees have been successfully used in High Energy
// Physics analysis, for example by the MiniBooNE experiment
// (Yang-Roe-Zhu, physics/0508045). In Boosted Decision Trees, the
// selection is done by a majority vote on the results of several decision
// trees, which are all derived from the same training sample by
// supplying different event weights during the training.
//
// Decision trees:
//
// Successive decision nodes are used to categorize the
// events in the sample as either signal or background. Each node
// uses only a single discriminating variable to decide if the event is
// signal-like ("goes right") or background-like ("goes left"). This
// forms a tree-like structure with "baskets" at the end (leaf nodes),
// and an event is classified as either signal or background according to
// whether the basket where it ends up has been classified as signal or
// background during the training. Training a decision tree is the
// process of defining the "cut criteria" for each node. The training
// starts with the root node. Here one takes the full training event
// sample and selects the variable and corresponding cut value that give
// the best separation between signal and background at this stage. Using
// this cut criterion, the sample is then divided into two subsamples, a
// signal-like (right) and a background-like (left) sample. Two new nodes
// are then created for each of the two sub-samples and they are
// constructed using the same mechanism as described for the root
// node. The division is stopped once a certain node has reached either a
// minimum number of events, or a minimum or maximum signal purity. These
// leaf nodes are then called "signal" or "background" depending on
// whether they contain more signal or background events from the training sample.
//
// Boosting:
//
// The idea behind boosting is that signal events from the training
// sample that end up in a background node (and vice versa) are given a
// larger weight than events that are in the correct leaf node. This
// results in a re-weighted training event sample, with which a new
// decision tree can then be developed. The boosting can be applied several
// times (typically 100-500 times) and one ends up with a set of decision
// trees (a forest).
//
// Bagging:
//
// In this particular variant of the Boosted Decision Trees the boosting
// is not done on the basis of previous training results, but by a simple
// stochastic re-sampling of the initial training event sample.
//
// Random Trees:
// Similar to the "Random Forests" from Leo Breiman and Adele Cutler, it
// uses the bagging algorithm together and bases the determination of the
// best node-split during the training on a random subset of variables only
// which is individually chosen for each split.
//
// Analysis:
//
// Applying an individual decision tree to a test event results in a
// classification of the event as either signal or background. For the
// boosted decision tree selection, an event is successively subjected to
// the whole set of decision trees, and depending on how often it is
// classified as signal, a "likelihood" estimator for the event being
// signal or background is constructed. It is the value of this estimator
// that is then used to select events from an event sample, and the cut
// value on this estimator defines the efficiency and purity of the
// selection.
//
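// Usage:
//
// A typical booking of this method via the TMVA Factory looks like the
// following sketch (illustrative; the option values are examples, not
// tuned defaults):
//
//    factory->BookMethod( TMVA::Types::kBDT, "BDT",
//                         "NTrees=400:BoostType=AdaBoost:SeparationType=GiniIndex:nCuts=20" );
//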
//_______________________________________________________________________

#include <algorithm>

#include <math.h>
#include <fstream>

#include "Riostream.h"
#include "TRandom3.h"
#include "TRandom3.h"
#include "TMath.h"
#include "TObjString.h"

#include "TMVA/ClassifierFactory.h"
#include "TMVA/MethodBDT.h"
#include "TMVA/Tools.h"
#include "TMVA/Timer.h"
#include "TMVA/Ranking.h"
#include "TMVA/SdivSqrtSplusB.h"
#include "TMVA/BinarySearchTree.h"
#include "TMVA/SeparationBase.h"
#include "TMVA/GiniIndex.h"
#include "TMVA/GiniIndexWithLaplace.h"
#include "TMVA/CrossEntropy.h"
#include "TMVA/MisClassificationError.h"
#include "TMVA/Results.h"

using std::vector;

REGISTER_METHOD(BDT)

ClassImp(TMVA::MethodBDT)

const Int_t TMVA::MethodBDT::fgDebugLevel = 0;


//_______________________________________________________________________
TMVA::MethodBDT::MethodBDT( const TString& jobName,
                            const TString& methodTitle,
                            DataSetInfo& theData,
                            const TString& theOption,
                            TDirectory* theTargetDir ) :
   TMVA::MethodBase( jobName, Types::kBDT, methodTitle, theData, theOption, theTargetDir )
{
   // the standard constructor for the "boosted decision trees"
}

//_______________________________________________________________________
TMVA::MethodBDT::MethodBDT( DataSetInfo& theData,
                            const TString& theWeightFile,
                            TDirectory* theTargetDir )
   : TMVA::MethodBase( Types::kBDT, theData, theWeightFile, theTargetDir )
{
   // constructor for calculating BDT-MVA using previously generated decision trees
   // the result of the previous training (the decision trees) is read in via the
   // weight file. Make sure that the variables correspond to the ones used in
   // creating the "weight" file
}

//_______________________________________________________________________
Bool_t TMVA::MethodBDT::HasAnalysisType( Types::EAnalysisType type, UInt_t numberClasses, UInt_t numberTargets )
{
   // BDT can handle classification with 2 classes and regression with one regression-target
   if( type == Types::kClassification && numberClasses == 2 ) return kTRUE;
   if( type == Types::kRegression && numberTargets == 1 ) return kTRUE;
   return kFALSE;
}


//_______________________________________________________________________
void TMVA::MethodBDT::DeclareOptions()
{
   // define the options (their key words) that can be set in the option string
   // known options:
   // nTrees           number of trees in the forest to be created
   // BoostType        the boosting type for the trees in the forest (AdaBoost etc.)
   //                  known: AdaBoost
   //                         AdaBoostR2 (AdaBoost for regression)
   //                         Bagging
   //                         GradBoost
   // AdaBoostBeta     the boosting parameter, beta, for AdaBoost
   // UseRandomisedTrees  choose at each node splitting a random set of variables
   // UseNvars         use UseNvars variables in randomised trees
   // UseNTrainEvents  number of training events used in randomised (and bagged) trees
   // SeparationType   the separation criterion applied in the node splitting
   //                  known: GiniIndex
   //                         MisClassificationError
   //                         CrossEntropy
   //                         SDivSqrtSPlusB
   // nEventsMin:      the minimum number of events in a node (leaf criterion, stop splitting)
   // nCuts:           the number of steps in the optimisation of the cut for a node (if < 0, then
   //                  the step size is determined by the events)
   // UseYesNoLeaf     decide if the classification is done simply by the node type, or by the S/B
   //                  (from the training) in the leaf node
   // NodePurityLimit  the minimum purity to classify a node as a signal node (used in pruning and boosting to determine
   //                  the misclassification error rate)
   // UseWeightedTrees use average classification from the trees, or have the individual trees
   //                  in the forest weighted (e.g. log(boostweight) from AdaBoost)
   // PruneMethod      the pruning method:
   //                  known: NoPruning  // switch off pruning completely
   //                         ExpectedError
   //                         CostComplexity
   // PruneStrength    a parameter to adjust the amount of pruning. Should be large enough such that overtraining is avoided.
   // PruneBeforeBoost flag to prune the tree before applying the boosting algorithm
   // PruningValFraction   fraction of events to use for optimizing pruning (only if PruneStrength < 0, i.e. automatic pruning)
   // IgnoreNegWeightsInTraining  ignore negative-weight events in the training
   // NNodesMax        maximum number of nodes allowed in the tree; splitting then stops
   // MaxDepth         maximum depth of the decision tree allowed before further splitting is stopped
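   //
   // An illustrative option string combining several of the settings above
   // (example values only, not recommended defaults):
   //
   //    "NTrees=400:BoostType=AdaBoost:AdaBoostBeta=0.5:SeparationType=GiniIndex:"
   //    "nCuts=20:nEventsMin=100:MaxDepth=3:PruneMethod=CostComplexity:PruneStrength=-1"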

   DeclareOptionRef(fNTrees, "NTrees", "Number of trees in the forest");
   DeclareOptionRef(fBoostType, "BoostType", "Boosting type for the trees in the forest");
   AddPreDefVal(TString("AdaBoost"));
   AddPreDefVal(TString("Bagging"));
   AddPreDefVal(TString("RegBoost"));
   AddPreDefVal(TString("AdaBoostR2"));
   AddPreDefVal(TString("Grad"));
   if (DoRegression()) {
      fBoostType = "AdaBoostR2";
   }else{
      fBoostType = "AdaBoost";
   }
   DeclareOptionRef(fAdaBoostR2Loss="Quadratic", "AdaBoostR2Loss", "Type of loss function in AdaBoostR2 (Linear, Quadratic or Exponential)");
   AddPreDefVal(TString("Linear"));
   AddPreDefVal(TString("Quadratic"));
   AddPreDefVal(TString("Exponential"));

   DeclareOptionRef(fBaggedGradBoost=kFALSE, "UseBaggedGrad","Use only a random subsample of all events for growing the trees in each iteration. (Only valid for GradBoost)");
   DeclareOptionRef(fSampleFraction=0.6, "GradBaggingFraction","Defines the fraction of events to be used in each iteration when UseBaggedGrad=kTRUE. (Only valid for GradBoost)");
   DeclareOptionRef(fShrinkage=1.0, "Shrinkage", "Learning rate for GradBoost algorithm");
   DeclareOptionRef(fAdaBoostBeta=1.0, "AdaBoostBeta", "Parameter for AdaBoost algorithm");
   DeclareOptionRef(fRandomisedTrees,"UseRandomisedTrees","Choose at each node splitting a random set of variables");
   DeclareOptionRef(fUseNvars,"UseNvars","Number of variables used if randomised tree option is chosen");
   DeclareOptionRef(fUseNTrainEvents,"UseNTrainEvents","number of randomly picked training events used in randomised (and bagged) trees");

   DeclareOptionRef(fUseWeightedTrees=kTRUE, "UseWeightedTrees",
                    "Use weighted trees or simple average in classification from the forest");
   DeclareOptionRef(fUseYesNoLeaf=kTRUE, "UseYesNoLeaf",
                    "Use Sig or Bkg categories, or the purity=S/(S+B) as classification of the leaf node");
   if (DoRegression()) {
      fUseYesNoLeaf = kFALSE;
   }


   DeclareOptionRef(fNodePurityLimit=0.5, "NodePurityLimit", "In boosting/pruning, nodes with purity > NodePurityLimit are signal; background otherwise.");
   DeclareOptionRef(fSepTypeS, "SeparationType", "Separation criterion for node splitting");
   AddPreDefVal(TString("CrossEntropy"));
   AddPreDefVal(TString("GiniIndex"));
   AddPreDefVal(TString("GiniIndexWithLaplace"));
   AddPreDefVal(TString("MisClassificationError"));
   AddPreDefVal(TString("SDivSqrtSPlusB"));
   AddPreDefVal(TString("RegressionVariance"));
   if (DoRegression()) {
      fSepTypeS = "RegressionVariance";
   }else{
      fSepTypeS = "GiniIndex";
   }
   DeclareOptionRef(fNodeMinEvents, "nEventsMin", "Minimum number of events required in a leaf node (default: max(20, N_train/(Nvar^2)/10) ) ");
   DeclareOptionRef(fNCuts, "nCuts", "Number of steps during node cut optimisation");
   DeclareOptionRef(fPruneStrength, "PruneStrength", "Pruning strength");
   DeclareOptionRef(fPruneMethodS, "PruneMethod", "Method used for pruning (removal) of statistically insignificant branches");
   AddPreDefVal(TString("NoPruning"));
   AddPreDefVal(TString("ExpectedError"));
   AddPreDefVal(TString("CostComplexity"));
   DeclareOptionRef(fPruneBeforeBoost=kFALSE, "PruneBeforeBoost", "Flag to prune the tree before applying boosting algorithm");
   DeclareOptionRef(fFValidationEvents=0.5, "PruningValFraction", "Fraction of events to use for optimizing automatic pruning.");
   DeclareOptionRef(fNNodesMax=100000,"NNodesMax","Max number of nodes in tree");
   if (DoRegression()) {
      DeclareOptionRef(fMaxDepth=50,"MaxDepth","Max depth of the decision tree allowed");
   }else{
      DeclareOptionRef(fMaxDepth=3,"MaxDepth","Max depth of the decision tree allowed");
   }
}

//_______________________________________________________________________
void TMVA::MethodBDT::DeclareCompatibilityOptions() {
   MethodBase::DeclareCompatibilityOptions();
   DeclareOptionRef(fSampleSizeFraction=1.0,"SampleSizeFraction","Relative size of bagged event sample to original size of the data sample" );
   DeclareOptionRef(fNoNegWeightsInTraining,"NoNegWeightsInTraining","Ignore negative event weights in the training process" );
}

//_______________________________________________________________________
void TMVA::MethodBDT::ProcessOptions()
{
   // the option string is decoded, for available options see "DeclareOptions"

   fSepTypeS.ToLower();
   if      (fSepTypeS == "misclassificationerror") fSepType = new MisClassificationError();
   else if (fSepTypeS == "giniindex")              fSepType = new GiniIndex();
   else if (fSepTypeS == "giniindexwithlaplace")   fSepType = new GiniIndexWithLaplace();
   else if (fSepTypeS == "crossentropy")           fSepType = new CrossEntropy();
   else if (fSepTypeS == "sdivsqrtsplusb")         fSepType = new SdivSqrtSplusB();
   else if (fSepTypeS == "regressionvariance")     fSepType = NULL;
   else {
      Log() << kINFO << GetOptions() << Endl;
      Log() << kFATAL << "<ProcessOptions> unknown Separation Index option called" << Endl;
   }

   fPruneMethodS.ToLower();
   if      (fPruneMethodS == "expectederror")  fPruneMethod = DecisionTree::kExpectedErrorPruning;
   else if (fPruneMethodS == "costcomplexity") fPruneMethod = DecisionTree::kCostComplexityPruning;
   else if (fPruneMethodS == "nopruning")      fPruneMethod = DecisionTree::kNoPruning;
   else {
      Log() << kINFO << GetOptions() << Endl;
      Log() << kFATAL << "<ProcessOptions> unknown PruneMethod option called" << Endl;
   }
   if (fPruneStrength < 0 && (fPruneMethod != DecisionTree::kNoPruning) && fBoostType!="Grad") fAutomatic = kTRUE;
   else fAutomatic = kFALSE;
   if (fAutomatic && fPruneMethod==DecisionTree::kExpectedErrorPruning){
      Log() << kFATAL
            << "Sorry, automatic pruning strength determination is not implemented yet for ExpectedErrorPruning" << Endl;
   }
   fAdaBoostR2Loss.ToLower();
   
   if (fBoostType!="Grad") fBaggedGradBoost=kFALSE;
   else fPruneMethod = DecisionTree::kNoPruning;
   if (fFValidationEvents < 0.0) fFValidationEvents = 0.0;
   if (fAutomatic && fFValidationEvents > 0.5) {
      Log() << kWARNING << "You have chosen to use more than half of your training sample "
            << "to optimize the automatic pruning algorithm. This is probably wasteful "
            << "and your overall results will be degraded. Are you sure you want this?"
            << Endl;
   }


   if (this->Data()->HasNegativeEventWeights()){
      Log() << kINFO << " You are using a Monte Carlo sample that also contains negative weights. "
            << "That should in principle be fine, as long as on average you end up with "
            << "something positive. For this you have to make sure that the minimal number "
            << "of (unweighted) events demanded for a tree node (currently you use: nEventsMin="
            << fNodeMinEvents << ", you can set this via the BDT option string when booking the "
            << "classifier) is large enough to allow for reasonable averaging! "
            << "If this does not help, maybe you want to try the option IgnoreNegWeightsInTraining, "
            << "which ignores events with negative weight in the training. " << Endl
            << Endl << "Note: You'll get a WARNING message during the training if that should ever happen" << Endl;
   }

   if (DoRegression()) {
      if (fUseYesNoLeaf && !IsConstructedFromWeightFile()){
         Log() << kWARNING << "Regression Trees do not work with fUseYesNoLeaf=TRUE --> I will set it to FALSE" << Endl;
         fUseYesNoLeaf = kFALSE;
      }

      if (fSepType != NULL){
         Log() << kWARNING << "Regression Trees do not work with Separation type other than <RegressionVariance> --> I will use it instead" << Endl;
         fSepType = NULL;
      }
   }
   if (fRandomisedTrees){
      Log() << kINFO << " Randomised trees use *bagging* as *boost* method and no pruning" << Endl;
      fPruneMethod = DecisionTree::kNoPruning;
      fBoostType   = "Bagging";
   }

   //    if (2*fNodeMinEvents >  Data()->GetNTrainingEvents()) {
   //       Log() << kFATAL << "you've demanded a minimun number of events in a leaf node " 
   //             << " that is larger than 1/2 the total number of events in the training sample."
   //             << " Hence I cannot make any split at all... this will not work!" << Endl;
   //    }


}
//_______________________________________________________________________
void TMVA::MethodBDT::Init( void )
{
   // common initialisation with defaults for the BDT-Method
   fNTrees         = 400;
   if (fAnalysisType == Types::kClassification || fAnalysisType == Types::kMulticlass ) {
      fMaxDepth        = 3;
      fBoostType      = "AdaBoost";
   }else {
      fMaxDepth = 100;
      fBoostType      = "AdaBoostR2";
      fAdaBoostR2Loss = "Quadratic";
   }

   fNodeMinEvents  = TMath::Max( Int_t(40), Int_t( Data()->GetNTrainingEvents() / (10*GetNvar()*GetNvar())) );
   fNCuts          = 20;
   fPruneMethodS   = "CostComplexity";
   fPruneMethod    = DecisionTree::kCostComplexityPruning;
   fPruneStrength  = -1.0;
   fFValidationEvents = 0.5;
   fRandomisedTrees = kFALSE;
   fUseNvars        =  (GetNvar()>12) ? UInt_t(GetNvar()/8) : TMath::Max(UInt_t(2),UInt_t(GetNvar()/3));
   fUseNTrainEvents = Data()->GetNTrainingEvents();
   fNNodesMax       = 1000000;
   fShrinkage       = 1.0;

   // reference cut value to distinguish signal-like from background-like events
   SetSignalReferenceCut( 0 );

}

//_______________________________________________________________________
TMVA::MethodBDT::~MethodBDT( void )
{
   //destructor
   for (UInt_t i=0; i<fEventSample.size();      i++) delete fEventSample[i];
   for (UInt_t i=0; i<fValidationSample.size(); i++) delete fValidationSample[i];
   for (UInt_t i=0; i<fForest.size();           i++) delete fForest[i];
}

//_______________________________________________________________________
void TMVA::MethodBDT::InitEventSample( void )
{
   // Write all events from the tree into a vector of Events that is
   // more easily manipulated. This method should never be called without an
   // existing training tree, as it uses the vector of events from the ROOT training tree
   if (!HasTrainingTree()) Log() << kFATAL << "<Init> Data().TrainingTree() is zero pointer" << Endl;

   UInt_t nevents = Data()->GetNTrainingEvents();
   Bool_t first=kTRUE;

   for (UInt_t ievt=0; ievt<nevents; ievt++) {

      Event* event = new Event( *GetTrainingEvent(ievt) );

      // skip (and free) negative-weight events if the user requested to ignore them
      if (IgnoreEventsWithNegWeightsInTraining() && event->GetWeight() <= 0) {
         if (first) {
            first = kFALSE;
            Log() << kINFO << "Events with negative event weights are ignored during "
                  << "the BDT training (option IgnoreNegWeightsInTraining is enabled)"
                  << Endl;
         }
         delete event;
         continue;
      }
      // if fAutomatic == true you need a validation sample to optimize pruning
      if (fAutomatic) {
         Double_t modulo = 1.0/(fFValidationEvents);
         Int_t   imodulo = static_cast<Int_t>( fmod(modulo,1.0) > 0.5 ? ceil(modulo) : floor(modulo) );
         if (ievt % imodulo == 0) fValidationSample.push_back( event );
         else                     fEventSample.push_back( event );
      }
      else {
         fEventSample.push_back(event);
      }
   }
   if (fAutomatic) {
      Log() << kINFO << "<InitEventSample> Internally I use " << fEventSample.size()
            << " for Training  and " << fValidationSample.size()
            << " for Pruning Validation (" << ((Float_t)fValidationSample.size())/((Float_t)fEventSample.size()+fValidationSample.size())*100.0
            << "% of training used for validation)" << Endl;
   }
}

//_______________________________________________________________________
void TMVA::MethodBDT::Train()
{
   // BDT training

   // fill the STL Vector with the event sample
   InitEventSample();

   if (IsNormalised()) Log() << kFATAL << "\"Normalise\" option cannot be used with BDT; "
                             << "please remove the option from the configuration string, or "
                             << "use \"!Normalise\""
                             << Endl;

   Log() << kINFO << "Training "<< fNTrees << " Decision Trees ... patience please" << Endl;

   Results* results = Data()->GetResults(GetMethodName(), Types::kTraining, GetAnalysisType());

   // book monitoring histograms (currently for AdaBoost only)

   
   // weights applied in boosting
   Int_t nBins;
   Double_t xMin,xMax;
   TString hname = "AdaBooost weight distribution";

   nBins= 100;
   xMin = 0;
   xMax = 30;

   if (DoRegression()) {
      nBins= 100;
      xMin = 0;
      xMax = 1;
      hname="Boost event weights distribution";
   }
      
   TH1* h = new TH1F("BoostWeight",hname,nBins,xMin,xMax);
   h->SetXTitle("boost weight");
   results->Store(h, "BoostWeights");

   // weights applied in boosting vs tree number
   h = new TH1F("BoostWeightVsTree","Boost weights vs tree",fNTrees,0,fNTrees);
   h->SetXTitle("#tree");
   h->SetYTitle("boost weight");
   results->Store(h, "BoostWeightsVsTree");

   // error fraction vs tree number
   h = new TH1F("ErrFractHist","error fraction vs tree number",fNTrees,0,fNTrees);
   h->SetXTitle("#tree");
   h->SetYTitle("error fraction");
   results->Store(h, "ErrorFrac");

   // nNodesBeforePruning vs tree number
   TH1* nodesBeforePruningVsTree = new TH1I("NodesBeforePruning","nodes before pruning",fNTrees,0,fNTrees);
   nodesBeforePruningVsTree->SetXTitle("#tree");
   nodesBeforePruningVsTree->SetYTitle("#tree nodes");
   results->Store(nodesBeforePruningVsTree);

   // nNodesAfterPruning vs tree number
   TH1* nodesAfterPruningVsTree = new TH1I("NodesAfterPruning","nodes after pruning",fNTrees,0,fNTrees);
   nodesAfterPruningVsTree->SetXTitle("#tree");
   nodesAfterPruningVsTree->SetYTitle("#tree nodes");
   results->Store(nodesAfterPruningVsTree);

   fMonitorNtuple= new TTree("MonitorNtuple","BDT variables");
   fMonitorNtuple->Branch("iTree",&fITree,"iTree/I");
   fMonitorNtuple->Branch("boostWeight",&fBoostWeight,"boostWeight/D");
   fMonitorNtuple->Branch("errorFraction",&fErrorFraction,"errorFraction/D");

   Timer timer( fNTrees, GetName() );
   Int_t nNodesBeforePruningCount = 0;
   Int_t nNodesAfterPruningCount = 0;

   Int_t nNodesBeforePruning = 0;
   Int_t nNodesAfterPruning = 0;

   TH1D *alpha = new TH1D("alpha","PruneStrengths",fNTrees,0,fNTrees);
   alpha->SetXTitle("#tree");
   alpha->SetYTitle("PruneStrength");

   if(fBoostType=="Grad"){
      InitGradBoost(fEventSample);
   }

   for (int itree=0; itree<fNTrees; itree++) {
      timer.DrawProgressBar( itree );

      fForest.push_back( new DecisionTree( fSepType, fNodeMinEvents, fNCuts,
                                           fRandomisedTrees, fUseNvars, fNNodesMax, fMaxDepth,
                                           itree, fNodePurityLimit, itree));
      if (fBaggedGradBoost) nNodesBeforePruning = fForest.back()->BuildTree(fSubSample);
      else                  nNodesBeforePruning = fForest.back()->BuildTree(fEventSample);

      if (fBoostType!="Grad")
         if (fUseYesNoLeaf && !DoRegression() ){ // remove leaf nodes where both daughter nodes are of same type
            nNodesBeforePruning = fForest.back()->CleanTree();
         }
      nNodesBeforePruningCount += nNodesBeforePruning;
      nodesBeforePruningVsTree->SetBinContent(itree+1,nNodesBeforePruning);

      fForest.back()->SetPruneMethod(fPruneMethod); // set the pruning method for the tree
      fForest.back()->SetPruneStrength(fPruneStrength); // set the strength parameter

      std::vector<Event*> * validationSample = NULL;
      if(fAutomatic) validationSample = &fValidationSample;

      if(fBoostType=="Grad"){
         this->Boost(fEventSample, fForest.back(), itree);
      }
      else {
         if(!fPruneBeforeBoost) { // only prune after boosting
            fBoostWeights.push_back( this->Boost(fEventSample, fForest.back(), itree) );
            // if fAutomatic == true, pruneStrength will be the optimal pruning strength
            // determined by the pruning algorithm; otherwise, it is simply the strength parameter
            // set by the user
            Double_t pruneStrength = fForest.back()->PruneTree(validationSample);
            alpha->SetBinContent(itree+1,pruneStrength);
         }
         else { // prune first, then apply a boosting cycle
            Double_t pruneStrength = fForest.back()->PruneTree(validationSample);
            alpha->SetBinContent(itree+1,pruneStrength);
            fBoostWeights.push_back( this->Boost(fEventSample, fForest.back(), itree) );
         }
         
         if (fUseYesNoLeaf && !DoRegression() ){ // remove leaf nodes where both daughter nodes are of same type
            fForest.back()->CleanTree();
         }
      }
      nNodesAfterPruning = fForest.back()->GetNNodes();
      nNodesAfterPruningCount += nNodesAfterPruning;
      nodesAfterPruningVsTree->SetBinContent(itree+1,nNodesAfterPruning);

      fITree = itree;
      fMonitorNtuple->Fill();
   }

   alpha->Write();

   // get elapsed time
   Log() << kINFO << "<Train> elapsed time: " << timer.GetElapsedTime()
         << "                              " << Endl;
   if (fPruneMethod == DecisionTree::kNoPruning) {
      Log() << kINFO << "<Train> average number of nodes (w/o pruning) : "
            << nNodesBeforePruningCount/fNTrees << Endl;
   }
   else {
      Log() << kINFO << "<Train> average number of nodes before/after pruning : "
            << nNodesBeforePruningCount/fNTrees << " / "
            << nNodesAfterPruningCount/fNTrees
            << Endl;
   }
}

//_______________________________________________________________________
void TMVA::MethodBDT::GetRandomSubSample()
{
   // fills fSubSample with fSampleFraction*NEvents random training events
   UInt_t nevents = fEventSample.size();
   UInt_t nfraction = static_cast<UInt_t>(fSampleFraction*Data()->GetNTrainingEvents());

   if (fSubSample.size()!=0) fSubSample.clear();
   TRandom3 *trandom   = new TRandom3(fForest.size());

   for (UInt_t ievt=0; ievt<nfraction; ievt++) { // recreate new random subsample
      // Integer(nevents) returns a uniform random index in [0, nevents-1]
      fSubSample.push_back(fEventSample[trandom->Integer(nevents)]);
   }
   delete trandom;
}

//_______________________________________________________________________
Double_t TMVA::MethodBDT::GetGradBoostMVA(TMVA::Event& e, UInt_t nTrees)
{
   //returns MVA value: -1 for background, 1 for signal
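   // The sum of tree responses F is mapped to [-1,1] via 2/(1+exp(-2F)) - 1,
   // which is mathematically identical to tanh(F): F = 0 gives 0, large
   // positive F approaches +1 (signal), large negative F approaches -1
   // (background).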
   Double_t sum=0;
   for (UInt_t itree=0; itree<nTrees; itree++) {
      //loop over all trees in forest
      sum += fForest[itree]->CheckEvent(e,kFALSE);
   }
   return 2.0/(1.0+exp(-2.0*sum))-1; //MVA output between -1 and 1
}


//_______________________________________________________________________
void TMVA::MethodBDT::UpdateTargets(vector<TMVA::Event*> eventSample)
{
   //Calculate residuals for all events
   UInt_t iValue=0;
   for (vector<TMVA::Event*>::iterator e=eventSample.begin(); e!=eventSample.end();e++) {
      fBoostWeights[iValue]+=fForest.back()->CheckEvent(*(*e),kFALSE);
      Double_t p_sig=1.0/(1.0+exp(-2.0*fBoostWeights[iValue]));
      Double_t res = ((*e)->IsSignal()?1:0)-p_sig;
      (*e)->SetTarget(0,res);
      iValue++;
   }   
}

//_______________________________________________________________________
void TMVA::MethodBDT::UpdateTargetsRegression(vector<TMVA::Event*> eventSample, Bool_t first)
{
   //Calculate current residuals for all events and update targets for next iteration
   vector<Double_t> absResiduals;
   vector< vector<Double_t> > temp;
   UInt_t i=0;
   for (vector<TMVA::Event*>::iterator e=eventSample.begin(); e!=eventSample.end();e++) {
      if(first){
         fRegResiduals.push_back((*e)->GetTarget(0)-fBoostWeights[i]);
      }
      else{
         fRegResiduals[i]-=fForest.back()->CheckEvent(*(*e),kFALSE);
      }
      absResiduals.push_back(fabs(fRegResiduals[i]));
      i++;
   }
   temp.push_back(absResiduals);
   temp.push_back(fInitialWeights);
   fTransitionPoint = GetWeightedQuantile(temp,0.9);
   i=0;
   for (vector<TMVA::Event*>::iterator e=eventSample.begin(); e!=eventSample.end();e++) {
      if(absResiduals[i]<=fTransitionPoint)
         (*e)->SetTarget(0,fRegResiduals[i]);
      else
         (*e)->SetTarget(0,fTransitionPoint*(fRegResiduals[i]<0?-1.0:1.0));
      i++;
   }
}

//_______________________________________________________________________
Double_t TMVA::MethodBDT::GetWeightedQuantile(vector<  vector<Double_t> > &vec, const Double_t quantile, const Double_t SumOfWeights){
   //calculates the quantile of the distribution in vec[0] weighted with the values in vec[1]
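   // Worked example (illustrative): for sorted values {1,3,7,9} with weights
   // {1,1,2,1}, SumOfWeights=5 and quantile=0.5, the threshold is 2.5; the
   // cumulative weights 1,2,4 first exceed it at the third element, so the
   // weighted median returned is 7.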
   gTools().UsefulSortAscending( vec );
   Double_t norm = fSumOfWeights;
   if(SumOfWeights!=0.0) norm = SumOfWeights;
   Double_t temp = 0.0;

   // accumulate weights until the requested quantile is exceeded;
   // the bounds check guards against running past the last element
   UInt_t i = 0;
   while (i < vec[1].size() && temp <= norm*quantile){
      temp += vec[1][i];
      i++;
   }
   // the loop always runs at least once, so i >= 1; step back to return
   // the element whose cumulative weight first exceeded the threshold
   return vec[0][i-1];
}

//_______________________________________________________________________
Double_t TMVA::MethodBDT::GradBoost( vector<TMVA::Event*> eventSample, DecisionTree *dt )
{
   //Calculate the desired response value for each region (line search)
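   // The leaf response set below appears to follow Friedman's TreeBoost
   // line-search step for the binomial log-likelihood loss: per leaf,
   // accumulate the weighted residuals sum(w*r) and the curvature term
   // sum(w^2*|r|*(1-|r|)), then set the response to shrinkage * 0.5 * ratio.
   // Numeric sketch: residuals {0.4,-0.3} with unit weights give numerator 0.1
   // and denominator 0.4*0.6 + 0.3*0.7 = 0.45, hence response ~ 0.111*fShrinkage.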
   std::map<TMVA::DecisionTreeNode*,vector<Double_t> > leaves;
   for (vector<TMVA::Event*>::iterator e=eventSample.begin(); e!=eventSample.end();e++) {
      TMVA::DecisionTreeNode* node = dt->GetEventNode(*(*e));
      if ((leaves[node]).size()==0){
         (leaves[node]).push_back((*e)->GetTarget(0) * (*e)->GetWeight());
         (leaves[node]).push_back(fabs((*e)->GetTarget(0))*(1.0-fabs((*e)->GetTarget(0))) * (*e)->GetWeight() * (*e)->GetWeight());
      }
      else {
         (leaves[node])[0]+=((*e)->GetTarget(0) * (*e)->GetWeight());
         (leaves[node])[1]+=fabs((*e)->GetTarget(0))*(1.0-fabs((*e)->GetTarget(0))) *
            ((*e)->GetWeight()) * ((*e)->GetWeight());
      }
   }
   for (std::map<TMVA::DecisionTreeNode*,vector<Double_t> >::iterator iLeave=leaves.begin();
        iLeave!=leaves.end();++iLeave){
      if ((iLeave->second)[1]<1e-30) (iLeave->second)[1]=1e-30;

      (iLeave->first)->SetResponse(fShrinkage*0.5*(iLeave->second)[0]/((iLeave->second)[1]));
   }
   //call UpdateTargets before next tree is grown
   UpdateTargets(eventSample);
   if (fBaggedGradBoost) GetRandomSubSample();
   return 1; //trees all have the same weight
}

//_______________________________________________________________________
Double_t TMVA::MethodBDT::GradBoostRegression( vector<TMVA::Event*> eventSample, DecisionTree *dt )
{
   // Implementation of M_TreeBoost using a Huber loss function as described by Friedman (1999)
   std::map<TMVA::DecisionTreeNode*,vector< vector<Double_t> > > leaves;
   UInt_t i =0;
   for (vector<TMVA::Event*>::iterator e=eventSample.begin(); e!=eventSample.end();e++) {
      TMVA::DecisionTreeNode* node = dt->GetEventNode(*(*e));
      if(leaves[node].size()==0){
         (leaves[node]).push_back(vector<Double_t>());
         (leaves[node]).push_back(vector<Double_t>());
      }
      (leaves[node])[0].push_back(fRegResiduals[i]);
      (leaves[node])[1].push_back((*e)->GetWeight());
      i++;
   }

   for (std::map<TMVA::DecisionTreeNode*,vector<vector<Double_t> > >::iterator iLeave=leaves.begin();
        iLeave!=leaves.end();++iLeave){
      Double_t LeaveWeight = 0;
      for(UInt_t j=0;j<((iLeave->second)[0].size());j++){
         LeaveWeight+=((iLeave->second)[1][j]);
      }
      Double_t shift=0,diff= 0;
      Double_t ResidualMedian = GetWeightedQuantile(iLeave->second,0.5,LeaveWeight);
      for(UInt_t j=0;j<((iLeave->second)[0].size());j++){
         diff = (iLeave->second)[0][j]-ResidualMedian;
         shift+=1.0/((iLeave->second)[0].size())*((diff<0)?-1.0:1.0)*TMath::Min(fTransitionPoint,fabs(diff));
      }
      (iLeave->first)->SetResponse(fShrinkage*(ResidualMedian+shift));
   }
   UpdateTargetsRegression(eventSample);
   return 1;
}

//_______________________________________________________________________
void TMVA::MethodBDT::InitGradBoost( vector<TMVA::Event*> eventSample)
{
   // initialize targets for first tree
   fSepType=NULL; //set fSepType to NULL (regression trees are used for both classification and regression)
   if(DoRegression()){

      vector< vector<Double_t> > weightedTargetValues;
      vector<Double_t> targets;
       for (vector<TMVA::Event*>::iterator e=eventSample.begin(); e!=eventSample.end();e++) {
         targets.push_back((*e)->GetTarget(0));
         fInitialWeights.push_back((*e)->GetWeight());
         fSumOfWeights+=(*e)->GetWeight();
      }
      weightedTargetValues.push_back(targets);
      weightedTargetValues.push_back(fInitialWeights);
      Double_t weightedMedian = GetWeightedQuantile(weightedTargetValues,0.5);
 
      for (vector<TMVA::Event*>::iterator e=eventSample.begin(); e!=eventSample.end();e++) {
         fBoostWeights.push_back(weightedMedian);  
      }
      UpdateTargetsRegression(eventSample,kTRUE);
   }
   else{
      for (vector<TMVA::Event*>::iterator e=eventSample.begin(); e!=eventSample.end();e++) {
         Double_t r = ((*e)->IsSignal()?1:0)-0.5; //Calculate initial residua
         (*e)->SetTarget(0,r);
         fBoostWeights.push_back(0);
      }
   }
   if (fBaggedGradBoost) GetRandomSubSample(); 
}
//_______________________________________________________________________
Double_t TMVA::MethodBDT::TestTreeQuality( DecisionTree *dt )
{
   // test the tree quality... in terms of misclassification

   Double_t ncorrect=0, nfalse=0;
   for (UInt_t ievt=0; ievt<fValidationSample.size(); ievt++) {
      Bool_t isSignalType= (dt->CheckEvent(*(fValidationSample[ievt])) > fNodePurityLimit ) ? 1 : 0;

      if (isSignalType == ((fValidationSample[ievt])->IsSignal()) ) {
         ncorrect += fValidationSample[ievt]->GetWeight();
      }
      else{
         nfalse += fValidationSample[ievt]->GetWeight();
      }
   }

   return  ncorrect / (ncorrect + nfalse);
}

//_______________________________________________________________________
Double_t TMVA::MethodBDT::Boost( vector<TMVA::Event*> eventSample, DecisionTree *dt, Int_t iTree )
{
   // apply the boosting algorithm (the algorithm is selected via the "BoostType" option
   // given in the constructor). The return value is the boosting weight

   if      (fBoostType=="AdaBoost")    return this->AdaBoost  (eventSample, dt);
   else if (fBoostType=="Bagging")     return this->Bagging   (eventSample, iTree);
   else if (fBoostType=="RegBoost")    return this->RegBoost  (eventSample, dt);
   else if (fBoostType=="AdaBoostR2")  return this->AdaBoostR2(eventSample, dt);
   else if (fBoostType=="Grad"){
      if(DoRegression())
         return this->GradBoostRegression(eventSample, dt);
      else
         return this->GradBoost (eventSample, dt);
   }
   else {
      Log() << kINFO << GetOptions() << Endl;
      Log() << kFATAL << "<Boost> unknown boost option called" << Endl;
   }

   return -1;
}

//_______________________________________________________________________
Double_t TMVA::MethodBDT::AdaBoost( vector<TMVA::Event*> eventSample, DecisionTree *dt )
{
   // the AdaBoost implementation.
   // a new training sample is generated by re-weighting
   // events that are misclassified by the decision tree. The weight
   // applied is w = (1-err)/err, or more generally:
   //            w = ((1-err)/err)^beta
   // where err is the fraction of misclassified events in the tree (err < 0.5,
   // assuming that the previous selection was better than random guessing)
   // and "beta" is a free parameter (default: beta = 1) that modifies the
   // boosting.
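   // Numeric sketch (beta = 1): with err = 0.25 the boost weight is
   // (1-0.25)/0.25 = 3, i.e. misclassified events have their weight tripled
   // (before the global re-normalisation below), and the tree enters the
   // forest with weight log(3) ~ 1.10.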

   Double_t err=0, sumw=0, sumwfalse=0, sumwfalse2=0;
   Double_t maxDev=0;
   for (vector<TMVA::Event*>::iterator e=eventSample.begin(); e!=eventSample.end();e++) {
      Double_t w = (*e)->GetWeight();
      sumw += w;
      if ( DoRegression() ) {
         Double_t tmpDev = TMath::Abs(dt->CheckEvent(*(*e),kFALSE) - (*e)->GetTarget(0) ); 
         sumwfalse += w * tmpDev;
         sumwfalse2 += w * tmpDev*tmpDev;
         if (tmpDev > maxDev) maxDev = tmpDev;
      }else{
         Bool_t isSignalType = (dt->CheckEvent(*(*e),fUseYesNoLeaf) > fNodePurityLimit );
         //       if (!(isSignalType == DataInfo().IsSignal((*e)))) {
         if (!(isSignalType == (*e)->IsSignal())) {
            sumwfalse+= w;
         }
      }
   }
   err = sumwfalse/sumw ;
   if ( DoRegression() ) {
      // compute the error rate according to the chosen AdaBoostR2 loss type:
      if (fAdaBoostR2Loss=="linear"){
         err = sumwfalse/maxDev/sumw ;
      }
      else if (fAdaBoostR2Loss=="quadratic"){
         err = sumwfalse2/maxDev/maxDev/sumw ;
      }
      else if (fAdaBoostR2Loss=="exponential"){
         err = 0;
         for (vector<TMVA::Event*>::iterator e=eventSample.begin(); e!=eventSample.end();e++) {
            Double_t w = (*e)->GetWeight();
            Double_t  tmpDev = TMath::Abs(dt->CheckEvent(*(*e),kFALSE) - (*e)->GetTarget(0) ); 
            err += w * (1 - exp (-tmpDev/maxDev)) / sumw;
         }
         
      }
      else {
         Log() << kFATAL << " you've chosen a Loss type for Adaboost other than linear, quadratic or exponential " 
               << " namely " << fAdaBoostR2Loss << "\n" 
               << "and this is not implemented... a typo in the options ??" <<Endl;
      }
   }
   Double_t newSumw=0;

   Double_t boostWeight=1.;
   if (err >= 0.5) { // sanity check ... should never happen, as otherwise there is apparently
      // something odd with the assignment of the leaf nodes (reminder: the training
      // events are used for this determination of the error rate)
      Log() << kWARNING << " The error rate in the BDT boosting is > 0.5. ("<< err
            << ") That should not happen, please check your code (i.e. the BDT code), I "
            << " set it to 0.5.. just to continue.." <<  Endl;
      err = 0.5;
   } else if (err < 0) {
      Log() << kWARNING << " The error rate in the BDT boosting is < 0. That can happen"
            << " due to improper treatment of negative weights in a Monte Carlo.. (if you have"
            << " an idea on how to do it in a better way, please let me know (Helge.Voss@cern.ch)"
            << " for the time being I set it to its absolute value.. just to continue.." <<  Endl;
      err = TMath::Abs(err);
   }
   if (fAdaBoostBeta == 1) {
      boostWeight = (1.-err)/err;
   }
   else {
      boostWeight =  TMath::Power((1.0 - err)/err, fAdaBoostBeta);
   }

   Results* results = Data()->GetResults(GetMethodName(),Types::kTraining, Types::kMaxAnalysisType);

   for (vector<TMVA::Event*>::iterator e=eventSample.begin(); e!=eventSample.end();e++) {
      //       if ((!( (dt->CheckEvent(*(*e),fUseYesNoLeaf) > fNodePurityLimit ) == DataInfo().IsSignal((*e)))) || DoRegression()) {
      if ((!( (dt->CheckEvent(*(*e),fUseYesNoLeaf) > fNodePurityLimit ) == (*e)->IsSignal())) || DoRegression()) {
         Double_t boostfactor = boostWeight;
         if (DoRegression()) boostfactor = TMath::Power(1/boostWeight,(1.-TMath::Abs(dt->CheckEvent(*(*e),kFALSE) - (*e)->GetTarget(0) )/maxDev ) );
         if ( (*e)->GetWeight() > 0 ){
            (*e)->SetBoostWeight( (*e)->GetBoostWeight() * boostfactor);
            if (DoRegression()) results->GetHist("BoostWeights")->Fill(boostfactor);
            //            cout << "  " << boostfactor << endl;
         } else {
            (*e)->SetBoostWeight( (*e)->GetBoostWeight() / boostfactor);
         }
      }
      newSumw+=(*e)->GetWeight();
   }

   // re-normalise the weights
   for (vector<TMVA::Event*>::iterator e=eventSample.begin(); e!=eventSample.end();e++) {
      (*e)->SetBoostWeight( (*e)->GetBoostWeight() * sumw / newSumw );
   }

   if (!(DoRegression()))results->GetHist("BoostWeights")->Fill(boostWeight);
   results->GetHist("BoostWeightsVsTree")->SetBinContent(fForest.size(),boostWeight);
   results->GetHist("ErrorFrac")->SetBinContent(fForest.size(),err);

   fBoostWeight = boostWeight;
   fErrorFraction = err;

   return TMath::Log(boostWeight);
}

//_______________________________________________________________________
Double_t TMVA::MethodBDT::Bagging( vector<TMVA::Event*> eventSample, Int_t iTree )
{
   // call it bootstrapping, re-sampling or whatever you like; in the end it is nothing
   // else but applying "random" weights to each event.
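   // Each event receives a Poisson(eventFraction) weight; e.g. for
   // eventFraction = 1 about exp(-1) ~ 37% of the events get weight 0 in a
   // given tree, mimicking bootstrap resampling in expectation.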
   Double_t newSumw=0;
   Double_t newWeight;
   TRandom3 *trandom   = new TRandom3(iTree);
   // cast to Double_t to avoid integer division truncating the event fraction
   Double_t eventFraction = static_cast<Double_t>(fUseNTrainEvents)/Data()->GetNTrainingEvents();
   for (vector<TMVA::Event*>::iterator e=eventSample.begin(); e!=eventSample.end();e++) {
      newWeight = trandom->PoissonD(eventFraction);
      (*e)->SetBoostWeight(newWeight);
      newSumw+=(*e)->GetBoostWeight();
   }
   for (vector<TMVA::Event*>::iterator e=eventSample.begin(); e!=eventSample.end();e++) {
      (*e)->SetBoostWeight( (*e)->GetBoostWeight() * eventSample.size() / newSumw );
   }
   delete trandom;
   return 1.;  // as there are random weights for each event, just return a constant == 1
}

//_______________________________________________________________________
Double_t TMVA::MethodBDT::RegBoost( vector<TMVA::Event*> /* eventSample */, DecisionTree* /* dt */ )
{
   // a special boosting only for Regression ...
   // maybe I'll implement it later...

   return 1;
}

//_______________________________________________________________________
Double_t TMVA::MethodBDT::AdaBoostR2( vector<TMVA::Event*> eventSample, DecisionTree *dt )
{
   // adaptation of AdaBoost to regression problems (see H. Drucker, 1997)
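   // Numeric sketch: for err = 0.2 the boost weight is err/(1-err) = 0.25; an
   // event the tree predicts perfectly (deviation 0) has its weight multiplied
   // by 0.25^1 = 0.25, the worst event (deviation = maxDev) by 0.25^0 = 1, and
   // the tree enters the forest with weight log(1/0.25) ~ 1.39.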

   if ( !DoRegression() ) Log() << kFATAL << "Somehow you chose a regression boost method for a classification job" << Endl;

   Double_t err=0, sumw=0, sumwfalse=0, sumwfalse2=0;
   Double_t maxDev=0;
   for (vector<TMVA::Event*>::iterator e=eventSample.begin(); e!=eventSample.end();e++) {
      Double_t w = (*e)->GetWeight();
      sumw += w;

      Double_t  tmpDev = TMath::Abs(dt->CheckEvent(*(*e),kFALSE) - (*e)->GetTarget(0) );
      sumwfalse  += w * tmpDev;
      sumwfalse2 += w * tmpDev*tmpDev;
      if (tmpDev > maxDev) maxDev = tmpDev;
   }

   // compute the error rate according to the chosen AdaBoostR2 loss type:
   if (fAdaBoostR2Loss=="linear"){
      err = sumwfalse/maxDev/sumw ;
   }
   else if (fAdaBoostR2Loss=="quadratic"){
      err = sumwfalse2/maxDev/maxDev/sumw ;
   }
   else if (fAdaBoostR2Loss=="exponential"){
      err = 0;
      for (vector<TMVA::Event*>::iterator e=eventSample.begin(); e!=eventSample.end();e++) {
         Double_t w = (*e)->GetWeight();
         Double_t  tmpDev = TMath::Abs(dt->CheckEvent(*(*e),kFALSE) - (*e)->GetTarget(0) ); 
         err += w * (1 - exp (-tmpDev/maxDev)) / sumw;
      }
      
   }
   else {
      Log() << kFATAL << " you've chosen a Loss type for Adaboost other than linear, quadratic or exponential " 
            << " namely " << fAdaBoostR2Loss << "\n" 
            << "and this is not implemented... a typo in the options ??" <<Endl;
   }



   if (err >= 0.5) {
      Log() << kFATAL << " The error rate in the BDT boosting is > 0.5. "
            << " i.e. " << err 
            << " That should induce a stop condition of the boosting " << Endl;
   } else if (err < 0) {
      Log() << kWARNING << " The error rate in the BDT boosting is < 0. That can happen"
            << " due to improper treatment of negative weights in a Monte Carlo.. (if you have"
            << " an idea on how to do it in a better way, please let me know (Helge.Voss@cern.ch)"
            << " for the time being I set it to its absolute value.. just to continue.." <<  Endl;
      err = TMath::Abs(err);
   }

   Double_t boostWeight = err / (1.-err);
   Double_t newSumw=0;

   Results* results = Data()->GetResults(GetMethodName(), Types::kTraining, Types::kMaxAnalysisType);

   for (vector<TMVA::Event*>::iterator e=eventSample.begin(); e!=eventSample.end();e++) {
      Double_t boostfactor =  TMath::Power(boostWeight,(1.-TMath::Abs(dt->CheckEvent(*(*e),kFALSE) - (*e)->GetTarget(0) )/maxDev ) );
      results->GetHist("BoostWeights")->Fill(boostfactor);
      //      cout << "R2  " << boostfactor << "   " << boostWeight << "   " << (1.-TMath::Abs(dt->CheckEvent(*(*e),kFALSE) - (*e)->GetTarget(0) )/maxDev)  << endl;
      if ( (*e)->GetWeight() > 0 ){
         Float_t newBoostWeight = (*e)->GetBoostWeight() * boostfactor;
         Float_t newWeight = (*e)->GetWeight() * (*e)->GetBoostWeight() * boostfactor;
         if (newWeight == 0) {
            std::cout << "Weight=    "   <<   (*e)->GetWeight() << std::endl;
            std::cout << "BoostWeight= " <<   (*e)->GetBoostWeight() << std::endl;
            std::cout << "boostweight="<<boostWeight << "  err= " <<err << std::endl; 
            std::cout << "NewBoostWeight= " <<   newBoostWeight << std::endl;
            std::cout << "boostfactor= " <<  boostfactor << std::endl;
            std::cout << "maxDev     = " <<  maxDev << std::endl;
            std::cout << "tmpDev     = " <<  TMath::Abs(dt->CheckEvent(*(*e),kFALSE) - (*e)->GetTarget(0) ) << std::endl;
            std::cout << "target     = " <<  (*e)->GetTarget(0)  << std::endl; 
            std::cout << "estimate   = " <<  dt->CheckEvent(*(*e),kFALSE)  << std::endl;
         }
         (*e)->SetBoostWeight( newBoostWeight );
         //         (*e)->SetBoostWeight( (*e)->GetBoostWeight() * boostfactor);
      } else {
         (*e)->SetBoostWeight( (*e)->GetBoostWeight() / boostfactor);
      }
      newSumw+=(*e)->GetWeight();
   }

   // re-normalise the weights
   for (vector<TMVA::Event*>::iterator e=eventSample.begin(); e!=eventSample.end();e++) {
      (*e)->SetBoostWeight( (*e)->GetBoostWeight() * sumw / newSumw );
   }


   results->GetHist("BoostWeightsVsTree")->SetBinContent(fForest.size(),1./boostWeight);
   results->GetHist("ErrorFrac")->SetBinContent(fForest.size(),err);

   fBoostWeight = boostWeight;
   fErrorFraction = err;

   return TMath::Log(1./boostWeight);
}

//_______________________________________________________________________
void TMVA::MethodBDT::AddWeightsXMLTo( void* parent ) const
{
   // write weights to XML 
   void* wght = gTools().xmlengine().NewChild(parent, 0, "Weights");
   gTools().AddAttr( wght, "NTrees", fForest.size() );
   gTools().AddAttr( wght, "TreeType", fForest.back()->GetAnalysisType() );

   for (UInt_t i=0; i< fForest.size(); i++) {
      void* trxml = fForest[i]->AddXMLTo(wght);
      gTools().AddAttr( trxml, "boostWeight", fBoostWeights[i] );
      gTools().AddAttr( trxml, "itree", i );
   }
}

//_______________________________________________________________________
void TMVA::MethodBDT::ReadWeightsFromXML(void* parent) {
   // reads the BDT from the xml file

   UInt_t i;
   for (i=0; i<fForest.size(); i++) delete fForest[i];
   fForest.clear();
   fBoostWeights.clear();

   UInt_t ntrees;
   UInt_t analysisType;
   Float_t boostWeight;

   gTools().ReadAttr( parent, "NTrees", ntrees );
   gTools().ReadAttr( parent, "TreeType", analysisType );

   void* ch = gTools().xmlengine().GetChild(parent);
   i=0;
   while(ch) {
      fForest.push_back( dynamic_cast<DecisionTree*>( BinaryTree::CreateFromXML(ch, GetTrainingTMVAVersionCode()) ) );
      fForest.back()->SetAnalysisType(Types::EAnalysisType(analysisType));
      fForest.back()->SetTreeID(i++);
      gTools().ReadAttr(ch,"boostWeight",boostWeight);
      fBoostWeights.push_back(boostWeight);
      ch = gTools().xmlengine().GetNext(ch);
   }
}

//_______________________________________________________________________
void  TMVA::MethodBDT::ReadWeightsFromStream( istream& istr )
{
   // read the weights (BDT coefficients)
   TString var, dummy;
   //   Types::EAnalysisType analysisType;
   Int_t analysisType(0);

   istr >> dummy >> fNTrees;
   Log() << kINFO << "Read " << fNTrees << " Decision trees" << Endl;

   for (UInt_t i=0;i<fForest.size();i++) delete fForest[i];
   fForest.clear();
   fBoostWeights.clear();
   Int_t iTree;
   Double_t boostWeight;
   for (int i=0;i<fNTrees;i++) {
      istr >> dummy >> iTree >> dummy >> boostWeight;
      if (iTree != i) {
         if (!fForest.empty()) fForest.back()->Print( cout ); // guard: the forest may still be empty
         Log() << kFATAL << "Error while reading weight file; mismatch iTree="
               << iTree << " i=" << i
               << " dummy " << dummy
               << " boostweight " << boostWeight
               << Endl;
      }
      fForest.push_back( new DecisionTree() );
      fForest.back()->SetAnalysisType(Types::EAnalysisType(analysisType));
      fForest.back()->SetTreeID(i);
      fForest.back()->Read(istr, GetTrainingTMVAVersionCode());
      fBoostWeights.push_back(boostWeight);
   }
}

//_______________________________________________________________________
Double_t TMVA::MethodBDT::GetMvaValue( Double_t* err ){
   return this->GetMvaValue( err, 0 );
}
//_______________________________________________________________________
Double_t TMVA::MethodBDT::GetMvaValue( Double_t* err, UInt_t useNTrees )
{
   // Return the MVA value (range [-1;1]) that classifies the
   // event according to the majority vote from the total number of
   // decision trees.
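   // With weighted trees the returned value is
   //    MVA(x) = sum_t alpha_t * h_t(x) / sum_t alpha_t ,
   // where h_t is the response of tree t and alpha_t its boost weight (for
   // AdaBoost, alpha_t = log((1-err_t)/err_t), as stored in fBoostWeights).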

   // cannot determine error
   if (err != 0) *err = -1;
   
   // allow for the possibility to use less trees in the actual MVA calculation
   // than have been originally trained.
   UInt_t nTrees = fForest.size();
   if (useNTrees > 0 ) nTrees = useNTrees;

   if (fBoostType=="Grad") return GetGradBoostMVA(const_cast<TMVA::Event&>(*GetEvent()),nTrees);
   
   Double_t myMVA = 0;
   Double_t norm  = 0;
   for (UInt_t itree=0; itree<nTrees; itree++) {
      //
      if (fUseWeightedTrees) {
         myMVA += fBoostWeights[itree] * fForest[itree]->CheckEvent(*GetEvent(),fUseYesNoLeaf);
         norm  += fBoostWeights[itree];
      }
      else {
         myMVA += fForest[itree]->CheckEvent(*GetEvent(),fUseYesNoLeaf);
         norm  += 1;
      }
   }
   return myMVA / norm;
}
//_______________________________________________________________________
const std::vector<Float_t> & TMVA::MethodBDT::GetRegressionValues()
{
   // get the regression value generated by the BDTs


   if (fRegressionReturnVal == NULL) fRegressionReturnVal = new std::vector<Float_t>();
   fRegressionReturnVal->clear();

   Double_t myMVA = 0;
   Double_t norm  = 0;
   if (fBoostType=="AdaBoostR2") {
      // rather than using the weighted average of the tree responses in the forest,
      // H. Drucker (1997) proposed to use the "weighted median":
      //
      // sort all individual tree responses according to the prediction value
      //   (keeping the association with their tree weights),
      // then sum up the associated weights (starting from the one whose tree
      //   yielded the smallest response) up to the tree "t" at which enough
      //   tree weights have been added to exceed half of the sum of all tree weights.
      // choose as response of the forest the one which belongs to this "t"
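      // Illustrative: with tree weights summing to W, the loop below stops at
      // the first tree t whose cumulative weight (after sorting by response)
      // exceeds W/2; the forest response is then the average response of the
      // trees in a window of roughly +- NTrees/6 around t.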

      vector< Double_t > response(fForest.size());
      vector< Double_t > weight(fForest.size());
      Double_t           totalSumOfWeights = 0;

      for (UInt_t itree=0; itree<fForest.size(); itree++) {
         response[itree]    = fForest[itree]->CheckEvent(*GetEvent(),kFALSE);
         weight[itree]      = fBoostWeights[itree];
         totalSumOfWeights += fBoostWeights[itree];
      }

      vector< vector<Double_t> > vtemp;
      vtemp.push_back( response ); // this is the vector that will get sorted
      vtemp.push_back( weight ); 
      gTools().UsefulSortAscending( vtemp );

      Int_t t=0;
      Double_t sumOfWeights = 0;
      while (sumOfWeights <= totalSumOfWeights/2.) {
         sumOfWeights += vtemp[1][t];
         t++;
      }

      Double_t rVal=0;
      Int_t    count=0;
      // protect against unsigned-integer underflow in the window lower bound for small t
      Int_t iMin = t - Int_t(fForest.size()/6) - 1;
      if (iMin < 0) iMin = 0;
      for (UInt_t i = UInt_t(iMin);
           i< TMath::Min(UInt_t(fForest.size()),UInt_t(t+(fForest.size()/6)+0.5)); i++) {
         count++;
         rVal+=vtemp[0][i];
      }
      fRegressionReturnVal->push_back( rVal/Double_t(count));
   }
   else if(fBoostType=="Grad"){
      for (UInt_t itree=0; itree<fForest.size(); itree++) {
         myMVA += fForest[itree]->CheckEvent(*GetEvent(),kFALSE);
      }
      fRegressionReturnVal->push_back( myMVA+fBoostWeights[0]);
   }
   else{
      for (UInt_t itree=0; itree<fForest.size(); itree++) {
         //
         if (fUseWeightedTrees) {
            myMVA += fBoostWeights[itree] * fForest[itree]->CheckEvent(*GetEvent(),kFALSE);
            norm  += fBoostWeights[itree];
         }
         else {
            myMVA += fForest[itree]->CheckEvent(*GetEvent(),kFALSE);
            norm  += 1;
         }
      }
      fRegressionReturnVal->push_back( myMVA/norm );
   }
   return *fRegressionReturnVal;
}

//_______________________________________________________________________
void  TMVA::MethodBDT::WriteMonitoringHistosToFile( void ) const
{
   // Here we could write some histograms created during the processing
   // to the output file.
   Log() << kINFO << "Write monitoring histograms to file: " << BaseDir()->GetPath() << Endl;

   //Results* results = Data()->GetResults(GetMethodName(), Types::kTraining, Types::kMaxAnalysisType);
   //results->GetStorage()->Write();
   fMonitorNtuple->Write();
}

//_______________________________________________________________________
vector< Double_t > TMVA::MethodBDT::GetVariableImportance()
{
   // Return the relative variable importance, normalized such that all
   // variables together have importance 1. The importance is
   // evaluated as the total separation-gain that this variable had in
   // the decision trees (weighted by the number of events)

   fVariableImportance.resize(GetNvar());
   Double_t  sum=0;
   for (int itree = 0; itree < fNTrees; itree++) {
      vector<Double_t> relativeImportance(fForest[itree]->GetVariableImportance());
      for (UInt_t i=0; i< relativeImportance.size(); i++) {
         fVariableImportance[i] += relativeImportance[i];
      }
   }
   for (UInt_t i=0; i< fVariableImportance.size(); i++) sum += fVariableImportance[i];
   for (UInt_t i=0; i< fVariableImportance.size(); i++) fVariableImportance[i] /= sum;

   return fVariableImportance;
}
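
//_______________________________________________________________________
// Illustration only (hypothetical usage, assuming "bdt" points to a trained
// MethodBDT): since the returned importances are normalized, they sum to one.
//
//    std::vector<Double_t> imp = bdt->GetVariableImportance();
//    for (UInt_t ivar = 0; ivar < imp.size(); ivar++)
//       std::cout << "variable " << ivar << " : importance = " << imp[ivar] << std::endl;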

//_______________________________________________________________________
Double_t TMVA::MethodBDT::GetVariableImportance( UInt_t ivar )
{
   // Returns the measure for the variable importance of variable "ivar"
   // which is later used in GetVariableImportance() to calculate the
   // relative variable importances.

   vector<Double_t> relativeImportance = this->GetVariableImportance();
   if (ivar < (UInt_t)relativeImportance.size()) return relativeImportance[ivar];
   else Log() << kFATAL << "<GetVariableImportance> ivar = " << ivar << " is out of range " << Endl;

   return -1;
}

//_______________________________________________________________________
const TMVA::Ranking* TMVA::MethodBDT::CreateRanking()
{
   // Compute ranking of input variables

   // create the ranking object
   fRanking = new Ranking( GetName(), "Variable Importance" );
   vector< Double_t> importance(this->GetVariableImportance());

   for (UInt_t ivar=0; ivar<GetNvar(); ivar++) {

      fRanking->AddRank( Rank( GetInputLabel(ivar), importance[ivar] ) );
   }

   return fRanking;
}

//_______________________________________________________________________
void TMVA::MethodBDT::GetHelpMessage() const
{
   // Get help message text
   //
   // typical length of text line:
   //         "|--------------------------------------------------------------|"
   Log() << Endl;
   Log() << gTools().Color("bold") << "--- Short description:" << gTools().Color("reset") << Endl;
   Log() << Endl;
   Log() << "Boosted Decision Trees are a collection of individual decision" << Endl;
   Log() << "trees which form a multivariate classifier by (weighted) majority " << Endl;
   Log() << "vote of the individual trees. Consecutive decision trees are  " << Endl;
   Log() << "trained using the original training data set with re-weighted " << Endl;
   Log() << "events. By default, the AdaBoost method is employed, which gives " << Endl;
   Log() << "events that were misclassified in the previous tree a larger " << Endl;
   Log() << "weight in the training of the following tree." << Endl;
   Log() << Endl;
   Log() << "Decision trees are a sequence of binary splits of the data sample" << Endl;
   Log() << "using a single descriminant variable at a time. A test event " << Endl;
   Log() << "ending up after the sequence of left-right splits in a final " << Endl;
   Log() << "(\"leaf\") node is classified as either signal or background" << Endl;
   Log() << "depending on the majority type of training events in that node." << Endl;
   Log() << Endl;
   Log() << gTools().Color("bold") << "--- Performance optimisation:" << gTools().Color("reset") << Endl;
   Log() << Endl;
   Log() << "By the nature of the binary splits performed on the individual" << Endl;
   Log() << "variables, decision trees do not deal well with linear correlations" << Endl;
   Log() << "between variables (they need to approximate the linear split in" << Endl;
   Log() << "the two dimensional space by a sequence of splits on the two " << Endl;
   Log() << "variables individually). Hence decorrelation could be useful " << Endl;
   Log() << "to optimise the BDT performance." << Endl;
   Log() << Endl;
   Log() << gTools().Color("bold") << "--- Performance tuning via configuration options:" << gTools().Color("reset") << Endl;
   Log() << Endl;
   Log() << "The two most important parameters in the configuration are the  " << Endl;
   Log() << "minimal number of events requested by a leaf node (option " << Endl;
   Log() << "\"nEventsMin\"). If this number is too large, detailed features " << Endl;
   Log() << "in the parameter space cannot be modelled. If it is too small, " << Endl;
   Log() << "the risk to overtrain rises." << Endl;
   Log() << "   (Imagine the decision tree is split until the leaf node contains" << Endl;
   Log() << "    only a single event. In such a case, no training event is  " << Endl;
   Log() << "    misclassified, while the situation will look very different" << Endl;
   Log() << "    for the test sample.)" << Endl;
   Log() << Endl;
   Log() << "The default minimal number is currently set to " << Endl;
   Log() << "   max(20, (N_training_events / N_variables^2 / 10)) " << Endl;
   Log() << "and can be changed by the user." << Endl;
   Log() << Endl;
   Log() << "The other crucial parameter, the pruning strength (\"PruneStrength\")," << Endl;
   Log() << "is also related to overtraining. It is a regularisation parameter " << Endl;
   Log() << "that is used when determining after the training which splits " << Endl;
   Log() << "are considered statistically insignificant and are removed. The" << Endl;
   Log() << "user is advised to carefully watch the BDT screen output for" << Endl;
   Log() << "the comparison between efficiencies obtained on the training and" << Endl;
   Log() << "the independent test sample. They should be equal within statistical" << Endl;
   Log() << "errors, in order to minimize statistical fluctuations in different samples." << Endl;
}
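
//_______________________________________________________________________
// Illustration only: the default for "nEventsMin" quoted in the help text
// above, max(20, N_training_events / N_variables^2 / 10), written out as a
// small standalone helper (hypothetical name, not part of the class). For
// example, 10000 training events and 4 variables give max(20, 10000/16/10) = 62.
static Int_t ExampleDefaultNEventsMin( Int_t nTrainingEvents, Int_t nVariables )
{
   return TMath::Max( 20, nTrainingEvents / (nVariables*nVariables) / 10 );
}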

//_______________________________________________________________________
void TMVA::MethodBDT::MakeClassSpecific( std::ostream& fout, const TString& className ) const
{
   // make ROOT-independent C++ class for classifier response (classifier-specific implementation)

   // write BDT-specific classifier response
   fout << "   std::vector<BDT_DecisionTreeNode*> fForest;       // i.e. root nodes of decision trees" << endl;
   fout << "   std::vector<double>                fBoostWeights; // the weights applied in the individual boosts" << endl;
   fout << "};" << endl << endl;
   fout << "double " << className << "::GetMvaValue__( const std::vector<double>& inputValues ) const" << endl;
   fout << "{" << endl;
   fout << "   double myMVA = 0;" << endl;
   fout << "   double norm  = 0;" << endl;
   fout << "   for (unsigned int itree=0; itree<fForest.size(); itree++){" << endl;
   fout << "      BDT_DecisionTreeNode *current = fForest[itree];" << endl;
   fout << "      while (current->GetNodeType() == 0) { //intermediate node" << endl;
   fout << "         if (current->GoesRight(inputValues)) current=(BDT_DecisionTreeNode*)current->GetRight();" << endl;
   fout << "         else current=(BDT_DecisionTreeNode*)current->GetLeft();" << endl;
   fout << "      }" << endl;
   if (fBoostType=="Grad"){
      fout << "      myMVA += current->GetResponse();" << endl;
   }
   else if (fUseWeightedTrees) {
      if (fUseYesNoLeaf) fout << "      myMVA += fBoostWeights[itree] *  current->GetNodeType();" << endl;
      else               fout << "      myMVA += fBoostWeights[itree] *  current->GetPurity();" << endl;
      fout << "      norm  += fBoostWeights[itree];" << endl;
   }
   else {
      if (fUseYesNoLeaf) fout << "      myMVA += current->GetNodeType();" << endl;
      else               fout << "      myMVA += current->GetPurity();" << endl;
      fout << "      norm  += 1.;" << endl;
   }
   fout << "   }" << endl;
   if (fBoostType=="Grad"){
      fout << "   return 2.0/(1.0+exp(-2.0*myMVA))-1.0;" << endl;
   }
   else fout << "   return myMVA /= norm;" << endl;
   fout << "};" << endl << endl;
   fout << "void " << className << "::Initialize()" << endl;
   fout << "{" << endl;
   //Now for each decision tree, write directly the constructors of the nodes in the tree structure
   for (int itree=0; itree<fNTrees; itree++) {
      fout << "  // itree = " << itree << endl;
      fout << "  fBoostWeights.push_back(" << fBoostWeights[itree] << ");" << endl;
      fout << "  fForest.push_back( " << endl;
      this->MakeClassInstantiateNode((DecisionTreeNode*)fForest[itree]->GetRoot(), fout, className);
      fout <<"   );" << endl;
   }
   fout << "   return;" << endl;
   fout << "};" << endl;
   fout << " " << endl;
   fout << "// Clean up" << endl;
   fout << "inline void " << className << "::Clear() " << endl;
   fout << "{" << endl;
   fout << "   for (unsigned int itree=0; itree<fForest.size(); itree++) { " << endl;
   fout << "      delete fForest[itree]; " << endl;
   fout << "   }" << endl;
   fout << "}" << endl;
}
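
//_______________________________________________________________________
// Illustration only (hypothetical helper mirroring the code emitted above for
// BoostType=Grad): the sum of leaf responses is mapped onto the interval
// (-1,1) via 2/(1+exp(-2*sum)) - 1, i.e. a shifted and rescaled sigmoid.
static Double_t ExampleGradResponse( Double_t sumOfResponses )
{
   return 2.0/(1.0 + TMath::Exp(-2.0*sumOfResponses)) - 1.0;
}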

//_______________________________________________________________________
void TMVA::MethodBDT::MakeClassSpecificHeader(  std::ostream& fout, const TString& ) const
{
   // specific class header
   fout << "#ifndef NN" << endl;
   fout << "#define NN new BDT_DecisionTreeNode" << endl;
   fout << "#endif" << endl;
   fout << "   " << endl;
   fout << "#ifndef BDT_DecisionTreeNode__def" << endl;
   fout << "#define BDT_DecisionTreeNode__def" << endl;
   fout << "   " << endl;
   fout << "class BDT_DecisionTreeNode {" << endl;
   fout << "   " << endl;
   fout << "public:" << endl;
   fout << "   " << endl;
   fout << "   // constructor of an essentially \"empty\" node floating in space" << endl;
   fout << "   BDT_DecisionTreeNode ( BDT_DecisionTreeNode* left," << endl;
   fout << "                          BDT_DecisionTreeNode* right," << endl;
   fout << "                          double cutValue, Bool_t cutType, int selector," << endl;
   fout << "                          int nodeType, double purity, double response ) :" << endl;
   fout << "   fLeft    ( left     )," << endl;
   fout << "   fRight   ( right    )," << endl;
   fout << "   fCutValue( cutValue )," << endl;
   fout << "   fCutType ( cutType  )," << endl;
   fout << "   fSelector( selector )," << endl;
   fout << "   fNodeType( nodeType )," << endl;
   fout << "   fPurity  ( purity   )," << endl;
   fout << "   fResponse( response ){}" << endl << endl;
   fout << "   virtual ~BDT_DecisionTreeNode();" << endl << endl;
   fout << "   // test event if it decends the tree at this node to the right" << endl;
   fout << "   virtual Bool_t GoesRight( const std::vector<double>& inputValues ) const;" << endl;
   fout << "   BDT_DecisionTreeNode* GetRight( void )  {return fRight; };" << endl << endl;
   fout << "   // test event if it decends the tree at this node to the left " << endl;
   fout << "   virtual Bool_t GoesLeft ( const std::vector<double>& inputValues ) const;" << endl;
   fout << "   BDT_DecisionTreeNode* GetLeft( void ) { return fLeft; };   " << endl << endl;
   fout << "   // return  S/(S+B) (purity) at this node (from  training)" << endl << endl;
   fout << "   double GetPurity( void ) const { return fPurity; } " << endl;
   fout << "   // return the node type" << endl;
   fout << "   int    GetNodeType( void ) const { return fNodeType; }" << endl;
   fout << "   double GetResponse(void) const {return fResponse;}" << endl << endl;
   fout << "private:" << endl << endl;
   fout << "   BDT_DecisionTreeNode*   fLeft;     // pointer to the left daughter node" << endl;
   fout << "   BDT_DecisionTreeNode*   fRight;    // pointer to the right daughter node" << endl;
   fout << "   double                  fCutValue; // cut value appplied on this node to discriminate bkg against sig" << endl;
   fout << "   Bool_t                  fCutType;  // true: if event variable > cutValue ==> signal , false otherwise" << endl;
   fout << "   int                     fSelector; // index of variable used in node selection (decision tree)   " << endl;
   fout << "   int                     fNodeType; // Type of node: -1 == Bkg-leaf, 1 == Signal-leaf, 0 = internal " << endl;
   fout << "   double                  fPurity;   // Purity of node from training"<< endl;
   fout << "   double                  fResponse; // Regression response value of node" << endl;
   fout << "}; " << endl;
   fout << "   " << endl;
   fout << "//_______________________________________________________________________" << endl;
   fout << "BDT_DecisionTreeNode::~BDT_DecisionTreeNode()" << endl;
   fout << "{" << endl;
   fout << "   if (fLeft  != NULL) delete fLeft;" << endl;
   fout << "   if (fRight != NULL) delete fRight;" << endl;
   fout << "}; " << endl;
   fout << "   " << endl;
   fout << "//_______________________________________________________________________" << endl;
   fout << "Bool_t BDT_DecisionTreeNode::GoesRight( const std::vector<double>& inputValues ) const" << endl;
   fout << "{" << endl;
   fout << "   // test event if it decends the tree at this node to the right" << endl;
   fout << "   Bool_t result = (inputValues[fSelector] > fCutValue );" << endl;
   fout << "   if (fCutType == true) return result; //the cuts are selecting Signal ;" << endl;
   fout << "   else return !result;" << endl;
   fout << "}" << endl;
   fout << "   " << endl;
   fout << "//_______________________________________________________________________" << endl;
   fout << "Bool_t BDT_DecisionTreeNode::GoesLeft( const std::vector<double>& inputValues ) const" << endl;
   fout << "{" << endl;
   fout << "   // test event if it decends the tree at this node to the left" << endl;
   fout << "   if (!this->GoesRight(inputValues)) return true;" << endl;
   fout << "   else return false;" << endl;
   fout << "}" << endl;
   fout << "   " << endl;
   fout << "#endif" << endl;
   fout << "   " << endl;
}

//_______________________________________________________________________
void TMVA::MethodBDT::MakeClassInstantiateNode( DecisionTreeNode *n, std::ostream& fout, const TString& className ) const
{
   // recursively descends a tree and writes the node instance to the output stream
   if (n == NULL) {
      Log() << kFATAL << "MakeClassInstantiateNode: started with undefined node" <<Endl;
      return ;
   }
   fout << "NN("<<endl;
   if (n->GetLeft() != NULL){
      this->MakeClassInstantiateNode( (DecisionTreeNode*)n->GetLeft() , fout, className);
   }
   else {
      fout << "0";
   }
   fout << ", " <<endl;
   if (n->GetRight() != NULL){
      this->MakeClassInstantiateNode( (DecisionTreeNode*)n->GetRight(), fout, className );
   }
   else {
      fout << "0";
   }
   fout << ", " <<  endl
        << setprecision(6)
        << n->GetCutValue() << ", "
        << n->GetCutType() << ", "
        << n->GetSelector() << ", "
        << n->GetNodeType() << ", "
        << n->GetPurity() << ", "
        << n->GetResponse() << ") ";

}
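
//_______________________________________________________________________
// Illustration only: for a depth-one tree, the recursion above emits nested
// constructor calls of the following shape (all numbers are made up; "NN" is
// the macro from MakeClassSpecificHeader expanding to
// "new BDT_DecisionTreeNode"; argument order: left, right, cutValue, cutType,
// selector, nodeType, purity, response):
//
//    fBoostWeights.push_back(0.71);
//    fForest.push_back(
//    NN(
//    NN(0, 0, 0, 1, 0, -1, 0.08, 0),   // left daughter: background leaf
//    NN(0, 0, 0, 1, 0,  1, 0.91, 0),   // right daughter: signal leaf
//    1.5, 1, 0, 0, 0.52, 0)            // root: event goes right if var0 > 1.5
//    );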