Logo ROOT   6.07/09
Reference Guide
MethodLD.cxx
Go to the documentation of this file.
1 // @(#)root/tmva $Id$
2 // Author: Krzysztof Danielowski, Kamil Kraszewski, Maciej Kruk, Jan Therhaag
3 
4 /**********************************************************************************
5  * Project: TMVA - a Root-integrated toolkit for multivariate data analysis *
6  * Package: TMVA *
7  * Class : MethodLD *
8  * Web : http://tmva.sourceforge.net *
9  * *
10  * Description: *
11  * Linear Discriminant - Simple Linear Regression and Classification *
12  * *
13  * Authors (alphabetical): *
14  * Krzysztof Danielowski <danielow@cern.ch> - IFJ PAN & AGH, Poland *
15  * Kamil Kraszewski <kalq@cern.ch> - IFJ PAN & UJ, Poland *
16  * Maciej Kruk <mkruk@cern.ch> - IFJ PAN & AGH, Poland *
17  * Jan Therhaag <therhaag@physik.uni-bonn.de> - Uni Bonn, Germany *
18  * *
19  * Copyright (c) 2005-2011: *
20  * CERN, Switzerland *
21  * PAN, Poland *
22  * U. of Bonn, Germany *
23  * *
24  * Redistribution and use in source and binary forms, with or without *
25  * modification, are permitted according to the terms listed in LICENSE *
26  * (http://tmva.sourceforge.net/LICENSE) *
27  * *
28  **********************************************************************************/
29 
30 #include "TMVA/MethodLD.h"
31 
32 #include "TMVA/ClassifierFactory.h"
33 #include "TMVA/Configurable.h"
34 #include "TMVA/DataSet.h"
35 #include "TMVA/DataSetInfo.h"
36 #include "TMVA/IMethod.h"
37 #include "TMVA/MethodBase.h"
38 #include "TMVA/MsgLogger.h"
39 #include "TMVA/PDF.h"
40 #include "TMVA/Ranking.h"
41 #include "TMVA/Tools.h"
43 #include "TMVA/Types.h"
45 
46 #include "Riostream.h"
47 #include "TMath.h"
48 #include "TMatrix.h"
49 #include "TMatrixD.h"
50 #include "TList.h"
51 
52 #include <iomanip>
53 
54 using std::vector;
55 
57 
58 ClassImp(TMVA::MethodLD)
59 
60 ////////////////////////////////////////////////////////////////////////////////
61 /// standard constructor for the LD
62 
63  TMVA::MethodLD::MethodLD( const TString& jobName,
64  const TString& methodTitle,
65  DataSetInfo& dsi,
66  const TString& theOption ) :
67  MethodBase( jobName, Types::kLD, methodTitle, dsi, theOption),
68  fNRegOut ( 0 ),
69  fSumMatx ( 0 ),
70  fSumValMatx( 0 ),
71  fCoeffMatx ( 0 ),
72  fLDCoeff ( 0 )
73 {
74 }
75 
76 ////////////////////////////////////////////////////////////////////////////////
77 /// constructor from weight file
78 
79 TMVA::MethodLD::MethodLD( DataSetInfo& theData, const TString& theWeightFile)
80  : MethodBase( Types::kLD, theData, theWeightFile),
81  fNRegOut ( 0 ),
82  fSumMatx ( 0 ),
83  fSumValMatx( 0 ),
84  fCoeffMatx ( 0 ),
85  fLDCoeff ( 0 )
86 {
87 }
88 
89 ////////////////////////////////////////////////////////////////////////////////
90 /// default initialization called by all constructors
91 
93 {
95  else fNRegOut = 1;
96 
97  fLDCoeff = new vector< vector< Double_t >* >(fNRegOut);
98  for (Int_t iout = 0; iout<fNRegOut; iout++){
99  (*fLDCoeff)[iout] = new std::vector<Double_t>( GetNvar()+1 );
100  }
101 
102  // the minimum requirement to declare an event signal-like
103  SetSignalReferenceCut( 0.0 );
104 }
105 
106 ////////////////////////////////////////////////////////////////////////////////
107 /// destructor
108 
110 {
111  if (fSumMatx) { delete fSumMatx; fSumMatx = 0; }
112  if (fSumValMatx) { delete fSumValMatx; fSumValMatx = 0; }
113  if (fCoeffMatx) { delete fCoeffMatx; fCoeffMatx = 0; }
114  if (fLDCoeff) {
115  for (vector< vector< Double_t >* >::iterator vi=fLDCoeff->begin(); vi!=fLDCoeff->end(); vi++){
116  if (*vi) { delete *vi; *vi = 0; }
117  }
118  delete fLDCoeff; fLDCoeff = 0;
119  }
120 }
121 
122 ////////////////////////////////////////////////////////////////////////////////
123 /// LD can handle classification with 2 classes and regression with one regression-target
124 
126 {
127  if (type == Types::kClassification && numberClasses == 2) return kTRUE;
128  else if (type == Types::kRegression && numberTargets == 1) {
129  Log() << "regression with " << numberTargets << " targets.";
130  return kTRUE;
131  }
132  else return kFALSE;
133 }
134 
135 
136 ////////////////////////////////////////////////////////////////////////////////
137 /// compute fSumMatx
138 
140 {
141  GetSum();
142 
143  // compute fSumValMatx
144  GetSumVal();
145 
146  // compute fCoeffMatx and fLDCoeff
147  GetLDCoeff();
148 
149  // nice output
151 
153 }
154 
155 ////////////////////////////////////////////////////////////////////////////////
156 ///Returns the MVA classification output
157 
159 {
160  const Event* ev = GetEvent();
161 
162  if (fRegressionReturnVal == NULL) fRegressionReturnVal = new vector< Float_t >();
163  fRegressionReturnVal->resize( fNRegOut );
164 
165  for (Int_t iout = 0; iout<fNRegOut; iout++) {
166  (*fRegressionReturnVal)[iout] = (*(*fLDCoeff)[iout])[0] ;
167 
168  int icoeff=0;
169  for (std::vector<Float_t>::const_iterator it = ev->GetValues().begin();it!=ev->GetValues().end();++it){
170  (*fRegressionReturnVal)[iout] += (*(*fLDCoeff)[iout])[++icoeff] * (*it);
171  }
172  }
173 
174  // cannot determine error
175  NoErrorCalc(err, errUpper);
176 
177  return (*fRegressionReturnVal)[0];
178 }
179 
180 ////////////////////////////////////////////////////////////////////////////////
181 ///Calculates the regression output
182 
183 const std::vector< Float_t >& TMVA::MethodLD::GetRegressionValues()
184 {
185  const Event* ev = GetEvent();
186 
187  if (fRegressionReturnVal == NULL) fRegressionReturnVal = new vector< Float_t >();
188  fRegressionReturnVal->resize( fNRegOut );
189 
190  for (Int_t iout = 0; iout<fNRegOut; iout++) {
191  (*fRegressionReturnVal)[iout] = (*(*fLDCoeff)[iout])[0] ;
192 
193  int icoeff = 0;
194  for (std::vector<Float_t>::const_iterator it = ev->GetValues().begin();it!=ev->GetValues().end();++it){
195  (*fRegressionReturnVal)[iout] += (*(*fLDCoeff)[iout])[++icoeff] * (*it);
196  }
197  }
198 
199  // perform inverse transformation
200  Event* evT = new Event(*ev);
201  for (Int_t iout = 0; iout<fNRegOut; iout++) evT->SetTarget(iout,(*fRegressionReturnVal)[iout]);
202 
203  const Event* evT2 = GetTransformationHandler().InverseTransform( evT );
204  fRegressionReturnVal->clear();
205  for (Int_t iout = 0; iout<fNRegOut; iout++) fRegressionReturnVal->push_back(evT2->GetTarget(iout));
206 
207  delete evT;
208  return (*fRegressionReturnVal);
209 }
210 
211 ////////////////////////////////////////////////////////////////////////////////
212 /// Initialization method; creates global matrices and vectors
213 
215 {
216  fSumMatx = new TMatrixD( GetNvar()+1, GetNvar()+1 );
217  fSumValMatx = new TMatrixD( GetNvar()+1, fNRegOut );
218  fCoeffMatx = new TMatrixD( GetNvar()+1, fNRegOut );
219 
220 }
221 
222 ////////////////////////////////////////////////////////////////////////////////
223 /// Calculates the matrix transposed(X)*W*X with W being the diagonal weight matrix
224 /// and X the coordinates values
225 
227 {
228  const UInt_t nvar = DataInfo().GetNVariables();
229 
230  for (UInt_t ivar = 0; ivar<=nvar; ivar++){
231  for (UInt_t jvar = 0; jvar<=nvar; jvar++) (*fSumMatx)( ivar, jvar ) = 0;
232  }
233 
234  // compute sample means
235  Long64_t nevts = Data()->GetNEvents();
236  for (Int_t ievt=0; ievt<nevts; ievt++) {
237  const Event * ev = GetEvent(ievt);
238  Double_t weight = ev->GetWeight();
239 
240  if (IgnoreEventsWithNegWeightsInTraining() && weight <= 0) continue;
241 
242  // Sum of weights
243  (*fSumMatx)( 0, 0 ) += weight;
244 
245  // Sum of coordinates
246  for (UInt_t ivar=0; ivar<nvar; ivar++) {
247  (*fSumMatx)( ivar+1, 0 ) += ev->GetValue( ivar ) * weight;
248  (*fSumMatx)( 0, ivar+1 ) += ev->GetValue( ivar ) * weight;
249  }
250 
251  // Sum of products of coordinates
252  for (UInt_t ivar=0; ivar<nvar; ivar++){
253  for (UInt_t jvar=0; jvar<nvar; jvar++){
254  (*fSumMatx)( ivar+1, jvar+1 ) += ev->GetValue( ivar ) * ev->GetValue( jvar ) * weight;
255  }
256  }
257  }
258 }
259 
260 ////////////////////////////////////////////////////////////////////////////////
261 ///Calculates the vector transposed(X)*W*Y with Y being the target vector
262 
264 {
265  const UInt_t nvar = DataInfo().GetNVariables();
266 
267  for (Int_t ivar = 0; ivar<fNRegOut; ivar++){
268  for (UInt_t jvar = 0; jvar<=nvar; jvar++){
269  (*fSumValMatx)(jvar,ivar) = 0;
270  }
271  }
272 
273  // Sum of coordinates multiplied by values
274  for (Int_t ievt=0; ievt<Data()->GetNEvents(); ievt++) {
275 
276  // retrieve the event
277  const Event* ev = GetEvent(ievt);
278  Double_t weight = ev->GetWeight();
279 
280  // in case event with neg weights are to be ignored
281  if (IgnoreEventsWithNegWeightsInTraining() && weight <= 0) continue;
282 
283  for (Int_t ivar=0; ivar<fNRegOut; ivar++) {
284 
285  Double_t val = weight;
286 
287  if (!DoRegression()){
288  val *= DataInfo().IsSignal(ev); // yes it works.. but I'm still surprised (Helge).. would have not set y_B to zero though..
289  }else {//for regression
290  val *= ev->GetTarget( ivar );
291  }
292  (*fSumValMatx)( 0,ivar ) += val;
293  for (UInt_t jvar=0; jvar<nvar; jvar++) {
294  (*fSumValMatx)(jvar+1,ivar ) += ev->GetValue(jvar) * val;
295  }
296  }
297  }
298 }
299 
300 ////////////////////////////////////////////////////////////////////////////////
301 /// Calculates the coefficients used for classification/regression
302 
304 {
305  const UInt_t nvar = DataInfo().GetNVariables();
306 
307  for (Int_t ivar = 0; ivar<fNRegOut; ivar++){
308  TMatrixD invSum( *fSumMatx );
309  if ( TMath::Abs(invSum.Determinant()) < 10E-24 ) {
310  Log() << kWARNING << "<GetCoeff> matrix is almost singular with determinant="
311  << TMath::Abs(invSum.Determinant())
312  << " did you use the variables that are linear combinations or highly correlated?"
313  << Endl;
314  }
315  if ( TMath::Abs(invSum.Determinant()) < 10E-120 ) {
316  Log() << kFATAL << "<GetCoeff> matrix is singular with determinant="
317  << TMath::Abs(invSum.Determinant())
318  << " did you use the variables that are linear combinations?"
319  << Endl;
320  }
321  invSum.Invert();
322 
323  fCoeffMatx = new TMatrixD( invSum * (*fSumValMatx));
324  for (UInt_t jvar = 0; jvar<nvar+1; jvar++) {
325  (*(*fLDCoeff)[ivar])[jvar] = (*fCoeffMatx)(jvar, ivar );
326  }
327  if (!DoRegression()) {
328  (*(*fLDCoeff)[ivar])[0]=0.0;
329  for (UInt_t jvar = 1; jvar<nvar+1; jvar++){
330  (*(*fLDCoeff)[ivar])[0]+=(*fCoeffMatx)(jvar,ivar)*(*fSumMatx)(0,jvar)/(*fSumMatx)( 0, 0 );
331  }
332  (*(*fLDCoeff)[ivar])[0]/=-2.0;
333  }
334 
335  }
336 }
337 
338 ////////////////////////////////////////////////////////////////////////////////
339 /// read LD coefficients from weight file
340 
341 void TMVA::MethodLD::ReadWeightsFromStream( std::istream& istr )
342 {
343  for (Int_t iout=0; iout<fNRegOut; iout++){
344  for (UInt_t icoeff=0; icoeff<GetNvar()+1; icoeff++){
345  istr >> (*(*fLDCoeff)[iout])[icoeff];
346  }
347  }
348 }
349 
350 ////////////////////////////////////////////////////////////////////////////////
351 /// create XML description for LD classification and regression
352 /// (for arbitrary number of output classes/targets)
353 
354 void TMVA::MethodLD::AddWeightsXMLTo( void* parent ) const
355 {
356  void* wght = gTools().AddChild(parent, "Weights");
357  gTools().AddAttr( wght, "NOut", fNRegOut );
358  gTools().AddAttr( wght, "NCoeff", GetNvar()+1 );
359  for (Int_t iout=0; iout<fNRegOut; iout++) {
360  for (UInt_t icoeff=0; icoeff<GetNvar()+1; icoeff++) {
361  void* coeffxml = gTools().AddChild( wght, "Coefficient" );
362  gTools().AddAttr( coeffxml, "IndexOut", iout );
363  gTools().AddAttr( coeffxml, "IndexCoeff", icoeff );
364  gTools().AddAttr( coeffxml, "Value", (*(*fLDCoeff)[iout])[icoeff] );
365  }
366  }
367 }
368 
369 ////////////////////////////////////////////////////////////////////////////////
370 /// read coefficients from xml weight file
371 
372 void TMVA::MethodLD::ReadWeightsFromXML( void* wghtnode )
373 {
374  UInt_t ncoeff;
375  gTools().ReadAttr( wghtnode, "NOut", fNRegOut );
376  gTools().ReadAttr( wghtnode, "NCoeff", ncoeff );
377 
378  // sanity checks
379  if (ncoeff != GetNvar()+1) Log() << kFATAL << "Mismatch in number of output variables/coefficients: "
380  << ncoeff << " != " << GetNvar()+1 << Endl;
381 
382  // create vector with coefficients (double vector due to arbitrary output dimension)
383  if (fLDCoeff) {
384  for (vector< vector< Double_t >* >::iterator vi=fLDCoeff->begin(); vi!=fLDCoeff->end(); vi++){
385  if (*vi) { delete *vi; *vi = 0; }
386  }
387  delete fLDCoeff; fLDCoeff = 0;
388  }
389  fLDCoeff = new vector< vector< Double_t >* >(fNRegOut);
390  for (Int_t ivar = 0; ivar<fNRegOut; ivar++) (*fLDCoeff)[ivar] = new std::vector<Double_t>( ncoeff );
391 
392  void* ch = gTools().GetChild(wghtnode);
393  Double_t coeff;
394  Int_t iout, icoeff;
395  while (ch) {
396  gTools().ReadAttr( ch, "IndexOut", iout );
397  gTools().ReadAttr( ch, "IndexCoeff", icoeff );
398  gTools().ReadAttr( ch, "Value", coeff );
399 
400  (*(*fLDCoeff)[iout])[icoeff] = coeff;
401 
402  ch = gTools().GetNextChild(ch);
403  }
404 }
405 
406 ////////////////////////////////////////////////////////////////////////////////
407 /// write LD-specific standalone classifier response code
408 
409 void TMVA::MethodLD::MakeClassSpecific( std::ostream& fout, const TString& className ) const
410 {
 // Emits the tail of a standalone C++ class: the coefficient member and the
 // closing "};" of a class body (presumably opened by the caller — TODO
 // confirm against the MethodBase class-maker machinery), followed by the
 // Initialize(), GetMvaValue__() and Clear() method definitions.
411  fout << " std::vector<double> fLDCoefficients;" << std::endl;
412  fout << "};" << std::endl;
413  fout << "" << std::endl;
414  fout << "inline void " << className << "::Initialize() " << std::endl;
415  fout << "{" << std::endl;
 // one push_back per coefficient (offset + one per input variable), written
 // with 12 significant digits; the stream precision is restored afterwards
416  for (UInt_t ivar=0; ivar<GetNvar()+1; ivar++) {
417  Int_t dp = fout.precision();
418  fout << " fLDCoefficients.push_back( "
419  << std::setprecision(12) << (*(*fLDCoeff)[0])[ivar]
420  << std::setprecision(dp) << " );" << std::endl;
421  }
422  fout << std::endl;
423  fout << " // sanity check" << std::endl;
424  fout << " if (fLDCoefficients.size() != fNvars+1) {" << std::endl;
425  fout << " std::cout << \"Problem in class \\\"\" << fClassName << \"\\\"::Initialize: mismatch in number of input values\"" << std::endl;
426  fout << " << fLDCoefficients.size() << \" != \" << fNvars+1 << std::endl;" << std::endl;
427  fout << " fStatusIsClean = false;" << std::endl;
428  fout << " } " << std::endl;
429  fout << "}" << std::endl;
430  fout << std::endl;
 // generated evaluator: offset term plus coefficient-weighted sum of inputs
431  fout << "inline double " << className << "::GetMvaValue__( const std::vector<double>& inputValues ) const" << std::endl;
432  fout << "{" << std::endl;
433  fout << " double retval = fLDCoefficients[0];" << std::endl;
434  fout << " for (size_t ivar = 1; ivar < fNvars+1; ivar++) {" << std::endl;
435  fout << " retval += fLDCoefficients[ivar]*inputValues[ivar-1];" << std::endl;
436  fout << " }" << std::endl;
437  fout << std::endl;
438  fout << " return retval;" << std::endl;
439  fout << "}" << std::endl;
440  fout << std::endl;
441  fout << "// Clean up" << std::endl;
442  fout << "inline void " << className << "::Clear() " << std::endl;
443  fout << "{" << std::endl;
444  fout << " // clear coefficients" << std::endl;
445  fout << " fLDCoefficients.clear(); " << std::endl;
446  fout << "}" << std::endl;
447 }
448 ////////////////////////////////////////////////////////////////////////////////
449 /// computes ranking of input variables
450 
452 {
453  // create the ranking object
454  fRanking = new Ranking( GetName(), "Discr. power" );
455 
456  for (UInt_t ivar=0; ivar<GetNvar(); ivar++) {
457  fRanking->AddRank( Rank( GetInputLabel(ivar), TMath::Abs((* (*fLDCoeff)[0])[ivar+1] )) );
458  }
459 
460  return fRanking;
461 }
462 
463 ////////////////////////////////////////////////////////////////////////////////
464 ///MethodLD options
465 
467 {
468  AddPreDefVal(TString("LD"));
469 }
470 
471 ////////////////////////////////////////////////////////////////////////////////
472 /// this is the preparation for training
473 
475 {
476  if (HasTrainingTree()) InitMatrices();
477 }
478 
479 ////////////////////////////////////////////////////////////////////////////////
480 ///Display the classification/regression coefficients for each variable
481 
483 {
484  Log() << kHEADER << "Results for LD coefficients:" << Endl;
485 
486  if (GetTransformationHandler().GetTransformationList().GetSize() != 0) {
487  Log() << kINFO << "NOTE: The coefficients must be applied to TRANFORMED variables" << Endl;
488  Log() << kINFO << " List of the transformation: " << Endl;
489  TListIter trIt(&GetTransformationHandler().GetTransformationList());
490  while (VariableTransformBase *trf = (VariableTransformBase*) trIt() ) {
491  Log() << kINFO << " -- " << trf->GetName() << Endl;
492  }
493  }
494  std::vector<TString> vars;
495  std::vector<Double_t> coeffs;
496  for (UInt_t ivar=0; ivar<GetNvar(); ivar++) {
497  vars .push_back( GetInputLabel(ivar) );
498  coeffs.push_back( (* (*fLDCoeff)[0])[ivar+1] );
499  }
500  vars .push_back( "(offset)" );
501  coeffs.push_back((* (*fLDCoeff)[0])[0] );
502  TMVA::gTools().FormattedOutput( coeffs, vars, "Variable" , "Coefficient", Log() );
503  if (IsNormalised()) {
504  Log() << kINFO << "NOTE: You have chosen to use the \"Normalise\" booking option. Hence, the" << Endl;
505  Log() << kINFO << " coefficients must be applied to NORMALISED (') variables as follows:" << Endl;
506  Int_t maxL = 0;
507  for (UInt_t ivar=0; ivar<GetNvar(); ivar++) if (GetInputLabel(ivar).Length() > maxL) maxL = GetInputLabel(ivar).Length();
508 
509  // Print normalisation expression (see Tools.cxx): "2*(x - xmin)/(xmax - xmin) - 1.0"
510  for (UInt_t ivar=0; ivar<GetNvar(); ivar++) {
511  Log() << kINFO
512  << std::setw(maxL+9) << TString("[") + GetInputLabel(ivar) + "]' = 2*("
513  << std::setw(maxL+2) << TString("[") + GetInputLabel(ivar) + "]"
514  << std::setw(3) << (GetXmin(ivar) > 0 ? " - " : " + ")
515  << std::setw(6) << TMath::Abs(GetXmin(ivar)) << std::setw(3) << ")/"
516  << std::setw(6) << (GetXmax(ivar) - GetXmin(ivar) )
517  << std::setw(3) << " - 1"
518  << Endl;
519  }
520  Log() << kINFO << "The TMVA Reader will properly account for this normalisation, but if the" << Endl;
521  Log() << kINFO << "LD classifier is applied outside the Reader, the transformation must be" << Endl;
522  Log() << kINFO << "implemented -- or the \"Normalise\" option is removed and LD retrained." << Endl;
523  Log() << kINFO << Endl;
524  }
525 }
526 
527 ////////////////////////////////////////////////////////////////////////////////
528 /// get help message text
529 ///
530 /// typical length of text line:
531 /// "|--------------------------------------------------------------|"
532 
534 {
535  Log() << Endl;
536  Log() << gTools().Color("bold") << "--- Short description:" << gTools().Color("reset") << Endl;
537  Log() << Endl;
538  Log() << "Linear discriminants select events by distinguishing the mean " << Endl;
539  Log() << "values of the signal and background distributions in a trans- " << Endl;
540  Log() << "formed variable space where linear correlations are removed." << Endl;
541  Log() << "The LD implementation here is equivalent to the \"Fisher\" discriminant" << Endl;
542  Log() << "for classification, but also provides linear regression." << Endl;
543  Log() << Endl;
544  Log() << " (More precisely: the \"linear discriminator\" determines" << Endl;
545  Log() << " an axis in the (correlated) hyperspace of the input " << Endl;
546  Log() << " variables such that, when projecting the output classes " << Endl;
547  Log() << " (signal and background) upon this axis, they are pushed " << Endl;
548  Log() << " as far as possible away from each other, while events" << Endl;
549  Log() << " of a same class are confined in a close vicinity. The " << Endl;
550  Log() << " linearity property of this classifier is reflected in the " << Endl;
551  Log() << " metric with which \"far apart\" and \"close vicinity\" are " << Endl;
552  Log() << " determined: the covariance matrix of the discriminating" << Endl;
553  Log() << " variable space.)" << Endl;
554  Log() << Endl;
555  Log() << gTools().Color("bold") << "--- Performance optimisation:" << gTools().Color("reset") << Endl;
556  Log() << Endl;
557  Log() << "Optimal performance for the linear discriminant is obtained for " << Endl;
558  Log() << "linearly correlated Gaussian-distributed variables. Any deviation" << Endl;
559  Log() << "from this ideal reduces the achievable separation power. In " << Endl;
560  Log() << "particular, no discrimination at all is achieved for a variable" << Endl;
561  Log() << "that has the same sample mean for signal and background, even if " << Endl;
562  Log() << "the shapes of the distributions are very different. Thus, the linear " << Endl;
563  Log() << "discriminant often benefits from a suitable transformation of the " << Endl;
564  Log() << "input variables. For example, if a variable x in [-1,1] has a " << Endl;
565  Log() << "a parabolic signal distributions, and a uniform background" << Endl;
566  Log() << "distributions, their mean value is zero in both cases, leading " << Endl;
567  Log() << "to no separation. The simple transformation x -> |x| renders this " << Endl;
568  Log() << "variable powerful for the use in a linear discriminant." << Endl;
569  Log() << Endl;
570  Log() << gTools().Color("bold") << "--- Performance tuning via configuration options:" << gTools().Color("reset") << Endl;
571  Log() << Endl;
572  Log() << "<None>" << Endl;
573 }
virtual const std::vector< Float_t > & GetRegressionValues()
Calculates the regression output.
Definition: MethodLD.cxx:183
const Ranking * CreateRanking()
computes ranking of input variables
Definition: MethodLD.cxx:451
MsgLogger & Endl(MsgLogger &ml)
Definition: MsgLogger.h:162
long long Long64_t
Definition: RtypesCore.h:69
#define REGISTER_METHOD(CLASS)
for example
std::vector< std::vector< Double_t > * > * fLDCoeff
Definition: MethodLD.h:105
Ssiz_t Length() const
Definition: TString.h:390
Bool_t HasAnalysisType(Types::EAnalysisType type, UInt_t numberClasses, UInt_t numberTargets)
LD can handle classification with 2 classes and regression with one regression-target.
Definition: MethodLD.cxx:125
const char * GetName() const
Definition: MethodBase.h:330
Bool_t IgnoreEventsWithNegWeightsInTraining() const
Definition: MethodBase.h:680
void GetSumVal(void)
Calculates the vector transposed(X)*W*Y with Y being the target vector.
Definition: MethodLD.cxx:263
DataSet * Data() const
Definition: MethodBase.h:405
UInt_t GetNvar() const
Definition: MethodBase.h:340
EAnalysisType
Definition: Types.h:128
UInt_t GetNTargets() const
Definition: DataSetInfo.h:129
Bool_t IsNormalised() const
Definition: MethodBase.h:490
Basic string class.
Definition: TString.h:137
TransformationHandler & GetTransformationHandler(Bool_t takeReroutedIfAvailable=true)
Definition: MethodBase.h:390
int Int_t
Definition: RtypesCore.h:41
bool Bool_t
Definition: RtypesCore.h:59
const Bool_t kFALSE
Definition: Rtypes.h:92
virtual ~MethodLD(void)
destructor
Definition: MethodLD.cxx:109
TMatrixD * fSumValMatx
Definition: MethodLD.h:103
void PrintCoefficients(void)
Display the classification/regression coefficients for each variable.
Definition: MethodLD.cxx:482
Double_t GetWeight() const
return the event weight - depending on whether the flag IgnoreNegWeightsInTraining is or not...
Definition: Event.cxx:378
void AddAttr(void *node, const char *, const T &value, Int_t precision=16)
Definition: Tools.h:309
UInt_t GetNVariables() const
Definition: DataSetInfo.h:128
void * AddChild(void *parent, const char *childname, const char *content=0, bool isRootNode=false)
add child node
Definition: Tools.cxx:1134
Short_t Abs(Short_t d)
Definition: TMathBase.h:110
Iterator of linked list.
Definition: TList.h:187
TMatrixD * fCoeffMatx
Definition: MethodLD.h:104
Float_t GetValue(UInt_t ivar) const
return value of i'th variable
Definition: Event.cxx:233
void ReadWeightsFromXML(void *wghtnode)
read coefficients from xml weight file
Definition: MethodLD.cxx:372
Tools & gTools()
Definition: Tools.cxx:79
Double_t GetMvaValue(Double_t *err=0, Double_t *errUpper=0)
Returns the MVA classification output.
Definition: MethodLD.cxx:158
Bool_t IsSignal(const Event *ev) const
UInt_t GetNTargets() const
Definition: MethodBase.h:342
void * GetChild(void *parent, const char *childname=0)
get child node
Definition: Tools.cxx:1158
void MakeClassSpecific(std::ostream &, const TString &) const
write LD-specific classifier response
Definition: MethodLD.cxx:409
TMatrixT< Element > & Invert(Double_t *det=0)
Invert the matrix and calculate its determinant.
Definition: TMatrixT.cxx:1396
Int_t fNRegOut
Definition: MethodLD.h:100
TMatrixT< Double_t > TMatrixD
Definition: TMatrixDfwd.h:24
void ReadWeightsFromStream(std::istream &i)
read LD coefficients from weight file
Definition: MethodLD.cxx:341
Bool_t HasTrainingTree() const
Definition: MethodBase.h:507
void AddWeightsXMLTo(void *parent) const
create XML description for LD classification and regression (for arbitrary number of output classes/t...
Definition: MethodLD.cxx:354
unsigned int UInt_t
Definition: RtypesCore.h:42
const Event * GetEvent() const
Definition: MethodBase.h:745
Double_t E()
Definition: TMath.h:54
void SetTarget(UInt_t itgt, Float_t value)
set the target value (dimension itgt) to value
Definition: Event.cxx:356
void ReadAttr(void *node, const char *, T &value)
Definition: Tools.h:296
void DeclareOptions()
MethodLD options.
Definition: MethodLD.cxx:466
#define ClassImp(name)
Definition: Rtypes.h:279
double Double_t
Definition: RtypesCore.h:55
MethodLD(const TString &jobName, const TString &methodTitle, DataSetInfo &dsi, const TString &theOption="LD")
standard constructor for the LD
Definition: MethodLD.cxx:63
int type
Definition: TGX11.cxx:120
void * GetNextChild(void *prevchild, const char *childname=0)
XML helpers.
Definition: Tools.cxx:1170
Long64_t GetNEvents(Types::ETreeType type=Types::kMaxTreeType) const
Definition: DataSet.h:229
MsgLogger & Log() const
Definition: Configurable.h:128
DataSetInfo & DataInfo() const
Definition: MethodBase.h:406
void AddPreDefVal(const T &)
Definition: Configurable.h:174
const TString & GetInputLabel(Int_t i) const
Definition: MethodBase.h:346
void ExitFromTraining()
Definition: MethodBase.h:458
void FormattedOutput(const std::vector< Double_t > &, const std::vector< TString > &, const TString titleVars, const TString titleValues, MsgLogger &logger, TString format="%+1.3f")
formatted output of simple table
Definition: Tools.cxx:896
const TString & Color(const TString &)
human readable color strings
Definition: Tools.cxx:837
void GetSum(void)
Calculates the matrix transposed(X)*W*X with W being the diagonal weight matrix and X the coordinates...
Definition: MethodLD.cxx:226
Float_t GetTarget(UInt_t itgt) const
Definition: Event.h:104
Bool_t DoRegression() const
Definition: MethodBase.h:434
Abstract ClassifierFactory template that handles arbitrary types.
Ranking * fRanking
Definition: MethodBase.h:581
std::vector< Float_t > & GetValues()
Definition: Event.h:96
void GetLDCoeff(void)
Calculates the coefficients used for classification/regression.
Definition: MethodLD.cxx:303
Double_t GetXmax(Int_t ivar) const
Definition: MethodBase.h:353
virtual void AddRank(const Rank &rank)
Add a new rank take ownership of it.
Definition: Ranking.cxx:86
#define NULL
Definition: Rtypes.h:82
virtual Double_t Determinant() const
Return the matrix determinant.
Definition: TMatrixT.cxx:1361
void Train(void)
compute fSumMatx
Definition: MethodLD.cxx:139
void InitMatrices(void)
Initialization method; creates global matrices and vectors.
Definition: MethodLD.cxx:214
std::vector< Float_t > * fRegressionReturnVal
Definition: MethodBase.h:591
void Init(void)
default initialization called by all constructors
Definition: MethodLD.cxx:92
const Bool_t kTRUE
Definition: Rtypes.h:91
void GetHelpMessage() const
get help message text
Definition: MethodLD.cxx:533
TMatrixD * fSumMatx
Definition: MethodLD.h:102
void NoErrorCalc(Double_t *const err, Double_t *const errUpper)
Definition: MethodBase.cxx:819
void SetSignalReferenceCut(Double_t cut)
Definition: MethodBase.h:360
void ProcessOptions()
this is the preparation for training
Definition: MethodLD.cxx:474
Double_t GetXmin(Int_t ivar) const
Definition: MethodBase.h:352
const Event * InverseTransform(const Event *, Bool_t suppressIfNoTargets=true) const