MethodLD.cxx
// @(#)root/tmva $Id$
// Author: Krzysztof Danielowski, Kamil Kraszewski, Maciej Kruk, Jan Therhaag

/**********************************************************************************
 * Project: TMVA - a Root-integrated toolkit for multivariate data analysis      *
 * Package: TMVA                                                                  *
 * Class  : MethodLD                                                              *
 * Web    : http://tmva.sourceforge.net                                           *
 *                                                                                *
 * Description:                                                                   *
 *      Linear Discriminant - Simple Linear Regression and Classification        *
 *                                                                                *
 * Authors (alphabetical):                                                        *
 *      Krzysztof Danielowski <danielow@cern.ch>  - IFJ PAN & AGH, Poland         *
 *      Kamil Kraszewski      <kalq@cern.ch>      - IFJ PAN & UJ, Poland          *
 *      Maciej Kruk           <mkruk@cern.ch>     - IFJ PAN & AGH, Poland         *
 *      Jan Therhaag <therhaag@physik.uni-bonn.de> - Uni Bonn, Germany            *
 *                                                                                *
 * Copyright (c) 2005-2011:                                                       *
 *      CERN, Switzerland                                                         *
 *      PAN, Poland                                                               *
 *      U. of Bonn, Germany                                                       *
 *                                                                                *
 * Redistribution and use in source and binary forms, with or without            *
 * modification, are permitted according to the terms listed in LICENSE          *
 * (http://tmva.sourceforge.net/LICENSE)                                          *
 *                                                                                *
 **********************************************************************************/

/*! \class TMVA::MethodLD
\ingroup TMVA
Linear Discriminant.

Can compute multidimensional output for regression
(although it computes every dimension separately).
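
Example: booking the method through the TMVA Factory (a minimal sketch; the
`factory` and `dataloader` objects come from the standard TMVA setup and are
not defined in this file):

~~~ {.cpp}
factory->BookMethod( dataloader, TMVA::Types::kLD, "LD", "!H:!V" );
~~~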
*/

#include "TMVA/MethodLD.h"

#include "TMVA/ClassifierFactory.h"
#include "TMVA/Configurable.h"
#include "TMVA/DataSet.h"
#include "TMVA/DataSetInfo.h"
#include "TMVA/IMethod.h"
#include "TMVA/MethodBase.h"
#include "TMVA/MsgLogger.h"
#include "TMVA/PDF.h"
#include "TMVA/Ranking.h"
#include "TMVA/Tools.h"
#include "TMVA/TransformationHandler.h"
#include "TMVA/Types.h"
#include "TMVA/VariableTransformBase.h"

#include "Riostream.h"
#include "TMath.h"
#include "TMatrix.h"
#include "TMatrixD.h"
#include "TList.h"

#include <iomanip>

using std::vector;

REGISTER_METHOD(LD)

ClassImp(TMVA::MethodLD);

////////////////////////////////////////////////////////////////////////////////
/// standard constructor for the LD

TMVA::MethodLD::MethodLD( const TString& jobName,
                          const TString& methodTitle,
                          DataSetInfo& dsi,
                          const TString& theOption ) :
   MethodBase( jobName, Types::kLD, methodTitle, dsi, theOption),
   fNRegOut   ( 0 ),
   fSumMatx   ( 0 ),
   fSumValMatx( 0 ),
   fCoeffMatx ( 0 ),
   fLDCoeff   ( 0 )
{
}

////////////////////////////////////////////////////////////////////////////////
/// constructor from weight file

TMVA::MethodLD::MethodLD( DataSetInfo& theData, const TString& theWeightFile)
   : MethodBase( Types::kLD, theData, theWeightFile),
     fNRegOut   ( 0 ),
     fSumMatx   ( 0 ),
     fSumValMatx( 0 ),
     fCoeffMatx ( 0 ),
     fLDCoeff   ( 0 )
{
}

////////////////////////////////////////////////////////////////////////////////
/// default initialization called by all constructors

void TMVA::MethodLD::Init( void )
{
   if (DataInfo().GetNTargets() != 0) fNRegOut = DataInfo().GetNTargets();
   else                               fNRegOut = 1;

   fLDCoeff = new vector< vector< Double_t >* >(fNRegOut);
   for (Int_t iout = 0; iout<fNRegOut; iout++){
      (*fLDCoeff)[iout] = new std::vector<Double_t>( GetNvar()+1 );
   }

   // the minimum requirement to declare an event signal-like
   SetSignalReferenceCut( 0.0 );
}

////////////////////////////////////////////////////////////////////////////////
/// destructor

TMVA::MethodLD::~MethodLD( void )
{
   if (fSumMatx)    { delete fSumMatx;    fSumMatx    = 0; }
   if (fSumValMatx) { delete fSumValMatx; fSumValMatx = 0; }
   if (fCoeffMatx)  { delete fCoeffMatx;  fCoeffMatx  = 0; }
   if (fLDCoeff) {
      for (vector< vector< Double_t >* >::iterator vi=fLDCoeff->begin(); vi!=fLDCoeff->end(); ++vi){
         if (*vi) { delete *vi; *vi = 0; }
      }
      delete fLDCoeff; fLDCoeff = 0;
   }
}

////////////////////////////////////////////////////////////////////////////////
/// LD can handle classification with 2 classes and regression with one regression-target

Bool_t TMVA::MethodLD::HasAnalysisType( Types::EAnalysisType type, UInt_t numberClasses, UInt_t numberTargets )
{
   if      (type == Types::kClassification && numberClasses == 2) return kTRUE;
   else if (type == Types::kRegression     && numberTargets == 1) {
      Log() << "regression with " << numberTargets << " targets.";
      return kTRUE;
   }
   else return kFALSE;
}

////////////////////////////////////////////////////////////////////////////////
/// trains the LD: computes the moment matrices, solves for the coefficients,
/// and prints them

void TMVA::MethodLD::Train( void )
{
   // compute fSumMatx
   GetSum();

   // compute fSumValMatx
   GetSumVal();

   // compute fCoeffMatx and fLDCoeff
   GetLDCoeff();

   // nice output
   PrintCoefficients();

   ExitFromTraining();
}

////////////////////////////////////////////////////////////////////////////////
/// Returns the MVA classification output

Double_t TMVA::MethodLD::GetMvaValue( Double_t* err, Double_t* errUpper )
{
   const Event* ev = GetEvent();

   if (fRegressionReturnVal == NULL) fRegressionReturnVal = new vector< Float_t >();
   fRegressionReturnVal->resize( fNRegOut );

   for (Int_t iout = 0; iout<fNRegOut; iout++) {
      (*fRegressionReturnVal)[iout] = (*(*fLDCoeff)[iout])[0];

      int icoeff = 0;
      for (std::vector<Float_t>::const_iterator it = ev->GetValues().begin(); it != ev->GetValues().end(); ++it){
         (*fRegressionReturnVal)[iout] += (*(*fLDCoeff)[iout])[++icoeff] * (*it);
      }
   }

   // cannot determine error
   NoErrorCalc(err, errUpper);

   return (*fRegressionReturnVal)[0];
}
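
// The value returned by GetMvaValue() is the affine form
//    LD(x) = coeff[0] + coeff[1]*x_1 + ... + coeff[nvar]*x_nvar
// evaluated on the event's (transformed) input values. A typical application
// sketch (assumes a trained "LD" method; the variable name and weight-file
// path are illustrative, not prescribed by this class):
//
//    TMVA::Reader reader;
//    Float_t var1; reader.AddVariable( "var1", &var1 );
//    reader.BookMVA( "LD", "dataset/weights/TMVAClassification_LD.weights.xml" );
//    Double_t mva = reader.EvaluateMVA( "LD" );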

////////////////////////////////////////////////////////////////////////////////
/// Calculates the regression output

const std::vector< Float_t >& TMVA::MethodLD::GetRegressionValues()
{
   const Event* ev = GetEvent();

   if (fRegressionReturnVal == NULL) fRegressionReturnVal = new vector< Float_t >();
   fRegressionReturnVal->resize( fNRegOut );

   for (Int_t iout = 0; iout<fNRegOut; iout++) {
      (*fRegressionReturnVal)[iout] = (*(*fLDCoeff)[iout])[0];

      int icoeff = 0;
      for (std::vector<Float_t>::const_iterator it = ev->GetValues().begin(); it != ev->GetValues().end(); ++it){
         (*fRegressionReturnVal)[iout] += (*(*fLDCoeff)[iout])[++icoeff] * (*it);
      }
   }

   // perform inverse transformation
   Event* evT = new Event(*ev);
   for (Int_t iout = 0; iout<fNRegOut; iout++) evT->SetTarget(iout,(*fRegressionReturnVal)[iout]);

   const Event* evT2 = GetTransformationHandler().InverseTransform( evT );
   fRegressionReturnVal->clear();
   for (Int_t iout = 0; iout<fNRegOut; iout++) fRegressionReturnVal->push_back(evT2->GetTarget(iout));

   delete evT;
   return (*fRegressionReturnVal);
}
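
// Regression use mirrors classification (sketch; reader set up as above):
//
//    const std::vector<Float_t>& targets = reader.EvaluateRegression( "LD" );
//
// Each output dimension is its own linear form; the inverse target
// transformation applied above maps the result back to the original scale.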

////////////////////////////////////////////////////////////////////////////////
/// Initialization method; creates global matrices and vectors

void TMVA::MethodLD::InitMatrices( void )
{
   fSumMatx    = new TMatrixD( GetNvar()+1, GetNvar()+1 );
   fSumValMatx = new TMatrixD( GetNvar()+1, fNRegOut );
   fCoeffMatx  = new TMatrixD( GetNvar()+1, fNRegOut );
}

////////////////////////////////////////////////////////////////////////////////
/// Calculates the matrix transposed(X)*W*X with W being the diagonal weight matrix
/// and X the coordinate values

void TMVA::MethodLD::GetSum( void )
{
   const UInt_t nvar = DataInfo().GetNVariables();

   for (UInt_t ivar = 0; ivar<=nvar; ivar++){
      for (UInt_t jvar = 0; jvar<=nvar; jvar++) (*fSumMatx)( ivar, jvar ) = 0;
   }

   // compute sample means
   Long64_t nevts = Data()->GetNEvents();
   for (Int_t ievt=0; ievt<nevts; ievt++) {
      const Event * ev = GetEvent(ievt);
      Double_t weight = ev->GetWeight();

      if (IgnoreEventsWithNegWeightsInTraining() && weight <= 0) continue;

      // Sum of weights
      (*fSumMatx)( 0, 0 ) += weight;

      // Sum of coordinates
      for (UInt_t ivar=0; ivar<nvar; ivar++) {
         (*fSumMatx)( ivar+1, 0 ) += ev->GetValue( ivar ) * weight;
         (*fSumMatx)( 0, ivar+1 ) += ev->GetValue( ivar ) * weight;
      }

      // Sum of products of coordinates
      for (UInt_t ivar=0; ivar<nvar; ivar++){
         for (UInt_t jvar=0; jvar<nvar; jvar++){
            (*fSumMatx)( ivar+1, jvar+1 ) += ev->GetValue( ivar ) * ev->GetValue( jvar ) * weight;
         }
      }
   }
}
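
// In matrix notation: with an implicit constant coordinate x_0 = 1, the sums
// accumulated above make fSumMatx the (nvar+1) x (nvar+1) moment matrix
//    fSumMatx(i,j) = sum_ev w * x_i * x_j  =  (X^T W X)(i,j),
// where X is the design matrix of the training events and W = diag(w).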

////////////////////////////////////////////////////////////////////////////////
/// Calculates the vector transposed(X)*W*Y with Y being the target vector

void TMVA::MethodLD::GetSumVal( void )
{
   const UInt_t nvar = DataInfo().GetNVariables();

   for (Int_t ivar = 0; ivar<fNRegOut; ivar++){
      for (UInt_t jvar = 0; jvar<=nvar; jvar++){
         (*fSumValMatx)(jvar,ivar) = 0;
      }
   }

   // Sum of coordinates multiplied by values
   for (Int_t ievt=0; ievt<Data()->GetNEvents(); ievt++) {

      // retrieve the event
      const Event* ev = GetEvent(ievt);
      Double_t weight = ev->GetWeight();

      // in case events with negative weights are to be ignored
      if (IgnoreEventsWithNegWeightsInTraining() && weight <= 0) continue;

      for (Int_t ivar=0; ivar<fNRegOut; ivar++) {

         Double_t val = weight;

         if (!DoRegression()){
            val *= DataInfo().IsSignal(ev); // yes it works.. but I'm still surprised (Helge).. would have not set y_B to zero though..
         } else { // for regression
            val *= ev->GetTarget( ivar );
         }
         (*fSumValMatx)( 0, ivar ) += val;
         for (UInt_t jvar=0; jvar<nvar; jvar++) {
            (*fSumValMatx)( jvar+1, ivar ) += ev->GetValue(jvar) * val;
         }
      }
   }
}
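
// Correspondingly, fSumValMatx(j,i) = sum_ev w * x_j * y_i = (X^T W Y)(j,i),
// where for classification y is the signal indicator (1 for signal, 0 for
// background) and for regression y is the event's target vector.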

////////////////////////////////////////////////////////////////////////////////
/// Calculates the coefficients used for classification/regression

void TMVA::MethodLD::GetLDCoeff( void )
{
   const UInt_t nvar = DataInfo().GetNVariables();

   for (Int_t ivar = 0; ivar<fNRegOut; ivar++){
      TMatrixD invSum( *fSumMatx );
      if ( TMath::Abs(invSum.Determinant()) < 10E-24 ) {
         Log() << kWARNING << "<GetCoeff> matrix is almost singular with determinant="
               << TMath::Abs(invSum.Determinant())
               << " did you use variables that are linear combinations or highly correlated?"
               << Endl;
      }
      if ( TMath::Abs(invSum.Determinant()) < 10E-120 ) {
         Log() << kFATAL << "<GetCoeff> matrix is singular with determinant="
               << TMath::Abs(invSum.Determinant())
               << " did you use variables that are linear combinations?"
               << Endl;
      }
      invSum.Invert();

      fCoeffMatx = new TMatrixD( invSum * (*fSumValMatx) );
      for (UInt_t jvar = 0; jvar<nvar+1; jvar++) {
         (*(*fLDCoeff)[ivar])[jvar] = (*fCoeffMatx)( jvar, ivar );
      }
      if (!DoRegression()) {
         (*(*fLDCoeff)[ivar])[0] = 0.0;
         for (UInt_t jvar = 1; jvar<nvar+1; jvar++){
            (*(*fLDCoeff)[ivar])[0] += (*fCoeffMatx)(jvar,ivar)*(*fSumMatx)(0,jvar)/(*fSumMatx)( 0, 0 );
         }
         (*(*fLDCoeff)[ivar])[0] /= -2.0;
      }
   }
}
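
// Together with GetSum() and GetSumVal() this is a weighted least-squares
// solve of the normal equations,
//    coefficients = (X^T W X)^{-1} (X^T W Y).
// For classification the offset is then recomputed from the global weighted
// means <x_j> = fSumMatx(0,j)/fSumMatx(0,0) as
//    coeff[0] = -1/2 * sum_j coeff[j] * <x_j>,
// a midpoint-style threshold matching the zero signal-reference cut set in Init().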

////////////////////////////////////////////////////////////////////////////////
/// read LD coefficients from weight file

void TMVA::MethodLD::ReadWeightsFromStream( std::istream& istr )
{
   for (Int_t iout=0; iout<fNRegOut; iout++){
      for (UInt_t icoeff=0; icoeff<GetNvar()+1; icoeff++){
         istr >> (*(*fLDCoeff)[iout])[icoeff];
      }
   }
}

////////////////////////////////////////////////////////////////////////////////
/// create XML description for LD classification and regression
/// (for arbitrary number of output classes/targets)

void TMVA::MethodLD::AddWeightsXMLTo( void* parent ) const
{
   void* wght = gTools().AddChild(parent, "Weights");
   gTools().AddAttr( wght, "NOut",   fNRegOut    );
   gTools().AddAttr( wght, "NCoeff", GetNvar()+1 );
   for (Int_t iout=0; iout<fNRegOut; iout++) {
      for (UInt_t icoeff=0; icoeff<GetNvar()+1; icoeff++) {
         void* coeffxml = gTools().AddChild( wght, "Coefficient" );
         gTools().AddAttr( coeffxml, "IndexOut",   iout   );
         gTools().AddAttr( coeffxml, "IndexCoeff", icoeff );
         gTools().AddAttr( coeffxml, "Value",      (*(*fLDCoeff)[iout])[icoeff] );
      }
   }
}
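
// The emitted XML fragment has the following shape (values illustrative):
//
//    <Weights NOut="1" NCoeff="3">
//      <Coefficient IndexOut="0" IndexCoeff="0" Value="..."/>
//      <Coefficient IndexOut="0" IndexCoeff="1" Value="..."/>
//      <Coefficient IndexOut="0" IndexCoeff="2" Value="..."/>
//    </Weights>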

////////////////////////////////////////////////////////////////////////////////
/// read coefficients from xml weight file

void TMVA::MethodLD::ReadWeightsFromXML( void* wghtnode )
{
   UInt_t ncoeff;
   gTools().ReadAttr( wghtnode, "NOut",   fNRegOut );
   gTools().ReadAttr( wghtnode, "NCoeff", ncoeff   );

   // sanity checks
   if (ncoeff != GetNvar()+1) Log() << kFATAL << "Mismatch in number of output variables/coefficients: "
                                    << ncoeff << " != " << GetNvar()+1 << Endl;

   // create vector with coefficients (double vector due to arbitrary output dimension)
   if (fLDCoeff) {
      for (vector< vector< Double_t >* >::iterator vi=fLDCoeff->begin(); vi!=fLDCoeff->end(); ++vi){
         if (*vi) { delete *vi; *vi = 0; }
      }
      delete fLDCoeff; fLDCoeff = 0;
   }
   fLDCoeff = new vector< vector< Double_t >* >(fNRegOut);
   for (Int_t ivar = 0; ivar<fNRegOut; ivar++) (*fLDCoeff)[ivar] = new std::vector<Double_t>( ncoeff );

   void* ch = gTools().GetChild(wghtnode);
   Double_t coeff;
   Int_t iout, icoeff;
   while (ch) {
      gTools().ReadAttr( ch, "IndexOut",   iout   );
      gTools().ReadAttr( ch, "IndexCoeff", icoeff );
      gTools().ReadAttr( ch, "Value",      coeff  );

      (*(*fLDCoeff)[iout])[icoeff] = coeff;

      ch = gTools().GetNextChild(ch);
   }
}

////////////////////////////////////////////////////////////////////////////////
/// write LD-specific classifier response

void TMVA::MethodLD::MakeClassSpecific( std::ostream& fout, const TString& className ) const
{
   fout << "   std::vector<double> fLDCoefficients;" << std::endl;
   fout << "};" << std::endl;
   fout << "" << std::endl;
   fout << "inline void " << className << "::Initialize() " << std::endl;
   fout << "{" << std::endl;
   for (UInt_t ivar=0; ivar<GetNvar()+1; ivar++) {
      Int_t dp = fout.precision();
      fout << "   fLDCoefficients.push_back( "
           << std::setprecision(12) << (*(*fLDCoeff)[0])[ivar]
           << std::setprecision(dp) << " );" << std::endl;
   }
   fout << std::endl;
   fout << "   // sanity check" << std::endl;
   fout << "   if (fLDCoefficients.size() != fNvars+1) {" << std::endl;
   fout << "      std::cout << \"Problem in class \\\"\" << fClassName << \"\\\"::Initialize: mismatch in number of input values\"" << std::endl;
   fout << "                << fLDCoefficients.size() << \" != \" << fNvars+1 << std::endl;" << std::endl;
   fout << "      fStatusIsClean = false;" << std::endl;
   fout << "   } " << std::endl;
   fout << "}" << std::endl;
   fout << std::endl;
   fout << "inline double " << className << "::GetMvaValue__( const std::vector<double>& inputValues ) const" << std::endl;
   fout << "{" << std::endl;
   fout << "   double retval = fLDCoefficients[0];" << std::endl;
   fout << "   for (size_t ivar = 1; ivar < fNvars+1; ivar++) {" << std::endl;
   fout << "      retval += fLDCoefficients[ivar]*inputValues[ivar-1];" << std::endl;
   fout << "   }" << std::endl;
   fout << std::endl;
   fout << "   return retval;" << std::endl;
   fout << "}" << std::endl;
   fout << std::endl;
   fout << "// Clean up" << std::endl;
   fout << "inline void " << className << "::Clear() " << std::endl;
   fout << "{" << std::endl;
   fout << "   // clear coefficients" << std::endl;
   fout << "   fLDCoefficients.clear(); " << std::endl;
   fout << "}" << std::endl;
}
////////////////////////////////////////////////////////////////////////////////
/// computes ranking of input variables

const TMVA::Ranking* TMVA::MethodLD::CreateRanking()
{
   // create the ranking object
   fRanking = new Ranking( GetName(), "Discr. power" );

   for (UInt_t ivar=0; ivar<GetNvar(); ivar++) {
      fRanking->AddRank( Rank( GetInputLabel(ivar), TMath::Abs( (*(*fLDCoeff)[0])[ivar+1] ) ) );
   }

   return fRanking;
}

////////////////////////////////////////////////////////////////////////////////
/// MethodLD options

void TMVA::MethodLD::DeclareOptions()
{
   AddPreDefVal(TString("LD"));
}

////////////////////////////////////////////////////////////////////////////////
/// this is the preparation for training

void TMVA::MethodLD::ProcessOptions()
{
   if (HasTrainingTree()) InitMatrices();
}

////////////////////////////////////////////////////////////////////////////////
/// Display the classification/regression coefficients for each variable

void TMVA::MethodLD::PrintCoefficients( void )
{
   Log() << kHEADER << "Results for LD coefficients:" << Endl;

   if (GetTransformationHandler().GetTransformationList().GetSize() != 0) {
      Log() << kINFO << "NOTE: The coefficients must be applied to TRANSFORMED variables" << Endl;
      Log() << kINFO << "      List of the transformations: " << Endl;
      TListIter trIt(&GetTransformationHandler().GetTransformationList());
      while (VariableTransformBase *trf = (VariableTransformBase*) trIt() ) {
         Log() << kINFO << "  -- " << trf->GetName() << Endl;
      }
   }
   std::vector<TString>  vars;
   std::vector<Double_t> coeffs;
   for (UInt_t ivar=0; ivar<GetNvar(); ivar++) {
      vars  .push_back( GetInputLabel(ivar) );
      coeffs.push_back( (*(*fLDCoeff)[0])[ivar+1] );
   }
   vars  .push_back( "(offset)" );
   coeffs.push_back( (*(*fLDCoeff)[0])[0] );
   TMVA::gTools().FormattedOutput( coeffs, vars, "Variable", "Coefficient", Log() );
   if (IsNormalised()) {
      Log() << kINFO << "NOTE: You have chosen to use the \"Normalise\" booking option. Hence, the" << Endl;
      Log() << kINFO << "      coefficients must be applied to NORMALISED (') variables as follows:" << Endl;
      Int_t maxL = 0;
      for (UInt_t ivar=0; ivar<GetNvar(); ivar++) if (GetInputLabel(ivar).Length() > maxL) maxL = GetInputLabel(ivar).Length();

      // Print normalisation expression (see Tools.cxx): "2*(x - xmin)/(xmax - xmin) - 1.0"
      for (UInt_t ivar=0; ivar<GetNvar(); ivar++) {
         Log() << kINFO
               << std::setw(maxL+9) << TString("[") + GetInputLabel(ivar) + "]' = 2*("
               << std::setw(maxL+2) << TString("[") + GetInputLabel(ivar) + "]"
               << std::setw(3)      << (GetXmin(ivar) > 0 ? " - " : " + ")
               << std::setw(6)      << TMath::Abs(GetXmin(ivar)) << std::setw(3) << ")/"
               << std::setw(6)      << (GetXmax(ivar) - GetXmin(ivar))
               << std::setw(3)      << " - 1"
               << Endl;
      }
      Log() << kINFO << "The TMVA Reader will properly account for this normalisation, but if the" << Endl;
      Log() << kINFO << "LD classifier is applied outside the Reader, the transformation must be" << Endl;
      Log() << kINFO << "implemented -- or the \"Normalise\" option is removed and LD retrained." << Endl;
      Log() << kINFO << Endl;
   }
}
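
// When "Normalise" is active, applying the printed coefficients outside the
// Reader requires normalising each input first (see Tools.cxx):
//    x' = 2*(x - xmin)/(xmax - xmin) - 1,
// and then evaluating LD(x') = coeff[0] + sum_i coeff[i]*x'_i.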

////////////////////////////////////////////////////////////////////////////////
/// get help message text
///
/// typical length of text line:
///   "|--------------------------------------------------------------|"

void TMVA::MethodLD::GetHelpMessage() const
{
   Log() << Endl;
   Log() << gTools().Color("bold") << "--- Short description:" << gTools().Color("reset") << Endl;
   Log() << Endl;
   Log() << "Linear discriminants select events by distinguishing the mean " << Endl;
   Log() << "values of the signal and background distributions in a trans- " << Endl;
   Log() << "formed variable space where linear correlations are removed." << Endl;
   Log() << "The LD implementation here is equivalent to the \"Fisher\" discriminant" << Endl;
   Log() << "for classification, but also provides linear regression." << Endl;
   Log() << Endl;
   Log() << "   (More precisely: the \"linear discriminator\" determines" << Endl;
   Log() << "    an axis in the (correlated) hyperspace of the input " << Endl;
   Log() << "    variables such that, when projecting the output classes " << Endl;
   Log() << "    (signal and background) upon this axis, they are pushed " << Endl;
   Log() << "    as far as possible away from each other, while events" << Endl;
   Log() << "    of the same class are confined in a close vicinity. The " << Endl;
   Log() << "    linearity property of this classifier is reflected in the " << Endl;
   Log() << "    metric with which \"far apart\" and \"close vicinity\" are " << Endl;
   Log() << "    determined: the covariance matrix of the discriminating" << Endl;
   Log() << "    variable space.)" << Endl;
   Log() << Endl;
   Log() << gTools().Color("bold") << "--- Performance optimisation:" << gTools().Color("reset") << Endl;
   Log() << Endl;
   Log() << "Optimal performance for the linear discriminant is obtained for " << Endl;
   Log() << "linearly correlated Gaussian-distributed variables. Any deviation" << Endl;
   Log() << "from this ideal reduces the achievable separation power. In " << Endl;
   Log() << "particular, no discrimination at all is achieved for a variable" << Endl;
   Log() << "that has the same sample mean for signal and background, even if " << Endl;
   Log() << "the shapes of the distributions are very different. Thus, the linear " << Endl;
   Log() << "discriminant often benefits from a suitable transformation of the " << Endl;
   Log() << "input variables. For example, if a variable x in [-1,1] has a " << Endl;
   Log() << "parabolic signal distribution and a uniform background" << Endl;
   Log() << "distribution, their mean value is zero in both cases, leading " << Endl;
   Log() << "to no separation. The simple transformation x -> |x| renders this " << Endl;
   Log() << "variable powerful for use in a linear discriminant." << Endl;
   Log() << Endl;
   Log() << gTools().Color("bold") << "--- Performance tuning via configuration options:" << gTools().Color("reset") << Endl;
   Log() << Endl;
   Log() << "<None>" << Endl;
}
Definition: TMathBase.h:120