ROOT Reference Guide: MethodLD.cxx
// @(#)root/tmva $Id$
// Author: Krzysztof Danielowski, Kamil Kraszewski, Maciej Kruk, Jan Therhaag

/**********************************************************************************
 * Project: TMVA - a Root-integrated toolkit for multivariate data analysis      *
 * Package: TMVA                                                                  *
 * Class  : MethodLD                                                              *
 *                                                                                *
 * Description:                                                                   *
 *      Linear Discriminant - Simple Linear Regression and Classification        *
 *                                                                                *
 * Authors (alphabetical):                                                        *
 *      Krzysztof Danielowski <danielow@cern.ch>       - IFJ PAN & AGH, Poland    *
 *      Kamil Kraszewski      <kalq@cern.ch>           - IFJ PAN & UJ, Poland     *
 *      Maciej Kruk           <mkruk@cern.ch>          - IFJ PAN & AGH, Poland    *
 *      Jan Therhaag          <therhaag@physik.uni-bonn.de> - Uni Bonn, Germany   *
 *                                                                                *
 * Copyright (c) 2005-2011:                                                       *
 *      CERN, Switzerland                                                         *
 *      PAN, Poland                                                               *
 *      U. of Bonn, Germany                                                       *
 *                                                                                *
 * Redistribution and use in source and binary forms, with or without            *
 * modification, are permitted according to the terms listed in LICENSE          *
 * (see tmva/doc/LICENSE)                                                         *
 **********************************************************************************/

/*! \class TMVA::MethodLD
\ingroup TMVA
Linear Discriminant.

Can compute multidimensional output for regression
(although it computes every dimension separately).
*/
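
/*
   Example: booking this method through the TMVA Factory (a minimal sketch of
   the standard workflow, not part of this file; "dataloader", "outputFile" and
   the option strings are illustrative):

      TMVA::Factory factory( "TMVAClassification", outputFile,
                             "!V:Color:AnalysisType=Classification" );
      factory.BookMethod( dataloader, TMVA::Types::kLD, "LD", "!H:!V" );
      factory.TrainAllMethods();
*/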

#include "TMVA/MethodLD.h"

#include "TMVA/ClassifierFactory.h"
#include "TMVA/Configurable.h"
#include "TMVA/DataSet.h"
#include "TMVA/DataSetInfo.h"
#include "TMVA/IMethod.h"
#include "TMVA/MethodBase.h"
#include "TMVA/MsgLogger.h"
#include "TMVA/PDF.h"
#include "TMVA/Ranking.h"
#include "TMVA/Tools.h"
#include "TMVA/TransformationHandler.h"
#include "TMVA/Types.h"
#include "TMVA/VariableTransformBase.h"

#include "TMath.h"
#include "TMatrix.h"
#include "TMatrixD.h"
#include "TList.h"

#include <iostream>
#include <iomanip>

using std::vector;

REGISTER_METHOD(LD)

ClassImp(TMVA::MethodLD);
////////////////////////////////////////////////////////////////////////////////
/// standard constructor for the LD

TMVA::MethodLD::MethodLD( const TString& jobName,
                          const TString& methodTitle,
                          DataSetInfo& dsi,
                          const TString& theOption ) :
   MethodBase( jobName, Types::kLD, methodTitle, dsi, theOption ),
   fNRegOut   ( 0 ),
   fSumMatx   ( 0 ),
   fSumValMatx( 0 ),
   fCoeffMatx ( 0 ),
   fLDCoeff   ( 0 )
{
}

////////////////////////////////////////////////////////////////////////////////
/// constructor from weight file

TMVA::MethodLD::MethodLD( DataSetInfo& theData, const TString& theWeightFile )
   : MethodBase( Types::kLD, theData, theWeightFile ),
     fNRegOut   ( 0 ),
     fSumMatx   ( 0 ),
     fSumValMatx( 0 ),
     fCoeffMatx ( 0 ),
     fLDCoeff   ( 0 )
{
}

////////////////////////////////////////////////////////////////////////////////
/// default initialization called by all constructors

void TMVA::MethodLD::Init( void )
{
   if (DataInfo().GetNTargets() != 0) fNRegOut = DataInfo().GetNTargets();
   else                               fNRegOut = 1;

   fLDCoeff = new vector< vector< Double_t >* >(fNRegOut);
   for (Int_t iout = 0; iout < fNRegOut; iout++) {
      (*fLDCoeff)[iout] = new std::vector<Double_t>( GetNvar()+1 );
   }

   // the minimum requirement to declare an event signal-like
   SetSignalReferenceCut( 0.0 );
}

////////////////////////////////////////////////////////////////////////////////
/// destructor

TMVA::MethodLD::~MethodLD( void )
{
   if (fSumMatx)    { delete fSumMatx;    fSumMatx    = 0; }
   if (fSumValMatx) { delete fSumValMatx; fSumValMatx = 0; }
   if (fCoeffMatx)  { delete fCoeffMatx;  fCoeffMatx  = 0; }
   if (fLDCoeff) {
      for (vector< vector< Double_t >* >::iterator vi = fLDCoeff->begin(); vi != fLDCoeff->end(); ++vi) {
         if (*vi) { delete *vi; *vi = 0; }
      }
      delete fLDCoeff; fLDCoeff = 0;
   }
}

////////////////////////////////////////////////////////////////////////////////
/// LD can handle classification with 2 classes and regression with one regression target

Bool_t TMVA::MethodLD::HasAnalysisType( Types::EAnalysisType type, UInt_t numberClasses, UInt_t numberTargets )
{
   if (type == Types::kClassification && numberClasses == 2) return kTRUE;
   else if (type == Types::kRegression && numberTargets == 1) {
      Log() << "regression with " << numberTargets << " targets." << Endl;
      return kTRUE;
   }
   else return kFALSE;
}


////////////////////////////////////////////////////////////////////////////////
/// LD training: compute the summation matrices and derive the LD coefficients

void TMVA::MethodLD::Train( void )
{
   // compute fSumMatx
   GetSum();

   // compute fSumValMatx
   GetSumVal();

   // compute fCoeffMatx and fLDCoeff
   GetLDCoeff();

   // nice output
   PrintCoefficients();

   ExitFromTraining();
}

////////////////////////////////////////////////////////////////////////////////
/// Returns the MVA classification output

Double_t TMVA::MethodLD::GetMvaValue( Double_t* err, Double_t* errUpper )
{
   const Event* ev = GetEvent();

   if (fRegressionReturnVal == NULL) fRegressionReturnVal = new vector< Float_t >();
   fRegressionReturnVal->resize( fNRegOut );

   for (Int_t iout = 0; iout < fNRegOut; iout++) {
      (*fRegressionReturnVal)[iout] = (*(*fLDCoeff)[iout])[0];

      int icoeff = 0;
      for (auto const& val : ev->GetValues()) {
         (*fRegressionReturnVal)[iout] += (*(*fLDCoeff)[iout])[++icoeff] * val;
      }
   }

   // cannot determine error
   NoErrorCalc(err, errUpper);

   return (*fRegressionReturnVal)[0];
}

////////////////////////////////////////////////////////////////////////////////
/// Calculates the regression output

const std::vector< Float_t >& TMVA::MethodLD::GetRegressionValues()
{
   const Event* ev = GetEvent();

   if (fRegressionReturnVal == NULL) fRegressionReturnVal = new vector< Float_t >();
   fRegressionReturnVal->resize( fNRegOut );

   for (Int_t iout = 0; iout < fNRegOut; iout++) {
      (*fRegressionReturnVal)[iout] = (*(*fLDCoeff)[iout])[0];

      int icoeff = 0;
      for (auto const& val : ev->GetValues()) {
         (*fRegressionReturnVal)[iout] += (*(*fLDCoeff)[iout])[++icoeff] * val;
      }
   }

   // perform inverse transformation
   Event* evT = new Event(*ev);
   for (Int_t iout = 0; iout < fNRegOut; iout++) evT->SetTarget( iout, (*fRegressionReturnVal)[iout] );

   const Event* evT2 = GetTransformationHandler().InverseTransform( evT );
   fRegressionReturnVal->clear();
   for (Int_t iout = 0; iout < fNRegOut; iout++) fRegressionReturnVal->push_back( evT2->GetTarget(iout) );

   delete evT;
   return (*fRegressionReturnVal);
}

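/*
   Example: retrieving these regression outputs through the TMVA::Reader (a
   minimal sketch, assuming a Reader with the input variables already declared;
   the weight-file path is illustrative):

      reader->BookMVA( "LD", "weights/TMVARegression_LD.weights.xml" );
      const std::vector<float>& targets = reader->EvaluateRegression( "LD" );
*/
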
////////////////////////////////////////////////////////////////////////////////
/// Initialization method; creates global matrices and vectors

void TMVA::MethodLD::InitMatrices( void )
{
   fSumMatx    = new TMatrixD( GetNvar()+1, GetNvar()+1 );
   fSumValMatx = new TMatrixD( GetNvar()+1, fNRegOut );
   fCoeffMatx  = new TMatrixD( GetNvar()+1, fNRegOut );
}

////////////////////////////////////////////////////////////////////////////////
/// Calculates the matrix \f$ X^{T} W X \f$, with \f$ W \f$ being the diagonal
/// weight matrix and \f$ X \f$ the matrix of coordinate values

void TMVA::MethodLD::GetSum( void )
{
   const UInt_t nvar = DataInfo().GetNVariables();

   for (UInt_t ivar = 0; ivar <= nvar; ivar++) {
      for (UInt_t jvar = 0; jvar <= nvar; jvar++) (*fSumMatx)( ivar, jvar ) = 0;
   }

   // compute sample means
   Long64_t nevts = Data()->GetNEvents();
   for (Long64_t ievt = 0; ievt < nevts; ievt++) {
      const Event* ev = GetEvent(ievt);
      Double_t weight = ev->GetWeight();

      if (IgnoreEventsWithNegWeightsInTraining() && weight <= 0) continue;

      // sum of weights
      (*fSumMatx)( 0, 0 ) += weight;

      // sum of coordinates
      for (UInt_t ivar = 0; ivar < nvar; ivar++) {
         (*fSumMatx)( ivar+1, 0 ) += ev->GetValue( ivar ) * weight;
         (*fSumMatx)( 0, ivar+1 ) += ev->GetValue( ivar ) * weight;
      }

      // sum of products of coordinates
      for (UInt_t ivar = 0; ivar < nvar; ivar++) {
         for (UInt_t jvar = 0; jvar < nvar; jvar++) {
            (*fSumMatx)( ivar+1, jvar+1 ) += ev->GetValue( ivar ) * ev->GetValue( jvar ) * weight;
         }
      }
   }
}

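// Note (added documentation): row/column 0 corresponds to the constant term,
// i.e. fSumMatx is the (nvar+1) x (nvar+1) matrix X^T W X for the design
// matrix X whose first column is identically 1.
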
////////////////////////////////////////////////////////////////////////////////
/// Calculates the vector \f$ X^{T} W Y \f$, with \f$ Y \f$ being the target vector

void TMVA::MethodLD::GetSumVal( void )
{
   const UInt_t nvar = DataInfo().GetNVariables();

   for (Int_t ivar = 0; ivar < fNRegOut; ivar++) {
      for (UInt_t jvar = 0; jvar <= nvar; jvar++) {
         (*fSumValMatx)( jvar, ivar ) = 0;
      }
   }

   // sum of coordinates multiplied by values
   for (Long64_t ievt = 0; ievt < Data()->GetNEvents(); ievt++) {

      // retrieve the event
      const Event* ev = GetEvent(ievt);
      Double_t weight = ev->GetWeight();

      // in case events with negative weights are to be ignored
      if (IgnoreEventsWithNegWeightsInTraining() && weight <= 0) continue;

      for (Int_t ivar = 0; ivar < fNRegOut; ivar++) {

         Double_t val = weight;

         if (!DoRegression()) {
            val *= DataInfo().IsSignal(ev); // class indicator: 1 for signal, 0 for background
         }
         else { // for regression
            val *= ev->GetTarget( ivar );
         }
         (*fSumValMatx)( 0, ivar ) += val;
         for (UInt_t jvar = 0; jvar < nvar; jvar++) {
            (*fSumValMatx)( jvar+1, ivar ) += ev->GetValue(jvar) * val;
         }
      }
   }
}

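// Note (added documentation): with the class indicator as regression target,
// the least-squares solution computed below is proportional to the Fisher
// discriminant direction, which is why the same machinery serves both
// classification and regression (cf. GetHelpMessage()).
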
////////////////////////////////////////////////////////////////////////////////
/// Calculates the coefficients used for classification/regression

void TMVA::MethodLD::GetLDCoeff( void )
{
   const UInt_t nvar = DataInfo().GetNVariables();

   for (Int_t ivar = 0; ivar < fNRegOut; ivar++) {
      TMatrixD invSum( *fSumMatx );
      if (TMath::Abs(invSum.Determinant()) < 10E-24) {
         Log() << kWARNING << "<GetCoeff> matrix is almost singular with determinant="
               << TMath::Abs(invSum.Determinant())
               << " - did you use variables that are linear combinations or highly correlated?"
               << Endl;
      }
      if (TMath::Abs(invSum.Determinant()) < 10E-120) {
         Log() << kFATAL << "<GetCoeff> matrix is singular with determinant="
               << TMath::Abs(invSum.Determinant())
               << " - did you use variables that are linear combinations?"
               << Endl;
      }
      invSum.Invert();

      if (fCoeffMatx) delete fCoeffMatx; // avoid leaking the matrix from InitMatrices / previous iterations
      fCoeffMatx = new TMatrixD( invSum * (*fSumValMatx) );
      for (UInt_t jvar = 0; jvar < nvar+1; jvar++) {
         (*(*fLDCoeff)[ivar])[jvar] = (*fCoeffMatx)( jvar, ivar );
      }
      if (!DoRegression()) {
         (*(*fLDCoeff)[ivar])[0] = 0.0;
         for (UInt_t jvar = 1; jvar < nvar+1; jvar++) {
            (*(*fLDCoeff)[ivar])[0] += (*fCoeffMatx)( jvar, ivar ) * (*fSumMatx)( 0, jvar ) / (*fSumMatx)( 0, 0 );
         }
         (*(*fLDCoeff)[ivar])[0] /= -2.0;
      }
   }
}

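// Note (added documentation): the loop above solves the weighted
// least-squares normal equations
//
//    (X^T W X) c = X^T W Y   =>   c = (X^T W X)^{-1} (X^T W Y)
//
// once per output dimension; for classification the offset c_0 is then
// recomputed from the weighted sample means of the input variables.
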
////////////////////////////////////////////////////////////////////////////////
/// read LD coefficients from weight file

void TMVA::MethodLD::ReadWeightsFromStream( std::istream& istr )
{
   for (Int_t iout = 0; iout < fNRegOut; iout++) {
      for (UInt_t icoeff = 0; icoeff < GetNvar()+1; icoeff++) {
         istr >> (*(*fLDCoeff)[iout])[icoeff];
      }
   }
}

////////////////////////////////////////////////////////////////////////////////
/// create XML description for LD classification and regression
/// (for arbitrary number of output classes/targets)

void TMVA::MethodLD::AddWeightsXMLTo( void* parent ) const
{
   void* wght = gTools().AddChild(parent, "Weights");
   gTools().AddAttr( wght, "NOut",   fNRegOut    );
   gTools().AddAttr( wght, "NCoeff", GetNvar()+1 );
   for (Int_t iout = 0; iout < fNRegOut; iout++) {
      for (UInt_t icoeff = 0; icoeff < GetNvar()+1; icoeff++) {
         void* coeffxml = gTools().AddChild( wght, "Coefficient" );
         gTools().AddAttr( coeffxml, "IndexOut",   iout   );
         gTools().AddAttr( coeffxml, "IndexCoeff", icoeff );
         gTools().AddAttr( coeffxml, "Value",      (*(*fLDCoeff)[iout])[icoeff] );
      }
   }
}

////////////////////////////////////////////////////////////////////////////////
/// read coefficients from xml weight file

void TMVA::MethodLD::ReadWeightsFromXML( void* wghtnode )
{
   UInt_t ncoeff;
   gTools().ReadAttr( wghtnode, "NOut",   fNRegOut );
   gTools().ReadAttr( wghtnode, "NCoeff", ncoeff   );

   // sanity checks
   if (ncoeff != GetNvar()+1) Log() << kFATAL << "Mismatch in number of output variables/coefficients: "
                                    << ncoeff << " != " << GetNvar()+1 << Endl;

   // create vector with coefficients (double vector due to arbitrary output dimension)
   if (fLDCoeff) {
      for (vector< vector< Double_t >* >::iterator vi = fLDCoeff->begin(); vi != fLDCoeff->end(); ++vi) {
         if (*vi) { delete *vi; *vi = 0; }
      }
      delete fLDCoeff; fLDCoeff = 0;
   }
   fLDCoeff = new vector< vector< Double_t >* >(fNRegOut);
   for (Int_t ivar_out = 0; ivar_out < fNRegOut; ivar_out++) (*fLDCoeff)[ivar_out] = new std::vector<Double_t>( ncoeff );

   // read the coefficients
   Int_t    iout, icoeff;
   Double_t coeff;
   void* ch = gTools().GetChild(wghtnode);
   while (ch) {
      gTools().ReadAttr( ch, "IndexOut",   iout   );
      gTools().ReadAttr( ch, "IndexCoeff", icoeff );
      gTools().ReadAttr( ch, "Value",      coeff  );

      (*(*fLDCoeff)[iout])[icoeff] = coeff;

      ch = gTools().GetNextChild(ch);
   }
}

////////////////////////////////////////////////////////////////////////////////
/// write LD-specific classifier response

void TMVA::MethodLD::MakeClassSpecific( std::ostream& fout, const TString& className ) const
{
   fout << "   std::vector<double> fLDCoefficients;" << std::endl;
   fout << "};" << std::endl;
   fout << "" << std::endl;
   fout << "inline void " << className << "::Initialize() " << std::endl;
   fout << "{" << std::endl;
   for (UInt_t ivar = 0; ivar < GetNvar()+1; ivar++) {
      Int_t dp = fout.precision();
      fout << "   fLDCoefficients.push_back( "
           << std::setprecision(12) << (*(*fLDCoeff)[0])[ivar]
           << std::setprecision(dp) << " );" << std::endl;
   }
   fout << std::endl;
   fout << "   // sanity check" << std::endl;
   fout << "   if (fLDCoefficients.size() != fNvars+1) {" << std::endl;
   fout << "      std::cout << \"Problem in class \\\"\" << fClassName << \"\\\"::Initialize: mismatch in number of input values\"" << std::endl;
   fout << "                << fLDCoefficients.size() << \" != \" << fNvars+1 << std::endl;" << std::endl;
   fout << "      fStatusIsClean = false;" << std::endl;
   fout << "   }" << std::endl;
   fout << "}" << std::endl;
   fout << std::endl;
   fout << "inline double " << className << "::GetMvaValue__( const std::vector<double>& inputValues ) const" << std::endl;
   fout << "{" << std::endl;
   fout << "   double retval = fLDCoefficients[0];" << std::endl;
   fout << "   for (size_t ivar = 1; ivar < fNvars+1; ivar++) {" << std::endl;
   fout << "      retval += fLDCoefficients[ivar]*inputValues[ivar-1];" << std::endl;
   fout << "   }" << std::endl;
   fout << std::endl;
   fout << "   return retval;" << std::endl;
   fout << "}" << std::endl;
   fout << std::endl;
   fout << "// Clean up" << std::endl;
   fout << "inline void " << className << "::Clear() " << std::endl;
   fout << "{" << std::endl;
   fout << "   // clear coefficients" << std::endl;
   fout << "   fLDCoefficients.clear();" << std::endl;
   fout << "}" << std::endl;
}
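
/*
   Example: this method emits only the LD-specific part of the standalone
   response class; MethodBase generates the surrounding boilerplate. Assuming
   the generated class is named "ReadLD" (the usual "Read" + method-title
   convention) and takes the input-variable names in its constructor, it can
   be used without ROOT roughly as follows (a sketch; names are illustrative):

      std::vector<std::string> inputVars = { "var1", "var2" };
      ReadLD ld( inputVars );
      double mva = ld.GetMvaValue( { 0.5, -1.2 } );
*/
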
////////////////////////////////////////////////////////////////////////////////
/// computes ranking of input variables

const TMVA::Ranking* TMVA::MethodLD::CreateRanking()
{
   // create the ranking object
   fRanking = new Ranking( GetName(), "Discr. power" );

   for (UInt_t ivar = 0; ivar < GetNvar(); ivar++) {
      fRanking->AddRank( Rank( GetInputLabel(ivar), TMath::Abs( (*(*fLDCoeff)[0])[ivar+1] ) ) );
   }

   return fRanking;
}

////////////////////////////////////////////////////////////////////////////////
/// MethodLD options

void TMVA::MethodLD::DeclareOptions()
{
   AddPreDefVal(TString("LD"));
}

////////////////////////////////////////////////////////////////////////////////
/// this is the preparation for training

void TMVA::MethodLD::ProcessOptions()
{
   if (HasTrainingTree()) InitMatrices();
}

////////////////////////////////////////////////////////////////////////////////
/// Display the classification/regression coefficients for each variable

void TMVA::MethodLD::PrintCoefficients( void )
{
   Log() << kHEADER << "Results for LD coefficients:" << Endl;

   if (GetTransformationHandler().GetTransformationList().GetSize() != 0) {
      Log() << kINFO << "NOTE: The coefficients must be applied to TRANSFORMED variables" << Endl;
      Log() << kINFO << "      List of the transformations: " << Endl;
      TListIter trIt(&GetTransformationHandler().GetTransformationList());
      while (VariableTransformBase* trf = (VariableTransformBase*) trIt()) {
         Log() << kINFO << "  -- " << trf->GetName() << Endl;
      }
   }
   std::vector<TString>  vars;
   std::vector<Double_t> coeffs;
   for (UInt_t ivar = 0; ivar < GetNvar(); ivar++) {
      vars  .push_back( GetInputLabel(ivar) );
      coeffs.push_back( (*(*fLDCoeff)[0])[ivar+1] );
   }
   vars  .push_back( "(offset)" );
   coeffs.push_back( (*(*fLDCoeff)[0])[0] );
   TMVA::gTools().FormattedOutput( coeffs, vars, "Variable", "Coefficient", Log() );
   if (IsNormalised()) {
      Log() << kINFO << "NOTE: You have chosen to use the \"Normalise\" booking option. Hence, the" << Endl;
      Log() << kINFO << "      coefficients must be applied to NORMALISED (') variables as follows:" << Endl;
      Int_t maxL = 0;
      for (UInt_t ivar = 0; ivar < GetNvar(); ivar++) if (GetInputLabel(ivar).Length() > maxL) maxL = GetInputLabel(ivar).Length();

      // Print normalisation expression (see Tools.cxx): "2*(x - xmin)/(xmax - xmin) - 1.0"
      for (UInt_t ivar = 0; ivar < GetNvar(); ivar++) {
         Log() << kINFO
               << std::setw(maxL+9) << TString("[") + GetInputLabel(ivar) + "]' = 2*("
               << std::setw(maxL+2) << TString("[") + GetInputLabel(ivar) + "]"
               << std::setw(3) << (GetXmin(ivar) > 0 ? " - " : " + ")
               << std::setw(6) << TMath::Abs(GetXmin(ivar)) << std::setw(3) << ")/"
               << std::setw(6) << (GetXmax(ivar) - GetXmin(ivar))
               << std::setw(3) << " - 1"
               << Endl;
      }
      Log() << kINFO << "The TMVA Reader will properly account for this normalisation, but if the" << Endl;
      Log() << kINFO << "LD classifier is applied outside the Reader, the transformation must be" << Endl;
      Log() << kINFO << "implemented -- or the \"Normalise\" option is removed and LD retrained." << Endl;
      Log() << kINFO << Endl;
   }
}

////////////////////////////////////////////////////////////////////////////////
/// get help message text
///
/// typical length of text line:
///         "|--------------------------------------------------------------|"

void TMVA::MethodLD::GetHelpMessage() const
{
   Log() << Endl;
   Log() << gTools().Color("bold") << "--- Short description:" << gTools().Color("reset") << Endl;
   Log() << Endl;
   Log() << "Linear discriminants select events by distinguishing the mean " << Endl;
   Log() << "values of the signal and background distributions in a trans- " << Endl;
   Log() << "formed variable space where linear correlations are removed." << Endl;
   Log() << "The LD implementation here is equivalent to the \"Fisher\" discriminant" << Endl;
   Log() << "for classification, but also provides linear regression." << Endl;
   Log() << Endl;
   Log() << "   (More precisely: the \"linear discriminator\" determines" << Endl;
   Log() << "    an axis in the (correlated) hyperspace of the input " << Endl;
   Log() << "    variables such that, when projecting the output classes " << Endl;
   Log() << "    (signal and background) upon this axis, they are pushed " << Endl;
   Log() << "    as far as possible away from each other, while events" << Endl;
   Log() << "    of the same class are confined to a close vicinity. The " << Endl;
   Log() << "    linearity property of this classifier is reflected in the " << Endl;
   Log() << "    metric with which \"far apart\" and \"close vicinity\" are " << Endl;
   Log() << "    determined: the covariance matrix of the discriminating" << Endl;
   Log() << "    variable space.)" << Endl;
   Log() << Endl;
   Log() << gTools().Color("bold") << "--- Performance optimisation:" << gTools().Color("reset") << Endl;
   Log() << Endl;
   Log() << "Optimal performance for the linear discriminant is obtained for " << Endl;
   Log() << "linearly correlated Gaussian-distributed variables. Any deviation" << Endl;
   Log() << "from this ideal reduces the achievable separation power. In " << Endl;
   Log() << "particular, no discrimination at all is achieved for a variable" << Endl;
   Log() << "that has the same sample mean for signal and background, even if " << Endl;
   Log() << "the shapes of the distributions are very different. Thus, the linear " << Endl;
   Log() << "discriminant often benefits from a suitable transformation of the " << Endl;
   Log() << "input variables. For example, if a variable x in [-1,1] has a " << Endl;
   Log() << "parabolic signal distribution and a uniform background" << Endl;
   Log() << "distribution, the mean value is zero in both cases, leading " << Endl;
   Log() << "to no separation. The simple transformation x -> |x| renders this " << Endl;
   Log() << "variable powerful for use in a linear discriminant." << Endl;
   Log() << Endl;
   Log() << gTools().Color("bold") << "--- Performance tuning via configuration options:" << gTools().Color("reset") << Endl;
   Log() << Endl;
   Log() << "<None>" << Endl;
}