ROOT  6.06/09
Reference Guide
MethodSVM.cxx
Go to the documentation of this file.
1 // @(#)root/tmva $Id$
2 // Author: Marcin Wolter, Andrzej Zemla
3 
4 /**********************************************************************************
5  * Project: TMVA - a Root-integrated toolkit for multivariate data analysis *
6  * Package: TMVA *
7  * Class : MethodSVM *
8  * Web : http://tmva.sourceforge.net *
9  * *
10  * Description: *
11  * Implementation *
12  * *
13  * Authors (alphabetical): *
14  * Marcin Wolter <Marcin.Wolter@cern.ch> - IFJ PAN, Krakow, Poland *
15  * Andrzej Zemla <azemla@cern.ch> - IFJ PAN, Krakow, Poland *
16  * (IFJ PAN: Henryk Niewodniczanski Inst. Nucl. Physics, Krakow, Poland) *
17  * *
18  * Introduction of regression by: *
19  * Krzysztof Danielowski <danielow@cern.ch> - IFJ PAN & AGH, Krakow, Poland *
20  * Kamil Kraszewski <kalq@cern.ch> - IFJ PAN & UJ, Krakow, Poland *
21  * Maciej Kruk <mkruk@cern.ch> - IFJ PAN & AGH, Krakow, Poland *
22  * *
23  * *
24  * Copyright (c) 2005: *
25  * CERN, Switzerland *
26  * MPI-K Heidelberg, Germany *
27  * PAN, Krakow, Poland *
28  * *
29  * Redistribution and use in source and binary forms, with or without *
30  * modification, are permitted according to the terms listed in LICENSE *
31  * (http://tmva.sourceforge.net/LICENSE) *
32  **********************************************************************************/
33 
34 //_______________________________________________________________________
35 //
36 // SMO Platt's SVM classifier with Keerthi & Shavade improvements
37 //_______________________________________________________________________
38 
39 #include "Riostream.h"
40 #include "TMath.h"
41 #include "TFile.h"
42 
43 #include "TMVA/ClassifierFactory.h"
44 #ifndef ROOT_TMVA_MethodSVM
45 #include "TMVA/MethodSVM.h"
46 #endif
47 #ifndef ROOT_TMVA_Tools
48 #include "TMVA/Tools.h"
49 #endif
50 #ifndef ROOT_TMVA_Timer
51 #include "TMVA/Timer.h"
52 #endif
53 
54 #ifndef ROOT_TMVA_SVWorkingSet
55 #include "TMVA/SVWorkingSet.h"
56 #endif
57 
58 #ifndef ROOT_TMVA_SVEvent
59 #include "TMVA/SVEvent.h"
60 #endif
61 
62 #ifndef ROOT_TMVA_SVKernelFunction
63 #include "TMVA/SVKernelFunction.h"
64 #endif
65 
66 #include <string>
67 
68 using std::vector;
69 
70 //const Int_t basketsize__ = 1280000;
71 REGISTER_METHOD(SVM)
72 
73 ClassImp(TMVA::MethodSVM)
74 
75 ////////////////////////////////////////////////////////////////////////////////
76 /// standard constructor
77 
TMVA::MethodSVM::MethodSVM( const TString& jobName, const TString& methodTitle, DataSetInfo& theData,
                            const TString& theOption, TDirectory* theTargetDir )
   : MethodBase( jobName, Types::kSVM, methodTitle, theData, theOption, theTargetDir )
     // All members start zeroed; real values are filled in later by
     // DeclareOptions()/ProcessOptions() and by Train().
   , fCost(0)               // cost parameter "C" (default set in DeclareOptions)
   , fTolerance(0)          // convergence tolerance "Tol"
   , fMaxIter(0)            // maximum number of training loops "MaxIter"
   , fNSubSets(0)           // number of training subsets (legacy reader option)
   , fBparm(0)              // bias term of the decision function
   , fGamma(0)              // RBF kernel width parameter
   , fWgSet(0)              // SVM working set (owned; created in Train)
   , fInputData(0)          // training events (owned; created in Init)
   , fSupportVectors(0)     // support vectors (owned; deleted in destructor)
   , fSVKernelFunction(0)   // kernel function (owned)
   , fMinVars(0)            // per-variable minima read from the weight file
   , fMaxVars(0)            // per-variable maxima read from the weight file
   , fDoubleSigmaSquared(0) // legacy Gaussian kernel parameter "Sigma"
   , fOrder(0)              // legacy polynomial kernel order
   , fTheta(0)              // legacy sigmoid kernel parameter
   , fKappa(0)              // legacy sigmoid kernel parameter
{
}
99 
100 ////////////////////////////////////////////////////////////////////////////////
101 /// constructor from weight file
102 
TMVA::MethodSVM::MethodSVM( DataSetInfo& theData, const TString& theWeightFile, TDirectory* theTargetDir )
   : MethodBase( Types::kSVM, theData, theWeightFile, theTargetDir )
   , fCost(0)
   , fTolerance(0)
   , fMaxIter(0)
   , fNSubSets(0)
   , fBparm(0)
   , fGamma(0)
   , fWgSet(0)
   , fInputData(0)
   , fSupportVectors(0)
   , fSVKernelFunction(0)
   , fMinVars(0)
   , fMaxVars(0)
   , fDoubleSigmaSquared(0)
   , fOrder(0)
   , fTheta(0)
   , fKappa(0)
{
   // Members are only zero-initialised here; the actual classifier state is
   // restored afterwards by ReadWeightsFromXML()/ReadWeightsFromStream().
}
123 
124 ////////////////////////////////////////////////////////////////////////////////
125 /// destructor
126 
128 {
129  if (fInputData !=0) { delete fInputData; fInputData=0; }
130  if (fSupportVectors !=0 ) { delete fSupportVectors; fSupportVectors = 0; }
131  if (fWgSet !=0) { delete fWgSet; fWgSet=0; }
132  if (fSVKernelFunction !=0 ) { delete fSVKernelFunction; fSVKernelFunction = 0; }
133 }
134 
135 ////////////////////////////////////////////////////////////////////////////////
136 /// SVM can handle classification with 2 classes and regression with one regression-target
137 
139 {
140  if (type == Types::kClassification && numberClasses == 2) return kTRUE;
141  if (type == Types::kRegression && numberTargets == 1) return kTRUE;
142  return kFALSE;
143 }
144 
145 ////////////////////////////////////////////////////////////////////////////////
146 /// default initialisation
147 
149 {
150  // SVM always uses normalised input variables
151  SetNormalised( kTRUE );
152 
153  // Helge: do not book a event vector of given size but rather fill the vector
154  // later with pus_back. Anyway, this is NOT what is time consuming in
155  // SVM and it allows to skip totally events with weights == 0 ;)
156  fInputData = new std::vector<TMVA::SVEvent*>(0);
157  fSupportVectors = new std::vector<TMVA::SVEvent*>(0);
158 }
159 
160 ////////////////////////////////////////////////////////////////////////////////
161 /// declare options available for this method
162 
164 {
165  // for gaussian kernel parameter(s)
166  DeclareOptionRef( fGamma = 1., "Gamma", "RBF kernel parameter: Gamma (size of the Kernel)");
167 
168  DeclareOptionRef( fCost, "C", "Cost parameter" );
169  if (DoRegression()) {
170  fCost = 0.002;
171  }else{
172  fCost = 1.;
173  }
174  DeclareOptionRef( fTolerance = 0.01, "Tol", "Tolerance parameter" ); //should be fixed
175  DeclareOptionRef( fMaxIter = 1000, "MaxIter", "Maximum number of training loops" );
176 
177 }
178 
179 ////////////////////////////////////////////////////////////////////////////////
180 /// options that are used ONLY for the READER to ensure backward compatibility
181 
183 {
185  DeclareOptionRef( fNSubSets = 1, "NSubSets", "Number of training subsets" );
186  DeclareOptionRef( fTheKernel = "Gauss", "Kernel", "Uses kernel function");
187  // for gaussian kernel parameter(s)
188  DeclareOptionRef( fDoubleSigmaSquared = 2., "Sigma", "Kernel parameter: sigma");
189  // for polynomiarl kernel parameter(s)
190  DeclareOptionRef( fOrder = 3, "Order", "Polynomial Kernel parameter: polynomial order");
191  // for sigmoid kernel parameters
192  DeclareOptionRef( fTheta = 1., "Theta", "Sigmoid Kernel parameter: theta");
193  DeclareOptionRef( fKappa = 1., "Kappa", "Sigmoid Kernel parameter: kappa");
194 }
195 
196 ////////////////////////////////////////////////////////////////////////////////
197 /// option post processing (if necessary)
198 
200 {
201  if (IgnoreEventsWithNegWeightsInTraining()) {
202  Log() << kFATAL << "Mechanism to ignore events with negative weights in training not yet available for method: "
203  << GetMethodTypeName()
204  << " --> please remove \"IgnoreNegWeightsInTraining\" option from booking string."
205  << Endl;
206  }
207 }
208 
209 ////////////////////////////////////////////////////////////////////////////////
210 /// Train SVM
211 
213 {
214  Data()->SetCurrentType(Types::kTraining);
215 
216  Log() << kDEBUG << "Create event vector"<< Endl;
217  for (Int_t ievt=0; ievt<Data()->GetNEvents(); ievt++){
218  if (GetEvent(ievt)->GetWeight() != 0)
219  fInputData->push_back(new SVEvent(GetEvent(ievt), fCost, DataInfo().IsSignal(GetEvent(ievt))));
220  }
221 
222  fSVKernelFunction = new SVKernelFunction(fGamma);
223 
224  Log()<< kINFO << "Building SVM Working Set...with "<<fInputData->size()<<" event instances"<< Endl;
225  Timer bldwstime( GetName());
226  fWgSet = new SVWorkingSet( fInputData, fSVKernelFunction,fTolerance, DoRegression() );
227  Log() << kINFO <<"Elapsed time for Working Set build: "<< bldwstime.GetElapsedTime()<<Endl;
228 
229  // timing
230  Timer timer( GetName() );
231  Log() << kINFO << "Sorry, no computing time forecast available for SVM, please wait ..." << Endl;
232 
233  fWgSet->Train(fMaxIter);
234 
235  Log() << kINFO << "Elapsed time: " << timer.GetElapsedTime()
236  << " " << Endl;
237 
238  fBparm = fWgSet->GetBpar();
239  fSupportVectors = fWgSet->GetSupportVectors();
240 
241 
242  delete fWgSet;
243  fWgSet=0;
244 
245  // for (UInt_t i=0; i<fInputData->size();i++) delete fInputData->at(i);
246  delete fInputData;
247  fInputData=0;
248 }
249 
250 ////////////////////////////////////////////////////////////////////////////////
251 /// write configuration to xml file
252 
253 void TMVA::MethodSVM::AddWeightsXMLTo( void* parent ) const
254 {
255  void* wght = gTools().AddChild(parent, "Weights");
256  gTools().AddAttr(wght,"fBparm",fBparm);
257  gTools().AddAttr(wght,"fGamma",fGamma);
258  gTools().AddAttr(wght,"NSupVec",fSupportVectors->size());
259 
260  for (std::vector<TMVA::SVEvent*>::iterator veciter=fSupportVectors->begin();
261  veciter!=fSupportVectors->end() ; ++veciter ) {
262  TVectorD temp(GetNvar()+4);
263  temp[0] = (*veciter)->GetNs();
264  temp[1] = (*veciter)->GetTypeFlag();
265  temp[2] = (*veciter)->GetAlpha();
266  temp[3] = (*veciter)->GetAlpha_p();
267  for (UInt_t ivar = 0; ivar < GetNvar(); ivar++)
268  temp[ivar+4] = (*(*veciter)->GetDataVector())[ivar];
269  gTools().WriteTVectorDToXML(wght,"SupportVector",&temp);
270  }
271  // write max/min data values
272  void* maxnode = gTools().AddChild(wght, "Maxima");
273  for (UInt_t ivar = 0; ivar < GetNvar(); ivar++)
274  gTools().AddAttr(maxnode, "Var"+gTools().StringFromInt(ivar), GetXmax(ivar));
275  void* minnode = gTools().AddChild(wght, "Minima");
276  for (UInt_t ivar = 0; ivar < GetNvar(); ivar++)
277  gTools().AddAttr(minnode, "Var"+gTools().StringFromInt(ivar), GetXmin(ivar));
278 }
279 
280 ////////////////////////////////////////////////////////////////////////////////
281 
283 {
284  gTools().ReadAttr( wghtnode, "fBparm",fBparm );
285  gTools().ReadAttr( wghtnode, "fGamma",fGamma);
286  UInt_t fNsupv=0;
287  gTools().ReadAttr( wghtnode, "NSupVec",fNsupv );
288 
289  Float_t alpha=0.;
290  Float_t alpha_p = 0.;
291 
292  Int_t typeFlag=-1;
293  // UInt_t ns = 0;
294  std::vector<Float_t>* svector = new std::vector<Float_t>(GetNvar());
295 
296  if (fMaxVars!=0) delete fMaxVars;
297  fMaxVars = new TVectorD( GetNvar() );
298  if (fMinVars!=0) delete fMinVars;
299  fMinVars = new TVectorD( GetNvar() );
300  if (fSupportVectors!=0) {
301  for (vector< SVEvent* >::iterator it = fSupportVectors->begin(); it!=fSupportVectors->end(); ++it)
302  delete *it;
303  delete fSupportVectors;
304  }
305  fSupportVectors = new std::vector<TMVA::SVEvent*>(0);
306  void* supportvectornode = gTools().GetChild(wghtnode);
307  for (UInt_t ievt = 0; ievt < fNsupv; ievt++) {
308  TVectorD temp(GetNvar()+4);
309  gTools().ReadTVectorDFromXML(supportvectornode,"SupportVector",&temp);
310  // ns=(UInt_t)temp[0];
311  typeFlag=(int)temp[1];
312  alpha=temp[2];
313  alpha_p=temp[3];
314  for (UInt_t ivar = 0; ivar < GetNvar(); ivar++) (*svector)[ivar]=temp[ivar+4];
315 
316  fSupportVectors->push_back(new SVEvent(svector,alpha,alpha_p,typeFlag));
317  supportvectornode = gTools().GetNextChild(supportvectornode);
318  }
319 
320  void* maxminnode = supportvectornode;
321  for (UInt_t ivar = 0; ivar < GetNvar(); ivar++)
322  gTools().ReadAttr( maxminnode,"Var"+gTools().StringFromInt(ivar),(*fMaxVars)[ivar]);
323  maxminnode = gTools().GetNextChild(maxminnode);
324  for (UInt_t ivar = 0; ivar < GetNvar(); ivar++)
325  gTools().ReadAttr( maxminnode,"Var"+gTools().StringFromInt(ivar),(*fMinVars)[ivar]);
326  if (fSVKernelFunction!=0) delete fSVKernelFunction;
327  fSVKernelFunction = new SVKernelFunction(fGamma);
328  delete svector;
329 }
330 
331 ////////////////////////////////////////////////////////////////////////////////
332 ///TODO write IT
333 /// write training sample (TTree) to file
334 
336 {
337 }
338 
339 ////////////////////////////////////////////////////////////////////////////////
340 
341 void TMVA::MethodSVM::ReadWeightsFromStream( std::istream& istr )
342 {
343  if (fSupportVectors !=0) { delete fSupportVectors; fSupportVectors = 0;}
344  fSupportVectors = new std::vector<TMVA::SVEvent*>(0);
345 
346  // read configuration from input stream
347  istr >> fBparm;
348 
349  UInt_t fNsupv;
350  // coverity[tainted_data_argument]
351  istr >> fNsupv;
352  fSupportVectors->reserve(fNsupv);
353 
354  Float_t typeTalpha=0.;
355  Float_t alpha=0.;
356  Int_t typeFlag=-1;
357  UInt_t ns = 0;
358  std::vector<Float_t>* svector = new std::vector<Float_t>(GetNvar());
359 
360  fMaxVars = new TVectorD( GetNvar() );
361  fMinVars = new TVectorD( GetNvar() );
362 
363  for (UInt_t ievt = 0; ievt < fNsupv; ievt++) {
364  istr>>ns;
365  istr>>typeTalpha;
366  typeFlag = typeTalpha<0?-1:1;
367  alpha = typeTalpha<0?-typeTalpha:typeTalpha;
368  for (UInt_t ivar = 0; ivar < GetNvar(); ivar++) istr >> svector->at(ivar);
369 
370  fSupportVectors->push_back(new SVEvent(svector,alpha,typeFlag,ns));
371  }
372 
373  for (UInt_t ivar = 0; ivar < GetNvar(); ivar++) istr >> (*fMaxVars)[ivar];
374 
375  for (UInt_t ivar = 0; ivar < GetNvar(); ivar++) istr >> (*fMinVars)[ivar];
376 
377  delete fSVKernelFunction;
378  if (fTheKernel == "Gauss" ) {
379  fSVKernelFunction = new SVKernelFunction(1/fDoubleSigmaSquared);
380  }
381  else {
383  if(fTheKernel == "Linear") k = SVKernelFunction::kLinear;
384  else if (fTheKernel == "Polynomial") k = SVKernelFunction::kPolynomial;
385  else if (fTheKernel == "Sigmoid" ) k = SVKernelFunction::kSigmoidal;
386  else {
387  Log() << kFATAL <<"Unknown kernel function found in weight file!" << Endl;
388  }
389  fSVKernelFunction = new SVKernelFunction();
390  fSVKernelFunction->setCompatibilityParams(k, fOrder, fTheta, fKappa);
391  }
392  delete svector;
393 }
394 
395 ////////////////////////////////////////////////////////////////////////////////
396 /// TODO write IT
397 
399 {
400 }
401 
402 ////////////////////////////////////////////////////////////////////////////////
403 /// returns MVA value for given event
404 
406 {
407  Double_t myMVA = 0;
408 
409  // TODO: avoid creation of a new SVEvent every time (Joerg)
410  SVEvent* ev = new SVEvent( GetEvent(), 0. ); // check for specificators
411 
412  for (UInt_t ievt = 0; ievt < fSupportVectors->size() ; ievt++) {
413  myMVA += ( fSupportVectors->at(ievt)->GetAlpha()
414  * fSupportVectors->at(ievt)->GetTypeFlag()
415  * fSVKernelFunction->Evaluate( fSupportVectors->at(ievt), ev ) );
416  }
417 
418  delete ev;
419 
420  myMVA -= fBparm;
421 
422  // cannot determine error
423  NoErrorCalc(err, errUpper);
424 
425  // 08/12/09: changed sign here to make results agree with convention signal=1
426  return 1.0/(1.0 + TMath::Exp(myMVA));
427 }
428 ////////////////////////////////////////////////////////////////////////////////
429 
430 const std::vector<Float_t>& TMVA::MethodSVM::GetRegressionValues()
431 {
432  if( fRegressionReturnVal == NULL )
433  fRegressionReturnVal = new std::vector<Float_t>();
434  fRegressionReturnVal->clear();
435 
436  Double_t myMVA = 0;
437 
438  const Event *baseev = GetEvent();
439  SVEvent* ev = new SVEvent( baseev,0. ); //check for specificators
440 
441  for (UInt_t ievt = 0; ievt < fSupportVectors->size() ; ievt++) {
442  myMVA += ( fSupportVectors->at(ievt)->GetDeltaAlpha()
443  *fSVKernelFunction->Evaluate( fSupportVectors->at(ievt), ev ) );
444  }
445  myMVA += fBparm;
446  Event * evT = new Event(*baseev);
447  evT->SetTarget(0,myMVA);
448 
449  const Event* evT2 = GetTransformationHandler().InverseTransform( evT );
450 
451  fRegressionReturnVal->push_back(evT2->GetTarget(0));
452 
453  delete evT;
454 
455  delete ev;
456 
457  return *fRegressionReturnVal;
458 }
459 
460 ////////////////////////////////////////////////////////////////////////////////
461 /// write specific classifier response
462 
void TMVA::MethodSVM::MakeClassSpecific( std::ostream& fout, const TString& className ) const
{
   // Emit standalone C++ for the trained classifier: member declarations,
   // Initialize(), GetMvaValue__() (Gaussian/RBF kernel only), Clear(), and
   // the static arrays holding the support vectors and alpha coefficients.
   const int fNsupv = fSupportVectors->size();
   fout << " // not implemented for class: \"" << className << "\"" << std::endl;
   fout << " float fBparameter;" << std::endl;
   fout << " int fNOfSuppVec;" << std::endl;
   fout << " static float fAllSuppVectors[][" << fNsupv << "];" << std::endl;
   fout << " static float fAlphaTypeCoef[" << fNsupv << "];" << std::endl;
   fout << std::endl;
   fout << " // Kernel parameter(s) " << std::endl;
   fout << " float fGamma;" << std::endl;
   fout << "};" << std::endl;
   fout << "" << std::endl;

   // Initialize function definition: copies the trained constants
   fout << "inline void " << className << "::Initialize() " << std::endl;
   fout << "{" << std::endl;
   fout << " fBparameter = " << fBparm << ";" << std::endl;
   fout << " fNOfSuppVec = " << fNsupv << ";" << std::endl;
   fout << " fGamma = " << fGamma << ";" <<std::endl;
   fout << "}" << std::endl;
   fout << std::endl;

   // GetMvaValue__ function definition: RBF kernel evaluation over all
   // support vectors, mirroring GetMvaValue() above.
   // NOTE(review): the generated loop bound refers to GetNvar(); presumably
   // the surrounding generated reader class provides it -- confirm.
   fout << "inline double " << className << "::GetMvaValue__(const std::vector<double>& inputValues ) const" << std::endl;
   fout << "{" << std::endl;
   fout << " double mvaval = 0; " << std::endl;
   fout << " double temp = 0; " << std::endl;
   fout << std::endl;
   fout << " for (int ievt = 0; ievt < fNOfSuppVec; ievt++ ){" << std::endl;
   fout << " temp = 0;" << std::endl;
   fout << " for ( unsigned int ivar = 0; ivar < GetNvar(); ivar++ ) {" << std::endl;

   fout << " temp += (fAllSuppVectors[ivar][ievt] - inputValues[ivar]) " << std::endl;
   fout << " * (fAllSuppVectors[ivar][ievt] - inputValues[ivar]); " << std::endl;
   fout << " }" << std::endl;
   fout << " mvaval += fAlphaTypeCoef[ievt] * exp( -fGamma * temp ); " << std::endl;

   fout << " }" << std::endl;
   fout << " mvaval -= fBparameter;" << std::endl;
   fout << " return 1./(1. + exp(mvaval));" << std::endl;
   fout << "}" << std::endl;
   fout << "// Clean up" << std::endl;
   fout << "inline void " << className << "::Clear() " << std::endl;
   fout << "{" << std::endl;
   fout << " // nothing to clear " << std::endl;
   fout << "}" << std::endl;
   fout << "" << std::endl;

   // define support vectors: first the combined alpha*typeFlag coefficients
   fout << "float " << className << "::fAlphaTypeCoef[] =" << std::endl;
   fout << "{ ";
   for (Int_t isv = 0; isv < fNsupv; isv++) {
      fout << fSupportVectors->at(isv)->GetDeltaAlpha() * fSupportVectors->at(isv)->GetTypeFlag();
      if (isv < fNsupv-1) fout << ", ";
   }
   fout << " };" << std::endl << std::endl;

   // then the support-vector components, stored variable-major:
   // fAllSuppVectors[ivar][isv]
   fout << "float " << className << "::fAllSuppVectors[][" << fNsupv << "] =" << std::endl;
   fout << "{";
   for (UInt_t ivar = 0; ivar < GetNvar(); ivar++) {
      fout << std::endl;
      fout << " { ";
      for (Int_t isv = 0; isv < fNsupv; isv++){
         fout << fSupportVectors->at(isv)->GetDataVector()->at(ivar);
         if (isv < fNsupv-1) fout << ", ";
      }
      fout << " }";
      if (ivar < GetNvar()-1) fout << ", " << std::endl;
      else fout << std::endl;
   }
   fout << "};" << std::endl<< std::endl;
}
536 
537 ////////////////////////////////////////////////////////////////////////////////
538 /// get help message text
539 ///
540 /// typical length of text line:
541 /// "|--------------------------------------------------------------|"
542 
544 {
545  Log() << Endl;
546  Log() << gTools().Color("bold") << "--- Short description:" << gTools().Color("reset") << Endl;
547  Log() << Endl;
548  Log() << "The Support Vector Machine (SVM) builds a hyperplance separating" << Endl;
549  Log() << "signal and background events (vectors) using the minimal subset of " << Endl;
550  Log() << "all vectors used for training (support vectors). The extension to" << Endl;
551  Log() << "the non-linear case is performed by mapping input vectors into a " << Endl;
552  Log() << "higher-dimensional feature space in which linear separation is " << Endl;
553  Log() << "possible. The use of the kernel functions thereby eliminates the " << Endl;
554  Log() << "explicit transformation to the feature space. The implemented SVM " << Endl;
555  Log() << "algorithm performs the classification tasks using linear, polynomial, " << Endl;
556  Log() << "Gaussian and sigmoidal kernel functions. The Gaussian kernel allows " << Endl;
557  Log() << "to apply any discriminant shape in the input space." << Endl;
558  Log() << Endl;
559  Log() << gTools().Color("bold") << "--- Performance optimisation:" << gTools().Color("reset") << Endl;
560  Log() << Endl;
561  Log() << "SVM is a general purpose non-linear classification method, which " << Endl;
562  Log() << "does not require data preprocessing like decorrelation or Principal " << Endl;
563  Log() << "Component Analysis. It generalises quite well and can handle analyses " << Endl;
564  Log() << "with large numbers of input variables." << Endl;
565  Log() << Endl;
566  Log() << gTools().Color("bold") << "--- Performance tuning via configuration options:" << gTools().Color("reset") << Endl;
567  Log() << Endl;
568  Log() << "Optimal performance requires primarily a proper choice of the kernel " << Endl;
569  Log() << "parameters (the width \"Sigma\" in case of Gaussian kernel) and the" << Endl;
570  Log() << "cost parameter \"C\". The user must optimise them empirically by running" << Endl;
571  Log() << "SVM several times with different parameter sets. The time needed for " << Endl;
572  Log() << "each evaluation scales like the square of the number of training " << Endl;
573  Log() << "events so that a coarse preliminary tuning should be performed on " << Endl;
574  Log() << "reduced data sets." << Endl;
575 }
void Train(void)
Train SVM.
Definition: MethodSVM.cxx:212
ClassImp(TMVA::MethodSVM) TMVA
standard constructor
Definition: MethodSVM.cxx:73
void DeclareCompatibilityOptions()
options that are used ONLY for the READER to ensure backward compatibility
Definition: MethodSVM.cxx:182
MsgLogger & Endl(MsgLogger &ml)
Definition: MsgLogger.h:162
float Float_t
Definition: RtypesCore.h:53
A ROOT file is a suite of consecutive data records (TKey instances) with a well defined format...
Definition: TFile.h:45
EAnalysisType
Definition: Types.h:124
Basic string class.
Definition: TString.h:137
void MakeClassSpecific(std::ostream &, const TString &) const
write specific classifier response
Definition: MethodSVM.cxx:463
int Int_t
Definition: RtypesCore.h:41
bool Bool_t
Definition: RtypesCore.h:59
const Bool_t kFALSE
Definition: Rtypes.h:92
void ProcessOptions()
option post processing (if necessary)
Definition: MethodSVM.cxx:199
void AddAttr(void *node, const char *, const T &value, Int_t precision=16)
Definition: Tools.h:308
void * AddChild(void *parent, const char *childname, const char *content=0, bool isRootNode=false)
add child node
Definition: Tools.cxx:1134
Tools & gTools()
Definition: Tools.cxx:79
TStopwatch timer
Definition: pirndm.C:37
void * GetChild(void *parent, const char *childname=0)
get child node
Definition: Tools.cxx:1158
std::vector< std::vector< double > > Data
void DeclareOptions()
declare options available for this method
Definition: MethodSVM.cxx:163
TVectorT< Double_t > TVectorD
Definition: TVectorDfwd.h:24
TString GetElapsedTime(Bool_t Scientific=kTRUE)
Definition: Timer.cxx:131
void AddWeightsXMLTo(void *parent) const
write configuration to xml file
Definition: MethodSVM.cxx:253
void ReadWeightsFromXML(void *wghtnode)
Definition: MethodSVM.cxx:282
void ReadWeightsFromStream(std::istream &istr)
Definition: MethodSVM.cxx:341
void Init(void)
default initialisation
Definition: MethodSVM.cxx:148
unsigned int UInt_t
Definition: RtypesCore.h:42
void SetTarget(UInt_t itgt, Float_t value)
set the target value (dimension itgt) to value
Definition: Event.cxx:354
void ReadAttr(void *node, const char *, T &value)
Definition: Tools.h:295
virtual Bool_t HasAnalysisType(Types::EAnalysisType type, UInt_t numberClasses, UInt_t numberTargets)
SVM can handle classification with 2 classes and regression with one regression-target.
Definition: MethodSVM.cxx:138
Double_t GetMvaValue(Double_t *err=0, Double_t *errUpper=0)
returns MVA value for given event
Definition: MethodSVM.cxx:405
MethodSVM(const TString &jobName, const TString &methodTitle, DataSetInfo &theData, const TString &theOption="", TDirectory *theTargetDir=0)
Double_t Exp(Double_t x)
Definition: TMath.h:495
double Double_t
Definition: RtypesCore.h:55
Describe directory structure in memory.
Definition: TDirectory.h:41
int type
Definition: TGX11.cxx:120
void WriteTVectorDToXML(void *node, const char *name, TVectorD *vec)
Definition: Tools.cxx:1267
void * GetNextChild(void *prevchild, const char *childname=0)
XML helpers.
Definition: Tools.cxx:1170
const TString & Color(const TString &)
human readable color strings
Definition: Tools.cxx:837
Float_t GetTarget(UInt_t itgt) const
Definition: Event.h:101
#define REGISTER_METHOD(CLASS)
for example
Abstract ClassifierFactory template that handles arbitrary types.
virtual ~MethodSVM(void)
destructor
Definition: MethodSVM.cxx:127
virtual void DeclareCompatibilityOptions()
options that are used ONLY for the READER to ensure backward compatibility they are hence without any...
Definition: MethodBase.cxx:599
#define NULL
Definition: Rtypes.h:82
void WriteWeightsToStream(TFile &fout) const
TODO write IT write training sample (TTree) to file.
Definition: MethodSVM.cxx:335
void ReadTVectorDFromXML(void *node, const char *name, TVectorD *vec)
Definition: Tools.cxx:1275
const Bool_t kTRUE
Definition: Rtypes.h:91
void GetHelpMessage() const
get help message text
Definition: MethodSVM.cxx:543
Definition: math.cpp:60
const std::vector< Float_t > & GetRegressionValues()
Definition: MethodSVM.cxx:430