// Doxygen page header (rendering artifact, not part of the generated class):
// ROOT 6.08/07 Reference Guide — TMVAClassification_LD.class.C
1 // Class: ReadLD
2 // Automatically generated by MethodBase::MakeClass
3 //
4 
5 /* configuration options =====================================================
6 
7 #GEN -*-*-*-*-*-*-*-*-*-*-*- general info -*-*-*-*-*-*-*-*-*-*-*-
8 
9 Method : LD::LD
10 TMVA Release : 4.2.1 [262657]
11 ROOT Release : 6.08/07 [395271]
12 Creator : sftnight
13 Date : Thu May 31 21:35:43 2018
14 Host : Linux SFT-ubuntu-1710-1 4.13.0-31-generic #34-Ubuntu SMP Fri Jan 19 16:34:46 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux
15 Dir : /mnt/build/workspace/root-makedoc-v608/rootspi/rdoc/src/v6-08-00-patches/documentation/doxygen
16 Training events: 2000
17 Analysis type : [Classification]
18 
19 
20 #OPT -*-*-*-*-*-*-*-*-*-*-*-*- options -*-*-*-*-*-*-*-*-*-*-*-*-
21 
22 # Set by User:
23 V: "False" [Verbose output (short form of "VerbosityLevel" below - overrides the latter one)]
24 VarTransform: "None" [List of variable transformations performed before training, e.g., "D_Background,P_Signal,G,N_AllClasses" for: "Decorrelation, PCA-transformation, Gaussianisation, Normalisation, each for the given class of events ('AllClasses' denotes all events of all classes, if no class indication is given, 'All' is assumed)"]
25 H: "True" [Print method-specific help message]
26 CreateMVAPdfs: "True" [Create PDFs for classifier outputs (signal and background)]
27 # Default:
28 VerbosityLevel: "Default" [Verbosity level]
29 IgnoreNegWeightsInTraining: "False" [Events with negative weights are ignored in the training (but are included for testing and performance evaluation)]
30 ##
31 
32 
33 #VAR -*-*-*-*-*-*-*-*-*-*-*-* variables *-*-*-*-*-*-*-*-*-*-*-*-
34 
35 NVar 4
36 var1+var2 myvar1 myvar1 myvar1 'F' [-8.14423561096,7.26972866058]
37 var1-var2 myvar2 myvar2 Expression 2 'F' [-3.96643972397,4.0258936882]
38 var3 var3 var3 Variable 3 units 'F' [-5.03730010986,4.27845287323]
39 var4 var4 var4 Variable 4 units 'F' [-5.95050764084,4.64035463333]
40 NSpec 2
41 var1*2 spec1 spec1 Spectator 1 units 'F' [-9.91655540466,8.67800140381]
42 var1*3 spec2 spec2 Spectator 2 units 'F' [-14.874833107,13.0170021057]
43 
44 
45 ============================================================================ */
46 
47 #include <vector>
48 #include <cmath>
49 #include <string>
50 #include <iostream>
51 
#ifndef IClassifierReader__def
#define IClassifierReader__def

// Abstract interface shared by all generated TMVA classifier readers.
// Concrete readers implement GetMvaValue(); construction-time sanity
// checks report problems by clearing the fStatusIsClean flag.
class IClassifierReader {

 public:

   // constructor: start out with a clean status
   IClassifierReader() : fStatusIsClean( true ) {}
   virtual ~IClassifierReader() {}

   // classifier response for one event
   // (inputValues: the input variables, in training order)
   virtual double GetMvaValue( const std::vector<double>& inputValues ) const = 0;

   // true while no configuration/consistency problem has been detected
   bool IsStatusClean() const { return fStatusIsClean; }

 protected:

   // cleared by subclasses when a sanity check fails
   bool fStatusIsClean;
};

#endif
75 
76 class ReadLD : public IClassifierReader {
77 
78  public:
79 
80  // constructor
81  ReadLD( std::vector<std::string>& theInputVars )
82  : IClassifierReader(),
83  fClassName( "ReadLD" ),
84  fNvars( 4 ),
85  fIsNormalised( false )
86  {
87  // the training input variables
88  const char* inputVars[] = { "var1+var2", "var1-var2", "var3", "var4" };
89 
90  // sanity checks
91  if (theInputVars.size() <= 0) {
92  std::cout << "Problem in class \"" << fClassName << "\": empty input vector" << std::endl;
93  fStatusIsClean = false;
94  }
95 
96  if (theInputVars.size() != fNvars) {
97  std::cout << "Problem in class \"" << fClassName << "\": mismatch in number of input values: "
98  << theInputVars.size() << " != " << fNvars << std::endl;
99  fStatusIsClean = false;
100  }
101 
102  // validate input variables
103  for (size_t ivar = 0; ivar < theInputVars.size(); ivar++) {
104  if (theInputVars[ivar] != inputVars[ivar]) {
105  std::cout << "Problem in class \"" << fClassName << "\": mismatch in input variable names" << std::endl
106  << " for variable [" << ivar << "]: " << theInputVars[ivar].c_str() << " != " << inputVars[ivar] << std::endl;
107  fStatusIsClean = false;
108  }
109  }
110 
111  // initialize min and max vectors (for normalisation)
112  fVmin[0] = 0;
113  fVmax[0] = 0;
114  fVmin[1] = 0;
115  fVmax[1] = 0;
116  fVmin[2] = 0;
117  fVmax[2] = 0;
118  fVmin[3] = 0;
119  fVmax[3] = 0;
120 
121  // initialize input variable types
122  fType[0] = 'F';
123  fType[1] = 'F';
124  fType[2] = 'F';
125  fType[3] = 'F';
126 
127  // initialize constants
128  Initialize();
129 
130  }
131 
132  // destructor
133  virtual ~ReadLD() {
134  Clear(); // method-specific
135  }
136 
137  // the classifier response
138  // "inputValues" is a vector of input values in the same order as the
139  // variables given to the constructor
140  double GetMvaValue( const std::vector<double>& inputValues ) const;
141 
142  private:
143 
144  // method-specific destructor
145  void Clear();
146 
147  // common member variables
148  const char* fClassName;
149 
150  const size_t fNvars;
151  size_t GetNvar() const { return fNvars; }
152  char GetType( int ivar ) const { return fType[ivar]; }
153 
154  // normalisation of input variables
155  const bool fIsNormalised;
156  bool IsNormalised() const { return fIsNormalised; }
157  double fVmin[4];
158  double fVmax[4];
159  double NormVariable( double x, double xmin, double xmax ) const {
160  // normalise to output range: [-1, 1]
161  return 2*(x - xmin)/(xmax - xmin) - 1.0;
162  }
163 
164  // type of input variable: 'F' or 'I'
165  char fType[4];
166 
167  // initialize internal variables
168  void Initialize();
169  double GetMvaValue__( const std::vector<double>& inputValues ) const;
170 
171  // private members (method specific)
172  std::vector<double> fLDCoefficients;
173 };
174 
175 inline void ReadLD::Initialize()
176 {
177  fLDCoefficients.push_back( -0.0555505264046 );
178  fLDCoefficients.push_back( -0.326327978479 );
179  fLDCoefficients.push_back( -0.0802584320043 );
180  fLDCoefficients.push_back( -0.194835566078 );
181  fLDCoefficients.push_back( 0.757623350937 );
182 
183  // sanity check
184  if (fLDCoefficients.size() != fNvars+1) {
185  std::cout << "Problem in class \"" << fClassName << "\"::Initialize: mismatch in number of input values"
186  << fLDCoefficients.size() << " != " << fNvars+1 << std::endl;
187  fStatusIsClean = false;
188  }
189 }
190 
191 inline double ReadLD::GetMvaValue__( const std::vector<double>& inputValues ) const
192 {
193  double retval = fLDCoefficients[0];
194  for (size_t ivar = 1; ivar < fNvars+1; ivar++) {
195  retval += fLDCoefficients[ivar]*inputValues[ivar-1];
196  }
197 
198  return retval;
199 }
200 
// Clean up
// Method-specific teardown called from the destructor: releases the
// LD coefficient vector (the only method-specific resource).
inline void ReadLD::Clear()
{
   // clear coefficients
   fLDCoefficients.clear();
}
207  inline double ReadLD::GetMvaValue( const std::vector<double>& inputValues ) const
208  {
209  // classifier response value
210  double retval = 0;
211 
212  // classifier response, sanity check first
213  if (!IsStatusClean()) {
214  std::cout << "Problem in class \"" << fClassName << "\": cannot return classifier response"
215  << " because status is dirty" << std::endl;
216  retval = 0;
217  }
218  else {
219  if (IsNormalised()) {
220  // normalise variables
221  std::vector<double> iV;
222  iV.reserve(inputValues.size());
223  int ivar = 0;
224  for (std::vector<double>::const_iterator varIt = inputValues.begin();
225  varIt != inputValues.end(); varIt++, ivar++) {
226  iV.push_back(NormVariable( *varIt, fVmin[ivar], fVmax[ivar] ));
227  }
228  retval = GetMvaValue__( iV );
229  }
230  else {
231  retval = GetMvaValue__( inputValues );
232  }
233  }
234 
235  return retval;
236  }
// Doxygen cross-reference tooltips from the rendered page (not part of the
// generated class; preserved here as comments):
//   float xmin                                   — THbookFile.cxx:93
//   Type GetType(const std::string &Name)        — Systematics.cxx:34
//   Double_t x[n]                                — legend1.C:17
//   void Initialize(Bool_t useTMVAStyle=kTRUE)   — tmvaglob.cxx:176
//   float xmax                                   — THbookFile.cxx:93
//   PyObject * fType