#ifndef IClassifierReader__def
#define IClassifierReader__def

// Abstract interface for TMVA-generated standalone classifier readers.
// Concrete readers (one per trained method) implement GetMvaValue().
class IClassifierReader {

 public:

   // constructor: a freshly created reader starts in a clean (usable) state
   IClassifierReader() : fStatusIsClean( true ) {}
   virtual ~IClassifierReader() {}

   // return classifier response for the given input-variable values
   virtual double GetMvaValue( const std::vector<double>& inputValues ) const = 0;

   // returns whether construction succeeded (input-variable checks passed)
   bool IsStatusClean() const { return fStatusIsClean; }

 protected:

   bool fStatusIsClean; // set to false by subclasses on configuration errors

};

#endif
102 class ReadMLPBNN :
public IClassifierReader {
107 ReadMLPBNN( std::vector<std::string>& theInputVars )
108 : IClassifierReader(),
109 fClassName(
"ReadMLPBNN" ),
111 fIsNormalised(
false )
114 const char* inputVars[] = {
"var1+var2",
"var1-var2",
"var3",
"var4" };
117 if (theInputVars.size() <= 0) {
118 std::cout <<
"Problem in class \"" << fClassName <<
"\": empty input vector" << std::endl;
119 fStatusIsClean =
false;
122 if (theInputVars.size() != fNvars) {
123 std::cout <<
"Problem in class \"" << fClassName <<
"\": mismatch in number of input values: " 124 << theInputVars.size() <<
" != " << fNvars << std::endl;
125 fStatusIsClean =
false;
129 for (
size_t ivar = 0; ivar < theInputVars.size(); ivar++) {
130 if (theInputVars[ivar] != inputVars[ivar]) {
131 std::cout <<
"Problem in class \"" << fClassName <<
"\": mismatch in input variable names" << std::endl
132 <<
" for variable [" << ivar <<
"]: " << theInputVars[ivar].c_str() <<
" != " << inputVars[ivar] << std::endl;
133 fStatusIsClean =
false;
161 virtual ~ReadMLPBNN() {
168 double GetMvaValue(
const std::vector<double>& inputValues )
const;
179 void InitTransform_1();
180 void Transform_1( std::vector<double> & iv,
int sigOrBgd )
const;
181 void InitTransform();
182 void Transform( std::vector<double> & iv,
int sigOrBgd )
const;
185 const char* fClassName;
188 size_t GetNvar()
const {
return fNvars; }
192 const bool fIsNormalised;
193 bool IsNormalised()
const {
return fIsNormalised; }
196 double NormVariable(
double x,
double xmin,
double xmax )
const {
198 return 2*(x -
xmin)/(xmax - xmin) - 1.0;
206 double GetMvaValue__(
const std::vector<double>& inputValues )
const;
210 double ActivationFnc(
double x)
const;
211 double OutputActivationFnc(
double x)
const;
215 double fWeightMatrix0to1[10][5];
216 double fWeightMatrix1to2[1][10];
218 double * fWeights[3];
225 fLayerSize[0] = 5; fWeights[0] =
new double[5];
226 fLayerSize[1] = 10; fWeights[1] =
new double[10];
227 fLayerSize[2] = 1; fWeights[2] =
new double[1];
229 fWeightMatrix0to1[0][0] = -1.40922051639239;
230 fWeightMatrix0to1[1][0] = 0.128458225874446;
231 fWeightMatrix0to1[2][0] = 0.0610442324625422;
232 fWeightMatrix0to1[3][0] = -0.983342286926709;
233 fWeightMatrix0to1[4][0] = 1.16266647595013;
234 fWeightMatrix0to1[5][0] = -2.2919892008821;
235 fWeightMatrix0to1[6][0] = -0.527271199900571;
236 fWeightMatrix0to1[7][0] = 0.242877731169356;
237 fWeightMatrix0to1[8][0] = -0.474497997800808;
238 fWeightMatrix0to1[0][1] = -0.73765409936524;
239 fWeightMatrix0to1[1][1] = -0.0481305632351767;
240 fWeightMatrix0to1[2][1] = -0.0349351367919671;
241 fWeightMatrix0to1[3][1] = 0.0436101461728682;
242 fWeightMatrix0to1[4][1] = -0.943589630247706;
243 fWeightMatrix0to1[5][1] = 0.0878659186937728;
244 fWeightMatrix0to1[6][1] = 0.0360454640285985;
245 fWeightMatrix0to1[7][1] = 0.0409955917851326;
246 fWeightMatrix0to1[8][1] = 0.0543001252890545;
247 fWeightMatrix0to1[0][2] = -0.0383037835412182;
248 fWeightMatrix0to1[1][2] = 0.0364662295664198;
249 fWeightMatrix0to1[2][2] = 0.0190219207218807;
250 fWeightMatrix0to1[3][2] = -0.236262364753694;
251 fWeightMatrix0to1[4][2] = -0.139152542947488;
252 fWeightMatrix0to1[5][2] = -0.329476061228832;
253 fWeightMatrix0to1[6][2] = -0.123933385152538;
254 fWeightMatrix0to1[7][2] = 0.0781268071925014;
255 fWeightMatrix0to1[8][2] = -0.118488568702799;
256 fWeightMatrix0to1[0][3] = -0.86334322355238;
257 fWeightMatrix0to1[1][3] = -0.283940847072425;
258 fWeightMatrix0to1[2][3] = -0.151517820740765;
259 fWeightMatrix0to1[3][3] = 1.11007103355057;
260 fWeightMatrix0to1[4][3] = 0.0272209600485835;
261 fWeightMatrix0to1[5][3] = 1.12666128953213;
262 fWeightMatrix0to1[6][3] = 0.778934056107014;
263 fWeightMatrix0to1[7][3] = -0.490610024337247;
264 fWeightMatrix0to1[8][3] = 0.756807208990601;
265 fWeightMatrix0to1[0][4] = 0.0327899630183315;
266 fWeightMatrix0to1[1][4] = 0.00145644157319893;
267 fWeightMatrix0to1[2][4] = 0.000887185334408409;
268 fWeightMatrix0to1[3][4] = -0.000196887835857256;
269 fWeightMatrix0to1[4][4] = -0.165474843600782;
270 fWeightMatrix0to1[5][4] = 0.00853154418720169;
271 fWeightMatrix0to1[6][4] = -0.00533805600277283;
272 fWeightMatrix0to1[7][4] = 0.00561229959930704;
273 fWeightMatrix0to1[8][4] = -0.00615320637343035;
275 fWeightMatrix1to2[0][0] = -4.99047874693271;
276 fWeightMatrix1to2[0][1] = -1.71193821226236;
277 fWeightMatrix1to2[0][2] = -0.865099344872659;
278 fWeightMatrix1to2[0][3] = 8.3508131575791;
279 fWeightMatrix1to2[0][4] = 8.21623116822907;
280 fWeightMatrix1to2[0][5] = 9.92670136419168;
281 fWeightMatrix1to2[0][6] = 5.00926522276548;
282 fWeightMatrix1to2[0][7] = -3.68746448623868;
283 fWeightMatrix1to2[0][8] = 4.7867462021542;
284 fWeightMatrix1to2[0][9] = -1.89377549009733;
287 inline double ReadMLPBNN::GetMvaValue__(
const std::vector<double>& inputValues )
const 289 if (inputValues.size() != (
unsigned int)fLayerSize[0]-1) {
290 std::cout <<
"Input vector needs to be of size " << fLayerSize[0]-1 << std::endl;
294 for (
int l=0;
l<fLayers;
l++)
295 for (
int i=0; i<fLayerSize[
l]; i++) fWeights[
l][i]=0;
297 for (
int l=0;
l<fLayers-1;
l++)
298 fWeights[
l][fLayerSize[
l]-1]=1;
300 for (
int i=0; i<fLayerSize[0]-1; i++)
301 fWeights[0][i]=inputValues[i];
304 for (
int o=0; o<fLayerSize[1]-1; o++) {
305 for (
int i=0; i<fLayerSize[0]; i++) {
306 double inputVal = fWeightMatrix0to1[o][i] * fWeights[0][i];
307 fWeights[1][o] += inputVal;
309 fWeights[1][o] = ActivationFnc(fWeights[1][o]);
312 for (
int o=0; o<fLayerSize[2]; o++) {
313 for (
int i=0; i<fLayerSize[1]; i++) {
314 double inputVal = fWeightMatrix1to2[o][i] * fWeights[1][i];
315 fWeights[2][o] += inputVal;
317 fWeights[2][o] = OutputActivationFnc(fWeights[2][o]);
320 return fWeights[2][0];
323 double ReadMLPBNN::ActivationFnc(
double x)
const {
327 double ReadMLPBNN::OutputActivationFnc(
double x)
const {
329 return 1.0/(1.0+
exp(-x));
333 inline void ReadMLPBNN::Clear()
336 for (
int lIdx = 0; lIdx < 3; lIdx++) {
337 delete[] fWeights[lIdx];
340 inline double ReadMLPBNN::GetMvaValue(
const std::vector<double>& inputValues )
const 346 if (!IsStatusClean()) {
347 std::cout <<
"Problem in class \"" << fClassName <<
"\": cannot return classifier response" 348 <<
" because status is dirty" << std::endl;
352 if (IsNormalised()) {
354 std::vector<double> iV;
355 iV.reserve(inputValues.size());
357 for (std::vector<double>::const_iterator varIt = inputValues.begin();
358 varIt != inputValues.end(); varIt++, ivar++) {
359 iV.push_back(NormVariable( *varIt, fVmin[ivar], fVmax[ivar] ));
362 retval = GetMvaValue__( iV );
365 std::vector<double> iV;
367 for (std::vector<double>::const_iterator varIt = inputValues.begin();
368 varIt != inputValues.end(); varIt++, ivar++) {
369 iV.push_back(*varIt);
372 retval = GetMvaValue__( iV );
380 inline void ReadMLPBNN::InitTransform_1()
383 fMin_1[0][0] = -4.94358778;
384 fMax_1[0][0] = 6.3994679451;
385 fMin_1[1][0] = -8.14423561096;
386 fMax_1[1][0] = 7.26972866058;
387 fMin_1[2][0] = -8.14423561096;
388 fMax_1[2][0] = 7.26972866058;
389 fMin_1[0][1] = -3.96643972397;
390 fMax_1[0][1] = 3.11266636848;
391 fMin_1[1][1] = -3.25508260727;
392 fMax_1[1][1] = 4.0258936882;
393 fMin_1[2][1] = -3.96643972397;
394 fMax_1[2][1] = 4.0258936882;
395 fMin_1[0][2] = -2.78645992279;
396 fMax_1[0][2] = 3.50111722946;
397 fMin_1[1][2] = -5.03730010986;
398 fMax_1[1][2] = 4.27845287323;
399 fMin_1[2][2] = -5.03730010986;
400 fMax_1[2][2] = 4.27845287323;
401 fMin_1[0][3] = -2.42712664604;
402 fMax_1[0][3] = 4.5351858139;
403 fMin_1[1][3] = -5.95050764084;
404 fMax_1[1][3] = 4.64035463333;
405 fMin_1[2][3] = -5.95050764084;
406 fMax_1[2][3] = 4.64035463333;
410 inline void ReadMLPBNN::Transform_1( std::vector<double>& iv,
int cls)
const 413 if (cls < 0 || cls > 2) {
422 static std::vector<int> indicesGet;
423 static std::vector<int> indicesPut;
425 if ( indicesGet.empty() ) {
426 indicesGet.reserve(fNvars);
427 indicesGet.push_back( 0);
428 indicesGet.push_back( 1);
429 indicesGet.push_back( 2);
430 indicesGet.push_back( 3);
432 if ( indicesPut.empty() ) {
433 indicesPut.reserve(fNvars);
434 indicesPut.push_back( 0);
435 indicesPut.push_back( 1);
436 indicesPut.push_back( 2);
437 indicesPut.push_back( 3);
440 static std::vector<double> dv;
442 for (
int ivar=0; ivar<nVar; ivar++) dv[ivar] = iv[indicesGet.at(ivar)];
443 for (
int ivar=0;ivar<4;ivar++) {
444 double offset = fMin_1[cls][ivar];
445 double scale = 1.0/(fMax_1[cls][ivar]-fMin_1[cls][ivar]);
446 iv[indicesPut.at(ivar)] = (dv[ivar]-offset)*scale * 2 - 1;
451 inline void ReadMLPBNN::InitTransform()
457 inline void ReadMLPBNN::Transform( std::vector<double>& iv,
int sigOrBgd )
const 459 Transform_1( iv, sigOrBgd );
Type GetType(const std::string &Name)
void Initialize(Bool_t useTMVAStyle=kTRUE)