RuleFitParams.cxx
// @(#)root/tmva $Id$
// Author: Andreas Hoecker, Joerg Stelzer, Fredrik Tegenfeldt, Helge Voss

/**********************************************************************************
 * Project: TMVA - a Root-integrated toolkit for multivariate data analysis      *
 * Package: TMVA                                                                  *
 * Class  : RuleFitParams                                                         *
 *                                                                                *
 *                                                                                *
 * Description:                                                                   *
 *      Implementation                                                            *
 *                                                                                *
 * Authors (alphabetical):                                                        *
 *      Fredrik Tegenfeldt <Fredrik.Tegenfeldt@cern.ch>  - Iowa State U., USA     *
 *      Helge Voss         <Helge.Voss@cern.ch>          - MPI-KP Heidelberg, Ger.*
 *                                                                                *
 * Copyright (c) 2005:                                                            *
 *      CERN, Switzerland                                                         *
 *      Iowa State U.                                                             *
 *      MPI-K Heidelberg, Germany                                                 *
 *                                                                                *
 * Redistribution and use in source and binary forms, with or without            *
 * modification, are permitted according to the terms listed in LICENSE          *
 * (see tmva/doc/LICENSE)                                                        *
 **********************************************************************************/

/*! \class TMVA::RuleFitParams
\ingroup TMVA
A class doing the actual fitting of a linear model using rules as base functions.
*/
#include "TMVA/RuleFitParams.h"

#include "TMVA/DataSetInfo.h"
#include "TMVA/MethodRuleFit.h"
#include "TMVA/MsgLogger.h"
#include "TMVA/RuleEnsemble.h"
#include "TMVA/RuleFit.h"
#include "TMVA/Timer.h"
#include "TMVA/Tools.h"
#include "TMVA/Types.h"

#include "TTree.h"
#include "TMath.h"

#include <iomanip>
#include <numeric>
#include <algorithm>
#include <functional>
#include <ctime>

// Module-local flags and timers, used by the VERBOSE/DEBUG timing output below.
Bool_t   gFIRSTTST   = kTRUE;
Bool_t   gFIRSTORG   = kTRUE;
Double_t gGDNorm     = 0;
Double_t gGDInit     = 0;
Double_t gGDPtr      = 0;
Double_t gGDEval     = 0;
Double_t gGDEvalRule = 0;
Double_t gGDRuleLoop = 0;
Double_t gGDLinLoop  = 0;
////////////////////////////////////////////////////////////////////////////////
/// constructor

TMVA::RuleFitParams::RuleFitParams()
   : fRuleFit ( 0 )
   , fRuleEnsemble ( 0 )
   , fNRules ( 0 )
   , fNLinear ( 0 )
   , fPathIdx1 ( 0 )
   , fPathIdx2 ( 0 )
   , fPerfIdx1 ( 0 )
   , fPerfIdx2 ( 0 )
   , fGDNTauTstOK( 0 )
   , fGDNTau ( 51 )
   , fGDTauPrec ( 0.02 )
   , fGDTauScan ( 1000 )
   , fGDTauMin ( 0.0 )
   , fGDTauMax ( 1.0 )
   , fGDTau ( -1.0 )
   , fGDPathStep ( 0.01 )
   , fGDNPathSteps ( 1000 )
   , fGDErrScale ( 1.1 )
   , fAverageTruth( 0 )
   , fFstarMedian ( 0 )
   , fGDNtuple ( 0 )
   , fNTRisk ( 0 )
   , fNTErrorRate ( 0 )
   , fNTNuval ( 0 )
   , fNTCoefRad ( 0 )
   , fNTOffset ( 0 )
   , fNTCoeff ( 0 )
   , fNTLinCoeff ( 0 )
   , fsigave( 0 )
   , fsigrms( 0 )
   , fbkgave( 0 )
   , fbkgrms( 0 )
   , fLogger( new MsgLogger("RuleFit") )
{
   Init();
}
////////////////////////////////////////////////////////////////////////////////
/// destructor

TMVA::RuleFitParams::~RuleFitParams()
{
   if (fNTCoeff)    { delete [] fNTCoeff;    fNTCoeff = nullptr;    }
   if (fNTLinCoeff) { delete [] fNTLinCoeff; fNTLinCoeff = nullptr; }
   delete fLogger;
}

////////////////////////////////////////////////////////////////////////////////
/// Initializes all parameters using the RuleEnsemble and the training tree

void TMVA::RuleFitParams::Init()
{
   if (fRuleFit==0) return;
   if (fRuleFit->GetMethodRuleFit()==0) {
      Log() << kFATAL << "RuleFitParams::Init() - MethodRuleFit ptr is null" << Endl;
   }
   UInt_t neve = fRuleFit->GetTrainingEvents().size();
   //
   fRuleEnsemble = fRuleFit->GetRuleEnsemblePtr();
   fNRules       = fRuleEnsemble->GetNRules();
   fNLinear      = fRuleEnsemble->GetNLinear();

   //
   // The fraction of events used for validation should be close to unity.
   // Always select events from the END.
   //
   UInt_t ofs;
   fPerfIdx1 = 0;
   if (neve>1) {
      fPerfIdx2 = static_cast<UInt_t>((neve-1)*fRuleFit->GetMethodRuleFit()->GetGDValidEveFrac());
   }
   else {
      fPerfIdx2 = 0;
   }
   ofs = neve - fPerfIdx2 - 1;
   fPerfIdx1 += ofs;
   fPerfIdx2 += ofs;
   //
   // The fraction of events used for the path search may be smaller, say 0.5.
   // Always select events from the BEGINNING.
   // This means that the validation and search samples will not overlap if both fractions are <0.5.
   //
   fPathIdx1 = 0;
   if (neve>1) {
      fPathIdx2 = static_cast<UInt_t>((neve-1)*fRuleFit->GetMethodRuleFit()->GetGDPathEveFrac());
   }
   else {
      fPathIdx2 = 0;
   }
   //
   // summarize weights
   //
   fNEveEffPath = 0;
   for (UInt_t ie=fPathIdx1; ie<fPathIdx2+1; ie++) {
      fNEveEffPath += fRuleFit->GetTrainingEventWeight(ie);
   }

   fNEveEffPerf=0;
   for (UInt_t ie=fPerfIdx1; ie<fPerfIdx2+1; ie++) {
      fNEveEffPerf += fRuleFit->GetTrainingEventWeight(ie);
   }
   //
   Log() << kVERBOSE << "Path constr. - event index range = [ " << fPathIdx1 << ", " << fPathIdx2 << " ]"
         << ", effective N(events) = " << fNEveEffPath << Endl;
   Log() << kVERBOSE << "Error estim. - event index range = [ " << fPerfIdx1 << ", " << fPerfIdx2 << " ]"
         << ", effective N(events) = " << fNEveEffPerf << Endl;
   //
   if (fRuleEnsemble->DoRules())
      Log() << kDEBUG << "Number of rules in ensemble: " << fNRules << Endl;
   else
      Log() << kDEBUG << "Rules are disabled " << Endl;

   if (fRuleEnsemble->DoLinear())
      Log() << kDEBUG << "Number of linear terms: " << fNLinear << Endl;
   else
      Log() << kDEBUG << "Linear terms are disabled " << Endl;
}
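
// A worked example of the index bookkeeping above (a sketch, using the
// fractions returned by GetGDValidEveFrac() and GetGDPathEveFrac()):
//
//   neve = 1000, valid fraction = 0.5, path fraction = 0.3
//   validation: fPerfIdx2 = 499, then shifted to the end -> [500, 999]
//   path search: [fPathIdx1, fPathIdx2] = [0, 299]
//
// so the two samples are disjoint whenever the two fractions sum to at most 1.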

////////////////////////////////////////////////////////////////////////////////
/// initializes the ntuple

void TMVA::RuleFitParams::InitNtuple()
{
   fGDNtuple= new TTree("MonitorNtuple_RuleFitParams","RuleFit path search");
   fGDNtuple->Branch("risk",    &fNTRisk,      "risk/D");
   fGDNtuple->Branch("error",   &fNTErrorRate, "error/D");
   fGDNtuple->Branch("nuval",   &fNTNuval,     "nuval/D");
   fGDNtuple->Branch("coefrad", &fNTCoefRad,   "coefrad/D");
   fGDNtuple->Branch("offset",  &fNTOffset,    "offset/D");
   //
   fNTCoeff    = (fNRules >0 ? new Double_t[fNRules]  : 0);
   fNTLinCoeff = (fNLinear>0 ? new Double_t[fNLinear] : 0);

   for (UInt_t i=0; i<fNRules; i++) {
      fGDNtuple->Branch(TString::Format("a%d",i+1).Data(),&fNTCoeff[i],TString::Format("a%d/D",i+1).Data());
   }
   for (UInt_t i=0; i<fNLinear; i++) {
      fGDNtuple->Branch(TString::Format("b%d",i+1).Data(),&fNTLinCoeff[i],TString::Format("b%d/D",i+1).Data());
   }
}
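
// A minimal sketch of how the monitoring ntuple written in DEBUG mode can be
// inspected afterwards in a ROOT session; the output file name depends on the
// TMVA job configuration, so "monitor.root" below is only a placeholder:
//
//   TFile f("monitor.root");
//   TTree *t = nullptr;
//   f.GetObject("MonitorNtuple_RuleFitParams", t);
//   if (t) t->Draw("error:nuval");   // error-rate estimate vs. position nu on the path
//
// The branches a1..aN and b1..bM hold the rule and linear coefficients at each
// recorded step.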

////////////////////////////////////////////////////////////////////////////////
/// evaluate the average of each variable and f(x) in the given range

void TMVA::RuleFitParams::EvaluateAverage( UInt_t ind1, UInt_t ind2,
                                           std::vector<Double_t> &avsel,
                                           std::vector<Double_t> &avrul )
{
   UInt_t neve = ind2-ind1+1;
   if (neve<1) {
      Log() << kFATAL << "<EvaluateAverage> - no events selected for path search -> BUG!" << Endl;
   }
   //
   avsel.clear();
   avrul.clear();
   //
   if (fNLinear>0) avsel.resize(fNLinear,0);
   if (fNRules>0)  avrul.resize(fNRules,0);
   const std::vector<UInt_t> *eventRuleMap=0;
   Double_t ew;
   Double_t sumew=0;
   //
   // Loop over events and calculate the average of the linear terms (normalised) and the rule response.
   //
   if (fRuleEnsemble->IsRuleMapOK()) { // MakeRuleMap() has been called
      for ( UInt_t i=ind1; i<ind2+1; i++) {
         ew = fRuleFit->GetTrainingEventWeight(i);
         sumew += ew;
         for ( UInt_t sel=0; sel<fNLinear; sel++ ) {
            avsel[sel] += ew*fRuleEnsemble->EvalLinEvent(i,sel);
         }
         // loop over rules
         UInt_t nrules=0;
         if (fRuleEnsemble->DoRules()) {
            eventRuleMap = &(fRuleEnsemble->GetEventRuleMap(i));
            nrules = (*eventRuleMap).size();
         }
         for (UInt_t r=0; r<nrules; r++) {
            avrul[(*eventRuleMap)[r]] += ew;
         }
      }
   }
   else { // MakeRuleMap() has not yet been called
      const std::vector<const Event *> *events = &(fRuleFit->GetTrainingEvents());
      for ( UInt_t i=ind1; i<ind2+1; i++) {
         ew = fRuleFit->GetTrainingEventWeight(i);
         sumew += ew;
         // first cache rule/lin response
         /* Double_t val = */ fRuleEnsemble->EvalLinEvent(*((*events)[i]));
         /* val = */ fRuleEnsemble->EvalEvent(*((*events)[i]));
         // loop over linear terms
         for ( UInt_t sel=0; sel<fNLinear; sel++ ) {
            avsel[sel] += ew*fRuleEnsemble->GetEventLinearValNorm(sel);
         }
         // loop over rules
         for (UInt_t r=0; r<fNRules; r++) {
            avrul[r] += ew*fRuleEnsemble->GetEventRuleVal(r);
         }
      }
   }
   // average variable
   for ( UInt_t sel=0; sel<fNLinear; sel++ ) {
      avsel[sel] = avsel[sel] / sumew;
   }
   // average rule response, excl coeff
   for (UInt_t r=0; r<fNRules; r++) {
      avrul[r] = avrul[r] / sumew;
   }
}
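
// In formulas, the loops above compute the weighted sample means (a sketch;
// l_s is the normalised linear term and r_k the 0/1 rule response):
//
//   avsel[s] = sum_i( w_i * l_s(x_i) ) / sum_i( w_i )
//   avrul[k] = sum_i( w_i * r_k(x_i) ) / sum_i( w_i )
//
// These enter CalcAverageResponse() below, where the offset is chosen such
// that the average model response over the path sample vanishes.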

////////////////////////////////////////////////////////////////////////////////
/// Implementation of the squared-error ramp loss function (eqs 39,40 in ref 1).
/// This is used for binary classification where y = {+1,-1} for (sig,bkg).

Double_t TMVA::RuleFitParams::LossFunction( const Event& e ) const
{
   Double_t h = TMath::Max( -1.0, TMath::Min(1.0,fRuleEnsemble->EvalEvent( e )) );
   Double_t diff = (fRuleFit->GetMethodRuleFit()->DataInfo().IsSignal(&e)?1:-1) - h;
   //
   return diff*diff*e.GetWeight();
}
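
// Written out, the ramp loss computed above is (a sketch of eqs 39,40, ref 1):
//
//   H(F)   = max(-1, min(+1, F))      // model response truncated to [-1,+1]
//   L(y,F) = w * (y - H(F))^2         // per-event, weighted
//
// e.g. a signal event (y=+1) with F >= +1 contributes zero loss, while the
// same event with F <= -1 contributes the maximal 4*w.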

////////////////////////////////////////////////////////////////////////////////
/// Implementation of the squared-error ramp loss function (eqs 39,40 in ref 1).
/// This is used for binary classification where y = {+1,-1} for (sig,bkg).

Double_t TMVA::RuleFitParams::LossFunction( UInt_t evtidx ) const
{
   Double_t h = TMath::Max( -1.0, TMath::Min(1.0,fRuleEnsemble->EvalEvent( evtidx )) );
   Double_t diff = (fRuleFit->GetMethodRuleFit()->DataInfo().IsSignal(fRuleEnsemble->GetRuleMapEvent( evtidx ))?1:-1) - h;
   //
   return diff*diff*fRuleFit->GetTrainingEventWeight(evtidx);
}

////////////////////////////////////////////////////////////////////////////////
/// Implementation of the squared-error ramp loss function (eqs 39,40 in ref 1).
/// This is used for binary classification where y = {+1,-1} for (sig,bkg).

Double_t TMVA::RuleFitParams::LossFunction( UInt_t evtidx, UInt_t itau ) const
{
   Double_t e = fRuleEnsemble->EvalEvent( evtidx , fGDOfsTst[itau], fGDCoefTst[itau], fGDCoefLinTst[itau]);
   Double_t h = TMath::Max( -1.0, TMath::Min(1.0,e) );
   Double_t diff = (fRuleFit->GetMethodRuleFit()->DataInfo().IsSignal(fRuleEnsemble->GetRuleMapEvent( evtidx ))?1:-1) - h;
   //
   return diff*diff*fRuleFit->GetTrainingEventWeight(evtidx);
}

////////////////////////////////////////////////////////////////////////////////
/// risk assessment

Double_t TMVA::RuleFitParams::Risk( UInt_t ind1, UInt_t ind2, Double_t neff ) const
{
   UInt_t neve = ind2-ind1+1;
   if (neve<1) {
      Log() << kFATAL << "<Risk> Invalid start/end indices! BUG!!!" << Endl;
   }
   Double_t rval=0;
   //
   //   const std::vector<Event *> *events = &(fRuleFit->GetTrainingEvents());
   for ( UInt_t i=ind1; i<ind2+1; i++) {
      rval += LossFunction(i);
   }
   rval = rval/neff;

   return rval;
}
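
// i.e. the risk is the weighted mean loss over the sample (a sketch):
//
//   R = (1/N_eff) * sum_{i=ind1}^{ind2} L(y_i, F(x_i))
//
// with N_eff the effective (weighted) number of events passed in as neff.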

////////////////////////////////////////////////////////////////////////////////
/// risk assessment for tau model `<itau>`

Double_t TMVA::RuleFitParams::Risk( UInt_t ind1, UInt_t ind2, Double_t neff, UInt_t itau ) const
{
   UInt_t neve = ind2-ind1+1;
   if (neve<1) {
      Log() << kFATAL << "<Risk> Invalid start/end indices! BUG!!!" << Endl;
   }
   Double_t rval=0;
   //
   //   const std::vector<Event *> *events = &(fRuleFit->GetTrainingEvents());
   for ( UInt_t i=ind1; i<ind2+1; i++) {
      rval += LossFunction(i,itau);
   }
   rval = rval/neff;

   return rval;
}

////////////////////////////////////////////////////////////////////////////////
/// This is the "lasso" penalty.
/// To be used for regression.
/// --- NOT USED ---

Double_t TMVA::RuleFitParams::Penalty() const
{
   Log() << kWARNING << "<Penalty> Using unverified code! Check!" << Endl;
   Double_t rval=0;
   const std::vector<Double_t> *lincoeff = & (fRuleEnsemble->GetLinCoefficients());
   for (UInt_t i=0; i<fNRules; i++) {
      rval += TMath::Abs(fRuleEnsemble->GetRules(i)->GetCoefficient());
   }
   for (UInt_t i=0; i<fNLinear; i++) {
      rval += TMath::Abs((*lincoeff)[i]);
   }
   return rval;
}
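
// The returned quantity is the L1 norm of all fit parameters (a sketch):
//
//   P(a,b) = sum_k |a_k| + sum_j |b_j|
//
// This is the penalty whose gradient-directed approximation motivates the
// tau-thresholded coefficient updates in UpdateCoefficients() below.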

////////////////////////////////////////////////////////////////////////////////
/// Initialize GD path search

void TMVA::RuleFitParams::InitGD()
{
   if (fGDNTau<2) {
      fGDNTau    = 1;
      fGDTauScan = 0;
   }
   if (fGDTau<0.0) {
      //      fGDNTau   = 50; already set in MethodRuleFit
      fGDTauScan = 1000;
      fGDTauMin  = 0.0;
      fGDTauMax  = 1.0;
   }
   else {
      fGDNTau    = 1;
      fGDTauScan = 0;
   }
   // set all taus
   fGDTauVec.clear();
   fGDTauVec.resize( fGDNTau );
   if (fGDNTau==1) {
      fGDTauVec[0] = fGDTau;
   }
   else {
      // set tau vector - TODO: make a smarter choice of the range in tau
      Double_t dtau = (fGDTauMax - fGDTauMin)/static_cast<Double_t>(fGDNTau-1);
      for (UInt_t itau=0; itau<fGDNTau; itau++) {
         fGDTauVec[itau] = static_cast<Double_t>(itau)*dtau + fGDTauMin;
         if (fGDTauVec[itau]>1.0) fGDTauVec[itau]=1.0;
      }
   }
   // initialize path search vectors

   fGradVec.clear();
   fGradVecLin.clear();
   fGradVecTst.clear();
   fGradVecLinTst.clear();
   fGDErrTst.clear();
   fGDErrTstOK.clear();
   fGDOfsTst.clear();
   fGDCoefTst.clear();
   fGDCoefLinTst.clear();
   //
   // rules
   //
   fGDCoefTst.resize(fGDNTau);
   fGradVec.resize(fNRules,0);
   fGradVecTst.resize(fGDNTau);
   for (UInt_t i=0; i<fGDNTau; i++) {
      fGradVecTst[i].resize(fNRules,0);
      fGDCoefTst[i].resize(fNRules,0);
   }
   //
   // linear terms
   //
   fGDCoefLinTst.resize(fGDNTau);
   fGradVecLin.resize(fNLinear,0);
   fGradVecLinTst.resize(fGDNTau);
   for (UInt_t i=0; i<fGDNTau; i++) {
      fGradVecLinTst[i].resize(fNLinear,0);
      fGDCoefLinTst[i].resize(fNLinear,0);
   }
   //
   // error, coefs etc
   //
   fGDErrTst.resize(fGDNTau,0);
   fGDErrTstOK.resize(fGDNTau,kTRUE);
   fGDOfsTst.resize(fGDNTau,0);
   fGDNTauTstOK = fGDNTau;
   //
   // calculate average selectors and rule responses for the path sample size
   //
}
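
// The tau grid constructed above is uniform (a sketch):
//
//   tau_i = tauMin + i * (tauMax - tauMin) / (NTau - 1),   i = 0 .. NTau-1
//
// e.g. with the defaults tauMin=0, tauMax=1 and NTau=51 this gives
// tau = 0.00, 0.02, 0.04, ..., 1.00, one trial regularization per path.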

////////////////////////////////////////////////////////////////////////////////
/// This finds the cutoff parameter tau by scanning several different paths

Int_t TMVA::RuleFitParams::FindGDTau()
{
   if (fGDNTau<2) return 0;
   if (fGDTauScan==0) return 0;

   if (fGDOfsTst.size()<1)
      Log() << kFATAL << "BUG! FindGDTau() has been called BEFORE InitGD()." << Endl;
   //
   Log() << kINFO << "Estimating the cutoff parameter tau. The estimated time is a pessimistic maximum." << Endl;
   //
   // Find how many points to scan and how often to calculate the error
   UInt_t nscan = fGDTauScan; //std::min(static_cast<Int_t>(fGDTauScan),fGDNPathSteps);
   UInt_t netst = std::min(nscan,UInt_t(100));
   UInt_t nscanned=0;
   //
   //--------------------
   // loop over the paths
   //--------------------
   // The MAXIMUM number of loops is given by nscan.
   // At each loop, the paths far away from the minimum
   // are rejected. Hence at each check (every netst events), the number
   // of paths searched is reduced.
   // The maximum 'distance' from the minimum error rate is
   // 1 sigma. See RiskPerfTst() for details.
   //
   Bool_t doloop=kTRUE;
   UInt_t ip=0;
   UInt_t itauMin=0;
   Timer timer( nscan, "RuleFit" );
   while (doloop) {
      // make gradvec
      MakeTstGradientVector();
      // update coefs
      UpdateTstCoefficients();
      // estimate the error and do the sum
      // do this at index=0, netst-1, 2*netst-1 ...
      nscanned++;
      if ( (ip==0) || ((ip+1)%netst==0) ) {
         //         ErrorRateRocTst( );
         itauMin = RiskPerfTst();
         Log() << kVERBOSE << Form("%4d",ip+1) << ". tau = " << Form("%4.4f",fGDTauVec[itauMin])
               << " => error rate = " << fGDErrTst[itauMin] << Endl;
      }
      ip++;
      doloop = ((ip<nscan) && (fGDNTauTstOK>3));

      if (Log().GetMinType()>kVERBOSE)
         timer.DrawProgressBar(ip);
   }
   //
   // Set tau and coefs
   // Downscale tau slightly in order to avoid numerical problems
   //
   if (nscanned==0) {
      Log() << kERROR << "<FindGDTau> number of scanned loops is zero! Should NOT see this message." << Endl;
   }
   fGDTau = fGDTauVec[itauMin];
   fRuleEnsemble->SetCoefficients( fGDCoefTst[itauMin] );
   fRuleEnsemble->SetLinCoefficients( fGDCoefLinTst[itauMin] );
   fRuleEnsemble->SetOffset( fGDOfsTst[itauMin] );
   Log() << kINFO << "Best path found with tau = " << Form("%4.4f",fGDTau)
         << " after " << timer.GetElapsedTime() << " " << Endl;

   return nscan;
}
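
// The scan cadence, as a concrete illustration of the loop above (a sketch):
// with nscan = 1000 and netst = 100, all surviving taus take a gradient step
// in every iteration, but the error rate is evaluated only at steps
// 1, 100, 200, ..., 1000. At each evaluation RiskPerfTst() flags every tau
// whose error exceeds (best error + 1 sigma), and the scan stops early once
// three or fewer taus survive.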

////////////////////////////////////////////////////////////////////////////////
/// The following finds the gradient directed path in parameter space.
/// More work is needed... FT, 24/9/2006
///
/// The algorithm is currently as follows (if not otherwise stated, the sample
/// used below is [fPathIdx1,fPathIdx2]):
///
/// 1. Set the offset to -average(y(true)) and all coefs=0 => average of F(x)==0
/// 2. FindGDTau() : start scanning using several paths defined by different tau;
///    choose the tau yielding the best path
/// 3. start scanning along the chosen path
/// 4. check the error rate at a given frequency;
///    data used for the check: [fPerfIdx1,fPerfIdx2]
/// 5. stop when either of the following conditions is fulfilled:
///    1. loop index==fGDNPathSteps
///    2. error > fGDErrScale*errmin
///    3. only in DEBUG mode: risk is not monotonously decreasing
///
/// The algorithm will warn if:
/// 1. the error rate was still decreasing when the loop finished -> increase fGDNPathSteps!
/// 2. the minimum was found at an early stage -> decrease fGDPathStep
/// 3. DEBUG: risk > previous risk -> entered chaotic region (regularization is too small)
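///
/// In compact form, one step of the path (a sketch of eqs 44, 25 and 22 of
/// ref 1 as they are used in MakeGradientVector() and UpdateCoefficients()):
///
///     g_k  = (2/N_eff) * sum_{i: |F(x_i)|<1} w_i * (y_i - F(x_i)) * r_k(x_i)
///     a_k <- a_k + step * g_k    for every k with |g_k| >= tau * max_j |g_j|
///
/// with the analogous update for the linear coefficients b_j (normalised by
/// the linear-term scale).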

void TMVA::RuleFitParams::MakeGDPath()
{
   Log() << kINFO << "GD path scan - the scan stops when the max num. of steps is reached or a min is found"
         << Endl;
   Log() << kVERBOSE << "Number of events used per path step = " << fPathIdx2-fPathIdx1+1 << Endl;
   Log() << kVERBOSE << "Number of events used for error estimation = " << fPerfIdx2-fPerfIdx1+1 << Endl;

   // check if debug mode
   const Bool_t isVerbose = (Log().GetMinType()<=kVERBOSE);
   const Bool_t isDebug   = (Log().GetMinType()<=kDEBUG);

   // init GD parameters and clear coeff vectors
   InitGD();

   // evaluate the average response of rules/linear terms (with event weights)
   EvaluateAveragePath();
   EvaluateAveragePerf();

   // initial estimate; all other a(i) are zero
   Log() << kVERBOSE << "Creating GD path"  << Endl;
   Log() << kVERBOSE << "  N(steps)     = " << fGDNPathSteps << Endl;
   Log() << kVERBOSE << "  step         = " << fGDPathStep << Endl;
   Log() << kVERBOSE << "  N(tau)       = " << fGDNTau << Endl;
   Log() << kVERBOSE << "  N(tau steps) = " << fGDTauScan << Endl;
   Log() << kVERBOSE << "  tau range    = [ " << fGDTauVec[0] << " , " << fGDTauVec[fGDNTau-1] << " ]" << Endl;

   // init ntuple
   if (isDebug) InitNtuple();

   // DEBUG: risk scan
   Int_t nbadrisk=0;        // number of points where risk(i+1)>risk(i)
   Double_t trisk=0;        // time per risk evaluation
   Double_t strisk=0;       // total time
   Double_t rprev=1e32;     // previous risk

   // parameters set at the point with min error
   Double_t errmin=1e32;    // min error
   //   Double_t riskMin=0;  // risk
   Int_t indMin=-1;         // index
   std::vector<Double_t> coefsMin;    // rule coefs
   std::vector<Double_t> lincoefsMin; // linear coefs
   Double_t offsetMin;      // offset

   // DEBUG: timing
   clock_t t0=0;
   Double_t tgradvec;
   Double_t tupgrade;
   Double_t tperf;
   Double_t stgradvec=0;
   Double_t stupgrade=0;
   Double_t stperf=0;

   // linear regression to estimate the slope of the error rate evolution
   const UInt_t npreg=5;
   std::vector<Double_t> valx;
   std::vector<Double_t> valy;
   std::vector<Double_t> valxy;

   // loop related
   Bool_t docheck;          // true if an error rate check is to be done
   Int_t iloop=0;           // loop index
   Bool_t found=kFALSE;     // true if minimum is found
   Bool_t riskFlat=kFALSE;  // DEBUG: flag is true if the risk evolution behaves badly
   Bool_t done = kFALSE;    // flag that the scan is done

   // calculate how often to check the error rate
   int imod = fGDNPathSteps/100;
   if (imod<100) imod = std::min(100,fGDNPathSteps);
   if (imod>100) imod=100;

   // reset coefficients
   fAverageTruth = -CalcAverageTruth();
   offsetMin     = fAverageTruth;
   fRuleEnsemble->SetOffset(offsetMin);
   fRuleEnsemble->ClearCoefficients(0);
   fRuleEnsemble->ClearLinCoefficients(0);
   for (UInt_t i=0; i<fGDOfsTst.size(); i++) {
      fGDOfsTst[i] = offsetMin;
   }
   Log() << kVERBOSE << "Obtained initial offset = " << offsetMin << Endl;

   // find the best tau - returns the number of steps performed in the scan
   Int_t nprescan = FindGDTau();

   //
   //
   // calculate F*
   //
   //   CalcFStar(fPerfIdx1, fPerfIdx2);
   //

   // set some ntuple values
   fNTRisk = rprev;
   fNTCoefRad = -1.0;
   fNTErrorRate = 0;

   // a local flag indicating for what reason the search was stopped
   Int_t stopCondition=0;

   Log() << kINFO << "Fitting model..." << Endl;
   // start loop with timer
   Timer timer( fGDNPathSteps, "RuleFit" );
   Log() << kWARNING;
   while (!done) {
      // Make the gradient vector (eq 44, ref 1)
      if (isVerbose) t0 = clock();
      MakeGradientVector();
      if (isVerbose) {
         tgradvec = Double_t(clock()-t0)/CLOCKS_PER_SEC;
         stgradvec += tgradvec;
      }

      // Calculate the direction in parameter space (eq 25, ref 1) and update the coeffs (eq 22, ref 1)
      if (isVerbose) t0 = clock();
      UpdateCoefficients();
      if (isVerbose) {
         tupgrade = Double_t(clock()-t0)/CLOCKS_PER_SEC;
         stupgrade += tupgrade;
      }

      // don't check the error rate every loop
      docheck = ((iloop==0) ||((iloop+1)%imod==0));

      if (docheck) {
         // draw progressbar only if not debug
         if (!isVerbose)
            timer.DrawProgressBar(iloop);
         fNTNuval = Double_t(iloop)*fGDPathStep;
         fNTRisk = 0.0;

         // check risk evolution

         if (isDebug) FillCoefficients();
         fNTCoefRad = fRuleEnsemble->CoefficientRadius();

         // calculate risk
         t0 = clock();
         fNTRisk = RiskPath();
         trisk = Double_t(clock()-t0)/CLOCKS_PER_SEC;
         strisk += trisk;
         //
         // Check for an increase in risk.
         // Such an increase would imply that the regularization is too small.
         // Stop the iteration if this happens.
         //
         if (fNTRisk>=rprev) {
            if (fNTRisk>rprev) {
               nbadrisk++;
               Log() << "Risk(i+1)>=Risk(i) in path" << Endl;
               riskFlat=(nbadrisk>3);
               if (riskFlat) {
                  Log() << kWARNING << "Chaotic behaviour of risk evolution" << Endl;
                  Log() << "--- STOPPING MINIMISATION ---" << Endl;
                  Log() << "This may be OK if the minimum is already found" << Endl;
               }
            }
         }
         rprev = fNTRisk;
         //
         // Estimate the error rate using cross validation.
         // Well, not quite full cross validation since we only
         // use ONE model.
         //
         if (isVerbose) t0 = clock();
         fNTErrorRate = 0;

         // Check the error rate
         Double_t errroc;//= ErrorRateRoc();
         Double_t riskPerf = RiskPerf();
         //         Double_t optimism = Optimism();
         //
         errroc = riskPerf;
         //
         fNTErrorRate = errroc;
         //
         if (isVerbose) {
            tperf = Double_t(clock()-t0)/CLOCKS_PER_SEC;
            stperf +=tperf;
         }
         //
         // Always take the last min.
         // For each step the risk is reduced.
         //
         if (fNTErrorRate<=errmin) {
            errmin = fNTErrorRate;
            //            riskMin = fNTRisk;
            indMin = iloop;
            fRuleEnsemble->GetCoefficients(coefsMin);
            lincoefsMin = fRuleEnsemble->GetLinCoefficients();
            offsetMin = fRuleEnsemble->GetOffset();
         }
         if ( fNTErrorRate > fGDErrScale*errmin) found = kTRUE;
         //
         // check the slope of the last couple of points
         //
         if (valx.size()==npreg) {
            valx.erase(valx.begin());
            valy.erase(valy.begin());
            valxy.erase(valxy.begin());
         }
         valx.push_back(fNTNuval);
         valy.push_back(fNTErrorRate);
         valxy.push_back(fNTErrorRate*fNTNuval);

         //
         if (isDebug) fGDNtuple->Fill();
         if (isVerbose) {
            Log() << kVERBOSE << "ParamsIRE : "
                  << std::setw(10)
                  << Form("%8d",iloop+1) << " "
                  << Form("%4.4f",fNTRisk) << " "
                  << Form("%4.4f",riskPerf)  << " "
                  << Form("%4.4f",fNTRisk+riskPerf)  << " "
                  //                  << Form("%4.4f",fsigave+fbkgave) << " "
                  //                  << Form("%4.4f",fsigave) << " "
                  //                  << Form("%4.4f",fsigrms) << " "
                  //                  << Form("%4.4f",fbkgave) << " "
                  //                  << Form("%4.4f",fbkgrms) << " "

                  //                  << Form("%4.4f",fRuleEnsemble->CoefficientRadius())
                  << Endl;
         }
      }
      iloop++;
      // Stop the iteration under various conditions:
      // * The condition R(i+1)<R(i) is no longer true (when the implicit regularization is too weak)
      // * The current error estimate is > factor*errmin (factor = 1.1)
      // * We have reached the last step...
      Bool_t endOfLoop = (iloop==fGDNPathSteps);
      if ( ((riskFlat) || (endOfLoop)) && (!found) ) {
         if (riskFlat) {
            stopCondition = 1;
         }
         else if (endOfLoop) {
            stopCondition = 2;
         }
         if (indMin<0) {
            Log() << kWARNING << "BUG TRAP: should not be here - still, this bug is harmless;)" << Endl;
            errmin = fNTErrorRate;
            //            riskMin = fNTRisk;
            indMin = iloop;
            fRuleEnsemble->GetCoefficients(coefsMin);
            lincoefsMin = fRuleEnsemble->GetLinCoefficients();
            offsetMin = fRuleEnsemble->GetOffset();
         }
         found = kTRUE;
      }
      done = (found);
   }
   Log() << Endl;
   Log() << kINFO << "Minimisation elapsed time : " << timer.GetElapsedTime() << " " << Endl;
   Log() << kINFO << "----------------------------------------------------------------" << Endl;
   Log() << kINFO << "Found minimum at step " << indMin+1 << " with error = " << errmin << Endl;
   Log() << kINFO << "Reason for ending loop: ";
   switch (stopCondition) {
   case 0:
      Log() << kINFO << "clear minimum found";
      break;
   case 1:
      Log() << kINFO << "chaotic behaviour of risk";
      break;
   case 2:
      Log() << kINFO << "end of loop reached";
      break;
   default:
      Log() << kINFO << "unknown!";
      break;
   }
   Log() << Endl;
   Log() << kINFO << "----------------------------------------------------------------" << Endl;

   // check if the minimum was found early - might indicate too large a step size
   if ( Double_t(indMin)/Double_t(nprescan+fGDNPathSteps) < 0.05 ) {
      Log() << kWARNING << "Reached minimum early in the search" << Endl;
      Log() << "Check results and maybe decrease GDStep size" << Endl;
   }
   //
   // quick check of the sign of the slope for the last npreg points
   //
   Double_t sumx  = std::accumulate( valx.begin(), valx.end(), Double_t() );
   Double_t sumxy = std::accumulate( valxy.begin(), valxy.end(), Double_t() );
   Double_t sumy  = std::accumulate( valy.begin(), valy.end(), Double_t() );
   Double_t slope = Double_t(valx.size())*sumxy - sumx*sumy;
   if (slope<0) {
      Log() << kINFO << "The error rate was still decreasing at the end of the path" << Endl;
      Log() << kINFO << "Increase the number of steps (GDNSteps)." << Endl;
   }
   //
   // set coefficients
   //
   if (found) {
      fRuleEnsemble->SetCoefficients( coefsMin );
      fRuleEnsemble->SetLinCoefficients( lincoefsMin );
      fRuleEnsemble->SetOffset( offsetMin );
   }
   else {
      Log() << kFATAL << "BUG TRAP: minimum not found in MakeGDPath()" << Endl;
   }

   //
   // print timing info (VERBOSE mode)
   //
   if (isVerbose) {
      Double_t stloop = strisk + stupgrade + stgradvec + stperf;
      Log() << kVERBOSE << "Timing per loop (ms):" << Endl;
      Log() << kVERBOSE << "   gradvec = " << 1000*stgradvec/iloop << Endl;
      Log() << kVERBOSE << "   upgrade = " << 1000*stupgrade/iloop << Endl;
      Log() << kVERBOSE << "   risk    = " << 1000*strisk/iloop << Endl;
      Log() << kVERBOSE << "   perf    = " << 1000*stperf/iloop << Endl;
      Log() << kVERBOSE << "   loop    = " << 1000*stloop/iloop << Endl;
      //
      Log() << kVERBOSE << "   GDInit     = " << 1000*gGDInit/iloop << Endl;
      Log() << kVERBOSE << "   GDPtr      = " << 1000*gGDPtr/iloop << Endl;
      Log() << kVERBOSE << "   GDEval     = " << 1000*gGDEval/iloop << Endl;
      Log() << kVERBOSE << "   GDEvalRule = " << 1000*gGDEvalRule/iloop << Endl;
      Log() << kVERBOSE << "   GDNorm     = " << 1000*gGDNorm/iloop << Endl;
      Log() << kVERBOSE << "   GDRuleLoop = " << 1000*gGDRuleLoop/iloop << Endl;
      Log() << kVERBOSE << "   GDLinLoop  = " << 1000*gGDLinLoop/iloop << Endl;
   }
   //
   // write ntuple (DEBUG)
   if (isDebug) fGDNtuple->Write();
}

////////////////////////////////////////////////////////////////////////////////
/// helper function to store the rule coefficients in local arrays

void TMVA::RuleFitParams::FillCoefficients()
{
   fNTOffset = fRuleEnsemble->GetOffset();
   //
   for (UInt_t i=0; i<fNRules; i++) {
      fNTCoeff[i] = fRuleEnsemble->GetRules(i)->GetCoefficient();
   }
   for (UInt_t i=0; i<fNLinear; i++) {
      fNTLinCoeff[i] = fRuleEnsemble->GetLinCoefficients(i);
   }
}

////////////////////////////////////////////////////////////////////////////////
/// Estimates F* (optimum scoring function) for all events for the given sets.
/// The result is used in ErrorRateReg().
/// --- NOT USED ---

void TMVA::RuleFitParams::CalcFStar()
{
   Log() << kWARNING << "<CalcFStar> Using unverified code! Check!" << Endl;
   UInt_t neve = fPerfIdx2-fPerfIdx1+1;
   if (neve<1) {
      Log() << kFATAL << "<CalcFStar> Invalid start/end indices!" << Endl;
      return;
   }
   //
   const std::vector<const Event *> *events = &(fRuleFit->GetTrainingEvents());
   //
   fFstar.clear();
   std::vector<Double_t> fstarSorted;
   Double_t fstarVal;
   // loop over all events and estimate F* for each event
   for (UInt_t i=fPerfIdx1; i<fPerfIdx2+1; i++) {
      const Event& e = *(*events)[i];
      fstarVal = fRuleEnsemble->FStar(e);
      fFstar.push_back(fstarVal);
      fstarSorted.push_back(fstarVal);
      if (TMath::IsNaN(fstarVal)) Log() << kFATAL << "F* is NAN!" << Endl;
   }
   // sort F* and find the median
   std::sort( fstarSorted.begin(), fstarSorted.end() );
   UInt_t ind = neve/2;
   if (neve&1) { // odd number of events: take the middle element
      fFstarMedian = fstarSorted[ind];
   }
   else { // even: average the two middle elements
      fFstarMedian = 0.5*(fstarSorted[ind]+fstarSorted[ind-1]);
   }
}

////////////////////////////////////////////////////////////////////////////////
/// implementation of eq. 7.17 in the Hastie, Tibshirani & Friedman book;
/// this is the covariance between the estimated response yhat and the
/// true value y.
/// NOT REALLY SURE IF THIS IS CORRECT!
/// --- THIS IS NOT USED ---

Double_t TMVA::RuleFitParams::Optimism()
{
   Log() << kWARNING << "<Optimism> Using unverified code! Check!" << Endl;
   UInt_t neve = fPerfIdx2-fPerfIdx1+1;
   if (neve<1) {
      Log() << kFATAL << "<Optimism> Invalid start/end indices!" << Endl;
   }
   //
   const std::vector<const Event *> *events = &(fRuleFit->GetTrainingEvents());
   //
   Double_t sumy=0;
   Double_t sumyhat=0;
   Double_t sumyhaty=0;
   Double_t sumw2=0;
   Double_t yhat;
   Double_t y;
   Double_t w;
   //
   for (UInt_t i=fPerfIdx1; i<fPerfIdx2+1; i++) {
      const Event& e = *(*events)[i];
      yhat = fRuleEnsemble->EvalEvent(i);         // evaluated using the model
      y    = (fRuleFit->GetMethodRuleFit()->DataInfo().IsSignal(&e) ? 1.0:-1.0); // the truth
      w    = fRuleFit->GetTrainingEventWeight(i)/fNEveEffPerf; // the weight, rescaled such that the sum=1
      sumy     += w*y;
      sumyhat  += w*yhat;
      sumyhaty += w*yhat*y;
      sumw2    += w*w;
   }
   Double_t div = 1.0-sumw2;
   Double_t cov = sumyhaty - sumyhat*sumy;
   return 2.0*cov/div;
}
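
// In formulas, with normalised weights (sum_i w_i = 1), the code above maps
// onto eq 7.17 as (a sketch):
//
//   cov(yhat, y) = sum_i w_i*yhat_i*y_i - (sum_i w_i*yhat_i)*(sum_i w_i*y_i)
//   optimism     = 2 * cov(yhat, y) / (1 - sum_i w_i^2)
//
// where 1 - sum_i w_i^2 plays the role of the usual bias-correction factor
// for weighted sample covariances.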

////////////////////////////////////////////////////////////////////////////////
/// Estimates the error rate with the current set of parameters.
/// This code is pretty messy at the moment.
/// Cleanup is needed.
/// --- NOT USED ---

Double_t TMVA::RuleFitParams::ErrorRateReg()
{
   Log() << kWARNING << "<ErrorRateReg> Using unverified code! Check!" << Endl;
   UInt_t neve = fPerfIdx2-fPerfIdx1+1;
   if (neve<1) {
      Log() << kFATAL << "<ErrorRateReg> Invalid start/end indices!" << Endl;
   }
   if (fFstar.size()!=neve) {
      Log() << kFATAL << "--- RuleFitParams::ErrorRateReg() - F* not initialized! BUG!!!"
            << " Fstar.size() = " << fFstar.size() << " , N(events) = " << neve << Endl;
   }
   //
   Double_t sF;
   //
   const std::vector<const Event *> *events = &(fRuleFit->GetTrainingEvents());
   //
   Double_t sumdf = 0;
   Double_t sumdfmed = 0;
   //
   // A bit messy here.
   // I believe the binary error classification is appropriate here.
   // The problem is stability.
   //
   for (UInt_t i=fPerfIdx1; i<fPerfIdx2+1; i++) {
      const Event& e = *(*events)[i];
      sF = fRuleEnsemble->EvalEvent( e );
      // scaled abs error, eq 20 in the RuleFit paper
      sumdf += TMath::Abs(fFstar[i-fPerfIdx1] - sF);
      sumdfmed += TMath::Abs(fFstar[i-fPerfIdx1] - fFstarMedian);
   }
   // scaled abs error, eq 20
   // This error (df) is large - need to think about how to compensate...
   //
   return sumdf/sumdfmed;
}

////////////////////////////////////////////////////////////////////////////////
///
/// Estimates the error rate with the current set of parameters.
/// It uses a binary estimate of (y-F*(x)):
///   (y-F*(x)) = (Num of events where sign(F)!=sign(y))/Neve
/// with y = {+1 if the event is signal, -1 otherwise}.
/// --- NOT USED ---

Double_t TMVA::RuleFitParams::ErrorRateBin()
{
   Log() << kWARNING << "<ErrorRateBin> Using unverified code! Check!" << Endl;
   UInt_t neve = fPerfIdx2-fPerfIdx1+1;
   if (neve<1) {
      Log() << kFATAL << "<ErrorRateBin> Invalid start/end indices!" << Endl;
   }
   //
   const std::vector<const Event *> *events = &(fRuleFit->GetTrainingEvents());
   //
   Double_t sumdfbin = 0;
   Double_t dneve = Double_t(neve);
   Int_t signF, signy;
   Double_t sF;
   //
   for (UInt_t i=fPerfIdx1; i<fPerfIdx2+1; i++) {
      const Event& e = *(*events)[i];
      sF = fRuleEnsemble->EvalEvent( e );
      //      Double_t sFstar = fRuleEnsemble->FStar(e); // THIS CAN BE CALCULATED ONCE!
      signF = (sF>0 ? +1:-1);
      //      signy = (sFStar>0 ? +1:-1);
      signy = (fRuleFit->GetMethodRuleFit()->DataInfo().IsSignal(&e) ? +1:-1);
      sumdfbin += TMath::Abs(Double_t(signF-signy))*0.5;
   }
   Double_t f = sumdfbin/dneve;
   //   Double_t df = f*TMath::Sqrt((1.0/sumdfbin) + (1.0/dneve));
   return f;
}

////////////////////////////////////////////////////////////////////////////////
/// Estimates the error rate with the current set of parameters.
/// It calculates the area under the bkg rejection vs signal efficiency curve.
/// The value returned is 1-area.

Double_t TMVA::RuleFitParams::ErrorRateRocRaw( std::vector<Double_t> & sFsig,
                                               std::vector<Double_t> & sFbkg )
{
   std::sort(sFsig.begin(), sFsig.end());
   std::sort(sFbkg.begin(), sFbkg.end());
   const Double_t minsig = sFsig.front();
   const Double_t minbkg = sFbkg.front();
   const Double_t maxsig = sFsig.back();
   const Double_t maxbkg = sFbkg.back();
   const Double_t minf = std::min(minsig,minbkg);
   const Double_t maxf = std::max(maxsig,maxbkg);
   const Int_t    nsig = Int_t(sFsig.size());
   const Int_t    nbkg = Int_t(sFbkg.size());
   const Int_t    np   = std::min((nsig+nbkg)/4,50);
   const Double_t df   = (maxf-minf)/(np-1);
   //
   // calculate area under rejection/efficiency curve
   //
   Double_t fcut;
   std::vector<Double_t>::const_iterator indit;
   Int_t nrbkg;
   Int_t nesig;
   Int_t pnesig=0;
   Double_t rejb=0;
   Double_t effs=1.0;
   Double_t prejb=0;
   Double_t peffs=1.0;
   //   Double_t drejb;
   Double_t deffs;
   Double_t area=0;
   //
   // loop over range of F [minf,maxf]
   //
   for (Int_t i=0; i<np; i++) {
      fcut = minf + df*Double_t(i);
      indit = std::find_if(sFsig.begin(), sFsig.end(),
                           std::bind(std::greater_equal<Double_t>(), std::placeholders::_1, fcut));
      nesig = sFsig.end()-indit; // number of sig accepted with F>cut
      if (TMath::Abs(pnesig-nesig)>0) {
         indit = std::find_if(sFbkg.begin(), sFbkg.end(),
                              std::bind(std::greater_equal<Double_t>(), std::placeholders::_1, fcut));
         nrbkg = indit-sFbkg.begin(); // number of bkg rejected with F>cut
         rejb = Double_t(nrbkg)/Double_t(nbkg);
         effs = Double_t(nesig)/Double_t(nsig);
         //
         //         drejb = rejb-prejb;
         deffs = effs-peffs;
         area += 0.5*TMath::Abs(deffs)*(rejb+prejb); // trapezoid
         prejb = rejb;
         peffs = effs;
      }
      pnesig = nesig;
   }
   area += 0.5*(1+rejb)*effs; // extrapolate to the end point

   return (1.0-area);
}
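
// The accumulated sum implements the trapezoid rule on the (efficiency,
// rejection) graph (a sketch):
//
//   area ~= sum_cuts 0.5 * |eff_s - eff_s'| * (rej_b + rej_b')
//
// where the primes denote the values at the previously scanned cut; 1-area is
// returned, so smaller is better (0 corresponds to perfect separation).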

////////////////////////////////////////////////////////////////////////////////
/// Estimates the error rate with the current set of parameters.
/// It calculates the area under the bkg rejection vs signal efficiency curve.
/// The value returned is 1-area.
/// This works but is less efficient than calculating the Risk using RiskPerf().

Double_t TMVA::RuleFitParams::ErrorRateRoc()
{
   Log() << kWARNING << "<ErrorRateRoc> Should not be used in the current version! Check!" << Endl;
   UInt_t neve = fPerfIdx2-fPerfIdx1+1;
   if (neve<1) {
      Log() << kFATAL << "<ErrorRateRoc> Invalid start/end indices!" << Endl;
   }
   //
   const std::vector<const Event *> *events = &(fRuleFit->GetTrainingEvents());
   //
   Double_t sF;
   //
   std::vector<Double_t> sFsig;
   std::vector<Double_t> sFbkg;
   Double_t sumfsig=0;
   Double_t sumfbkg=0;
   Double_t sumf2sig=0;
   Double_t sumf2bkg=0;
   //
   for (UInt_t i=fPerfIdx1; i<fPerfIdx2+1; i++) {
      const Event& e = *(*events)[i];
      sF = fRuleEnsemble->EvalEvent(i);// * fRuleFit->GetTrainingEventWeight(i);
      if (fRuleFit->GetMethodRuleFit()->DataInfo().IsSignal(&e)) {
         sFsig.push_back(sF);
         sumfsig  +=sF;
         sumf2sig +=sF*sF;
      }
      else {
         sFbkg.push_back(sF);
         sumfbkg  +=sF;
         sumf2bkg +=sF*sF;
      }
   }
   fsigave = sumfsig/sFsig.size();
   fbkgave = sumfbkg/sFbkg.size();
   fsigrms = TMath::Sqrt(gTools().ComputeVariance(sumf2sig,sumfsig,sFsig.size()));
   fbkgrms = TMath::Sqrt(gTools().ComputeVariance(sumf2bkg,sumfbkg,sFbkg.size()));
   //
   return ErrorRateRocRaw( sFsig, sFbkg );
}

////////////////////////////////////////////////////////////////////////////////
/// Estimates the error rate with the current set of parameters.
/// It calculates the area under the bkg rejection vs signal efficiency curve.
/// The value returned is 1-area.
///
/// See the comment under ErrorRateRoc().

void TMVA::RuleFitParams::ErrorRateRocTst()
{
   Log() << kWARNING << "<ErrorRateRocTst> Should not be used in the current version! Check!" << Endl;
   UInt_t neve = fPerfIdx2-fPerfIdx1+1;
   if (neve<1) {
      Log() << kFATAL << "<ErrorRateRocTst> Invalid start/end indices!" << Endl;
      return;
   }
   //
   const std::vector<const Event *> *events = &(fRuleFit->GetTrainingEvents());
   //
   //   std::vector<Double_t> sF;
   Double_t sF;
   std::vector< std::vector<Double_t> > sFsig;
   std::vector< std::vector<Double_t> > sFbkg;
   //
   sFsig.resize( fGDNTau );
   sFbkg.resize( fGDNTau );
   //   sF.resize( fGDNTau );

   for (UInt_t i=fPerfIdx1; i<fPerfIdx2+1; i++) {
      for (UInt_t itau=0; itau<fGDNTau; itau++) {
         //         if (itau==0) sF = fRuleEnsemble->EvalEvent( *(*events)[i], fGDOfsTst[itau], fGDCoefTst[itau], fGDCoefLinTst[itau] );
         //         else         sF = fRuleEnsemble->EvalEvent( fGDOfsTst[itau], fGDCoefTst[itau], fGDCoefLinTst[itau] );
         sF = fRuleEnsemble->EvalEvent( i, fGDOfsTst[itau], fGDCoefTst[itau], fGDCoefLinTst[itau] );
         if (fRuleFit->GetMethodRuleFit()->DataInfo().IsSignal((*events)[i])) {
            sFsig[itau].push_back(sF);
         }
         else {
            sFbkg[itau].push_back(sF);
         }
      }
   }
   Double_t err;

   for (UInt_t itau=0; itau<fGDNTau; itau++) {
      err = ErrorRateRocRaw( sFsig[itau], sFbkg[itau] );
      fGDErrTst[itau] = err;
   }
}

////////////////////////////////////////////////////////////////////////////////
/// Estimates the error rate with the current set of parameters,
/// using the `<Perf>` subsample.
/// Returns the tau index giving the lowest error.

UInt_t TMVA::RuleFitParams::RiskPerfTst()
{
   UInt_t neve = fPerfIdx2-fPerfIdx1+1;
   if (neve<1) {
      Log() << kFATAL << "<RiskPerfTst> Invalid start/end indices!" << Endl;
      return 0;
   }
   //
   Double_t sumx  = 0;
   Double_t sumx2 = 0;
   Double_t maxx  = -100.0;
   Double_t minx  = 1e30;
   UInt_t itaumin = 0;
   UInt_t nok=0;
   for (UInt_t itau=0; itau<fGDNTau; itau++) {
      if (fGDErrTstOK[itau]) {
         nok++;
         fGDErrTst[itau] = RiskPerf(itau);
         sumx  += fGDErrTst[itau];
         sumx2 += fGDErrTst[itau]*fGDErrTst[itau];
         if (fGDErrTst[itau]>maxx) maxx=fGDErrTst[itau];
         if (fGDErrTst[itau]<minx) {
            minx=fGDErrTst[itau];
            itaumin = itau;
         }
      }
   }
   Double_t sigx = TMath::Sqrt(gTools().ComputeVariance( sumx2, sumx, nok ) );
   Double_t maxacc = minx+sigx;
   //
   if (nok>0) {
      nok = 0;
      for (UInt_t itau=0; itau<fGDNTau; itau++) {
         if (fGDErrTstOK[itau]) {
            if (fGDErrTst[itau] > maxacc) {
               fGDErrTstOK[itau] = kFALSE;
            }
            else {
               nok++;
            }
         }
      }
   }
   fGDNTauTstOK = nok;
   Log() << kVERBOSE << "TAU: "
         << itaumin << " "
         << nok << " "
         << minx << " "
         << maxx << " "
         << sigx << Endl;
   //
   return itaumin;
}
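
// A small worked example of the rejection rule above (a sketch with made-up
// numbers): if three taus are still active with error rates 0.20, 0.22 and
// 0.35, and the spread gives sigma = 0.08, then maxacc = 0.20 + 0.08 = 0.28;
// the third path is switched off (fGDErrTstOK = kFALSE) and takes no further
// gradient steps, while the first two continue in the scan.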

////////////////////////////////////////////////////////////////////////////////
/// make the test gradient vector for all tau;
/// same algorithm as in MakeGradientVector()

void TMVA::RuleFitParams::MakeTstGradientVector()
{
   UInt_t neve = fPathIdx2-fPathIdx1+1;
   if (neve<1) {
      Log() << kFATAL << "<MakeTstGradientVector> Invalid start/end indices!" << Endl;
      return;
   }
   //
   Double_t norm = 2.0/fNEveEffPath;
   //
   const std::vector<const Event *> *events = &(fRuleFit->GetTrainingEvents());

   // Clear gradient vectors
   for (UInt_t itau=0; itau<fGDNTau; itau++) {
      if (fGDErrTstOK[itau]) {
         for (UInt_t ir=0; ir<fNRules; ir++) {
            fGradVecTst[itau][ir]=0;
         }
         for (UInt_t il=0; il<fNLinear; il++) {
            fGradVecLinTst[itau][il]=0;
         }
      }
   }
   //
   //   Double_t val; // temp store
   Double_t sF;   // score function value
   Double_t r;    // eq 35, ref 1
   Double_t y;    // true score (+1 or -1)
   const std::vector<UInt_t> *eventRuleMap=0;
   UInt_t rind;
   //
   // Loop over all events
   //
   for (UInt_t i=fPathIdx1; i<fPathIdx2+1; i++) {
      const Event *e = (*events)[i];
      UInt_t nrules=0;
      if (fRuleEnsemble->DoRules()) {
         eventRuleMap = &(fRuleEnsemble->GetEventRuleMap(i));
         nrules = (*eventRuleMap).size();
      }
      for (UInt_t itau=0; itau<fGDNTau; itau++) { // loop over tau
         //         if (itau==0) sF = fRuleEnsemble->EvalEvent( *e, fGDOfsTst[itau], fGDCoefTst[itau], fGDCoefLinTst[itau] );
         //         else         sF = fRuleEnsemble->EvalEvent( fGDOfsTst[itau], fGDCoefTst[itau], fGDCoefLinTst[itau] );
         if (fGDErrTstOK[itau]) {
            sF = fRuleEnsemble->EvalEvent( i, fGDOfsTst[itau], fGDCoefTst[itau], fGDCoefLinTst[itau] );
            if (TMath::Abs(sF)<1.0) {
               r = 0;
               y = (fRuleFit->GetMethodRuleFit()->DataInfo().IsSignal(e)?1.0:-1.0);
               r = norm*(y - sF) * fRuleFit->GetTrainingEventWeight(i);
               // rule gradient vector
               for (UInt_t ir=0; ir<nrules; ir++) {
                  rind = (*eventRuleMap)[ir];
                  fGradVecTst[itau][rind] += r;
               }
               // linear terms
               for (UInt_t il=0; il<fNLinear; il++) {
                  fGradVecLinTst[itau][il] += r*fRuleEnsemble->EvalLinEventRaw( il,i, kTRUE );
               }
            } // if (TMath::Abs(F)<xxx)
         }
      }
   }
}

////////////////////////////////////////////////////////////////////////////////
/// Establish the maximum gradient for rules, linear terms and the offset
/// for all taus. TODO: do not need the index range!

void TMVA::RuleFitParams::UpdateTstCoefficients()
{
   Double_t maxr, maxl, cthresh, val;
   // loop over all taus
   for (UInt_t itau=0; itau<fGDNTau; itau++) {
      if (fGDErrTstOK[itau]) {
         // find max gradient
         maxr = ( (fNRules>0 ?
                   TMath::Abs(*(std::max_element( fGradVecTst[itau].begin(), fGradVecTst[itau].end(), AbsValue()))):0) );
         maxl = ( (fNLinear>0 ?
                   TMath::Abs(*(std::max_element( fGradVecLinTst[itau].begin(), fGradVecLinTst[itau].end(), AbsValue()))):0) );

         // Use the maximum as a threshold
         Double_t maxv = (maxr>maxl ? maxr:maxl);
         cthresh = maxv * fGDTauVec[itau];

         // Add to offset, if gradient is large enough:
         // Loop over the gradient vector and move to the next set of coefficients;
         // the size of GradVec (and GradVecLin) should be 0 if the learner is disabled.
         //
         // The step size could be rescaled here while scanning for the best path;
         // with stepScale=1.0 the nominal step is used.
         //
         if (maxv>0) {
            const Double_t stepScale=1.0;
            for (UInt_t i=0; i<fNRules; i++) {
               val = fGradVecTst[itau][i];

               if (TMath::Abs(val)>=cthresh) {
                  fGDCoefTst[itau][i] += fGDPathStep*val*stepScale;
               }
            }
            // Loop over the gradient vector for the linear part and move to the next set of coefficients
            for (UInt_t i=0; i<fNLinear; i++) {
               val = fGradVecLinTst[itau][i];
               if (TMath::Abs(val)>=cthresh) {
                  fGDCoefLinTst[itau][i] += fGDPathStep*val*stepScale/fRuleEnsemble->GetLinNorm(i);
               }
            }
         }
      }
   }
   // set the offset - should be outside the itau loop!
   CalcTstAverageResponse();
}

////////////////////////////////////////////////////////////////////////////////
/// make gradient vector

void TMVA::RuleFitParams::MakeGradientVector()
{
   clock_t t0;
   //   clock_t t10;
   t0 = clock();
   //
   UInt_t neve = fPathIdx2-fPathIdx1+1;
   if (neve<1) {
      Log() << kFATAL << "<MakeGradientVector> Invalid start/end indices!" << Endl;
      return;
   }
   //
   const Double_t norm = 2.0/fNEveEffPath;
   //
   const std::vector<const Event *> *events = &(fRuleFit->GetTrainingEvents());

   // Clear gradient vectors
   for (UInt_t ir=0; ir<fNRules; ir++) {
      fGradVec[ir]=0;
   }
   for (UInt_t il=0; il<fNLinear; il++) {
      fGradVecLin[il]=0;
   }
   //
   //   Double_t val; // temp store
   Double_t sF;   // score function value
   Double_t r;    // eq 35, ref 1
   Double_t y;    // true score (+1 or -1)
   const std::vector<UInt_t> *eventRuleMap=0;
   UInt_t rind;
   //
   gGDInit += Double_t(clock()-t0)/CLOCKS_PER_SEC;

   for (UInt_t i=fPathIdx1; i<fPathIdx2+1; i++) {
      const Event *e = (*events)[i];

      //      t0 = clock(); //DEB
      sF = fRuleEnsemble->EvalEvent( i ); // should not contain the weight
      //      gGDEval += Double_t(clock()-t0)/CLOCKS_PER_SEC;
      if (TMath::Abs(sF)<1.0) {
         UInt_t nrules=0;
         if (fRuleEnsemble->DoRules()) {
            eventRuleMap = &(fRuleEnsemble->GetEventRuleMap(i));
            nrules = (*eventRuleMap).size();
         }
         y = (fRuleFit->GetMethodRuleFit()->DataInfo().IsSignal(e)?1.0:-1.0);
         r = norm*(y - sF) * fRuleFit->GetTrainingEventWeight(i);
         // rule gradient vector
         for (UInt_t ir=0; ir<nrules; ir++) {
            rind = (*eventRuleMap)[ir];
            fGradVec[rind] += r;
         }
         //      gGDRuleLoop += Double_t(clock()-t0)/CLOCKS_PER_SEC;
         // linear terms
         //      t0 = clock(); //DEB
         for (UInt_t il=0; il<fNLinear; il++) {
            fGradVecLin[il] += r*fRuleEnsemble->EvalLinEventRaw( il, i, kTRUE );
         }
         //      gGDLinLoop += Double_t(clock()-t0)/CLOCKS_PER_SEC;
      } // if (TMath::Abs(F)<xxx)
   }
}
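
// Note that the ramp in the loss makes the gradient sparse (a sketch): only
// events inside the "margin" |F(x_i)| < 1 contribute, each adding
//
//   r_i = (2/N_eff) * w_i * (y_i - F(x_i))
//
// to the gradient of every rule that fires on event i, and r_i times the raw
// linear-term value to the linear gradients.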

////////////////////////////////////////////////////////////////////////////////
/// Establish the maximum gradient for rules, linear terms and the offset

void TMVA::RuleFitParams::UpdateCoefficients()
{
   Double_t maxr = ( (fRuleEnsemble->DoRules() ?
                      TMath::Abs(*(std::max_element( fGradVec.begin(), fGradVec.end(), AbsValue()))):0) );
   Double_t maxl = ( (fRuleEnsemble->DoLinear() ?
                      TMath::Abs(*(std::max_element( fGradVecLin.begin(), fGradVecLin.end(), AbsValue()))):0) );
   // Use the maximum as a threshold
   Double_t maxv = (maxr>maxl ? maxr:maxl);
   Double_t cthresh = maxv * fGDTau;

   Double_t useRThresh;
   Double_t useLThresh;
   //
   // Choose thresholds.
   //
   useRThresh = cthresh;
   useLThresh = cthresh;

   Double_t gval, lval, coef, lcoef;

   // Add to offset, if gradient is large enough:
   // Loop over the gradient vector and move to the next set of coefficients;
   // the size of GradVec (and GradVecLin) should be 0 if the learner is disabled.
   if (maxv>0) {
      for (UInt_t i=0; i<fGradVec.size(); i++) {
         gval = fGradVec[i];
         if (TMath::Abs(gval)>=useRThresh) {
            coef = fRuleEnsemble->GetRulesConst(i)->GetCoefficient() + fGDPathStep*gval;
            fRuleEnsemble->GetRules(i)->SetCoefficient(coef);
         }
      }

      // Loop over the gradient vector for the linear part and move to the next set of coefficients
      for (UInt_t i=0; i<fGradVecLin.size(); i++) {
         lval = fGradVecLin[i];
         if (TMath::Abs(lval)>=useLThresh) {
            lcoef = fRuleEnsemble->GetLinCoefficients(i) + (fGDPathStep*lval/fRuleEnsemble->GetLinNorm(i));
            fRuleEnsemble->SetLinCoefficient(i,lcoef);
         }
      }
      // Set the offset
      Double_t offset = CalcAverageResponse();
      fRuleEnsemble->SetOffset( offset );
   }
}
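
// The role of tau, illustrated (a sketch with made-up numbers): with
// fGDTau = 0.7 and gradients g = (0.9, -1.0, 0.3), the threshold is
// 0.7 * 1.0 = 0.7, so only the first two coefficients are moved this step.
// tau = 0 updates every coefficient (plain gradient descent), while tau -> 1
// updates only the steepest one and so traces out a lasso-like, sparse path.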

////////////////////////////////////////////////////////////////////////////////
/// calc average response for all test paths - TODO: see comment under CalcAverageResponse()
/// note that a 0 offset is used

void TMVA::RuleFitParams::CalcTstAverageResponse()
{
   for (UInt_t itau=0; itau<fGDNTau; itau++) {
      if (fGDErrTstOK[itau]) {
         fGDOfsTst[itau] = 0;
         for (UInt_t s=0; s<fNLinear; s++) {
            fGDOfsTst[itau] -= fGDCoefLinTst[itau][s] * fAverageSelectorPath[s];
         }
         for (UInt_t r=0; r<fNRules; r++) {
            fGDOfsTst[itau] -= fGDCoefTst[itau][r] * fAverageRulePath[r];
         }
      }
   }
   //
}

////////////////////////////////////////////////////////////////////////////////
/// calculate the average response - TODO : rewrite the bad dependency on EvaluateAverage() !
///
/// note that a 0 offset is used

Double_t TMVA::RuleFitParams::CalcAverageResponse()
{
   Double_t ofs = 0;
   for (UInt_t s=0; s<fNLinear; s++) {
      ofs -= fRuleEnsemble->GetLinCoefficients(s) * fAverageSelectorPath[s];
   }
   for (UInt_t r=0; r<fNRules; r++) {
      ofs -= fRuleEnsemble->GetRules(r)->GetCoefficient() * fAverageRulePath[r];
   }
   return ofs;
}
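
// i.e. the offset is chosen as (a sketch)
//
//   ofs = - sum_j b_j * <l_j> - sum_k a_k * <r_k>
//
// with <l_j>, <r_k> the weighted averages computed in EvaluateAverage(), so
// that the model response F(x) averages to zero over the path sample.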

////////////////////////////////////////////////////////////////////////////////
/// calculate the average truth

Double_t TMVA::RuleFitParams::CalcAverageTruth()
{
   if (fPathIdx2<=fPathIdx1) {
      Log() << kFATAL << "<CalcAverageTruth> Invalid start/end indices!" << Endl;
      return 0;
   }
   Double_t sum=0;
   Double_t ensig=0;
   Double_t enbkg=0;
   const std::vector<const Event *> *events = &(fRuleFit->GetTrainingEvents());
   for (UInt_t i=fPathIdx1; i<fPathIdx2+1; i++) {
      Double_t ew = fRuleFit->GetTrainingEventWeight(i);
      if (fRuleFit->GetMethodRuleFit()->DataInfo().IsSignal((*events)[i])) ensig += ew;
      else                                                                 enbkg += ew;
      sum += ew*(fRuleFit->GetMethodRuleFit()->DataInfo().IsSignal((*events)[i])?1.0:-1.0);
   }
   Log() << kVERBOSE << "Effective number of signal / background = " << ensig << " / " << enbkg << Endl;

   return sum/fNEveEffPath;
}

////////////////////////////////////////////////////////////////////////////////

Int_t TMVA::RuleFitParams::Type( const Event * e ) const {
   return (fRuleFit->GetMethodRuleFit()->DataInfo().IsSignal(e) ? 1:-1);
}

////////////////////////////////////////////////////////////////////////////////

void TMVA::RuleFitParams::SetMsgType( EMsgType t ) {
   fLogger->SetMinType(t);
}