Logo ROOT   6.07/09
Reference Guide
Fitter.cxx
Go to the documentation of this file.
1 // @(#)root/mathcore:$Id$
2 // Author: L. Moneta Mon Sep 4 17:00:10 2006
3 
4 /**********************************************************************
5  * *
6  * Copyright (c) 2006 LCG ROOT Math Team, CERN/PH-SFT *
7  * *
8  * *
9  **********************************************************************/
10 
11 // Implementation file for class Fitter
12 
13 
14 #include "Fit/Fitter.h"
15 #include "Fit/Chi2FCN.h"
17 #include "Fit/LogLikelihoodFCN.h"
18 #include "Math/Minimizer.h"
19 #include "Math/MinimizerOptions.h"
20 #include "Math/FitMethodFunction.h"
21 #include "Fit/BasicFCN.h"
22 #include "Fit/BinData.h"
23 #include "Fit/UnBinData.h"
24 #include "Fit/FcnAdapter.h"
25 #include "Fit/FitConfig.h"
26 #include "Fit/FitResult.h"
27 #include "Math/Error.h"
28 #include "TF1.h"
29 
30 #include <memory>
31 
32 #include "Math/IParamFunction.h"
33 
35 
36 // #include "TMatrixDSym.h"
37 // for debugging
38 //#include "TMatrixD.h"
39 // #include <iomanip>
40 
41 namespace ROOT {
42 
43  namespace Fit {
44 
45 // use a static variable to get default minimizer options for error def
46 // to see if user has changed it later on. If it has not been changed we set
47 // for the likelihood method an error def of 0.5
48 // t.b.d : multiply likelihood by 2 so have same error def definition as chi2
50 
51 
// NOTE(review): the declaration line of this default constructor
// ("Fitter::Fitter() :", original line 52) was lost in the page extraction;
// what follows is its member-initializer list and empty body.
// Default state: no gradient use, unbinned fit, fit type unknown (0), no data.
53  fUseGradient(false),
54  fBinFit(false),
55  fFitType(0),
56  fDataSize(0)
57 {}
58 
59 Fitter::Fitter(const std::shared_ptr<FitResult> & result) :
60  fUseGradient(false),
61  fBinFit(false),
62  fFitType(0),
63  fDataSize(0),
64  fResult(result)
65 {
66  if (result->fFitFunc) SetFunction(*fResult->fFitFunc); // this will create also the configuration
67  if (result->fObjFunc) fObjFunction = fResult->fObjFunc;
68  if (result->fFitData) fData = fResult->fFitData;
69 }
70 
// NOTE(review): the destructor's declaration line ("Fitter::~Fitter()",
// original line 71) was lost in extraction; this is its (empty) body.
// All owned objects are held through shared_ptr members, so default
// member destruction releases everything.
72 {
73  // Destructor implementation.
74 
75  // nothing to do since we use shared_ptr now
76 }
77 
78 Fitter::Fitter(const Fitter & rhs)
79 {
80  // Implementation of copy constructor.
81  // copy FitResult, FitConfig and clone fit function
82  (*this) = rhs;
83 }
84 
// NOTE(review): the declaration line of the assignment operator
// ("Fitter & Fitter::operator=(const Fitter & rhs)", original line 85)
// was lost in extraction. The operator is private and deliberately a
// no-op apart from the self-assignment guard; the intended copying
// logic is kept below as commented-out code only.
86 {
87  // Implementation of assignment operator.
88  // dummy implementation, since it is private
89  if (this == &rhs) return *this; // time saving self-test
90 // fUseGradient = rhs.fUseGradient;
91 // fBinFit = rhs.fBinFit;
92 // fResult = rhs.fResult;
93 // fConfig = rhs.fConfig;
94 // // function is copied and managed by FitResult (maybe should use an auto_ptr)
95 // fFunc = fResult.ModelFunction();
96 // if (rhs.fFunc != 0 && fResult.ModelFunction() == 0) { // case no fit has been done yet - then clone
97 // if (fFunc) delete fFunc;
98 // fFunc = dynamic_cast<IModelFunction *>( (rhs.fFunc)->Clone() );
99 // assert(fFunc != 0);
100 // }
101  return *this;
102 }
103 
// NOTE(review): lost in extraction — the signature (original line 104,
// presumably "void Fitter::SetFunction(const IModelFunction & func, bool
// useGradient)" — confirm against upstream), line 107 (presumably
// "fUseGradient = useGradient;") and line 127 (presumably the call that
// creates the parameter settings from *fFunc). Body: stores a clone of
// the model function, falling back to the gradient overload when the
// function actually provides a gradient.
105 {
106 
108  if (fUseGradient) {
109  const IGradModelFunction * gradFunc = dynamic_cast<const IGradModelFunction*>(&func);
110  if (gradFunc) {
111  SetFunction(*gradFunc, true);
112  return;
113  }
114  else {
115  MATH_WARN_MSG("Fitter::SetFunction","Requested function does not provide gradient - use it as non-gradient function ");
116  }
117  }
118  fUseGradient = false;
119 
120  // set the fit model function (clone the given one and keep a copy )
121  //std::cout << "set a non-grad function" << std::endl;
122 
123  fFunc = std::shared_ptr<IModelFunction>(dynamic_cast<IModelFunction *>(func.Clone() ) );
124  assert(fFunc);
125 
126  // creates the parameter settings
128 }
129 
130 
// NOTE(review): the signature line of this 1-D SetFunction overload and
// lines 133/151 (presumably "fUseGradient = useGradient;" and the
// parameter-settings creation call) were lost in extraction — confirm
// against upstream. Body: wraps the 1-D parametric function in a
// multi-dimensional adapter (which clones it), after trying the
// gradient overload first when a gradient was requested.
132 {
134  if (fUseGradient) {
135  const IGradModel1DFunction * gradFunc = dynamic_cast<const IGradModel1DFunction*>(&func);
136  if (gradFunc) {
137  SetFunction(*gradFunc, true);
138  return;
139  }
140  else {
141  MATH_WARN_MSG("Fitter::SetFunction","Requested function does not provide gradient - use it as non-gradient function ");
142  }
143  }
144  fUseGradient = false;
145  //std::cout << "set a 1d function" << std::endl;
146 
147  // function is cloned when creating the adapter
148  fFunc = std::shared_ptr<IModelFunction>(new ROOT::Math::MultiDimParamFunctionAdapter(func));
149 
150  // creates the parameter settings
152 }
153 
// NOTE(review): the signature line of this gradient SetFunction overload
// (original line ~154) and line 163 (presumably the parameter-settings
// creation) were lost in extraction. Body: clones the gradient-capable
// model function and keeps shared ownership of the copy.
155 {
157  //std::cout << "set a grad function" << std::endl;
158  // set the fit model function (clone the given one and keep a copy )
159  fFunc = std::shared_ptr<IModelFunction>( dynamic_cast<IGradModelFunction *> ( func.Clone() ) );
160  assert(fFunc);
161 
162  // creates the parameter settings
164 }
165 
166 
// NOTE(review): the signature line of this 1-D gradient SetFunction
// overload (original line ~167) and line 175 (presumably the
// parameter-settings creation) were lost in extraction. Body: wraps the
// 1-D gradient function in a multi-dimensional gradient adapter (which
// clones it).
168 {
169  //std::cout << "set a 1d grad function" << std::endl;
171  // function is cloned when creating the adapter
172  fFunc = std::shared_ptr<IModelFunction>(new ROOT::Math::MultiDimParamGradFunctionAdapter(func));
173 
174  // creates the parameter settings
176 }
177 
178 
179 bool Fitter::SetFCN(const ROOT::Math::IMultiGenFunction & fcn, const double * params, unsigned int dataSize, bool chi2fit) {
180  // set the objective function for the fit
181  // if params is not NULL create the parameter settings
182  fUseGradient = false;
183  unsigned int npar = fcn.NDim();
184  if (npar == 0) {
185  MATH_ERROR_MSG("Fitter::SetFCN","FCN function has zero parameters ");
186  return false;
187  }
188  if (params != 0 )
189  fConfig.SetParamsSettings(npar, params);
190  else {
191  if ( fConfig.ParamsSettings().size() != npar) {
192  MATH_ERROR_MSG("Fitter::SetFCN","wrong fit parameter settings");
193  return false;
194  }
195  }
196 
197  fBinFit = chi2fit;
198  fDataSize = dataSize;
199 
200  // keep also a copy of FCN function and set this in minimizer so they will be managed together
201  // (remember that cloned copy will still depends on data and model function pointers)
202  fObjFunction = std::auto_ptr<ROOT::Math::IMultiGenFunction> ( fcn.Clone() );
203 
204  return true;
205 }
206 
207 bool Fitter::SetFCN(const ROOT::Math::IMultiGradFunction & fcn, const double * params, unsigned int dataSize , bool chi2fit) {
208  // set the objective function for the fit
209  // if params is not NULL create the parameter settings
210  if (!SetFCN(static_cast<const ROOT::Math::IMultiGenFunction &>(fcn),params, dataSize, chi2fit) ) return false;
211  fUseGradient = true;
212  return true;
213 }
214 
215 bool Fitter::SetFCN(const ROOT::Math::FitMethodFunction & fcn, const double * params) {
216  // set the objective function for the fit
217  // if params is not NULL create the parameter settings
218  bool chi2fit = (fcn.Type() == ROOT::Math::FitMethodFunction::kLeastSquare );
219  if (!SetFCN(fcn,params,fcn.NPoints(), chi2fit) ) return false;
220  fUseGradient = false;
221  fFitType = fcn.Type();
222  return true;
223 }
224 
225 bool Fitter::SetFCN(const ROOT::Math::FitMethodGradFunction & fcn, const double * params) {
226  // set the objective function for the fit
227  // if params is not NULL create the parameter settings
228  bool chi2fit = (fcn.Type() == ROOT::Math::FitMethodGradFunction::kLeastSquare );
229  if (!SetFCN(fcn,params,fcn.NPoints(), chi2fit) ) return false;
230  fUseGradient = true;
231  fFitType = fcn.Type();
232  return true;
233 }
234 
235 
236 
237 bool Fitter::FitFCN(const BaseFunc & fcn, const double * params, unsigned int dataSize, bool chi2fit) {
238  // fit a user provided FCN function
239  // create fit parameter settings
240  if (!SetFCN(fcn, params,dataSize,chi2fit) ) return false;
241  return FitFCN();
242 }
243 
244 
245 
246 bool Fitter::FitFCN(const BaseGradFunc & fcn, const double * params, unsigned int dataSize, bool chi2fit) {
247  // fit a user provided FCN gradient function
248 
249  if (!SetFCN(fcn, params,dataSize, chi2fit) ) return false;
250  return FitFCN();
251 }
252 
253 bool Fitter::FitFCN(const ROOT::Math::FitMethodFunction & fcn, const double * params) {
254  // fit using the passed objective function for the fit
255  if (!SetFCN(fcn, params) ) return false;
256  return FitFCN();
257 }
258 
259 bool Fitter::FitFCN(const ROOT::Math::FitMethodGradFunction & fcn, const double * params) {
260  // fit using the passed objective function for the fit
261  if (!SetFCN(fcn, params) ) return false;
262  return FitFCN();
263 }
264 
265 
266 bool Fitter::SetFCN(MinuitFCN_t fcn, int npar, const double * params , unsigned int dataSize , bool chi2fit ){
267  // set TMinuit style FCN type (global function pointer)
268  // create corresponfing objective function from that function
269 
270  if (npar == 0) {
271  npar = fConfig.ParamsSettings().size();
272  if (npar == 0) {
273  MATH_ERROR_MSG("Fitter::FitFCN","Fit Parameter settings have not been created ");
274  return false;
275  }
276  }
277 
278  ROOT::Fit::FcnAdapter newFcn(fcn,npar);
279  return SetFCN(newFcn,params,dataSize,chi2fit);
280 }
281 
282 bool Fitter::FitFCN(MinuitFCN_t fcn, int npar, const double * params , unsigned int dataSize , bool chi2fit ) {
283  // fit using Minuit style FCN type (global function pointer)
284  // create corresponfing objective function from that function
285  if (!SetFCN(fcn, npar, params, dataSize, chi2fit)) return false;
286  fUseGradient = false;
287  return FitFCN();
288 }
289 
// NOTE(review): the signature line ("bool Fitter::FitFCN()", original
// line 290) was lost in extraction. Runs the fit with the previously-set
// objective function: examines the FCN for known types, initializes the
// minimizer and minimizes.
291  // fit using the previously set FCN function
292 
293  // in case a model function exists from a previous fit - reset shared-ptr
// NOTE(review): the next line dereferences fResult without a null check;
// if a model function was set but no fit was ever run, fResult may still
// be null here — confirm against upstream before relying on this path.
294  if (fFunc && fResult->FittedFunction() == 0) fFunc.reset();
295 
296  if (!fObjFunction) {
297  MATH_ERROR_MSG("Fitter::FitFCN","Objective function has not been set");
298  return false;
299  }
300  // look if FCN s of a known type and we can get some modelfunction and data objects
301  ExamineFCN();
302  // init the minimizer
303  if (!DoInitMinimizer() ) return false;
304  // perform the minimization
305  return DoMinimization();
306 }
307 
// NOTE(review): the signature line ("bool Fitter::EvalFCN()", original
// line 308) was lost in extraction. Evaluates the stored objective
// function once at the current configuration parameters and records the
// value in a fresh FitResult (no minimization).
309  // evaluate the FCN using the stored values in fConfig
310 
// NOTE(review): same unchecked fResult dereference as in FitFCN() —
// confirm fResult is guaranteed non-null when fFunc is set.
311  if (fFunc && fResult->FittedFunction() == 0) fFunc.reset();
312 
313  if (!fObjFunction) {
314  MATH_ERROR_MSG("Fitter::FitFCN","Objective function has not been set");
315  return false;
316  }
317  // create a Fit result from the fit configuration
318  fResult = std::shared_ptr<ROOT::Fit::FitResult>(new ROOT::Fit::FitResult(fConfig) );
319  // evaluate one time the FCN
320  double fcnval = (*fObjFunction)(fResult->GetParams() );
321  // update fit result
322  fResult->fVal = fcnval;
323  fResult->fNCalls++;
324  return true;
325 }
326 
327 
// NOTE(review): the signature line ("bool Fitter::DoLeastSquareFit()",
// original line 328) was lost in extraction. Performs a chi2 (least
// square) fit on binned data, using the model-function gradient when
// available and requested.
329 
330  // perform a chi2 fit on a set of binned data
331  std::shared_ptr<BinData> data = std::dynamic_pointer_cast<BinData>(fData);
332  assert(data);
333 
334  // check function
335  if (!fFunc) {
336  MATH_ERROR_MSG("Fitter::DoLeastSquareFit","model function is not set");
337  return false;
338  }
339 
340 #ifdef DEBUG
341  std::cout << "Fitter ParamSettings " << Config().ParamsSettings()[3].IsBound() << " lower limit " << Config().ParamsSettings()[3].LowerLimit() << " upper limit " << Config().ParamsSettings()[3].UpperLimit() << std::endl;
342 #endif
343 
344  fBinFit = true;
345  fDataSize = data->Size();
346 
347  // check if fFunc provides gradient
348  if (!fUseGradient) {
349  // do minimzation without using the gradient
350  Chi2FCN<BaseFunc> chi2(data,fFunc);
351  fFitType = chi2.Type();
352  return DoMinimization (chi2);
353  }
354  else {
355  // use gradient
356  if (fConfig.MinimizerOptions().PrintLevel() > 0)
357  MATH_INFO_MSG("Fitter::DoLeastSquareFit","use gradient from model function");
358  std::shared_ptr<IGradModelFunction> gradFun = std::dynamic_pointer_cast<IGradModelFunction>(fFunc);
359  if (gradFun) {
360  Chi2FCN<BaseGradFunc> chi2(data,gradFun);
361  fFitType = chi2.Type();
362  return DoMinimization (chi2);
363  }
364  MATH_ERROR_MSG("Fitter::DoLeastSquareFit","wrong type of function - it does not provide gradient");
365  }
366  return false;
367 }
368 
// Perform a (Poisson) likelihood fit on binned data; extended by default.
// An equivalent chi2 is also computed and stored in the result, and the
// sum-of-weight-squared correction is applied for weighted fits.
369 bool Fitter::DoBinnedLikelihoodFit(bool extended) {
370  // perform a likelihood fit on a set of binned data
371  // The fit is extended (Poisson logl_ by default
372 
373  std::shared_ptr<BinData> data = std::dynamic_pointer_cast<BinData>(fData);
374  assert(data);
375 
376  bool useWeight = fConfig.UseWeightCorrection();
377 
378  // check function
379  if (!fFunc) {
380  MATH_ERROR_MSG("Fitter::DoLikelihoodFit","model function is not set");
381  return false;
382  }
383 
384  // logl fit (error should be 0.5) set if different than default values (of 1)
// NOTE(review): original lines 385-386 were lost in extraction — per the
// comment above they presumably test the configured error definition and
// set it to 0.5 for likelihood fits; confirm against upstream Fitter.cxx.
387  }
388 
389  if (useWeight && fConfig.MinosErrors() ) {
390  MATH_INFO_MSG("Fitter::DoLikelihoodFit","MINOS errors cannot be computed in weighted likelihood fits");
391  fConfig.SetMinosErrors(false);
392  }
393 
394 
395  fBinFit = true;
396  fDataSize = data->Size();
397 
398  // create a chi2 function to be used for the equivalent chi-square
399  Chi2FCN<BaseFunc> chi2(data,fFunc);
400 
401  if (!fUseGradient) {
402  // do minimization without using the gradient
403  PoissonLikelihoodFCN<BaseFunc> logl(data,fFunc, useWeight, extended);
404  fFitType = logl.Type();
405  // do minimization
406  if (!DoMinimization (logl, &chi2) ) return false;
407  if (useWeight) {
408  logl.UseSumOfWeightSquare();
409  if (!ApplyWeightCorrection(logl) ) return false;
410  }
411  }
412  else {
413  if (fConfig.MinimizerOptions().PrintLevel() > 0)
414  MATH_INFO_MSG("Fitter::DoLikelihoodFit","use gradient from model function");
415  // check if fFunc provides gradient
416  std::shared_ptr<IGradModelFunction> gradFun = std::dynamic_pointer_cast<IGradModelFunction>(fFunc);
417  if (!gradFun) {
418  MATH_ERROR_MSG("Fitter::DoLikelihoodFit","wrong type of function - it does not provide gradient");
419  return false;
420  }
421  // use gradient for minimization
422  // not-extended is not implemented in this case
423  if (!extended) {
424  MATH_WARN_MSG("Fitter::DoLikelihoodFit","Not-extended binned fit with gradient not yet supported - do an extended fit");
425  }
426  PoissonLikelihoodFCN<BaseGradFunc> logl(data,gradFun, useWeight, true);
427  fFitType = logl.Type();
428  // do minimization
429  if (!DoMinimization (logl, &chi2) ) return false;
430  if (useWeight) {
431  logl.UseSumOfWeightSquare();
432  if (!ApplyWeightCorrection(logl) ) return false;
433  }
434  }
435  return true;
436 }
437 
438 
// Perform a log-likelihood fit on unbinned data; optionally extended.
// Uses the model-function gradient when available and requested, and
// applies the sum-of-weight-squared correction for weighted fits.
439 bool Fitter::DoUnbinnedLikelihoodFit(bool extended) {
440  // perform a likelihood fit on a set of unbinned data
441 
442  std::shared_ptr<UnBinData> data = std::dynamic_pointer_cast<UnBinData>(fData);
443  assert(data);
444 
445  bool useWeight = fConfig.UseWeightCorrection();
446 
447  if (!fFunc) {
448  MATH_ERROR_MSG("Fitter::DoLikelihoodFit","model function is not set");
449  return false;
450  }
451 
452  if (useWeight && fConfig.MinosErrors() ) {
453  MATH_INFO_MSG("Fitter::DoLikelihoodFit","MINOS errors cannot be computed in weighted likelihood fits");
454  fConfig.SetMinosErrors(false);
455  }
456 
457 
458  fBinFit = false;
459  fDataSize = data->Size();
460 
461 #ifdef DEBUG
462  int ipar = 0;
463  std::cout << "Fitter ParamSettings " << Config().ParamsSettings()[ipar].IsBound() << " lower limit " << Config().ParamsSettings()[ipar].LowerLimit() << " upper limit " << Config().ParamsSettings()[ipar].UpperLimit() << std::endl;
464 #endif
465 
466  // logl fit (error should be 0.5) set if different than default values (of 1)
// NOTE(review): original lines 467-468 were lost in extraction — per the
// comment above they presumably set the error definition to 0.5 when it
// still has the default value; confirm against upstream Fitter.cxx.
469  }
470 
471  if (!fUseGradient) {
472  // do minimization without using the gradient
473  LogLikelihoodFCN<BaseFunc> logl(data,fFunc, useWeight, extended);
474  fFitType = logl.Type();
475  if (!DoMinimization (logl) ) return false;
476  if (useWeight) {
477  logl.UseSumOfWeightSquare();
478  if (!ApplyWeightCorrection(logl) ) return false;
479  }
480  return true;
481  }
482  else {
483  // use gradient : check if fFunc provides gradient
484  if (fConfig.MinimizerOptions().PrintLevel() > 0)
485  MATH_INFO_MSG("Fitter::DoLikelihoodFit","use gradient from model function");
486  std::shared_ptr<IGradModelFunction> gradFun = std::dynamic_pointer_cast<IGradModelFunction>(fFunc);
487  if (gradFun) {
488  if (extended) {
489  MATH_WARN_MSG("Fitter::DoLikelihoodFit","Extended unbinned fit with gradient not yet supported - do a not-extended fit");
490  }
491  LogLikelihoodFCN<BaseGradFunc> logl(data,gradFun,useWeight, extended);
492  fFitType = logl.Type();
493  if (!DoMinimization (logl) ) return false;
494  if (useWeight) {
495  logl.UseSumOfWeightSquare();
496  if (!ApplyWeightCorrection(logl) ) return false;
497  }
498  return true;
499  }
500  MATH_ERROR_MSG("Fitter::DoLikelihoodFit","wrong type of function - it does not provide gradient");
501  }
502  return false;
503 }
504 
505 
// NOTE(review): the signature line ("bool Fitter::DoLinearFit()",
// original line 506) was lost in extraction. Performs a linear least
// square fit by temporarily switching the configured minimizer to
// "Linear", delegating to DoLeastSquareFit, then restoring the previous
// minimizer choice.
507 
508  std::shared_ptr<BinData> data = std::dynamic_pointer_cast<BinData>(fData);
509  assert(data);
510 
511  // perform a linear fit on a set of binned data
512  std::string prevminimizer = fConfig.MinimizerType();
513  fConfig.SetMinimizer("Linear");
514 
515  fBinFit = true;
516 
517  bool ret = DoLeastSquareFit();
518  fConfig.SetMinimizer(prevminimizer.c_str());
519  return ret;
520 }
521 
// NOTE(review): the signature line ("bool Fitter::CalculateHessErrors()",
// original line 523) was lost in extraction. Re-computes parabolic
// (HESSE) errors after a fit and merges them into the fit result and,
// optionally, back into the configuration.
522 
524  // compute the Hesse errors according to configuration
525  // set in the parameters and append value in fit result
526  if (fObjFunction.get() == 0) {
527  MATH_ERROR_MSG("Fitter::CalculateHessErrors","Objective function has not been set");
528  return false;
529  }
530  // assume a fResult pointer always exists
531  assert (fResult.get() );
532 
533  // need a special treatment in case of weighted likelihood fit
534  // (not yet implemented)
535  if (fFitType == 2 && fConfig.UseWeightCorrection() ) {
536  MATH_ERROR_MSG("Fitter::CalculateHessErrors","Re-computation of Hesse errors not implemented for weighted likelihood fits");
537  MATH_INFO_MSG("Fitter::CalculateHessErrors","Do the Fit using configure option FitConfig::SetParabErrors()");
538  return false;
539  }
540  // if (!fUseGradient ) {
541  // ROOT::Math::FitMethodFunction * fcn = dynamic_cast< ROOT::Math::FitMethodFunction *>(fObjFunction.get());
542  // if (fcn && fcn->Type() == ROOT::Math::FitMethodFunction::kLogLikelihood) {
543  // if (!fBinFit) {
544  // ROOT::Math::LogLikelihoodFunction * nll = dynamic_cast< ROOT::Math::LogLikelihoodFunction *>(fcn);
545  // assert(nll);
546  // nll->UseSumOfWeightSquare(false);
547  // }
548  // else {
549  // ROOT::Math::PoissonLikelihoodFunction * nll = dynamic_cast< ROOT::Math::PoissonLikelihoodFunction *>(fcn);
550  // assert(nll);
551  // nll->UseSumOfWeightSquare(false);
552  // }
553  // // reset fcn in minimizer
554  // }
555 
556 
557  // create minimizer if not done or if name has changed
558  if ( !fMinimizer ||
559  fResult->MinimizerType().find(fConfig.MinimizerType()) == std::string::npos ) {
560  bool ret = DoInitMinimizer();
561  if (!ret) {
562  MATH_ERROR_MSG("Fitter::CalculateHessErrors","Error initializing the minimizer");
563  return false;
564  }
565  }
566 
567 
568  if (!fMinimizer ) {
569  MATH_ERROR_MSG("Fitter::CalculateHessErrors","Need to do a fit before calculating the errors");
570  return false;
571  }
572 
573  //run Hesse
574  bool ret = fMinimizer->Hesse();
575  if (!ret) MATH_WARN_MSG("Fitter::CalculateHessErrors","Error when calculating Hessian");
576 
577 
578  // update minimizer results with what comes out from Hesse
579  // in case is empty - create from a FitConfig
580  if (fResult->IsEmpty() )
// NOTE(review): std::auto_ptr is deprecated/removed in modern C++ —
// should be migrated to shared_ptr (cannot be changed in a doc-only pass).
581  fResult = std::auto_ptr<ROOT::Fit::FitResult>(new ROOT::Fit::FitResult(fConfig) );
582 
583 
584  // re-give a minimizer instance in case it has been changed
585  ret |= fResult->Update(fMinimizer, ret);
586 
587  // when possible get ncalls from FCN and set in fit result
// NOTE(review): original line 588 (the if-condition that guards this
// fNCalls update) was lost in extraction — confirm against upstream.
589  fResult->fNCalls = GetNCallsFromFCN();
590  }
591 
592  // set also new errors in FitConfig
593  if (fConfig.UpdateAfterFit() && ret) DoUpdateFitConfig();
594 
595  return ret;
596 }
597 
// NOTE(review): the signature line ("bool Fitter::CalculateMinosErrors()",
// original line 599) was lost in extraction. Runs MINOS on the selected
// (or all) parameters after a fit and stores the asymmetric errors in
// the fit result; returns true if at least one parameter succeeded.
598 
600  // compute the Minos errors according to configuration
601  // set in the parameters and append value in fit result
602  // normally Minos errors are computed just after the minimization
603  // (in DoMinimization) when creating the FItResult if the
604  // FitConfig::MinosErrors() flag is set
605 
606  if (!fMinimizer.get() ) {
607  MATH_ERROR_MSG("Fitter::CalculateMinosErrors","Minimizer does not exist - cannot calculate Minos errors");
608  return false;
609  }
610 
611  if (!fResult.get() || fResult->IsEmpty() ) {
612  MATH_ERROR_MSG("Fitter::CalculateMinosErrors","Invalid Fit Result - cannot calculate Minos errors");
613  return false;
614  }
615 
616  if (fFitType == 2 && fConfig.UseWeightCorrection() ) {
617  MATH_ERROR_MSG("Fitter::CalculateMinosErrors","Computation of MINOS errors not implemented for weighted likelihood fits");
618  return false;
619  }
620 
621  // set flag to compute Minos error to false in FitConfig to avoid that
622  // following minimizaiton calls perform unwanted Minos error calculations
623  fConfig.SetMinosErrors(false);
624 
625 
626  const std::vector<unsigned int> & ipars = fConfig.MinosParams();
627  unsigned int n = (ipars.size() > 0) ? ipars.size() : fResult->Parameters().size();
628  bool ok = false;
629  for (unsigned int i = 0; i < n; ++i) {
630  double elow, eup;
631  unsigned int index = (ipars.size() > 0) ? ipars[i] : i;
632  bool ret = fMinimizer->GetMinosError(index, elow, eup);
633  if (ret) fResult->SetMinosError(index, elow, eup);
634  ok |= ret;
635  }
636  if (!ok)
637  MATH_ERROR_MSG("Fitter::CalculateMinosErrors","Minos error calculation failed for all parameters");
638 
639  return ok;
640 }
641 
642 
643 
644 // traits for distinhuishing fit methods functions from generic objective functions
645 template<class Func>
646 struct ObjFuncTrait {
647  static unsigned int NCalls(const Func & ) { return 0; }
648  static int Type(const Func & ) { return -1; }
649  static bool IsGrad() { return false; }
650 };
651 template<>
652 struct ObjFuncTrait<ROOT::Math::FitMethodFunction> {
653  static unsigned int NCalls(const ROOT::Math::FitMethodFunction & f ) { return f.NCalls(); }
654  static int Type(const ROOT::Math::FitMethodFunction & f) { return f.Type(); }
655  static bool IsGrad() { return false; }
656 };
657 template<>
658 struct ObjFuncTrait<ROOT::Math::FitMethodGradFunction> {
659  static unsigned int NCalls(const ROOT::Math::FitMethodGradFunction & f ) { return f.NCalls(); }
660  static int Type(const ROOT::Math::FitMethodGradFunction & f) { return f.Type(); }
661  static bool IsGrad() { return true; }
662 };
663 
// NOTE(review): the signature line ("bool Fitter::DoInitMinimizer()",
// original line 664) was lost in extraction. Creates the minimizer from
// the configuration, installs the (possibly gradient) objective function
// and the parameter settings.
665  //initialize minimizer by creating it
666  // and set there the objective function
667  // obj function must have been copied before
668  assert(fObjFunction.get() );
669 
670  // check configuration and objective function
671  if ( fConfig.ParamsSettings().size() != fObjFunction->NDim() ) {
672  MATH_ERROR_MSG("Fitter::DoInitMinimizer","wrong function dimension or wrong size for FitConfig");
673  return false;
674  }
675 
676  // create first Minimizer
677  // using an auto_Ptr will delete the previous existing one
678  fMinimizer = std::shared_ptr<ROOT::Math::Minimizer> ( fConfig.CreateMinimizer() );
// NOTE(review): diagnostic below is tagged "Fitter::FitFCN" although we
// are in DoInitMinimizer — misleading tag, flag for a code fix.
679  if (fMinimizer.get() == 0) {
680  MATH_ERROR_MSG("Fitter::FitFCN","Minimizer cannot be created");
681  return false;
682  }
683 
684  // in case of gradient function one needs to downcast the pointer
685  if (fUseGradient) {
686  const ROOT::Math::IMultiGradFunction * gradfcn = dynamic_cast<const ROOT::Math::IMultiGradFunction *> (fObjFunction.get() );
687  if (!gradfcn) {
688  MATH_ERROR_MSG("Fitter::DoInitMinimizer","wrong type of function - it does not provide gradient");
689  return false;
690  }
691  fMinimizer->SetFunction( *gradfcn);
692  }
693  else
694  fMinimizer->SetFunction( *fObjFunction);
695 
696 
697  fMinimizer->SetVariables(fConfig.ParamsSettings().begin(), fConfig.ParamsSettings().end() );
698 
699  // if requested parabolic error do correct error analysis by the minimizer (call HESSE)
700  if (fConfig.ParabErrors()) fMinimizer->SetValidError(true);
701 
702  return true;
703 
704 }
705 
706 
// NOTE(review): the signature line (original line 707, presumably
// "bool Fitter::DoMinimization(const ROOT::Math::IMultiGenFunction *
// chi2func)" — confirm upstream) was lost in extraction. Runs the
// already-initialized minimizer and fills the FitResult.
708  // perform the minimization (assume we have already initialized the minimizer)
709 
710  assert(fMinimizer );
711 
712  bool ret = fMinimizer->Minimize();
713 
714  // unsigned int ncalls = ObjFuncTrait<ObjFunc>::NCalls(*fcn);
715  // int fitType = ObjFuncTrait<ObjFunc>::Type(objFunc);
716 
717  if (!fResult) fResult = std::shared_ptr<FitResult> ( new FitResult() );
718  fResult->FillResult(fMinimizer,fConfig, fFunc, ret, fDataSize, fBinFit, chi2func );
719 
720  // when possible get ncalls from FCN and set in fit result
// NOTE(review): original line 721 (the if-condition guarding this
// fNCalls update) was lost in extraction — confirm against upstream.
722  fResult->fNCalls = GetNCallsFromFCN();
723  }
724 
725  // fill information in fit result
726  fResult->fObjFunc = fObjFunction;
727  fResult->fFitData = fData;
728 
729 
730 #ifdef DEBUG
731  std::cout << "ROOT::Fit::Fitter::DoMinimization : ncalls = " << fResult->fNCalls << " type of objfunc " << fFitFitResType << " typeid: " << typeid(*fObjFunction).name() << " use gradient " << fUseGradient << std::endl;
732 #endif
733 
// NOTE(review): original line 734 (the condition under which errors are
// normalized, presumably a chi2/ndf scaling option) was lost — confirm.
735  fResult->NormalizeErrors();
736 
737 
738 
739  // set also new parameter values and errors in FitConfig
740  if (fConfig.UpdateAfterFit() && ret) DoUpdateFitConfig();
741 
742  return ret;
743 }
744 
745 bool Fitter::DoMinimization(const BaseFunc & objFunc, const ROOT::Math::IMultiGenFunction * chi2func) {
746  // perform the minimization initializing the minimizer starting from a given obj function
747 
748  // keep also a copy of FCN function and set this in minimizer so they will be managed together
749  // (remember that cloned copy will still depends on data and model function pointers)
750  fObjFunction = std::auto_ptr<ROOT::Math::IMultiGenFunction> ( objFunc.Clone() );
751  if (!DoInitMinimizer()) return false;
752  return DoMinimization(chi2func);
753 }
754 
755 
// NOTE(review): the signature line ("void Fitter::DoUpdateFitConfig()",
// original line 756) was lost in extraction, and so was original line
// 760 — the declaration of `par`, presumably a reference to the i-th
// parameter settings of fConfig (confirm upstream). Copies the fitted
// values (and positive errors as step sizes) back into the configuration.
757  // update the fit configuration after a fit using the obtained result
758  if (fResult->IsEmpty() || !fResult->IsValid() ) return;
759  for (unsigned int i = 0; i < fConfig.NPar(); ++i) {
761  par.SetValue( fResult->Value(i) );
762  if (fResult->Error(i) > 0) par.SetStepSize( fResult->Error(i) );
763  }
764 }
765 
// NOTE(review): the signature line ("int Fitter::GetNCallsFromFCN()",
// original line 766) was lost in extraction. Returns the call count kept
// by the stored FCN when it is a fit-method function (0 otherwise); used
// when the minimizer itself cannot report the number of calls.
767  // retrieve ncalls from the fit method functions
768  // this function is called when minimizer does not provide a way of returning the number of function calls
769  int ncalls = 0;
770  if (!fUseGradient) {
771  const ROOT::Math::FitMethodFunction * fcn = dynamic_cast<const ROOT::Math::FitMethodFunction *>(fObjFunction.get());
772  if (fcn) ncalls = fcn->NCalls();
773  }
774  else {
775  const ROOT::Math::FitMethodGradFunction * fcn = dynamic_cast<const ROOT::Math::FitMethodGradFunction*>(fObjFunction.get());
776  if (fcn) ncalls = fcn->NCalls();
777  }
778  return ncalls;
779 }
780 
781 
782 bool Fitter::ApplyWeightCorrection(const ROOT::Math::IMultiGenFunction & loglw2, bool minimizeW2L) {
783  // apply correction for weight square
784  // Compute Hessian of the loglikelihood function using the sum of the weight squared
785  // This method assumes:
786  // - a fit has been done before and a covariance matrix exists
787  // - the objective function is a likelihood function and Likelihood::UseSumOfWeightSquare()
788  // has been called before
789 
790  if (fMinimizer.get() == 0) {
791  MATH_ERROR_MSG("Fitter::ApplyWeightCorrection","Must perform first a fit before applying the correction");
792  return false;
793  }
794 
795  unsigned int n = loglw2.NDim();
796  // correct errors for weight squared
797  std::vector<double> cov(n*n);
798  bool ret = fMinimizer->GetCovMatrix(&cov[0] );
799  if (!ret) {
800  MATH_ERROR_MSG("Fitter::ApplyWeightCorrection","Previous fit has no valid Covariance matrix");
801  return false;
802  }
803  // need to re-init the minimizer and set w2
804  fObjFunction = std::auto_ptr<ROOT::Math::IMultiGenFunction> ( loglw2.Clone() );
805  // need to re-initialize the minimizer for the changes applied in the
806  // objective functions
807  if (!DoInitMinimizer()) return false;
808 
809  //std::cout << "Running Hesse ..." << std::endl;
810 
811  // run eventually before a minimization
812  // ignore its error
813  if (minimizeW2L) fMinimizer->Minimize();
814  // run Hesse on the log-likelihood build using sum of weight squared
815  ret = fMinimizer->Hesse();
816  if (!ret) {
817  MATH_ERROR_MSG("Fitter::ApplyWeightCorrection","Error running Hesse on weight2 likelihood - cannot compute errors");
818  return false;
819  }
820 
821  if (fMinimizer->CovMatrixStatus() != 3) {
822  MATH_WARN_MSG("Fitter::ApplyWeightCorrection","Covariance matrix for weighted likelihood is not accurate, the errors may be not reliable");
823  if (fMinimizer->CovMatrixStatus() == 2)
824  MATH_WARN_MSG("Fitter::ApplyWeightCorrection","Covariance matrix for weighted likelihood was forced to be defined positive");
825  if (fMinimizer->CovMatrixStatus() <= 0)
826  // probably should have failed before
827  MATH_ERROR_MSG("Fitter::ApplyWeightCorrection","Covariance matrix for weighted likelihood is not valid !");
828  }
829 
830  // std::vector<double> c(n*n);
831  // ret = fMinimizer->GetCovMatrix(&c2[0] );
832  // if (!ret) std::cout << "Error reading cov matrix " << fMinimizer->Status() << std::endl;
833  // TMatrixDSym cmat2(n,&c2[0]);
834  // std::cout << "Cov matrix of w2 " << std::endl;
835  // cmat2.Print();
836  // cmat2.Invert();
837  // std::cout << "Hessian of w2 " << std::endl;
838  // cmat2.Print();
839 
840  // get Hessian matrix from weight-square likelihood
841  std::vector<double> hes(n*n);
842  ret = fMinimizer->GetHessianMatrix(&hes[0] );
843  if (!ret) {
844  MATH_ERROR_MSG("Fitter::ApplyWeightCorrection","Error retrieving Hesse on weight2 likelihood - cannot compute errors");
845  return false;
846  }
847 
848  // for debug
849  // std::cout << "Hessian W2 matrix " << std::endl;
850  // for (unsigned int i = 0; i < n; ++i) {
851  // for (unsigned int j = 0; j < n; ++j) {
852  // std::cout << std::setw(12) << hes[i*n + j] << " , ";
853  // }
854  // std::cout << std::endl;
855  // }
856 
857  // perform product of matvrix cov * hes * cov
858  // since we do not want to add matrix dependence do product by hand
859  // first do hes * cov
860  std::vector<double> tmp(n*n);
861  for (unsigned int i = 0; i < n; ++i) {
862  for (unsigned int j = 0; j < n; ++j) {
863  for (unsigned int k = 0; k < n; ++k)
864  tmp[i*n+j] += hes[i*n + k] * cov[k*n + j];
865  }
866  }
867  // do multiplication now cov * tmp save result
868  std::vector<double> newCov(n*n);
869  for (unsigned int i = 0; i < n; ++i) {
870  for (unsigned int j = 0; j < n; ++j) {
871  for (unsigned int k = 0; k < n; ++k)
872  newCov[i*n+j] += cov[i*n + k] * tmp[k*n + j];
873  }
874  }
875  // update fit result with new corrected covariance matrix
876  unsigned int k = 0;
877  for (unsigned int i = 0; i < n; ++i) {
878  fResult->fErrors[i] = std::sqrt( newCov[i*(n+1)] );
879  for (unsigned int j = 0; j <= i; ++j)
880  fResult->fCovMatrix[k++] = newCov[i *n + j];
881  }
882  //fResult->PrintCovMatrix(std::cout);
883 
884  return true;
885 }
886 
887 
// NOTE(review): the signature line ("void Fitter::ExamineFCN()", original
// line ~889) and — more importantly — original lines 895-899, which held
// the actual inspection logic (presumably dynamic casts of the stored FCN
// to the known Chi2/PoissonLikelihood FCN types to recover the data and
// model function), were lost in extraction. Only the trailing fall-through
// return is visible here; confirm the body against upstream Fitter.cxx.
888 
890  // return a pointer to the binned data used in the fit
891  // works only for chi2 or binned likelihood fits
892  // thus when the objective function stored is a Chi2Func or a PoissonLikelihood
893  // The function also sets the model function correctly if it has not been set
894 
897 
900 
901  //MATH_INFO_MSG("Fitter::ExamineFCN","Objective function is not of a known type - FitData and ModelFunction objects are not available");
902  return;
903 }
904 
905 
906 
907 
908  } // end namespace Fit
909 
910 } // end namespace ROOT
911 
const FitConfig & Config() const
access to the fit configuration (const method)
Definition: Fitter.h:382
Interface (abstract class) for multi-dimensional functions providing a gradient calculation.
Definition: IFunction.h:322
double par[1]
Definition: unuranDistr.cxx:38
const std::string & MinimizerType() const
return type of minimizer package
Definition: FitConfig.h:160
void ExamineFCN()
look at the user provided FCN and get data and model function if they derive from ROOT::Fit FCN class...
Definition: Fitter.cxx:889
virtual unsigned int NDim() const =0
Retrieve the dimension of the function.
This namespace contains pre-defined functions to be used in conjunction with TExecutor::Map and TExecu...
Definition: StringConv.hxx:21
void(* MinuitFCN_t)(int &npar, double *gin, double &f, double *u, int flag)
fit using user provided FCN with Minuit-like interface If npar = 0 it is assumed that the parameters ...
Definition: Fitter.h:310
ROOT::Math::Minimizer * CreateMinimizer()
create a new minimizer according to chosen configuration
Definition: FitConfig.cxx:203
bool CalculateMinosErrors()
perform an error analysis on the result using MINOS To be called only after fitting and when a minimi...
Definition: Fitter.cxx:599
virtual Type_t Type() const
return the type of method, override if needed
Fitter & operator=(const Fitter &rhs)
Assignment operator (disabled, class is not copyable)
Definition: Fitter.cxx:85
bool EvalFCN()
Perform a simple FCN evaluation.
Definition: Fitter.cxx:308
Class, describing value, limits and step size of the parameters Provides functionality also to set/re...
virtual BaseObjFunction::Type_t Type() const
get type of fit method function
bool DoBinnedLikelihoodFit(bool extended=true)
binned likelihood fit
Definition: Fitter.cxx:369
LogLikelihoodFCN class for likelihood fits.
virtual unsigned int NPoints() const
return the number of data points used in evaluating the function
bool ParabErrors() const
do analysis for parabolic errors
Definition: FitConfig.h:174
const std::vector< unsigned int > & MinosParams() const
return vector of parameter indices for which the Minos Error will be computed
Definition: FitConfig.h:187
std::shared_ptr< IModelFunction > fFunc
Definition: Fitter.h:494
Class describing the unbinned data sets (just x coordinates values) of any dimensions.
Definition: UnBinData.h:47
double ErrorDef() const
error definition
bool DoLinearFit()
linear least square fit
Definition: Fitter.cxx:506
ROOT::Math::MinimizerOptions & MinimizerOptions()
access to the minimizer control parameter (non const method)
Definition: FitConfig.h:138
void SetErrorDef(double err)
set error def
std::shared_ptr< ROOT::Fit::FitResult > fResult
copy of the fitted function containing on output the fit result
Definition: Fitter.h:496
MultiDimParamFunctionAdapter class to wrap a one-dimensional parametric function in a multi dimension...
bool CalculateHessErrors()
perform an error analysis on the result using the Hessian Errors are obtaied from the inverse of the ...
Definition: Fitter.cxx:523
void SetValue(double val)
set the value
virtual BaseObjFunction::Type_t Type() const
get type of fit method function
void CreateParamsSettings(const ROOT::Math::IParamMultiFunction &func)
set the parameter settings from a model function.
Definition: FitConfig.cxx:174
double sqrt(double)
Fitter()
Default constructor.
Definition: Fitter.cxx:52
bool UpdateAfterFit() const
Update configuration after a fit using the FitResult.
Definition: FitConfig.h:180
bool DoInitMinimizer()
Definition: Fitter.cxx:664
class evaluating the log likelihood for binned Poisson likelihood fits it is template to distinguish ...
const std::vector< ROOT::Fit::ParameterSettings > & ParamsSettings() const
get the vector of parameter settings (const method)
Definition: FitConfig.h:90
bool NormalizeErrors() const
flag to check if resulting errors are to be normalized according to chi2/ndf
Definition: FitConfig.h:171
bool ApplyWeightCorrection(const ROOT::Math::IMultiGenFunction &loglw2, bool minimizeW2L=false)
apply correction in the error matrix for the weights for likelihood fits This method can be called on...
Definition: Fitter.cxx:782
#define MATH_INFO_MSG(loc, str)
Definition: Error.h:44
int PrintLevel() const
non-static methods for retrieving options
void SetMinosErrors(bool on=true)
set Minos errors computation to be performed after fitting
Definition: FitConfig.h:198
Chi2FCN class for binnned fits using the least square methods.
Definition: Chi2FCN.h:68
~Fitter()
Destructor.
Definition: Fitter.cxx:71
std::shared_ptr< ROOT::Fit::FitData > fData
pointer to used minimizer
Definition: Fitter.h:500
FitConfig fConfig
Definition: Fitter.h:492
void SetMinimizer(const char *type, const char *algo=0)
set minimizer type
Definition: FitConfig.h:152
void SetStepSize(double err)
set the step size
IParamFunction interface (abstract class) describing multi-dimensional parameteric functions It is a ...
BasicFCN class: base class for the objective functions used in the fits It has a reference to the dat...
Definition: BasicFCN.h:44
Fitter class, entry point for performing all type of fits.
Definition: Fitter.h:94
bool FitFCN(const ROOT::Math::FitMethodFunction &fcn, const double *params=0)
Fit using a FitMethodFunction interface.
Definition: Fitter.cxx:253
const ParameterSettings & ParSettings(unsigned int i) const
get the parameter settings for the i-th parameter (const method)
Definition: FitConfig.h:80
unsigned int NPar() const
number of parameters settings
Definition: FitConfig.h:100
bool FitFCN()
Perform a fit with the previously set FCN function.
Definition: Fitter.cxx:290
void UseSumOfWeightSquare(bool on=true)
Class describing the binned data sets : vectors of x coordinates, y values and optionally error on y ...
Definition: BinData.h:61
void SetFunction(const IModelFunction &func, bool useGradient=false)
Set the fitted function (model function) from a parametric function interface.
Definition: Fitter.cxx:104
std::shared_ptr< ROOT::Math::Minimizer > fMinimizer
pointer to the object containing the result of the fit
Definition: Fitter.h:498
Type
enumeration specifying the integration types.
virtual IBaseFunctionMultiDim * Clone() const =0
Clone a function.
bool UseWeightCorrection() const
Apply Weight correction for error matrix computation.
Definition: FitConfig.h:183
Interface (abstract class) for parametric one-dimensional gradient functions providing in addition to...
#define MATH_WARN_MSG(loc, str)
Definition: Error.h:47
class containg the result of the fit and all the related information (fitted parameter values...
Definition: FitResult.h:52
double f(double x)
Specialized IParamFunction interface (abstract class) for one-dimensional parametric functions It is ...
FitMethodFunction class Interface for objective functions (like chi2 and likelihood used in the fit) ...
Definition: Fitter.h:57
TFitResultPtr Fit(FitObject *h1, TF1 *f1, Foption_t &option, const ROOT::Math::MinimizerOptions &moption, const char *goption, ROOT::Fit::DataRange &range)
Definition: HFitImpl.cxx:134
Interface (abstract class) for parametric gradient multi-dimensional functions providing in addition ...
double func(double *x, double *p)
Definition: stressTF1.cxx:213
std::shared_ptr< ROOT::Math::IMultiGenFunction > fObjFunction
pointer to the fit data (binned or unbinned data)
Definition: Fitter.h:502
bool GetDataFromFCN()
internal functions to get data set and model function from FCN useful for fits done with customized F...
Definition: Fitter.h:510
bool DoLeastSquareFit()
least square fit
Definition: Fitter.cxx:328
bool useGradient
bool fUseGradient
Definition: Fitter.h:482
bool SetFCN(unsigned int npar, Function &fcn, const double *params=0, unsigned int dataSize=0, bool chi2fit=false)
Set a generic FCN function as a C++ callable object implementing double () (const double *) Note that...
Definition: Fitter.h:543
MultiDimParamGradFunctionAdapter class to wrap a one-dimensional parametric gradient function in a mu...
void SetParamsSettings(unsigned int npar, const double *params, const double *vstep=0)
set the parameter settings from number of parameters and a vector of values and optionally step value...
Definition: FitConfig.cxx:136
int GetNCallsFromFCN()
Definition: Fitter.cxx:766
void DoUpdateFitConfig()
Definition: Fitter.cxx:756
double result[121]
virtual unsigned int NCalls() const
return the total number of function calls (overridden if needed)
bool MinosErrors() const
do Minos errors analysis on the parameters
Definition: FitConfig.h:177
Documentation for the abstract class IBaseFunctionMultiDim.
Definition: IFunction.h:63
virtual BaseObjFunction::Type_t Type() const
get type of fit method function
Definition: Chi2FCN.h:148
bool DoMinimization(const BaseFunc &f, const ROOT::Math::IMultiGenFunction *chifunc=0)
do minimization
Definition: Fitter.cxx:745
const Int_t n
Definition: legend1.C:16
double gDefaultErrorDef
Definition: Fitter.cxx:49
bool DoUnbinnedLikelihoodFit(bool extended=false)
un-binned likelihood fit
Definition: Fitter.cxx:439
#define MATH_ERROR_MSG(loc, str)
Definition: Error.h:50