Logo ROOT   6.14/05
Reference Guide
Fitter.cxx
Go to the documentation of this file.
1 // @(#)root/mathcore:$Id$
2 // Author: L. Moneta Mon Sep 4 17:00:10 2006
3 
4 /**********************************************************************
5  * *
6  * Copyright (c) 2006 LCG ROOT Math Team, CERN/PH-SFT *
7  * *
8  * *
9  **********************************************************************/
10 
11 // Implementation file for class Fitter
12 
13 
14 #include "Fit/Fitter.h"
15 #include "Fit/Chi2FCN.h"
17 #include "Fit/LogLikelihoodFCN.h"
18 #include "Math/Minimizer.h"
19 #include "Math/MinimizerOptions.h"
20 #include "Math/FitMethodFunction.h"
21 #include "Fit/BasicFCN.h"
22 #include "Fit/BinData.h"
23 #include "Fit/UnBinData.h"
24 #include "Fit/FcnAdapter.h"
25 #include "Fit/FitConfig.h"
26 #include "Fit/FitResult.h"
27 #include "Math/Error.h"
28 #include "TF1.h"
29 
30 #include <memory>
31 
32 #include "Math/IParamFunction.h"
33 
35 
36 // #include "TMatrixDSym.h"
37 // for debugging
38 //#include "TMatrixD.h"
39 // #include <iomanip>
40 
41 namespace ROOT {
42 
43  namespace Fit {
44 
45 // use a static variable to get default minimizer options for error def
46 // to see if user has changed it later on. If it has not been changed we set
47 // for the likelihood method an error def of 0.5
48 // t.b.d.: multiply the likelihood by 2 so it has the same error definition as the chi2
50 
51 
// Default-constructor initializer list (the signature line was lost in this
// extraction): all flags and counters start at their neutral defaults.
53  fUseGradient(false),
54  fBinFit(false),
55  fFitType(0),
56  fDataSize(0)
57 {}
58 
59 Fitter::Fitter(const std::shared_ptr<FitResult> & result) :
60  fUseGradient(false),
61  fBinFit(false),
62  fFitType(0),
63  fDataSize(0),
64  fResult(result)
65 {
66  if (result->fFitFunc) SetFunction(*fResult->fFitFunc); // this will create also the configuration
67  if (result->fObjFunc) fObjFunction = fResult->fObjFunc;
68  if (result->fFitData) fData = fResult->fFitData;
69 }
70 
// Destructor body (the signature line was lost in this extraction).
72 {
73  // Destructor implementation.
74 
75  // nothing to do: all owned members are shared_ptr and release themselves
76 }
77 
78 Fitter::Fitter(const Fitter & rhs)
79 {
80  // Implementation of copy constructor.
81  // copy FitResult, FitConfig and clone fit function
82  (*this) = rhs;
83 }
84 
// Assignment-operator body (the signature line was lost in this extraction).
// NOTE(review): apart from the self-assignment guard this is a no-op; the
// member-by-member copy below is kept commented out — confirm against the
// header whether the operator is meant to be private/disabled.
86 {
87  // Implementation of assignment operator.
88  // dummy implementation, since it is private
89  if (this == &rhs) return *this; // time saving self-test
90 // fUseGradient = rhs.fUseGradient;
91 // fBinFit = rhs.fBinFit;
92 // fResult = rhs.fResult;
93 // fConfig = rhs.fConfig;
94 // // function is copied and managed by FitResult (maybe should use an unique_ptr)
95 // fFunc = fResult.ModelFunction();
96 // if (rhs.fFunc != 0 && fResult.ModelFunction() == 0) { // case no fit has been done yet - then clone
97 // if (fFunc) delete fFunc;
98 // fFunc = dynamic_cast<IModelFunction *>( (rhs.fFunc)->Clone() );
99 // assert(fFunc != 0);
100 // }
101  return *this;
102 }
103 
// Set the multi-dimensional (scalar) model function used in the fit.
// The function is cloned so the Fitter owns its own copy. If useGradient is
// requested but the function does not actually provide a gradient, a warning
// is emitted and the function is used as a plain non-gradient function.
104 void Fitter::SetFunction(const IModelFunction & func, bool useGradient)
105 {
106 
107  fUseGradient = useGradient;
108  if (fUseGradient) {
109  const IGradModelFunction * gradFunc = dynamic_cast<const IGradModelFunction*>(&func);
110  if (gradFunc) {
111  SetFunction(*gradFunc, true);
112  return;
113  }
114  else {
115  MATH_WARN_MSG("Fitter::SetFunction","Requested function does not provide gradient - use it as non-gradient function ");
116  }
117  }
118  fUseGradient = false;
119 
120  // set the fit model function (clone the given one and keep a copy )
121  //std::cout << "set a non-grad function" << std::endl;
122 
123  fFunc = std::shared_ptr<IModelFunction>(dynamic_cast<IModelFunction *>(func.Clone() ) );
124  assert(fFunc);
125 
126  // creates the parameter settings
// NOTE(review): original line 127 (presumably the parameter-settings call) was
// lost in this extraction — confirm against the repository source.
128  fFunc_v.reset();
129 }
130 
// Set a one-dimensional model function: it is wrapped (and cloned) into a
// multi-dimensional adapter. Falls back to the non-gradient path with a
// warning if useGradient is requested but no gradient is available.
131 void Fitter::SetFunction(const IModel1DFunction & func, bool useGradient)
132 {
133  fUseGradient = useGradient;
134  if (fUseGradient) {
135  const IGradModel1DFunction * gradFunc = dynamic_cast<const IGradModel1DFunction*>(&func);
136  if (gradFunc) {
137  SetFunction(*gradFunc, true);
138  return;
139  }
140  else {
141  MATH_WARN_MSG("Fitter::SetFunction","Requested function does not provide gradient - use it as non-gradient function ");
142  }
143  }
144  fUseGradient = false;
145  //std::cout << "set a 1d function" << std::endl;
146 
147  // function is cloned when creating the adapter
148  fFunc = std::shared_ptr<IModelFunction>(new ROOT::Math::MultiDimParamFunctionAdapter(func));
149 
150  // creates the parameter settings
// NOTE(review): original line 151 was lost in this extraction.
152  fFunc_v.reset();
153 }
154 
// Set a gradient-providing multi-dimensional model function (cloned).
155 void Fitter::SetFunction(const IGradModelFunction & func, bool useGradient)
156 {
157  fUseGradient = useGradient;
158  //std::cout << "set a grad function" << std::endl;
159  // set the fit model function (clone the given one and keep a copy )
160  fFunc = std::shared_ptr<IModelFunction>( dynamic_cast<IGradModelFunction *> ( func.Clone() ) );
161  assert(fFunc);
162 
163  // creates the parameter settings
// NOTE(review): original line 164 was lost in this extraction.
165  fFunc_v.reset();
166 }
167 
168 
// Set a gradient-providing one-dimensional model function: it is wrapped
// (and cloned) into a multi-dimensional gradient adapter.
169 void Fitter::SetFunction(const IGradModel1DFunction & func, bool useGradient)
170 {
171  //std::cout << "set a 1d grad function" << std::endl;
172  fUseGradient = useGradient;
173  // function is cloned when creating the adapter
174  fFunc = std::shared_ptr<IModelFunction>(new ROOT::Math::MultiDimParamGradFunctionAdapter(func));
175 
176  // creates the parameter settings
// NOTE(review): original line 177 was lost in this extraction.
178  fFunc_v.reset();
179 }
180 
181 
182 bool Fitter::SetFCN(const ROOT::Math::IMultiGenFunction & fcn, const double * params, unsigned int dataSize, bool chi2fit) {
183  // set the objective function for the fit
184  // if params is not NULL create the parameter settings
185  fUseGradient = false;
186  unsigned int npar = fcn.NDim();
187  if (npar == 0) {
188  MATH_ERROR_MSG("Fitter::SetFCN","FCN function has zero parameters ");
189  return false;
190  }
191  if (params != 0 )
192  fConfig.SetParamsSettings(npar, params);
193  else {
194  if ( fConfig.ParamsSettings().size() != npar) {
195  MATH_ERROR_MSG("Fitter::SetFCN","wrong fit parameter settings");
196  return false;
197  }
198  }
199 
200  fBinFit = chi2fit;
201  fDataSize = dataSize;
202 
203  // keep also a copy of FCN function and set this in minimizer so they will be managed together
204  // (remember that cloned copy will still depends on data and model function pointers)
205  fObjFunction = std::unique_ptr<ROOT::Math::IMultiGenFunction> ( fcn.Clone() );
206 
207  return true;
208 }
209 
210 bool Fitter::SetFCN(const ROOT::Math::IMultiGradFunction & fcn, const double * params, unsigned int dataSize , bool chi2fit) {
211  // set the objective function for the fit
212  // if params is not NULL create the parameter settings
213  if (!SetFCN(static_cast<const ROOT::Math::IMultiGenFunction &>(fcn),params, dataSize, chi2fit) ) return false;
214  fUseGradient = true;
215  return true;
216 }
217 
218 bool Fitter::SetFCN(const ROOT::Math::FitMethodFunction & fcn, const double * params) {
219  // set the objective function for the fit
220  // if params is not NULL create the parameter settings
221  bool chi2fit = (fcn.Type() == ROOT::Math::FitMethodFunction::kLeastSquare );
222  if (!SetFCN(fcn,params,fcn.NPoints(), chi2fit) ) return false;
223  fUseGradient = false;
224  fFitType = fcn.Type();
225  return true;
226 }
227 
228 bool Fitter::SetFCN(const ROOT::Math::FitMethodGradFunction & fcn, const double * params) {
229  // set the objective function for the fit
230  // if params is not NULL create the parameter settings
231  bool chi2fit = (fcn.Type() == ROOT::Math::FitMethodGradFunction::kLeastSquare );
232  if (!SetFCN(fcn,params,fcn.NPoints(), chi2fit) ) return false;
233  fUseGradient = true;
234  fFitType = fcn.Type();
235  return true;
236 }
237 
238 
239 
240 bool Fitter::FitFCN(const BaseFunc & fcn, const double * params, unsigned int dataSize, bool chi2fit) {
241  // fit a user provided FCN function
242  // create fit parameter settings
243  if (!SetFCN(fcn, params,dataSize,chi2fit) ) return false;
244  return FitFCN();
245 }
246 
247 
248 
249 bool Fitter::FitFCN(const BaseGradFunc & fcn, const double * params, unsigned int dataSize, bool chi2fit) {
250  // fit a user provided FCN gradient function
251 
252  if (!SetFCN(fcn, params,dataSize, chi2fit) ) return false;
253  return FitFCN();
254 }
255 
256 bool Fitter::FitFCN(const ROOT::Math::FitMethodFunction & fcn, const double * params) {
257  // fit using the passed objective function for the fit
258  if (!SetFCN(fcn, params) ) return false;
259  return FitFCN();
260 }
261 
262 bool Fitter::FitFCN(const ROOT::Math::FitMethodGradFunction & fcn, const double * params) {
263  // fit using the passed objective function for the fit
264  if (!SetFCN(fcn, params) ) return false;
265  return FitFCN();
266 }
267 
268 
269 bool Fitter::SetFCN(MinuitFCN_t fcn, int npar, const double * params , unsigned int dataSize , bool chi2fit ){
270  // set TMinuit style FCN type (global function pointer)
271  // create corresponfing objective function from that function
272 
273  if (npar == 0) {
274  npar = fConfig.ParamsSettings().size();
275  if (npar == 0) {
276  MATH_ERROR_MSG("Fitter::FitFCN","Fit Parameter settings have not been created ");
277  return false;
278  }
279  }
280 
281  ROOT::Fit::FcnAdapter newFcn(fcn,npar);
282  return SetFCN(newFcn,params,dataSize,chi2fit);
283 }
284 
285 bool Fitter::FitFCN(MinuitFCN_t fcn, int npar, const double * params , unsigned int dataSize , bool chi2fit ) {
286  // fit using Minuit style FCN type (global function pointer)
287  // create corresponfing objective function from that function
288  if (!SetFCN(fcn, npar, params, dataSize, chi2fit)) return false;
289  fUseGradient = false;
290  return FitFCN();
291 }
292 
// Body of Fitter::FitFCN() — runs the fit on the previously installed
// objective function (the signature line was lost in this extraction).
294  // fit using the previously set FCN function
295 
296  // in case a model function exists from a previous fit - reset shared-ptr
297  if (fFunc && fResult->FittedFunction() == 0) fFunc.reset();
298 
299  if (!fObjFunction) {
300  MATH_ERROR_MSG("Fitter::FitFCN","Objective function has not been set");
301  return false;
302  }
303  // look if the FCN is of a known type so model function and data objects can be recovered
304  ExamineFCN();
305  // init the minimizer
306  if (!DoInitMinimizer() ) return false;
307  // perform the minimization
308  return DoMinimization();
309 }
310 
// Body of Fitter::EvalFCN() — evaluates the objective function once at the
// parameter values stored in fConfig (signature line lost in this extraction).
312  // evaluate the FCN using the stored values in fConfig
313 
314  if (fFunc && fResult->FittedFunction() == 0) fFunc.reset();
315 
// NOTE(review): the error message below is tagged "Fitter::FitFCN" although
// this is EvalFCN — confirm whether the tag should be corrected.
316  if (!fObjFunction) {
317  MATH_ERROR_MSG("Fitter::FitFCN","Objective function has not been set");
318  return false;
319  }
320  // create a Fit result from the fit configuration
321  fResult = std::make_shared<ROOT::Fit::FitResult>(fConfig);
322  // evaluate one time the FCN
323  double fcnval = (*fObjFunction)(fResult->GetParams() );
324  // update fit result
325  fResult->fVal = fcnval;
326  fResult->fNCalls++;
327  return true;
328 }
329 
// Body of Fitter::DoLeastSquareFit — chi2 fit on binned data (the signature
// line was lost in this extraction). Chooses a scalar or vectorized Chi2FCN,
// with or without gradient, according to the stored model function.
331 {
332 
333  // perform a chi2 fit on a set of binned data
334  std::shared_ptr<BinData> data = std::dynamic_pointer_cast<BinData>(fData);
335  assert(data);
336 
337  // check function
338  if (!fFunc && !fFunc_v) {
339  MATH_ERROR_MSG("Fitter::DoLeastSquareFit", "model function is not set");
340  return false;
341  } else {
342 
343 #ifdef DEBUG
344  std::cout << "Fitter ParamSettings " << Config().ParamsSettings()[3].IsBound() << " lower limit "
345  << Config().ParamsSettings()[3].LowerLimit() << " upper limit "
346  << Config().ParamsSettings()[3].UpperLimit() << std::endl;
347 #endif
348 
349  fBinFit = true;
350  fDataSize = data->Size();
351  // check if fFunc provides gradient
352  if (!fUseGradient) {
353  // do minimization without using the gradient
354  if (fFunc_v) {
355  Chi2FCN<BaseFunc, IModelFunction_v> chi2(data, fFunc_v, executionPolicy);
356  fFitType = chi2.Type();
357  return DoMinimization(chi2);
358  } else {
359  Chi2FCN<BaseFunc> chi2(data, fFunc, executionPolicy);
360  fFitType = chi2.Type();
361  return DoMinimization(chi2);
362  }
363  } else {
364  // use gradient
365  if (fConfig.MinimizerOptions().PrintLevel() > 0)
366  MATH_INFO_MSG("Fitter::DoLeastSquareFit", "use gradient from model function");
367 
368  if (fFunc_v) {
369  std::shared_ptr<IGradModelFunction_v> gradFun = std::dynamic_pointer_cast<IGradModelFunction_v>(fFunc_v);
370  if (gradFun) {
371  Chi2FCN<BaseGradFunc, IModelFunction_v> chi2(data, gradFun);
372  fFitType = chi2.Type();
373  return DoMinimization(chi2);
374  }
375  } else {
376  std::shared_ptr<IGradModelFunction> gradFun = std::dynamic_pointer_cast<IGradModelFunction>(fFunc);
377  if (gradFun) {
378  Chi2FCN<BaseGradFunc> chi2(data, gradFun);
379  fFitType = chi2.Type();
380  return DoMinimization(chi2);
381  }
382  }
383  MATH_ERROR_MSG("Fitter::DoLeastSquareFit", "wrong type of function - it does not provide gradient");
384  }
385  }
386  return false;
387 }
388 
// Binned likelihood fit (Poisson log-likelihood). An equivalent chi2 FCN is
// built alongside so the result can report a chi2 value; with weighted data
// a sum-of-weights-squared correction pass is applied after the minimization.
389 bool Fitter::DoBinnedLikelihoodFit(bool extended, const ROOT::Fit::ExecutionPolicy &executionPolicy)
390 {
391  // perform a likelihood fit on a set of binned data
392  // The fit is extended (Poisson log-likelihood) by default
393 
394  std::shared_ptr<BinData> data = std::dynamic_pointer_cast<BinData>(fData);
395  assert(data);
396 
397  bool useWeight = fConfig.UseWeightCorrection();
398 
399  // check function
400  if (!fFunc && !fFunc_v) {
401  MATH_ERROR_MSG("Fitter::DoBinnedLikelihoodFit", "model function is not set");
402  return false;
403  }
404 
405  // logl fit (error should be 0.5) set if different than default values (of 1)
// NOTE(review): the guard and body that this brace closes (original lines
// 406-407, presumably setting the error definition) were lost in this
// extraction — confirm against the repository source.
408  }
409 
410  if (useWeight && fConfig.MinosErrors()) {
411  MATH_INFO_MSG("Fitter::DoBinnedLikelihoodFit", "MINOS errors cannot be computed in weighted likelihood fits");
412  fConfig.SetMinosErrors(false);
413  }
414 
415  fBinFit = true;
416  fDataSize = data->Size();
417 
418  if (!fUseGradient) {
419  // do minimization without using the gradient
420  if (fFunc_v) {
421  // create a chi2 function to be used for the equivalent chi-square
// NOTE(review): original line 422 (the vectorized Chi2FCN declaration used
// as `&chi2` below) was lost in this extraction.
423  PoissonLikelihoodFCN<BaseFunc, IModelFunction_v> logl(data, fFunc_v, useWeight, extended, executionPolicy);
424  fFitType = logl.Type();
425  // do minimization
426  if (!DoMinimization(logl, &chi2))
427  return false;
428  if (useWeight) {
429  logl.UseSumOfWeightSquare();
430  if (!ApplyWeightCorrection(logl))
431  return false;
432  }
433  } else {
434  // create a chi2 function to be used for the equivalent chi-square
435  Chi2FCN<BaseFunc> chi2(data, fFunc);
436  PoissonLikelihoodFCN<BaseFunc> logl(data, fFunc, useWeight, extended, executionPolicy);
437  fFitType = logl.Type();
438  // do minimization
439  if (!DoMinimization(logl, &chi2))
440  return false;
441  if (useWeight) {
442  logl.UseSumOfWeightSquare();
443  if (!ApplyWeightCorrection(logl))
444  return false;
445  }
446  }
447  } else {
448  if (fFunc_v) {
449  // create a chi2 function to be used for the equivalent chi-square
// NOTE(review): original line 450 (the vectorized Chi2FCN declaration used
// as `&chi2` below) was lost in this extraction.
451  std::shared_ptr<IGradModelFunction_v> gradFun = std::dynamic_pointer_cast<IGradModelFunction_v>(fFunc_v);
452  if (!gradFun) {
453  MATH_ERROR_MSG("Fitter::DoBinnedLikelihoodFit", "wrong type of function - it does not provide gradient");
454  return false;
455  }
456  PoissonLikelihoodFCN<BaseGradFunc, IModelFunction_v> logl(data, gradFun, useWeight, true, executionPolicy);
457  fFitType = logl.Type();
458  // do minimization
459  if (!DoMinimization(logl, &chi2))
460  return false;
461  if (useWeight) {
462  logl.UseSumOfWeightSquare();
463  if (!ApplyWeightCorrection(logl))
464  return false;
465  }
466  } else {
467  // create a chi2 function to be used for the equivalent chi-square
468  Chi2FCN<BaseFunc> chi2(data, fFunc);
// NOTE(review): the info message below is tagged "Fitter::DoLikelihoodFit"
// although this is DoBinnedLikelihoodFit — confirm intended tag.
469  if (fConfig.MinimizerOptions().PrintLevel() > 0)
470  MATH_INFO_MSG("Fitter::DoLikelihoodFit", "use gradient from model function");
471  // check if fFunc provides gradient
472  std::shared_ptr<IGradModelFunction> gradFun = std::dynamic_pointer_cast<IGradModelFunction>(fFunc);
473  if (!gradFun) {
474  MATH_ERROR_MSG("Fitter::DoBinnedLikelihoodFit", "wrong type of function - it does not provide gradient");
475  return false;
476  }
477  // use gradient for minimization
478  // not-extended is not implemented in this case
479  if (!extended) {
480  MATH_WARN_MSG("Fitter::DoBinnedLikelihoodFit",
481  "Not-extended binned fit with gradient not yet supported - do an extended fit");
482  }
483  PoissonLikelihoodFCN<BaseGradFunc> logl(data, gradFun, useWeight, true, executionPolicy);
484  fFitType = logl.Type();
485  // do minimization
486  if (!DoMinimization(logl, &chi2))
487  return false;
488  if (useWeight) {
489  logl.UseSumOfWeightSquare();
490  if (!ApplyWeightCorrection(logl))
491  return false;
492  }
493  }
494  }
495  return true;
496 }
497 
// Unbinned likelihood fit. Selects a scalar or vectorized LogLikelihoodFCN,
// with or without gradient; weighted fits get a sum-of-weights-squared
// correction pass after the minimization.
498 bool Fitter::DoUnbinnedLikelihoodFit(bool extended, const ROOT::Fit::ExecutionPolicy &executionPolicy) {
499  // perform a likelihood fit on a set of unbinned data
500 
501  std::shared_ptr<UnBinData> data = std::dynamic_pointer_cast<UnBinData>(fData);
502  assert(data);
503 
504  bool useWeight = fConfig.UseWeightCorrection();
505 
506  if (!fFunc && !fFunc_v) {
507  MATH_ERROR_MSG("Fitter::DoUnbinnedLikelihoodFit","model function is not set");
508  return false;
509  }
510 
511  if (useWeight && fConfig.MinosErrors() ) {
512  MATH_INFO_MSG("Fitter::DoUnbinnedLikelihoodFit","MINOS errors cannot be computed in weighted likelihood fits");
513  fConfig.SetMinosErrors(false);
514  }
515 
516 
517  fBinFit = false;
518  fDataSize = data->Size();
519 
520 #ifdef DEBUG
521  int ipar = 0;
522  std::cout << "Fitter ParamSettings " << Config().ParamsSettings()[ipar].IsBound() << " lower limit " << Config().ParamsSettings()[ipar].LowerLimit() << " upper limit " << Config().ParamsSettings()[ipar].UpperLimit() << std::endl;
523 #endif
524 
525  // logl fit (error should be 0.5) set if different than default values (of 1)
// NOTE(review): the guard and body that this brace closes (original lines
// 526-527, presumably setting the error definition) were lost in this
// extraction — confirm against the repository source.
528  }
529 
530  if (!fUseGradient) {
531  // do minimization without using the gradient
532  if (fFunc_v ){
533  LogLikelihoodFCN<BaseFunc, IModelFunction_v> logl(data, fFunc_v, useWeight, extended, executionPolicy);
534  fFitType = logl.Type();
535  if (!DoMinimization (logl) ) return false;
536  if (useWeight) {
537  logl.UseSumOfWeightSquare();
538  if (!ApplyWeightCorrection(logl) ) return false;
539  }
540  return true;
541  } else {
542  LogLikelihoodFCN<BaseFunc> logl(data, fFunc, useWeight, extended, executionPolicy);
543 
544  fFitType = logl.Type();
545  if (!DoMinimization (logl) ) return false;
546  if (useWeight) {
547  logl.UseSumOfWeightSquare();
548  if (!ApplyWeightCorrection(logl) ) return false;
549  }
550  return true;
551  }
552  } else {
553  // use gradient : check if fFunc provides gradient
554  if (fFunc_v) {
555  if (fConfig.MinimizerOptions().PrintLevel() > 0)
556  MATH_INFO_MSG("Fitter::DoUnbinnedLikelihoodFit", "use gradient from model function");
557  std::shared_ptr<IGradModelFunction_v> gradFun = std::dynamic_pointer_cast<IGradModelFunction_v>(fFunc_v);
558  if (gradFun) {
559  if (extended) {
560  MATH_WARN_MSG("Fitter::DoUnbinnedLikelihoodFit",
561  "Extended unbinned fit with gradient not yet supported - do a not-extended fit");
562  }
563  LogLikelihoodFCN<BaseGradFunc, IModelFunction_v> logl(data, gradFun, useWeight, extended);
564  fFitType = logl.Type();
565  if (!DoMinimization(logl))
566  return false;
567  if (useWeight) {
568  logl.UseSumOfWeightSquare();
569  if (!ApplyWeightCorrection(logl))
570  return false;
571  }
572  return true;
573  }
574  MATH_ERROR_MSG("Fitter::DoUnbinnedLikelihoodFit", "wrong type of function - it does not provide gradient");
575 
576  } else {
577  if (fConfig.MinimizerOptions().PrintLevel() > 0)
578  MATH_INFO_MSG("Fitter::DoUnbinnedLikelihoodFit", "use gradient from model function");
579  std::shared_ptr<IGradModelFunction> gradFun = std::dynamic_pointer_cast<IGradModelFunction>(fFunc);
580  if (gradFun) {
581  if (extended) {
582  MATH_WARN_MSG("Fitter::DoUnbinnedLikelihoodFit",
583  "Extended unbinned fit with gradient not yet supported - do a not-extended fit");
584  }
585  LogLikelihoodFCN<BaseGradFunc> logl(data, gradFun, useWeight, extended);
586  fFitType = logl.Type();
587  if (!DoMinimization(logl))
588  return false;
589  if (useWeight) {
590  logl.UseSumOfWeightSquare();
591  if (!ApplyWeightCorrection(logl))
592  return false;
593  }
594  return true;
595  }
596  MATH_ERROR_MSG("Fitter::DoUnbinnedLikelihoodFit", "wrong type of function - it does not provide gradient");
597  }
598  }
599  return false;
600 }
601 
602 
604 
// Body of Fitter::DoLinearFit (signature line lost in this extraction):
// temporarily switches the minimizer to "Linear", runs a least-square fit,
// then restores the previously configured minimizer.
605  std::shared_ptr<BinData> data = std::dynamic_pointer_cast<BinData>(fData);
606  assert(data);
607 
608  // perform a linear fit on a set of binned data
609  std::string prevminimizer = fConfig.MinimizerType();
610  fConfig.SetMinimizer("Linear");
611 
612  fBinFit = true;
613 
614  bool ret = DoLeastSquareFit();
615  fConfig.SetMinimizer(prevminimizer.c_str());
616  return ret;
617 }
618 
619 
// Body of Fitter::CalculateHessErrors (signature line lost in this
// extraction): re-runs HESSE on the current minimizer and updates the
// stored FitResult and, optionally, the FitConfig.
621  // compute the Hesse errors according to configuration
622  // set in the parameters and append value in fit result
623  if (fObjFunction.get() == 0) {
624  MATH_ERROR_MSG("Fitter::CalculateHessErrors","Objective function has not been set");
625  return false;
626  }
627  // assume a fResult pointer always exists
628  assert (fResult.get() );
629 
630  // need a special treatment in case of weighted likelihood fit
631  // (not yet implemented)
632  if (fFitType == 2 && fConfig.UseWeightCorrection() ) {
633  MATH_ERROR_MSG("Fitter::CalculateHessErrors","Re-computation of Hesse errors not implemented for weighted likelihood fits");
634  MATH_INFO_MSG("Fitter::CalculateHessErrors","Do the Fit using configure option FitConfig::SetParabErrors()");
635  return false;
636  }
637  // if (!fUseGradient ) {
638  // ROOT::Math::FitMethodFunction * fcn = dynamic_cast< ROOT::Math::FitMethodFunction *>(fObjFunction.get());
639  // if (fcn && fcn->Type() == ROOT::Math::FitMethodFunction::kLogLikelihood) {
640  // if (!fBinFit) {
641  // ROOT::Math::LogLikelihoodFunction * nll = dynamic_cast< ROOT::Math::LogLikelihoodFunction *>(fcn);
642  // assert(nll);
643  // nll->UseSumOfWeightSquare(false);
644  // }
645  // else {
646  // ROOT::Math::PoissonLikelihoodFunction * nll = dynamic_cast< ROOT::Math::PoissonLikelihoodFunction *>(fcn);
647  // assert(nll);
648  // nll->UseSumOfWeightSquare(false);
649  // }
650  // // reset fcn in minimizer
651  // }
652 
653 
654  // create minimizer if not done or if name has changed
655  if ( !fMinimizer ||
656  fResult->MinimizerType().find(fConfig.MinimizerType()) == std::string::npos ) {
657  bool ret = DoInitMinimizer();
658  if (!ret) {
659  MATH_ERROR_MSG("Fitter::CalculateHessErrors","Error initializing the minimizer");
660  return false;
661  }
662  }
663 
664 
665  if (!fMinimizer ) {
666  MATH_ERROR_MSG("Fitter::CalculateHessErrors","Need to do a fit before calculating the errors");
667  return false;
668  }
669 
670  //run Hesse
671  bool ret = fMinimizer->Hesse();
672  if (!ret) MATH_WARN_MSG("Fitter::CalculateHessErrors","Error when calculating Hessian");
673 
674 
675  // update minimizer results with what comes out from Hesse
676  // in case is empty - create from a FitConfig
677  if (fResult->IsEmpty() )
678  fResult = std::unique_ptr<ROOT::Fit::FitResult>(new ROOT::Fit::FitResult(fConfig) );
679 
680 
681  // re-give a minimizer instance in case it has been changed
682  ret |= fResult->Update(fMinimizer, ret);
683 
684  // when possible get ncalls from FCN and set in fit result
// NOTE(review): the guard opening this block (original line 685) was lost
// in this extraction — confirm against the repository source.
686  fResult->fNCalls = GetNCallsFromFCN();
687  }
688 
689  // set also new errors in FitConfig
690  if (fConfig.UpdateAfterFit() && ret) DoUpdateFitConfig();
691 
692  return ret;
693 }
694 
695 
697  // compute the Minos errors according to configuration
698  // set in the parameters and append value in fit result
699  // normally Minos errors are computed just after the minimization
700  // (in DoMinimization) when creating the FItResult if the
701  // FitConfig::MinosErrors() flag is set
702 
703  if (!fMinimizer.get() ) {
704  MATH_ERROR_MSG("Fitter::CalculateMinosErrors","Minimizer does not exist - cannot calculate Minos errors");
705  return false;
706  }
707 
708  if (!fResult.get() || fResult->IsEmpty() ) {
709  MATH_ERROR_MSG("Fitter::CalculateMinosErrors","Invalid Fit Result - cannot calculate Minos errors");
710  return false;
711  }
712 
713  if (fFitType == 2 && fConfig.UseWeightCorrection() ) {
714  MATH_ERROR_MSG("Fitter::CalculateMinosErrors","Computation of MINOS errors not implemented for weighted likelihood fits");
715  return false;
716  }
717 
718  // set flag to compute Minos error to false in FitConfig to avoid that
719  // following minimizaiton calls perform unwanted Minos error calculations
720  fConfig.SetMinosErrors(false);
721 
722 
723  const std::vector<unsigned int> & ipars = fConfig.MinosParams();
724  unsigned int n = (ipars.size() > 0) ? ipars.size() : fResult->Parameters().size();
725  bool ok = false;
726  for (unsigned int i = 0; i < n; ++i) {
727  double elow, eup;
728  unsigned int index = (ipars.size() > 0) ? ipars[i] : i;
729  bool ret = fMinimizer->GetMinosError(index, elow, eup);
730  if (ret) fResult->SetMinosError(index, elow, eup);
731  ok |= ret;
732  }
733  if (!ok)
734  MATH_ERROR_MSG("Fitter::CalculateMinosErrors","Minos error calculation failed for all parameters");
735 
736  return ok;
737 }
738 
739 
740 
741 // traits for distinguishing fit-method functions from generic objective functions
// Primary template: a generic objective function exposes no call count,
// has no known fit type (-1) and provides no gradient.
742 template<class Func>
743 struct ObjFuncTrait {
744  static unsigned int NCalls(const Func & ) { return 0; }
745  static int Type(const Func & ) { return -1; }
746  static bool IsGrad() { return false; }
747 };
// Specialization for fit-method functions: forward NCalls/Type to the object.
748 template<>
749 struct ObjFuncTrait<ROOT::Math::FitMethodFunction> {
750  static unsigned int NCalls(const ROOT::Math::FitMethodFunction & f ) { return f.NCalls(); }
751  static int Type(const ROOT::Math::FitMethodFunction & f) { return f.Type(); }
752  static bool IsGrad() { return false; }
753 };
// Specialization for gradient-providing fit-method functions.
754 template<>
755 struct ObjFuncTrait<ROOT::Math::FitMethodGradFunction> {
756  static unsigned int NCalls(const ROOT::Math::FitMethodGradFunction & f ) { return f.NCalls(); }
757  static int Type(const ROOT::Math::FitMethodGradFunction & f) { return f.Type(); }
758  static bool IsGrad() { return true; }
759 };
760 
// Body of Fitter::DoInitMinimizer (signature line lost in this extraction):
// creates the configured minimizer, installs the objective function
// (downcast when a gradient is used) and the parameter settings.
762  //initialize minimizer by creating it
763  // and set there the objective function
764  // obj function must have been copied before
765  assert(fObjFunction.get() );
766 
767  // check configuration and objective function
768  if ( fConfig.ParamsSettings().size() != fObjFunction->NDim() ) {
769  MATH_ERROR_MSG("Fitter::DoInitMinimizer","wrong function dimension or wrong size for FitConfig");
770  return false;
771  }
772 
773  // create first Minimizer
774  // assigning the shared_ptr will delete any previously existing one
// NOTE(review): the error message below is tagged "Fitter::FitFCN" although
// this is DoInitMinimizer — confirm intended tag.
775  fMinimizer = std::shared_ptr<ROOT::Math::Minimizer> ( fConfig.CreateMinimizer() );
776  if (fMinimizer.get() == 0) {
777  MATH_ERROR_MSG("Fitter::FitFCN","Minimizer cannot be created");
778  return false;
779  }
780 
781  // in case of gradient function one needs to downcast the pointer
782  if (fUseGradient) {
783  const ROOT::Math::IMultiGradFunction * gradfcn = dynamic_cast<const ROOT::Math::IMultiGradFunction *> (fObjFunction.get() );
784  if (!gradfcn) {
785  MATH_ERROR_MSG("Fitter::DoInitMinimizer","wrong type of function - it does not provide gradient");
786  return false;
787  }
788  fMinimizer->SetFunction( *gradfcn);
789  }
790  else
791  fMinimizer->SetFunction( *fObjFunction);
792 
793 
794  fMinimizer->SetVariables(fConfig.ParamsSettings().begin(), fConfig.ParamsSettings().end() );
795 
796  // if requested parabolic error do correct error analysis by the minimizer (call HESSE)
797  if (fConfig.ParabErrors()) fMinimizer->SetValidError(true);
798 
799  return true;
800 
801 }
802 
803 
// Body of Fitter::DoMinimization (signature line lost in this extraction):
// runs the already-initialized minimizer and fills/updates the FitResult.
805  // perform the minimization (assume we have already initialized the minimizer)
806 
807  assert(fMinimizer );
808 
809  bool ret = fMinimizer->Minimize();
810 
811  // unsigned int ncalls = ObjFuncTrait<ObjFunc>::NCalls(*fcn);
812  // int fitType = ObjFuncTrait<ObjFunc>::Type(objFunc);
813 
814  if (!fResult) fResult = std::make_shared<FitResult>();
815  fResult->FillResult(fMinimizer,fConfig, fFunc, ret, fDataSize, fBinFit, chi2func );
816 
817  // when possible get ncalls from FCN and set in fit result
// NOTE(review): the guard opening this block (original line 818) was lost
// in this extraction — confirm against the repository source.
819  fResult->fNCalls = GetNCallsFromFCN();
820  }
821 
822  // fill information in fit result
823  fResult->fObjFunc = fObjFunction;
824  fResult->fFitData = fData;
825 
826 
827 #ifdef DEBUG
828  std::cout << "ROOT::Fit::Fitter::DoMinimization : ncalls = " << fResult->fNCalls << " type of objfunc " << fFitFitResType << " typeid: " << typeid(*fObjFunction).name() << " use gradient " << fUseGradient << std::endl;
829 #endif
830 
// NOTE(review): the condition guarding error normalization (original line
// 831) was lost in this extraction.
832  fResult->NormalizeErrors();
833 
834  // set also new parameter values and errors in FitConfig
835  if (fConfig.UpdateAfterFit() && ret) DoUpdateFitConfig();
836 
837  return ret;
838 }
839 
840 bool Fitter::DoMinimization(const BaseFunc & objFunc, const ROOT::Math::IMultiGenFunction * chi2func) {
841  // perform the minimization initializing the minimizer starting from a given obj function
842 
843  // keep also a copy of FCN function and set this in minimizer so they will be managed together
844  // (remember that cloned copy will still depends on data and model function pointers)
845  fObjFunction = std::unique_ptr<ROOT::Math::IMultiGenFunction> ( objFunc.Clone() );
846  if (!DoInitMinimizer()) return false;
847  return DoMinimization(chi2func);
848 }
849 
850 
// Body of Fitter::DoUpdateFitConfig (signature line lost in this extraction):
// copies fitted values (and errors, as step sizes) back into the FitConfig.
852  // update the fit configuration after a fit using the obtained result
853  if (fResult->IsEmpty() || !fResult->IsValid() ) return;
854  for (unsigned int i = 0; i < fConfig.NPar(); ++i) {
// NOTE(review): original line 855 (the declaration of `par`, presumably a
// reference to fConfig's i-th parameter settings) was lost in this extraction.
856  par.SetValue( fResult->Value(i) );
857  if (fResult->Error(i) > 0) par.SetStepSize( fResult->Error(i) );
858  }
859 }
860 
// Body of Fitter::GetNCallsFromFCN (signature line lost in this extraction):
// asks the stored objective function for its call count, when it is one of
// the fit-method function types; returns 0 otherwise.
862  // retrieve ncalls from the fit method functions
863  // this function is called when the minimizer does not provide a way of returning the number of function calls
864  int ncalls = 0;
865  if (!fUseGradient) {
866  const ROOT::Math::FitMethodFunction * fcn = dynamic_cast<const ROOT::Math::FitMethodFunction *>(fObjFunction.get());
867  if (fcn) ncalls = fcn->NCalls();
868  }
869  else {
870  const ROOT::Math::FitMethodGradFunction * fcn = dynamic_cast<const ROOT::Math::FitMethodGradFunction*>(fObjFunction.get());
871  if (fcn) ncalls = fcn->NCalls();
872  }
873  return ncalls;
874 }
875 
876 
877 bool Fitter::ApplyWeightCorrection(const ROOT::Math::IMultiGenFunction & loglw2, bool minimizeW2L) {
878  // apply correction for weight square
879  // Compute Hessian of the loglikelihood function using the sum of the weight squared
880  // This method assumes:
881  // - a fit has been done before and a covariance matrix exists
882  // - the objective function is a likelihood function and Likelihood::UseSumOfWeightSquare()
883  // has been called before
884 
885  if (fMinimizer.get() == 0) {
886  MATH_ERROR_MSG("Fitter::ApplyWeightCorrection","Must perform first a fit before applying the correction");
887  return false;
888  }
889 
890  unsigned int n = loglw2.NDim();
891  // correct errors for weight squared
892  std::vector<double> cov(n*n);
893  bool ret = fMinimizer->GetCovMatrix(&cov[0] );
894  if (!ret) {
895  MATH_ERROR_MSG("Fitter::ApplyWeightCorrection","Previous fit has no valid Covariance matrix");
896  return false;
897  }
898  // need to re-init the minimizer and set w2
899  fObjFunction = std::unique_ptr<ROOT::Math::IMultiGenFunction> ( loglw2.Clone() );
900  // need to re-initialize the minimizer for the changes applied in the
901  // objective functions
902  if (!DoInitMinimizer()) return false;
903 
904  //std::cout << "Running Hesse ..." << std::endl;
905 
906  // run eventually before a minimization
907  // ignore its error
908  if (minimizeW2L) fMinimizer->Minimize();
909  // run Hesse on the log-likelihood build using sum of weight squared
910  ret = fMinimizer->Hesse();
911  if (!ret) {
912  MATH_ERROR_MSG("Fitter::ApplyWeightCorrection","Error running Hesse on weight2 likelihood - cannot compute errors");
913  return false;
914  }
915 
916  if (fMinimizer->CovMatrixStatus() != 3) {
917  MATH_WARN_MSG("Fitter::ApplyWeightCorrection","Covariance matrix for weighted likelihood is not accurate, the errors may be not reliable");
918  if (fMinimizer->CovMatrixStatus() == 2)
919  MATH_WARN_MSG("Fitter::ApplyWeightCorrection","Covariance matrix for weighted likelihood was forced to be defined positive");
920  if (fMinimizer->CovMatrixStatus() <= 0)
921  // probably should have failed before
922  MATH_ERROR_MSG("Fitter::ApplyWeightCorrection","Covariance matrix for weighted likelihood is not valid !");
923  }
924 
925  // std::vector<double> c(n*n);
926  // ret = fMinimizer->GetCovMatrix(&c2[0] );
927  // if (!ret) std::cout << "Error reading cov matrix " << fMinimizer->Status() << std::endl;
928  // TMatrixDSym cmat2(n,&c2[0]);
929  // std::cout << "Cov matrix of w2 " << std::endl;
930  // cmat2.Print();
931  // cmat2.Invert();
932  // std::cout << "Hessian of w2 " << std::endl;
933  // cmat2.Print();
934 
935  // get Hessian matrix from weight-square likelihood
936  std::vector<double> hes(n*n);
937  ret = fMinimizer->GetHessianMatrix(&hes[0] );
938  if (!ret) {
939  MATH_ERROR_MSG("Fitter::ApplyWeightCorrection","Error retrieving Hesse on weight2 likelihood - cannot compute errors");
940  return false;
941  }
942 
943  // for debug
944  // std::cout << "Hessian W2 matrix " << std::endl;
945  // for (unsigned int i = 0; i < n; ++i) {
946  // for (unsigned int j = 0; j < n; ++j) {
947  // std::cout << std::setw(12) << hes[i*n + j] << " , ";
948  // }
949  // std::cout << std::endl;
950  // }
951 
952  // perform product of matvrix cov * hes * cov
953  // since we do not want to add matrix dependence do product by hand
954  // first do hes * cov
955  std::vector<double> tmp(n*n);
956  for (unsigned int i = 0; i < n; ++i) {
957  for (unsigned int j = 0; j < n; ++j) {
958  for (unsigned int k = 0; k < n; ++k)
959  tmp[i*n+j] += hes[i*n + k] * cov[k*n + j];
960  }
961  }
962  // do multiplication now cov * tmp save result
963  std::vector<double> newCov(n*n);
964  for (unsigned int i = 0; i < n; ++i) {
965  for (unsigned int j = 0; j < n; ++j) {
966  for (unsigned int k = 0; k < n; ++k)
967  newCov[i*n+j] += cov[i*n + k] * tmp[k*n + j];
968  }
969  }
970  // update fit result with new corrected covariance matrix
971  unsigned int k = 0;
972  for (unsigned int i = 0; i < n; ++i) {
973  fResult->fErrors[i] = std::sqrt( newCov[i*(n+1)] );
974  for (unsigned int j = 0; j <= i; ++j)
975  fResult->fCovMatrix[k++] = newCov[i *n + j];
976  }
977  //fResult->PrintCovMatrix(std::cout);
978 
979  return true;
980 }
981 
982 
983 
// Body of Fitter::ExamineFCN (signature line lost in this extraction).
// NOTE(review): the actual type-probing statements (original lines 989-995)
// were also lost — only the comments and the final return are visible here;
// confirm against the repository source before relying on this listing.
985  // return a pointer to the binned data used in the fit
986  // works only for chi2 or binned likelihood fits
987  // thus when the objective function stored is a Chi2Func or a PoissonLikelihood
988  // The function also sets the model function correctly if it has not been set
992 
995 
996  //MATH_INFO_MSG("Fitter::ExamineFCN","Objective function is not of a known type - FitData and ModelFunction objects are not available");
997  return;
998 }
999 
1000  } // end namespace Fit
1001 
1002 } // end namespace ROOT
void ExamineFCN()
look at the user-provided FCN and get data and model function if they derive from ROOT::Fit FCN class...
Definition: Fitter.cxx:984
Interface (abstract class) for multi-dimensional functions providing a gradient calculation.
Definition: IFunction.h:326
Namespace for new ROOT classes and functions.
Definition: StringConv.hxx:21
void(* MinuitFCN_t)(int &npar, double *gin, double &f, double *u, int flag)
fit using user provided FCN with Minuit-like interface If npar = 0 it is assumed that the parameters ...
Definition: Fitter.h:310
ROOT::Math::Minimizer * CreateMinimizer()
create a new minimizer according to chosen configuration
Definition: FitConfig.cxx:174
bool CalculateMinosErrors()
perform an error analysis on the result using MINOS To be called only after fitting and when a minimi...
Definition: Fitter.cxx:696
Fitter & operator=(const Fitter &rhs)
Assignment operator (disabled, class is not copyable)
Definition: Fitter.cxx:85
bool EvalFCN()
Perform a simple FCN evaluation.
Definition: Fitter.cxx:311
Class, describing value, limits and step size of the parameters Provides functionality also to set/re...
bool NormalizeErrors() const
flag to check if resulting errors are to be normalized according to chi2/ndf
Definition: FitConfig.h:199
const std::vector< unsigned int > & MinosParams() const
return vector of parameter indices for which the Minos Error will be computed
Definition: FitConfig.h:215
LogLikelihoodFCN class for likelihood fits.
void UseSumOfWeightSquare(bool on=true)
unsigned int NPar() const
number of parameter settings
Definition: FitConfig.h:95
std::shared_ptr< IModelFunction > fFunc
copy of the fitted function containing on output the fit result
Definition: Fitter.h:508
virtual unsigned int NPoints() const
return the number of data points used in evaluating the function
virtual IBaseFunctionMultiDimTempl< T > * Clone() const =0
Clone a function.
Class describing the unbinned data sets (just x coordinates values) of any dimensions.
Definition: UnBinData.h:42
#define f(i)
Definition: RSha256.hxx:104
Interface (abstract class) for parametric gradient multi-dimensional functions providing in addition ...
bool DoLinearFit()
linear least square fit
Definition: Fitter.cxx:603
ROOT::Math::MinimizerOptions & MinimizerOptions()
access to the minimizer control parameter (non const method)
Definition: FitConfig.h:166
void SetErrorDef(double err)
set error def
#define MATH_WARN_MSG(loc, str)
Definition: Error.h:47
std::shared_ptr< ROOT::Fit::FitResult > fResult
pointer to the object containing the result of the fit
Definition: Fitter.h:510
MultiDimParamFunctionAdapter class to wrap a one-dimensional parametric function in a multi dimension...
bool CalculateHessErrors()
perform an error analysis on the result using the Hessian Errors are obtained from the inverse of the ...
Definition: Fitter.cxx:620
const ParameterSettings & ParSettings(unsigned int i) const
get the parameter settings for the i-th parameter (const method)
Definition: FitConfig.h:75
bool DoBinnedLikelihoodFit(bool extended=true, const ROOT::Fit::ExecutionPolicy &executionPolicy=ROOT::Fit::ExecutionPolicy::kSerial)
binned likelihood fit
Definition: Fitter.cxx:389
virtual BaseObjFunction::Type_t Type() const
get type of fit method function
void SetValue(double val)
set the value
void CreateParamsSettings(const ROOT::Math::IParamMultiFunctionTempl< T > &func)
set the parameter settings from a model function.
Definition: FitConfig.h:108
double sqrt(double)
Fitter()
Default constructor.
Definition: Fitter.cxx:52
bool UseWeightCorrection() const
Apply Weight correction for error matrix computation.
Definition: FitConfig.h:211
virtual BaseObjFunction::Type_t Type() const
get type of fit method function
bool DoInitMinimizer()
Definition: Fitter.cxx:761
class evaluating the log likelihood for binned Poisson likelihood fits it is template to distinguish ...
int PrintLevel() const
non-static methods for retrieving options
bool DoLeastSquareFit(const ROOT::Fit::ExecutionPolicy &executionPolicy=ROOT::Fit::ExecutionPolicy::kSerial)
least square fit
Definition: Fitter.cxx:330
virtual Type_t Type() const
return the type of method, override if needed
bool UpdateAfterFit() const
Update configuration after a fit using the FitResult.
Definition: FitConfig.h:208
std::shared_ptr< IModelFunction_v > fFunc_v
Definition: Fitter.h:506
bool ParabErrors() const
do analysis for parabolic errors
Definition: FitConfig.h:202
bool ApplyWeightCorrection(const ROOT::Math::IMultiGenFunction &loglw2, bool minimizeW2L=false)
apply correction in the error matrix for the weights for likelihood fits This method can be called on...
Definition: Fitter.cxx:877
const FitConfig & Config() const
access to the fit configuration (const method)
Definition: Fitter.h:393
#define MATH_ERROR_MSG(loc, str)
Definition: Error.h:50
void SetMinosErrors(bool on=true)
set Minos errors computation to be performed after fitting
Definition: FitConfig.h:226
Documentation for the abstract class IBaseFunctionMultiDim.
Definition: IFunction.h:62
Chi2FCN class for binnned fits using the least square methods.
Definition: Chi2FCN.h:49
~Fitter()
Destructor.
Definition: Fitter.cxx:71
std::shared_ptr< ROOT::Fit::FitData > fData
pointer to the fit data (binned or unbinned data)
Definition: Fitter.h:514
FitConfig fConfig
Definition: Fitter.h:504
#define MATH_INFO_MSG(loc, str)
Definition: Error.h:44
void SetMinimizer(const char *type, const char *algo=0)
set minimizer type
Definition: FitConfig.h:180
void SetStepSize(double err)
set the step size
BasicFCN class: base class for the objective functions used in the fits It has a reference to the dat...
Definition: BasicFCN.h:40
Fitter class, entry point for performing all type of fits.
Definition: Fitter.h:77
const std::vector< ROOT::Fit::ParameterSettings > & ParamsSettings() const
get the vector of parameter settings (const method)
Definition: FitConfig.h:85
bool FitFCN()
Perform a fit with the previously set FCN function.
Definition: Fitter.cxx:293
Class describing the binned data sets : vectors of x coordinates, y values and optionally error on y ...
Definition: BinData.h:53
void SetFunction(const IModelFunction &func, bool useGradient=false)
Set the fitted function (model function) from a parametric function interface.
Definition: Fitter.cxx:104
std::shared_ptr< ROOT::Math::Minimizer > fMinimizer
pointer to the used minimizer
Definition: Fitter.h:512
Type
enumeration specifying the integration types.
Interface (abstract class) for parametric one-dimensional gradient functions providing in addition to...
class containing the result of the fit and all the related information (fitted parameter values...
Definition: FitResult.h:48
Specialized IParamFunction interface (abstract class) for one-dimensional parametric functions It is ...
FitMethodFunction class Interface for objective functions (like chi2 and likelihood used in the fit) ...
Definition: Fitter.h:40
TFitResultPtr Fit(FitObject *h1, TF1 *f1, Foption_t &option, const ROOT::Math::MinimizerOptions &moption, const char *goption, ROOT::Fit::DataRange &range)
Definition: HFitImpl.cxx:134
bool DoUnbinnedLikelihoodFit(bool extended=false, const ROOT::Fit::ExecutionPolicy &executionPolicy=ROOT::Fit::ExecutionPolicy::kSerial)
un-binned likelihood fit
Definition: Fitter.cxx:498
const std::string & MinimizerType() const
return type of minimizer package
Definition: FitConfig.h:188
std::shared_ptr< ROOT::Math::IMultiGenFunction > fObjFunction
pointer to the objective function used in the fit
Definition: Fitter.h:516
bool GetDataFromFCN()
internal functions to get data set and model function from FCN useful for fits done with customized F...
Definition: Fitter.h:524
virtual unsigned int NCalls() const
return the total number of function calls (overridden if needed)
bool MinosErrors() const
do minos errors analysis on the parameters
Definition: FitConfig.h:205
bool fUseGradient
Definition: Fitter.h:494
bool SetFCN(unsigned int npar, Function &fcn, const double *params=0, unsigned int dataSize=0, bool chi2fit=false)
Set a generic FCN function as a C++ callable object implementing double () (const double *) Note that...
Definition: Fitter.h:595
MultiDimParamGradFunctionAdapter class to wrap a one-dimensional parametric gradient function in a mu...
void SetParamsSettings(unsigned int npar, const double *params, const double *vstep=0)
set the parameter settings from number of parameters and a vector of values and optionally step value...
Definition: FitConfig.cxx:136
double ErrorDef() const
error definition
virtual BaseObjFunction::Type_t Type() const
get type of fit method function
Definition: Chi2FCN.h:134
int GetNCallsFromFCN()
Definition: Fitter.cxx:861
void DoUpdateFitConfig()
Definition: Fitter.cxx:851
bool DoMinimization(const BaseFunc &f, const ROOT::Math::IMultiGenFunction *chifunc=0)
do minimization
Definition: Fitter.cxx:840
const Int_t n
Definition: legend1.C:16
double gDefaultErrorDef
Definition: Fitter.cxx:49
virtual unsigned int NDim() const =0
Retrieve the dimension of the function.