ROOT Reference Guide
Fitter.cxx
1 // @(#)root/mathcore:$Id$
2 // Author: L. Moneta Mon Sep 4 17:00:10 2006
3 
4 /**********************************************************************
5  *                                                                    *
6  * Copyright (c) 2006  LCG ROOT Math Team, CERN/PH-SFT                *
7  *                                                                    *
8  *                                                                    *
9  **********************************************************************/
10 
11 // Implementation file for class Fitter
12 
13 
14 #include "Fit/Fitter.h"
15 #include "Fit/Chi2FCN.h"
16 #include "Fit/PoissonLikelihoodFCN.h"
17 #include "Fit/LogLikelihoodFCN.h"
18 #include "Math/Minimizer.h"
19 #include "Math/MinimizerOptions.h"
20 #include "Math/FitMethodFunction.h"
21 #include "Fit/BasicFCN.h"
22 #include "Fit/BinData.h"
23 #include "Fit/UnBinData.h"
24 #include "Fit/FcnAdapter.h"
25 #include "Fit/FitConfig.h"
26 #include "Fit/FitResult.h"
27 #include "Math/Error.h"
28 
29 #include <memory>
30 
31 #include "Math/IParamFunction.h"
32 
33 #include "Math/MultiDimParamFunctionAdapter.h"
34 
35 // #include "TMatrixDSym.h"
36 // for debugging
37 //#include "TMatrixD.h"
38 // #include <iomanip>
39 
40 namespace ROOT {
41 
42  namespace Fit {
43 
44 // use a static variable to get default minimizer options for error def
45 // to see if user has changed it later on. If it has not been changed we set
46 // for the likelihood method an error def of 0.5
47 // t.b.d.: multiply the likelihood by 2 so it has the same error def definition as chi2
48 double gDefaultErrorDef = ROOT::Math::MinimizerOptions::DefaultErrorDef();
49 
50 
51 Fitter::Fitter() :
52  fUseGradient(false),
53  fBinFit(false),
54  fFitType(0),
55  fDataSize(0)
56 {}
57 
58 Fitter::Fitter(const std::shared_ptr<FitResult> & result) :
59  fUseGradient(false),
60  fBinFit(false),
61  fFitType(0),
62  fDataSize(0),
63  fResult(result)
64 {
65  if (result->fFitFunc) SetFunction(*fResult->fFitFunc); // this will also create the configuration
66  if (result->fObjFunc) fObjFunction = fResult->fObjFunc;
67  if (result->fFitData) fData = fResult->fFitData;
68 }
69 
70 Fitter::~Fitter()
71 {
72  // Destructor implementation.
73 
74  // nothing to do since we use shared_ptr now
75 }
76 
77 Fitter::Fitter(const Fitter & rhs)
78 {
79  // Implementation of copy constructor.
80  // copy FitResult, FitConfig and clone fit function
81  (*this) = rhs;
82 }
83 
84 Fitter & Fitter::operator=(const Fitter &rhs)
85 {
86  // Implementation of assignment operator.
87  // dummy implementation, since it is private
88  if (this == &rhs) return *this; // time saving self-test
89 // fUseGradient = rhs.fUseGradient;
90 // fBinFit = rhs.fBinFit;
91 // fResult = rhs.fResult;
92 // fConfig = rhs.fConfig;
93 // // function is copied and managed by FitResult (maybe should use an unique_ptr)
94 // fFunc = fResult.ModelFunction();
95 // if (rhs.fFunc != 0 && fResult.ModelFunction() == 0) { // case no fit has been done yet - then clone
96 // if (fFunc) delete fFunc;
97 // fFunc = dynamic_cast<IModelFunction *>( (rhs.fFunc)->Clone() );
98 // assert(fFunc != 0);
99 // }
100  return *this;
101 }
102 
103 void Fitter::SetFunction(const IModelFunction & func, bool useGradient)
104 {
105 
106  fUseGradient = useGradient;
107  if (fUseGradient) {
108  const IGradModelFunction * gradFunc = dynamic_cast<const IGradModelFunction*>(&func);
109  if (gradFunc) {
110  SetFunction(*gradFunc, true);
111  return;
112  }
113  else {
114  MATH_WARN_MSG("Fitter::SetFunction","Requested function does not provide gradient - use it as non-gradient function ");
115  }
116  }
117  fUseGradient = false;
118 
119  // set the fit model function (clone the given one and keep a copy )
120  //std::cout << "set a non-grad function" << std::endl;
121 
122  fFunc = std::shared_ptr<IModelFunction>(dynamic_cast<IModelFunction *>(func.Clone() ) );
123  assert(fFunc);
124 
125  // creates the parameter settings
126  fConfig.CreateParamsSettings(*fFunc);
127  fFunc_v.reset();
128 }
129 
130 void Fitter::SetFunction(const IModel1DFunction & func, bool useGradient)
131 {
132  fUseGradient = useGradient;
133  if (fUseGradient) {
134  const IGradModel1DFunction * gradFunc = dynamic_cast<const IGradModel1DFunction*>(&func);
135  if (gradFunc) {
136  SetFunction(*gradFunc, true);
137  return;
138  }
139  else {
140  MATH_WARN_MSG("Fitter::SetFunction","Requested function does not provide gradient - use it as non-gradient function ");
141  }
142  }
143  fUseGradient = false;
144  //std::cout << "set a 1d function" << std::endl;
145 
146  // function is cloned when creating the adapter
147  fFunc = std::shared_ptr<IModelFunction>(new ROOT::Math::MultiDimParamFunctionAdapter(func));
148 
149  // creates the parameter settings
150  fConfig.CreateParamsSettings(*fFunc);
151  fFunc_v.reset();
152 }
153 
154 void Fitter::SetFunction(const IGradModelFunction & func, bool useGradient)
155 {
156  fUseGradient = useGradient;
157  //std::cout << "set a grad function" << std::endl;
158  // set the fit model function (clone the given one and keep a copy )
159  fFunc = std::shared_ptr<IModelFunction>( dynamic_cast<IGradModelFunction *> ( func.Clone() ) );
160  assert(fFunc);
161 
162  // creates the parameter settings
163  fConfig.CreateParamsSettings(*fFunc);
164  fFunc_v.reset();
165 }
166 
167 
168 void Fitter::SetFunction(const IGradModel1DFunction & func, bool useGradient)
169 {
170  //std::cout << "set a 1d grad function" << std::endl;
171  fUseGradient = useGradient;
172  // function is cloned when creating the adapter
173  fFunc = std::shared_ptr<IModelFunction>(new ROOT::Math::MultiDimParamGradFunctionAdapter(func));
174 
175  // creates the parameter settings
176  fConfig.CreateParamsSettings(*fFunc);
177  fFunc_v.reset();
178 }
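// Illustrative sketch (not part of Fitter.cxx): a typical use of the SetFunction
// interface above, assuming the public API declared in Fitter.h and the TF1
// wrapper ROOT::Math::WrappedMultiTF1 (Math/WrappedMultiTF1.h); "histo" is a
// hypothetical TH1 filled by the user.
//
//    TF1 f1("f1", "gaus", -3., 3.);
//    ROOT::Math::WrappedMultiTF1 model(f1, 1);     // parametric model function
//    ROOT::Fit::BinData data;
//    ROOT::Fit::FillData(data, &histo);            // fill from the histogram (HFitInterface.h)
//    ROOT::Fit::Fitter fitter;
//    fitter.SetFunction(model);                    // clones the model and creates the parameter settings
//    fitter.Config().SetMinimizer("Minuit2", "Migrad");
//    bool ok = fitter.Fit(data);                   // chi2 fit -> DoLeastSquareFit
//    if (ok) fitter.Result().Print(std::cout);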
179 
180 
181 bool Fitter::SetFCN(const ROOT::Math::IMultiGenFunction & fcn, const double * params, unsigned int dataSize, bool chi2fit) {
182  // set the objective function for the fit
183  // if params is not NULL create the parameter settings
184  fUseGradient = false;
185  unsigned int npar = fcn.NDim();
186  if (npar == 0) {
187  MATH_ERROR_MSG("Fitter::SetFCN","FCN function has zero parameters ");
188  return false;
189  }
190  if (params != 0 )
191  fConfig.SetParamsSettings(npar, params);
192  else {
193  if ( fConfig.ParamsSettings().size() != npar) {
194  MATH_ERROR_MSG("Fitter::SetFCN","wrong fit parameter settings");
195  return false;
196  }
197  }
198 
199  fBinFit = chi2fit;
200  fDataSize = dataSize;
201 
202  // keep also a copy of FCN function and set this in minimizer so they will be managed together
203  // (remember that the cloned copy will still depend on the data and model function pointers)
204  fObjFunction = std::unique_ptr<ROOT::Math::IMultiGenFunction> ( fcn.Clone() );
205 
206  // in case a model function and data exist from a previous fit - reset shared-ptr
207  if (fResult && fResult->FittedFunction() == 0 && fFunc) fFunc.reset();
208  if (fData) fData.reset();
209 
210  return true;
211 }
212 
213 bool Fitter::SetFCN(const ROOT::Math::IMultiGenFunction &fcn, const IModelFunction & func, const double *params, unsigned int dataSize, bool chi2fit) {
214  // set the objective function for the fit and a model function
215  if (!SetFCN(fcn, params, dataSize, chi2fit) ) return false;
216  // need to set fFunc afterwards because SetFCN could reset fFunc
217  fFunc = std::shared_ptr<IModelFunction>(dynamic_cast<IModelFunction *>(func.Clone()));
218  return (fFunc != nullptr);
219 }
220 
221 bool Fitter::SetFCN(const ROOT::Math::IMultiGradFunction &fcn, const double *params, unsigned int dataSize,
222  bool chi2fit)
223 {
224  // set the objective function for the fit
225  // if params is not NULL create the parameter settings
226  if (!SetFCN(static_cast<const ROOT::Math::IMultiGenFunction &>(fcn), params, dataSize, chi2fit))
227  return false;
228  fUseGradient = true;
229  return true;
230 }
231 
232 bool Fitter::SetFCN(const ROOT::Math::IMultiGradFunction &fcn, const IModelFunction &func, const double *params,
233  unsigned int dataSize, bool chi2fit)
234 {
235  // set the objective function for the fit and a model function
236  if (!SetFCN(fcn, params, dataSize, chi2fit) ) return false;
237  fFunc = std::shared_ptr<IModelFunction>(dynamic_cast<IModelFunction *>(func.Clone()));
238  return (fFunc != nullptr);
239 }
240 
241 bool Fitter::SetFCN(const ROOT::Math::FitMethodFunction &fcn, const double *params)
242 {
243  // set the objective function for the fit
244  // if params is not NULL create the parameter settings
245  bool chi2fit = (fcn.Type() == ROOT::Math::FitMethodFunction::kLeastSquare);
246  if (!SetFCN(fcn, params, fcn.NPoints(), chi2fit))
247  return false;
248  fUseGradient = false;
249  fFitType = fcn.Type();
250  return true;
251 }
252 
253 bool Fitter::SetFCN(const ROOT::Math::FitMethodGradFunction &fcn, const double *params)
254 {
255  // set the objective function for the fit
256  // if params is not NULL create the parameter settings
257  bool chi2fit = (fcn.Type() == ROOT::Math::FitMethodGradFunction::kLeastSquare);
258  if (!SetFCN(fcn, params, fcn.NPoints(), chi2fit))
259  return false;
260  fUseGradient = true;
261  fFitType = fcn.Type();
262  return true;
263 }
264 
265 bool Fitter::FitFCN(const BaseFunc &fcn, const double *params, unsigned int dataSize, bool chi2fit)
266 {
267  // fit a user provided FCN function
268  // create fit parameter settings
269  if (!SetFCN(fcn, params, dataSize, chi2fit))
270  return false;
271  return FitFCN();
272 }
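// Illustrative sketch (user code, not part of this file): fitting a generic
// objective function through the IMultiGenFunction-based FitFCN above, here
// wrapping a lambda with ROOT::Math::Functor (Math/Functor.h). The arrays
// xData/yData and nPoints are hypothetical user inputs.
//
//    auto chi2 = [&](const double * p) {
//       double sum = 0.;
//       for (unsigned int i = 0; i < nPoints; ++i) {
//          double r = yData[i] - (p[0] + p[1] * xData[i]);
//          sum += r * r;                           // chi2-like objective
//       }
//       return sum;
//    };
//    ROOT::Math::Functor fcn(chi2, 2);             // 2 free parameters
//    double p0[2] = {0., 1.};
//    ROOT::Fit::Fitter fitter;
//    bool ok = fitter.FitFCN(fcn, p0, nPoints, true);   // chi2fit = true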
273 
274 bool Fitter::FitFCN(const BaseGradFunc &fcn, const double *params, unsigned int dataSize, bool chi2fit)
275 {
276  // fit a user provided FCN gradient function
277 
278  if (!SetFCN(fcn, params, dataSize, chi2fit))
279  return false;
280  return FitFCN();
281 }
282 
283 bool Fitter::FitFCN(const ROOT::Math::FitMethodFunction &fcn, const double *params)
284 {
285  // fit using the passed objective function for the fit
286  if (!SetFCN(fcn, params))
287  return false;
288  return FitFCN();
289 }
290 
291 bool Fitter::FitFCN(const ROOT::Math::FitMethodGradFunction &fcn, const double *params)
292 {
293  // fit using the passed objective function for the fit
294  if (!SetFCN(fcn, params))
295  return false;
296  return FitFCN();
297 }
298 
299 bool Fitter::SetFCN(MinuitFCN_t fcn, int npar, const double *params, unsigned int dataSize, bool chi2fit)
300 {
301  // set TMinuit style FCN type (global function pointer)
302  // create the corresponding objective function from that function
303 
304  if (npar == 0) {
305  npar = fConfig.ParamsSettings().size();
306  if (npar == 0) {
307  MATH_ERROR_MSG("Fitter::FitFCN", "Fit Parameter settings have not been created ");
308  return false;
309  }
310  }
311 
312  ROOT::Fit::FcnAdapter newFcn(fcn, npar);
313  return SetFCN(newFcn, params, dataSize, chi2fit);
314 }
315 
316 bool Fitter::FitFCN(MinuitFCN_t fcn, int npar, const double *params, unsigned int dataSize, bool chi2fit)
317 {
318  // fit using Minuit style FCN type (global function pointer)
319  // create the corresponding objective function from that function
320  if (!SetFCN(fcn, npar, params, dataSize, chi2fit))
321  return false;
322  fUseGradient = false;
323  return FitFCN();
324 }
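// Illustrative sketch (user code, not part of this file): the TMinuit-style
// entry point above, assuming MinuitFCN_t in Fitter.h has the usual TMinuit
// signature void(int&, double*, double&, double*, int); gX/gY/gEY/gN are
// hypothetical globals holding the data.
//
//    void myFCN(int & npar, double * grad, double & fval, double * par, int flag) {
//       fval = 0.;
//       for (int i = 0; i < gN; ++i) {
//          double r = (gY[i] - par[0] - par[1] * gX[i]) / gEY[i];
//          fval += r * r;                          // chi2-like objective
//       }
//    }
//
//    ROOT::Fit::Fitter fitter;
//    double p0[2] = {0., 1.};
//    bool ok = fitter.FitFCN(myFCN, 2, p0, gN, true);    // chi2fit = true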
325 
326 bool Fitter::FitFCN()
327 {
328  // fit using the previously set FCN function
329 
330  if (!fObjFunction) {
331  MATH_ERROR_MSG("Fitter::FitFCN", "Objective function has not been set");
332  return false;
333  }
334  // look if the FCN is of a known type and we can get model function and data objects
335  if (!fFunc || !fData)
336  ExamineFCN();
337  // init the minimizer
338  if (!DoInitMinimizer())
339  return false;
340  // perform the minimization
341  return DoMinimization();
342 }
343 
344 bool Fitter::EvalFCN()
345 {
346  // evaluate the FCN using the stored values in fConfig
347 
348  if (fFunc && fResult->FittedFunction() == 0)
349  fFunc.reset();
350 
351  if (!fObjFunction) {
352  MATH_ERROR_MSG("Fitter::FitFCN", "Objective function has not been set");
353  return false;
354  }
355  // create a Fit result from the fit configuration
356  fResult = std::make_shared<ROOT::Fit::FitResult>(fConfig);
357  // evaluate one time the FCN
358  double fcnval = (*fObjFunction)(fResult->GetParams());
359  // update fit result
360  fResult->fVal = fcnval;
361  fResult->fNCalls++;
362  return true;
363 }
364 
365 bool Fitter::DoLeastSquareFit(const ROOT::Fit::ExecutionPolicy &executionPolicy)
366 {
367 
368  // perform a chi2 fit on a set of binned data
369  std::shared_ptr<BinData> data = std::dynamic_pointer_cast<BinData>(fData);
370  assert(data);
371 
372  // check function
373  if (!fFunc && !fFunc_v) {
374  MATH_ERROR_MSG("Fitter::DoLeastSquareFit", "model function is not set");
375  return false;
376  } else {
377 
378 #ifdef DEBUG
379  std::cout << "Fitter ParamSettings " << Config().ParamsSettings()[3].IsBound() << " lower limit "
380  << Config().ParamsSettings()[3].LowerLimit() << " upper limit "
381  << Config().ParamsSettings()[3].UpperLimit() << std::endl;
382 #endif
383 
384  fBinFit = true;
385  fDataSize = data->Size();
386  // check if fFunc provides gradient
387  if (!fUseGradient) {
388  // do minimization without using the gradient
389  if (fFunc_v) {
390  Chi2FCN<BaseFunc, IModelFunction_v> chi2(data, fFunc_v, executionPolicy);
391  fFitType = chi2.Type();
392  return DoMinimization(chi2);
393  } else {
394  Chi2FCN<BaseFunc> chi2(data, fFunc, executionPolicy);
395  fFitType = chi2.Type();
396  return DoMinimization(chi2);
397  }
398  } else {
399  // use gradient
400  if (fConfig.MinimizerOptions().PrintLevel() > 0)
401  MATH_INFO_MSG("Fitter::DoLeastSquareFit", "use gradient from model function");
402 
403  if (fFunc_v) {
404  std::shared_ptr<IGradModelFunction_v> gradFun = std::dynamic_pointer_cast<IGradModelFunction_v>(fFunc_v);
405  if (gradFun) {
406  Chi2FCN<BaseGradFunc, IModelFunction_v> chi2(data, gradFun);
407  fFitType = chi2.Type();
408  return DoMinimization(chi2);
409  }
410  } else {
411  std::shared_ptr<IGradModelFunction> gradFun = std::dynamic_pointer_cast<IGradModelFunction>(fFunc);
412  if (gradFun) {
413  Chi2FCN<BaseGradFunc> chi2(data, gradFun);
414  fFitType = chi2.Type();
415  return DoMinimization(chi2);
416  }
417  }
418  MATH_ERROR_MSG("Fitter::DoLeastSquareFit", "wrong type of function - it does not provide gradient");
419  }
420  }
421  return false;
422 }
423 
424 bool Fitter::DoBinnedLikelihoodFit(bool extended, const ROOT::Fit::ExecutionPolicy &executionPolicy)
425 {
426  // perform a likelihood fit on a set of binned data
427  // The fit is extended (Poisson log-likelihood) by default
428 
429  std::shared_ptr<BinData> data = std::dynamic_pointer_cast<BinData>(fData);
430  assert(data);
431 
432  bool useWeight = fConfig.UseWeightCorrection();
433 
434  // check function
435  if (!fFunc && !fFunc_v) {
436  MATH_ERROR_MSG("Fitter::DoBinnedLikelihoodFit", "model function is not set");
437  return false;
438  }
439 
440  // logl fit (error should be 0.5) set if different than default values (of 1)
441  if (fConfig.MinimizerOptions().ErrorDef() == gDefaultErrorDef ) {
442  fConfig.MinimizerOptions().SetErrorDef(0.5);
443  }
444 
445  if (useWeight && fConfig.MinosErrors()) {
446  MATH_INFO_MSG("Fitter::DoBinnedLikelihoodFit", "MINOS errors cannot be computed in weighted likelihood fits");
447  fConfig.SetMinosErrors(false);
448  }
449 
450  fBinFit = true;
451  fDataSize = data->Size();
452 
453  if (!fUseGradient) {
454  // do minimization without using the gradient
455  if (fFunc_v) {
456  // create a chi2 function to be used for the equivalent chi-square
457  Chi2FCN<BaseFunc, IModelFunction_v> chi2(data, fFunc_v);
458  PoissonLikelihoodFCN<BaseFunc, IModelFunction_v> logl(data, fFunc_v, useWeight, extended, executionPolicy);
459  fFitType = logl.Type();
460  // do minimization
461  if (!DoMinimization(logl, &chi2))
462  return false;
463  if (useWeight) {
464  logl.UseSumOfWeightSquare();
465  if (!ApplyWeightCorrection(logl))
466  return false;
467  }
468  } else {
469  // create a chi2 function to be used for the equivalent chi-square
470  Chi2FCN<BaseFunc> chi2(data, fFunc);
471  PoissonLikelihoodFCN<BaseFunc> logl(data, fFunc, useWeight, extended, executionPolicy);
472  fFitType = logl.Type();
473  // do minimization
474  if (!DoMinimization(logl, &chi2))
475  return false;
476  if (useWeight) {
477  logl.UseSumOfWeightSquare();
478  if (!ApplyWeightCorrection(logl))
479  return false;
480  }
481  }
482  } else {
483  if (fFunc_v) {
484  // create a chi2 function to be used for the equivalent chi-square
485  Chi2FCN<BaseFunc, IModelFunction_v> chi2(data, fFunc_v);
486  std::shared_ptr<IGradModelFunction_v> gradFun = std::dynamic_pointer_cast<IGradModelFunction_v>(fFunc_v);
487  if (!gradFun) {
488  MATH_ERROR_MSG("Fitter::DoBinnedLikelihoodFit", "wrong type of function - it does not provide gradient");
489  return false;
490  }
491  PoissonLikelihoodFCN<BaseGradFunc, IModelFunction_v> logl(data, gradFun, useWeight, true, executionPolicy);
492  fFitType = logl.Type();
493  // do minimization
494  if (!DoMinimization(logl, &chi2))
495  return false;
496  if (useWeight) {
497  logl.UseSumOfWeightSquare();
498  if (!ApplyWeightCorrection(logl))
499  return false;
500  }
501  } else {
502  // create a chi2 function to be used for the equivalent chi-square
503  Chi2FCN<BaseFunc> chi2(data, fFunc);
504  if (fConfig.MinimizerOptions().PrintLevel() > 0)
505  MATH_INFO_MSG("Fitter::DoLikelihoodFit", "use gradient from model function");
506  // check if fFunc provides gradient
507  std::shared_ptr<IGradModelFunction> gradFun = std::dynamic_pointer_cast<IGradModelFunction>(fFunc);
508  if (!gradFun) {
509  MATH_ERROR_MSG("Fitter::DoBinnedLikelihoodFit", "wrong type of function - it does not provide gradient");
510  return false;
511  }
512  // use gradient for minimization
513  // not-extended is not implemented in this case
514  if (!extended) {
515  MATH_WARN_MSG("Fitter::DoBinnedLikelihoodFit",
516  "Not-extended binned fit with gradient not yet supported - do an extended fit");
517  }
518  PoissonLikelihoodFCN<BaseGradFunc> logl(data, gradFun, useWeight, true, executionPolicy);
519  fFitType = logl.Type();
520  // do minimization
521  if (!DoMinimization(logl, &chi2))
522  return false;
523  if (useWeight) {
524  logl.UseSumOfWeightSquare();
525  if (!ApplyWeightCorrection(logl))
526  return false;
527  }
528  }
529  }
530  return true;
531 }
532 
533 bool Fitter::DoUnbinnedLikelihoodFit(bool extended, const ROOT::Fit::ExecutionPolicy &executionPolicy) {
534  // perform a likelihood fit on a set of unbinned data
535 
536  std::shared_ptr<UnBinData> data = std::dynamic_pointer_cast<UnBinData>(fData);
537  assert(data);
538 
539  bool useWeight = fConfig.UseWeightCorrection();
540 
541  if (!fFunc && !fFunc_v) {
542  MATH_ERROR_MSG("Fitter::DoUnbinnedLikelihoodFit","model function is not set");
543  return false;
544  }
545 
546  if (useWeight && fConfig.MinosErrors() ) {
547  MATH_INFO_MSG("Fitter::DoUnbinnedLikelihoodFit","MINOS errors cannot be computed in weighted likelihood fits");
548  fConfig.SetMinosErrors(false);
549  }
550 
551 
552  fBinFit = false;
553  fDataSize = data->Size();
554 
555 #ifdef DEBUG
556  int ipar = 0;
557  std::cout << "Fitter ParamSettings " << Config().ParamsSettings()[ipar].IsBound() << " lower limit " << Config().ParamsSettings()[ipar].LowerLimit() << " upper limit " << Config().ParamsSettings()[ipar].UpperLimit() << std::endl;
558 #endif
559 
560  // logl fit (error should be 0.5) set if different than default values (of 1)
561  if (fConfig.MinimizerOptions().ErrorDef() == gDefaultErrorDef ) {
562  fConfig.MinimizerOptions().SetErrorDef(0.5);
563  }
564 
565  if (!fUseGradient) {
566  // do minimization without using the gradient
567  if (fFunc_v ){
568  LogLikelihoodFCN<BaseFunc, IModelFunction_v> logl(data, fFunc_v, useWeight, extended, executionPolicy);
569  fFitType = logl.Type();
570  if (!DoMinimization (logl) ) return false;
571  if (useWeight) {
572  logl.UseSumOfWeightSquare();
573  if (!ApplyWeightCorrection(logl) ) return false;
574  }
575  return true;
576  } else {
577  LogLikelihoodFCN<BaseFunc> logl(data, fFunc, useWeight, extended, executionPolicy);
578 
579  fFitType = logl.Type();
580  if (!DoMinimization (logl) ) return false;
581  if (useWeight) {
582  logl.UseSumOfWeightSquare();
583  if (!ApplyWeightCorrection(logl) ) return false;
584  }
585  return true;
586  }
587  } else {
588  // use gradient : check if fFunc provides gradient
589  if (fFunc_v) {
590  if (fConfig.MinimizerOptions().PrintLevel() > 0)
591  MATH_INFO_MSG("Fitter::DoUnbinnedLikelihoodFit", "use gradient from model function");
592  std::shared_ptr<IGradModelFunction_v> gradFun = std::dynamic_pointer_cast<IGradModelFunction_v>(fFunc_v);
593  if (gradFun) {
594  if (extended) {
595  MATH_WARN_MSG("Fitter::DoUnbinnedLikelihoodFit",
596  "Extended unbinned fit with gradient not yet supported - do a not-extended fit");
597  }
598  LogLikelihoodFCN<BaseGradFunc, IModelFunction_v> logl(data, gradFun, useWeight, extended);
599  fFitType = logl.Type();
600  if (!DoMinimization(logl))
601  return false;
602  if (useWeight) {
603  logl.UseSumOfWeightSquare();
604  if (!ApplyWeightCorrection(logl))
605  return false;
606  }
607  return true;
608  }
609  MATH_ERROR_MSG("Fitter::DoUnbinnedLikelihoodFit", "wrong type of function - it does not provide gradient");
610 
611  } else {
612  if (fConfig.MinimizerOptions().PrintLevel() > 0)
613  MATH_INFO_MSG("Fitter::DoUnbinnedLikelihoodFit", "use gradient from model function");
614  std::shared_ptr<IGradModelFunction> gradFun = std::dynamic_pointer_cast<IGradModelFunction>(fFunc);
615  if (gradFun) {
616  if (extended) {
617  MATH_WARN_MSG("Fitter::DoUnbinnedLikelihoodFit",
618  "Extended unbinned fit with gradient not yet supported - do a not-extended fit");
619  }
620  LogLikelihoodFCN<BaseGradFunc> logl(data, gradFun, useWeight, extended);
621  fFitType = logl.Type();
622  if (!DoMinimization(logl))
623  return false;
624  if (useWeight) {
625  logl.UseSumOfWeightSquare();
626  if (!ApplyWeightCorrection(logl))
627  return false;
628  }
629  return true;
630  }
631  MATH_ERROR_MSG("Fitter::DoUnbinnedLikelihoodFit", "wrong type of function - it does not provide gradient");
632  }
633  }
634  return false;
635 }
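// Illustrative sketch (user code, not part of this file): the binned and
// unbinned likelihood fits above are normally reached through the public
// Fit/LikelihoodFit methods declared in Fitter.h, assuming the UnBinData
// constructor from a plain array; "pdfModel" is a hypothetical normalized
// density model.
//
//    ROOT::Fit::UnBinData udata(n, xValues);       // n values of a 1-dim variable
//    ROOT::Fit::Fitter fitter;
//    fitter.SetFunction(pdfModel);
//    bool ok = fitter.LikelihoodFit(udata);        // -> DoUnbinnedLikelihoodFit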
636 
637 
638 bool Fitter::DoLinearFit() {
639 
640  std::shared_ptr<BinData> data = std::dynamic_pointer_cast<BinData>(fData);
641  assert(data);
642 
643  // perform a linear fit on a set of binned data
644  std::string prevminimizer = fConfig.MinimizerType();
645  fConfig.SetMinimizer("Linear");
646 
647  fBinFit = true;
648 
649  bool ret = DoLeastSquareFit();
650  fConfig.SetMinimizer(prevminimizer.c_str());
651  return ret;
652 }
653 
654 
655 bool Fitter::CalculateHessErrors() {
656  // compute the Hesse errors according to configuration
657  // set in the parameters and append value in fit result
658  if (!fObjFunction) {
659  MATH_ERROR_MSG("Fitter::CalculateHessErrors","Objective function has not been set");
660  return false;
661  }
662 
663  // need a special treatment in case of weighted likelihood fit
664  // (not yet implemented)
665  if (fFitType == 2 && fConfig.UseWeightCorrection() ) {
666  MATH_ERROR_MSG("Fitter::CalculateHessErrors","Re-computation of Hesse errors not implemented for weighted likelihood fits");
667  MATH_INFO_MSG("Fitter::CalculateHessErrors","Do the Fit using configure option FitConfig::SetParabErrors()");
668  return false;
669  }
670  // if (!fUseGradient ) {
671  // ROOT::Math::FitMethodFunction * fcn = dynamic_cast< ROOT::Math::FitMethodFunction *>(fObjFunction.get());
672  // if (fcn && fcn->Type() == ROOT::Math::FitMethodFunction::kLogLikelihood) {
673  // if (!fBinFit) {
674  // ROOT::Math::LogLikelihoodFunction * nll = dynamic_cast< ROOT::Math::LogLikelihoodFunction *>(fcn);
675  // assert(nll);
676  // nll->UseSumOfWeightSquare(false);
677  // }
678  // else {
679  // ROOT::Math::PoissonLikelihoodFunction * nll = dynamic_cast< ROOT::Math::PoissonLikelihoodFunction *>(fcn);
680  // assert(nll);
681  // nll->UseSumOfWeightSquare(false);
682  // }
683  // // reset fcn in minimizer
684  // }
685 
686  // a fit Result pointer must exist when a minimizer exists
687  if (fMinimizer && !fResult ) {
688  MATH_ERROR_MSG("Fitter::CalculateHessErrors", "FitResult has not been created");
689  return false;
690  }
691 
692  // update minimizer (recreate it if not done before or if the name has changed)
693  if (!DoUpdateMinimizerOptions()) {
694  MATH_ERROR_MSG("Fitter::CalculateHessErrors","Error re-initializing the minimizer");
695  return false;
696  }
697 
698  if (!fMinimizer ) {
699  // this should not happen
700  MATH_ERROR_MSG("Fitter::CalculateHessErrors", "Need to do a fit before calculating the errors");
701  assert(false);
702  return false;
703  }
704 
705  //run Hesse
706  bool ret = fMinimizer->Hesse();
707  if (!ret) MATH_WARN_MSG("Fitter::CalculateHessErrors","Error when calculating Hessian");
708 
709 
710  // update minimizer results with what comes out from Hesse
711  // in case it is empty - create it from the FitConfig
712  if (fResult->IsEmpty() )
713  fResult = std::unique_ptr<ROOT::Fit::FitResult>(new ROOT::Fit::FitResult(fConfig) );
714 
715 
716  // re-give a minimizer instance in case it has been changed
717  ret |= fResult->Update(fMinimizer, fConfig, ret);
718 
719  // when possible get ncalls from FCN and set in fit result
720  if (fFitType != ROOT::Math::FitMethodFunction::kUndefined) {
721  fResult->fNCalls = GetNCallsFromFCN();
722  }
723 
724  // set also new errors in FitConfig
725  if (fConfig.UpdateAfterFit() && ret) DoUpdateFitConfig();
726 
727  return ret;
728 }
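// Illustrative user code (not part of this file): re-running HESSE after a
// fit to refresh the parameter errors stored in the FitResult, using the
// accessors declared in Fitter.h / FitResult.h.
//
//    if (fitter.FitFCN()) {
//       fitter.CalculateHessErrors();              // parabolic errors from the Hessian
//       const ROOT::Fit::FitResult & res = fitter.Result();
//       double err0 = res.Error(0);                // updated error of parameter 0
//    }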
729 
730 
731 bool Fitter::CalculateMinosErrors() {
732  // compute the Minos errors according to configuration
733  // set in the parameters and append value in fit result
734  // normally Minos errors are computed just after the minimization
735  // (in DoMinimization), if the
736  // FitConfig::MinosErrors() flag is set
737 
738  if (!fMinimizer) {
739  MATH_ERROR_MSG("Fitter::CalculateMinosErrors","Minimizer does not exist - cannot calculate Minos errors");
740  return false;
741  }
742 
743  if (!fResult || fResult->IsEmpty() ) {
744  MATH_ERROR_MSG("Fitter::CalculateMinosErrors","Invalid Fit Result - cannot calculate Minos errors");
745  return false;
746  }
747 
748  if (fFitType == 2 && fConfig.UseWeightCorrection() ) {
749  MATH_ERROR_MSG("Fitter::CalculateMinosErrors","Computation of MINOS errors not implemented for weighted likelihood fits");
750  return false;
751  }
752 
753  // update minimizer (but cannot re-create in this case). Must use an existing one
754  if (!DoUpdateMinimizerOptions(false)) {
755  MATH_ERROR_MSG("Fitter::CalculateHessErrors","Error re-initializing the minimizer");
756  return false;
757  }
758 
759  // set flag to compute Minos error to false in FitConfig to avoid that
760  // following minimization calls perform unwanted Minos error calculations
761  /// fConfig.SetMinosErrors(false);
762 
763 
764  const std::vector<unsigned int> & ipars = fConfig.MinosParams();
765  unsigned int n = (ipars.size() > 0) ? ipars.size() : fResult->Parameters().size();
766  bool ok = false;
767 
768  int iparNewMin = 0;
769  int iparMax = n;
770  int iter = 0;
771  // re-run Minos for the parameters processed before a new minimum was found
772  do {
773  if (iparNewMin > 0)
774  MATH_INFO_MSG("Fitter::CalculateMinosErrors","Run again Minos for some parameters because a new Minimum has been found");
775  iparNewMin = 0;
776  for (int i = 0; i < iparMax; ++i) {
777  double elow, eup;
778  unsigned int index = (ipars.size() > 0) ? ipars[i] : i;
779  bool ret = fMinimizer->GetMinosError(index, elow, eup);
780  // flags case when a new minimum has been found
781  if ((fMinimizer->MinosStatus() & 8) != 0) {
782  iparNewMin = i;
783  }
784  if (ret)
785  fResult->SetMinosError(index, elow, eup);
786  ok |= ret;
787  }
788 
789  iparMax = iparNewMin;
790  iter++; // to avoid infinite looping
791  }
792  while( iparNewMin > 0 && iter < 10);
793  if (!ok) {
794  MATH_ERROR_MSG("Fitter::CalculateMinosErrors","Minos error calculation failed for all the selected parameters");
795  }
796 
797  // re-give a minimizer instance in case it has been changed
798  // but maintain previous valid status. Do not set result to false if minos failed
799 
800  ok &= fResult->Update(fMinimizer, fConfig, fResult->IsValid());
801 
802  return ok;
803 }
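// Illustrative user code (not part of this file): MINOS errors can either be
// requested before fitting via the FitConfig, or computed afterwards with the
// method above; the asymmetric errors are then available from the FitResult.
//
//    fitter.Config().SetMinosErrors(true);         // run MINOS right after the fit
//    // ... or, after a successful fit:
//    fitter.CalculateMinosErrors();
//    double elow = fitter.Result().LowerError(0);
//    double eup  = fitter.Result().UpperError(0);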
804 
805 
806 
807 // traits for distinguishing fit method functions from generic objective functions
808 template<class Func>
809 struct ObjFuncTrait {
810  static unsigned int NCalls(const Func & ) { return 0; }
811  static int Type(const Func & ) { return -1; }
812  static bool IsGrad() { return false; }
813 };
814 template<>
815 struct ObjFuncTrait<ROOT::Math::FitMethodFunction> {
816  static unsigned int NCalls(const ROOT::Math::FitMethodFunction & f ) { return f.NCalls(); }
817  static int Type(const ROOT::Math::FitMethodFunction & f) { return f.Type(); }
818  static bool IsGrad() { return false; }
819 };
820 template<>
821 struct ObjFuncTrait<ROOT::Math::FitMethodGradFunction> {
822  static unsigned int NCalls(const ROOT::Math::FitMethodGradFunction & f ) { return f.NCalls(); }
823  static int Type(const ROOT::Math::FitMethodGradFunction & f) { return f.Type(); }
824  static bool IsGrad() { return true; }
825 };
826 
827 bool Fitter::DoInitMinimizer() {
828  //initialize minimizer by creating it
829  // and set there the objective function
830  // obj function must have been copied before
831  assert(fObjFunction.get() );
832 
833  // check configuration and objective function
834  if ( fConfig.ParamsSettings().size() != fObjFunction->NDim() ) {
835  MATH_ERROR_MSG("Fitter::DoInitMinimizer","wrong function dimension or wrong size for FitConfig");
836  return false;
837  }
838 
839  // create first Minimizer
840  // assigning the shared_ptr will delete the previously existing one
841  fMinimizer = std::shared_ptr<ROOT::Math::Minimizer> ( fConfig.CreateMinimizer() );
842  if (fMinimizer.get() == 0) {
843  MATH_ERROR_MSG("Fitter::DoInitMinimizer","Minimizer cannot be created");
844  return false;
845  }
846 
847  // in case of gradient function one needs to downcast the pointer
848  if (fUseGradient) {
849  const ROOT::Math::IMultiGradFunction * gradfcn = dynamic_cast<const ROOT::Math::IMultiGradFunction *> (fObjFunction.get() );
850  if (!gradfcn) {
851  MATH_ERROR_MSG("Fitter::DoInitMinimizer","wrong type of function - it does not provide gradient");
852  return false;
853  }
854  fMinimizer->SetFunction( *gradfcn);
855  }
856  else
857  fMinimizer->SetFunction( *fObjFunction);
858 
859 
860  fMinimizer->SetVariables(fConfig.ParamsSettings().begin(), fConfig.ParamsSettings().end() );
861 
862  // if requested parabolic error do correct error analysis by the minimizer (call HESSE)
863  if (fConfig.ParabErrors()) fMinimizer->SetValidError(true);
864 
865  return true;
866 
867 }
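// Illustrative user code (not part of this file): the minimizer created here
// is driven entirely by the FitConfig, which can be tuned before fitting.
//
//    fitter.Config().SetMinimizer("Minuit2", "Migrad");
//    fitter.Config().MinimizerOptions().SetPrintLevel(1);
//    fitter.Config().MinimizerOptions().SetTolerance(0.01);
//    fitter.Config().MinimizerOptions().SetMaxFunctionCalls(10000);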
868 
869 bool Fitter::DoUpdateMinimizerOptions(bool canDifferentMinim ) {
870  // update minimizer options when re-doing a Fit or computing Hesse or Minos errors
871 
872 
873  // create a new minimizer if it is of a different type
874  // minimizer type string stored in FitResult is "minimizer name" + " / " + minimizer algo
875  std::string newMinimType = fConfig.MinimizerName();
876  if (fMinimizer && fResult && newMinimType != fResult->MinimizerType()) {
877  // if a different minimizer is allowed (e.g. when calling Hesse)
878  if (canDifferentMinim) {
879  std::string msg = "Using now " + newMinimType;
880  MATH_INFO_MSG("Fitter::DoUpdateMinimizerOptions: ", msg.c_str());
881  if (!DoInitMinimizer() )
882  return false;
883  }
884  else {
885  std::string msg = "Cannot change minimizer. Continue using " + fResult->MinimizerType();
886  MATH_WARN_MSG("Fitter::DoUpdateMinimizerOptions",msg.c_str());
887  }
888  }
889 
890  // create minimizer if it was not done before
891  if (!fMinimizer) {
892  if (!DoInitMinimizer())
893  return false;
894  }
895 
896  // set new minimizer options (but not functions and parameters)
897  fMinimizer->SetOptions(fConfig.MinimizerOptions());
898  return true;
899 }
900 
901 bool Fitter::DoMinimization(const ROOT::Math::IMultiGenFunction * chi2func) {
902  // perform the minimization (assume we have already initialized the minimizer)
903 
904  assert(fMinimizer );
905 
906  bool isValid = fMinimizer->Minimize();
907 
908  // unsigned int ncalls = ObjFuncTrait<ObjFunc>::NCalls(*fcn);
909  // int fitType = ObjFuncTrait<ObjFunc>::Type(objFunc);
910 
911  if (!fResult) fResult = std::make_shared<FitResult>();
912 
913  fResult->FillResult(fMinimizer,fConfig, fFunc, isValid, fDataSize, fBinFit, chi2func );
914 
915  // if requested run Minos after minimization
916  if (isValid && fConfig.MinosErrors()) {
917  // minos error calculation will update also FitResult
918  CalculateMinosErrors();
919  }
920 
921  // when possible get ncalls from FCN and set in fit result
922  if (fFitType != ROOT::Math::FitMethodFunction::kUndefined) {
923  fResult->fNCalls = GetNCallsFromFCN();
924  }
925 
926  // fill information in fit result
927  fResult->fObjFunc = fObjFunction;
928  fResult->fFitData = fData;
929 
930 
931 #ifdef DEBUG
932  std::cout << "ROOT::Fit::Fitter::DoMinimization : ncalls = " << fResult->fNCalls << " type of objfunc " << fFitType << " typeid: " << typeid(*fObjFunction).name() << " use gradient " << fUseGradient << std::endl;
933 #endif
934 
935  if (fConfig.NormalizeErrors() && fFitType == ROOT::Math::FitMethodFunction::kLeastSquare)
936  fResult->NormalizeErrors();
937 
938  // set also new parameter values and errors in FitConfig
939  if (fConfig.UpdateAfterFit() && isValid) DoUpdateFitConfig();
940 
941  return isValid;
942 }
943 
944 bool Fitter::DoMinimization(const BaseFunc & objFunc, const ROOT::Math::IMultiGenFunction * chi2func) {
945  // perform the minimization initializing the minimizer starting from a given obj function
946 
947  // keep also a copy of FCN function and set this in minimizer so they will be managed together
948  // (remember that the cloned copy will still depend on the data and model function pointers)
949  fObjFunction = std::unique_ptr<ROOT::Math::IMultiGenFunction> ( objFunc.Clone() );
950  if (!DoInitMinimizer()) return false;
951  return DoMinimization(chi2func);
952 }
953 
954 
955 void Fitter::DoUpdateFitConfig() {
956  // update the fit configuration after a fit using the obtained result
957  if (fResult->IsEmpty() || !fResult->IsValid() ) return;
958  for (unsigned int i = 0; i < fConfig.NPar(); ++i) {
959  ParameterSettings & par = fConfig.ParSettings(i);
960  par.SetValue( fResult->Value(i) );
961  if (fResult->Error(i) > 0) par.SetStepSize( fResult->Error(i) );
962  }
963 }
964 
965 int Fitter::GetNCallsFromFCN() {
966  // retrieve ncalls from the fit method functions
967  // this function is called when the minimizer does not provide a way of returning the number of function calls
968  int ncalls = 0;
969  if (!fUseGradient) {
970  const ROOT::Math::FitMethodFunction * fcn = dynamic_cast<const ROOT::Math::FitMethodFunction *>(fObjFunction.get());
971  if (fcn) ncalls = fcn->NCalls();
972  }
973  else {
974  const ROOT::Math::FitMethodGradFunction * fcn = dynamic_cast<const ROOT::Math::FitMethodGradFunction*>(fObjFunction.get());
975  if (fcn) ncalls = fcn->NCalls();
976  }
977  return ncalls;
978 }
979 
980 
981 bool Fitter::ApplyWeightCorrection(const ROOT::Math::IMultiGenFunction & loglw2, bool minimizeW2L) {
982  // apply correction for weight square
983  // Compute Hessian of the loglikelihood function using the sum of the weight squared
984  // This method assumes:
985  // - a fit has been done before and a covariance matrix exists
986  // - the objective function is a likelihood function and Likelihood::UseSumOfWeightSquare()
987  // has been called before
988 
989  if (fMinimizer.get() == 0) {
990  MATH_ERROR_MSG("Fitter::ApplyWeightCorrection","Must perform first a fit before applying the correction");
991  return false;
992  }
993 
994  unsigned int n = loglw2.NDim();
995  // correct errors for weight squared
996  std::vector<double> cov(n*n);
997  bool ret = fMinimizer->GetCovMatrix(&cov[0] );
998  if (!ret) {
999  MATH_ERROR_MSG("Fitter::ApplyWeightCorrection","Previous fit has no valid Covariance matrix");
1000  return false;
1001  }
1002  // need to re-init the minimizer and set w2
1003  fObjFunction = std::unique_ptr<ROOT::Math::IMultiGenFunction> ( loglw2.Clone() );
1004  // need to re-initialize the minimizer for the changes applied in the
1005  // objective functions
1006  if (!DoInitMinimizer()) return false;
1007 
1008  //std::cout << "Running Hesse ..." << std::endl;
1009 
1010  // if requested (minimizeW2L), first minimize the weight-squared likelihood,
1011  // ignoring the status of that minimization
1012  if (minimizeW2L) fMinimizer->Minimize();
1013  // run Hesse on the log-likelihood build using sum of weight squared
1014  ret = fMinimizer->Hesse();
1015  if (!ret) {
1016  MATH_ERROR_MSG("Fitter::ApplyWeightCorrection","Error running Hesse on weight2 likelihood - cannot compute errors");
1017  return false;
1018  }
1019 
1020  if (fMinimizer->CovMatrixStatus() != 3) {
1021  MATH_WARN_MSG("Fitter::ApplyWeightCorrection","Covariance matrix for weighted likelihood is not accurate, the errors may be not reliable");
1022  if (fMinimizer->CovMatrixStatus() == 2)
1023  MATH_WARN_MSG("Fitter::ApplyWeightCorrection","Covariance matrix for weighted likelihood was forced to be defined positive");
1024  if (fMinimizer->CovMatrixStatus() <= 0)
1025  // probably should have failed before
1026  MATH_ERROR_MSG("Fitter::ApplyWeightCorrection","Covariance matrix for weighted likelihood is not valid !");
1027  }
1028 
1029  // std::vector<double> c(n*n);
1030  // ret = fMinimizer->GetCovMatrix(&c2[0] );
1031  // if (!ret) std::cout << "Error reading cov matrix " << fMinimizer->Status() << std::endl;
1032  // TMatrixDSym cmat2(n,&c2[0]);
1033  // std::cout << "Cov matrix of w2 " << std::endl;
1034  // cmat2.Print();
1035  // cmat2.Invert();
1036  // std::cout << "Hessian of w2 " << std::endl;
1037  // cmat2.Print();
1038 
1039  // get Hessian matrix from weight-square likelihood
1040  std::vector<double> hes(n*n);
1041  ret = fMinimizer->GetHessianMatrix(&hes[0] );
1042  if (!ret) {
1043  MATH_ERROR_MSG("Fitter::ApplyWeightCorrection","Error retrieving Hesse on weight2 likelihood - cannot compute errors");
1044  return false;
1045  }
1046 
1047  // for debug
1048  // std::cout << "Hessian W2 matrix " << std::endl;
1049  // for (unsigned int i = 0; i < n; ++i) {
1050  // for (unsigned int j = 0; j < n; ++j) {
1051  // std::cout << std::setw(12) << hes[i*n + j] << " , ";
1052  // }
1053  // std::cout << std::endl;
1054  // }
1055 
1056  // perform the product of matrices cov * hes * cov
1057  // since we do not want to add a matrix dependency, do the product by hand
1058  // first do hes * cov
1059  std::vector<double> tmp(n*n);
1060  for (unsigned int i = 0; i < n; ++i) {
1061  for (unsigned int j = 0; j < n; ++j) {
1062  for (unsigned int k = 0; k < n; ++k)
1063  tmp[i*n+j] += hes[i*n + k] * cov[k*n + j];
1064  }
1065  }
1066  // do multiplication now cov * tmp save result
1067  std::vector<double> newCov(n*n);
1068  for (unsigned int i = 0; i < n; ++i) {
1069  for (unsigned int j = 0; j < n; ++j) {
1070  for (unsigned int k = 0; k < n; ++k)
1071  newCov[i*n+j] += cov[i*n + k] * tmp[k*n + j];
1072  }
1073  }
1074  // update fit result with new corrected covariance matrix
1075  unsigned int k = 0;
1076  for (unsigned int i = 0; i < n; ++i) {
1077  fResult->fErrors[i] = std::sqrt( newCov[i*(n+1)] );
1078  for (unsigned int j = 0; j <= i; ++j)
1079  fResult->fCovMatrix[k++] = newCov[i *n + j];
1080  }
1081  //fResult->PrintCovMatrix(std::cout);
1082 
1083  return true;
1084 }
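// Note on the correction above: with V the covariance matrix obtained from the
// weighted likelihood fit and H_w2 the Hessian of the likelihood built with
// squared weights, the hand-written matrix products compute the sandwich
// estimator  V' = V * H_w2 * V ; the corrected parameter errors are taken as
// sqrt(V'_ii) and V' is stored back into the FitResult.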
1085 
1086 
1087 
1088 void Fitter::ExamineFCN() {
1089  // return a pointer to the binned data used in the fit
1090  // works only for chi2 or binned likelihood fits
1091  // thus when the objective function stored is a Chi2Func or a PoissonLikelihood
1092  // The function also sets the model function correctly if it has not been set
1093 
1096 
1099 
1100  //MATH_INFO_MSG("Fitter::ExamineFCN","Objective function is not of a known type - FitData and ModelFunction objects are not available");
1101  return;
1102 }
1103 
1104  } // end namespace Fit
1105 
1106 } // end namespace ROOT