Fitter.cxx
// @(#)root/mathcore:$Id$
// Author: L. Moneta    Mon Sep 4 17:00:10 2006

/**********************************************************************
 *                                                                    *
 * Copyright (c) 2006  LCG ROOT Math Team, CERN/PH-SFT                *
 *                                                                    *
 *                                                                    *
 **********************************************************************/

#include "Fit/Fitter.h"
#include "Fit/Chi2FCN.h"
#include "Fit/PoissonLikelihoodFCN.h"
#include "Fit/LogLikelihoodFCN.h"
#include "Math/Minimizer.h"
#include "Math/MinimizerOptions.h"
#include "Math/FitMethodFunction.h"
#include "Fit/BasicFCN.h"
#include "Fit/BinData.h"
#include "Fit/UnBinData.h"
#include "Fit/FcnAdapter.h"
#include "Fit/FitConfig.h"
#include "Fit/FitResult.h"
#include "Math/Error.h"

#include <memory>

#include "Math/IParamFunction.h"

#include "Math/MultiDimParamFunctionAdapter.h"

namespace ROOT::Fit {

// use a static variable to get the default minimizer options for the error definition,
// to see if the user has changed it later on. If it has not been changed, we set
// an error definition of 0.5 for the likelihood methods.
// t.b.d.: multiply the likelihood by 2 to have the same error definition as chi2
double gDefaultErrorDef = ROOT::Math::MinimizerOptions::DefaultErrorDef();

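// (A 1-sigma parameter uncertainty corresponds to a change of 1 in the chi2
// and, since chi2 ~ -2 ln L, to a change of 0.5 in -ln L: this is the reason
// for the 0.5 error definition used in the likelihood fits below.)
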
Fitter::Fitter(const std::shared_ptr<FitResult> & result) :
   fResult(result)
{
   if (result->fFitFunc) SetFunction(*fResult->fFitFunc); // this will also create the configuration
   if (result->fObjFunc) fObjFunction = fResult->fObjFunc;
   if (result->fFitData) fData = fResult->fFitData;
}

void Fitter::SetFunction(const IModelFunction & func, bool useGradient)
{

   fUseGradient = useGradient;
   if (fUseGradient) {
      auto *gradFunc = dynamic_cast<const IGradModelFunction*>(&func);
      if (gradFunc) {
         SetFunction(*gradFunc, true);
         return;
      }
      else {
         MATH_WARN_MSG("Fitter::SetFunction","Requested function does not provide gradient - use it as non-gradient function ");
      }
   }
   fUseGradient = false;

   // set the fit model function (clone the given one and keep a copy)
   //std::cout << "set a non-grad function" << std::endl;

   fFunc = std::unique_ptr<IModelFunction>(dynamic_cast<IModelFunction *>(func.Clone()));
   assert(fFunc);

   // creates the parameter settings
   fConfig.CreateParamsSettings(*fFunc);
   fFunc_v.reset();
}

void Fitter::SetFunction(const IModel1DFunction & func, bool useGradient)
{
   fUseGradient = useGradient;
   if (fUseGradient) {
      auto *gradFunc = dynamic_cast<const IGradModel1DFunction*>(&func);
      if (gradFunc) {
         SetFunction(*gradFunc, true);
         return;
      }
      else {
         MATH_WARN_MSG("Fitter::SetFunction","Requested function does not provide gradient - use it as non-gradient function ");
      }
   }
   fUseGradient = false;
   //std::cout << "set a 1d function" << std::endl;

   // the function is cloned when creating the adapter
   fFunc = std::shared_ptr<IModelFunction>(new ROOT::Math::MultiDimParamFunctionAdapter(func));

   // creates the parameter settings
   fConfig.CreateParamsSettings(*fFunc);
   fFunc_v.reset();
}

void Fitter::SetFunction(const IGradModelFunction & func, bool useGradient)
{
   fUseGradient = useGradient;
   //std::cout << "set a grad function" << std::endl;
   // set the fit model function (clone the given one and keep a copy)
   fFunc = std::unique_ptr<IModelFunction>( dynamic_cast<IGradModelFunction *> ( func.Clone() ) );
   assert(fFunc);

   // creates the parameter settings
   fConfig.CreateParamsSettings(*fFunc);
   fFunc_v.reset();
}

void Fitter::SetFunction(const IGradModel1DFunction & func, bool useGradient)
{
   //std::cout << "set a 1d grad function" << std::endl;
   fUseGradient = useGradient;
   // the function is cloned when creating the adapter
   fFunc = std::shared_ptr<IModelFunction>(new ROOT::Math::MultiDimParamGradFunctionAdapter(func));

   // creates the parameter settings
   fConfig.CreateParamsSettings(*fFunc);
   fFunc_v.reset();
}

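// Illustrative usage sketch (not part of the original file): a model function
// is typically wrapped into the parametric-function interface before being
// handed to the fitter, e.g. a TF1 via ROOT::Math::WrappedMultiTF1:
//
//    TF1 f1("f1", "gaus", -5., 5.);
//    ROOT::Math::WrappedMultiTF1 wf(f1, 1);
//    ROOT::Fit::Fitter fitter;
//    fitter.SetFunction(wf);                      // clones the model function
//    fitter.Config().SetMinimizer("Minuit2", "Migrad");
//    // a subsequent fit on binned data then dispatches to DoLeastSquareFit()
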
bool Fitter::DoSetFCN(bool extFcn, const ROOT::Math::IMultiGenFunction & fcn, const double * params, unsigned int dataSize, int fitType) {
   // Set the objective function for the fit. The first parameter specifies whether the function
   // object is managed externally or internally. An internal function object needs to be cloned
   // because it is a temporary one. If params is not NULL, create the parameter settings.
   fUseGradient = false;
   unsigned int npar = fcn.NDim();
   if (npar == 0) {
      MATH_ERROR_MSG("Fitter::SetFCN","FCN function has zero parameters ");
      return false;
   }
   if (params != nullptr || fConfig.ParamsSettings().empty())
      fConfig.SetParamsSettings(npar, params);
   else {
      if ( fConfig.ParamsSettings().size() != npar) {
         MATH_ERROR_MSG("Fitter::SetFCN","wrong fit parameter settings");
         return false;
      }
   }

   fBinFit = (fitType == 3);
   fDataSize = dataSize;

   fFitType = fitType;

   // store an externally provided FCN without cloning it;
   // it will be cloned in fObjFunc after the fit
   if (extFcn) {
      fExtObjFunction = &fcn;
      fObjFunction.reset();
   }
   else {
      // the FCN is built from the Minuit interface, so the function object is created
      // internally in the Fitter class and needs to be cloned and managed
      fExtObjFunction = nullptr;
      fObjFunction.reset(fcn.Clone());
   }

   // in case a model function and data exist from a previous fit - reset the shared pointers
   if (fResult && fResult->FittedFunction() == nullptr && fFunc) fFunc.reset();
   if (fData) fData.reset();

   return true;
}
bool Fitter::SetFCN(const ROOT::Math::IMultiGenFunction & fcn, const double * params, unsigned int dataSize, int fitType) {
   // set the objective function for the fit
   return DoSetFCN(true, fcn, params, dataSize, fitType);
}
bool Fitter::SetFCN(const ROOT::Math::IMultiGenFunction &fcn, const IModelFunction & func, const double *params, unsigned int dataSize, int fitType) {
   // set the objective function for the fit and a model function
   if (!SetFCN(fcn, params, dataSize, fitType) ) return false;
   // need to set fFunc afterwards because SetFCN could reset fFunc
   fFunc = std::unique_ptr<IModelFunction>(dynamic_cast<IModelFunction *>(func.Clone()));
   if(fFunc) {
      fUseGradient = fcn.HasGradient();
      return true;
   }
   return false;
}

bool Fitter::SetFCN(const ROOT::Math::FitMethodFunction &fcn, const double *params)
{
   // set the objective function for the fit
   // if params is not NULL create the parameter settings
   int fitType = static_cast<int>(fcn.Type());
   if (!SetFCN(fcn, params, fcn.NPoints(), fitType))
      return false;
   fUseGradient = false;
   return true;
}

bool Fitter::SetFCN(const ROOT::Math::FitMethodGradFunction &fcn, const double *params)
{
   // set the objective function for the fit
   // if params is not NULL create the parameter settings
   int fitType = static_cast<int>(fcn.Type());
   if (!SetFCN(fcn, params, fcn.NPoints(), fitType))
      return false;
   fUseGradient = true;
   return true;
}

bool Fitter::FitFCN(const BaseFunc &fcn, const double *params, unsigned int dataSize, int fitType)
{
   // fit a user-provided FCN function
   // create fit parameter settings
   if (!SetFCN(fcn, params, dataSize, fitType))
      return false;
   return FitFCN();
}

bool Fitter::FitFCN(const ROOT::Math::FitMethodFunction &fcn, const double *params)
{
   // fit using the passed objective function for the fit
   if (!SetFCN(fcn, params))
      return false;
   return FitFCN();
}

bool Fitter::FitFCN(const ROOT::Math::FitMethodGradFunction &fcn, const double *params)
{
   // fit using the passed objective function for the fit
   if (!SetFCN(fcn, params))
      return false;
   return FitFCN();
}

bool Fitter::SetFCN(MinuitFCN_t fcn, int npar, const double *params, unsigned int dataSize, int fitType)
{
   // set a TMinuit-style FCN type (global function pointer)
   // create the corresponding objective function from that function

   if (npar == 0) {
      npar = fConfig.ParamsSettings().size();
      if (npar == 0) {
         MATH_ERROR_MSG("Fitter::FitFCN", "Fit Parameter settings have not been created ");
         return false;
      }
   }

   ROOT::Fit::FcnAdapter newFcn(fcn, npar);
   return DoSetFCN(false, newFcn, params, dataSize, fitType);
}

bool Fitter::FitFCN(MinuitFCN_t fcn, int npar, const double *params, unsigned int dataSize, int fitType)
{
   // fit using a Minuit-style FCN type (global function pointer)
   // create the corresponding objective function from that function
   if (!SetFCN(fcn, npar, params, dataSize, fitType))
      return false;
   fUseGradient = false;
   return FitFCN();
}

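// Illustrative usage sketch (not part of the original file): fitting with a
// custom objective through the SetFCN/FitFCN interface, wrapping a free
// function (here a hypothetical "myFcn") with ROOT::Math::Functor:
//
//    double myFcn(const double *par) {
//       double a = par[1] - par[0] * par[0];
//       double b = 1. - par[0];
//       return 100. * a * a + b * b;
//    }
//
//    ROOT::Fit::Fitter fitter;
//    ROOT::Math::Functor fcn(&myFcn, 2);
//    double start[2] = {-1.2, 1.};
//    fitter.SetFCN(fcn, start);  // creates the parameter settings from start
//    bool ok = fitter.FitFCN();  // minimize with the configured minimizer
//    const ROOT::Fit::FitResult &res = fitter.Result();
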
bool Fitter::FitFCN()
{
   // fit using the previously set FCN function

   if (!fExtObjFunction && !fObjFunction) {
      MATH_ERROR_MSG("Fitter::FitFCN", "Objective function has not been set");
      return false;
   }
   // init the minimizer
   if (!DoInitMinimizer())
      return false;
   // perform the minimization
   return DoMinimization();
}

bool Fitter::EvalFCN()
{
   // evaluate the FCN using the stored values in fConfig

   if (fFunc && fResult && fResult->FittedFunction() == nullptr)
      fFunc.reset();

   if (!ObjFunction()) {
      MATH_ERROR_MSG("Fitter::EvalFCN", "Objective function has not been set");
      return false;
   }
   // create a Fit result from the fit configuration
   fResult = std::make_unique<ROOT::Fit::FitResult>(fConfig);
   // evaluate the FCN once
   double fcnval = (*ObjFunction())(fResult->GetParams());
   // update the fit result
   fResult->fVal = fcnval;
   fResult->fNCalls++;
   return true;
}

bool Fitter::DoLeastSquareFit(const ROOT::EExecutionPolicy &executionPolicy)
{

   // perform a chi2 fit on a set of binned data
   std::shared_ptr<BinData> data = std::dynamic_pointer_cast<BinData>(fData);
   assert(data);

   // check function
   if (!fFunc && !fFunc_v) {
      MATH_ERROR_MSG("Fitter::DoLeastSquareFit", "model function is not set");
      return false;
   } else {

#ifdef DEBUG
      std::cout << "Fitter ParamSettings " << Config().ParamsSettings()[3].IsBound() << " lower limit "
                << Config().ParamsSettings()[3].LowerLimit() << " upper limit "
                << Config().ParamsSettings()[3].UpperLimit() << std::endl;
#endif

      fBinFit = true;
      fDataSize = data->Size();
      // check if fFunc provides a gradient
      if (!fUseGradient) {
         // do minimization without using the gradient
         if (fFunc_v) {
            return DoMinimization(std::make_unique<Chi2FCN<BaseFunc, IModelFunction_v>>(data, fFunc_v, executionPolicy));
         } else {
            return DoMinimization(std::make_unique<Chi2FCN<BaseFunc>>(data, fFunc, executionPolicy));
         }
      } else {
         // use the gradient
         if (fConfig.MinimizerOptions().PrintLevel() > 0)
            MATH_INFO_MSG("Fitter::DoLeastSquareFit", "use gradient from model function");

         if (fFunc_v) {
            std::shared_ptr<IGradModelFunction_v> gradFun = std::dynamic_pointer_cast<IGradModelFunction_v>(fFunc_v);
            if (gradFun) {
               return DoMinimization(std::make_unique<Chi2FCN<BaseGradFunc, IModelFunction_v>>(data, gradFun, executionPolicy));
            }
         } else {
            std::shared_ptr<IGradModelFunction> gradFun = std::dynamic_pointer_cast<IGradModelFunction>(fFunc);
            if (gradFun) {
               return DoMinimization(std::make_unique<Chi2FCN<BaseGradFunc>>(data, gradFun, executionPolicy));
            }
         }
         MATH_ERROR_MSG("Fitter::DoLeastSquareFit", "wrong type of function - it does not provide gradient");
      }
   }
   return false;
}
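
// For reference, the least-squares objective built above (Chi2FCN) is
//    chi2(p) = sum_i ( (y_i - f(x_i; p)) / sigma_i )^2,
// summed over the points of the BinData set.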

bool Fitter::DoBinnedLikelihoodFit(bool extended, const ROOT::EExecutionPolicy &executionPolicy)
{
   // perform a likelihood fit on a set of binned data
   // The fit is extended (Poisson logl) by default

   std::shared_ptr<BinData> data = std::dynamic_pointer_cast<BinData>(fData);
   assert(data);

   bool useWeight = fConfig.UseWeightCorrection();

   // check function
   if (!fFunc && !fFunc_v) {
      MATH_ERROR_MSG("Fitter::DoBinnedLikelihoodFit", "model function is not set");
      return false;
   }

   // logl fit (error should be 0.5): set it if different from the default value (of 1)
   if (fConfig.MinimizerOptions().ErrorDef() == gDefaultErrorDef) {
      fConfig.MinimizerOptions().SetErrorDef(0.5);
   }

   if (useWeight && fConfig.MinosErrors()) {
      MATH_INFO_MSG("Fitter::DoBinnedLikelihoodFit", "MINOS errors cannot be computed in weighted likelihood fits");
      fConfig.SetMinosErrors(false);
   }

   fBinFit = true;
   fDataSize = data->Size();

   if (!fUseGradient) {
      // do minimization without using the gradient
      if (fFunc_v) {
         // create a chi2 function to be used for the equivalent chi-square
         Chi2FCN<BaseFunc, IModelFunction_v> chi2(data, fFunc_v);
         auto logl = std::make_unique<PoissonLikelihoodFCN<BaseFunc, IModelFunction_v>>(data, fFunc_v, useWeight, extended, executionPolicy);
         return (useWeight) ? DoWeightMinimization(std::move(logl),&chi2) : DoMinimization(std::move(logl),&chi2);
      } else {
         // create a chi2 function to be used for the equivalent chi-square
         Chi2FCN<BaseFunc> chi2(data, fFunc);
         auto logl = std::make_unique<PoissonLikelihoodFCN<BaseFunc>>(data, fFunc, useWeight, extended, executionPolicy);
         return (useWeight) ? DoWeightMinimization(std::move(logl),&chi2) : DoMinimization(std::move(logl),&chi2);
      }
   } else {
      if (fConfig.MinimizerOptions().PrintLevel() > 0)
         MATH_INFO_MSG("Fitter::DoBinnedLikelihoodFit", "use gradient from model function");
      // a not-extended fit is not implemented in this case
      if (!extended) {
         MATH_WARN_MSG("Fitter::DoBinnedLikelihoodFit",
                       "Not-extended binned fit with gradient not yet supported - do an extended fit");
         extended = true;
      }
      if (fFunc_v) {
         // create a chi2 function to be used for the equivalent chi-square
         Chi2FCN<BaseFunc, IModelFunction_v> chi2(data, fFunc_v);
         std::shared_ptr<IGradModelFunction_v> gradFun = std::dynamic_pointer_cast<IGradModelFunction_v>(fFunc_v);
         if (!gradFun) {
            MATH_ERROR_MSG("Fitter::DoBinnedLikelihoodFit", "wrong type of function - it does not provide gradient");
            return false;
         }
         auto logl = std::make_unique<PoissonLikelihoodFCN<BaseGradFunc, IModelFunction_v>>(data, gradFun, useWeight, extended, executionPolicy);
         // do minimization
         return (useWeight) ? DoWeightMinimization(std::move(logl),&chi2) : DoMinimization(std::move(logl),&chi2);
      } else {
         // create a chi2 function to be used for the equivalent chi-square
         Chi2FCN<BaseFunc> chi2(data, fFunc);
         // check if fFunc provides a gradient
         std::shared_ptr<IGradModelFunction> gradFun = std::dynamic_pointer_cast<IGradModelFunction>(fFunc);
         if (!gradFun) {
            MATH_ERROR_MSG("Fitter::DoBinnedLikelihoodFit", "wrong type of function - it does not provide gradient");
            return false;
         }
         // use the gradient for the minimization
         auto logl = std::make_unique<PoissonLikelihoodFCN<BaseGradFunc>>(data, gradFun, useWeight, extended, executionPolicy);
         // do minimization
         return (useWeight) ? DoWeightMinimization(std::move(logl),&chi2) : DoMinimization(std::move(logl),&chi2);
      }
   }
   return false;
}
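
// For reference, the extended Poisson log-likelihood minimized above is
//    -ln L(p) = sum_i ( f(x_i; p) - n_i * ln f(x_i; p) )   (+ constant terms),
// where n_i is the observed content of bin i and f(x_i; p) the expected one.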

bool Fitter::DoUnbinnedLikelihoodFit(bool extended, const ROOT::EExecutionPolicy &executionPolicy) {
   // perform a likelihood fit on a set of unbinned data

   std::shared_ptr<UnBinData> data = std::dynamic_pointer_cast<UnBinData>(fData);
   assert(data);

   bool useWeight = fConfig.UseWeightCorrection();

   if (!fFunc && !fFunc_v) {
      MATH_ERROR_MSG("Fitter::DoUnbinnedLikelihoodFit","model function is not set");
      return false;
   }

   if (useWeight && fConfig.MinosErrors() ) {
      MATH_INFO_MSG("Fitter::DoUnbinnedLikelihoodFit","MINOS errors cannot be computed in weighted likelihood fits");
      fConfig.SetMinosErrors(false);
   }


   fBinFit = false;
   fDataSize = data->Size();

#ifdef DEBUG
   int ipar = 0;
   std::cout << "Fitter ParamSettings " << Config().ParamsSettings()[ipar].IsBound() << " lower limit " << Config().ParamsSettings()[ipar].LowerLimit() << " upper limit " << Config().ParamsSettings()[ipar].UpperLimit() << std::endl;
#endif

   // logl fit (error should be 0.5): set it if different from the default value (of 1)
   if (fConfig.MinimizerOptions().ErrorDef() == gDefaultErrorDef) {
      fConfig.MinimizerOptions().SetErrorDef(0.5);
   }

   if (!fUseGradient) {
      // do minimization without using the gradient
      if (fFunc_v) {
         auto logl = std::make_unique<LogLikelihoodFCN<BaseFunc, IModelFunction_v>>(data, fFunc_v, useWeight, extended, executionPolicy);
         // do minimization
         return (useWeight) ? DoWeightMinimization(std::move(logl)) : DoMinimization(std::move(logl));
      } else {
         auto logl = std::make_unique<LogLikelihoodFCN<BaseFunc>>(data, fFunc, useWeight, extended, executionPolicy);
         return (useWeight) ? DoWeightMinimization(std::move(logl)) : DoMinimization(std::move(logl));
      }
   } else {
      // use the gradient: check if fFunc provides one
      if (fConfig.MinimizerOptions().PrintLevel() > 0)
         MATH_INFO_MSG("Fitter::DoUnbinnedLikelihoodFit", "use gradient from model function");
      if (extended) {
         MATH_WARN_MSG("Fitter::DoUnbinnedLikelihoodFit",
                       "Extended unbinned fit with gradient not yet supported - do a not-extended fit");
         extended = false;
      }
      if (fFunc_v) {
         std::shared_ptr<IGradModelFunction_v> gradFun = std::dynamic_pointer_cast<IGradModelFunction_v>(fFunc_v);
         if (!gradFun) {
            MATH_ERROR_MSG("Fitter::DoUnbinnedLikelihoodFit", "wrong type of function - it does not provide gradient");
            return false;
         }
         auto logl = std::make_unique<LogLikelihoodFCN<BaseGradFunc, IModelFunction_v>>(data, gradFun, useWeight, extended, executionPolicy);
         return (useWeight) ? DoWeightMinimization(std::move(logl)) : DoMinimization(std::move(logl));
      } else {
         std::shared_ptr<IGradModelFunction> gradFun = std::dynamic_pointer_cast<IGradModelFunction>(fFunc);
         if (!gradFun) {
            MATH_ERROR_MSG("Fitter::DoUnbinnedLikelihoodFit", "wrong type of function - it does not provide gradient");
            return false;
         }
         auto logl = std::make_unique<LogLikelihoodFCN<BaseGradFunc>>(data, gradFun, useWeight, extended, executionPolicy);
         return (useWeight) ? DoWeightMinimization(std::move(logl)) : DoMinimization(std::move(logl));
      }
   }
   return false;
}

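// For reference, the (non-extended) unbinned negative log-likelihood
// minimized above is
//    -ln L(p) = - sum_i ln f(x_i; p)
// over the entries of the UnBinData set; in weighted fits each log term is
// multiplied by the event weight w_i.
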
bool Fitter::DoLinearFit() {

   std::shared_ptr<BinData> data = std::dynamic_pointer_cast<BinData>(fData);
   assert(data);

   // perform a linear fit on a set of binned data
   std::string prevminimizer = fConfig.MinimizerType();
   fConfig.SetMinimizer("Linear");

   fBinFit = true;

   bool ret = DoLeastSquareFit();
   fConfig.SetMinimizer(prevminimizer.c_str());
   return ret;
}

bool Fitter::CalculateHessErrors() {
   // compute the Hesse errors according to the configuration
   // set in the parameters, and append the values to the fit result
   if (!ObjFunction()) {
      MATH_ERROR_MSG("Fitter::CalculateHessErrors","Objective function has not been set");
      return false;
   }

   // need a special treatment in case of a weighted likelihood fit
   // (not yet implemented)
   if (fFitType == 2 && fConfig.UseWeightCorrection() ) {
      MATH_ERROR_MSG("Fitter::CalculateHessErrors","Re-computation of Hesse errors not implemented for weighted likelihood fits");
      MATH_INFO_MSG("Fitter::CalculateHessErrors","Do the Fit using configure option FitConfig::SetParabErrors()");
      return false;
   }

   // a fit Result pointer must exist when a minimizer exists
   if (fMinimizer && !fResult ) {
      MATH_ERROR_MSG("Fitter::CalculateHessErrors", "FitResult has not been created");
      return false;
   }

   // update the minimizer (recreate it if not done yet or if the name has changed)
   if (!DoUpdateMinimizerOptions()) {
      MATH_ERROR_MSG("Fitter::CalculateHessErrors","Error re-initializing the minimizer");
      return false;
   }

   if (!fMinimizer ) {
      // this should not happen
      MATH_ERROR_MSG("Fitter::CalculateHessErrors", "Need to do a fit before calculating the errors");
      assert(false);
      return false;
   }

   // run Hesse
   bool ret = fMinimizer->Hesse();
   if (!ret) MATH_WARN_MSG("Fitter::CalculateHessErrors","Error when calculating Hessian");

   // update the minimizer results with what comes out from Hesse;
   // in case the result is empty, create it from the FitConfig
   if (fResult->IsEmpty() )
      fResult = std::make_unique<ROOT::Fit::FitResult>(fConfig );

   // update the objective function in case it was an external one
   if (fExtObjFunction) fObjFunction.reset(fExtObjFunction->Clone());
   fResult->fObjFunc = fObjFunction;

   // re-give the minimizer instance in case it has been changed
   ret |= fResult->Update(fMinimizer, fConfig, ret);

   // when possible get ncalls from the FCN and set them in the fit result
   if (fResult->fNCalls == 0) {
      fResult->fNCalls = GetNCallsFromFCN();
   }

   // set also the new errors in the FitConfig
   if (fConfig.UpdateAfterFit() && ret) DoUpdateFitConfig();

   return ret;
}


bool Fitter::CalculateMinosErrors() {
   // compute the Minos errors according to the configuration
   // set in the parameters, and append the values to the fit result;
   // normally Minos errors are computed just after the minimization
   // (in DoMinimization) if the FitConfig::MinosErrors() flag is set

   if (!fMinimizer) {
      MATH_ERROR_MSG("Fitter::CalculateMinosErrors","Minimizer does not exist - cannot calculate Minos errors");
      return false;
   }

   if (!fResult || fResult->IsEmpty() ) {
      MATH_ERROR_MSG("Fitter::CalculateMinosErrors","Invalid Fit Result - cannot calculate Minos errors");
      return false;
   }

   if (fFitType == 2 && fConfig.UseWeightCorrection() ) {
      MATH_ERROR_MSG("Fitter::CalculateMinosErrors","Computation of MINOS errors not implemented for weighted likelihood fits");
      return false;
   }

   // update the minimizer (but do not re-create it in this case; an existing one must be used)
   if (!DoUpdateMinimizerOptions(false)) {
      MATH_ERROR_MSG("Fitter::CalculateMinosErrors","Error re-initializing the minimizer");
      return false;
   }

   // set the flag to compute Minos errors to false in the FitConfig, to avoid that
   // subsequent minimization calls perform unwanted Minos error calculations
   /// fConfig.SetMinosErrors(false);


   const std::vector<unsigned int> & ipars = fConfig.MinosParams();
   unsigned int n = !ipars.empty() ? ipars.size() : fResult->Parameters().size();
   bool ok = false;

   int iparNewMin = 0;
   int iparMax = n;
   int iter = 0;
   // re-run Minos for the parameters processed before a new minimum has been found
   do {
      if (iparNewMin > 0)
         MATH_INFO_MSG("Fitter::CalculateMinosErrors","Run again Minos for some parameters because a new Minimum has been found");
      iparNewMin = 0;
      for (int i = 0; i < iparMax; ++i) {
         double elow, eup;
         unsigned int index = (!ipars.empty()) ? ipars[i] : i;
         bool ret = fMinimizer->GetMinosError(index, elow, eup);
         // flags the case when a new minimum has been found
         if ((fMinimizer->MinosStatus() & 8) != 0) {
            iparNewMin = i;
         }
         if (ret)
            fResult->SetMinosError(index, elow, eup);
         ok |= ret;
      }

      iparMax = iparNewMin;
      iter++; // to avoid infinite looping
   }
   while( iparNewMin > 0 && iter < 10);
   if (!ok) {
      MATH_ERROR_MSG("Fitter::CalculateMinosErrors","Minos error calculation failed for all the selected parameters");
   }

   // update the objective function in case it was an external one
   if (fExtObjFunction) fObjFunction.reset(fExtObjFunction->Clone());
   fResult->fObjFunc = fObjFunction;

   // re-give the minimizer instance in case it has been changed,
   // but maintain the previous valid status. Do not set the result to false if Minos failed
   ok &= fResult->Update(fMinimizer, fConfig, fResult->IsValid());

   return ok;
}
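
// The asymmetric intervals stored above can then be read back from the fit
// result, e.g. (illustrative) via FitResult::LowerError(i) and
// FitResult::UpperError(i).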


// traits for distinguishing fit method functions from generic objective functions
template<class Func>
struct ObjFuncTrait {
   static unsigned int NCalls(const Func & ) { return 0; }
   static int Type(const Func & ) { return -1; }
   static bool IsGrad() { return false; }
};
template<>
struct ObjFuncTrait<ROOT::Math::FitMethodFunction> {
   static unsigned int NCalls(const ROOT::Math::FitMethodFunction & f ) { return f.NCalls(); }
   static int Type(const ROOT::Math::FitMethodFunction & f) { return f.Type(); }
   static bool IsGrad() { return false; }
};
template<>
struct ObjFuncTrait<ROOT::Math::FitMethodGradFunction> {
   static unsigned int NCalls(const ROOT::Math::FitMethodGradFunction & f ) { return f.NCalls(); }
   static int Type(const ROOT::Math::FitMethodGradFunction & f) { return f.Type(); }
   static bool IsGrad() { return true; }
};

bool Fitter::DoInitMinimizer() {
   // initialize the minimizer by creating it
   // and setting the objective function in it;
   // the objective function must have been set before
   auto objFunction = ObjFunction();
   if (!objFunction) {
      MATH_ERROR_MSG("Fitter::DoInitMinimizer","Objective function has not been set");
      return false;
   }

   // check the configuration and the objective function
   if ( fConfig.ParamsSettings().size() != objFunction->NDim() ) {
      MATH_ERROR_MSG("Fitter::DoInitMinimizer","wrong function dimension or wrong size for FitConfig");
      return false;
   }

   // create the Minimizer first
   // (assigning to the smart pointer deletes any previously existing one)
   fMinimizer = std::shared_ptr<ROOT::Math::Minimizer> ( fConfig.CreateMinimizer() );
   if (fMinimizer == nullptr) {
      MATH_ERROR_MSG("Fitter::DoInitMinimizer","Minimizer cannot be created");
      return false;
   }

   // in case of a gradient function one needs to downcast the pointer
   if (fUseGradient) {
      auto* gradfcn = dynamic_cast<const ROOT::Math::IMultiGradFunction *> (objFunction );
      if (!gradfcn) {
         MATH_ERROR_MSG("Fitter::DoInitMinimizer","wrong type of function - it does not provide gradient");
         return false;
      }
      fMinimizer->SetFunction( *gradfcn);
      // set also the Hessian if available
      if (Config().MinimizerType() == "Minuit2") {
         auto *fitGradFcn = dynamic_cast<const ROOT::Math::FitMethodGradFunction *>(gradfcn);
         if (fitGradFcn && fitGradFcn->HasHessian()) {
            auto hessFcn = [=](std::span<const double> x, double *hess) {
               unsigned int ndim = x.size();
               unsigned int nh = ndim * (ndim + 1) / 2;
               std::vector<double> h(nh);
               bool ret = fitGradFcn->Hessian(x.data(), h.data());
               if (!ret) return false;
               // expand the packed lower-triangular result into the full symmetric matrix
               for (unsigned int i = 0; i < ndim; i++) {
                  for (unsigned int j = 0; j <= i; j++) {
                     unsigned int index = j + i * (i + 1) / 2; // formula for j <= i
                     hess[ndim * i + j] = h[index];
                     if (j != i)
                        hess[ndim * j + i] = h[index];
                  }
               }
               return true;
            };
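            // e.g. for ndim = 3 the packed storage used above is
            //    h = { h00, h10, h11, h20, h21, h22 },
            // so element (i,j), with j <= i, sits at index j + i*(i+1)/2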

            fMinimizer->SetHessianFunction(hessFcn);
         }
      }
   }
   else
      fMinimizer->SetFunction( *objFunction);

   fMinimizer->SetVariables(fConfig.ParamsSettings().begin(), fConfig.ParamsSettings().end());

   // if parabolic errors are requested, have the minimizer run the correct error analysis (call HESSE)
   if (fConfig.ParabErrors()) fMinimizer->SetValidError(true);

   return true;

}


bool Fitter::DoUpdateMinimizerOptions(bool canDifferentMinim) {
   // update the minimizer options when re-doing a fit or when computing Hesse or Minos errors


   // create a new minimizer if it is of a different type;
   // the minimizer type string stored in FitResult is "minimizer name" + " / " + minimizer algo
   std::string newMinimType = fConfig.MinimizerName();
   if (fMinimizer && fResult && newMinimType != fResult->MinimizerType()) {
      // if a different minimizer is allowed (e.g. when calling Hesse)
      if (canDifferentMinim) {
         std::string msg = "Using now " + newMinimType;
         MATH_INFO_MSG("Fitter::DoUpdateMinimizerOptions", msg.c_str());
         if (!DoInitMinimizer() )
            return false;
      }
      else {
         std::string msg = "Cannot change minimizer. Continue using " + fResult->MinimizerType();
         MATH_WARN_MSG("Fitter::DoUpdateMinimizerOptions",msg.c_str());
      }
   }

   // create the minimizer if it was not done before
   if (!fMinimizer) {
      if (!DoInitMinimizer())
         return false;
   }

   // set the new minimizer options (but not functions and parameters)
   fMinimizer->SetOptions(fConfig.MinimizerOptions());
   return true;
}


bool Fitter::DoMinimization(const ROOT::Math::IMultiGenFunction * chi2func) {
   // perform the minimization (assume the minimizer has already been initialized)

   assert(fMinimizer);

   bool isValid = fMinimizer->Minimize();

   if (!fResult) fResult = std::make_unique<FitResult>();

   fResult->FillResult(fMinimizer, fConfig, fFunc, isValid, fDataSize, fFitType, chi2func);

   // if requested, run Minos after the minimization
   if (isValid && fConfig.MinosErrors()) {
      // the Minos error calculation will also update the FitResult
      CalculateMinosErrors();
   }

   // when possible get the number of calls from the FCN and set it in the fit result
   if (fResult->fNCalls == 0) {
      fResult->fNCalls = GetNCallsFromFCN();
   }

   // fill information in the fit result;
   // if using an external objective function, clone it for storing in the FitResult
   if (fExtObjFunction) fObjFunction.reset(fExtObjFunction->Clone());
   fResult->fObjFunc = fObjFunction;
   fResult->fFitData = fData;

#ifdef DEBUG
   std::cout << "ROOT::Fit::Fitter::DoMinimization : ncalls = " << fResult->fNCalls << " type of objfunc " << fFitType << " typeid: " << typeid(*fObjFunction).name() << " use gradient " << fUseGradient << std::endl;
#endif

   if (fConfig.NormalizeErrors() && fFitType == ROOT::Math::FitMethodFunction::kLeastSquare)
      fResult->NormalizeErrors();

   // set also the new parameter values and errors in the FitConfig
   if (fConfig.UpdateAfterFit() && isValid) DoUpdateFitConfig();

   return isValid;
}
template<class ObjFunc_t>
bool Fitter::DoMinimization(std::unique_ptr<ObjFunc_t> objFunc, const ROOT::Math::IMultiGenFunction * chi2func) {
   // perform the minimization, initializing the minimizer starting from a given objective function
   fFitType = objFunc->Type();
   fExtObjFunction = nullptr;
   fObjFunction = std::move(objFunc);
   return DoInitMinimizer() ? DoMinimization(chi2func) : false;
}
template<class ObjFunc_t>
bool Fitter::DoWeightMinimization(std::unique_ptr<ObjFunc_t> objFunc, const ROOT::Math::IMultiGenFunction * chi2func) {
   // perform the minimization, initializing the minimizer starting from a given objective function,
   // and afterwards apply the correction for weights. This applies only for logL fitting
   this->fFitType = objFunc->Type();
   fExtObjFunction = nullptr;
   // need to keep a temporary shared pointer to the objective function, since the unique pointer cannot be used after it has been moved
   std::shared_ptr<ObjFunc_t> sObjFunc{ std::move(objFunc)};
   fObjFunction = sObjFunc;
   if (!DoInitMinimizer()) return false;
   if (!DoMinimization(chi2func)) return false;
   sObjFunc->UseSumOfWeightSquare();
   return ApplyWeightCorrection(*sObjFunc);
}


void Fitter::DoUpdateFitConfig() {
   // update the fit configuration after a fit, using the obtained result
   if (fResult->IsEmpty() || !fResult->IsValid() ) return;
   for (unsigned int i = 0; i < fConfig.NPar(); ++i) {
      ParameterSettings & par = fConfig.ParSettings(i);
      par.SetValue( fResult->Value(i) );
      if (fResult->Error(i) > 0) par.SetStepSize( fResult->Error(i) );
   }
}

int Fitter::GetNCallsFromFCN()
{
   // retrieve the number of calls from the fit method functions;
   // this is used when the minimizer does not provide a way of returning the number of function calls
   if (!fUseGradient) {
      if (auto *fcn = dynamic_cast<const ROOT::Math::FitMethodFunction *>(fObjFunction.get()))
         return fcn->NCalls();
   } else if (auto *fcn = dynamic_cast<const ROOT::Math::FitMethodGradFunction *>(fObjFunction.get()))
      return fcn->NCalls();
   return 0;
}

bool Fitter::ApplyWeightCorrection(const ROOT::Math::IMultiGenFunction & loglw2, bool minimizeW2L) {
   // apply the correction for the weights:
   // compute the Hessian of the log-likelihood function using the sum of the weights squared.
   // This method assumes:
   // - a fit has been done before and a covariance matrix exists
   // - the objective function is a likelihood function and Likelihood::UseSumOfWeightSquare()
   //   has been called before

   if (fMinimizer == nullptr) {
      MATH_ERROR_MSG("Fitter::ApplyWeightCorrection","Must perform first a fit before applying the correction");
      return false;
   }

   unsigned int n = loglw2.NDim();
   // correct the errors for the weights squared
   std::vector<double> cov(n*n);
   bool ret = fMinimizer->GetCovMatrix(cov.data());
   if (!ret) {
      MATH_ERROR_MSG("Fitter::ApplyWeightCorrection","Previous fit has no valid Covariance matrix");
      return false;
   }
   // need to use the new objective function computed with the weights squared
   std::shared_ptr<ROOT::Math::IMultiGenFunction> objFunc(loglw2.Clone());
   fObjFunction.swap( objFunc );

   // need to re-initialize the minimizer for the changes applied to the
   // objective function
   if (!DoInitMinimizer()) return false;

   //std::cout << "Running Hesse ..." << std::endl;

   // possibly run a minimization before, ignoring its result
   if (minimizeW2L) fMinimizer->Minimize();
   // run Hesse on the log-likelihood built using the sum of the weights squared
   ret = fMinimizer->Hesse();
   if (!ret) {
      MATH_ERROR_MSG("Fitter::ApplyWeightCorrection","Error running Hesse on weight2 likelihood - cannot compute errors");
      return false;
   }

   if (fMinimizer->CovMatrixStatus() != 3) {
      MATH_WARN_MSG("Fitter::ApplyWeightCorrection","Covariance matrix for weighted likelihood is not accurate, the errors may not be reliable");
      if (fMinimizer->CovMatrixStatus() == 2)
         MATH_WARN_MSG("Fitter::ApplyWeightCorrection","Covariance matrix for weighted likelihood was forced to be defined positive");
      if (fMinimizer->CovMatrixStatus() <= 0)
         // probably this should have failed before
         MATH_ERROR_MSG("Fitter::ApplyWeightCorrection","Covariance matrix for weighted likelihood is not valid !");
   }

   // get the Hessian matrix from the weight-squared likelihood
   std::vector<double> hes(n*n);
   ret = fMinimizer->GetHessianMatrix(hes.data());
   if (!ret) {
      MATH_ERROR_MSG("Fitter::ApplyWeightCorrection","Error retrieving Hesse on weight2 likelihood - cannot compute errors");
      return false;
   }


   // perform the matrix product cov * hes * cov;
   // since we do not want to add a matrix-library dependence, do the product by hand
   // first do hes * cov
   std::vector<double> tmp(n*n);
   for (unsigned int i = 0; i < n; ++i) {
      for (unsigned int j = 0; j < n; ++j) {
         for (unsigned int k = 0; k < n; ++k)
            tmp[i*n+j] += hes[i*n + k] * cov[k*n + j];
      }
   }
   // now do the multiplication cov * tmp and save the result
   std::vector<double> newCov(n*n);
   for (unsigned int i = 0; i < n; ++i) {
      for (unsigned int j = 0; j < n; ++j) {
         for (unsigned int k = 0; k < n; ++k)
            newCov[i*n+j] += cov[i*n + k] * tmp[k*n + j];
      }
   }
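   // net effect: newCov = cov * hes * cov, the usual sandwich estimator, with
   // cov the covariance of the weighted likelihood fit and hes the Hessian of
   // the sum-of-weight-squared likelihood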
   // update the fit result with the new corrected covariance matrix
   unsigned int k = 0;
   for (unsigned int i = 0; i < n; ++i) {
      fResult->fErrors[i] = std::sqrt( newCov[i*(n+1)] );
      for (unsigned int j = 0; j <= i; ++j)
         fResult->fCovMatrix[k++] = newCov[i*n + j];
   }

   // restore the previously used objective function
   fObjFunction.swap( objFunc );

   return true;
}

} // namespace ROOT::Fit