60      MATH_WARN_MSG("Fitter::SetFunction", "Requested function does not provide gradient - use it as non-gradient function ");
86      MATH_WARN_MSG("Fitter::SetFunction", "Requested function does not provide gradient - use it as non-gradient function ");
132   unsigned int npar = fcn.NDim();
134      MATH_ERROR_MSG("Fitter::SetFCN", "FCN function has zero parameters ");
240      MATH_ERROR_MSG("Fitter::FitFCN", "Fit Parameter settings have not been created ");
265      MATH_ERROR_MSG("Fitter::FitFCN", "Objective function has not been set");
283      MATH_ERROR_MSG("Fitter::FitFCN", "Objective function has not been set");
300   std::shared_ptr<BinData> data = std::dynamic_pointer_cast<BinData>(fData);
305      MATH_ERROR_MSG("Fitter::DoLeastSquareFit", "model function is not set");
310   std::cout << "Fitter ParamSettings " << Config().ParamsSettings()[3].IsBound() << " lower limit "
311             << Config().ParamsSettings()[3].LowerLimit() << " upper limit "
312             << Config().ParamsSettings()[3].UpperLimit() << std::endl;
328      MATH_INFO_MSG("Fitter::DoLeastSquareFit", "use gradient from model function");
331         std::shared_ptr<IGradModelFunction_v> gradFun = std::dynamic_pointer_cast<IGradModelFunction_v>(fFunc_v);
336         std::shared_ptr<IGradModelFunction> gradFun = std::dynamic_pointer_cast<IGradModelFunction>(fFunc);
341      MATH_ERROR_MSG("Fitter::DoLeastSquareFit", "wrong type of function - it does not provide gradient");
352   std::shared_ptr<BinData> data = std::dynamic_pointer_cast<BinData>(fData);
359      MATH_ERROR_MSG("Fitter::DoBinnedLikelihoodFit", "model function is not set");
369      MATH_INFO_MSG("Fitter::DoBinnedLikelihoodFit", "MINOS errors cannot be computed in weighted likelihood fits");
391      MATH_INFO_MSG("Fitter::DoLikelihoodFit", "use gradient from model function");
395                     "Not-extended binned fit with gradient not yet supported - do an extended fit");
401         std::shared_ptr<IGradModelFunction_v> gradFun = std::dynamic_pointer_cast<IGradModelFunction_v>(fFunc_v);
403            MATH_ERROR_MSG("Fitter::DoBinnedLikelihoodFit", "wrong type of function - it does not provide gradient");
413         std::shared_ptr<IGradModelFunction> gradFun = std::dynamic_pointer_cast<IGradModelFunction>(fFunc);
415            MATH_ERROR_MSG("Fitter::DoBinnedLikelihoodFit", "wrong type of function - it does not provide gradient");
430   std::shared_ptr<UnBinData> data = std::dynamic_pointer_cast<UnBinData>(fData);
436      MATH_ERROR_MSG("Fitter::DoUnbinnedLikelihoodFit", "model function is not set");
441      MATH_INFO_MSG("Fitter::DoUnbinnedLikelihoodFit", "MINOS errors cannot be computed in weighted likelihood fits");
451   std::cout << "Fitter ParamSettings " << Config().ParamsSettings()[ipar].IsBound() << " lower limit " << Config().ParamsSettings()[ipar].LowerLimit() << " upper limit " << Config().ParamsSettings()[ipar].UpperLimit() << std::endl;
472      MATH_INFO_MSG("Fitter::DoUnbinnedLikelihoodFit", "use gradient from model function");
475                     "Extended unbinned fit with gradient not yet supported - do a not-extended fit");
479         std::shared_ptr<IGradModelFunction_v> gradFun = std::dynamic_pointer_cast<IGradModelFunction_v>(fFunc_v);
481            MATH_ERROR_MSG("Fitter::DoUnbinnedLikelihoodFit", "wrong type of function - it does not provide gradient");
487         std::shared_ptr<IGradModelFunction> gradFun = std::dynamic_pointer_cast<IGradModelFunction>(fFunc);
489            MATH_ERROR_MSG("Fitter::DoUnbinnedLikelihoodFit", "wrong type of function - it does not provide gradient");
502   std::shared_ptr<BinData> data = std::dynamic_pointer_cast<BinData>(fData);
521      MATH_ERROR_MSG("Fitter::CalculateHessErrors", "Objective function has not been set");
528      MATH_ERROR_MSG("Fitter::CalculateHessErrors", "Re-computation of Hesse errors not implemented for weighted likelihood fits");
529      MATH_INFO_MSG("Fitter::CalculateHessErrors", "Do the Fit using configure option FitConfig::SetParabErrors()");
535      MATH_ERROR_MSG("Fitter::CalculateHessErrors", "FitResult has not been created");
541         MATH_ERROR_MSG("Fitter::CalculateHessErrors", "Error re-initializing the minimizer");
547      MATH_ERROR_MSG("Fitter::CalculateHessErrors", "Need to do a fit before calculating the errors");
554   if (!ret) MATH_WARN_MSG("Fitter::CalculateHessErrors", "Error when calculating Hessian");
588      MATH_ERROR_MSG("Fitter::CalculateMinosErrors", "Minimizer does not exist - cannot calculate Minos errors");
593      MATH_ERROR_MSG("Fitter::CalculateMinosErrors", "Invalid Fit Result - cannot calculate Minos errors");
598      MATH_ERROR_MSG("Fitter::CalculateMinosErrors", "Computation of MINOS errors not implemented for weighted likelihood fits");
604         MATH_ERROR_MSG("Fitter::CalculateMinosErrors", "Error re-initializing the minimizer");
623      MATH_INFO_MSG("Fitter::CalculateMinosErrors", "Run again Minos for some parameters because a new Minimum has been found");
625      for (int i = 0; i < iparMax; ++i) {
643      MATH_ERROR_MSG("Fitter::CalculateMinosErrors", "Minos error calculation failed for all the selected parameters");
662   static unsigned int NCalls(const Func &) { return 0; }
663   static int Type(const Func &) { return -1; }
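Lines 662-663 are the generic fallbacks of a small compile-time dispatch helper: for an arbitrary objective the number of calls and the fit-method type are unknown, while the specializations listed at the end of this page return the real values for FitMethodFunction objects (used at lines 861/863). A stripped-down illustration of the same idiom with invented types, independent of ROOT.

#include <iostream>

struct PlainFunc { };                        // stands in for a generic objective
struct CountingFunc { int nCalls = 42; };    // stands in for FitMethodFunction

// Generic fallback: nothing is known about the function.
template <class Func>
struct ObjFuncTrait {
   static unsigned int NCalls(const Func &) { return 0; }
   static int Type(const Func &) { return -1; }
};

// Specialization for the "rich" type: forward to its own bookkeeping.
template <>
struct ObjFuncTrait<CountingFunc> {
   static unsigned int NCalls(const CountingFunc &f) { return f.nCalls; }
   static int Type(const CountingFunc &) { return 1; }
};

int main()
{
   PlainFunc p;
   CountingFunc c;
   std::cout << ObjFuncTrait<PlainFunc>::NCalls(p) << " "       // 0: unknown
             << ObjFuncTrait<CountingFunc>::NCalls(c) << "\n";  // 42: real counter
}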
685      MATH_ERROR_MSG("Fitter::DoInitMinimizer", "Objective function has not been set");
691      MATH_ERROR_MSG("Fitter::DoInitMinimizer", "wrong function dimension or wrong size for FitConfig");
699      MATH_ERROR_MSG("Fitter::DoInitMinimizer", "Minimizer cannot be created");
707         MATH_ERROR_MSG("Fitter::DoInitMinimizer", "wrong type of function - it does not provide gradient");
712      if (Config().MinimizerType() == "Minuit2") {
715         auto hessFcn = [=](std::span<const double> x, double *hess) {
716            unsigned int ndim = x.size();
717            unsigned int nh = ndim * (ndim + 1) / 2;
718            std::vector<double> h(nh);
720            if (!ret) return false;
721            for (unsigned int i = 0; i < ndim; i++) {
722               for (unsigned int j = 0; j <= i; j++) {
723                  unsigned int index = j + i * (i + 1) / 2;
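The Minuit2-specific lambda at lines 715-723 moves a symmetric Hessian between its packed lower-triangular representation (ndim*(ndim+1)/2 values) and a flat buffer, using index = j + i*(i+1)/2. A standalone illustration of that packing with a 3x3 matrix; the numbers are arbitrary.

#include <cstdio>
#include <vector>

int main()
{
   const unsigned int ndim = 3;
   const unsigned int nh = ndim * (ndim + 1) / 2;   // 6 packed elements for a 3x3 symmetric matrix

   // Packed storage: row i contributes i+1 elements, columns j = 0..i.
   std::vector<double> h(nh);
   for (unsigned int i = 0; i < ndim; i++)
      for (unsigned int j = 0; j <= i; j++)
         h[j + i * (i + 1) / 2] = 10.0 * i + j;     // fill with recognizable values

   // Unpack back to a full matrix, using symmetry for the upper triangle.
   for (unsigned int i = 0; i < ndim; i++) {
      for (unsigned int j = 0; j < ndim; j++) {
         unsigned int a = (i >= j) ? i : j;         // element (i,j) == (j,i)
         unsigned int b = (i >= j) ? j : i;
         std::printf("%6.1f ", h[b + a * (a + 1) / 2]);
      }
      std::printf("\n");
   }
}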
765      std::string msg = "Cannot change minimizer. Continue using " + fResult->MinimizerType();
810   std::cout << "ROOT::Fit::Fitter::DoMinimization : ncalls = " << fResult->fNCalls << " type of objfunc " << fFitFitResType << " typeid: " << typeid(*fObjFunction).name() << " use gradient " << fUseGradient << std::endl;
821 template <class ObjFunc_t>
829 template <class ObjFunc_t>
848   for (unsigned int i = 0; i < fConfig.NPar(); ++i) {
861 return fcn->NCalls();
863 return fcn->NCalls();
877      MATH_ERROR_MSG("Fitter::ApplyWeightCorrection", "Must perform first a fit before applying the correction");
881   unsigned int n = loglw2.NDim();
883   std::vector<double> cov(n * n);
886      MATH_ERROR_MSG("Fitter::ApplyWeightCorrection", "Previous fit has no valid Covariance matrix");
890   std::shared_ptr<ROOT::Math::IMultiGenFunction> objFunc(loglw2.Clone());
905      MATH_ERROR_MSG("Fitter::ApplyWeightCorrection", "Error running Hesse on weight2 likelihood - cannot compute errors");
910      MATH_WARN_MSG("Fitter::ApplyWeightCorrection", "Covariance matrix for weighted likelihood is not accurate, the errors may not be reliable");
912      MATH_WARN_MSG("Fitter::ApplyWeightCorrection", "Covariance matrix for weighted likelihood was forced to be defined positive");
915      MATH_ERROR_MSG("Fitter::ApplyWeightCorrection", "Covariance matrix for weighted likelihood is not valid !");
919   std::vector<double> hes(n * n);
922      MATH_ERROR_MSG("Fitter::ApplyWeightCorrection", "Error retrieving Hesse on weight2 likelihood - cannot compute errors");
930   std::vector<double> tmp(n * n);
931   for (unsigned int i = 0; i < n; ++i) {
932      for (unsigned int j = 0; j < n; ++j) {
933         for (unsigned int k = 0; k < n; ++k)
939   for (unsigned int i = 0; i < n; ++i) {
940      for (unsigned int j = 0; j < n; ++j) {
941         for (unsigned int k = 0; k < n; ++k)
947   for (unsigned int i = 0; i < n; ++i) {
949      for (unsigned int j = 0; j <= i; ++j)
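The triple loops at lines 930-949 of ApplyWeightCorrection appear to build the corrected covariance matrix as C_corr = C * H_w2 * C, where C is the covariance from the weighted fit and H_w2 is the Hessian of the log-likelihood built with squared weights, and then keep only the lower triangle. A plain-C++ sketch of those two matrix products on flat n*n buffers; the 2x2 input numbers are invented.

#include <cstdio>
#include <vector>

int main()
{
   const unsigned int n = 2;
   // Invented inputs: cov = covariance of the weighted fit, hes = Hessian of the weight^2 likelihood.
   std::vector<double> cov = {0.04, 0.01, 0.01, 0.09};
   std::vector<double> hes = {30.0, -5.0, -5.0, 12.0};

   // tmp = hes * cov   (mirrors lines 931-933 of the listing)
   std::vector<double> tmp(n * n, 0.0);
   for (unsigned int i = 0; i < n; ++i)
      for (unsigned int j = 0; j < n; ++j)
         for (unsigned int k = 0; k < n; ++k)
            tmp[i * n + j] += hes[i * n + k] * cov[k * n + j];

   // newCov = cov * tmp = cov * hes * cov   (mirrors lines 939-941)
   std::vector<double> newCov(n * n, 0.0);
   for (unsigned int i = 0; i < n; ++i)
      for (unsigned int j = 0; j < n; ++j)
         for (unsigned int k = 0; k < n; ++k)
            newCov[i * n + j] += cov[i * n + k] * tmp[k * n + j];

   // Only the lower triangle is needed for the symmetric result (mirrors lines 947-949).
   for (unsigned int i = 0; i < n; ++i)
      for (unsigned int j = 0; j <= i; ++j)
         std::printf("corrected cov(%u,%u) = %g\n", i, j, newCov[i * n + j]);
}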
#define MATH_INFO_MSG(loc, str)
Pre-processor macro to report messages which can be configured to use ROOT error or simply an std::io...
#define MATH_ERROR_MSG(loc, str)
#define MATH_WARN_MSG(loc, str)
const std::vector< unsigned int > & MinosParams() const
return vector of parameter indices for which the Minos Error will be computed
bool UpdateAfterFit() const
Update configuration after a fit using the FitResult.
void SetMinimizer(const char *type, const char *algo=nullptr)
set minimizer type and algorithm
void SetMinosErrors(bool on=true)
set Minos errors computation to be performed after fitting
bool NormalizeErrors() const
flag to check if resulting errors are to be normalized according to chi2/ndf
bool ParabErrors() const
do analysis for parabolic errors
unsigned int NPar() const
number of parameters settings
void SetParamsSettings(unsigned int npar, const double *params, const double *vstep=nullptr)
set the parameter settings from number of parameters and a vector of values and optionally step value...
std::string MinimizerName() const
return Minimizer full name (type / algorithm)
bool UseWeightCorrection() const
Apply Weight correction for error matrix computation.
const std::vector< ROOT::Fit::ParameterSettings > & ParamsSettings() const
get the vector of parameter settings (const method)
ROOT::Math::Minimizer * CreateMinimizer()
create a new minimizer according to chosen configuration
void CreateParamsSettings(const ROOT::Math::IParamMultiFunctionTempl< T > &func)
set the parameter settings from a model function.
const std::string & MinimizerType() const
return type of minimizer package
const ParameterSettings & ParSettings(unsigned int i) const
get the parameter settings for the i-th parameter (const method)
ROOT::Math::MinimizerOptions & MinimizerOptions()
access to the minimizer control parameter (non const method)
bool MinosErrors() const
do minos errors analysis on the parameters
bool EvalFCN()
Perform a simple FCN evaluation.
const ROOT::Math::IMultiGenFunction * fExtObjFunction
! pointer to an external FCN
bool FitFCN()
Perform a fit with the previously set FCN function.
bool DoMinimization(std::unique_ptr< ObjFunc_t > f, const ROOT::Math::IMultiGenFunction *chifunc=nullptr)
do minimization
bool DoSetFCN(bool useExtFCN, const ROOT::Math::IMultiGenFunction &fcn, const double *params, unsigned int dataSize, int fitType)
Set Objective function.
int fDataSize
size of data sets (needed for Fumili or LM fitters)
bool DoUnbinnedLikelihoodFit(bool extended=false, const ROOT::EExecutionPolicy &executionPolicy=ROOT::EExecutionPolicy::kSequential)
un-binned likelihood fit
const ROOT::Math::IBaseFunctionMultiDimTempl< double > * ObjFunction() const
Return pointer to the used objective function for fitting.
std::shared_ptr< ROOT::Math::Minimizer > fMinimizer
! pointer to used minimizer
bool DoWeightMinimization(std::unique_ptr< ObjFunc_t > f, const ROOT::Math::IMultiGenFunction *chifunc=nullptr)
bool DoBinnedLikelihoodFit(bool extended=true, const ROOT::EExecutionPolicy &executionPolicy=ROOT::EExecutionPolicy::kSequential)
binned likelihood fit
int fFitType
type of fit (0 undefined, 1 least square, 2 likelihood, 3 binned likelihood)
std::shared_ptr< ROOT::Fit::FitData > fData
! pointer to the fit data (binned or unbinned data)
bool fUseGradient
flag to indicate if using gradient or not
bool fBinFit
flag to indicate if the fit is binned (if false the fit is unbinned or undefined); the flag is used ...
std::shared_ptr< ROOT::Math::IMultiGenFunction > fObjFunction
! pointer to used objective function
bool ApplyWeightCorrection(const ROOT::Math::IMultiGenFunction &loglw2, bool minimizeW2L=false)
apply correction in the error matrix for the weights for likelihood fits This method can be called on...
const FitConfig & Config() const
access to the fit configuration (const method)
bool DoLeastSquareFit(const ROOT::EExecutionPolicy &executionPolicy=ROOT::EExecutionPolicy::kSequential)
least square fit
bool SetFCN(unsigned int npar, Function &fcn, const double *params=nullptr, unsigned int dataSize=0, int fitType=0)
Set a generic FCN function as a C++ callable object implementing double () (const double *). Note that...
std::shared_ptr< IModelFunction_v > fFunc_v
! copy of the fitted function containing on output the fit result
std::shared_ptr< ROOT::Fit::FitResult > fResult
! pointer to the object containing the result of the fit
bool CalculateMinosErrors()
perform an error analysis on the result using MINOS To be called only after fitting and when a minimi...
bool DoUpdateMinimizerOptions(bool canDifferentMinim=true)
void SetFunction(const IModelFunction &func, bool useGradient=false)
Set the fitted function (model function) from a parametric function interface.
bool CalculateHessErrors()
perform an error analysis on the result using the Hessian Errors are obtained from the inverse of the...
FitConfig fConfig
fitter configuration (options and parameter settings)
Fitter()
Default constructor.
std::shared_ptr< IModelFunction > fFunc
! copy of the fitted function containing on output the fit result
bool DoLinearFit()
linear least square fit
Class describing value, limits and step size of the parameters. Provides functionality also to set/re...
void SetValue(double val)
set the value
void SetStepSize(double err)
set the step size
FitMethodFunction class: interface for objective functions (like chi2 and likelihood used in the fit) ...
Documentation for the abstract class IBaseFunctionMultiDim.
virtual IBaseFunctionMultiDimTempl< T > * Clone() const =0
Clone a function.
Interface (abstract class) for multi-dimensional functions providing a gradient calculation.
Specialized IParamFunction interface (abstract class) for one-dimensional parametric functions. It is ...
Interface (abstract class) for parametric gradient multi-dimensional functions providing in addition ...
Interface (abstract class) for parametric one-dimensional gradient functions providing in addition to...
double ErrorDef() const
error definition
int PrintLevel() const
non-static methods for retrieving options
void SetErrorDef(double err)
set error def
static double DefaultErrorDef()
MultiDimParamFunctionAdapter class to wrap a one-dimensional parametric function in a multi dimension...
MultiDimParamGradFunctionAdapter class to wrap a one-dimensional parametric gradient function in a mu...
const_iterator begin() const
const_iterator end() const
Namespace for the fitting classes.
static int Type(const ROOT::Math::FitMethodFunction &f)
static unsigned int NCalls(const ROOT::Math::FitMethodFunction &f)
static int Type(const ROOT::Math::FitMethodGradFunction &f)
static unsigned int NCalls(const ROOT::Math::FitMethodGradFunction &f)
static unsigned int NCalls(const Func &)
static int Type(const Func &)
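Most of the configuration entries listed above are reached through Fitter::Config(). A hedged sketch putting a few of them together; the parameter values and limits are invented, the non-const Config() and ParSettings(i) accessors are assumed from Fit/Fitter.h and Fit/FitConfig.h, and ParameterSettings::SetLimits and MinimizerOptions::SetPrintLevel are assumed from the corresponding headers.

#include "Fit/Fitter.h"

void configure(ROOT::Fit::Fitter &fitter)
{
   double start[2] = {1.0, 0.5};
   fitter.Config().SetMinimizer("Minuit2", "Migrad");     // minimizer package / algorithm
   fitter.Config().SetParamsSettings(2, start);           // create the ParameterSettings
   fitter.Config().ParSettings(0).SetValue(1.2);          // tweak a single parameter ...
   fitter.Config().ParSettings(0).SetStepSize(0.01);      // ... its initial step ...
   fitter.Config().ParSettings(1).SetLimits(0.0, 10.0);   // ... and its allowed range
   fitter.Config().SetMinosErrors(true);                  // request Minos after the minimization
   fitter.Config().MinimizerOptions().SetPrintLevel(1);   // minimizer verbosity
}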