// internal structure defining a "parameter derivative" function:
// it fixes one parameter index and exposes the derivative as a function of (x, p)
template <class GradFunc = IGradModelFunction>
struct ParamDerivFunc {
   ParamDerivFunc(const GradFunc & f) : fFunc(f), fIpar(0) {}
   void SetDerivComponent(unsigned int ipar) { fIpar = ipar; }
   double operator() (const double *x, const double *p) const {
      return fFunc.ParameterDerivative( x, p, fIpar );
   }
   unsigned int NDim() const { return fFunc.NDim(); }
   const GradFunc & fFunc;
   unsigned int fIpar;
};
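A minimal usage sketch of the adapter above (not part of the original file): ToyGradModel and exampleParamDerivFunc are hypothetical names, and the snippet assumes it is compiled together with the listing, where IGradModelFunction (the default template argument) is declared. Fixing the derivative component turns the per-parameter derivative into an ordinary callable of (x, p), which is what the one-dimensional integration used by CalculateGradientIntegral below expects.

#include <cmath>

// Hypothetical toy model: f(x;p) = p0 * exp(p1*x), with analytic parameter derivatives.
struct ToyGradModel {
   unsigned int NDim() const { return 1; }
   double ParameterDerivative(const double *x, const double *p, unsigned int ipar) const {
      return (ipar == 0) ? std::exp(p[1] * x[0])                 // d f / d p0
                         : p[0] * x[0] * std::exp(p[1] * x[0]);  // d f / d p1
   }
};

void exampleParamDerivFunc() {
   ToyGradModel model;
   ParamDerivFunc<ToyGradModel> dfunc(model);   // adapter from the listing above
   const double x[1] = {0.5};
   const double p[2] = {2.0, -1.0};
   dfunc.SetDerivComponent(1);                  // select d f / d p1
   double dfdp1 = dfunc(x, p);                  // equals p0 * x * exp(p1*x)
   (void) dfdp1;
}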
// simple gradient calculator using finite differences
class SimpleGradientCalculator {

public:
   // construct from the model function; gdim is the gradient dimension
   // (number of parameters for parameter gradients, number of coordinates for coordinate gradients)
   SimpleGradientCalculator(int gdim, const IModelFunction & func, double eps = 2.E-8, int istrat = 1) :
      fEps(eps),
      // ...
      fVec(std::vector<double>(gdim) )
   {}

   // internal method to calculate a single partial derivative with respect to parameter k
   double DoParameterDerivative(const double *x, const double *p, double f0, int k) const {
      double p0 = p[k];
      double h = std::max( fEps* std::abs(p0), 8.0*fPrecision*(std::abs(p0) + fPrecision) );
      double deriv = 0;
      // shift p_k by h in the cached parameter vector fVec and re-evaluate the function
      double f1 = fFunc(x, &fVec.front() );
      if (fStrategy > 1) {
         // 3-point rule: evaluate also on the other side of p_k
         double f2 = fFunc(x, &fVec.front() );
         deriv = 0.5 * ( f2 - f1 )/ h;
      }
      else
         // 2-point rule: use the cached value f0 at the nominal parameters
         deriv = ( f1 - f0 )/ h;
      // ... (restore the original parameter value in fVec)
      return deriv;
   }

   unsigned int NDim() const { /* ... */ }

   unsigned int NPar() const { /* ... */ }

   double ParameterDerivative(const double *x, const double *p, int ipar) const {
      // copy the parameter values into the cache and evaluate the function at the nominal point
      std::copy(p, p+fN, fVec.begin());
      double f0 = fFunc(x, p);
      return DoParameterDerivative(x, p, f0, ipar);
   }

   // calculate all the parameter derivatives at the same time
   void ParameterGradient(const double *x, const double * p, double f0, double * g) {
      std::copy(p, p+fN, fVec.begin());
      for (unsigned int k = 0; k < fN; ++k) {
         g[k] = DoParameterDerivative(x, p, f0, k);
      }
   }

   // gradient with respect to the coordinates (same finite-difference scheme, varying x instead of p)
   void Gradient(const double *x, const double * p, double f0, double * g) {
      std::copy(x, x+fN, fVec.begin());
      for (unsigned int k = 0; k < fN; ++k) {
         double x0 = x[k];
         double h = std::max( fEps* std::abs(x0), 8.0*fPrecision*(std::abs(x0) + fPrecision) );
         // shift x_k by h in the cached coordinate vector fVec and re-evaluate the function
         double f1 = fFunc( &fVec.front(), p );
         if (fStrategy > 1) {
            double f2 = fFunc( &fVec.front(), p );
            g[k] = 0.5 * ( f2 - f1 )/ h;
         }
         else
            g[k] = ( f1 - f0 )/ h;
         // ... (restore the original coordinate value in fVec)
      }
   }

private:
   // ... (fEps, fPrecision, fStrategy, fN, fFunc)
   mutable std::vector<double> fVec;   // cached parameter (or coordinate) values
};
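The step-size heuristic and the 2-point/3-point rules used by SimpleGradientCalculator can be reproduced in isolation. The following standalone sketch (plain C++, no ROOT types; the eps and precision defaults mirror the constructor above, and numParamDeriv is a hypothetical name) writes the 3-point rule in the conventional (f(p+h) - f(p-h)) / 2h orientation:

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <functional>
#include <vector>

// Numerical d f / d p[k] with the step heuristic h = max(eps*|p_k|, 8*prec*(|p_k|+prec)).
double numParamDeriv(const std::function<double(const double *, const double *)> &f,
                     const double *x, const double *p, unsigned int k, unsigned int npar,
                     int strategy = 1, double eps = 2.E-8, double prec = 1.E-8)
{
   std::vector<double> pv(p, p + npar);                 // local copy, mirrors fVec
   const double p0 = p[k];
   const double h  = std::max(eps * std::abs(p0), 8.0 * prec * (std::abs(p0) + prec));
   const double f0 = f(x, p);                           // value at the nominal parameters
   pv[k] = p0 + h;
   const double f1 = f(x, pv.data());
   if (strategy > 1) {                                  // 3-point (central) rule: one extra evaluation
      pv[k] = p0 - h;
      const double f2 = f(x, pv.data());
      return 0.5 * (f1 - f2) / h;
   }
   return (f1 - f0) / h;                                // 2-point (forward) rule
}

int main() {
   auto model = [](const double *x, const double *p) { return p[0] * std::exp(p[1] * x[0]); };
   const double x[1] = {0.5};
   const double p[2] = {2.0, -1.0};
   const double exact = p[0] * x[0] * std::exp(p[1] * x[0]);   // analytic d f / d p1
   std::printf("forward: %.8f  central: %.8f  exact: %.8f\n",
               numParamDeriv(model, x, p, 1, 2, 1),
               numParamDeriv(model, x, p, 1, 2, 2),
               exact);
}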
// function to avoid infinities or NaN: clamp the value into the representable double range
double CorrectValue(double rval) {
   if (rval > - std::numeric_limits<double>::max() && rval < std::numeric_limits<double>::max() )
      return rval;
   else if (rval < 0)
      // case -inf
      return -std::numeric_limits<double>::max();
   else
      // case +inf or NaN
      return + std::numeric_limits<double>::max();
}

// as above, but clamp in place and report whether the value was already finite
bool CheckInfNaNValue(double &rval)
{
   if (rval > - std::numeric_limits<double>::max() && rval < std::numeric_limits<double>::max() )
      return true;
   else if (rval < 0) {
      // case -inf
      rval = -std::numeric_limits<double>::max();
      return false;
   }
   else {
      // case +inf or NaN
      rval = + std::numeric_limits<double>::max();
      return false;
   }
}
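A standalone sketch of the same clamping idea: any value that is not strictly inside (-DBL_MAX, DBL_MAX), i.e. an infinity or a NaN, is replaced by the nearest representable extreme so the minimizer never sees a non-finite objective value. clampToFiniteRange is a hypothetical name, not a ROOT function.

#include <cmath>
#include <cstdio>
#include <limits>

double clampToFiniteRange(double rval) {
   const double kMax = std::numeric_limits<double>::max();
   if (rval > -kMax && rval < kMax) return rval;   // finite value: keep as is
   return (rval < 0) ? -kMax : +kMax;              // -inf -> -DBL_MAX, +inf or NaN -> +DBL_MAX
}

int main() {
   std::printf("%g %g %g\n",
               clampToFiniteRange(1.5),
               clampToFiniteRange(-std::numeric_limits<double>::infinity()),
               clampToFiniteRange(std::nan("")));
}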
// calculate the gradient of the bin integral of the model function over [x1, x2]:
// integrate each parameter derivative separately using the ParamDerivFunc adapter
template <class GFunc>
void CalculateGradientIntegral(const GFunc & gfunc,
                               const double * x1, const double * x2, const double * p, double * g) {

   // needs to calculate the integral for each partial derivative
   ParamDerivFunc<GFunc> pfunc( gfunc);
   IntegralEvaluator<ParamDerivFunc<GFunc> > igDerEval( pfunc, p, true);
   // loop on the parameters
   unsigned int npar = gfunc.NPar();
   for (unsigned int k = 0; k < npar; ++k ) {
      pfunc.SetDerivComponent(k);
      g[k] = igDerEval( x1, x2 );
   }
}
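The gradient of a bin-integrated model is obtained by integrating each parameter derivative over the bin, which is what the loop above does through the ParamDerivFunc adapter. Below is a standalone one-dimensional sketch of the same idea, with a simple Simpson rule standing in for ROOT's IntegralEvaluator (all names and numbers are hypothetical):

#include <cmath>
#include <cstdio>
#include <functional>

// Composite Simpson rule on [a, b]; stand-in for the numerical integrator used by ROOT.
double simpson(const std::function<double(double)> &f, double a, double b, int n = 100) {
   double h = (b - a) / n, sum = f(a) + f(b);
   for (int i = 1; i < n; ++i) sum += f(a + i * h) * (i % 2 ? 4.0 : 2.0);
   return sum * h / 3.0;
}

int main() {
   // model f(x;p) = p0 * exp(p1*x); component k of the bin-integrated gradient is
   // g[k] = integral over the bin of d f / d p_k
   const double p[2] = {2.0, -1.0};
   const double x1 = 0.0, x2 = 0.5;                  // bin edges
   auto dfdp0 = [&](double x) { return std::exp(p[1] * x); };
   auto dfdp1 = [&](double x) { return p[0] * x * std::exp(p[1] * x); };
   const double g[2] = { simpson(dfdp0, x1, x2), simpson(dfdp1, x1, x2) };
   std::printf("gradient of bin integral: %g %g\n", g[0], g[1]);
}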
double FitUtil::EvaluateChi2(const IModelFunction &func, const BinData &data, const double *p,
                             unsigned int &nPoints, ROOT::Fit::ExecutionPolicy executionPolicy, unsigned nChunks)
{
   // evaluate the chi2 given a function reference, the data and the parameter values p

   unsigned int n = data.Size();

   // fit options
   const DataOptions & fitOpt = data.Opt();
   bool useBinIntegral = fitOpt.fIntegral && data.HasBinEdges();
   bool useBinVolume = (fitOpt.fBinVolume && data.HasBinEdges());
   bool isWeighted = data.IsWeighted();
   // ...

#ifdef DEBUG
   std::cout << "\n\nFit data size = " << n << std::endl;
   std::cout << "evaluate chi2 using function " << &func << " " << p << std::endl;
   std::cout << "use empty bins " << fitOpt.fUseEmpty << std::endl;
   std::cout << "use integral " << fitOpt.fIntegral << std::endl;
   std::cout << "use all error=1 " << fitOpt.fErrors1 << std::endl;
   if (isWeighted) std::cout << "Weighted data set - sumw = " << data.SumOfContent()
                             << " sumw2 = " << data.SumOfError2() << std::endl;
#endif

   // cap on a single residual contribution, to avoid overflow when summing n of them
   double maxResValue = std::numeric_limits<double>::max() / n;
   double wrefVolume = 1.0;
   // ...

   IntegralEvaluator<> igEval( func, p, useBinIntegral);

   auto mapFunction = [&](const unsigned i) {

      double chi2{};

      const auto x1 = data.GetCoordComponent(i, 0);
      const auto y = data.Value(i);
      auto invError = data.InvError(i);

      const double * x = nullptr;
      std::vector<double> xc;
      double binVolume = 1.0;
      if (useBinVolume) {
         // use the bin center as coordinate and accumulate the bin volume
         unsigned int ndim = data.NDim();
         xc.resize(data.NDim());
         for (unsigned int j = 0; j < ndim; ++j) {
            double xx = *data.GetCoordComponent(i, j);
            double x2 = data.GetBinUpEdgeComponent(i, j);
            binVolume *= std::abs(x2 - xx);
            xc[j] = 0.5*(x2 + xx);
         }
         x = xc.data();
         // normalize the bin volume using a reference value
         binVolume *= wrefVolume;
      } else if(data.NDim() > 1) {
         // multi-dimensional case: collect the remaining coordinate components
         xc.resize(data.NDim());
         xc[0] = *x1;
         for (unsigned int j = 1; j < data.NDim(); ++j)
            xc[j] = *data.GetCoordComponent(i, j);
         x = xc.data();
      } else {
         x = x1;
      }

      double fval = 0;
      if (!useBinIntegral) {
         fval = func ( x, p );
      }
      else {
         // evaluate the bin-averaged model value by integrating between the lower edge x and the upper edge x2
         std::vector<double> x2(data.NDim());
         data.GetBinUpEdgeCoordinates(i, x2.data());
         fval = igEval( x, x2.data());
      }
      // normalize the integral result by the bin volume if requested
      if (useBinVolume) fval *= binVolume;

      // expected-error (Pearson) case: the variance is taken from the expected (function) value
      double invWeight = 1.0;
      // ...
      double invError2 = (fval > 0) ? invWeight / fval : 0.0;
      invError = std::sqrt(invError2);

#ifdef DEBUG
      std::cout << x[0] << " " << y << " " << 1./invError << " params : ";
      for (unsigned int ipar = 0; ipar < func.NPar(); ++ipar)
         std::cout << p[ipar] << "\t";
      std::cout << "\tfval = " << fval << " bin volume " << binVolume << " ref " << wrefVolume << std::endl;
#endif

      // chi2 contribution of this point
      double tmp = ( y - fval )* invError;
      double resval = tmp * tmp;

      // avoid infinity or nan in the chi2 sum caused by pathological function values
      if ( resval < maxResValue )
         chi2 += resval;
      // ...

      return chi2;
   };

   auto redFunction = [](const std::vector<double> & objs){
      return std::accumulate(objs.begin(), objs.end(), double{});
   };

#ifndef R__USE_IMT
   if (executionPolicy == ROOT::Fit::ExecutionPolicy::kMultithread) {
      Warning("FitUtil::EvaluateChi2",
              "Multithread execution policy requires IMT, which is disabled. Changing "
              "to ROOT::Fit::ExecutionPolicy::kSerial.");
      executionPolicy = ROOT::Fit::ExecutionPolicy::kSerial;
   }
#endif

   double res{};
   if (executionPolicy == ROOT::Fit::ExecutionPolicy::kSerial) {
      for (unsigned int i=0; i<n; ++i) {
         res += mapFunction(i);
      }
   }
   // ... (multithreaded case: map-reduce over chunks of the data)
   else {
      Error("FitUtil::EvaluateChi2",
            "Execution policy unknown. Available choices:\n ROOT::Fit::ExecutionPolicy::kSerial (default)\n ROOT::Fit::ExecutionPolicy::kMultithread (requires IMT)\n");
   }

   return res;
}
double FitUtil::EvaluateChi2Effective(const IModelFunction & func, const BinData & data,
                                      const double * p, unsigned int & nPoints) {
   // evaluate the "effective" chi2, using also the errors on the coordinates;
   // the bin-integral option does not make sense in this case

   unsigned int n = data.Size();

#ifdef DEBUG
   std::cout << "\n\nFit data size = " << n << std::endl;
   std::cout << "evaluate effective chi2 using function " << &func << " " << p << std::endl;
#endif

   double chi2 = 0;
   // ...

   unsigned int ndim = func.NDim();
   // ...

   double maxResValue = std::numeric_limits<double>::max() / n;

   for (unsigned int i = 0; i < n; ++ i) {

      double y = 0;
      const double * x = data.GetPoint(i, y);

      double fval = func( x, p );

      double delta_y_func = y - fval;

      // retrieve the errors: ex on the coordinates, ey on the value
      double ey = 0;
      const double * ex = 0;
      if (!data.HaveAsymErrors() ) { /* ... */ }
      else {
         double eylow, eyhigh = 0;
         // ...
         if ( delta_y_func < 0)
            ey = eyhigh;   // the function is above the point
         else
            ey = eylow;
      }
      double e2 = ey * ey;

      // before computing the coordinate gradient, check that not all coordinate errors are zero
      unsigned int j = 0;
      while ( j < ndim && ex[j] == 0.) { j++; }
      if (j < ndim) {
         // wrap the model as a one-dimensional function of the coordinate being varied
         ROOT::Math::OneDimMultiFunctionAdapter<const IModelFunction &> f1D(func, x, 0, p);
         ROOT::Math::RichardsonDerivator derivator;
         double kEps = 0.01;
         double kPrecision = 1.E-8;
         for (unsigned int icoord = 0; icoord < ndim; ++icoord) {
            // calculate the derivative for each coordinate with a non-zero error
            if (ex[icoord] > 0) {
               f1D.SetCoord(icoord);
               double x0 = x[icoord];
               double h = std::max( kEps* std::abs(ex[icoord]), 8.0*kPrecision*(std::abs(x0) + kPrecision) );
               double deriv = derivator.Derivative1(f1D, x[icoord], h);
               double edx = ex[icoord] * deriv;
               e2 += edx * edx;   // propagate the coordinate error into the effective variance
#ifdef DEBUG
               std::cout << "error for coord " << icoord << " = " << ex[icoord] << " deriv " << deriv << std::endl;
#endif
            }
         }
      }
      double w2 = (e2 > 0) ? 1.0/e2 : 0;
      double resval = w2 * ( y - fval ) * ( y - fval);

#ifdef DEBUG
      std::cout << x[0] << " " << y << " ex " << ex[0] << " ey " << ey << " params : ";
      for (unsigned int ipar = 0; ipar < func.NPar(); ++ipar)
         std::cout << p[ipar] << "\t";
      std::cout << "\tfval = " << fval << "\tresval = " << resval << std::endl;
#endif

      // avoid infinity or nan in the chi2 sum
      if ( resval < maxResValue )
         chi2 += resval;
      else
         chi2 += maxResValue;
   }

   // reset the number of fit points (no points are rejected in this method)
   nPoints = n;

#ifdef DEBUG
   std::cout << "chi2 = " << chi2 << " n = " << nPoints << std::endl;
#endif

   return chi2;
}
double FitUtil::EvaluateChi2Residual(const IModelFunction & func, const BinData & data,
                                     const double * p, unsigned int i, double * g) {
   // evaluate the chi2 contribution (normalized residual) of a single bin;
   // only the error on the value is used
   if (data.GetErrorType() == BinData::kCoordError && data.Opt().fCoordErrors ) {
      MATH_ERROR_MSG("FitUtil::EvaluateChi2Residual",
                     "Error on the coordinates are not used in calculating Chi2 residual");
      return 0;
   }
   // ...

   // x1 = bin coordinates, y = bin content, invError = 1/error (retrieved from the data)
   double y, invError = 0;
   // ...

   unsigned int ndim = data.NDim();
   double binVolume = 1.0;
   const double * x2 = 0;
   if (useBinVolume || useBinIntegral) x2 = data.BinUpEdge(i);

   double * xc = 0;
   if (useBinVolume) {
      xc = new double[ndim];
      for (unsigned int j = 0; j < ndim; ++j) {
         binVolume *= std::abs( x2[j] - x1[j] );
         xc[j] = 0.5*( x2[j] + x1[j]);
      }
      // ...
   }

   const double * x = (useBinVolume) ? xc : x1;

   double fval = 0;
   if (!useBinIntegral) {
      fval = func ( x, p );
   }
   else {
      // integrate the function in the bin between the lower edge x1 and the upper edge x2
      fval = igEval( x1, x2 );
   }
   // normalize the result by the bin volume if requested
   if (useBinVolume) fval *= binVolume;

   // expected-error case: estimate the error from the function value
   // ...
   double invError2 = (fval > 0) ? 1.0 / fval : 0.0;
   // ...

   double resval = ( y - fval )* invError;

   // estimate the gradient of the residual, if requested
   if (g != 0) {
      unsigned int npar = func.NPar();
      const IGradModelFunction * gfunc = dynamic_cast<const IGradModelFunction *>( &func);
      if (gfunc != 0) {
         // the function provides the parameter gradient
         if (!useBinIntegral ) {
            gfunc->ParameterGradient( x, p, g );
         }
         // ... (bin-integral case: integrate the gradient over the bin)
      }
      else {
         // estimate the gradient numerically
         SimpleGradientCalculator gc( npar, func);
         if (!useBinIntegral )
            gc.ParameterGradient( x, p, fval, g);
         // ...
      }
      // scale the gradient by the residual normalization factors
      for (unsigned int k = 0; k < npar; ++k) {
         g[k] *= - invError;
         if (useBinVolume) g[k] *= binVolume;
      }
   }

   if (useBinVolume) delete [] xc;

   return resval;
}
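A standalone sketch of the quantity returned above: the normalized residual r = (y - f)/sigma of one bin, together with its parameter gradient dr/dp_k = -(df/dp_k)/sigma, here computed with analytic derivatives for a toy model (hypothetical numbers):

#include <cmath>
#include <cstdio>

int main() {
   // one bin: observed y with error ey; model f(x;p) = p0 * exp(p1*x)
   const double x = 0.5, y = 1.4, ey = 0.2;
   const double p[2] = {2.0, -1.0};
   auto f = [&](const double *par) { return par[0] * std::exp(par[1] * x); };

   const double invError = 1.0 / ey;
   const double resval = (y - f(p)) * invError;          // normalized residual

   // gradient of the residual: d r / d p_k = - (d f / d p_k) / sigma
   const double dfdp[2] = { std::exp(p[1] * x), p[0] * x * std::exp(p[1] * x) };
   double g[2];
   for (int k = 0; k < 2; ++k) g[k] = -dfdp[k] * invError;

   std::printf("residual = %g, grad = (%g, %g)\n", resval, g[0], g[1]);
}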
657 "Error on the coordinates are not used in calculating Chi2 gradient");
662 assert(fg !=
nullptr);
667 std::cout <<
"\n\nFit data size = " <<
n << std::endl;
668 std::cout <<
"evaluate chi2 using function gradient " << &func <<
" " << p << std::endl;
675 double wrefVolume = 1.0;
687 unsigned int npar = func.
NPar();
688 unsigned initialNPoints = data.
Size();
690 std::vector<bool> isPointRejected(initialNPoints);
692 auto mapFunction = [&](
const unsigned int i) {
694 std::vector<double> gradFunc(npar);
695 std::vector<double> pointContribution(npar);
698 const auto y = data.
Value(i);
699 auto invError = data.
Error(i);
701 invError = (invError != 0.0) ? 1.0 / invError : 1;
705 const double *
x =
nullptr;
706 std::vector<double> xc;
708 unsigned int ndim = data.
NDim();
709 double binVolume = 1;
712 for (
unsigned int j = 0; j < ndim; ++j) {
715 binVolume *= std::abs(x2_j - x1_j);
716 xc[j] = 0.5 * (x2_j + x1_j);
722 binVolume *= wrefVolume;
723 }
else if (ndim > 1) {
726 for (
unsigned int j = 1; j < ndim; ++j)
733 if (!useBinIntegral) {
737 std::vector<double>
x2(data.
NDim());
741 fval = igEval(
x,
x2.data());
748 std::cout <<
x[0] <<
" " <<
y <<
" " << 1. / invError <<
" params : ";
749 for (
unsigned int ipar = 0; ipar < npar; ++ipar)
750 std::cout << p[ipar] <<
"\t";
751 std::cout <<
"\tfval = " << fval << std::endl;
754 isPointRejected[i] =
true;
756 return pointContribution;
760 unsigned int ipar = 0;
761 for (; ipar < npar; ++ipar) {
765 gradFunc[ipar] *= binVolume;
769 double dfval = gradFunc[ipar];
775 pointContribution[ipar] = -2.0 * (
y - fval) * invError * invError * gradFunc[ipar];
780 isPointRejected[i] =
true;
783 return pointContribution;
787 auto redFunction = [&](
const std::vector<std::vector<double>> &pointContributions) {
788 std::vector<double> result(npar);
790 for (
auto const &pointContribution : pointContributions) {
791 for (
unsigned int parameterIndex = 0; parameterIndex < npar; parameterIndex++)
792 result[parameterIndex] += pointContribution[parameterIndex];
798 std::vector<double>
g(npar);
803 Warning(
"FitUtil::EvaluateChi2Gradient",
"Multithread execution policy requires IMT, which is disabled. Changing "
804 "to ROOT::Fit::ExecutionPolicy::kSerial.");
810 std::vector<std::vector<double>> allGradients(initialNPoints);
811 for (
unsigned int i = 0; i < initialNPoints; ++i) {
812 allGradients[i] = mapFunction(i);
814 g = redFunction(allGradients);
828 Error(
"FitUtil::EvaluateChi2Gradient",
829 "Execution policy unknown. Avalaible choices:\n 0: Serial (default)\n 1: MultiThread (requires IMT)\n");
838 nPoints = initialNPoints;
840 if (std::any_of(isPointRejected.begin(), isPointRejected.end(), [](
bool point) { return point; })) {
841 unsigned nRejected = std::accumulate(isPointRejected.begin(), isPointRejected.end(), 0);
842 assert(nRejected <= initialNPoints);
843 nPoints = initialNPoints - nRejected;
847 "Error - too many points rejected for overflow in gradient calculation");
851 std::copy(
g.begin(),
g.end(), grad);
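A standalone sketch of the gradient sum computed above: each usable point contributes -2 (y - f) df/dp_k / sigma^2 to d(chi2)/dp_k, and points with non-finite function values are rejected and counted, mirroring isPointRejected. Toy data, a linear model and its analytic gradient are assumed.

#include <cmath>
#include <cstdio>
#include <vector>

int main() {
   struct Bin { double x, y, ey; };
   std::vector<Bin> data = {{0.5, 1.1, 0.3}, {1.5, 2.2, 0.3}, {2.5, 2.8, 0.4}};
   const double p[2] = {0.0, 1.0};                   // model f(x;p) = p0 + p1*x
   const unsigned npar = 2;

   std::vector<double> g(npar, 0.0);
   std::vector<bool> isPointRejected(data.size(), false);

   for (unsigned i = 0; i < data.size(); ++i) {
      const Bin &b = data[i];
      const double fval = p[0] + p[1] * b.x;
      const double invError = (b.ey != 0.0) ? 1.0 / b.ey : 1.0;
      const double gradFunc[2] = {1.0, b.x};          // analytic d f / d p
      if (!std::isfinite(fval)) { isPointRejected[i] = true; continue; }
      // d chi2 / d p_k contribution: -2 (y - f) * (1/sigma^2) * d f / d p_k
      for (unsigned k = 0; k < npar; ++k)
         g[k] += -2.0 * (b.y - fval) * invError * invError * gradFunc[k];
   }
   std::printf("grad chi2 = (%g, %g)\n", g[0], g[1]);
}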
double FitUtil::EvaluatePdf(const IModelFunction & func, const UnBinData & data,
                            const double * p, unsigned int i, double * g) {
   // evaluate the pdf contribution to the unbinned logL: return the log of the pdf value
   // and, if g != 0, also its parameter derivatives

   const double * x = data.Coords(i);
   double fval = func ( x, p );
   double logPdf = ROOT::Math::Util::EvalLog(fval);
   if (g == 0) return logPdf;

   // gradient calculation: use the analytical gradient if the function provides it,
   // otherwise estimate it numerically
   const IGradModelFunction * gfunc = dynamic_cast<const IGradModelFunction *>( &func);
   if (gfunc != 0) {
      gfunc->ParameterGradient( x, p, g );
   }
   else {
      SimpleGradientCalculator gc(func.NPar(), func);
      gc.ParameterGradient( x, p, fval, g );
   }
   // divide the gradient by the function value, since the log of the pdf is returned
   for (unsigned int ipar = 0; ipar < func.NPar(); ++ipar) {
      g[ipar] /= fval;   // this should be checked against infinities
   }

#ifdef DEBUG
   std::cout << x[i] << "\t";
   std::cout << "\tpar = [ " << func.NPar() << " ] = ";
   for (unsigned int ipar = 0; ipar < func.NPar(); ++ipar)
      std::cout << p[ipar] << "\t";
   std::cout << "\tfval = " << fval;
   std::cout << "\tgrad = [ ";
   for (unsigned int ipar = 0; ipar < func.NPar(); ++ipar)
      std::cout << g[ipar] << "\t";
   std::cout << " ] " << std::endl;
#endif

   return logPdf;
}
double FitUtil::EvaluateLogL(const IModelFunctionTempl<double> & func, const UnBinData & data, const double * p,
                             int iWeight, bool extended, unsigned int &nPoints,
                             ROOT::Fit::ExecutionPolicy executionPolicy, unsigned nChunks)
{
   // evaluate the (negative) log-likelihood for the unbinned data set

   unsigned int n = data.Size();

   // normalizeFunc: divide the pdf values by the integral of the function over the fit range
   bool normalizeFunc = false;

   // set the parameters once before the loop
#ifdef USE_PARAMCACHE
   (const_cast<IModelFunctionTempl<double> &>(func)).SetParameters(p);
#endif

   nPoints = data.Size();

   double norm = 1.0;
   if (!normalizeFunc) {
      // evaluate the function once at the first data point
      if (data.NDim() == 1) {
         // ...
      }
      else {
         // multi-dimensional case: copy the coordinates of the first point
         std::vector<double> x(data.NDim());
         for (unsigned int j = 0; j < data.NDim(); ++j)
            x[j] = *data.GetCoordComponent(0, j);
         func(x.data(), p);
      }
   }
   else {
      // compute the normalization: integral of the function over the fit range(s)
      std::vector<double> xmin(data.NDim());
      std::vector<double> xmax(data.NDim());
      IntegralEvaluator<> igEval(func, p, true);
      if (data.Range().Size() > 0) {
         // sum the integrals over all the sub-ranges
         for (unsigned int ir = 0; ir < data.Range().Size(); ++ir) {
            // ... (fill xmin, xmax with the ir-th fit range)
            norm += igEval.Integral(xmin.data(), xmax.data());
         }
      }
      else {
         // no range set: use (-inf, +inf) and require the function to vanish there
         // ...
         if (func(xmin.data(), p) != 0 || func(xmax.data(), p) != 0) {
            MATH_ERROR_MSG("FitUtil::EvaluateLogLikelihood",
                           "A range has not been set and the function is not zero at +/- inf");
            return 0;
         }
         norm = igEval.Integral(&xmin[0], &xmax[0]);
      }
   }

   // per-event contribution: log of the (optionally normalized) pdf value, plus the weight sums
   auto mapFunction = [&](const unsigned i) {
      double W = 0;
      double W2 = 0;
      double fval = 0;

      if (data.NDim() > 1) {
         std::vector<double> x(data.NDim());
         for (unsigned int j = 0; j < data.NDim(); ++j)
            x[j] = *data.GetCoordComponent(i, j);
#ifdef USE_PARAMCACHE
         fval = func(x.data());
#else
         fval = func(x.data(), p);
#endif
      }
      else {
         // ... (one-dimensional case)
      }

      if (normalizeFunc)
         fval = fval * (1 / norm);

      // EvalLog protects against negative or too small values of fval
      double logval = ROOT::Math::Util::EvalLog(fval);
      if (iWeight > 0) {
         double weight = data.Weight(i);
         logval *= weight;
         if (iWeight == 2) {
            // ... (weight-squared case: keep also the weight sums needed for the extended term)
            W2 = weight * weight;
         }
         // ...
      }
      return LikelihoodAux<double>(logval, W, W2);
   };

   // reduction: sum log-values and weight sums of all the events
   auto redFunction = [](const std::vector<LikelihoodAux<double>> & objs){
      auto l0 = LikelihoodAux<double>(0.0,0.0,0.0);
      for (auto & l : objs ) {
         l0 = l0 + l;
      }
      return l0;
   };

#ifndef R__USE_IMT
   if (executionPolicy == ROOT::Fit::ExecutionPolicy::kMultithread) {
      Warning("FitUtil::EvaluateLogL",
              "Multithread execution policy requires IMT, which is disabled. Changing "
              "to ROOT::Fit::ExecutionPolicy::kSerial.");
      executionPolicy = ROOT::Fit::ExecutionPolicy::kSerial;
   }
#endif

   double logl{};
   double sumW{};
   double sumW2{};
   if (executionPolicy == ROOT::Fit::ExecutionPolicy::kSerial) {
      for (unsigned int i=0; i<n; ++i) {
         auto resArray = mapFunction(i);
         logl += resArray.logvalue;
         sumW += resArray.weight;
         sumW2 += resArray.weight2;
      }
   }
   else if (executionPolicy == ROOT::Fit::ExecutionPolicy::kMultithread) {
      // ... (map-reduce over chunks of the data)
      logl = resArray.logvalue;
      sumW = resArray.weight;
      sumW2 = resArray.weight2;
   }
   else {
      Error("FitUtil::EvaluateLogL",
            "Execution policy unknown. Available choices:\n ROOT::Fit::ExecutionPolicy::kSerial (default)\n ROOT::Fit::ExecutionPolicy::kMultithread (requires IMT)\n");
   }

   if (extended) {
      // add the extended term: the total number of expected events (integral of the function)
      double extendedTerm = 0;
      double nuTot = 0;
      if (!normalizeFunc) {
         IntegralEvaluator<> igEval( func, p, true);
         std::vector<double> xmin(data.NDim());
         std::vector<double> xmax(data.NDim());

         if (data.Range().Size() > 0) {
            for (unsigned int ir = 0; ir < data.Range().Size(); ++ir) {
               // ... (fill xmin, xmax with the ir-th fit range)
               nuTot += igEval.Integral(xmin.data(), xmax.data());
            }
         }
         else {
            // no range set: use (-inf, +inf) and require the function to vanish there
            // ...
            if (func(xmin.data(), p) != 0 || func(xmax.data(), p) != 0) {
               MATH_ERROR_MSG("FitUtil::EvaluateLogLikelihood",
                              "A range has not been set and the function is not zero at +/- inf");
               return 0;
            }
            nuTot = igEval.Integral(&xmin[0],&xmax[0]);
         }

         // special case of weighted likelihood: use the effective weight sumW2/sumW
         if (iWeight != 2)
            extendedTerm = - nuTot;
         else
            extendedTerm = - (sumW2 / sumW) * nuTot;
      }
      else {
         // ... (normalized case)
      }
      logl += extendedTerm;
   }

#ifdef DEBUG
   std::cout << "Evaluated log L for parameters (";
   for (unsigned int ip = 0; ip < func.NPar(); ++ip)
      std::cout << " " << p[ip];
   std::cout << ") fval = " << -logl << std::endl;
#endif

   return -logl;
}
void FitUtil::EvaluateLogLGradient(const IModelFunction & f, const UnBinData & data, const double * p,
                                   double * grad, unsigned int &,
                                   ROOT::Fit::ExecutionPolicy executionPolicy, unsigned nChunks) {
   // evaluate the gradient of the (negative) log-likelihood

   const IGradModelFunction * fg = dynamic_cast<const IGradModelFunction *>( &f);
   assert(fg != nullptr);   // must be called with a gradient-providing function
   const IGradModelFunction & func = *fg;

   unsigned int npar = func.NPar();
   unsigned initialNPoints = data.Size();

#ifdef DEBUG
   std::cout << "\n===> Evaluate Gradient for parameters ";
   for (unsigned int ip = 0; ip < npar; ++ip)
      std::cout << " " << p[ip];
   std::cout << "\n";
#endif

   const double kdmax1 = std::sqrt(std::numeric_limits<double>::max());
   const double kdmax2 = std::numeric_limits<double>::max() / (4 * initialNPoints);

   auto mapFunction = [&](const unsigned int i) {
      std::vector<double> gradFunc(npar);
      std::vector<double> pointContribution(npar);

      const double * x = nullptr;
      std::vector<double> xc;
      if (data.NDim() > 1) {
         xc.resize(data.NDim() );
         for (unsigned int j = 0; j < data.NDim(); ++j)
            xc[j] = *data.GetCoordComponent(i, j);
         x = xc.data();
      } else {
         x = data.GetCoordComponent(i, 0);
      }

      double fval = func(x, p);
      func.ParameterGradient(x, p, &gradFunc[0]);

#ifdef DEBUG
      if (i < 5 || (i > data.Size()-5) ) {
         if (data.NDim() > 1) std::cout << i << " x " << x[0] << " y " << x[1] << " func " << fval
                                        << " gradient " << gradFunc[0] << " " << gradFunc[1] << " " << gradFunc[3] << std::endl;
         else std::cout << i << " x " << x[0] << " gradient " << gradFunc[0] << " " << gradFunc[1] << " " << gradFunc[3] << std::endl;
      }
#endif

      for (unsigned int kpar = 0; kpar < npar; ++kpar) {
         if (fval > 0)
            pointContribution[kpar] = -1. / fval * gradFunc[kpar];
         else if (gradFunc[kpar] != 0) {
            // non-positive function value: cap the contribution to a large but finite value
            double gg = kdmax1 * gradFunc[kpar];
            if (gg > 0)
               gg = std::min(gg, kdmax2);
            else
               gg = std::max(gg, -kdmax2);
            pointContribution[kpar] = -gg;
         }
      }

      return pointContribution;
   };

   // reduction: sum the per-event contributions parameter by parameter
   auto redFunction = [&](const std::vector<std::vector<double>> &pointContributions) {
      std::vector<double> result(npar);
      for (auto const &pointContribution : pointContributions) {
         for (unsigned int parameterIndex = 0; parameterIndex < npar; parameterIndex++)
            result[parameterIndex] += pointContribution[parameterIndex];
      }
      return result;
   };

   std::vector<double> g(npar);

#ifndef R__USE_IMT
   if (executionPolicy == ROOT::Fit::ExecutionPolicy::kMultithread) {
      Warning("FitUtil::EvaluateLogLGradient",
              "Multithread execution policy requires IMT, which is disabled. Changing "
              "to ROOT::Fit::ExecutionPolicy::kSerial.");
      executionPolicy = ROOT::Fit::ExecutionPolicy::kSerial;
   }
#endif

   if (executionPolicy == ROOT::Fit::ExecutionPolicy::kSerial) {
      std::vector<std::vector<double>> allGradients(initialNPoints);
      for (unsigned int i = 0; i < initialNPoints; ++i) {
         allGradients[i] = mapFunction(i);
      }
      g = redFunction(allGradients);
   }
   // ... (multithreaded case: map-reduce over chunks of the data)
   else {
      Error("FitUtil::EvaluateLogLGradient",
            "Execution policy unknown. Available choices:\n "
            "ROOT::Fit::ExecutionPolicy::kSerial (default)\n "
            "ROOT::Fit::ExecutionPolicy::kMultithread (requires IMT)\n");
   }

   std::copy(g.begin(), g.end(), grad);

#ifdef DEBUG
   std::cout << "FitUtil.cxx : Final gradient ";
   for (unsigned int param = 0; param < npar; param++) {
      std::cout << " " << grad[param];
   }
   std::cout << "\n";
#endif
}
double FitUtil::EvaluatePoissonBinPdf(const IModelFunction & func, const BinData & data,
                                      const double * p, unsigned int i, double * g) {
   // evaluate the Poisson log-pdf contribution of a single bin and, if g != 0, its parameter gradient

   // x1 = bin coordinates, y = bin content (retrieved from the data)
   // ...
   const double * x2 = 0;
   // calculate the bin volume and the bin center
   double binVolume = 1;
   std::vector<double> xc;
   if (useBinVolume) {
      unsigned int ndim = data.NDim();
      xc.resize(ndim);
      for (unsigned int j = 0; j < ndim; ++j) {
         double x2j = data.GetBinUpEdgeComponent(i, j);
         binVolume *= std::abs( x2j - x1[j] );
         xc[j] = 0.5*(x2j + x1[j]);
      }
      // ...
   }

   const double * x = (useBinVolume) ? &xc.front() : x1;

   double fval = 0;
   if (!useBinIntegral ) {
      fval = func ( x, p );
   }
   else {
      // integrate the function in the bin between the lower edge x1 and the upper edge
      std::vector<double> vx2(data.NDim());
      data.GetBinUpEdgeCoordinates(i, vx2.data());
      fval = igEval( x1, vx2.data() ) ;
   }
   if (useBinVolume) fval *= binVolume;

   // Poisson log-pdf (the constant term depending only on y is ignored)
   fval = std::max(fval, 0.0);   // avoid negative values
   double logPdf = - fval;
   // ... (add the y-dependent log term when y > 0)

   if (g == 0) return logPdf;

   unsigned int npar = func.NPar();
   const IGradModelFunction * gfunc = dynamic_cast<const IGradModelFunction *>( &func);

   // gradient calculation
   if (gfunc != 0) {
      // the function provides the parameter gradient
      if (!useBinIntegral )
         gfunc->ParameterGradient( x, p, g);
      else
         // bin-integral case: integrate the gradient over the bin
         CalculateGradientIntegral(*gfunc, x1, x2, p, g);
   }
   else {
      // estimate the gradient numerically
      SimpleGradientCalculator gc(func.NPar(), func);
      if (!useBinIntegral )
         gc.ParameterGradient( x, p, fval, g);
      // ...
   }
   // transform the function gradient into the gradient of the log-pdf
   for (unsigned int k = 0; k < npar; ++k) {
      if (useBinVolume) g[k] *= binVolume;
      if (fval > 0)
         g[k] *= ( y/fval - 1.) ;
      else if (y > 0) {
         const double kdmax1 = std::sqrt( std::numeric_limits<double>::max() );
         g[k] *= kdmax1;
      }
      // ...
   }

#ifdef DEBUG
   std::cout << "x = " << x[0] << " logPdf = " << logPdf << " grad";
   for (unsigned int ipar = 0; ipar < npar; ++ipar)
      std::cout << g[ipar] << "\t";
   std::cout << std::endl;
#endif

   return logPdf;
}
double FitUtil::EvaluatePoissonLogL(const IModelFunction &func, const BinData &data, const double *p,
                                    int iWeight, bool extended, unsigned int &nPoints,
                                    ROOT::Fit::ExecutionPolicy executionPolicy, unsigned nChunks)
{
   // evaluate the Poisson log-likelihood for the binned data set

   unsigned int n = data.Size();

#ifdef USE_PARAMCACHE
   (const_cast<IModelFunction &>(func)).SetParameters(p);
#endif

   nPoints = data.Size();

   // weight options
   bool useW2 = (iWeight == 2);

   // normalize the bin volume to a reference volume
   double wrefVolume = 1.0;
   // ...

#ifdef DEBUG
   std::cout << "Evaluate PoissonLogL for params = [ ";
   for (unsigned int j = 0; j < func.NPar(); ++j) std::cout << p[j] << " , ";
   std::cout << "] - data size = " << n << " useBinIntegral " << useBinIntegral << " useBinVolume "
             << useBinVolume << " useW2 " << useW2 << " wrefVolume = " << wrefVolume << std::endl;
#endif

#ifdef USE_PARAMCACHE
   IntegralEvaluator<> igEval(func, 0, useBinIntegral);
#else
   IntegralEvaluator<> igEval(func, p, useBinIntegral);
#endif

   auto mapFunction = [&](const unsigned i) {
      auto x1 = data.GetCoordComponent(i, 0);
      auto y = *data.ValuePtr(i);

      const double * x = nullptr;
      std::vector<double> xc;
      double fval = 0;
      double binVolume = 1.0;

      if (useBinVolume) {
         unsigned int ndim = data.NDim();
         xc.resize(data.NDim());
         for (unsigned int j = 0; j < ndim; ++j) {
            double xx = *data.GetCoordComponent(i, j);
            double x2 = data.GetBinUpEdgeComponent(i, j);
            binVolume *= std::abs(x2 - xx);
            xc[j] = 0.5 * (x2 + xx);
         }
         x = xc.data();
         // normalize the bin volume using a reference value
         binVolume *= wrefVolume;
      } else if (data.NDim() > 1) {
         xc.resize(data.NDim());
         xc[0] = *x1;
         for (unsigned int j = 1; j < data.NDim(); ++j) {
            xc[j] = *data.GetCoordComponent(i, j);
         }
         x = xc.data();
      } else {
         x = x1;
      }

      if (!useBinIntegral) {
#ifdef USE_PARAMCACHE
         fval = func(x);
#else
         fval = func(x, p);
#endif
      } else {
         // bin-integral case: compare the bin content with the integral of the function in the bin
         std::vector<double> x2(data.NDim());
         data.GetBinUpEdgeCoordinates(i, x2.data());
         fval = igEval(x, x2.data());
      }
      if (useBinVolume) fval *= binVolume;

#ifdef DEBUG
      if (i % NSAMPLE == 0) {
         std::cout << "evt " << i << " x = [ ";
         for (unsigned int j = 0; j < func.NDim(); ++j) std::cout << x[j] << " , ";
         // ...
         std::cout << "x2 = [ ";
         // ...
         std::cout << " y = " << y << " fval = " << fval << std::endl;
      }
#endif

      // EvalLog protects against zero values of fval, but add a protection here as well
      fval = std::max(fval, 0.0);

      double nloglike = 0;   // negative log-likelihood contribution of this bin

      if (useW2) {
         // weighted data set: the effective weight of the bin is error^2 / y
         double weight = 1.0;
         if (y != 0) {
            double error = data.Error(i);
            weight = (error * error) / y;
            // ...
         }
         // ...
         if (extended)
            nloglike += weight * ( fval - y);
      }
      else {
         // standard Poisson case (Baker-Cousins form, up to a factor of 2)
         if (extended) nloglike = fval - y;
         // ... (add the y * (log(y) - log(fval)) term when y > 0)
      }

#ifdef DEBUG
      std::cout << " nll = " << nloglike << std::endl;
#endif
      return nloglike;
   };

   auto redFunction = [](const std::vector<double> &objs) {
      return std::accumulate(objs.begin(), objs.end(), double{});
   };

#ifndef R__USE_IMT
   if (executionPolicy == ROOT::Fit::ExecutionPolicy::kMultithread) {
      Warning("FitUtil::EvaluatePoissonLogL",
              "Multithread execution policy requires IMT, which is disabled. Changing "
              "to ROOT::Fit::ExecutionPolicy::kSerial.");
      executionPolicy = ROOT::Fit::ExecutionPolicy::kSerial;
   }
#endif

   double res{};
   if (executionPolicy == ROOT::Fit::ExecutionPolicy::kSerial) {
      for (unsigned int i = 0; i < n; ++i) {
         res += mapFunction(i);
      }
   }
   // ... (multithreaded case: map-reduce over chunks of the data)
   else {
      Error("FitUtil::EvaluatePoissonLogL",
            "Execution policy unknown. Available choices:\n ROOT::Fit::ExecutionPolicy::kSerial (default)\n ROOT::Fit::ExecutionPolicy::kMultithread (requires IMT)\n");
   }

#ifdef DEBUG
   std::cout << "Loglikelihood = " << res << std::endl;
#endif

   return res;
}
void FitUtil::EvaluatePoissonLogLGradient(const IModelFunction & f, const BinData & data, const double * p,
                                          double * grad, unsigned int &,
                                          ROOT::Fit::ExecutionPolicy executionPolicy, unsigned nChunks) {
   // evaluate the gradient of the Poisson log-likelihood

   const IGradModelFunction * fg = dynamic_cast<const IGradModelFunction *>( &f);
   assert(fg != nullptr);   // must be called with a gradient-providing function
   const IGradModelFunction & func = *fg;

#ifdef USE_PARAMCACHE
   // ...
#endif

   // normalize the bin volume to a reference volume
   double wrefVolume = 1.0;
   // ...

   unsigned int npar = func.NPar();
   unsigned initialNPoints = data.Size();

   auto mapFunction = [&](const unsigned int i) {
      // set all vector values to zero
      std::vector<double> gradFunc(npar);
      std::vector<double> pointContribution(npar);

      const auto x1 = data.GetCoordComponent(i, 0);
      const auto y = data.Value(i);
      auto invError = data.Error(i);

      invError = (invError != 0.0) ? 1.0 / invError : 1;

      double fval = 0;

      const double * x = nullptr;
      std::vector<double> xc;

      unsigned ndim = data.NDim();
      double binVolume = 1.0;
      if (useBinVolume) {
         // use the bin center as coordinate and accumulate the bin volume
         xc.resize(ndim);
         for (unsigned int j = 0; j < ndim; ++j) {
            double x1_j = *data.GetCoordComponent(i, j);
            double x2_j = data.GetBinUpEdgeComponent(i, j);
            binVolume *= std::abs(x2_j - x1_j);
            xc[j] = 0.5 * (x2_j + x1_j);
         }
         x = xc.data();
         // normalize the bin volume using a reference value
         binVolume *= wrefVolume;
      } else if (ndim > 1) {
         xc.resize(ndim);
         xc[0] = *x1;
         for (unsigned int j = 1; j < ndim; ++j)
            xc[j] = *data.GetCoordComponent(i, j);
         x = xc.data();
      } else {
         x = x1;
      }

      if (!useBinIntegral) {
         fval = func(x, p);
         func.ParameterGradient(x, p, &gradFunc[0]);
      } else {
         // bin-integral case: integrate the function and its parameter gradient over the bin
         std::vector<double> x2(data.NDim());
         data.GetBinUpEdgeCoordinates(i, x2.data());
         fval = igEval(x, x2.data());
         CalculateGradientIntegral(func, x, x2.data(), p, &gradFunc[0]);
      }
      if (useBinVolume) fval *= binVolume;

#ifdef DEBUG
      if (i < 5 || (i > data.Size()-5) ) {
         if (data.NDim() > 1) std::cout << i << " x " << x[0] << " y " << x[1] << " func " << fval
                                        << " gradient " << gradFunc[0] << " " << gradFunc[1] << " " << gradFunc[3] << std::endl;
         else std::cout << i << " x " << x[0] << " gradient " << gradFunc[0] << " " << gradFunc[1] << " " << gradFunc[3] << std::endl;
      }
#endif

      // loop on the parameters
      for (unsigned int ipar = 0; ipar < npar; ++ipar) {
         // correct the gradient for the bin volume
         if (useBinVolume)
            gradFunc[ipar] *= binVolume;

         // d (-logL) / d p_ipar contribution of this bin: (1 - y/f) * df/dp
         if (fval > 0)
            pointContribution[ipar] = gradFunc[ipar] * (1. - y / fval);
         else if (gradFunc[ipar] != 0) {
            // non-positive function value: cap the contribution to a large but finite value
            const double kdmax1 = std::sqrt(std::numeric_limits<double>::max());
            const double kdmax2 = std::numeric_limits<double>::max() / (4 * initialNPoints);
            double gg = kdmax1 * gradFunc[ipar];
            if (gg > 0)
               gg = std::min(gg, kdmax2);
            else
               gg = std::max(gg, -kdmax2);
            pointContribution[ipar] = -gg;
         }
      }

      return pointContribution;
   };

   // reduction: sum the per-bin contributions parameter by parameter
   auto redFunction = [&](const std::vector<std::vector<double>> &pointContributions) {
      std::vector<double> result(npar);
      for (auto const &pointContribution : pointContributions) {
         for (unsigned int parameterIndex = 0; parameterIndex < npar; parameterIndex++)
            result[parameterIndex] += pointContribution[parameterIndex];
      }
      return result;
   };

   std::vector<double> g(npar);

#ifndef R__USE_IMT
   if (executionPolicy == ROOT::Fit::ExecutionPolicy::kMultithread) {
      Warning("FitUtil::EvaluatePoissonLogLGradient",
              "Multithread execution policy requires IMT, which is disabled. Changing "
              "to ROOT::Fit::ExecutionPolicy::kSerial.");
      executionPolicy = ROOT::Fit::ExecutionPolicy::kSerial;
   }
#endif

   if (executionPolicy == ROOT::Fit::ExecutionPolicy::kSerial) {
      std::vector<std::vector<double>> allGradients(initialNPoints);
      for (unsigned int i = 0; i < initialNPoints; ++i) {
         allGradients[i] = mapFunction(i);
      }
      g = redFunction(allGradients);
   }
   // ... (multithreaded case: map-reduce over chunks of the data)
   else {
      Error("FitUtil::EvaluatePoissonLogLGradient",
            "Execution policy unknown. Available choices:\n 0: Serial (default)\n 1: MultiThread (requires IMT)\n");
   }

   std::copy(g.begin(), g.end(), grad);

#ifdef DEBUG
   std::cout << "***** Final gradient : ";
   for (unsigned int ii = 0; ii < npar; ++ii) std::cout << grad[ii] << " ";
   std::cout << "\n";
#endif
}
unsigned FitUtil::setAutomaticChunking(unsigned nEvents) {
   // heuristic for the number of chunks used by the multithreaded map-reduce
   auto ncpu = ROOT::GetThreadPoolSize();
   if (nEvents/ncpu < 1000) return ncpu;
   return nEvents/1000;   // aim at chunks of roughly 1000 events
}
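A standalone sketch of the chunking heuristic above: use chunks of roughly 1000 events when there is enough data, otherwise fall back to one chunk per worker. autoChunks and the hard-coded pool size are stand-ins for setAutomaticChunking and ROOT::GetThreadPoolSize().

#include <cstdio>

unsigned autoChunks(unsigned nEvents, unsigned ncpu) {
   if (nEvents / ncpu < 1000) return ncpu;   // small data set: one chunk per worker
   return nEvents / 1000;                    // otherwise ~1000 events per chunk
}

int main() {
   std::printf("%u %u %u\n", autoChunks(500, 8), autoChunks(80000, 8), autoChunks(8000000, 8));
}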
Class describing the binned data sets : vectors of x coordinates, y values and optionally error on y ...
double SumOfContent() const
compute the total sum of the data content (sum of weights in case of weighted data set)
double RefVolume() const
retrieve the reference volume used to normalize the data when the bin-volume option is set
double GetBinUpEdgeComponent(unsigned int ipoint, unsigned int icoord) const
returns a single coordinate upper bin-edge component of a point.
bool HasBinEdges() const
query if the data store the bin edges instead of the center
const double * GetPointError(unsigned int ipoint, double &errvalue) const
Retrieve the errors on the point (coordinate and value) for the given fit point. It must be called onl...
const double * GetPoint(unsigned int ipoint, double &value) const
retrieve at the same time a pointer to the coordinate data and the fit value. More efficient than call...
const double * BinUpEdge(unsigned int ipoint) const
return an array containing the upper edge of the bin for coordinate i. In case of empty bins they could...
double InvError(unsigned int ipoint) const
Return the inverse of the error on the value for the given fit point; useful when errors in the coordinates...
bool IsWeighted() const
return true if the data set is weighted. We cannot compute it ourselves because sometimes errors are fille...
double Value(unsigned int ipoint) const
return the value for the given fit point
ErrorType GetErrorType() const
retrieve the error type
bool HaveCoordErrors() const
flag indicating whether the data provide errors on the coordinates
const double * ValuePtr(unsigned int ipoint) const
return a pointer to the value for the given fit point
void GetBinUpEdgeCoordinates(unsigned int ipoint, double *x) const
Thread-safe version of the function retrieving the bin upper edge in the multi-dimensional case.
double Error(unsigned int ipoint) const
bool HaveAsymErrors() const
flag to control if data provides asymmetric errors on the value
double SumOfError2() const
compute the total sum of the squared errors (sum of squared weights in case of a weighted data set)
unsigned int Size(unsigned int icoord=0) const
return the range size for coordinate icoord (starting from zero). Size == 0 indicates no range is present [-...
void GetRange(unsigned int irange, unsigned int icoord, double &xmin, double &xmax) const
get the i-th range for given coordinate.
unsigned int Size() const
return number of fit points
const double * GetCoordComponent(unsigned int ipoint, unsigned int icoord) const
returns a single coordinate component of a point.
unsigned int NDim() const
return coordinate data dimension
const DataOptions & Opt() const
access to options
const double * Coords(unsigned int ipoint) const
return a pointer to the coordinates data for the given fit point
const DataRange & Range() const
access to range
Class describing the unbinned data sets (just x coordinates values) of any dimensions.
double Weight(unsigned int ipoint) const
return weight
unsigned int NDim() const
return coordinate data dimension
virtual unsigned int NDim() const =0
Retrieve the dimension of the function.
virtual void SetParameters(const double *p)=0
Set the parameter values.
virtual unsigned int NPar() const =0
Return the number of Parameters.
Interface (abstract class) for parametric gradient multi-dimensional functions providing in addition ...
virtual void ParameterGradient(const T *x, const double *p, T *grad) const
Evaluate all the derivatives (gradient vector) of the function with respect to the parameters at ...
OneDimMultiFunctionAdapter class to wrap a multi-dimensional function into a one-dimensional one.
void SetCoord(int icoord)
User class for calculating the derivatives of a function.
double Derivative1(double x)
Returns the first derivative of the function at point x, computed by Richardson's extrapolation metho...
A pseudo container class which is a generator of indices.
This class provides a simple interface to execute the same task multiple times in parallel,...
auto MapReduce(F func, unsigned nTimes, R redfunc) -> typename std::result_of< F()>::type
This method behaves just like Map, but an additional redfunc function must be provided.
Type
enumeration specifying the integration types.
TFitResultPtr Fit(FitObject *h1, TF1 *f1, Foption_t &option, const ROOT::Math::MinimizerOptions &moption, const char *goption, ROOT::Fit::DataRange &range)
double EvaluateChi2(const IModelFunction &func, const BinData &data, const double *x, unsigned int &nPoints, ROOT::Fit::ExecutionPolicy executionPolicy, unsigned nChunks=0)
Chi2 Functions.
ROOT::Math::IParamMultiFunction IModelFunction
void CalculateGradientIntegral(const GFunc &gfunc, const double *x1, const double *x2, const double *p, double *g)
double EvaluateChi2Residual(const IModelFunction &func, const BinData &data, const double *x, unsigned int ipoint, double *g=0)
evaluate the residual contribution to the Chi2 given a model function and the BinPoint data and if th...
void EvaluateLogLGradient(const IModelFunction &func, const UnBinData &data, const double *x, double *grad, unsigned int &nPoints, ROOT::Fit::ExecutionPolicy executionPolicy=ROOT::Fit::ExecutionPolicy::kSerial, unsigned nChunks=0)
evaluate the LogL gradient given a model function and the data at the point x.
double CorrectValue(double rval)
double EvaluatePoissonLogL(const IModelFunction &func, const BinData &data, const double *x, int iWeight, bool extended, unsigned int &nPoints, ROOT::Fit::ExecutionPolicy executionPolicy, unsigned nChunks=0)
evaluate the Poisson LogL given a model function and the data at the point x.
double EvaluateChi2Effective(const IModelFunction &func, const BinData &data, const double *x, unsigned int &nPoints)
evaluate the effective Chi2 given a model function and the data at the point x.
double EvaluatePoissonBinPdf(const IModelFunction &func, const BinData &data, const double *x, unsigned int ipoint, double *g=0)
evaluate the pdf contribution to the Poisson LogL given a model function and the BinPoint data.
bool CheckInfNaNValue(double &rval)
unsigned setAutomaticChunking(unsigned nEvents)
double EvaluateLogL(const IModelFunction &func, const UnBinData &data, const double *p, int iWeight, bool extended, unsigned int &nPoints, ROOT::Fit::ExecutionPolicy executionPolicy, unsigned nChunks=0)
evaluate the LogL given a model function and the data at the point x.
void EvaluatePoissonLogLGradient(const IModelFunction &func, const BinData &data, const double *x, double *grad, unsigned int &nPoints, ROOT::Fit::ExecutionPolicy executionPolicy=ROOT::Fit::ExecutionPolicy::kSerial, unsigned nChunks=0)
evaluate the Poisson LogL gradient given a model function and the data at the point x.
double EvaluatePdf(const IModelFunction &func, const UnBinData &data, const double *x, unsigned int ipoint, double *g=0)
evaluate the pdf contribution to the LogL given a model function and the BinPoint data.
void EvaluateChi2Gradient(const IModelFunction &func, const BinData &data, const double *x, double *grad, unsigned int &nPoints, ROOT::Fit::ExecutionPolicy executionPolicy=ROOT::Fit::ExecutionPolicy::kSerial, unsigned nChunks=0)
evaluate the Chi2 gradient given a model function and the data at the point x.
T EvalLog(T x)
safe evaluation of log(x) with protection against negative or zero arguments; smooth line...
tbb::task_arena is an alias of tbb::interface7::task_arena, which doesn't allow to forward declare tb...
UInt_t GetThreadPoolSize()
Returns the size of ROOT's thread pool.
DataOptions : simple structure holding the options on how the data are filled.