FitUtil.cxx
// @(#)root/mathcore:$Id$
// Author: L. Moneta Tue Nov 28 10:52:47 2006

/**********************************************************************
 *                                                                    *
 * Copyright (c) 2006  LCG ROOT Math Team, CERN/PH-SFT                *
 *                                                                    *
 *                                                                    *
 **********************************************************************/

// Implementation file for class FitUtil

#include "Fit/FitUtil.h"

#include "Fit/BinData.h"
#include "Fit/UnBinData.h"

#include "Math/IFunctionfwd.h"
#include "Math/IParamFunction.h"
#include "Math/Integrator.h"
#include "Math/IntegratorMultiDim.h"
#include "Math/WrappedFunction.h"
#include "Math/OneDimFunctionAdapter.h"
#include "Math/RichardsonDerivator.h"

#include "Math/Error.h"
#include "Math/Util.h"  // for safe log(x)

#include <limits>
#include <cmath>
#include <cassert>
#include <algorithm>
#include <numeric>
//#include <memory>

#include "TROOT.h"

//#define DEBUG
#ifdef DEBUG
#define NSAMPLE 10
#include <iostream>
#endif

// need to implement integral option

namespace ROOT {

   namespace Fit {

      namespace FitUtil {

         // derivative with respect to the parameter to be integrated
         template <class GradFunc = IGradModelFunction>
         struct ParamDerivFunc {
            ParamDerivFunc(const GradFunc & f) : fFunc(f), fIpar(0) {}
            void SetDerivComponent(unsigned int ipar) { fIpar = ipar; }
            double operator() (const double *x, const double *p) const {
               return fFunc.ParameterDerivative( x, p, fIpar );
            }
            unsigned int NDim() const { return fFunc.NDim(); }
            const GradFunc & fFunc;
            unsigned int fIpar;
         };

         // simple gradient calculator using the 2-point rule

         class SimpleGradientCalculator {

         public:
            // construct from a function and the gradient dimension gdim
            // gdim = npar for parameter gradients
            // gdim = ndim for coordinate gradients
            // (the parameter values will be passed later)
            // one can choose between the 2-point forward rule (1 extra evaluation), istrat = 1,
            // or the central rule (2 extra evaluations), istrat = 2
            // (the central rule was found not to work correctly - minuit2FitBench fails)
            SimpleGradientCalculator(int gdim, const IModelFunction & func, double eps = 2.E-8, int istrat = 1) :
               fEps(eps),
               fPrecision(1.E-8), // sqrt(epsilon)
               fStrategy(istrat),
               fN(gdim),
               fFunc(func),
               fVec(std::vector<double>(gdim)) // this could probably be optimized
            {}

            // internal method to calculate a single partial derivative
            // assume the cached vector fVec is already set
            double DoParameterDerivative(const double *x, const double *p, double f0, int k) const {
               double p0 = p[k];
               double h = std::max( fEps * std::abs(p0), 8.0 * fPrecision * (std::abs(p0) + fPrecision) );
               fVec[k] += h;
               double deriv = 0;
               // t.b.d : treat case of infinities
               //if (fval > - std::numeric_limits<double>::max() && fval < std::numeric_limits<double>::max() )
               double f1 = fFunc(x, &fVec.front() );
               if (fStrategy > 1) {
                  fVec[k] = p0 - h;
                  double f2 = fFunc(x, &fVec.front() );
                  deriv = 0.5 * ( f1 - f2 )/h;  // central difference: f1 is the value at +h, f2 at -h
               }
               else
                  deriv = ( f1 - f0 )/h;

               fVec[k] = p[k]; // restore original p value
               return deriv;
            }
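
            // For reference, the two strategies above are the standard finite-difference rules:
            //    forward (fStrategy == 1):  df/dp_k ~ ( f(p_k + h) - f(p_k) ) / h
            //    central (fStrategy == 2):  df/dp_k ~ ( f(p_k + h) - f(p_k - h) ) / (2h)
            // with the step h chosen relative to the parameter scale,
            //    h = max( eps * |p_k|, 8 * prec * (|p_k| + prec) ),
            // so that it never underflows to zero for parameters close to 0.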
            // number of dimensions in x (needed when calculating the integrals)
            unsigned int NDim() const {
               return fFunc.NDim();
            }
            // number of parameters (needed for the gradient calculation)
            unsigned int NPar() const {
               return fFunc.NPar();
            }

            double ParameterDerivative(const double *x, const double *p, int ipar) const {
               // fVec are the cached parameter values
               std::copy(p, p+fN, fVec.begin());
               double f0 = fFunc(x, p);
               return DoParameterDerivative(x,p,f0,ipar);
            }

            // calculate the full gradient at the point (x,p), knowing already the value f0 (we gain a function evaluation)
            void ParameterGradient(const double * x, const double * p, double f0, double * g) {
               // fVec are the cached parameter values
               std::copy(p, p+fN, fVec.begin());
               for (unsigned int k = 0; k < fN; ++k) {
                  g[k] = DoParameterDerivative(x,p,f0,k);
               }
            }

            // calculate the gradient w.r.t. the coordinate values
            void Gradient(const double * x, const double * p, double f0, double * g) {
               // fVec are the cached coordinate values
               std::copy(x, x+fN, fVec.begin());
               for (unsigned int k = 0; k < fN; ++k) {
                  double x0 = x[k];
                  double h = std::max( fEps * std::abs(x0), 8.0 * fPrecision * (std::abs(x0) + fPrecision) );
                  fVec[k] += h;
                  // t.b.d : treat case of infinities
                  //if (fval > - std::numeric_limits<double>::max() && fval < std::numeric_limits<double>::max() )
                  double f1 = fFunc( &fVec.front(), p );
                  if (fStrategy > 1) {
                     fVec[k] = x0 - h;
                     double f2 = fFunc( &fVec.front(), p );
                     g[k] = 0.5 * ( f1 - f2 )/h;  // central difference: f1 is the value at +h, f2 at -h
                  }
                  else
                     g[k] = ( f1 - f0 )/h;

                  fVec[k] = x[k]; // restore original x value
               }
            }

         private:

            double fEps;
            double fPrecision;
            int fStrategy; // strategy of the calculation ( =1 use the 2-point rule (1 extra evaluation), =2 use the central rule )
            unsigned int fN; // gradient dimension
            const IModelFunction & fFunc;
            mutable std::vector<double> fVec; // cached coordinates (or parameter values in the case of the parameter gradient)
         };


         // function to avoid infinities or nan
         double CorrectValue(double rval) {
            // avoid infinities or nan in rval
            if (rval > - std::numeric_limits<double>::max() && rval < std::numeric_limits<double>::max() )
               return rval;
            else if (rval < 0)
               // case -inf
               return -std::numeric_limits<double>::max();
            else
               // case + inf or nan
               return + std::numeric_limits<double>::max();
         }

         // Check if the value is a finite number. The argument rval is updated if it is infinite or NaN,
         // setting it to the maximum finite value (preserving the sign).
         bool CheckInfNaNValue(double &rval)
         {
            if (rval > - std::numeric_limits<double>::max() && rval < std::numeric_limits<double>::max() )
               return true;
            else if (rval < 0) {
               // case -inf
               rval = -std::numeric_limits<double>::max();
               return false;
            }
            else {
               // case + inf or nan
               rval = + std::numeric_limits<double>::max();
               return false;
            }
         }


         // calculation of the integral of the gradient functions
         // for a function providing derivatives w.r.t. the parameters
         // x1 and x2 define the integration interval, p the parameters
         template <class GFunc>
         void CalculateGradientIntegral(const GFunc & gfunc,
                                        const double *x1, const double * x2, const double * p, double *g) {

            // needs to calculate the integral for each partial derivative
            ParamDerivFunc<GFunc> pfunc( gfunc);
            IntegralEvaluator<ParamDerivFunc<GFunc> > igDerEval( pfunc, p, true);
            // loop on the parameters
            unsigned int npar = gfunc.NPar();
            for (unsigned int k = 0; k < npar; ++k ) {
               pfunc.SetDerivComponent(k);
               g[k] = igDerEval( x1, x2 );
            }
         }
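
         // For reference, this relies on differentiation under the integral sign,
         //    d/dp_k  Integral_{x1}^{x2} f(x; p) dx  =  Integral_{x1}^{x2}  df(x; p)/dp_k  dx ,
         // so the gradient of a bin integral is obtained by integrating each parameter
         // derivative of the model over the same bin.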


      } // end namespace FitUtil



//___________________________________________________________________________________________________________________________
// for chi2 functions
//___________________________________________________________________________________________________________________________

double FitUtil::EvaluateChi2(const IModelFunction &func, const BinData &data, const double *p, unsigned int &nPoints,
                             ::ROOT::EExecutionPolicy executionPolicy, unsigned nChunks)
{
   // evaluate the chi2 given a function reference and the data; return the value and,
   // in nPoints, the actual number of used points
   // normal chi2 using only the errors on the values (from fitting a histogram)
   // optionally the integral of the function in the bin is used
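   // i.e. the quantity computed here is
   //    chi2(p) = Sum_i ( ( y_i - f(x_i; p) ) / e_i )^2
   // where y_i is the bin content, e_i the bin error and f the model value (or its bin integral)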

   unsigned int n = data.Size();

   // set parameters of the function to cache the integral value
#ifdef USE_PARAMCACHE
   (const_cast<IModelFunction &>(func)).SetParameters(p);
#endif
   // do not cache parameter values (it is not thread safe)
   //func.SetParameters(p);


   // get the fit option and check the case of using the integral of the bins
   const DataOptions & fitOpt = data.Opt();
   bool useBinIntegral = fitOpt.fIntegral && data.HasBinEdges();
   bool useBinVolume = (fitOpt.fBinVolume && data.HasBinEdges());
   bool useExpErrors = (fitOpt.fExpErrors);
   bool isWeighted = fitOpt.fExpErrors && !fitOpt.fErrors1 && data.IsWeighted(); // used in case of Pearson weighted chi2 fits
#ifdef DEBUG
   std::cout << "\n\nFit data size = " << n << std::endl;
   std::cout << "evaluate chi2 using function " << &func << "  " << p << std::endl;
   std::cout << "use empty bins  " << fitOpt.fUseEmpty << std::endl;
   std::cout << "use integral    " << useBinIntegral << std::endl;
   std::cout << "use binvolume   " << useBinVolume << std::endl;
   std::cout << "use Exp Errors  " << useExpErrors << std::endl;
   std::cout << "use all error=1 " << fitOpt.fErrors1 << std::endl;
   if (isWeighted) std::cout << "Weighted data set - sumw = " << data.SumOfContent() << " sumw2 = " << data.SumOfError2() << std::endl;
#endif

   ROOT::Math::IntegrationOneDim::Type igType = ROOT::Math::IntegrationOneDim::kDEFAULT;
   if (executionPolicy == ROOT::EExecutionPolicy::kMultiThread) {
      // do not use the GSL integrator, which is not thread safe
      igType = ROOT::Math::IntegrationOneDim::kGAUSS;
   }
#ifdef USE_PARAMCACHE
   IntegralEvaluator<> igEval( func, nullptr, useBinIntegral, igType);
#else
   IntegralEvaluator<> igEval( func, p, useBinIntegral, igType);
#endif
   double maxResValue = std::numeric_limits<double>::max() / n;
   double wrefVolume = 1.0;
   if (useBinVolume) {
      if (fitOpt.fNormBinVolume) wrefVolume /= data.RefVolume();
   }

   (const_cast<IModelFunction &>(func)).SetParameters(p);

   auto mapFunction = [&](const unsigned i){

      double chi2{};
      double fval{};

      const auto x1 = data.GetCoordComponent(i, 0);
      const auto y = data.Value(i);
      auto invError = data.InvError(i);

      //invError = (invError!= 0.0) ? 1.0/invError :1;

      const double * x = nullptr;
      std::vector<double> xc;
      double binVolume = 1.0;
      if (useBinVolume) {
         unsigned int ndim = data.NDim();
         xc.resize(data.NDim());
         for (unsigned int j = 0; j < ndim; ++j) {
            double xx = *data.GetCoordComponent(i, j);
            double x2 = data.GetBinUpEdgeComponent(i, j);
            binVolume *= std::abs(x2 - xx);
            xc[j] = (useBinIntegral) ? xx : 0.5*(x2 + xx);
         }
         x = xc.data();
         // normalize the bin volume using a reference value
         binVolume *= wrefVolume;
      } else if (data.NDim() > 1) {
         // multi-dim case (no bin volume)
         // in case of bin integral xc is x1
         xc.resize(data.NDim());
         xc[0] = *x1;
         for (unsigned int j = 1; j < data.NDim(); ++j)
            xc[j] = *data.GetCoordComponent(i, j);
         x = xc.data();
      } else {
         // for dim 1
         x = x1;
      }


      if (!useBinIntegral) {
#ifdef USE_PARAMCACHE
         fval = func ( x );
#else
         fval = func ( x, p );
#endif
      }
      else {
         // calculate the integral normalized (divided) by the bin volume
         // need to set function and parameters here in case the loop is parallelized
         std::vector<double> x2(data.NDim());
         data.GetBinUpEdgeCoordinates(i, x2.data());
         fval = igEval(x, x2.data());
      }
      // normalize the result if requested according to the bin volume
      // we need to multiply by the bin volume (e.g. for variable-bin histograms)
      if (useBinVolume) fval *= binVolume;

      // expected errors
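      // (Pearson chi2: the bin variance is estimated from the model rather than from the data,
      //  var_i = f(x_i) for an unweighted fit and var_i = f(x_i) / w for an effective weight w,
      //  hence 1/e_i^2 = w / f(x_i) below)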
      if (useExpErrors) {
         double invWeight = 1.0;
         // case of a weighted Pearson chi2 fit
         if (isWeighted) {
            // when a weighted Pearson fit is requested (option "PW") a weight factor needs to be applied
            // the inverse bin weight is estimated from the bin error and the bin content
            if (y != 0)
               invWeight = y * invError * invError;
            else
               // when y is 0 we use a global weight estimated from the full histogram (correct if scaling the histogram)
               // note that if the data is weighted, data.SumOfError2 will not be equal to zero
               invWeight = data.SumOfContent()/ data.SumOfError2();
         }
         // compute the expected error as f(x) or f(x) / weight (if weighted fit)
         double invError2 = (fval > 0) ? invWeight / fval : 0.0;
         invError = std::sqrt(invError2);
         //std::cout << "using Pearson chi2 " << x[0] << "  " << 1./invError2 << "  " << fval << std::endl;
      }

#ifdef DEBUG
      std::cout << x[0] << "  " << y << "  " << 1./invError << " params : ";
      for (unsigned int ipar = 0; ipar < func.NPar(); ++ipar)
         std::cout << p[ipar] << "\t";
      std::cout << "\tfval = " << fval << " bin volume " << binVolume << " ref " << wrefVolume << std::endl;
#endif

      if (invError > 0) {

         double tmp = ( y - fval ) * invError;
         double resval = tmp * tmp;


         // avoid infinity or nan in the chi2 values due to wrong function values
         if ( resval < maxResValue )
            chi2 += resval;
         else {
            //nRejected++;
            chi2 += maxResValue;
         }
      }
      return chi2;
   };

#ifdef R__USE_IMT
   auto redFunction = [](const std::vector<double> & objs){
      return std::accumulate(objs.begin(), objs.end(), double{});
   };
#else
   (void)nChunks;

   // If IMT is disabled, force the execution policy to the serial case
   if (executionPolicy == ROOT::EExecutionPolicy::kMultiThread) {
      Warning("FitUtil::EvaluateChi2", "Multithread execution policy requires IMT, which is disabled. Changing "
                                       "to ROOT::EExecutionPolicy::kSequential.");
      executionPolicy = ROOT::EExecutionPolicy::kSequential;
   }
#endif

   double res{};
   if (executionPolicy == ROOT::EExecutionPolicy::kSequential){
      for (unsigned int i=0; i<n; ++i) {
         res += mapFunction(i);
      }
#ifdef R__USE_IMT
   } else if (executionPolicy == ROOT::EExecutionPolicy::kMultiThread) {
      ROOT::TThreadExecutor pool;
      auto chunks = nChunks != 0 ? nChunks : setAutomaticChunking(data.Size());
      res = pool.MapReduce(mapFunction, ROOT::TSeq<unsigned>(0, n), redFunction, chunks);
#endif
   // } else if(executionPolicy == ROOT::Fit::kMultiProcess){
   //    ROOT::TProcessExecutor pool;
   //    res = pool.MapReduce(mapFunction, ROOT::TSeq<unsigned>(0, n), redFunction);
   } else {
      Error("FitUtil::EvaluateChi2","Execution policy unknown. Available choices:\n ROOT::EExecutionPolicy::kSequential (default)\n ROOT::EExecutionPolicy::kMultiThread (requires IMT)\n");
   }

   // reset the number of fitting data points
   nPoints = n; // no points are rejected
   //if (nRejected != 0)  nPoints = n - nRejected;

   return res;
}


//___________________________________________________________________________________________________________________________

double FitUtil::EvaluateChi2Effective(const IModelFunction & func, const BinData & data, const double * p, unsigned int & nPoints) {
   // evaluate the chi2 given a function reference and the data; return the value and,
   // in nPoints, the actual number of used points
   // method using the errors in the coordinates
   // the integral of the bin does not make sense in this case
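   // the coordinate errors are propagated through the function gradient ("effective variance" method):
   //    chi2(p) = Sum_i ( y_i - f(x_i; p) )^2 / ( ey_i^2 + Sum_j ( df/dx_j (x_i; p) * ex_ij )^2 )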

   unsigned int n = data.Size();

#ifdef DEBUG
   std::cout << "\n\nFit data size = " << n << std::endl;
   std::cout << "evaluate effective chi2 using function " << &func << "  " << p << std::endl;
#endif

   assert(data.HaveCoordErrors() || data.HaveAsymErrors());

   double chi2 = 0;
   //int nRejected = 0;


   //func.SetParameters(p);

   unsigned int ndim = func.NDim();

   // use the Richardson derivator
   ROOT::Math::RichardsonDerivator derivator;

   double maxResValue = std::numeric_limits<double>::max() / n;



   for (unsigned int i = 0; i < n; ++ i) {


      double y = 0;
      const double * x = data.GetPoint(i,y);

      double fval = func( x, p );

      double delta_y_func = y - fval;


      double ey = 0;
      const double * ex = nullptr;
      if (!data.HaveAsymErrors() )
         ex = data.GetPointError(i, ey);
      else {
         double eylow = 0, eyhigh = 0;
         ex = data.GetPointError(i, eylow, eyhigh);
         if ( delta_y_func < 0)
            ey = eyhigh; // function is higher than the points
         else
            ey = eylow;
      }
      double e2 = ey * ey;
      // before calculating the gradient, check that not all errors in x are zero
      unsigned int j = 0;
      while ( j < ndim && ex[j] == 0.)  { j++; }
      // if j is less than ndim, some errors are non-zero
      if (j < ndim) {
         // need an adapter from a multi-dim function to a one-dimensional one
         ROOT::Math::OneDimMultiFunctionAdapter<const IModelFunction &> f1D(func, x, 0, p);
         // select the optimal step size (use 10^-2 by default, as was done in TF1)
         double kEps = 0.01;
         double kPrecision = 1.E-8;
         for (unsigned int icoord = 0; icoord < ndim; ++icoord) {
            // calculate the derivative for each coordinate
            if (ex[icoord] > 0) {
               //gradCalc.Gradient(x, p, fval, &grad[0]);
               f1D.SetCoord(icoord);
               // optimal step size (take ex[] as the scale for the points and use 1% of it)
               double x0 = x[icoord];
               double h = std::max( kEps * std::abs(ex[icoord]), 8.0 * kPrecision * (std::abs(x0) + kPrecision) );
               double deriv = derivator.Derivative1(f1D, x[icoord], h);
               double edx = ex[icoord] * deriv;
               e2 += edx * edx;
#ifdef DEBUG
               std::cout << "error for coord " << icoord << " = " << ex[icoord] << " deriv " << deriv << std::endl;
#endif
            }
         }
      }
      double w2 = (e2 > 0) ? 1.0/e2 : 0;
      double resval = w2 * ( y - fval ) * ( y - fval);

#ifdef DEBUG
      std::cout << x[0] << "  " << y << " ex " << ex[0] << " ey " << ey << " params : ";
      for (unsigned int ipar = 0; ipar < func.NPar(); ++ipar)
         std::cout << p[ipar] << "\t";
      std::cout << "\tfval = " << fval << "\tresval = " << resval << std::endl;
#endif

      // avoid (infinity and nan) in the chi2 sum
      // eventually add the possibility of excluding some points (like singularities)
      if ( resval < maxResValue )
         chi2 += resval;
      else
         chi2 += maxResValue;
      //nRejected++;

   }

   // reset the number of fitting data points
   nPoints = n; // no points are rejected
   //if (nRejected != 0)  nPoints = n - nRejected;

#ifdef DEBUG
   std::cout << "chi2 = " << chi2 << " n = " << nPoints << std::endl;
#endif

   return chi2;

}


////////////////////////////////////////////////////////////////////////////////
/// evaluate the chi2 contribution (residual term) only for data with no coordinate errors
/// This function is used in the specialized least-square algorithms like FUMILI or L.M.
/// If we have errors on the coordinates, the residual weight depends on the function value and
/// the approximation used by Fumili and Levenberg-Marquardt cannot be used.
/// Also the expected-error and bin-integral options should not be used in this case
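/// In this scheme the i-th residual is r_i = ( y_i - f(x_i; p) ) / e_i, so that chi2 = Sum_i r_i^2;
/// the minimizer needs only r_i and its derivatives w.r.t. the parameters, dr_i/dp_k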

double FitUtil::EvaluateChi2Residual(const IModelFunction & func, const BinData & data, const double * p, unsigned int i, double * g, double * h, bool hasGrad, bool useFullHessian) {
   if (data.GetErrorType() == BinData::kCoordError && data.Opt().fCoordErrors ) {
      MATH_ERROR_MSG("FitUtil::EvaluateChi2Residual","Error on the coordinates are not used in calculating Chi2 residual");
      return 0; // it will assert otherwise later in GetPoint
   }

   double y, invError = 0;

   const DataOptions & fitOpt = data.Opt();
   bool useBinIntegral = fitOpt.fIntegral && data.HasBinEdges();
   bool useBinVolume = (fitOpt.fBinVolume && data.HasBinEdges());
   bool useExpErrors = (fitOpt.fExpErrors);
   bool useNormBinVolume = (useBinVolume && fitOpt.fNormBinVolume);

   const double * x1 = data.GetPoint(i,y, invError);

   unsigned int ndim = data.NDim();
   double binVolume = 1.0;
   const double * x2 = nullptr;
   if (useBinVolume || useBinIntegral) x2 = data.BinUpEdge(i);

   std::vector<double> xc;

   if (useBinVolume) {
      if (!useBinIntegral) {
         xc.resize(ndim);
         for (unsigned int j = 0; j < ndim; ++j) {
            binVolume *= std::abs( x2[j]-x1[j] );
            xc[j] = 0.5*(x2[j]+ x1[j]);
         }
      }
      // normalize the bin volume using a reference value
      if (useNormBinVolume) binVolume /= data.RefVolume();
   }

   const double * x = (useBinVolume) ? xc.data() : x1;

   // calculate the integral (normalized by the bin volume)
   // need to set function and parameters here in case the loop is parallelized
   IntegralEvaluator<> igEval( func, p, useBinIntegral);
   double fval0 = (useBinIntegral) ? igEval( x1, x2) : func ( x, p );

   // normalize the result if requested according to the bin volume
   double fval = fval0;
   if (useBinVolume) fval = fval0*binVolume;

   // expected errors
   if (useExpErrors) {
      // check if a weight factor needs to be applied
      // for bins with y = 0 use as weight the global weight of the full histogram
      double invWeight = 1.0;
      if (data.IsWeighted() && !fitOpt.fErrors1 ) { // case of a weighted Pearson chi2 fit
         invWeight = y * invError * invError;
         if (y == 0) invWeight = (data.SumOfError2() > 0) ? data.SumOfContent()/ data.SumOfError2() : 1.0;
      }
      // compute the expected error as f(x) / weight
      double invError2 = (fval > 0) ? invWeight / fval : 0.0;
      invError = std::sqrt(invError2);
   }


   double resval = ( y - fval ) * invError;

   // avoid infinities or nan in resval
   resval = CorrectValue(resval);

   // estimate the gradient
   if (g) {

      unsigned int npar = func.NPar();

      // use the gradient of the model function only if the FCN supports gradients
      const IGradModelFunction * gfunc = (hasGrad) ?
         dynamic_cast<const IGradModelFunction *>( &func) : nullptr;

      if (!h) useFullHessian = false;
      // this is not supported yet!
      if (useFullHessian && (!gfunc || useBinIntegral || (gfunc && !gfunc->HasParameterHessian())))
         return std::numeric_limits<double>::quiet_NaN();

      if (gfunc) {
         // case function provides the gradient
         if (!useBinIntegral ) {
            gfunc->ParameterGradient(x , p, g);
            if (useFullHessian) {
               gfunc->ParameterHessian(x, p, h);
            }
         }
         else {
            // needs to calculate the integral for each partial derivative
            CalculateGradientIntegral( *gfunc, x1, x2, p, g);
         }
      }
      else {
         SimpleGradientCalculator gc( npar, func);
         if (!useBinIntegral ) {
            // need to use the un-normalized fval
            gc.ParameterGradient(x, p, fval0, g);
         } else {
            // needs to calculate the integral for each partial derivative
            CalculateGradientIntegral( gc, x1, x2, p, g);
         }
      }
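      // (note on the packing used below: h stores the lower triangle of the symmetric Hessian,
      //  element (k,l) with l <= k at index idx = l + k*(k+1)/2;
      //  the term 2*g[k]*g[l] is the usual Gauss-Newton approximation of d2(r^2)/dp_k dp_l,
      //  while the term proportional to resval uses the second derivatives of the model)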
      // multiply by -1 * weight
      for (unsigned int k = 0; k < npar; ++k) {
         g[k] *= - invError;
         if (useBinVolume) g[k] *= binVolume;
         if (h) {
            for (unsigned int l = 0; l <= k; l++) { // use the lower diagonal because g[k] is modified
               unsigned int idx = l + k * (k + 1) / 2;
               if (useFullHessian) {
                  h[idx] *= 2. * resval * (-invError); // Hessian of the model function
                  if (useBinVolume) h[idx] *= binVolume;
               }
               else {
                  h[idx] = 0;
               }
               // add the term depending only on the gradient of the model function
               h[idx] += 2. * g[k]*g[l];
            }
         }
      }
   }


   return resval;

}

void FitUtil::EvaluateChi2Gradient(const IModelFunction &f, const BinData &data, const double *p, double *grad,
                                   unsigned int &nPoints, ROOT::EExecutionPolicy executionPolicy, unsigned nChunks)
{
   // evaluate the gradient of the chi2 function
   // this function is used when the model function knows how to calculate its derivatives, and we can
   // avoid having the minimizer re-compute them
   //
   // the case of effective chi2 (errors on the coordinates) is not supported
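   //
   // the quantity computed for each parameter k is
   //    d(chi2)/dp_k = -2 * Sum_i ( y_i - f(x_i; p) ) / e_i^2 * df(x_i; p)/dp_k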

   if (data.HaveCoordErrors()) {
      MATH_ERROR_MSG("FitUtil::EvaluateChi2Gradient",
                     "Error on the coordinates are not used in calculating Chi2 gradient");
      return; // it will assert otherwise later in GetPoint
   }

   const IGradModelFunction *fg = dynamic_cast<const IGradModelFunction *>(&f);
   assert(fg != nullptr); // must be called by a gradient function

   const IGradModelFunction &func = *fg;

#ifdef DEBUG
   std::cout << "\n\nFit data size = " << nPoints << std::endl;
   std::cout << "evaluate chi2 using function gradient " << &func << "  " << p << std::endl;
#endif

   const DataOptions &fitOpt = data.Opt();
   bool useBinIntegral = fitOpt.fIntegral && data.HasBinEdges();
   bool useBinVolume = (fitOpt.fBinVolume && data.HasBinEdges());

   double wrefVolume = 1.0;
   if (useBinVolume) {
      if (fitOpt.fNormBinVolume) wrefVolume /= data.RefVolume();
   }

   ROOT::Math::IntegrationOneDim::Type igType = ROOT::Math::IntegrationOneDim::kDEFAULT;
   if (executionPolicy == ROOT::EExecutionPolicy::kMultiThread) {
      // do not use the GSL integrator, which is not thread safe
      igType = ROOT::Math::IntegrationOneDim::kGAUSS;
   }
   IntegralEvaluator<> igEval(func, p, useBinIntegral, igType);

   unsigned int npar = func.NPar();
   unsigned initialNPoints = data.Size();

   std::vector<bool> isPointRejected(initialNPoints);

   auto mapFunction = [&](const unsigned int i) {
      // set all vector values to zero
      std::vector<double> gradFunc(npar);
      std::vector<double> pointContribution(npar);

      const auto x1 = data.GetCoordComponent(i, 0);
      const auto y = data.Value(i);
      auto invError = data.Error(i);

      invError = (invError != 0.0) ? 1.0 / invError : 1;

      double fval = 0;

      const double *x = nullptr;
      std::vector<double> xc;

      unsigned int ndim = data.NDim();
      double binVolume = 1;
      if (useBinVolume) {
         xc.resize(ndim);
         for (unsigned int j = 0; j < ndim; ++j) {
            double x1_j = *data.GetCoordComponent(i, j);
            double x2_j = data.GetBinUpEdgeComponent(i, j);
            binVolume *= std::abs(x2_j - x1_j);
            xc[j] = (useBinIntegral) ? x1_j : 0.5 * (x2_j + x1_j);
         }

         x = xc.data();

         // normalize the bin volume using a reference value
         binVolume *= wrefVolume;
      } else if (ndim > 1) {
         xc.resize(ndim);
         xc[0] = *x1;
         for (unsigned int j = 1; j < ndim; ++j)
            xc[j] = *data.GetCoordComponent(i, j);
         x = xc.data();
      } else {
         x = x1;
      }

      if (!useBinIntegral) {
         fval = func(x, p);
         func.ParameterGradient(x, p, &gradFunc[0]);
      } else {
         std::vector<double> x2(data.NDim());
         data.GetBinUpEdgeCoordinates(i, x2.data());
         // calculate the normalized integral and gradient (divided by the bin volume)
         // need to set function and parameters here in case the loop is parallelized
         fval = igEval(x, x2.data());
         CalculateGradientIntegral(func, x, x2.data(), p, &gradFunc[0]);
      }
      if (useBinVolume)
         fval *= binVolume;

#ifdef DEBUG
      std::cout << x[0] << "  " << y << "  " << 1. / invError << " params : ";
      for (unsigned int ipar = 0; ipar < npar; ++ipar)
         std::cout << p[ipar] << "\t";
      std::cout << "\tfval = " << fval << std::endl;
#endif
      if (!CheckInfNaNValue(fval)) {
         isPointRejected[i] = true;
         // return a zero contribution to all partial derivatives on behalf of the current point
         return pointContribution;
      }

      // loop on the parameters
      unsigned int ipar = 0;
      for (; ipar < npar; ++ipar) {

         // correct the gradient for the bin volumes
         if (useBinVolume)
            gradFunc[ipar] *= binVolume;

         // avoid singularities in the function (infinity and nan) in the chi2 sum
         // eventually add the possibility of excluding some points (like singularities)
         double dfval = gradFunc[ipar];
         if (!CheckInfNaNValue(dfval)) {
            break; // exit the loop on the parameters
         }

         // calculate the derivative point contribution
         pointContribution[ipar] = -2.0 * (y - fval) * invError * invError * gradFunc[ipar];
      }

      if (ipar < npar) {
         // case the loop was broken for an overflow in the gradient calculation
         isPointRejected[i] = true;
      }

      return pointContribution;
   };

   // Vertically reduce the set of vectors by summing their equally-indexed components
   auto redFunction = [&](const std::vector<std::vector<double>> &pointContributions) {
      std::vector<double> result(npar);

      for (auto const &pointContribution : pointContributions) {
         for (unsigned int parameterIndex = 0; parameterIndex < npar; parameterIndex++)
            result[parameterIndex] += pointContribution[parameterIndex];
      }

      return result;
   };

   std::vector<double> g(npar);

#ifndef R__USE_IMT
   // If IMT is disabled, force the execution policy to the serial case
   if (executionPolicy == ROOT::EExecutionPolicy::kMultiThread) {
      Warning("FitUtil::EvaluateChi2Gradient", "Multithread execution policy requires IMT, which is disabled. Changing "
                                               "to ROOT::EExecutionPolicy::kSequential.");
      executionPolicy = ROOT::EExecutionPolicy::kSequential;
   }
#endif

   if (executionPolicy == ROOT::EExecutionPolicy::kSequential) {
      std::vector<std::vector<double>> allGradients(initialNPoints);
      for (unsigned int i = 0; i < initialNPoints; ++i) {
         allGradients[i] = mapFunction(i);
      }
      g = redFunction(allGradients);
   }
#ifdef R__USE_IMT
   else if (executionPolicy == ROOT::EExecutionPolicy::kMultiThread) {
      ROOT::TThreadExecutor pool;
      auto chunks = nChunks != 0 ? nChunks : setAutomaticChunking(initialNPoints);
      g = pool.MapReduce(mapFunction, ROOT::TSeq<unsigned>(0, initialNPoints), redFunction, chunks);
   }
#endif
   // else if(executionPolicy == ROOT::Fit::kMultiProcess){
   //    ROOT::TProcessExecutor pool;
   //    g = pool.MapReduce(mapFunction, ROOT::TSeq<unsigned>(0, n), redFunction);
   // }
   else {
      Error("FitUtil::EvaluateChi2Gradient",
            "Execution policy unknown. Available choices:\n 0: Serial (default)\n 1: MultiThread (requires IMT)\n");
   }

#ifndef R__USE_IMT
   // to fix the compiler warning
   (void)nChunks;
#endif

   // correct the number of points
   nPoints = initialNPoints;

   if (std::any_of(isPointRejected.begin(), isPointRejected.end(), [](bool point) { return point; })) {
      unsigned nRejected = std::accumulate(isPointRejected.begin(), isPointRejected.end(), 0);
      assert(nRejected <= initialNPoints);
      nPoints = initialNPoints - nRejected;

      if (nPoints < npar)
         MATH_ERROR_MSG("FitUtil::EvaluateChi2Gradient",
                        "Error - too many points rejected for overflow in gradient calculation");
   }

   // copy the result
   std::copy(g.begin(), g.end(), grad);
}

//______________________________________________________________________________________________________
//
//  Log Likelihood functions
//_______________________________________________________________________________________________________

// utility function used by the likelihoods

// for LogLikelihood functions

double FitUtil::EvaluatePdf(const IModelFunction & func, const UnBinData & data, const double * p, unsigned int i, double * g, double * /*h*/, bool hasGrad, bool) {
   // evaluate the pdf contribution to the generic logl function in case of unbinned data
   // return actually the log of the pdf and its derivatives
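   // (the derivatives follow from the chain rule, d log f / dp_k = (1/f) * df/dp_k,
   //  which is why the model gradient is divided by the function value below)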


   //func.SetParameters(p);

   const double * x = data.Coords(i);
   double fval = func ( x, p );
   double logPdf = ROOT::Math::Util::EvalLog(fval);
   //return
   if (g == nullptr) return logPdf;

   const IGradModelFunction * gfunc = (hasGrad) ?
      dynamic_cast<const IGradModelFunction *>( &func) : nullptr;

   // gradient calculation
   if (gfunc) {
      // case function provides the gradient
      gfunc->ParameterGradient( x , p, g );
   }
   else {
      // estimate the gradient numerically with the simple 2-point rule
      // it would probably be numerically more stable to calculate the gradient of log(pdf) directly
      SimpleGradientCalculator gc(func.NPar(), func);
      gc.ParameterGradient(x, p, fval, g );
   }
   // divide the gradient by the function value since we are returning the logs
   for (unsigned int ipar = 0; ipar < func.NPar(); ++ipar) {
      g[ipar] /= fval; // this should be checked against infinities
   }

#ifdef DEBUG
   std::cout << x[i] << "\t";
   std::cout << "\tpar = [ " << func.NPar() << " ] = ";
   for (unsigned int ipar = 0; ipar < func.NPar(); ++ipar)
      std::cout << p[ipar] << "\t";
   std::cout << "\tfval = " << fval;
   std::cout << "\tgrad = [ ";
   for (unsigned int ipar = 0; ipar < func.NPar(); ++ipar)
      std::cout << g[ipar] << "\t";
   std::cout << " ] " << std::endl;
#endif


   return logPdf;
}

double FitUtil::EvaluateLogL(const IModelFunction &func, const UnBinData &data, const double *p,
                             int iWeight, bool extended, unsigned int &nPoints,
                             ROOT::EExecutionPolicy executionPolicy, unsigned nChunks)
{
   // evaluate the LogLikelihood
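   // the quantity computed (and returned with a negative sign, for minimization) is
   //    log L(p) = Sum_i log f(x_i; p)      (with weights: Sum_i w_i log f(x_i; p))
   // for an extended fit a Poisson term is added; see the extended-term computation below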

   unsigned int n = data.Size();

   //unsigned int nRejected = 0;

   bool normalizeFunc = false;

   // set parameters of the function to cache the integral value
#ifdef USE_PARAMCACHE
   (const_cast<IModelFunctionTempl<double> &>(func)).SetParameters(p);
#endif

   nPoints = data.Size();  // npoints

#ifdef R__USE_IMT
   // in case the parameters need to be propagated to the user function, use the trick of calling the function once
   // this will be done in sequential mode, where the parameters can be set in a thread-safe manner
   if (!normalizeFunc) {
      if (data.NDim() == 1) {
         const double * x = data.GetCoordComponent(0,0);
         func( x, p);
      }
      else {
         std::vector<double> x(data.NDim());
         for (unsigned int j = 0; j < data.NDim(); ++j)
            x[j] = *data.GetCoordComponent(0, j);
         func( x.data(), p);
      }
   }
#endif

   double norm = 1.0;
   if (normalizeFunc) {
      // compute the integral of the function
      std::vector<double> xmin(data.NDim());
      std::vector<double> xmax(data.NDim());
      IntegralEvaluator<> igEval(func, p, true);
      // compute the integral in the ranges where the function is defined
      if (data.Range().Size() > 0) {
         norm = 0;
         for (unsigned int ir = 0; ir < data.Range().Size(); ++ir) {
            data.Range().GetRange(&xmin[0], &xmax[0], ir);
            norm += igEval.Integral(xmin.data(), xmax.data());
         }
      } else {
         // use (-inf, +inf)
         data.Range().GetRange(&xmin[0], &xmax[0]);
         // check if the function is zero at +- inf
         if (func(xmin.data(), p) != 0 || func(xmax.data(), p) != 0) {
            MATH_ERROR_MSG("FitUtil::EvaluateLogLikelihood",
                           "A range has not been set and the function is not zero at +/- inf");
            return 0;
         }
         norm = igEval.Integral(&xmin[0], &xmax[0]);
      }
   }

   // needed to compute the effective global weight in case of extended likelihood

   auto mapFunction = [&](const unsigned i) {
      double W = 0;
      double W2 = 0;
      double fval = 0;

      if (data.NDim() > 1) {
         std::vector<double> x(data.NDim());
         for (unsigned int j = 0; j < data.NDim(); ++j)
            x[j] = *data.GetCoordComponent(i, j);
#ifdef USE_PARAMCACHE
         fval = func(x.data());
#else
         fval = func(x.data(), p);
#endif

         // one-dim case
      } else {
         const auto x = data.GetCoordComponent(i, 0);
#ifdef USE_PARAMCACHE
         fval = func(x);
#else
         fval = func(x, p);
#endif
      }

      if (normalizeFunc)
         fval = fval * (1 / norm);

      // the function EvalLog protects against negative or too small values of fval
      double logval = ROOT::Math::Util::EvalLog(fval);
      if (iWeight > 0) {
         double weight = data.Weight(i);
         logval *= weight;
         if (iWeight == 2) {
            logval *= weight; // use the square of the weights in the likelihood
            if (extended) {
               // the sum of the weights and the sum of the squared weights are needed when the likelihood is extended
               W = weight;
               W2 = weight * weight;
            }
         }
      }
      return LikelihoodAux<double>(logval, W, W2);
   };

#ifdef R__USE_IMT
   // auto redFunction = [](const std::vector<LikelihoodAux<double>> & objs){
   //    return std::accumulate(objs.begin(), objs.end(), LikelihoodAux<double>(0.0,0.0,0.0),
   //             [](const LikelihoodAux<double> &l1, const LikelihoodAux<double> &l2){
   //                return l1+l2;
   //    });
   // };
   // do not use std::accumulate, to be sure to always maintain the same order
   auto redFunction = [](const std::vector<LikelihoodAux<double>> & objs){
      auto l0 = LikelihoodAux<double>(0.0,0.0,0.0);
      for ( auto & l : objs ) {
         l0 = l0 + l;
      }
      return l0;
   };
#else
   (void)nChunks;

   // If IMT is disabled, force the execution policy to the serial case
   if (executionPolicy == ROOT::EExecutionPolicy::kMultiThread) {
      Warning("FitUtil::EvaluateLogL", "Multithread execution policy requires IMT, which is disabled. Changing "
                                       "to ROOT::EExecutionPolicy::kSequential.");
      executionPolicy = ROOT::EExecutionPolicy::kSequential;
   }
#endif

   double logl{};
   double sumW{};
   double sumW2{};
   if (executionPolicy == ROOT::EExecutionPolicy::kSequential){
      for (unsigned int i=0; i<n; ++i) {
         auto resArray = mapFunction(i);
         logl += resArray.logvalue;
         sumW += resArray.weight;
         sumW2 += resArray.weight2;
      }
#ifdef R__USE_IMT
   } else if (executionPolicy == ROOT::EExecutionPolicy::kMultiThread) {
      ROOT::TThreadExecutor pool;
      auto chunks = nChunks != 0 ? nChunks : setAutomaticChunking(data.Size());
      auto resArray = pool.MapReduce(mapFunction, ROOT::TSeq<unsigned>(0, n), redFunction, chunks);
      logl = resArray.logvalue;
      sumW = resArray.weight;
      sumW2 = resArray.weight2;
#endif
   // } else if(executionPolicy == ROOT::Fit::kMultiProcess){
   //    ROOT::TProcessExecutor pool;
   //    res = pool.MapReduce(mapFunction, ROOT::TSeq<unsigned>(0, n), redFunction);
   } else {
      Error("FitUtil::EvaluateLogL","Execution policy unknown. Available choices:\n ROOT::EExecutionPolicy::kSequential (default)\n ROOT::EExecutionPolicy::kMultiThread (requires IMT)\n");
   }

   if (extended) {
      // add the Poisson extended term
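      // for an extended unbinned fit the likelihood acquires a Poisson factor in the total
      // number of events, giving (up to constants)
      //    log L_ext = Sum_i log f(x_i; p) - nu      with  nu = Integral f(x; p) dx
      // (when the function is normalized, the n * log(nu) piece has to be added back
      //  explicitly, since Sum_i log f then no longer contains it; see the two branches below)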
      double extendedTerm = 0; // extended term in the likelihood
      double nuTot = 0;
      // nuTot is the integral of the function in the range
      // if the function has been normalized, the integral has already been computed
      if (!normalizeFunc) {
         IntegralEvaluator<> igEval( func, p, true);
         std::vector<double> xmin(data.NDim());
         std::vector<double> xmax(data.NDim());

         // compute the integral in the ranges where the function is defined
         if (data.Range().Size() > 0 ) {
            nuTot = 0;
            for (unsigned int ir = 0; ir < data.Range().Size(); ++ir) {
               data.Range().GetRange(&xmin[0],&xmax[0],ir);
               nuTot += igEval.Integral(xmin.data(),xmax.data());
            }
         } else {
            // use (-inf, +inf)
            data.Range().GetRange(&xmin[0],&xmax[0]);
            // check if the function is zero at +- inf
            if (func(xmin.data(), p) != 0 || func(xmax.data(), p) != 0) {
               MATH_ERROR_MSG("FitUtil::EvaluateLogLikelihood","A range has not been set and the function is not zero at +/- inf");
               return 0;
            }
            nuTot = igEval.Integral(&xmin[0],&xmax[0]);
         }

         // force to be the last parameter value
         //nutot = p[func.NDim()-1];
         if (iWeight != 2)
            extendedTerm = - nuTot; // no need to add the n log(nu) term in this case, since it is already computed before
         else {
            // case of using the weight square in the likelihood : compute the total effective weight = sw2/sw
            // ignore for the moment the case when sumW is zero
            extendedTerm = - (sumW2 / sumW) * nuTot;
         }

      }
      else {
         nuTot = norm;
         extendedTerm = - nuTot + double(n) * ROOT::Math::Util::EvalLog( nuTot);
         // in case of weights the sum of weights should be used here (to be done)
      }
      logl += extendedTerm;

   }

#ifdef DEBUG
   std::cout << "Evaluated log L for parameters (";
   for (unsigned int ip = 0; ip < func.NPar(); ++ip)
      std::cout << " " << p[ip];
   std::cout << ")  fval = " << -logl << std::endl;
#endif

   return -logl;
}

void FitUtil::EvaluateLogLGradient(const IModelFunction &f, const UnBinData &data, const double *p, double *grad,
                                   unsigned int &nPoints, ROOT::EExecutionPolicy executionPolicy, unsigned nChunks)
{
   // evaluate the gradient of the log likelihood function
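   // the quantity computed for each parameter k is
   //    d(-log L)/dp_k = - Sum_i ( 1 / f(x_i; p) ) * df(x_i; p)/dp_k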

   const IGradModelFunction *fg = dynamic_cast<const IGradModelFunction *>(&f);
   assert(fg != nullptr); // must be called by a gradient function

   const IGradModelFunction &func = *fg;

   unsigned int npar = func.NPar();
   unsigned initialNPoints = data.Size();

   (const_cast<IGradModelFunction &>(func)).SetParameters(p);

#ifdef DEBUG
   std::cout << "\n===> Evaluate Gradient for parameters ";
   for (unsigned int ip = 0; ip < npar; ++ip)
      std::cout << "  " << p[ip];
   std::cout << "\n";
#endif

   const double kdmax1 = std::sqrt(std::numeric_limits<double>::max());
   const double kdmax2 = std::numeric_limits<double>::max() / (4 * initialNPoints);

   auto mapFunction = [&](const unsigned int i) {
      std::vector<double> gradFunc(npar);
      std::vector<double> pointContribution(npar);


      const double * x = nullptr;
      std::vector<double> xc;
      if (data.NDim() > 1) {
         xc.resize(data.NDim() );
         for (unsigned int j = 0; j < data.NDim(); ++j)
            xc[j] = *data.GetCoordComponent(i, j);
         x = xc.data();
      } else {
         x = data.GetCoordComponent(i, 0);
      }

      double fval = func(x, p);
      func.ParameterGradient(x, p, &gradFunc[0]);

#ifdef DEBUG
      {
         R__LOCKGUARD(gROOTMutex);
         if (i < 5 || (i > data.Size()-5) ) {
            if (data.NDim() > 1)
               std::cout << i << "  x " << x[0] << " y " << x[1] << " func " << fval
                         << " gradient " << gradFunc[0] << "  " << gradFunc[1] << "  " << gradFunc[3] << std::endl;
            else
               std::cout << i << "  x " << x[0] << " gradient " << gradFunc[0] << "  " << gradFunc[1] << "  " << gradFunc[3] << std::endl;
         }
      }
#endif

      for (unsigned int kpar = 0; kpar < npar; ++kpar) {
         if (fval > 0)
            pointContribution[kpar] = -1. / fval * gradFunc[kpar];
         else if (gradFunc[kpar] != 0) {
            double gg = kdmax1 * gradFunc[kpar];
            if (gg > 0)
               gg = std::min(gg, kdmax2);
            else
               gg = std::max(gg, -kdmax2);
            pointContribution[kpar] = -gg;
         }
         // if the function derivative is zero the term is also zero, so do not add to g[kpar]
      }

      return pointContribution;
   };

   // Vertically reduce the set of vectors by summing their equally-indexed components
   auto redFunction = [&](const std::vector<std::vector<double>> &pointContributions) {
      std::vector<double> result(npar);

      for (auto const &pointContribution : pointContributions) {
         for (unsigned int parameterIndex = 0; parameterIndex < npar; parameterIndex++)
            result[parameterIndex] += pointContribution[parameterIndex];
      }

      return result;
   };

   std::vector<double> g(npar);

#ifndef R__USE_IMT
   // If IMT is disabled, force the execution policy to the serial case
   if (executionPolicy == ROOT::EExecutionPolicy::kMultiThread) {
      Warning("FitUtil::EvaluateLogLGradient", "Multithread execution policy requires IMT, which is disabled. Changing "
                                               "to ROOT::EExecutionPolicy::kSequential.");
      executionPolicy = ROOT::EExecutionPolicy::kSequential;
   }
#endif

   if (executionPolicy == ROOT::EExecutionPolicy::kSequential) {
      std::vector<std::vector<double>> allGradients(initialNPoints);
      for (unsigned int i = 0; i < initialNPoints; ++i) {
         allGradients[i] = mapFunction(i);
      }
      g = redFunction(allGradients);
   }
#ifdef R__USE_IMT
   else if (executionPolicy == ROOT::EExecutionPolicy::kMultiThread) {
      ROOT::TThreadExecutor pool;
      auto chunks = nChunks != 0 ? nChunks : setAutomaticChunking(initialNPoints);
      g = pool.MapReduce(mapFunction, ROOT::TSeq<unsigned>(0, initialNPoints), redFunction, chunks);
   }
#endif
   else {
      Error("FitUtil::EvaluateLogLGradient", "Execution policy unknown. Available choices:\n "
                                             "ROOT::EExecutionPolicy::kSequential (default)\n "
                                             "ROOT::EExecutionPolicy::kMultiThread (requires IMT)\n");
   }

#ifndef R__USE_IMT
   // to fix the compiler warning
   (void)nChunks;
#endif

   // copy the result
   std::copy(g.begin(), g.end(), grad);
   nPoints = data.Size();  // npoints

#ifdef DEBUG
   std::cout << "FitUtil.cxx : Final gradient ";
   for (unsigned int param = 0; param < npar; param++) {
      std::cout << "  " << grad[param];
   }
   std::cout << "\n";
#endif
}
//_________________________________________________________________________________________________
// for binned log likelihood functions
////////////////////////////////////////////////////////////////////////////////
/// evaluate the pdf (Poisson) contribution to the logl (return actually the log of the pdf)
/// and its gradient (gradient of log(pdf))

double FitUtil::EvaluatePoissonBinPdf(const IModelFunction & func, const BinData & data, const double * p, unsigned int i, double * g, double * h, bool hasGrad, bool useFullHessian) {
   double y = 0;
   const double * x1 = data.GetPoint(i,y);

   const DataOptions & fitOpt = data.Opt();
   bool useBinIntegral = fitOpt.fIntegral && data.HasBinEdges();
   bool useBinVolume = (fitOpt.fBinVolume && data.HasBinEdges());

   IntegralEvaluator<> igEval( func, p, useBinIntegral);
   const double * x2 = nullptr;
   // the bin upper edges are needed later by CalculateGradientIntegral when using the bin integral
   if (useBinVolume || useBinIntegral) x2 = data.BinUpEdge(i);
   // calculate the bin volume
   double binVolume = 1;
   std::vector<double> xc;
   if (useBinVolume) {
      unsigned int ndim = data.NDim();
      xc.resize(ndim);
      for (unsigned int j = 0; j < ndim; ++j) {
         double x2j = data.GetBinUpEdgeComponent(i, j);
         binVolume *= std::abs( x2j-x1[j] );
         xc[j] = 0.5*(x2j+ x1[j]);
      }
      // normalize the bin volume using a reference value
      binVolume /= data.RefVolume();
   }

   const double * x = (useBinVolume) ? &xc.front() : x1;

   double fval0 = 0;
   if (!useBinIntegral ) {
      fval0 = func ( x, p );
   }
   else {
      // calculate the integral normalized (divided by the bin volume)
      std::vector<double> vx2(data.NDim());
      data.GetBinUpEdgeCoordinates(i, vx2.data());
      fval0 = igEval( x1, vx2.data() ) ;
   }
   double fval = fval0;
   if (useBinVolume) fval = fval0*binVolume;

   // logPdf for Poisson: ignore the constant term depending on N
   fval = std::max(fval, 0.0); // avoid negative or too small values
   double nlogPdf = fval;
   if (y > 0.0) {
      // include also the constants due to the saturated model (see the Baker-Cousins paper)
      nlogPdf -= y * ROOT::Math::Util::EvalLog( fval / y) + y;
   }
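   // (the result is the per-bin Baker-Cousins negative log-likelihood ratio,
   //     nlogPdf = fval - y + y * log( y / fval ),
   //  which vanishes for a saturated model, fval = y, and is the same per-bin quantity
   //  summed in EvaluatePoissonLogL below)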

   if (g == nullptr) return nlogPdf;

   unsigned int npar = func.NPar();
   const IGradModelFunction * gfunc = (hasGrad) ?
      dynamic_cast<const IGradModelFunction *>( &func) : nullptr;

   // for the full Hessian we need a gradient function and no bin-integral computation
   if (useFullHessian && (!gfunc || useBinIntegral || (gfunc && !gfunc->HasParameterHessian())))
      return std::numeric_limits<double>::quiet_NaN();

   // gradient calculation
   if (gfunc) {
      // case function provides the gradient
      if (!useBinIntegral ) {
         gfunc->ParameterGradient( x , p, g );
         if (useFullHessian && h) {
            if (!gfunc->HasParameterHessian())
               return std::numeric_limits<double>::quiet_NaN();
            bool goodHessFunc = gfunc->ParameterHessian(x , p, h);
            if (!goodHessFunc) {
               return std::numeric_limits<double>::quiet_NaN();
            }
         }
      }
      else {
         // needs to calculate the integral for each partial derivative
         CalculateGradientIntegral( *gfunc, x1, x2, p, g);
      }

   }
   else {
      SimpleGradientCalculator gc(func.NPar(), func);
      if (!useBinIntegral )
         gc.ParameterGradient(x, p, fval0, g);
      else {
         // needs to calculate the integral for each partial derivative
         CalculateGradientIntegral( gc, x1, x2, p, g);
      }
   }
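   // chain rule for the per-bin Poisson NLL as a function of the (volume-scaled) model value F = fval:
   //    d(nlogPdf)/dF   = 1 - y/F      -> coeffGrad
   //    d2(nlogPdf)/dF2 = y/F^2        -> coeffHess
   // g holds the model derivatives df/dp_k; the bin-volume factors relating f to F are
   // folded into the coefficients below, from which the gradient and the (approximate)
   // Hessian of the NLL term are assembled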
   // correct g[] to be the derivative of the Poisson NLL term (g currently holds the model function derivatives)
   double coeffGrad = (fval > 0) ? (1. - y/fval) : ( (y > 0) ? std::sqrt( std::numeric_limits<double>::max() ) : 1. );
   double coeffHess = (fval > 0) ? y/(fval*fval) : ( (y > 0) ? std::sqrt( std::numeric_limits<double>::max() ) : 0. );
   if (useBinVolume) {
      coeffGrad *= binVolume;
      coeffHess *= binVolume*binVolume;
   }
   for (unsigned int k = 0; k < npar; ++k) {
      // compute also the approximate Hessian (excluding the term with the second derivative of the model function)
      if (h) {
         for (unsigned int l = k; l < npar; ++l) {
            unsigned int idx = k + l * (l + 1) / 2;
            if (useFullHessian) {
               h[idx] *= coeffGrad; // h contains the model function second derivatives
            }
            else {
               h[idx] = 0;
            }
            // add the term depending only on the gradient of the model function
            h[idx] += coeffHess * g[k]*g[l]; // g are the model function derivatives
         }
      }
      // compute the gradient of the NLL element
      // (the bin volume correction is already included in coeffGrad)
      g[k] *= coeffGrad;
   }

#ifdef DEBUG
   std::cout << "x = " << x[0] << " y " << y << " fval " << fval << " logPdf = " << nlogPdf << " gradient : ";
   for (unsigned int ipar = 0; ipar < npar; ++ipar)
      std::cout << g[ipar] << "\t";
   if (h) {
      std::cout << "\thessian : ";
      for (unsigned int ipar = 0; ipar < npar; ++ipar) {
         std::cout << " {";
         for (unsigned int jpar = 0; jpar <= ipar; ++jpar) {
            std::cout << h[ipar + jpar * (jpar + 1) / 2] << "\t";
         }
         std::cout << "}";
      }
   }
   std::cout << std::endl;
#endif
#undef DEBUG

   return nlogPdf;
}

double FitUtil::EvaluatePoissonLogL(const IModelFunction &func, const BinData &data, const double *p, int iWeight,
                                    bool extended, unsigned int &nPoints, ROOT::EExecutionPolicy executionPolicy,
                                    unsigned nChunks)
{
   // evaluate the Poisson Log Likelihood
   // for binned likelihood fits
   // this is Sum ( f(x_i) - y_i * log( f(x_i) ) )
   // a constant term for the saturated model is added as well, to make it behave like a Chi2/2
   // by default the fit is extended; if extended is false the fit is not extended and
   // the global Poisson term is removed (i.e. it is a binomial fit)
   // (remember that in this case one needs to have a function with a fixed normalization,
   // like in a non-extended unbinned fit)
   //
   // if using weights, use a weighted data set
   // iWeight = 1 ==> logL = Sum( w f(x_i) )
   // the case iWeight==1 is actually identical to weight==0
   // iWeight = 2 ==> logL = Sum( w*w * f(x_i) )
   //
   // nPoints returns the points where the bin content is not zero


   unsigned int n = data.Size();

#ifdef USE_PARAMCACHE
   (const_cast<IModelFunction &>(func)).SetParameters(p);
#endif

   nPoints = data.Size();  // npoints


   // get the fit option and check the case of using the integral of the bins
   const DataOptions &fitOpt = data.Opt();
   bool useBinIntegral = fitOpt.fIntegral && data.HasBinEdges();
   bool useBinVolume = (fitOpt.fBinVolume && data.HasBinEdges());
   bool useW2 = (iWeight == 2);

   // normalize if needed by a reference volume value
   double wrefVolume = 1.0;
   if (useBinVolume) {
      if (fitOpt.fNormBinVolume) wrefVolume /= data.RefVolume();
   }

//#define DEBUG
#ifdef DEBUG
   std::cout << "Evaluate PoissonLogL for params = [ ";
   for (unsigned int j = 0; j < func.NPar(); ++j) std::cout << p[j] << " , ";
   std::cout << "] - data size = " << n << " useBinIntegral " << useBinIntegral << " useBinVolume "
             << useBinVolume << " useW2 " << useW2 << " wrefVolume = " << wrefVolume << std::endl;
#endif


   ROOT::Math::IntegrationOneDim::Type igType = ROOT::Math::IntegrationOneDim::kDEFAULT;
   if (executionPolicy == ROOT::EExecutionPolicy::kMultiThread) {
      // do not use the GSL integrator, which is not thread safe
      igType = ROOT::Math::IntegrationOneDim::kGAUSS;
   }
#ifdef USE_PARAMCACHE
   IntegralEvaluator<> igEval(func, nullptr, useBinIntegral, igType);
#else
   IntegralEvaluator<> igEval(func, p, useBinIntegral, igType);
#endif

   auto mapFunction = [&](const unsigned i) {
      auto x1 = data.GetCoordComponent(i, 0);
      auto y = *data.ValuePtr(i);

      const double *x = nullptr;
      std::vector<double> xc;
      double fval = 0;
      double binVolume = 1.0;

      if (useBinVolume) {
         unsigned int ndim = data.NDim();
         xc.resize(data.NDim());
         for (unsigned int j = 0; j < ndim; ++j) {
            double xx = *data.GetCoordComponent(i, j);
            double x2 = data.GetBinUpEdgeComponent(i, j);
            binVolume *= std::abs(x2 - xx);
            xc[j] = (useBinIntegral) ? xx : 0.5 * (x2 + xx);
         }
         x = xc.data();
         // normalize the bin volume using a reference value
         binVolume *= wrefVolume;
      } else if (data.NDim() > 1) {
         xc.resize(data.NDim());
         xc[0] = *x1;
         for (unsigned int j = 1; j < data.NDim(); ++j) {
            xc[j] = *data.GetCoordComponent(i, j);
         }
         x = xc.data();
      } else {
         x = x1;
      }

      if (!useBinIntegral) {
#ifdef USE_PARAMCACHE
         fval = func(x);
#else
         fval = func(x, p);
#endif
      } else {
         // calculate the integral (normalized by the bin volume)
         // need to set function and parameters here in case the loop is parallelized
         std::vector<double> x2(data.NDim());
         data.GetBinUpEdgeCoordinates(i, x2.data());
         fval = igEval(x, x2.data());
      }
      if (useBinVolume) fval *= binVolume;



#ifdef DEBUG
      int nSample = 100;
      if (i % nSample == 0) {
         std::cout << "evt " << i << " x = [ ";
         for (unsigned int j = 0; j < func.NDim(); ++j) std::cout << x[j] << " , ";
         std::cout << "] ";
         if (fitOpt.fIntegral) {
            std::cout << "x2 = [ ";
            for (unsigned int j = 0; j < func.NDim(); ++j) std::cout << data.GetBinUpEdgeComponent(i, j) << " , ";
            std::cout << "] ";
         }
         std::cout << " y = " << y << " fval = " << fval << std::endl;
      }
#endif


      // EvalLog protects against 0 values of fval, but we do not want to add
      // negative values of fval to the -log sum
      fval = std::max(fval, 0.0);

      double nloglike = 0; // negative log-likelihood
      if (useW2) {
         // apply the weight correction: the effective weight is error^2 / y
         // and the expected number of events in the bin is fval/weight
         // the correction can be applied only when y is not zero, otherwise the weight is undefined
         // (in the case of weighted likelihood the constant term due to
         // the saturated model does not matter)

         // use for the empty bins the global weight
         double weight = 1.0;
         if (y != 0) {
            double error = data.Error(i);
            weight = (error * error) / y; // this is the bin effective weight
            nloglike -= weight * y * ( ROOT::Math::Util::EvalLog(fval/y) );
         }
         else {
            // for an empty bin use the average weight computed from the total data weight
            weight = data.SumOfError2()/ data.SumOfContent();
         }
         if (extended) {
            nloglike += weight * ( fval - y);
         }

      } else {
         // standard case: no weights or iWeight=1
         // this is needed for the Poisson likelihood (which is extended, as opposed to the multinomial one)
         // the formula below includes the constant term due to the likelihood of the saturated model (f(x) = y)
         // (same formula as in the Baker-Cousins paper, page 439, except for a factor of 2)
         if (extended) nloglike = fval - y;

         if (y > 0) {
            nloglike += y * (ROOT::Math::Util::EvalLog(y) - ROOT::Math::Util::EvalLog(fval));
         }
      }
#ifdef DEBUG
      {
         R__LOCKGUARD(gROOTMutex);
         std::cout << " nll = " << nloglike << std::endl;
      }
#endif
      return nloglike;
   };

#ifdef R__USE_IMT
   auto redFunction = [](const std::vector<double> &objs) {
      return std::accumulate(objs.begin(), objs.end(), double{});
   };
#else
   (void)nChunks;

   // If IMT is disabled, force the execution policy to the serial case
   if (executionPolicy == ROOT::EExecutionPolicy::kMultiThread) {
      Warning("FitUtil::EvaluatePoissonLogL", "Multithread execution policy requires IMT, which is disabled. Changing "
                                              "to ROOT::EExecutionPolicy::kSequential.");
      executionPolicy = ROOT::EExecutionPolicy::kSequential;
   }
#endif

   double res{};
   if (executionPolicy == ROOT::EExecutionPolicy::kSequential) {
      for (unsigned int i = 0; i < n; ++i) {
         res += mapFunction(i);
      }
#ifdef R__USE_IMT
   } else if (executionPolicy == ROOT::EExecutionPolicy::kMultiThread) {
      ROOT::TThreadExecutor pool;
      auto chunks = nChunks != 0 ? nChunks : setAutomaticChunking(data.Size());
      res = pool.MapReduce(mapFunction, ROOT::TSeq<unsigned>(0, n), redFunction, chunks);
#endif
      // } else if(executionPolicy == ROOT::Fit::kMultiProcess){
      //    ROOT::TProcessExecutor pool;
      //    res = pool.MapReduce(mapFunction, ROOT::TSeq<unsigned>(0, n), redFunction);
   } else {
      Error("FitUtil::EvaluatePoissonLogL",
            "Execution policy unknown. Available choices:\n ROOT::EExecutionPolicy::kSequential (default)\n ROOT::EExecutionPolicy::kMultiThread (requires IMT)\n");
   }

#ifdef DEBUG
   std::cout << "Loglikelihood = " << res << std::endl;
#endif

   return res;
}

void FitUtil::EvaluatePoissonLogLGradient(const IModelFunction &f, const BinData &data, const double *p, double *grad,
                                          unsigned int &, ROOT::EExecutionPolicy executionPolicy, unsigned nChunks)
{
   // evaluate the gradient of the Poisson log likelihood function
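   // the quantity computed for each parameter k is
   //    d(nll)/dp_k = Sum_i df(x_i; p)/dp_k * ( 1 - y_i / f(x_i; p) )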
1653
1654 const IGradModelFunction *fg = dynamic_cast<const IGradModelFunction *>(&f);
1655 assert(fg != nullptr); // must be called by a grad function
1656
1657 const IGradModelFunction &func = *fg;
1658
1659#ifdef USE_PARAMCACHE
1660 (const_cast<IGradModelFunction &>(func)).SetParameters(p);
1661#endif
1662
1663 const DataOptions &fitOpt = data.Opt();
1664 bool useBinIntegral = fitOpt.fIntegral && data.HasBinEdges();
1665 bool useBinVolume = (fitOpt.fBinVolume && data.HasBinEdges());
1666
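// Note on the options used below: with fIntegral the model is integrated
// over each bin instead of being evaluated at the bin center, and with
// fBinVolume the prediction is multiplied by the bin volume (optionally
// normalized to a reference volume via fNormBinVolume), so that the data
// are treated as densities.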
1667 double wrefVolume = 1.0;
1668 if (useBinVolume && fitOpt.fNormBinVolume)
1669 wrefVolume /= data.RefVolume();
1670
1671 ROOT::Math::IntegrationOneDim::Type igType = ROOT::Math::IntegrationOneDim::kDEFAULT;
1672 if (executionPolicy == ROOT::EExecutionPolicy::kMultiThread) {
1673 // do not use the GSL integrator, which is not thread safe
1674 igType = ROOT::Math::IntegrationOneDim::kGAUSS;
1675 }
1676
1677 IntegralEvaluator<> igEval(func, p, useBinIntegral, igType);
1678
1679 unsigned int npar = func.NPar();
1680 unsigned initialNPoints = data.Size();
1681
1682 auto mapFunction = [&](const unsigned int i) {
1683 // set all vector values to zero
1684 std::vector<double> gradFunc(npar);
1685 std::vector<double> pointContribution(npar);
1686
1687 const auto x1 = data.GetCoordComponent(i, 0);
1688 const auto y = data.Value(i);
1689 auto invError = data.Error(i);
1690
1691 invError = (invError != 0.0) ? 1.0 / invError : 1.0; // inverse of the bin error (currently unused in this gradient)
1692
1693 double fval = 0;
1694
1695 const double *x = nullptr;
1696 std::vector<double> xc;
1697
1698 unsigned ndim = data.NDim();
1699 double binVolume = 1.0;
1700 if (useBinVolume) {
1701
1702 xc.resize(ndim);
1703
1704 for (unsigned int j = 0; j < ndim; ++j) {
1705 double x1_j = *data.GetCoordComponent(i, j);
1706 double x2_j = data.GetBinUpEdgeComponent(i, j);
1707 binVolume *= std::abs(x2_j - x1_j);
1708 xc[j] = (useBinIntegral) ? x1_j : 0.5 * (x2_j + x1_j);
1709 }
1710
1711 x = xc.data();
1712
1713 // normalize the bin volume using a reference value
1714 binVolume *= wrefVolume;
1715 } else if (ndim > 1) {
1716 xc.resize(ndim);
1717 xc[0] = *x1;
1718 for (unsigned int j = 1; j < ndim; ++j)
1719 xc[j] = *data.GetCoordComponent(i, j);
1720 x = xc.data();
1721 } else {
1722 x = x1;
1723 }
1724
1725 if (!useBinIntegral) {
1726 fval = func(x, p);
1727 func.ParameterGradient(x, p, &gradFunc[0]);
1728 } else {
1729 // calculate integral (normalized by bin volume)
1730 // need to set function and parameters here in case loop is parallelized
1731 std::vector<double> x2(data.NDim());
1732 data.GetBinUpEdgeCoordinates(i, x2.data());
1733 fval = igEval(x, x2.data());
1734 CalculateGradientIntegral(func, x, x2.data(), p, &gradFunc[0]);
1735 }
1736 if (useBinVolume)
1737 fval *= binVolume;
1738
1739#ifdef DEBUG
1740 {
1741 R__LOCKGUARD(gROOTMutex);
1742 if (i < 5 || (i > data.Size()-5) ) {
1743 if (data.NDim() > 1) std::cout << i << " x " << x[0] << " y " << x[1] << " func " << fval
1744 << " gradient " << gradFunc[0] << " " << gradFunc[1] << " " << gradFunc[2] << std::endl;
1745 else std::cout << i << " x " << x[0] << " gradient " << gradFunc[0] << " " << gradFunc[1] << " " << gradFunc[2] << std::endl;
1746 }
1747 }
1748#endif
1749
1750 // correct the gradient
1751 for (unsigned int ipar = 0; ipar < npar; ++ipar) {
1752
1753 // correct gradient for bin volumes
1754 if (useBinVolume)
1755 gradFunc[ipar] *= binVolume;
1756
1757 // df/dp * (1. - y/f )
1758 if (fval > 0)
1759 pointContribution[ipar] = gradFunc[ipar] * (1. - y / fval);
1760 else if (gradFunc[ipar] != 0) {
1761 const double kdmax1 = std::sqrt(std::numeric_limits<double>::max());
1762 const double kdmax2 = std::numeric_limits<double>::max() / (4 * initialNPoints);
1763 double gg = kdmax1 * gradFunc[ipar];
1764 if (gg > 0)
1765 gg = std::min(gg, kdmax2);
1766 else
1767 gg = std::max(gg, -kdmax2);
1768 pointContribution[ipar] = -gg;
1769 }
1770 }
1771
1772
1773 return pointContribution;
1774 };
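// For reference: differentiating the per-bin deviance
//    nll_i = (f_i - y_i) + y_i * ln(y_i / f_i)
// with respect to a parameter p gives
//    d(nll_i)/dp = (df_i/dp) * (1 - y_i / f_i),
// which is the expression used in the loop above. When f_i <= 0 that
// factor is undefined, so a large value, clamped by kdmax1/kdmax2 to
// avoid overflowing the sum over bins, is used instead to push the
// minimizer away from that region.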
1775
1776 // Vertically reduce the set of vectors by summing their equally-indexed components
1777 auto redFunction = [&](const std::vector<std::vector<double>> &pointContributions) {
1778 std::vector<double> result(npar);
1779
1780 for (auto const &pointContribution : pointContributions) {
1781 for (unsigned int parameterIndex = 0; parameterIndex < npar; parameterIndex++)
1782 result[parameterIndex] += pointContribution[parameterIndex];
1783 }
1784
1785 return result;
1786 };
1787
1788 std::vector<double> g(npar);
1789
1790#ifndef R__USE_IMT
1791 // If IMT is disabled, force the execution policy to the serial case
1792 if (executionPolicy == ROOT::EExecutionPolicy::kMultiThread) {
1793 Warning("FitUtil::EvaluatePoissonLogLGradient",
1794 "Multithread execution policy requires IMT, which is disabled. Changing "
1795 "to ROOT::EExecutionPolicy::kSequential.");
1796 executionPolicy = ROOT::EExecutionPolicy::kSequential;
1797 }
1798#endif
1799
1800 if (executionPolicy == ROOT::EExecutionPolicy::kSequential) {
1801 std::vector<std::vector<double>> allGradients(initialNPoints);
1802 for (unsigned int i = 0; i < initialNPoints; ++i) {
1803 allGradients[i] = mapFunction(i);
1804 }
1805 g = redFunction(allGradients);
1806 }
1807#ifdef R__USE_IMT
1808 else if (executionPolicy == ROOT::EExecutionPolicy::kMultiThread) {
1809 ROOT::TThreadExecutor pool;
1810 auto chunks = nChunks != 0 ? nChunks : setAutomaticChunking(initialNPoints);
1811 g = pool.MapReduce(mapFunction, ROOT::TSeq<unsigned>(0, initialNPoints), redFunction, chunks);
1812 }
1813#endif
1814
1815 // else if (executionPolicy == ROOT::Fit::kMultiProcess) {
1816 // ROOT::TProcessExecutor pool;
1817 // g = pool.MapReduce(mapFunction, ROOT::TSeq<unsigned>(0, n), redFunction);
1818 // }
1819 else {
1820 Error("FitUtil::EvaluatePoissonLogLGradient",
1821 "Execution policy unknown. Available choices:\n 0: Serial (default)\n 1: MultiThread (requires IMT)\n");
1822 }
1823
1824#ifndef R__USE_IMT
1825 // silence the unused-parameter compiler warning
1826 (void)nChunks;
1827#endif
1828
1829 // copy result
1830 std::copy(g.begin(), g.end(), grad);
1831
1832#ifdef DEBUG
1833 std::cout << "***** Final gradient : ";
1834 for (unsigned int ii = 0; ii< npar; ++ii) std::cout << grad[ii] << " ";
1835 std::cout << "\n";
1836#endif
1837
1838}
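
// A minimal usage sketch for EvaluatePoissonLogLGradient (illustrative
// only: it assumes "wf" wraps a model providing parameter derivatives,
// e.g. a ROOT::Math::WrappedMultiTF1, and that "data" and "params" are
// defined as in the sketch after EvaluatePoissonLogL above):
//
//    std::vector<double> gradient(wf.NPar());
//    unsigned int nPoints = data.Size();
//    ROOT::Fit::FitUtil::EvaluatePoissonLogLGradient(
//       wf, data, params, gradient.data(), nPoints,
//       ROOT::EExecutionPolicy::kSequential);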
1839
1840
1841unsigned FitUtil::setAutomaticChunking(unsigned nEvents){
1842 auto ncpu = ROOT::GetThreadPoolSize(); // number of workers in ROOT's pool; non-zero when IMT is enabled
1843 if (nEvents/ncpu < 1000) return ncpu;
1844 return nEvents/1000;
1845 //return ((nEvents/ncpu + 1) % 1000) *40 ; //arbitrary formula
1846}
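// Worked example (assuming IMT is enabled): with a pool of 8 threads,
// 5000 events give 5000/8 = 625 < 1000, so 8 chunks are used (one per
// thread), while 1000000 events give 1000000/1000 = 1000 chunks of
// roughly 1000 events each, leaving the pool slack for load balancing.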
1847
1848} // end namespace Fit
1849
1850} // end namespace ROOT