ROOT Reference Guide
FitUtil.cxx
// @(#)root/mathcore:$Id$
// Author: L. Moneta    Tue Nov 28 10:52:47 2006

/**********************************************************************
 *                                                                    *
 * Copyright (c) 2006  LCG ROOT Math Team, CERN/PH-SFT                *
 *                                                                    *
 *                                                                    *
 **********************************************************************/

// Implementation file for class FitUtil

#include "Fit/FitUtil.h"

#include "Fit/BinData.h"
#include "Fit/UnBinData.h"

#include "Math/IFunctionfwd.h"
#include "Math/IParamFunction.h"
#include "Math/Integrator.h"
#include "Math/IntegratorMultiDim.h"
#include "Math/WrappedFunction.h"
#include "Math/OneDimFunctionAdapter.h"
#include "Math/RichardsonDerivator.h"

#include "Math/Error.h"
#include "Math/Util.h"  // for safe log(x)

#include <limits>
#include <cmath>
#include <cassert>
#include <algorithm>
#include <numeric>
//#include <memory>

#include "TROOT.h"

//#define DEBUG
#ifdef DEBUG
#define NSAMPLE 10
#include <iostream>
#endif

// need to implement integral option
namespace ROOT {

   namespace Fit {

      namespace FitUtil {

         // derivative with respect to the parameter to be integrated
         template<class GradFunc = IGradModelFunction>
         struct ParamDerivFunc {
            ParamDerivFunc(const GradFunc & f) : fFunc(f), fIpar(0) {}
            void SetDerivComponent(unsigned int ipar) { fIpar = ipar; }
            double operator() (const double *x, const double *p) const {
               return fFunc.ParameterDerivative( x, p, fIpar );
            }
            unsigned int NDim() const { return fFunc.NDim(); }
            const GradFunc & fFunc;
            unsigned int fIpar;
         };

         // simple gradient calculator using the 2 point rule

         class SimpleGradientCalculator {

         public:
            // construct from function and gradient dimension gdim
            // gdim = npar for the parameter gradient
            // gdim = ndim for the coordinate gradients
            // construct (the param values will be passed later)
            // one can choose between the forward 2 point rule (1 extra evaluation), istrat=1,
            // or the central rule (2 extra evaluations), istrat=2
            // (the central rule was previously found not to work correctly - minuit2FitBench
            //  failed; note the sign of the central difference below, which has been fixed)
            SimpleGradientCalculator(int gdim, const IModelFunction & func, double eps = 2.E-8, int istrat = 1) :
               fEps(eps),
               fPrecision(1.E-8), // sqrt(epsilon)
               fStrategy(istrat),
               fN(gdim),
               fFunc(func),
               fVec(std::vector<double>(gdim)) // this can probably be optimized
            {}

            // internal method to calculate a single partial derivative
            // assume the cached vector fVec is already set
            double DoParameterDerivative(const double *x, const double *p, double f0, int k) const {
               double p0 = p[k];
               double h = std::max( fEps * std::abs(p0), 8.0 * fPrecision * (std::abs(p0) + fPrecision) );
               fVec[k] += h;
               double deriv = 0;
               // t.b.d : treat case of infinities
               //if (fval > - std::numeric_limits<double>::max() && fval < std::numeric_limits<double>::max() )
               double f1 = fFunc(x, &fVec.front() ); // f(p0 + h)
               if (fStrategy > 1) {
                  fVec[k] = p0 - h;
                  double f2 = fFunc(x, &fVec.front() ); // f(p0 - h)
                  deriv = 0.5 * ( f1 - f2 )/h; // central rule: (f1 - f2), not (f2 - f1)
               }
               else
                  deriv = ( f1 - f0 )/h;

               fVec[k] = p[k]; // restore original p value
               return deriv;
            }
            // number of dimensions in x (needed when calculating the integrals)
            unsigned int NDim() const {
               return fFunc.NDim();
            }
            // number of parameters (needed for the gradient calculation)
            unsigned int NPar() const {
               return fFunc.NPar();
            }

            double ParameterDerivative(const double *x, const double *p, int ipar) const {
               // fVec are the cached parameter values
               std::copy(p, p+fN, fVec.begin());
               double f0 = fFunc(x, p);
               return DoParameterDerivative(x, p, f0, ipar);
            }

            // calculate the full gradient at point (x,p) knowing already the value f0 (we gain a function eval.)
            void ParameterGradient(const double * x, const double * p, double f0, double * g) {
               // fVec are the cached parameter values
               std::copy(p, p+fN, fVec.begin());
               for (unsigned int k = 0; k < fN; ++k) {
                  g[k] = DoParameterDerivative(x, p, f0, k);
               }
            }

            // calculate the gradient w.r.t. the coordinate values
            void Gradient(const double * x, const double * p, double f0, double * g) {
               // fVec are the cached coordinate values
               std::copy(x, x+fN, fVec.begin());
               for (unsigned int k = 0; k < fN; ++k) {
                  double x0 = x[k];
                  double h = std::max( fEps * std::abs(x0), 8.0 * fPrecision * (std::abs(x0) + fPrecision) );
                  fVec[k] += h;
                  // t.b.d : treat case of infinities
                  //if (fval > - std::numeric_limits<double>::max() && fval < std::numeric_limits<double>::max() )
                  double f1 = fFunc( &fVec.front(), p ); // f(x0 + h)
                  if (fStrategy > 1) {
                     fVec[k] = x0 - h;
                     double f2 = fFunc( &fVec.front(), p ); // f(x0 - h)
                     g[k] = 0.5 * ( f1 - f2 )/h; // central rule, same sign convention as above
                  }
                  else
                     g[k] = ( f1 - f0 )/h;

                  fVec[k] = x[k]; // restore original x value
               }
            }

         private:

            double fEps;
            double fPrecision;
            int fStrategy; // strategy in the calculation ( =1 forward 2 point rule (1 extra eval.), =2 central rule (2 extra evals.) )
            unsigned int fN; // gradient dimension
            const IModelFunction & fFunc;
            mutable std::vector<double> fVec; // cached coordinates (or parameter values in the case of the parameter gradient)
         };
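
         // Illustrative usage sketch (not part of the library API): estimating the
         // parameter gradient of a model function `func` at a point, assuming `x` and
         // `p` point to valid coordinate and parameter arrays:
         //
         //    SimpleGradientCalculator gc(func.NPar(), func); // default istrat = 1: forward rule
         //    std::vector<double> g(func.NPar());
         //    double f0 = func(x, p);                    // value at the expansion point
         //    gc.ParameterGradient(x, p, f0, g.data());  // fills g[k] ~ df/dp_k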


         // function to avoid infinities or nan
         double CorrectValue(double rval) {
            // avoid infinities or nan in rval
            if (rval > - std::numeric_limits<double>::max() && rval < std::numeric_limits<double>::max() )
               return rval;
            else if (rval < 0)
               // case -inf
               return -std::numeric_limits<double>::max();
            else
               // case + inf or nan
               return + std::numeric_limits<double>::max();
         }

         // Check if the value is a finite number. The argument rval is updated if it is infinite or NaN,
         // setting it to the maximum finite value (preserving the sign).
         bool CheckInfNaNValue(double &rval)
         {
            if (rval > - std::numeric_limits<double>::max() && rval < std::numeric_limits<double>::max() )
               return true;
            else if (rval < 0) {
               // case -inf
               rval = -std::numeric_limits<double>::max();
               return false;
            }
            else {
               // case + inf or nan
               rval = + std::numeric_limits<double>::max();
               return false;
            }
         }
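
         // Example of the clamping behaviour (illustrative only):
         //
         //    double v = -std::numeric_limits<double>::infinity();
         //    CheckInfNaNValue(v);   // returns false, v is now -DBL_MAX
         //    v = std::nan("");
         //    CheckInfNaNValue(v);   // returns false, v is now +DBL_MAX (NaN comparisons are false)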


         // calculation of the integral of the gradient functions
         // for a function providing the derivative w.r.t. the parameters
         // x1 and x2 define the integration interval, p the parameters
         template <class GFunc>
         void CalculateGradientIntegral(const GFunc & gfunc,
                                        const double *x1, const double * x2, const double * p, double *g) {

            // needs to calculate the integral for each partial derivative
            ParamDerivFunc<GFunc> pfunc( gfunc);
            IntegralEvaluator<ParamDerivFunc<GFunc> > igDerEval( pfunc, p, true);
            // loop on the parameters
            unsigned int npar = gfunc.NPar();
            for (unsigned int k = 0; k < npar; ++k ) {
               pfunc.SetDerivComponent(k);
               g[k] = igDerEval( x1, x2 );
            }
         }
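
         // Illustrative usage sketch (hypothetical variable names): given a
         // gradient-providing model `gfunc` and a bin [x1, x2], fill g[k] with the
         // integral of df/dp_k over the bin:
         //
         //    std::vector<double> g(gfunc.NPar());
         //    CalculateGradientIntegral(gfunc, x1, x2, p, g.data());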


      } // end namespace FitUtil



//___________________________________________________________________________________________________________________________
// for chi2 functions
//___________________________________________________________________________________________________________________________

      double FitUtil::EvaluateChi2(const IModelFunction &func, const BinData &data, const double *p, unsigned int &nPoints,
                                   ::ROOT::EExecutionPolicy executionPolicy, unsigned nChunks)
      {
         // evaluate the chi2 given a function reference, the data, and return the value; also return
         // in nPoints the actual number of data points used
         // normal chi2 using only the error on the values (from fitting a histogram)
         // optionally the integral of the function in the bin is used
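         // the computed quantity is (sketch of the standard definition, with sigma_i the bin error):
         //
         //    chi2(p) = Sum_i ( (y_i - f(x_i; p)) / sigma_i )^2
         //
         // where f(x_i; p) is optionally replaced by the normalized integral of the function
         // over the bin, and optionally corrected by the bin volume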

         unsigned int n = data.Size();

         // set the parameters of the function to cache the integral value
#ifdef USE_PARAMCACHE
         (const_cast<IModelFunction &>(func)).SetParameters(p);
#endif
         // do not cache parameter values (it is not thread safe)
         //func.SetParameters(p);


         // get the fit option and check the case of using the integral of the bins
         const DataOptions & fitOpt = data.Opt();
         bool useBinIntegral = fitOpt.fIntegral && data.HasBinEdges();
         bool useBinVolume = (fitOpt.fBinVolume && data.HasBinEdges());
         bool useExpErrors = (fitOpt.fExpErrors);
         bool isWeighted = data.IsWeighted();

#ifdef DEBUG
         std::cout << "\n\nFit data size = " << n << std::endl;
         std::cout << "evaluate chi2 using function " << &func << "  " << p << std::endl;
         std::cout << "use empty bins  " << fitOpt.fUseEmpty << std::endl;
         std::cout << "use integral    " << fitOpt.fIntegral << std::endl;
         std::cout << "use all error=1 " << fitOpt.fErrors1 << std::endl;
         if (isWeighted) std::cout << "Weighted data set - sumw = " << data.SumOfContent() << "  sumw2 = " << data.SumOfError2() << std::endl;
#endif

         ROOT::Math::IntegrationOneDim::Type igType = ROOT::Math::IntegrationOneDim::kDEFAULT;
         if (executionPolicy == ROOT::EExecutionPolicy::kMultiThread) {
            // do not use the GSL integrator, which is not thread safe
            igType = ROOT::Math::IntegrationOneDim::kGAUSS;
         }
#ifdef USE_PARAMCACHE
         IntegralEvaluator<> igEval( func, 0, useBinIntegral, igType);
#else
         IntegralEvaluator<> igEval( func, p, useBinIntegral, igType);
#endif
         double maxResValue = std::numeric_limits<double>::max() / n;
         double wrefVolume = 1.0;
         if (useBinVolume) {
            if (fitOpt.fNormBinVolume) wrefVolume /= data.RefVolume();
         }

         (const_cast<IModelFunction &>(func)).SetParameters(p);

         auto mapFunction = [&](const unsigned i){

            double chi2{};
            double fval{};

            const auto x1 = data.GetCoordComponent(i, 0);
            const auto y = data.Value(i);
            auto invError = data.InvError(i);

            //invError = (invError!= 0.0) ? 1.0/invError :1;

            const double * x = nullptr;
            std::vector<double> xc;
            double binVolume = 1.0;
            if (useBinVolume) {
               unsigned int ndim = data.NDim();
               xc.resize(data.NDim());
               for (unsigned int j = 0; j < ndim; ++j) {
                  double xx = *data.GetCoordComponent(i, j);
                  double x2 = data.GetBinUpEdgeComponent(i, j);
                  binVolume *= std::abs(x2 - xx);
                  xc[j] = 0.5*(x2 + xx);
               }
               x = xc.data();
               // normalize the bin volume using a reference value
               binVolume *= wrefVolume;
            } else if(data.NDim() > 1) {
               // multi-dim case (no bin volume)
               xc.resize(data.NDim());
               xc[0] = *x1;
               for (unsigned int j = 1; j < data.NDim(); ++j)
                  xc[j] = *data.GetCoordComponent(i, j);
               x = xc.data();
            } else {
               x = x1;
            }


            if (!useBinIntegral) {
#ifdef USE_PARAMCACHE
               fval = func ( x );
#else
               fval = func ( x, p );
#endif
            }
            else {
               // calculate the integral normalized by the bin volume
               // need to set function and parameters here in case the loop is parallelized
               std::vector<double> x2(data.NDim());
               data.GetBinUpEdgeCoordinates(i, x2.data());
               fval = igEval(x, x2.data());
            }
            // normalize the result if requested according to the bin volume
            if (useBinVolume) fval *= binVolume;

            // expected errors
            if (useExpErrors) {
               double invWeight = 1.0;
               if (isWeighted) {
                  // we need first to check if a weight factor needs to be applied
                  // weight = sumw2/sumw = error**2/content
                  //invWeight = y * invError * invError;
                  // we always use the global weight and not the one observed in the bin
                  // for empty bins use the global weight (if the data are weighted, data.SumOfError2() is not zero)
                  invWeight = data.SumOfContent()/ data.SumOfError2();
                  //if (invError > 0) invWeight = y * invError * invError;
               }

               // if (invError == 0) invWeight = (data.SumOfError2() > 0) ? data.SumOfContent()/ data.SumOfError2() : 1.0;
               // compute the expected error as f(x) / weight
               double invError2 = (fval > 0) ? invWeight / fval : 0.0;
               invError = std::sqrt(invError2);
               //std::cout << "using Pearson chi2 " << x[0] << "  " << 1./invError2 << "  " << fval << std::endl;
            }

//#define DEBUG
#ifdef DEBUG
            std::cout << x[0] << "  " << y << "  " << 1./invError << " params : ";
            for (unsigned int ipar = 0; ipar < func.NPar(); ++ipar)
               std::cout << p[ipar] << "\t";
            std::cout << "\tfval = " << fval << " bin volume " << binVolume << " ref " << wrefVolume << std::endl;
#endif
//#undef DEBUG

            if (invError > 0) {

               double tmp = ( y - fval ) * invError;
               double resval = tmp * tmp;


               // avoid infinity or nan in the chi2 values due to wrong function values
               if ( resval < maxResValue )
                  chi2 += resval;
               else {
                  //nRejected++;
                  chi2 += maxResValue;
               }
            }
            return chi2;
         };

#ifdef R__USE_IMT
         auto redFunction = [](const std::vector<double> & objs){
            return std::accumulate(objs.begin(), objs.end(), double{});
         };
#else
         (void)nChunks;

         // If IMT is disabled, force the execution policy to the serial case
         if (executionPolicy == ROOT::EExecutionPolicy::kMultiThread) {
            Warning("FitUtil::EvaluateChi2", "Multithread execution policy requires IMT, which is disabled. Changing "
                                             "to ROOT::EExecutionPolicy::kSequential.");
            executionPolicy = ROOT::EExecutionPolicy::kSequential;
         }
#endif

         double res{};
         if(executionPolicy == ROOT::EExecutionPolicy::kSequential){
            for (unsigned int i=0; i<n; ++i) {
               res += mapFunction(i);
            }
#ifdef R__USE_IMT
         } else if(executionPolicy == ROOT::EExecutionPolicy::kMultiThread) {
            ROOT::TThreadExecutor pool;
            auto chunks = nChunks != 0 ? nChunks : setAutomaticChunking(data.Size());
            res = pool.MapReduce(mapFunction, ROOT::TSeq<unsigned>(0, n), redFunction, chunks);
#endif
//       } else if(executionPolicy == ROOT::Fit::kMultiProcess){
//          ROOT::TProcessExecutor pool;
//          res = pool.MapReduce(mapFunction, ROOT::TSeq<unsigned>(0, n), redFunction);
         } else{
            Error("FitUtil::EvaluateChi2","Execution policy unknown. Available choices:\n ROOT::EExecutionPolicy::kSequential (default)\n ROOT::EExecutionPolicy::kMultiThread (requires IMT)\n");
         }

         // reset the number of fitting data points
         nPoints = n; // no points are rejected
         //if (nRejected != 0)  nPoints = n - nRejected;

         return res;
}


//___________________________________________________________________________________________________________________________

double FitUtil::EvaluateChi2Effective(const IModelFunction & func, const BinData & data, const double * p, unsigned int & nPoints) {
   // evaluate the chi2 given a function reference, the data, and return the value; also return
   // in nPoints the actual number of data points used
   // method using the errors on the coordinates
   // the integral of the bin does not make sense in this case
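   // the bin error is inflated by propagating the coordinate errors (sketch):
   //
   //    sigma_eff_i^2 = sigma_y_i^2 + Sum_k ( df/dx_k(x_i; p) * sigma_xk_i )^2
   //
   // and each term of the chi2 sum is (y_i - f(x_i; p))^2 / sigma_eff_i^2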

   unsigned int n = data.Size();

#ifdef DEBUG
   std::cout << "\n\nFit data size = " << n << std::endl;
   std::cout << "evaluate effective chi2 using function " << &func << "  " << p << std::endl;
#endif

   assert(data.HaveCoordErrors() || data.HaveAsymErrors());

   double chi2 = 0;
   //int nRejected = 0;


   //func.SetParameters(p);

   unsigned int ndim = func.NDim();

   // use the Richardson derivator
   ROOT::Math::RichardsonDerivator derivator;

   double maxResValue = std::numeric_limits<double>::max() / n;



   for (unsigned int i = 0; i < n; ++ i) {


      double y = 0;
      const double * x = data.GetPoint(i,y);

      double fval = func( x, p );

      double delta_y_func = y - fval;


      double ey = 0;
      const double * ex = 0;
      if (!data.HaveAsymErrors() )
         ex = data.GetPointError(i, ey);
      else {
         double eylow = 0, eyhigh = 0;
         ex = data.GetPointError(i, eylow, eyhigh);
         if ( delta_y_func < 0)
            ey = eyhigh; // function is higher than the points
         else
            ey = eylow;
      }
      double e2 = ey * ey;
      // before calculating the gradient, check that the errors on x are not all zero
      unsigned int j = 0;
      while ( j < ndim && ex[j] == 0.)  { j++; }
      // if j is less than ndim some elements are non-zero
      if (j < ndim) {
         // need an adapter from a multi-dim function to a one-dimensional one
         ROOT::Math::OneDimMultiFunctionAdapter<const IModelFunction &> f1D(func, x, 0, p);
         // select the optimal step size (use 10^-2 by default, as was done in TF1)
         double kEps = 0.01;
         double kPrecision = 1.E-8;
         for (unsigned int icoord = 0; icoord < ndim; ++icoord) {
            // calculate the derivative for each coordinate
            if (ex[icoord] > 0) {
               //gradCalc.Gradient(x, p, fval, &grad[0]);
               f1D.SetCoord(icoord);
               // optimal step size (take ex[] as the scale for the point and use 1% of it)
               double x0 = x[icoord];
               double h = std::max( kEps * std::abs(ex[icoord]), 8.0 * kPrecision * (std::abs(x0) + kPrecision) );
               double deriv = derivator.Derivative1(f1D, x[icoord], h);
               double edx = ex[icoord] * deriv;
               e2 += edx * edx;
#ifdef DEBUG
               std::cout << "error for coord " << icoord << " = " << ex[icoord] << " deriv " << deriv << std::endl;
#endif
            }
         }
      }
      double w2 = (e2 > 0) ? 1.0/e2 : 0;
      double resval = w2 * ( y - fval ) * ( y - fval);

#ifdef DEBUG
      std::cout << x[0] << "  " << y << " ex " << ex[0] << " ey " << ey << " params : ";
      for (unsigned int ipar = 0; ipar < func.NPar(); ++ipar)
         std::cout << p[ipar] << "\t";
      std::cout << "\tfval = " << fval << "\tresval = " << resval << std::endl;
#endif

      // avoid (infinity and nan) in the chi2 sum
      // eventually add the possibility of excluding some points (like singularities)
      if ( resval < maxResValue )
         chi2 += resval;
      else
         chi2 += maxResValue;
      //nRejected++;

   }

   // reset the number of fitting data points
   nPoints = n; // no points are rejected
   //if (nRejected != 0)  nPoints = n - nRejected;

#ifdef DEBUG
   std::cout << "chi2 = " << chi2 << " n = " << nPoints << std::endl;
#endif

   return chi2;

}


////////////////////////////////////////////////////////////////////////////////
/// evaluate the chi2 contribution (residual term) only for data with no coordinate errors
/// This function is used in the specialized least square algorithms like FUMILI or L.M.
/// If we have errors on the coordinates the method is not yet implemented;
/// the integral option is also not yet implemented:
/// in those cases one can use the normal chi2 method
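/// The returned residual is (a sketch; sigma_i is the bin error) r_i = (y_i - f(x_i; p)) / sigma_i
/// and, when g is non-null, g[k] is filled with the parameter derivatives
/// dr_i/dp_k = - (df(x_i; p)/dp_k) / sigma_i, scaled by the bin volume when that option is used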

double FitUtil::EvaluateChi2Residual(const IModelFunction & func, const BinData & data, const double * p, unsigned int i, double * g, bool hasGrad) {
   if (data.GetErrorType() == BinData::kCoordError && data.Opt().fCoordErrors ) {
      MATH_ERROR_MSG("FitUtil::EvaluateChi2Residual","Errors on the coordinates are not used in calculating the Chi2 residual");
      return 0; // it will assert otherwise later in GetPoint
   }


   //func.SetParameters(p);

   double y, invError = 0;

   const DataOptions & fitOpt = data.Opt();
   bool useBinIntegral = fitOpt.fIntegral && data.HasBinEdges();
   bool useBinVolume = (fitOpt.fBinVolume && data.HasBinEdges());
   bool useExpErrors = (fitOpt.fExpErrors);

   const double * x1 = data.GetPoint(i,y, invError);

   IntegralEvaluator<> igEval( func, p, useBinIntegral);
   double fval = 0;
   unsigned int ndim = data.NDim();
   double binVolume = 1.0;
   const double * x2 = 0;
   if (useBinVolume || useBinIntegral) x2 = data.BinUpEdge(i);

   double * xc = 0;

   if (useBinVolume) {
      xc = new double[ndim];
      for (unsigned int j = 0; j < ndim; ++j) {
         binVolume *= std::abs( x2[j]-x1[j] );
         xc[j] = 0.5*(x2[j]+ x1[j]);
      }
      // normalize the bin volume using a reference value
      binVolume /= data.RefVolume();
   }

   const double * x = (useBinVolume) ? xc : x1;

   if (!useBinIntegral) {
      fval = func ( x, p );
   }
   else {
      // calculate the integral (normalized by the bin volume)
      // need to set function and parameters here in case the loop is parallelized
      fval = igEval( x1, x2) ;
   }
   // normalize the result if requested according to the bin volume
   if (useBinVolume) fval *= binVolume;

   // expected errors
   if (useExpErrors) {
      // we need first to check if a weight factor needs to be applied
      // weight = sumw2/sumw = error**2/content
      //NOTE: assume the histogram is not weighted
      // don't know how to treat bins with weight = 0
      //double invWeight = y * invError * invError;
      // if (invError == 0) invWeight = (data.SumOfError2() > 0) ? data.SumOfContent()/ data.SumOfError2() : 1.0;
      // compute the expected error as f(x) / weight
      double invError2 = (fval > 0) ? 1.0 / fval : 0.0;
      invError = std::sqrt(invError2);
   }


   double resval = ( y - fval ) * invError;

   // avoid infinities or nan in resval
   resval = CorrectValue(resval);

   // estimate the gradient
   if (g) {

      unsigned int npar = func.NPar();

      // use the gradient of the model function only if the FCN supports gradients
      const IGradModelFunction * gfunc = (hasGrad) ?
         dynamic_cast<const IGradModelFunction *>( &func) : nullptr;

      if (gfunc) {
         // case the function provides the gradient
         if (!useBinIntegral ) {
            gfunc->ParameterGradient( x , p, g);
         }
         else {
            // needs to calculate the integral for each partial derivative
            CalculateGradientIntegral( *gfunc, x1, x2, p, g);
         }
      }
      else {
         SimpleGradientCalculator  gc( npar, func);
         if (!useBinIntegral ) {
            gc.ParameterGradient(x, p, fval, g);
         } else {
            // needs to calculate the integral for each partial derivative
            CalculateGradientIntegral( gc, x1, x2, p, g);
         }
      }
      // multiply by - 1 * weight
      for (unsigned int k = 0; k < npar; ++k) {
         g[k] *= - invError;
         if (useBinVolume) g[k] *= binVolume;
      }
   }

   if (useBinVolume) delete [] xc;

   return resval;

}

void FitUtil::EvaluateChi2Gradient(const IModelFunction &f, const BinData &data, const double *p, double *grad,
                                   unsigned int &nPoints, ROOT::EExecutionPolicy executionPolicy, unsigned nChunks)
{
   // evaluate the gradient of the chi2 function
   // this function is used when the model function knows how to calculate its derivatives and we can
   // avoid having the minimizer re-compute them
   //
   // the case of the effective chi2 (errors on the coordinates) is not supported
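   //
   // the computed components are (sketch):
   //
   //    dChi2/dp_k = -2 * Sum_i ( (y_i - f(x_i; p)) / sigma_i^2 ) * df(x_i; p)/dp_k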

   if (data.HaveCoordErrors()) {
      MATH_ERROR_MSG("FitUtil::EvaluateChi2Gradient",
                     "Errors on the coordinates are not used in calculating the Chi2 gradient");
      return; // it will assert otherwise later in GetPoint
   }

   const IGradModelFunction *fg = dynamic_cast<const IGradModelFunction *>(&f);
   assert(fg != nullptr); // must be called by a gradient function

   const IGradModelFunction &func = *fg;

#ifdef DEBUG
   std::cout << "\n\nFit data size = " << data.Size() << std::endl;
   std::cout << "evaluate chi2 using function gradient " << &func << "  " << p << std::endl;
#endif

   const DataOptions &fitOpt = data.Opt();
   bool useBinIntegral = fitOpt.fIntegral && data.HasBinEdges();
   bool useBinVolume = (fitOpt.fBinVolume && data.HasBinEdges());

   double wrefVolume = 1.0;
   if (useBinVolume) {
      if (fitOpt.fNormBinVolume) wrefVolume /= data.RefVolume();
   }

   ROOT::Math::IntegrationOneDim::Type igType = ROOT::Math::IntegrationOneDim::kDEFAULT;
   if (executionPolicy == ROOT::EExecutionPolicy::kMultiThread) {
      // do not use the GSL integrator, which is not thread safe
      igType = ROOT::Math::IntegrationOneDim::kGAUSS;
   }
   IntegralEvaluator<> igEval(func, p, useBinIntegral, igType);

   unsigned int npar = func.NPar();
   unsigned initialNPoints = data.Size();

   std::vector<bool> isPointRejected(initialNPoints);

   auto mapFunction = [&](const unsigned int i) {
      // set all vector values to zero
      std::vector<double> gradFunc(npar);
      std::vector<double> pointContribution(npar);

      const auto x1 = data.GetCoordComponent(i, 0);
      const auto y = data.Value(i);
      auto invError = data.Error(i);

      invError = (invError != 0.0) ? 1.0 / invError : 1;

      double fval = 0;

      const double *x = nullptr;
      std::vector<double> xc;

      unsigned int ndim = data.NDim();
      double binVolume = 1;
      if (useBinVolume) {
         xc.resize(ndim);
         for (unsigned int j = 0; j < ndim; ++j) {
            double x1_j = *data.GetCoordComponent(i, j);
            double x2_j = data.GetBinUpEdgeComponent(i, j);
            binVolume *= std::abs(x2_j - x1_j);
            xc[j] = 0.5 * (x2_j + x1_j);
         }

         x = xc.data();

         // normalize the bin volume using a reference value
         binVolume *= wrefVolume;
      } else if (ndim > 1) {
         xc.resize(ndim);
         xc[0] = *x1;
         for (unsigned int j = 1; j < ndim; ++j)
            xc[j] = *data.GetCoordComponent(i, j);
         x = xc.data();
      } else {
         x = x1;
      }

      if (!useBinIntegral) {
         fval = func(x, p);
         func.ParameterGradient(x, p, &gradFunc[0]);
      } else {
         std::vector<double> x2(data.NDim());
         data.GetBinUpEdgeCoordinates(i, x2.data());
         // calculate the normalized integral and gradient (divided by the bin volume)
         // need to set function and parameters here in case the loop is parallelized
         fval = igEval(x, x2.data());
         CalculateGradientIntegral(func, x, x2.data(), p, &gradFunc[0]);
      }
      if (useBinVolume)
         fval *= binVolume;

#ifdef DEBUG
      std::cout << x[0] << "  " << y << "  " << 1. / invError << " params : ";
      for (unsigned int ipar = 0; ipar < npar; ++ipar)
         std::cout << p[ipar] << "\t";
      std::cout << "\tfval = " << fval << std::endl;
#endif
      if (!CheckInfNaNValue(fval)) {
         isPointRejected[i] = true;
         // Return a zero contribution to all partial derivatives on behalf of the current point
         return pointContribution;
      }

      // loop on the parameters
      unsigned int ipar = 0;
      for (; ipar < npar; ++ipar) {

         // correct the gradient for the bin volumes
         if (useBinVolume)
            gradFunc[ipar] *= binVolume;

         // avoid singularities in the function (infinity and nan) in the chi2 sum
         // eventually add the possibility of excluding some points (like singularities)
         double dfval = gradFunc[ipar];
         if (!CheckInfNaNValue(dfval)) {
            break; // exit the loop on the parameters
         }

         // calculate the derivative contribution of this point
         pointContribution[ipar] = -2.0 * (y - fval) * invError * invError * gradFunc[ipar];
      }

      if (ipar < npar) {
         // case the loop was broken because of an overflow in the gradient calculation
         isPointRejected[i] = true;
      }

      return pointContribution;
   };

   // Vertically reduce the set of vectors by summing their equally-indexed components
   auto redFunction = [&](const std::vector<std::vector<double>> &pointContributions) {
      std::vector<double> result(npar);

      for (auto const &pointContribution : pointContributions) {
         for (unsigned int parameterIndex = 0; parameterIndex < npar; parameterIndex++)
            result[parameterIndex] += pointContribution[parameterIndex];
      }

      return result;
   };

   std::vector<double> g(npar);

#ifndef R__USE_IMT
   // If IMT is disabled, force the execution policy to the serial case
   if (executionPolicy == ROOT::EExecutionPolicy::kMultiThread) {
      Warning("FitUtil::EvaluateChi2Gradient", "Multithread execution policy requires IMT, which is disabled. Changing "
                                               "to ROOT::EExecutionPolicy::kSequential.");
      executionPolicy = ROOT::EExecutionPolicy::kSequential;
   }
#endif

   if (executionPolicy == ROOT::EExecutionPolicy::kSequential) {
      std::vector<std::vector<double>> allGradients(initialNPoints);
      for (unsigned int i = 0; i < initialNPoints; ++i) {
         allGradients[i] = mapFunction(i);
      }
      g = redFunction(allGradients);
   }
#ifdef R__USE_IMT
   else if (executionPolicy == ROOT::EExecutionPolicy::kMultiThread) {
      ROOT::TThreadExecutor pool;
      auto chunks = nChunks != 0 ? nChunks : setAutomaticChunking(initialNPoints);
      g = pool.MapReduce(mapFunction, ROOT::TSeq<unsigned>(0, initialNPoints), redFunction, chunks);
   }
#endif
   // else if(executionPolicy == ROOT::Fit::kMultiProcess){
   //    ROOT::TProcessExecutor pool;
   //    g = pool.MapReduce(mapFunction, ROOT::TSeq<unsigned>(0, n), redFunction);
   // }
   else {
      Error("FitUtil::EvaluateChi2Gradient",
            "Execution policy unknown. Available choices:\n 0: Serial (default)\n 1: MultiThread (requires IMT)\n");
   }

#ifndef R__USE_IMT
   //to fix a compiler warning
   (void)nChunks;
#endif

   // correct the number of points
   nPoints = initialNPoints;

   if (std::any_of(isPointRejected.begin(), isPointRejected.end(), [](bool point) { return point; })) {
      unsigned nRejected = std::accumulate(isPointRejected.begin(), isPointRejected.end(), 0);
      assert(nRejected <= initialNPoints);
      nPoints = initialNPoints - nRejected;

      if (nPoints < npar)
         MATH_ERROR_MSG("FitUtil::EvaluateChi2Gradient",
                        "Error - too many points rejected for overflow in gradient calculation");
   }

   // copy the result
   std::copy(g.begin(), g.end(), grad);
}

//______________________________________________________________________________________________________
//
//  Log Likelihood functions
//_______________________________________________________________________________________________________

// utility functions used by the likelihoods

// for LogLikelihood functions

double FitUtil::EvaluatePdf(const IModelFunction & func, const UnBinData & data, const double * p, unsigned int i, double * g, bool hasGrad) {
   // evaluate the pdf contribution to the generic logl function in the case of unbinned data
   // actually return the log of the pdf and its derivatives


   //func.SetParameters(p);


   const double * x = data.Coords(i);
   double fval = func ( x, p );
   double logPdf = ROOT::Math::Util::EvalLog(fval);
   //return
   if (g == 0) return logPdf;

   const IGradModelFunction * gfunc = (hasGrad) ?
      dynamic_cast<const IGradModelFunction *>( &func) : nullptr;

   // gradient calculation
   if (gfunc) {
      // case the function provides the gradient
      gfunc->ParameterGradient( x , p, g );
   }
   else {
      // estimate the gradient numerically with the simple 2 point rule
      // it would probably be more stable numerically to calculate the gradient of log(pdf) directly
      SimpleGradientCalculator gc(func.NPar(), func);
      gc.ParameterGradient(x, p, fval, g );
   }
   // divide the gradient by the function value since we return the log
   for (unsigned int ipar = 0; ipar < func.NPar(); ++ipar) {
      g[ipar] /= fval; // this should be checked against infinities
   }
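
   // note (sketch): since log f depends on the parameters only through f,
   //
   //    d log f(x; p) / dp_k = (1/f) * df/dp_k
   //
   // which is why the gradient filled above is divided by fval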

#ifdef DEBUG
   std::cout << x[i] << "\t";
   std::cout << "\tpar = [ " << func.NPar() << " ] =  ";
   for (unsigned int ipar = 0; ipar < func.NPar(); ++ipar)
      std::cout << p[ipar] << "\t";
   std::cout << "\tfval = " << fval;
   std::cout << "\tgrad = [ ";
   for (unsigned int ipar = 0; ipar < func.NPar(); ++ipar)
      std::cout << g[ipar] << "\t";
   std::cout << " ] " << std::endl;
#endif


   return logPdf;
}

double FitUtil::EvaluateLogL(const IModelFunction &func, const UnBinData &data, const double *p,
                             int iWeight, bool extended, unsigned int &nPoints,
                             ROOT::EExecutionPolicy executionPolicy, unsigned nChunks)
{
   // evaluate the LogLikelihood

   unsigned int n = data.Size();

   //unsigned int nRejected = 0;

   bool normalizeFunc = false;

   // set the parameters of the function to cache the integral value
#ifdef USE_PARAMCACHE
   (const_cast<IModelFunctionTempl<double> &>(func)).SetParameters(p);
#endif

   nPoints = data.Size();  // npoints

#ifdef R__USE_IMT
   // in case the parameters need to be propagated to the user function, use the trick of calling the function once;
   // this is done in sequential mode so the parameters can be set in a thread safe manner
   if (!normalizeFunc) {
      if (data.NDim() == 1) {
         const double * x = data.GetCoordComponent(0,0);
         func( x, p);
      }
      else {
         std::vector<double> x(data.NDim());
         for (unsigned int j = 0; j < data.NDim(); ++j)
            x[j] = *data.GetCoordComponent(0, j);
         func( x.data(), p);
      }
   }
#endif

   double norm = 1.0;
   if (normalizeFunc) {
      // compute the integral of the function
      std::vector<double> xmin(data.NDim());
      std::vector<double> xmax(data.NDim());
      IntegralEvaluator<> igEval(func, p, true);
      // compute the integral in the ranges where the data are defined
      if (data.Range().Size() > 0) {
         norm = 0;
         for (unsigned int ir = 0; ir < data.Range().Size(); ++ir) {
            data.Range().GetRange(&xmin[0], &xmax[0], ir);
            norm += igEval.Integral(xmin.data(), xmax.data());
         }
      } else {
         // use (-inf, +inf)
         data.Range().GetRange(&xmin[0], &xmax[0]);
         // check that the function is zero at +- inf
         if (func(xmin.data(), p) != 0 || func(xmax.data(), p) != 0) {
            MATH_ERROR_MSG("FitUtil::EvaluateLogLikelihood",
                           "A range has not been set and the function is not zero at +/- inf");
            return 0;
         }
         norm = igEval.Integral(&xmin[0], &xmax[0]);
      }
   }

   // needed to compute the effective global weight in the case of an extended likelihood

   auto mapFunction = [&](const unsigned i) {
      double W = 0;
      double W2 = 0;
      double fval = 0;

      if (data.NDim() > 1) {
         std::vector<double> x(data.NDim());
         for (unsigned int j = 0; j < data.NDim(); ++j)
            x[j] = *data.GetCoordComponent(i, j);
#ifdef USE_PARAMCACHE
         fval = func(x.data());
#else
         fval = func(x.data(), p);
#endif

         // one-dim case
      } else {
         const auto x = data.GetCoordComponent(i, 0);
#ifdef USE_PARAMCACHE
         fval = func(x);
#else
         fval = func(x, p);
#endif
      }

      if (normalizeFunc)
         fval = fval * (1 / norm);

      // the function EvalLog protects against negative or too small values of fval
      double logval = ROOT::Math::Util::EvalLog(fval);
      if (iWeight > 0) {
         double weight = data.Weight(i);
         logval *= weight;
         if (iWeight == 2) {
            logval *= weight; // use the square of the weights in the likelihood
            if (!extended) {
               // the sum of the weights and the sum of the weight squares are needed if the likelihood is extended
               W = weight;
               W2 = weight * weight;
            }
         }
      }
      return LikelihoodAux<double>(logval, W, W2);
   };

#ifdef R__USE_IMT
   // auto redFunction = [](const std::vector<LikelihoodAux<double>> & objs){
   //    return std::accumulate(objs.begin(), objs.end(), LikelihoodAux<double>(0.0,0.0,0.0),
   //    [](const LikelihoodAux<double> &l1, const LikelihoodAux<double> &l2){
   //       return l1+l2;
   //    });
   // };
   // do not use std::accumulate, to be sure to always maintain the same order
   auto redFunction = [](const std::vector<LikelihoodAux<double>> & objs){
      auto l0 = LikelihoodAux<double>(0.0,0.0,0.0);
      for ( auto & l : objs ) {
         l0 = l0 + l;
      }
      return l0;
   };
#else
   (void)nChunks;

   // If IMT is disabled, force the execution policy to the serial case
   if (executionPolicy == ROOT::EExecutionPolicy::kMultiThread) {
      Warning("FitUtil::EvaluateLogL", "Multithread execution policy requires IMT, which is disabled. Changing "
                                       "to ROOT::EExecutionPolicy::kSequential.");
      executionPolicy = ROOT::EExecutionPolicy::kSequential;
   }
#endif

   double logl{};
   double sumW{};
   double sumW2{};
   if(executionPolicy == ROOT::EExecutionPolicy::kSequential){
      for (unsigned int i=0; i<n; ++i) {
         auto resArray = mapFunction(i);
         logl += resArray.logvalue;
         sumW += resArray.weight;
         sumW2 += resArray.weight2;
      }
#ifdef R__USE_IMT
   } else if(executionPolicy == ROOT::EExecutionPolicy::kMultiThread) {
      ROOT::TThreadExecutor pool;
      auto chunks = nChunks != 0 ? nChunks : setAutomaticChunking(data.Size());
      auto resArray = pool.MapReduce(mapFunction, ROOT::TSeq<unsigned>(0, n), redFunction, chunks);
      logl = resArray.logvalue;
      sumW = resArray.weight;
      sumW2 = resArray.weight2;
#endif
//   } else if(executionPolicy == ROOT::Fit::kMultiProcess){
//      ROOT::TProcessExecutor pool;
//      res = pool.MapReduce(mapFunction, ROOT::TSeq<unsigned>(0, n), redFunction);
   } else{
      Error("FitUtil::EvaluateLogL","Execution policy unknown. Available choices:\n ROOT::EExecutionPolicy::kSequential (default)\n ROOT::EExecutionPolicy::kMultiThread (requires IMT)\n");
   }

   if (extended) {
      // add the Poisson extended term
      double extendedTerm = 0; // extended term in the likelihood
      double nuTot = 0;
      // nuTot is the integral of the function in the range
      // if the function has been normalized, the integral has already been computed
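      // with nu = nuTot the expected total number of events, the term added below is (sketch):
      //    - nu                    for an unnormalized f (the n*log(nu) part is already
      //                            contained in the sum of the log f values)
      //    - nu + n * log(nu)      for a normalized f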
      if (!normalizeFunc) {
         IntegralEvaluator<> igEval( func, p, true);
         std::vector<double> xmin(data.NDim());
         std::vector<double> xmax(data.NDim());

         // compute the integral in the ranges where the data are defined
         if (data.Range().Size() > 0 ) {
            nuTot = 0;
            for (unsigned int ir = 0; ir < data.Range().Size(); ++ir) {
               data.Range().GetRange(&xmin[0],&xmax[0],ir);
               nuTot += igEval.Integral(xmin.data(),xmax.data());
            }
         } else {
            // use (-inf, +inf)
            data.Range().GetRange(&xmin[0],&xmax[0]);
            // check that the function is zero at +- inf
            if (func(xmin.data(), p) != 0 || func(xmax.data(), p) != 0) {
               MATH_ERROR_MSG("FitUtil::EvaluateLogLikelihood","A range has not been set and the function is not zero at +/- inf");
               return 0;
            }
            nuTot = igEval.Integral(&xmin[0],&xmax[0]);
         }

         // force to be the last parameter value
         //nutot = p[func.NDim()-1];
         if (iWeight != 2)
            extendedTerm = - nuTot; // no need to add n log(nu) in this case, since it is already computed before
         else {
            // case of using the weight squares in the likelihood: compute the total effective weight = sw2/sw
            // ignore for the moment the case when sumW is zero
            extendedTerm = - (sumW2 / sumW) * nuTot;
         }

      }
      else {
         nuTot = norm;
         extendedTerm = - nuTot + double(n) * ROOT::Math::Util::EvalLog( nuTot);
         // in the case of weights one needs to use here the sum of the weights (to be done)
      }
      logl += extendedTerm;

   }

#ifdef DEBUG
   std::cout << "Evaluated log L for parameters (";
   for (unsigned int ip = 0; ip < func.NPar(); ++ip)
      std::cout << " " << p[ip];
   std::cout << ")  fval = " << -logl << std::endl;
#endif

   return -logl;
}

void FitUtil::EvaluateLogLGradient(const IModelFunction &f, const UnBinData &data, const double *p, double *grad,
                                   unsigned int &nPoints, ROOT::EExecutionPolicy executionPolicy, unsigned nChunks)
{
   // evaluate the gradient of the log likelihood function
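   //
   // the computed components are those of the negative log likelihood (sketch):
   //
   //    d(-logL)/dp_k = - Sum_i (1 / f(x_i; p)) * df(x_i; p)/dp_k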

   const IGradModelFunction *fg = dynamic_cast<const IGradModelFunction *>(&f);
   assert(fg != nullptr); // must be called by a gradient function

   const IGradModelFunction &func = *fg;

   unsigned int npar = func.NPar();
   unsigned initialNPoints = data.Size();

   (const_cast<IGradModelFunction &>(func)).SetParameters(p);

#ifdef DEBUG
   std::cout << "\n===> Evaluate Gradient for parameters ";
   for (unsigned int ip = 0; ip < npar; ++ip)
      std::cout << "  " << p[ip];
   std::cout << "\n";
#endif

   const double kdmax1 = std::sqrt(std::numeric_limits<double>::max());
   const double kdmax2 = std::numeric_limits<double>::max() / (4 * initialNPoints);

   auto mapFunction = [&](const unsigned int i) {
      std::vector<double> gradFunc(npar);
      std::vector<double> pointContribution(npar);


      const double * x = nullptr;
      std::vector<double> xc;
      if (data.NDim() > 1) {
         xc.resize(data.NDim() );
         for (unsigned int j = 0; j < data.NDim(); ++j)
            xc[j] = *data.GetCoordComponent(i, j);
         x = xc.data();
      } else {
         x = data.GetCoordComponent(i, 0);
      }

      double fval = func(x, p);
      func.ParameterGradient(x, p, &gradFunc[0]);

#ifdef DEBUG
      {
         R__LOCKGUARD(gROOTMutex);
         if (i < 5 || (i > data.Size()-5) ) {
            if (data.NDim() > 1) std::cout << i << "  x " << x[0] << " y " << x[1] << " func " << fval
                                           << " gradient " << gradFunc[0] << "  " << gradFunc[1] << "  " << gradFunc[3] << std::endl;
            else std::cout << i << "  x " << x[0] << " gradient " << gradFunc[0] << "  " << gradFunc[1] << "  " << gradFunc[3] << std::endl;
         }
      }
#endif

      for (unsigned int kpar = 0; kpar < npar; ++kpar) {
         if (fval > 0)
            pointContribution[kpar] = -1. / fval * gradFunc[kpar];
         else if (gradFunc[kpar] != 0) {
            double gg = kdmax1 * gradFunc[kpar];
            if (gg > 0)
               gg = std::min(gg, kdmax2);
            else
               gg = std::max(gg, -kdmax2);
            pointContribution[kpar] = -gg;
         }
         // if the function derivative is zero the term is also zero, so do not add to g[kpar]
      }

      return pointContribution;
   };

   // Vertically reduce the set of vectors by summing their equally-indexed components
   auto redFunction = [&](const std::vector<std::vector<double>> &pointContributions) {
      std::vector<double> result(npar);

      for (auto const &pointContribution : pointContributions) {
         for (unsigned int parameterIndex = 0; parameterIndex < npar; parameterIndex++)
            result[parameterIndex] += pointContribution[parameterIndex];
      }

      return result;
   };

   std::vector<double> g(npar);

#ifndef R__USE_IMT
   // If IMT is disabled, force the execution policy to the serial case
   if (executionPolicy == ROOT::EExecutionPolicy::kMultiThread) {
      Warning("FitUtil::EvaluateLogLGradient", "Multithread execution policy requires IMT, which is disabled. Changing "
                                               "to ROOT::EExecutionPolicy::kSequential.");
      executionPolicy = ROOT::EExecutionPolicy::kSequential;
   }
#endif

   if (executionPolicy == ROOT::EExecutionPolicy::kSequential) {
      std::vector<std::vector<double>> allGradients(initialNPoints);
      for (unsigned int i = 0; i < initialNPoints; ++i) {
         allGradients[i] = mapFunction(i);
      }
      g = redFunction(allGradients);
   }
#ifdef R__USE_IMT
   else if (executionPolicy == ROOT::EExecutionPolicy::kMultiThread) {
      ROOT::TThreadExecutor pool;
      auto chunks = nChunks != 0 ? nChunks : setAutomaticChunking(initialNPoints);
      g = pool.MapReduce(mapFunction, ROOT::TSeq<unsigned>(0, initialNPoints), redFunction, chunks);
   }
#endif
   else {
      Error("FitUtil::EvaluateLogLGradient", "Execution policy unknown. Available choices:\n "
                                             "ROOT::EExecutionPolicy::kSequential (default)\n "
                                             "ROOT::EExecutionPolicy::kMultiThread (requires IMT)\n");
   }

#ifndef R__USE_IMT
   // to fix a compiler warning
   (void)nChunks;
#endif

   // copy the result
   std::copy(g.begin(), g.end(), grad);
   nPoints = data.Size(); // npoints

#ifdef DEBUG
   std::cout << "FitUtil.cxx : Final gradient ";
   for (unsigned int param = 0; param < npar; param++) {
      std::cout << "  " << grad[param];
   }
   std::cout << "\n";
#endif
}
//_________________________________________________________________________________________________
// for binned log likelihood functions
////////////////////////////////////////////////////////////////////////////////
/// evaluate the pdf (Poisson) contribution to the logl (actually returns the log of the pdf)
/// and its gradient

double FitUtil::EvaluatePoissonBinPdf(const IModelFunction & func, const BinData & data, const double * p, unsigned int i, double * g, bool hasGrad) {
   double y = 0;
   const double * x1 = data.GetPoint(i,y);

   const DataOptions & fitOpt = data.Opt();
   bool useBinIntegral = fitOpt.fIntegral && data.HasBinEdges();
   bool useBinVolume = (fitOpt.fBinVolume && data.HasBinEdges());

   IntegralEvaluator<> igEval( func, p, useBinIntegral);
   double fval = 0;
   const double * x2 = 0;
   // calculate the bin volume
   double binVolume = 1;
   std::vector<double> xc;
   if (useBinVolume) {
      unsigned int ndim = data.NDim();
      xc.resize(ndim);
      for (unsigned int j = 0; j < ndim; ++j) {
         double x2j = data.GetBinUpEdgeComponent(i, j);
         binVolume *= std::abs( x2j-x1[j] );
         xc[j] = 0.5*(x2j+ x1[j]);
      }
      // normalize the bin volume using a reference value
      binVolume /= data.RefVolume();
   }

   const double * x = (useBinVolume) ? &xc.front() : x1;

   // bin upper edges, kept alive for the gradient calculation below
   std::vector<double> vx2(data.NDim());
   if (!useBinIntegral ) {
      fval = func ( x, p );
   }
   else {
      // calculate the normalized integral (divided by the bin volume)
      data.GetBinUpEdgeCoordinates(i, vx2.data());
      x2 = vx2.data();
      fval = igEval( x1, x2 ) ;
   }
   if (useBinVolume) fval *= binVolume;

   // logPdf for Poisson: ignore the constant term depending on N
   fval = std::max(fval, 0.0);  // avoid negative or too small values
   double logPdf =  - fval;
   if (y > 0.0) {
      // include also the constants due to the saturated model (see the Baker-Cousins paper)
      logPdf += y * ROOT::Math::Util::EvalLog( fval / y) + y;
   }
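   // i.e. logPdf = y * log( f/y ) + y - f : the log of the Poisson probability plus the
   // saturated-model constants (Baker & Cousins), so that -2 * logPdf behaves
   // asymptotically like a chi2 contribution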
   // in the past the pdf value itself (not the log) was returned

   //double pdfval =  std::exp(logPdf);

   //if (g == 0) return pdfval;
   if (g == 0) return logPdf;

   unsigned int npar = func.NPar();
   const IGradModelFunction * gfunc = (hasGrad) ?
      dynamic_cast<const IGradModelFunction *>( &func) : nullptr;

   // gradient calculation
   if (gfunc) {
      // case the function provides the gradient
      if (!useBinIntegral )
         gfunc->ParameterGradient( x , p, g );
      else {
         // needs to calculate the integral for each partial derivative
         CalculateGradientIntegral( *gfunc, x1, x2, p, g);
      }

   }
   else {
      SimpleGradientCalculator gc(func.NPar(), func);
      if (!useBinIntegral )
         gc.ParameterGradient(x, p, fval, g);
      else {
         // needs to calculate the integral for each partial derivative
         CalculateGradientIntegral( gc, x1, x2, p, g);
      }
   }
   // correct g[] to be the derivative of the Poisson term
   for (unsigned int k = 0; k < npar; ++k) {
      // apply the bin volume correction
      if (useBinVolume) g[k] *= binVolume;

      // correct for the Poisson term
      if ( fval > 0)
         g[k] *= ( y/fval - 1.) ;//* pdfval;
      else if (y > 0) {
         const double kdmax1 = std::sqrt( std::numeric_limits<double>::max() );
         g[k] *= kdmax1;
      }
      else   // y == 0: cannot have a negative y
         g[k] *= -1;
   }


#ifdef DEBUG
   std::cout << "x = " << x[0] << " logPdf = " << logPdf << " grad";
   for (unsigned int ipar = 0; ipar < npar; ++ipar)
      std::cout << g[ipar] << "\t";
   std::cout << std::endl;
#endif

//   return pdfval;
   return logPdf;
}

double FitUtil::EvaluatePoissonLogL(const IModelFunction &func, const BinData &data, const double *p, int iWeight,
                                    bool extended, unsigned int &nPoints, ROOT::EExecutionPolicy executionPolicy,
                                    unsigned nChunks)
{
   // evaluate the Poisson Log Likelihood
   // for binned likelihood fits
   // this is Sum ( f(x_i)  -  y_i * log( f (x_i) ) )
   // a constant term for the saturated model is added as well, to make it behave like a Chi2/2
   // by default the likelihood is extended. If extended is false the fit is not extended and
   // the global Poisson term is removed (i.e. it is a binomial fit)
   // (remember that in this case one needs to have a function with a fixed normalization,
   // like in a non-extended unbinned fit)
   //
   // if using weights, use a weighted data set
   // iWeight = 1 ==> logL = Sum( w f(x_i) )
   // the case iWeight==1 is actually identical to weight==0
   // iWeight = 2 ==> logL = Sum( w*w * f(x_i) )
   //
   // nPoints returns the points where the bin content is not zero
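   //
   // per-bin contribution to the returned value (sketch, following Baker & Cousins):
   //
   //    nll_i = ( f(x_i) - y_i ) + y_i * log( y_i / f(x_i) )
   //
   // where the first (extended) term is dropped when extended is false, and the whole
   // contribution is scaled by the effective bin weight w_i = error_i^2 / y_i when iWeight == 2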


   unsigned int n = data.Size();

#ifdef USE_PARAMCACHE
   (const_cast<IModelFunction &>(func)).SetParameters(p);
#endif

   nPoints = data.Size();  // npoints


   // get the fit option and check the case of using the integral of the bins
   const DataOptions &fitOpt = data.Opt();
   bool useBinIntegral = fitOpt.fIntegral && data.HasBinEdges();
   bool useBinVolume = (fitOpt.fBinVolume && data.HasBinEdges());
   bool useW2 = (iWeight == 2);

   // normalize if needed by a reference volume value
   double wrefVolume = 1.0;
   if (useBinVolume) {
      if (fitOpt.fNormBinVolume) wrefVolume /= data.RefVolume();
   }

//#define DEBUG
#ifdef DEBUG
   std::cout << "Evaluate PoissonLogL for params = [ ";
   for (unsigned int j = 0; j < func.NPar(); ++j) std::cout << p[j] << " , ";
   std::cout << "]  - data size = " << n << " useBinIntegral " << useBinIntegral << "  useBinVolume "
             << useBinVolume << "  useW2 " << useW2 << " wrefVolume = " << wrefVolume << std::endl;
#endif


   ROOT::Math::IntegrationOneDim::Type igType = ROOT::Math::IntegrationOneDim::kDEFAULT;
   if (executionPolicy == ROOT::EExecutionPolicy::kMultiThread) {
      // do not use the GSL integrator, which is not thread safe
      igType = ROOT::Math::IntegrationOneDim::kGAUSS;
   }
#ifdef USE_PARAMCACHE
   IntegralEvaluator<> igEval(func, 0, useBinIntegral, igType);
#else
   IntegralEvaluator<> igEval(func, p, useBinIntegral, igType);
#endif

   auto mapFunction = [&](const unsigned i) {
      auto x1 = data.GetCoordComponent(i, 0);
      auto y = *data.ValuePtr(i);

      const double *x = nullptr;
      std::vector<double> xc;
      double fval = 0;
      double binVolume = 1.0;

      if (useBinVolume) {
         unsigned int ndim = data.NDim();
         xc.resize(data.NDim());
         for (unsigned int j = 0; j < ndim; ++j) {
            double xx = *data.GetCoordComponent(i, j);
            double x2 = data.GetBinUpEdgeComponent(i, j);
            binVolume *= std::abs(x2 - xx);
            xc[j] = 0.5 * (x2 + xx);
         }
         x = xc.data();
         // normalize the bin volume using a reference value
         binVolume *= wrefVolume;
      } else if (data.NDim() > 1) {
         xc.resize(data.NDim());
         xc[0] = *x1;
         for (unsigned int j = 1; j < data.NDim(); ++j) {
            xc[j] = *data.GetCoordComponent(i, j);
         }
         x = xc.data();
      } else {
         x = x1;
      }

      if (!useBinIntegral) {
#ifdef USE_PARAMCACHE
         fval = func(x);
#else
         fval = func(x, p);
#endif
      } else {
         // calculate the integral (normalized by the bin volume)
         // need to set function and parameters here in case the loop is parallelized
         std::vector<double> x2(data.NDim());
         data.GetBinUpEdgeCoordinates(i, x2.data());
         fval = igEval(x, x2.data());
      }
      if (useBinVolume) fval *= binVolume;



#ifdef DEBUG
      int NSAMPLE = 100;
      if (i % NSAMPLE == 0) {
         std::cout << "evt " << i << " x = [ ";
         for (unsigned int j = 0; j < func.NDim(); ++j) std::cout << x[j] << " , ";
         std::cout << "]  ";
         if (fitOpt.fIntegral) {
            std::cout << "x2 = [ ";
            for (unsigned int j = 0; j < func.NDim(); ++j) std::cout << data.GetBinUpEdgeComponent(i, j) << " , ";
            std::cout << "] ";
         }
         std::cout << "  y = " << y << " fval = " << fval << std::endl;
      }
#endif


      // EvalLog protects against 0 values of fval, but we do not want to add
      // negative values of fval to the -log sum
      fval = std::max(fval, 0.0);

      double nloglike = 0; // negative loglikelihood
      if (useW2) {
         // apply a weight correction. The effective weight is error^2/y
         // and the expected number of events in the bin is fval/weight
         // the correction can be applied only when y is not zero, otherwise the weight is undefined
         // (in the case of a weighted likelihood the constant term due to
         // the saturated model is not of interest)

         // use for the empty bins the global weight
         double weight = 1.0;
         if (y != 0) {
            double error = data.Error(i);
            weight = (error * error) / y; // this is the bin effective weight
            nloglike += weight * y * ( ROOT::Math::Util::EvalLog(y) - ROOT::Math::Util::EvalLog(fval) );
         }
         else {
            // for an empty bin use the average weight computed from the total data weight
            weight = data.SumOfError2() / data.SumOfContent();
         }
         if (extended) {
            nloglike += weight  * ( fval - y);
         }

      } else {
         // standard case: no weights or iWeight=1
         // this is needed for the Poisson likelihood (which is extended), not for the multinomial one
         // the formula below includes the constant term due to the likelihood of the saturated model (f(x) = y)
         // (same formula as in the Baker-Cousins paper, page 439, except for a factor of 2)
         if (extended) nloglike = fval - y;

         if (y > 0) {
            nloglike += y * (ROOT::Math::Util::EvalLog(y) - ROOT::Math::Util::EvalLog(fval));
         }
      }
#ifdef DEBUG
      {
         R__LOCKGUARD(gROOTMutex);
         std::cout << " nll = " << nloglike << std::endl;
      }
#endif
      return nloglike;
   };

#ifdef R__USE_IMT
   auto redFunction = [](const std::vector<double> &objs) {
      return std::accumulate(objs.begin(), objs.end(), double{});
   };
#else
   (void)nChunks;

   // If IMT is disabled, force the execution policy to the serial case
   if (executionPolicy == ROOT::EExecutionPolicy::kMultiThread) {
      Warning("FitUtil::EvaluatePoissonLogL", "Multithread execution policy requires IMT, which is disabled. Changing "
                                              "to ROOT::EExecutionPolicy::kSequential.");
      executionPolicy = ROOT::EExecutionPolicy::kSequential;
   }
#endif

   double res{};
   if (executionPolicy == ROOT::EExecutionPolicy::kSequential) {
      for (unsigned int i = 0; i < n; ++i) {
         res += mapFunction(i);
      }
#ifdef R__USE_IMT
   } else if (executionPolicy == ROOT::EExecutionPolicy::kMultiThread) {
      ROOT::TThreadExecutor pool;
      auto chunks = nChunks != 0 ? nChunks : setAutomaticChunking(data.Size());
      res = pool.MapReduce(mapFunction, ROOT::TSeq<unsigned>(0, n), redFunction, chunks);
#endif
      //   } else if(executionPolicy == ROOT::Fit::kMultiProcess){
      //      ROOT::TProcessExecutor pool;
      //      res = pool.MapReduce(mapFunction, ROOT::TSeq<unsigned>(0, n), redFunction);
   } else {
      Error("FitUtil::EvaluatePoissonLogL",
            "Execution policy unknown. Available choices:\n ROOT::EExecutionPolicy::kSequential (default)\n ROOT::EExecutionPolicy::kMultiThread (requires IMT)\n");
   }

#ifdef DEBUG
   std::cout << "Loglikelihood  = " << res << std::endl;
#endif

   return res;
}
1596
1597void FitUtil::EvaluatePoissonLogLGradient(const IModelFunction &f, const BinData &data, const double *p, double *grad,
1598 unsigned int &, ROOT::EExecutionPolicy executionPolicy, unsigned nChunks)
1599{
1600 // evaluate the gradient of the Poisson log likelihood function
1601
1602 const IGradModelFunction *fg = dynamic_cast<const IGradModelFunction *>(&f);
1603 assert(fg != nullptr); // must be called by a grad function
1604
1605 const IGradModelFunction &func = *fg;
1606
1607#ifdef USE_PARAMCACHE
1608 (const_cast<IGradModelFunction &>(func)).SetParameters(p);
1609#endif
1610
1611 const DataOptions &fitOpt = data.Opt();
1612 bool useBinIntegral = fitOpt.fIntegral && data.HasBinEdges();
1613 bool useBinVolume = (fitOpt.fBinVolume && data.HasBinEdges());
1614
1615 double wrefVolume = 1.0;
1616 if (useBinVolume && fitOpt.fNormBinVolume)
1617 wrefVolume /= data.RefVolume();
1618
1620 if (executionPolicy == ROOT::EExecutionPolicy::kMultiThread) {
1621 // do not use GSL integrator which is not thread safe
1623 }
1624
1625 IntegralEvaluator<> igEval(func, p, useBinIntegral, igType);
1626
1627 unsigned int npar = func.NPar();
1628 unsigned initialNPoints = data.Size();
1629
1630 auto mapFunction = [&](const unsigned int i) {
1631 // set all vector values to zero
1632 std::vector<double> gradFunc(npar);
1633 std::vector<double> pointContribution(npar);
1634
1635 const auto x1 = data.GetCoordComponent(i, 0);
1636 const auto y = data.Value(i);
1637 auto invError = data.Error(i);
1638
1639 invError = (invError != 0.0) ? 1.0 / invError : 1;
1640
1641 double fval = 0;
1642
1643 const double *x = nullptr;
1644 std::vector<double> xc;
1645
1646 unsigned ndim = data.NDim();
1647 double binVolume = 1.0;
1648 if (useBinVolume) {
1649
1650 xc.resize(ndim);
1651
1652 for (unsigned int j = 0; j < ndim; ++j) {
1653 double x1_j = *data.GetCoordComponent(i, j);
1654 double x2_j = data.GetBinUpEdgeComponent(i, j);
1655 binVolume *= std::abs(x2_j - x1_j);
1656 xc[j] = 0.5 * (x2_j + x1_j);
1657 }
1658
1659 x = xc.data();
1660
1661 // normalize the bin volume using a reference value
1662 binVolume *= wrefVolume;
1663 } else if (ndim > 1) {
1664 xc.resize(ndim);
1665 xc[0] = *x1;
1666 for (unsigned int j = 1; j < ndim; ++j)
1667 xc[j] = *data.GetCoordComponent(i, j);
1668 x = xc.data();
1669 } else {
1670 x = x1;
1671 }
1672
1673 if (!useBinIntegral) {
1674 fval = func(x, p);
1675 func.ParameterGradient(x, p, &gradFunc[0]);
1676 } else {
1677 // calculate integral (normalized by bin volume)
1678 // need to set function and parameters here in case loop is parallelized
1679 std::vector<double> x2(data.NDim());
1680 data.GetBinUpEdgeCoordinates(i, x2.data());
1681 fval = igEval(x, x2.data());
1682 CalculateGradientIntegral(func, x, x2.data(), p, &gradFunc[0]);
1683 }
1684 if (useBinVolume)
1685 fval *= binVolume;
1686
1687#ifdef DEBUG
1688 {
1690 if (i < 5 || (i > data.Size()-5) ) {
1691 if (data.NDim() > 1) std::cout << i << " x " << x[0] << " y " << x[1] << " func " << fval
1692 << " gradient " << gradFunc[0] << " " << gradFunc[1] << " " << gradFunc[3] << std::endl;
1693 else std::cout << i << " x " << x[0] << " gradient " << gradFunc[0] << " " << gradFunc[1] << " " << gradFunc[3] << std::endl;
1694 }
1695 }
1696#endif
1697
1698 // correct the gradient
1699 for (unsigned int ipar = 0; ipar < npar; ++ipar) {
1700
1701 // correct gradient for bin volumes
1702 if (useBinVolume)
1703 gradFunc[ipar] *= binVolume;
1704
1705 // df/dp * (1. - y/f )
1706 if (fval > 0)
1707 pointContribution[ipar] = gradFunc[ipar] * (1. - y / fval);
1708 else if (gradFunc[ipar] != 0) {
1709 const double kdmax1 = std::sqrt(std::numeric_limits<double>::max());
1710 const double kdmax2 = std::numeric_limits<double>::max() / (4 * initialNPoints);
1711 double gg = kdmax1 * gradFunc[ipar];
1712 if (gg > 0)
1713 gg = std::min(gg, kdmax2);
1714 else
1715 gg = std::max(gg, -kdmax2);
1716 pointContribution[ipar] = -gg;
1717 }
1718 }
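      // Editorial note on the clamping above: when fval <= 0 the Poisson factor
      // (1 - y/fval) is ill-defined, so the component is replaced by a large but finite
      // value; kdmax2 is scaled by 4 * initialNPoints so that even one such term per
      // data point cannot overflow a double when the contributions are summed.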
1719
1720
1721 return pointContribution;
1722 };
1723
1724 // Vertically reduce the set of vectors by summing their equally-indexed components
1725 auto redFunction = [&](const std::vector<std::vector<double>> &pointContributions) {
1726 std::vector<double> result(npar);
1727
1728 for (auto const &pointContribution : pointContributions) {
1729 for (unsigned int parameterIndex = 0; parameterIndex < npar; parameterIndex++)
1730 result[parameterIndex] += pointContribution[parameterIndex];
1731 }
1732
1733 return result;
1734 };
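   // Editorial example: with npar = 2, redFunction applied to {{1, 2}, {3, 4}, {5, 6}}
   // returns {9, 12}, i.e. the component-wise sum of the per-point contributions.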
1735
1736 std::vector<double> g(npar);
1737
1738#ifndef R__USE_IMT
1739 // If IMT is disabled, force the execution policy to the serial case
1740 if (executionPolicy == ROOT::EExecutionPolicy::kMultiThread) {
1741 Warning("FitUtil::EvaluatePoissonLogLGradient",
1742 "Multithread execution policy requires IMT, which is disabled. Changing "
1743 "to ROOT::EExecutionPolicy::kSequential.");
1744 executionPolicy = ROOT::EExecutionPolicy::kSequential;
1745 }
1746#endif
1747
1748 if (executionPolicy == ROOT::EExecutionPolicy::kSequential) {
1749 std::vector<std::vector<double>> allGradients(initialNPoints);
1750 for (unsigned int i = 0; i < initialNPoints; ++i) {
1751 allGradients[i] = mapFunction(i);
1752 }
1753 g = redFunction(allGradients);
1754 }
1755#ifdef R__USE_IMT
1756   else if (executionPolicy == ROOT::EExecutionPolicy::kMultiThread) {
1757      ROOT::TThreadExecutor pool;
1758      auto chunks = nChunks != 0 ? nChunks : setAutomaticChunking(initialNPoints);
1759      g = pool.MapReduce(mapFunction, ROOT::TSeq<unsigned>(0, initialNPoints), redFunction, chunks);
1760 }
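   // Editorial note: MapReduce applies mapFunction to every index in
   // [0, initialNPoints), grouping the work into 'chunks' tasks, and folds the
   // resulting per-point gradient vectors with redFunction; when nChunks == 0 the
   // number of chunks is chosen by setAutomaticChunking below.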
1761#endif
1762
1763 // else if(executionPolicy == ROOT::Fit::kMultiprocess){
1764 // ROOT::TProcessExecutor pool;
1765 // g = pool.MapReduce(mapFunction, ROOT::TSeq<unsigned>(0, n), redFunction);
1766 // }
1767 else {
1768 Error("FitUtil::EvaluatePoissonLogLGradient",
1769         "Execution policy unknown. Available choices:\n ROOT::EExecutionPolicy::kSequential (default)\n ROOT::EExecutionPolicy::kMultiThread (requires IMT)\n");
1770 }
1771
1772#ifndef R__USE_IMT
1773   // to fix compiler warning
1774 (void)nChunks;
1775#endif
1776
1777 // copy result
1778 std::copy(g.begin(), g.end(), grad);
1779
1780#ifdef DEBUG
1781 std::cout << "***** Final gradient : ";
1782 for (unsigned int ii = 0; ii< npar; ++ii) std::cout << grad[ii] << " ";
1783 std::cout << "\n";
1784#endif
1785
1786}
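// Editorial usage sketch (illustrative only, not part of the original source; 'model',
// 'data' and 'params' are hypothetical). The model must actually implement
// IGradModelFunction, since the function asserts on the dynamic_cast:
//
//    std::vector<double> grad(model.NPar());
//    unsigned int nPoints = 0;
//    ROOT::Fit::FitUtil::EvaluatePoissonLogLGradient(model, data, params,
//                                                    grad.data(), nPoints);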
1787
1788
1789unsigned FitUtil::setAutomaticChunking(unsigned nEvents){
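      // Editorial note: the heuristic is: fewer than 1000 events per worker yields one
      // chunk per worker; otherwise one chunk per roughly 1000 events. Example:
      // nEvents = 1000000 with ncpu = 8 gives 125000 events per worker, hence
      // 1000000/1000 = 1000 chunks. (Assumes ncpu > 0, i.e. IMT has been enabled
      // before this is called.)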
1790 auto ncpu = ROOT::GetThreadPoolSize();
1791 if (nEvents/ncpu < 1000) return ncpu;
1792 return nEvents/1000;
1793 //return ((nEvents/ncpu + 1) % 1000) *40 ; //arbitrary formula
1794}
1795
1796} // end namespace Fit
1797
1798} // end namespace ROOT