// @(#)root/mathcore:$Id$
// Author: L. Moneta Tue Nov 28 10:52:47 2006

/**********************************************************************
 *                                                                    *
 * Copyright (c) 2006  LCG ROOT Math Team, CERN/PH-SFT                *
 *                                                                    *
 *                                                                    *
 **********************************************************************/

// Implementation file for class FitUtil

#include "Fit/FitUtil.h"

#include "Fit/BinData.h"
#include "Fit/UnBinData.h"

#include "Math/IFunctionfwd.h"
#include "Math/IParamFunction.h"
#include "Math/Integrator.h"
#include "Math/IntegratorMultiDim.h"
#include "Math/WrappedFunction.h"
#include "Math/OneDimFunctionAdapter.h"
#include "Math/RichardsonDerivator.h"

#include "Math/Error.h"
#include "Math/Util.h"  // for safe log(x)

#include <limits>
#include <cmath>
#include <cassert>
#include <algorithm>
#include <numeric>
//#include <memory>

#include "TROOT.h"

//#define DEBUG
#ifdef DEBUG
#define NSAMPLE 10
#include <iostream>
#endif

// need to implement integral option

namespace ROOT {

   namespace Fit {

      namespace FitUtil {

         // derivative with respect to the parameter to be integrated
         template<class GradFunc = IGradModelFunction>
         struct ParamDerivFunc {
            ParamDerivFunc(const GradFunc & f) : fFunc(f), fIpar(0) {}
            void SetDerivComponent(unsigned int ipar) { fIpar = ipar; }
            double operator() (const double *x, const double *p) const {
               return fFunc.ParameterDerivative( x, p, fIpar );
            }
            unsigned int NDim() const { return fFunc.NDim(); }
            const GradFunc & fFunc;
            unsigned int fIpar;
         };

         // simple gradient calculator using finite-difference rules

         class SimpleGradientCalculator {

         public:
            // construct from a function and the gradient dimension gdim:
            // gdim = npar for parameter gradients
            // gdim = ndim for coordinate gradients
            // (the parameter values will be passed later)
            // one can choose between the forward 2-point rule (1 extra evaluation), istrat=1,
            // or the central 3-point rule (2 extra evaluations), istrat=2
            // (the central rule was found not to work correctly - minuit2FitBench fails)
            SimpleGradientCalculator(int gdim, const IModelFunction & func, double eps = 2.E-8, int istrat = 1) :
               fEps(eps),
               fPrecision(1.E-8), // sqrt(epsilon)
               fStrategy(istrat),
               fN(gdim),
               fFunc(func),
               fVec(std::vector<double>(gdim)) // this can probably be optimized
            {}

            // internal method to calculate a single partial derivative
            // assumes the cached vector fVec is already set
            double DoParameterDerivative(const double *x, const double *p, double f0, int k) const {
               double p0 = p[k];
               double h = std::max( fEps* std::abs(p0), 8.0*fPrecision*(std::abs(p0) + fPrecision) );
               fVec[k] += h;
               double deriv = 0;
               // t.b.d : treat case of infinities
               //if (fval > - std::numeric_limits<double>::max() && fval < std::numeric_limits<double>::max() )
               double f1 = fFunc(x, &fVec.front() );
               if (fStrategy > 1) {
                  fVec[k] = p0 - h;
                  double f2 = fFunc(x, &fVec.front() );
                  deriv = 0.5 * ( f1 - f2 )/h;   // central difference (f(p+h) - f(p-h)) / (2h)
               }
               else
                  deriv = ( f1 - f0 )/h;

               fVec[k] = p[k]; // restore original p value
               return deriv;
            }
            // number of dimensions in x (needed when calculating the integrals)
            unsigned int NDim() const {
               return fFunc.NDim();
            }
            // number of parameters (needed for the gradient calculation)
            unsigned int NPar() const {
               return fFunc.NPar();
            }

            double ParameterDerivative(const double *x, const double *p, int ipar) const {
               // fVec are the cached parameter values
               std::copy(p, p+fN, fVec.begin());
               double f0 = fFunc(x, p);
               return DoParameterDerivative(x,p,f0,ipar);
            }

            // calculate the full gradient at point (x,p), knowing already the value f0 (we gain a function eval.)
            void ParameterGradient(const double * x, const double * p, double f0, double * g) {
               // fVec are the cached parameter values
               std::copy(p, p+fN, fVec.begin());
               for (unsigned int k = 0; k < fN; ++k) {
                  g[k] = DoParameterDerivative(x,p,f0,k);
               }
            }

            // calculate the gradient w.r.t. the coordinate values
            void Gradient(const double * x, const double * p, double f0, double * g) {
               // fVec are the cached coordinate values
               std::copy(x, x+fN, fVec.begin());
               for (unsigned int k = 0; k < fN; ++k) {
                  double x0 = x[k];
                  double h = std::max( fEps* std::abs(x0), 8.0*fPrecision*(std::abs(x0) + fPrecision) );
                  fVec[k] += h;
                  // t.b.d : treat case of infinities
                  //if (fval > - std::numeric_limits<double>::max() && fval < std::numeric_limits<double>::max() )
                  double f1 = fFunc( &fVec.front(), p );
                  if (fStrategy > 1) {
                     fVec[k] = x0 - h;
                     double f2 = fFunc( &fVec.front(), p );
                     g[k] = 0.5 * ( f1 - f2 )/h;   // central difference (f(x+h) - f(x-h)) / (2h)
                  }
                  else
                     g[k] = ( f1 - f0 )/h;

                  fVec[k] = x[k]; // restore original x value
               }
            }

         private:

            double fEps;
            double fPrecision;
            int fStrategy; // strategy in the calculation ( =1 forward 2-point rule (1 extra eval.), =2 central 3-point rule (2 extra evals.) )
            unsigned int fN; // gradient dimension
            const IModelFunction & fFunc;
            mutable std::vector<double> fVec; // cached coordinates (or parameter values in the case of parameter gradients)
         };
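
         // Illustrative sketch (not part of the FitUtil API): how the calculator is
         // typically driven; `myFunc` is an assumed IModelFunction implementation.
         //
         //    double x[1]   = { 0.5 };
         //    double par[2] = { 1.0, 2.0 };
         //    SimpleGradientCalculator gc(2, myFunc);           // gdim = npar = 2
         //    double g[2];
         //    gc.ParameterGradient(x, par, myFunc(x, par), g);  // g[k] = df/dp_k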

         // function to avoid infinities or nan
         double CorrectValue(double rval) {
            // avoid infinities or nan in rval
            if (rval > - std::numeric_limits<double>::max() && rval < std::numeric_limits<double>::max() )
               return rval;
            else if (rval < 0)
               // case -inf
               return -std::numeric_limits<double>::max();
            else
               // case +inf or nan
               return + std::numeric_limits<double>::max();
         }

         // Check if the value is a finite number. The argument rval is updated if it is infinite or NaN,
         // setting it to the maximum finite value (preserving the sign).
         bool CheckInfNaNValue(double &rval)
         {
            if (rval > - std::numeric_limits<double>::max() && rval < std::numeric_limits<double>::max() )
               return true;
            else if (rval < 0) {
               // case -inf
               rval = -std::numeric_limits<double>::max();
               return false;
            }
            else {
               // case +inf or nan
               rval = + std::numeric_limits<double>::max();
               return false;
            }
         }
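
         // Example of the clamping behaviour (illustrative only):
         //    double r = -1.0 / 0.0;           // -inf
         //    bool ok = CheckInfNaNValue(r);   // ok == false,
         //                                     // r == -std::numeric_limits<double>::max()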

         // calculation of the integral of the gradient functions
         // for a function providing derivatives w.r.t. the parameters
         // x1 and x2 define the integration interval, p the parameters
         template <class GFunc>
         void CalculateGradientIntegral(const GFunc & gfunc,
                                        const double *x1, const double * x2, const double * p, double *g) {

            // needs to calculate the integral for each partial derivative:
            // g[k] = Integral_{x1}^{x2} df/dp_k (x; p) dx
            ParamDerivFunc<GFunc> pfunc( gfunc);
            IntegralEvaluator<ParamDerivFunc<GFunc> > igDerEval( pfunc, p, true);
            // loop on the parameters
            unsigned int npar = gfunc.NPar();
            for (unsigned int k = 0; k < npar; ++k ) {
               pfunc.SetDerivComponent(k);
               g[k] = igDerEval( x1, x2 );
            }
         }

      } // end namespace FitUtil

//___________________________________________________________________________________________________________________________
// for chi2 functions
//___________________________________________________________________________________________________________________________

double FitUtil::EvaluateChi2(const IModelFunction &func, const BinData &data, const double *p, unsigned int &,
                             ROOT::Fit::ExecutionPolicy executionPolicy, unsigned nChunks)
{
   // evaluate the chi2 given a function reference and the data;
   // returns the chi2 value (the actual number of points used is returned via the nPoints argument)
   // normal chi2 using only the errors on the values (from fitting a histogram);
   // optionally the integral of the function over the bin is used

   unsigned int n = data.Size();

   // set the parameters of the function to cache the integral value
#ifdef USE_PARAMCACHE
   (const_cast<IModelFunction &>(func)).SetParameters(p);
#endif
   // do not cache parameter values (it is not thread safe)
   //func.SetParameters(p);


   // get the fit option and check the case of using the integral of the bins
   const DataOptions & fitOpt = data.Opt();
   bool useBinIntegral = fitOpt.fIntegral && data.HasBinEdges();
   bool useBinVolume = (fitOpt.fBinVolume && data.HasBinEdges());
   bool useExpErrors = (fitOpt.fExpErrors);
   bool isWeighted = data.IsWeighted();

#ifdef DEBUG
   std::cout << "\n\nFit data size = " << n << std::endl;
   std::cout << "evaluate chi2 using function " << &func << " " << p << std::endl;
   std::cout << "use empty bins " << fitOpt.fUseEmpty << std::endl;
   std::cout << "use integral " << fitOpt.fIntegral << std::endl;
   std::cout << "use all error=1 " << fitOpt.fErrors1 << std::endl;
   if (isWeighted) std::cout << "Weighted data set - sumw = " << data.SumOfContent() << " sumw2 = " << data.SumOfError2() << std::endl;
#endif

#ifdef USE_PARAMCACHE
   IntegralEvaluator<> igEval( func, 0, useBinIntegral);
#else
   IntegralEvaluator<> igEval( func, p, useBinIntegral);
#endif
   double maxResValue = std::numeric_limits<double>::max() / n;
   double wrefVolume = 1.0;
   if (useBinVolume) {
      if (fitOpt.fNormBinVolume) wrefVolume /= data.RefVolume();
   }

   (const_cast<IModelFunction &>(func)).SetParameters(p);

   auto mapFunction = [&](const unsigned i){

      double chi2{};
      double fval{};

      const auto x1 = data.GetCoordComponent(i, 0);
      const auto y = data.Value(i);
      auto invError = data.InvError(i);

      //invError = (invError != 0.0) ? 1.0/invError : 1;

      const double * x = nullptr;
      std::vector<double> xc;
      double binVolume = 1.0;
      if (useBinVolume) {
         unsigned int ndim = data.NDim();
         const double * x2 = data.BinUpEdge(i);
         xc.resize(data.NDim());
         for (unsigned int j = 0; j < ndim; ++j) {
            auto xx = *data.GetCoordComponent(i, j);
            binVolume *= std::abs(x2[j] - xx);
            xc[j] = 0.5*(x2[j] + xx);
         }
         x = xc.data();
         // normalize the bin volume using a reference value
         binVolume *= wrefVolume;
      } else if(data.NDim() > 1) {
         xc.resize(data.NDim());
         xc[0] = *x1;
         for (unsigned int j = 1; j < data.NDim(); ++j)
            xc[j] = *data.GetCoordComponent(i, j);
         x = xc.data();
      } else {
         x = x1;
      }


      if (!useBinIntegral) {
#ifdef USE_PARAMCACHE
         fval = func ( x );
#else
         fval = func ( x, p );
#endif
      }
      else {
         // calculate the integral normalized by the bin volume
         // need to set the function and parameters here in case the loop is parallelized
         fval = igEval( x, data.BinUpEdge(i)) ;
      }
      // normalize the result if requested according to the bin volume
      if (useBinVolume) fval *= binVolume;

      // expected errors
      if (useExpErrors) {
         double invWeight = 1.0;
         if (isWeighted) {
            // we need first to check if a weight factor needs to be applied
            // weight = sumw2/sumw = error**2/content
            //invWeight = y * invError * invError;
            // always use the global weight, not the one observed in the bin;
            // for empty bins use the global weight (if the data set is weighted, data.SumOfError2() is not zero)
            invWeight = data.SumOfContent()/ data.SumOfError2();
            //if (invError > 0) invWeight = y * invError * invError;
         }

         // if (invError == 0) invWeight = (data.SumOfError2() > 0) ? data.SumOfContent()/ data.SumOfError2() : 1.0;
         // compute the expected error as f(x) / weight
         double invError2 = (fval > 0) ? invWeight / fval : 0.0;
         invError = std::sqrt(invError2);
         //std::cout << "using Pearson chi2 " << x[0] << " " << 1./invError2 << " " << fval << std::endl;
      }

//#define DEBUG
#ifdef DEBUG
      std::cout << x[0] << " " << y << " " << 1./invError << " params : ";
      for (unsigned int ipar = 0; ipar < func.NPar(); ++ipar)
         std::cout << p[ipar] << "\t";
      std::cout << "\tfval = " << fval << " bin volume " << binVolume << " ref " << wrefVolume << std::endl;
#endif
//#undef DEBUG

      if (invError > 0) {

         double tmp = ( y - fval ) * invError;
         double resval = tmp * tmp;


         // avoid infinity or nan in the chi2 value due to wrong function values
         if ( resval < maxResValue )
            chi2 += resval;
         else {
            //nRejected++;
            chi2 += maxResValue;
         }
      }
      return chi2;
   };

#ifdef R__USE_IMT
   auto redFunction = [](const std::vector<double> & objs){
      return std::accumulate(objs.begin(), objs.end(), double{});
   };
#else
   (void)nChunks;

   // If IMT is disabled, force the execution policy to the serial case
   if (executionPolicy == ROOT::Fit::ExecutionPolicy::kMultithread) {
      Warning("FitUtil::EvaluateChi2", "Multithread execution policy requires IMT, which is disabled. Changing "
                                       "to ROOT::Fit::ExecutionPolicy::kSerial.");
      executionPolicy = ROOT::Fit::ExecutionPolicy::kSerial;
   }
#endif

   double res{};
   if(executionPolicy == ROOT::Fit::ExecutionPolicy::kSerial){
      for (unsigned int i=0; i<n; ++i) {
         res += mapFunction(i);
      }
#ifdef R__USE_IMT
   } else if(executionPolicy == ROOT::Fit::ExecutionPolicy::kMultithread) {
      ROOT::TThreadExecutor pool;
      auto chunks = nChunks != 0 ? nChunks : setAutomaticChunking(data.Size());
      res = pool.MapReduce(mapFunction, ROOT::TSeq<unsigned>(0, n), redFunction, chunks);
#endif
   // } else if(executionPolicy == ROOT::Fit::kMultiProcess){
   //    ROOT::TProcessExecutor pool;
   //    res = pool.MapReduce(mapFunction, ROOT::TSeq<unsigned>(0, n), redFunction);
   } else{
      Error("FitUtil::EvaluateChi2","Execution policy unknown. Available choices:\n ROOT::Fit::ExecutionPolicy::kSerial (default)\n ROOT::Fit::ExecutionPolicy::kMultithread (requires IMT)\n");
   }

   return res;
}
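
// Illustrative note: the sum accumulated above is the standard least-squares estimator
//    chi2(p) = Sum_i [ (y_i - f(x_i; p)) * invError_i ]^2
// A hypothetical call, given a filled ROOT::Fit::BinData `data`, a model `func`
// and parameters `par` (assuming the nChunks argument is defaulted in the header):
//
//    unsigned int nPoints = 0;
//    double chi2 = ROOT::Fit::FitUtil::EvaluateChi2(func, data, par, nPoints,
//                                                   ROOT::Fit::ExecutionPolicy::kSerial);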


//___________________________________________________________________________________________________________________________

double FitUtil::EvaluateChi2Effective(const IModelFunction & func, const BinData & data, const double * p, unsigned int & nPoints) {
   // evaluate the chi2 given a function reference and the data;
   // returns the chi2 value (the actual number of points used is returned in nPoints)
   // method using the errors on the coordinates
   // the integral of the bin does not make sense in this case

   unsigned int n = data.Size();

#ifdef DEBUG
   std::cout << "\n\nFit data size = " << n << std::endl;
   std::cout << "evaluate effective chi2 using function " << &func << " " << p << std::endl;
#endif

   assert(data.HaveCoordErrors() || data.HaveAsymErrors());

   double chi2 = 0;
   //int nRejected = 0;


   //func.SetParameters(p);

   unsigned int ndim = func.NDim();

   // use the Richardson derivator for the numerical derivatives
   ROOT::Math::RichardsonDerivator derivator;

   double maxResValue = std::numeric_limits<double>::max() / n;



   for (unsigned int i = 0; i < n; ++ i) {


      double y = 0;
      const double * x = data.GetPoint(i,y);

      double fval = func( x, p );

      double delta_y_func = y - fval;


      double ey = 0;
      const double * ex = 0;
      if (!data.HaveAsymErrors() )
         ex = data.GetPointError(i, ey);
      else {
         double eylow = 0, eyhigh = 0;
         ex = data.GetPointError(i, eylow, eyhigh);
         if ( delta_y_func < 0)
            ey = eyhigh; // function is higher than the points
         else
            ey = eylow;
      }
      double e2 = ey * ey;
      // before calculating the gradient, check that not all errors in x are zero
      unsigned int j = 0;
      while ( j < ndim && ex[j] == 0.) { j++; }
      // if j is less than ndim some elements are non-zero
      if (j < ndim) {
         // need an adapter from a multi-dim function to a one-dimensional one
         ROOT::Math::OneDimMultiFunctionAdapter<const IModelFunction &> f1D(func, x, 0, p);
         // select the optimal step size (use 1E-2 by default, as was done in TF1):
         double kEps = 0.01;
         double kPrecision = 1.E-8;
         for (unsigned int icoord = 0; icoord < ndim; ++icoord) {
            // calculate the derivative for each coordinate
            if (ex[icoord] > 0) {
               //gradCalc.Gradient(x, p, fval, &grad[0]);
               f1D.SetCoord(icoord);
               // optimal step size (take ex[] as the scale for the point and use 1% of it)
               double x0 = x[icoord];
               double h = std::max( kEps* std::abs(ex[icoord]), 8.0*kPrecision*(std::abs(x0) + kPrecision) );
               double deriv = derivator.Derivative1(f1D, x[icoord], h);
               double edx = ex[icoord] * deriv;
               e2 += edx * edx;
#ifdef DEBUG
               std::cout << "error for coord " << icoord << " = " << ex[icoord] << " deriv " << deriv << std::endl;
#endif
            }
         }
      }
      double w2 = (e2 > 0) ? 1.0/e2 : 0;
      double resval = w2 * ( y - fval ) * ( y - fval);

#ifdef DEBUG
      std::cout << x[0] << " " << y << " ex " << ex[0] << " ey " << ey << " params : ";
      for (unsigned int ipar = 0; ipar < func.NPar(); ++ipar)
         std::cout << p[ipar] << "\t";
      std::cout << "\tfval = " << fval << "\tresval = " << resval << std::endl;
#endif

      // avoid infinities and nan in the chi2 sum
      // a possibility of excluding some points (e.g. near a singularity) could be added later
      if ( resval < maxResValue )
         chi2 += resval;
      else
         chi2 += maxResValue;
      //nRejected++;

   }

   // reset the number of fitting data points
   nPoints = n; // no points are rejected
   //if (nRejected != 0) nPoints = n - nRejected;

#ifdef DEBUG
   std::cout << "chi2 = " << chi2 << " n = " << nPoints << std::endl;
#endif

   return chi2;

}
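
// Illustrative note: the effective variance computed above is
//    sigma_eff^2 = ey^2 + Sum_k ( (df/dx_k)(x; p) * ex_k )^2
// so each point contributes (y - f(x; p))^2 / sigma_eff^2 to the chi2,
// i.e. the coordinate errors ex_k are propagated through the model slope.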


////////////////////////////////////////////////////////////////////////////////
/// evaluate the chi2 contribution (residual term) only for data with no coordinate errors
/// This function is used in specialized least-squares algorithms like FUMILI or L.M.
/// If there are errors on the coordinates the method is not yet implemented;
/// the integral option is also not yet implemented:
/// in those cases one can use the normal chi2 method
double FitUtil::EvaluateChi2Residual(const IModelFunction & func, const BinData & data, const double * p, unsigned int i, double * g) {
   if (data.GetErrorType() == BinData::kCoordError && data.Opt().fCoordErrors ) {
      MATH_ERROR_MSG("FitUtil::EvaluateChi2Residual","Errors on the coordinates are not used in calculating the Chi2 residual");
      return 0; // it will assert otherwise later in GetPoint
   }


   //func.SetParameters(p);

   double y, invError = 0;

   const DataOptions & fitOpt = data.Opt();
   bool useBinIntegral = fitOpt.fIntegral && data.HasBinEdges();
   bool useBinVolume = (fitOpt.fBinVolume && data.HasBinEdges());
   bool useExpErrors = (fitOpt.fExpErrors);

   const double * x1 = data.GetPoint(i,y, invError);

   IntegralEvaluator<> igEval( func, p, useBinIntegral);
   double fval = 0;
   unsigned int ndim = data.NDim();
   double binVolume = 1.0;
   const double * x2 = 0;
   if (useBinVolume || useBinIntegral) x2 = data.BinUpEdge(i);

   double * xc = 0;

   if (useBinVolume) {
      xc = new double[ndim];
      for (unsigned int j = 0; j < ndim; ++j) {
         binVolume *= std::abs( x2[j]-x1[j] );
         xc[j] = 0.5*(x2[j]+ x1[j]);
      }
      // normalize the bin volume using a reference value
      binVolume /= data.RefVolume();
   }

   const double * x = (useBinVolume) ? xc : x1;

   if (!useBinIntegral) {
      fval = func ( x, p );
   }
   else {
      // calculate the integral (normalized by the bin volume)
      // need to set the function and parameters here in case the loop is parallelized
      fval = igEval( x1, x2) ;
   }
   // normalize the result if requested according to the bin volume
   if (useBinVolume) fval *= binVolume;

   // expected errors
   if (useExpErrors) {
      // we need first to check if a weight factor needs to be applied
      // weight = sumw2/sumw = error**2/content
      //NOTE: assume the histogram is not weighted
      // don't know how to handle bins with weight = 0
      //double invWeight = y * invError * invError;
      // if (invError == 0) invWeight = (data.SumOfError2() > 0) ? data.SumOfContent()/ data.SumOfError2() : 1.0;
      // compute the expected error as f(x) / weight
      double invError2 = (fval > 0) ? 1.0 / fval : 0.0;
      invError = std::sqrt(invError2);
   }


   double resval = ( y - fval ) * invError;

   // avoid infinities or nan in resval
   resval = CorrectValue(resval);

   // estimate the gradient
   if (g != 0) {

      unsigned int npar = func.NPar();
      const IGradModelFunction * gfunc = dynamic_cast<const IGradModelFunction *>( &func);

      if (gfunc != 0) {
         //case function provides the gradient
         if (!useBinIntegral ) {
            gfunc->ParameterGradient( x , p, g );
         }
         else {
            // needs to calculate the integral for each partial derivative
            CalculateGradientIntegral( *gfunc, x1, x2, p, g);
         }
      }
      else {
         SimpleGradientCalculator gc( npar, func);
         if (!useBinIntegral )
            gc.ParameterGradient(x, p, fval, g);
         else {
            // needs to calculate the integral for each partial derivative
            CalculateGradientIntegral( gc, x1, x2, p, g);
         }
      }
      // multiply by -1 * weight
      for (unsigned int k = 0; k < npar; ++k) {
         g[k] *= - invError;
         if (useBinVolume) g[k] *= binVolume;
      }
   }

   if (useBinVolume) delete [] xc;

   return resval;

}
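
// Illustrative note: for point i this function returns the residual
//    r_i = (y_i - f(x_i; p)) * invError_i
// and, when g is non-null, fills g[k] = d r_i / d p_k = - invError_i * df/dp_k,
// which is what FUMILI-type algorithms need to build their J^T J approximation
// of the Hessian.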

void FitUtil::EvaluateChi2Gradient(const IModelFunction &f, const BinData &data, const double *p, double *grad,
                                   unsigned int &nPoints, ROOT::Fit::ExecutionPolicy executionPolicy, unsigned nChunks)
{
   // evaluate the gradient of the chi2 function
   // this function is used when the model function knows how to calculate its derivatives, so
   // the minimizer can avoid recomputing them
   //
   // the case of an effective chi2 (errors on the coordinates) is not supported

   if (data.HaveCoordErrors()) {
      MATH_ERROR_MSG("FitUtil::EvaluateChi2Gradient",
                     "Errors on the coordinates are not used in calculating the Chi2 gradient");
      return; // it will assert otherwise later in GetPoint
   }

   const IGradModelFunction *fg = dynamic_cast<const IGradModelFunction *>(&f);
   assert(fg != nullptr); // must be called by a gradient function

   const IGradModelFunction &func = *fg;

#ifdef DEBUG
   std::cout << "\n\nFit data size = " << data.Size() << std::endl;
   std::cout << "evaluate chi2 using function gradient " << &func << " " << p << std::endl;
#endif

   const DataOptions &fitOpt = data.Opt();
   bool useBinIntegral = fitOpt.fIntegral && data.HasBinEdges();
   bool useBinVolume = (fitOpt.fBinVolume && data.HasBinEdges());

   double wrefVolume = 1.0;
   if (useBinVolume) {
      if (fitOpt.fNormBinVolume) wrefVolume /= data.RefVolume();
   }

   IntegralEvaluator<> igEval(func, p, useBinIntegral);

   unsigned int npar = func.NPar();
   unsigned initialNPoints = data.Size();

   std::vector<bool> isPointRejected(initialNPoints);

   auto mapFunction = [&](const unsigned int i) {
      // set all vector values to zero
      std::vector<double> gradFunc(npar);
      std::vector<double> pointContribution(npar);

      const auto x1 = data.GetCoordComponent(i, 0);
      const auto y = data.Value(i);
      auto invError = data.Error(i);

      invError = (invError != 0.0) ? 1.0 / invError : 1;

      double fval = 0;

      const double *x = nullptr;
      std::vector<double> xc;

      unsigned int ndim = data.NDim();
      double binVolume = 1;
      if (useBinVolume) {
         const double *x2 = data.BinUpEdge(i);

         xc.resize(ndim);
         for (unsigned int j = 0; j < ndim; ++j) {
            auto x1_j = *data.GetCoordComponent(i, j);
            binVolume *= std::abs(x2[j] - x1_j);
            xc[j] = 0.5 * (x2[j] + x1_j);
         }

         x = xc.data();

         // normalize the bin volume using a reference value
         binVolume *= wrefVolume;
      } else if (ndim > 1) {
         xc.resize(ndim);
         xc[0] = *x1;
         for (unsigned int j = 1; j < ndim; ++j)
            xc[j] = *data.GetCoordComponent(i, j);
         x = xc.data();
      } else {
         x = x1;
      }

      if (!useBinIntegral) {
         fval = func(x, p);
         func.ParameterGradient(x, p, &gradFunc[0]);
      } else {
         auto x2 = data.BinUpEdge(i);
         // calculate the normalized integral and gradient (divided by the bin volume)
         // need to set the function and parameters here in case the loop is parallelized
         fval = igEval(x, x2);
         CalculateGradientIntegral(func, x, x2, p, &gradFunc[0]);
      }
      if (useBinVolume)
         fval *= binVolume;

#ifdef DEBUG
      std::cout << x[0] << " " << y << " " << 1. / invError << " params : ";
      for (unsigned int ipar = 0; ipar < npar; ++ipar)
         std::cout << p[ipar] << "\t";
      std::cout << "\tfval = " << fval << std::endl;
#endif
      if (!CheckInfNaNValue(fval)) {
         isPointRejected[i] = true;
         // return a zero contribution to all partial derivatives on behalf of the current point
         return pointContribution;
      }

      // loop on the parameters
      unsigned int ipar = 0;
      for (; ipar < npar; ++ipar) {

         // correct the gradient for the bin volumes
         if (useBinVolume)
            gradFunc[ipar] *= binVolume;

         // avoid singularities in the function (infinity and nan) in the chi2 sum;
         // a possibility of excluding some points could be added later
         double dfval = gradFunc[ipar];
         if (!CheckInfNaNValue(dfval)) {
            break; // exit the loop on the parameters
         }

         // calculate the derivative contribution of this point
         pointContribution[ipar] = -2.0 * (y - fval) * invError * invError * gradFunc[ipar];
      }

      if (ipar < npar) {
         // the loop was exited due to an overflow in the gradient calculation
         isPointRejected[i] = true;
      }

      return pointContribution;
   };

   // Vertically reduce the set of vectors by summing their equally-indexed components
   auto redFunction = [&](const std::vector<std::vector<double>> &pointContributions) {
      std::vector<double> result(npar);

      for (auto const &pointContribution : pointContributions) {
         for (unsigned int parameterIndex = 0; parameterIndex < npar; parameterIndex++)
            result[parameterIndex] += pointContribution[parameterIndex];
      }

      return result;
   };

   std::vector<double> g(npar);

#ifndef R__USE_IMT
   // If IMT is disabled, force the execution policy to the serial case
   if (executionPolicy == ROOT::Fit::ExecutionPolicy::kMultithread) {
      Warning("FitUtil::EvaluateChi2Gradient", "Multithread execution policy requires IMT, which is disabled. Changing "
                                               "to ROOT::Fit::ExecutionPolicy::kSerial.");
      executionPolicy = ROOT::Fit::ExecutionPolicy::kSerial;
   }
#endif

   if (executionPolicy == ROOT::Fit::ExecutionPolicy::kSerial) {
      std::vector<std::vector<double>> allGradients(initialNPoints);
      for (unsigned int i = 0; i < initialNPoints; ++i) {
         allGradients[i] = mapFunction(i);
      }
      g = redFunction(allGradients);
   }
#ifdef R__USE_IMT
   else if (executionPolicy == ROOT::Fit::ExecutionPolicy::kMultithread) {
      ROOT::TThreadExecutor pool;
      auto chunks = nChunks != 0 ? nChunks : setAutomaticChunking(initialNPoints);
      g = pool.MapReduce(mapFunction, ROOT::TSeq<unsigned>(0, initialNPoints), redFunction, chunks);
   }
#endif
   // else if(executionPolicy == ROOT::Fit::kMultiprocess){
   //    ROOT::TProcessExecutor pool;
   //    g = pool.MapReduce(mapFunction, ROOT::TSeq<unsigned>(0, n), redFunction);
   // }
   else {
      Error("FitUtil::EvaluateChi2Gradient",
            "Execution policy unknown. Available choices:\n 0: Serial (default)\n 1: MultiThread (requires IMT)\n");
   }

#ifndef R__USE_IMT
   //to fix compiler warning
   (void)nChunks;
#endif

   // correct the number of points
   nPoints = initialNPoints;

   if (std::any_of(isPointRejected.begin(), isPointRejected.end(), [](bool point) { return point; })) {
      unsigned nRejected = std::accumulate(isPointRejected.begin(), isPointRejected.end(), 0);
      assert(nRejected <= initialNPoints);
      nPoints = initialNPoints - nRejected;

      if (nPoints < npar)
         MATH_ERROR_MSG("FitUtil::EvaluateChi2Gradient",
                        "Error - too many points rejected for overflow in gradient calculation");
   }

   // copy the result
   std::copy(g.begin(), g.end(), grad);
}
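
// Illustrative note: the gradient assembled above is
//    d(chi2)/dp_k = -2 * Sum_i (y_i - f(x_i; p)) * invError_i^2 * df/dp_k(x_i; p)
// with points rejected (and nPoints reduced accordingly) whenever f or one of
// its derivatives overflows.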

//______________________________________________________________________________________________________
//
// Log Likelihood functions
//_______________________________________________________________________________________________________

// utility functions used by the likelihoods

// for LogLikelihood functions

double FitUtil::EvaluatePdf(const IModelFunction & func, const UnBinData & data, const double * p, unsigned int i, double * g) {
   // evaluate the pdf contribution to the generic logl function for unbinned data
   // it actually returns the log of the pdf and, optionally, its derivatives


   //func.SetParameters(p);


   const double * x = data.Coords(i);
   double fval = func ( x, p );
   double logPdf = ROOT::Math::Util::EvalLog(fval);
   //return
   if (g == 0) return logPdf;

   const IGradModelFunction * gfunc = dynamic_cast<const IGradModelFunction *>( &func);

   // gradient calculation
   if (gfunc != 0) {
      //case function provides the gradient
      gfunc->ParameterGradient( x , p, g );
   }
   else {
      // estimate the gradient numerically with a simple 2-point rule
      // calculating the gradient of log(pdf) directly would probably be more stable numerically
      SimpleGradientCalculator gc(func.NPar(), func);
      gc.ParameterGradient(x, p, fval, g );
   }
   // divide the gradient by the function value since we are returning the log
   for (unsigned int ipar = 0; ipar < func.NPar(); ++ipar) {
      g[ipar] /= fval; // this should be checked against infinities
   }

#ifdef DEBUG
   std::cout << x[0] << "\t";
   std::cout << "\tpar = [ " << func.NPar() << " ] = ";
   for (unsigned int ipar = 0; ipar < func.NPar(); ++ipar)
      std::cout << p[ipar] << "\t";
   std::cout << "\tfval = " << fval;
   std::cout << "\tgrad = [ ";
   for (unsigned int ipar = 0; ipar < func.NPar(); ++ipar)
      std::cout << g[ipar] << "\t";
   std::cout << " ] " << std::endl;
#endif


   return logPdf;
}
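
// Illustrative sketch: for an unbinned point x_i the function returns
//    log f(x_i; p)
// and, when g is non-null, g[k] = d(log f)/dp_k = (df/dp_k) / f, e.g.
// (with a hypothetical IModelFunction `myPdf` and UnBinData `unBinData`):
//
//    double gradLog[2];
//    double logp = ROOT::Fit::FitUtil::EvaluatePdf(myPdf, unBinData, par, i, gradLog);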

double FitUtil::EvaluateLogL(const IModelFunctionTempl<double> &func, const UnBinData &data, const double *p,
                             int iWeight, bool extended, unsigned int &nPoints,
                             ROOT::Fit::ExecutionPolicy executionPolicy, unsigned nChunks)
{
   // evaluate the LogLikelihood

   unsigned int n = data.Size();

   //unsigned int nRejected = 0;

   bool normalizeFunc = false;

   // set the parameters of the function to cache the integral value
#ifdef USE_PARAMCACHE
   (const_cast<IModelFunctionTempl<double> &>(func)).SetParameters(p);
#endif

   nPoints = data.Size(); // npoints

#ifdef R__USE_IMT
   // in case the parameters need to be propagated to the user function, call the function once
   // here to set them; this is done in sequential mode, so the parameters are set in a thread-safe manner
   if (!normalizeFunc) {
      if (data.NDim() == 1) {
         const double * x = data.GetCoordComponent(0,0);
         func( x, p);
      }
      else {
         std::vector<double> x(data.NDim());
         for (unsigned int j = 0; j < data.NDim(); ++j)
            x[j] = *data.GetCoordComponent(0, j);
         func( x.data(), p);
      }
   }
#endif

   double norm = 1.0;
   if (normalizeFunc) {
      // compute the integral of the function
      std::vector<double> xmin(data.NDim());
      std::vector<double> xmax(data.NDim());
      IntegralEvaluator<> igEval(func, p, true);
      // compute the integral in the ranges where the function is defined
      if (data.Range().Size() > 0) {
         norm = 0;
         for (unsigned int ir = 0; ir < data.Range().Size(); ++ir) {
            data.Range().GetRange(&xmin[0], &xmax[0], ir);
            norm += igEval.Integral(xmin.data(), xmax.data());
         }
      } else {
         // use (-inf, +inf)
         data.Range().GetRange(&xmin[0], &xmax[0]);
         // check that the function is zero at +- inf
         if (func(xmin.data(), p) != 0 || func(xmax.data(), p) != 0) {
            MATH_ERROR_MSG("FitUtil::EvaluateLogLikelihood",
                           "A range has not been set and the function is not zero at +/- inf");
            return 0;
         }
         norm = igEval.Integral(&xmin[0], &xmax[0]);
      }
   }

   // needed to compute the effective global weight in case of an extended likelihood

   auto mapFunction = [&](const unsigned i) {
      double W = 0;
      double W2 = 0;
      double fval = 0;

      if (data.NDim() > 1) {
         std::vector<double> x(data.NDim());
         for (unsigned int j = 0; j < data.NDim(); ++j)
            x[j] = *data.GetCoordComponent(i, j);
#ifdef USE_PARAMCACHE
         fval = func(x.data());
#else
         fval = func(x.data(), p);
#endif

         // one-dim case
      } else {
         const auto x = data.GetCoordComponent(i, 0);
#ifdef USE_PARAMCACHE
         fval = func(x);
#else
         fval = func(x, p);
#endif
      }

      if (normalizeFunc)
         fval = fval * (1 / norm);

      // the function EvalLog protects against negative or too small values of fval
      double logval = ROOT::Math::Util::EvalLog(fval);
      if (iWeight > 0) {
         double weight = data.Weight(i);
         logval *= weight;
         if (iWeight == 2) {
            logval *= weight; // use the square of the weights in the likelihood
            if (!extended) {
               // the sum of weights and the sum of squared weights are needed if the likelihood is extended
               W = weight;
               W2 = weight * weight;
            }
         }
      }
      return LikelihoodAux<double>(logval, W, W2);
   };

#ifdef R__USE_IMT
   // auto redFunction = [](const std::vector<LikelihoodAux<double>> & objs){
   //    return std::accumulate(objs.begin(), objs.end(), LikelihoodAux<double>(0.0,0.0,0.0),
   //    [](const LikelihoodAux<double> &l1, const LikelihoodAux<double> &l2){
   //       return l1+l2;
   //    });
   // };
   // do not use std::accumulate, to be sure to always maintain the same order
   auto redFunction = [](const std::vector<LikelihoodAux<double>> & objs){
      auto l0 = LikelihoodAux<double>(0.0,0.0,0.0);
      for ( auto & l : objs ) {
         l0 = l0 + l;
      }
      return l0;
   };
#else
   (void)nChunks;

   // If IMT is disabled, force the execution policy to the serial case
   if (executionPolicy == ROOT::Fit::ExecutionPolicy::kMultithread) {
      Warning("FitUtil::EvaluateLogL", "Multithread execution policy requires IMT, which is disabled. Changing "
                                       "to ROOT::Fit::ExecutionPolicy::kSerial.");
      executionPolicy = ROOT::Fit::ExecutionPolicy::kSerial;
   }
#endif

   double logl{};
   double sumW{};
   double sumW2{};
   if(executionPolicy == ROOT::Fit::ExecutionPolicy::kSerial){
      for (unsigned int i=0; i<n; ++i) {
         auto resArray = mapFunction(i);
         logl += resArray.logvalue;
         sumW += resArray.weight;
         sumW2 += resArray.weight2;
      }
#ifdef R__USE_IMT
   } else if(executionPolicy == ROOT::Fit::ExecutionPolicy::kMultithread) {
      ROOT::TThreadExecutor pool;
      auto chunks = nChunks != 0 ? nChunks : setAutomaticChunking(data.Size());
      auto resArray = pool.MapReduce(mapFunction, ROOT::TSeq<unsigned>(0, n), redFunction, chunks);
      logl = resArray.logvalue;
      sumW = resArray.weight;
      sumW2 = resArray.weight2;
#endif
   // } else if(executionPolicy == ROOT::Fit::kMultiProcess){
   //    ROOT::TProcessExecutor pool;
   //    res = pool.MapReduce(mapFunction, ROOT::TSeq<unsigned>(0, n), redFunction);
   } else{
      Error("FitUtil::EvaluateLogL","Execution policy unknown. Available choices:\n ROOT::Fit::ExecutionPolicy::kSerial (default)\n ROOT::Fit::ExecutionPolicy::kMultithread (requires IMT)\n");
   }

   if (extended) {
      // add the Poisson extended term
      double extendedTerm = 0; // extended term in the likelihood
      double nuTot = 0;
      // nuTot is the integral of the function over the range
      // if the function has been normalized, the integral has already been computed
      if (!normalizeFunc) {
         IntegralEvaluator<> igEval( func, p, true);
         std::vector<double> xmin(data.NDim());
         std::vector<double> xmax(data.NDim());

         // compute the integral in the ranges where the function is defined
         if (data.Range().Size() > 0 ) {
            nuTot = 0;
            for (unsigned int ir = 0; ir < data.Range().Size(); ++ir) {
               data.Range().GetRange(&xmin[0],&xmax[0],ir);
               nuTot += igEval.Integral(xmin.data(),xmax.data());
            }
         } else {
            // use (-inf, +inf)
            data.Range().GetRange(&xmin[0],&xmax[0]);
            // check that the function is zero at +- inf
            if (func(xmin.data(), p) != 0 || func(xmax.data(), p) != 0) {
               MATH_ERROR_MSG("FitUtil::EvaluateLogLikelihood","A range has not been set and the function is not zero at +/- inf");
               return 0;
            }
            nuTot = igEval.Integral(&xmin[0],&xmax[0]);
         }

         // force to be the last parameter value
         //nutot = p[func.NDim()-1];
         if (iWeight != 2)
            extendedTerm = - nuTot; // no need to add n log(nu) in this case, since it is already computed above
         else {
            // case of using the weight squared in the likelihood: compute the total effective weight = sw2/sw
            // ignore for the moment the case when sumW is zero
            extendedTerm = - (sumW2 / sumW) * nuTot;
         }

      }
      else {
         nuTot = norm;
         extendedTerm = - nuTot + double(n) * ROOT::Math::Util::EvalLog( nuTot);
         // in case of weights one needs to use the sum of the weights here (to be done)
      }
      logl += extendedTerm;

   }

#ifdef DEBUG
   std::cout << "Evaluated log L for parameters (";
   for (unsigned int ip = 0; ip < func.NPar(); ++ip)
      std::cout << " " << p[ip];
   std::cout << ") fval = " << -logl << std::endl;
#endif

   return -logl;
}
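
// Illustrative note: the value returned above is -logL with
//    logL(p) = Sum_i w_i * log f(x_i; p) + extendedTerm
// where, for an extended unnormalized fit, extendedTerm = -nu(p) with
//    nu(p) = Integral f(x; p) dx
// (the n*log(nu) piece is implicitly contained in the sum since f is not
// normalized); with squared weights the term becomes -(sumW2/sumW) * nu(p).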

void FitUtil::EvaluateLogLGradient(const IModelFunction &f, const UnBinData &data, const double *p, double *grad,
                                   unsigned int &, ROOT::Fit::ExecutionPolicy executionPolicy, unsigned nChunks)
{
   // evaluate the gradient of the log likelihood function

   const IGradModelFunction *fg = dynamic_cast<const IGradModelFunction *>(&f);
   assert(fg != nullptr); // must be called by a gradient function

   const IGradModelFunction &func = *fg;

   unsigned int npar = func.NPar();
   unsigned initialNPoints = data.Size();

   (const_cast<IGradModelFunction &>(func)).SetParameters(p);

#ifdef DEBUG
   std::cout << "\n===> Evaluate Gradient for parameters ";
   for (unsigned int ip = 0; ip < npar; ++ip)
      std::cout << " " << p[ip];
   std::cout << "\n";
#endif

   const double kdmax1 = std::sqrt(std::numeric_limits<double>::max());
   const double kdmax2 = std::numeric_limits<double>::max() / (4 * initialNPoints);

   auto mapFunction = [&](const unsigned int i) {
      std::vector<double> gradFunc(npar);
      std::vector<double> pointContribution(npar);


      const double * x = nullptr;
      std::vector<double> xc;
      if (data.NDim() > 1) {
         xc.resize(data.NDim() );
         for (unsigned int j = 0; j < data.NDim(); ++j)
            xc[j] = *data.GetCoordComponent(i, j);
         x = xc.data();
      } else {
         x = data.GetCoordComponent(i, 0);
      }

      double fval = func(x, p);
      func.ParameterGradient(x, p, &gradFunc[0]);

#ifdef DEBUG
      {
         R__LOCKGUARD(gROOTMutex);
         if (i < 5 || (i > data.Size()-5) ) {
            if (data.NDim() > 1) std::cout << i << " x " << x[0] << " y " << x[1] << " func " << fval
                                           << " gradient " << gradFunc[0] << " " << gradFunc[1] << " " << gradFunc[3] << std::endl;
            else std::cout << i << " x " << x[0] << " gradient " << gradFunc[0] << " " << gradFunc[1] << " " << gradFunc[3] << std::endl;
         }
      }
#endif

      for (unsigned int kpar = 0; kpar < npar; ++kpar) {
         if (fval > 0)
            pointContribution[kpar] = -1. / fval * gradFunc[kpar];
         else if (gradFunc[kpar] != 0) {
            double gg = kdmax1 * gradFunc[kpar];
            if (gg > 0)
               gg = std::min(gg, kdmax2);
            else
               gg = std::max(gg, -kdmax2);
            pointContribution[kpar] = -gg;
         }
         // if the function derivative is zero the term is also zero, so nothing is added to g[kpar]
      }

      return pointContribution;
   };

   // Vertically reduce the set of vectors by summing their equally-indexed components
   auto redFunction = [&](const std::vector<std::vector<double>> &pointContributions) {
      std::vector<double> result(npar);

      for (auto const &pointContribution : pointContributions) {
         for (unsigned int parameterIndex = 0; parameterIndex < npar; parameterIndex++)
            result[parameterIndex] += pointContribution[parameterIndex];
      }

      return result;
   };

   std::vector<double> g(npar);

#ifndef R__USE_IMT
   // If IMT is disabled, force the execution policy to the serial case
   if (executionPolicy == ROOT::Fit::ExecutionPolicy::kMultithread) {
      Warning("FitUtil::EvaluateLogLGradient", "Multithread execution policy requires IMT, which is disabled. Changing "
                                               "to ROOT::Fit::ExecutionPolicy::kSerial.");
      executionPolicy = ROOT::Fit::ExecutionPolicy::kSerial;
   }
#endif

   if (executionPolicy == ROOT::Fit::ExecutionPolicy::kSerial) {
      std::vector<std::vector<double>> allGradients(initialNPoints);
      for (unsigned int i = 0; i < initialNPoints; ++i) {
         allGradients[i] = mapFunction(i);
      }
      g = redFunction(allGradients);
   }
#ifdef R__USE_IMT
   else if (executionPolicy == ROOT::Fit::ExecutionPolicy::kMultithread) {
      ROOT::TThreadExecutor pool;
      auto chunks = nChunks != 0 ? nChunks : setAutomaticChunking(initialNPoints);
      g = pool.MapReduce(mapFunction, ROOT::TSeq<unsigned>(0, initialNPoints), redFunction, chunks);
   }
#endif
   // else if(executionPolicy == ROOT::Fit::ExecutionPolicy::kMultiprocess){
   //    ROOT::TProcessExecutor pool;
   //    g = pool.MapReduce(mapFunction, ROOT::TSeq<unsigned>(0, n), redFunction);
   // }
   else {
      Error("FitUtil::EvaluateLogLGradient", "Execution policy unknown. Available choices:\n "
                                             "ROOT::Fit::ExecutionPolicy::kSerial (default)\n "
                                             "ROOT::Fit::ExecutionPolicy::kMultithread (requires IMT)\n");
   }

#ifndef R__USE_IMT
   // to fix compiler warning
   (void)nChunks;
#endif

   // copy the result
   std::copy(g.begin(), g.end(), grad);

#ifdef DEBUG
   std::cout << "FitUtil.cxx : Final gradient ";
   for (unsigned int param = 0; param < npar; param++) {
      std::cout << " " << grad[param];
   }
   std::cout << "\n";
#endif
}
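
// Illustrative note: each point contributes
//    d(-logL)/dp_k = - (df/dp_k)(x_i; p) / f(x_i; p)
// to the gradient, with the capped kdmax1/kdmax2 fallback above guarding
// against non-positive values of f.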
//_________________________________________________________________________________________________
// for binned log likelihood functions
////////////////////////////////////////////////////////////////////////////////
/// evaluate the pdf (Poisson) contribution to the logl (it actually returns the log of the pdf)
/// and its gradient

double FitUtil::EvaluatePoissonBinPdf(const IModelFunction & func, const BinData & data, const double * p, unsigned int i, double * g ) {
   double y = 0;
   const double * x1 = data.GetPoint(i,y);

   const DataOptions & fitOpt = data.Opt();
   bool useBinIntegral = fitOpt.fIntegral && data.HasBinEdges();
   bool useBinVolume = (fitOpt.fBinVolume && data.HasBinEdges());

   IntegralEvaluator<> igEval( func, p, useBinIntegral);
   double fval = 0;
   const double * x2 = 0;
   // calculate the bin volume
   double binVolume = 1;
   std::vector<double> xc;
   if (useBinVolume) {
      unsigned int ndim = data.NDim();
      xc.resize(ndim);
      x2 = data.BinUpEdge(i);
      for (unsigned int j = 0; j < ndim; ++j) {
         binVolume *= std::abs( x2[j]-x1[j] );
         xc[j] = 0.5*(x2[j]+ x1[j]);
      }
      // normalize the bin volume using a reference value
      binVolume /= data.RefVolume();
   }

   const double * x = (useBinVolume) ? &xc.front() : x1;

   if (!useBinIntegral ) {
      fval = func ( x, p );
   }
   else {
      // calculate the normalized integral (divided by the bin volume)
      x2 = data.BinUpEdge(i);
      fval = igEval( x1, x2 ) ;
   }
   if (useBinVolume) fval *= binVolume;

   // logPdf for Poisson: ignore the constant term depending on N
   fval = std::max(fval, 0.0); // avoid negative or too small values
   double logPdf = - fval;
   if (y > 0.0) {
      // include also the constants due to the saturated model (see the Baker-Cousins paper)
      logPdf += y * ROOT::Math::Util::EvalLog( fval / y) + y;
   }
   // if one needed to return the pdf contribution (not the log):

   //double pdfval = std::exp(logPdf);

   //if (g == 0) return pdfval;
   if (g == 0) return logPdf;

   unsigned int npar = func.NPar();
   const IGradModelFunction * gfunc = dynamic_cast<const IGradModelFunction *>( &func);

   // gradient calculation
   if (gfunc != 0) {
      //case function provides the gradient
      if (!useBinIntegral )
         gfunc->ParameterGradient( x , p, g );
      else {
         // needs to calculate the integral for each partial derivative
         CalculateGradientIntegral( *gfunc, x1, x2, p, g);
      }

   }
   else {
      SimpleGradientCalculator gc(func.NPar(), func);
      if (!useBinIntegral )
         gc.ParameterGradient(x, p, fval, g);
      else {
         // needs to calculate the integral for each partial derivative
         CalculateGradientIntegral( gc, x1, x2, p, g);
      }
   }
   // correct g[] to be the derivative of the Poisson term
   for (unsigned int k = 0; k < npar; ++k) {
      // apply the bin volume correction
      if (useBinVolume) g[k] *= binVolume;

      // correct for the Poisson term
      if ( fval > 0)
         g[k] *= ( y/fval - 1.) ;//* pdfval;
      else if (y > 0) {
         const double kdmax1 = std::sqrt( std::numeric_limits<double>::max() );
         g[k] *= kdmax1;
      }
      else // y == 0: cannot have a negative y
         g[k] *= -1;
   }


#ifdef DEBUG
   std::cout << "x = " << x[0] << " logPdf = " << logPdf << " grad";
   for (unsigned int ipar = 0; ipar < npar; ++ipar)
      std::cout << g[ipar] << "\t";
   std::cout << std::endl;
#endif

   // return pdfval;
   return logPdf;
}
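
// Illustrative note: the value computed above is the Baker-Cousins log-likelihood-ratio term
//    logPdf_i = -f(x_i; p) + y_i * log( f(x_i; p) / y_i ) + y_i
// which vanishes for the saturated model (f = y) and reduces to -f for empty bins.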

double FitUtil::EvaluatePoissonLogL(const IModelFunction &func, const BinData &data, const double *p, int iWeight,
                                    bool extended, unsigned int &nPoints, ROOT::Fit::ExecutionPolicy executionPolicy,
                                    unsigned nChunks)
{
   // evaluate the Poisson Log Likelihood
   // for binned likelihood fits
   // this is Sum ( f(x_i) - y_i * log( f(x_i) ) )
   // and the constant term for the saturated model is added as well, to make it behave like a Chi2/2
   // by default the fit is extended. If extended is false the fit is not extended and
   // the global Poisson term is removed (i.e. it is a binomial fit)
   // (remember that in this case one needs a function with a fixed normalization,
   // like in a non-extended unbinned fit)
   //
   // if weights are used, the dataset is weighted:
   // iWeight = 1 ==> logL = Sum( w f(x_i) )
   // the case iWeight==1 is actually identical to weight==0
   // iWeight = 2 ==> logL = Sum( w*w * f(x_i) )
   //
   // nPoints returns the number of points where the bin content is not zero


   unsigned int n = data.Size();

#ifdef USE_PARAMCACHE
   (const_cast<IModelFunction &>(func)).SetParameters(p);
#endif

   nPoints = data.Size(); // npoints


   // get the fit option and check the case of using the integral of the bins
   const DataOptions &fitOpt = data.Opt();
   bool useBinIntegral = fitOpt.fIntegral && data.HasBinEdges();
   bool useBinVolume = (fitOpt.fBinVolume && data.HasBinEdges());
   bool useW2 = (iWeight == 2);

   // normalize if needed by a reference volume value
   double wrefVolume = 1.0;
   if (useBinVolume) {
      if (fitOpt.fNormBinVolume) wrefVolume /= data.RefVolume();
   }

#ifdef DEBUG
   std::cout << "Evaluate PoissonLogL for params = [ ";
   for (unsigned int j = 0; j < func.NPar(); ++j) std::cout << p[j] << " , ";
   std::cout << "] - data size = " << n << " useBinIntegral " << useBinIntegral << " useBinVolume "
             << useBinVolume << " useW2 " << useW2 << " wrefVolume = " << wrefVolume << std::endl;
#endif

#ifdef USE_PARAMCACHE
   IntegralEvaluator<> igEval(func, 0, useBinIntegral);
#else
   IntegralEvaluator<> igEval(func, p, useBinIntegral);
#endif

   auto mapFunction = [&](const unsigned i) {
      auto x1 = data.GetCoordComponent(i, 0);
      auto y = *data.ValuePtr(i);

      const double *x = nullptr;
      std::vector<double> xc;
      double fval = 0;
      double binVolume = 1.0;

      if (useBinVolume) {
         unsigned int ndim = data.NDim();
         const double *x2 = data.BinUpEdge(i);
         xc.resize(data.NDim());
         for (unsigned int j = 0; j < ndim; ++j) {
            auto xx = *data.GetCoordComponent(i, j);
            binVolume *= std::abs(x2[j] - xx);
            xc[j] = 0.5 * (x2[j] + xx);
         }
         x = xc.data();
         // normalize the bin volume using a reference value
         binVolume *= wrefVolume;
      } else if (data.NDim() > 1) {
         xc.resize(data.NDim());
         xc[0] = *x1;
         for (unsigned int j = 1; j < data.NDim(); ++j) {
            xc[j] = *data.GetCoordComponent(i, j);
         }
         x = xc.data();
      } else {
         x = x1;
      }

      if (!useBinIntegral) {
#ifdef USE_PARAMCACHE
         fval = func(x);
#else
         fval = func(x, p);
#endif
      } else {
         // calculate the integral (normalized by the bin volume)
         // need to set the function and parameters here in case the loop is parallelized
         fval = igEval(x, data.BinUpEdge(i));
      }
      if (useBinVolume) fval *= binVolume;



#ifdef DEBUG
      int nSample = 100;
      if (i % nSample == 0) {
         std::cout << "evt " << i << " x = [ ";
         for (unsigned int j = 0; j < func.NDim(); ++j) std::cout << x[j] << " , ";
         std::cout << "] ";
         if (fitOpt.fIntegral) {
            std::cout << "x2 = [ ";
            for (unsigned int j = 0; j < func.NDim(); ++j) std::cout << data.BinUpEdge(i)[j] << " , ";
            std::cout << "] ";
         }
         std::cout << " y = " << y << " fval = " << fval << std::endl;
      }
#endif


      // EvalLog protects against zero values of fval, but we do not want negative values of
      // fval to enter the -log sum
      fval = std::max(fval, 0.0);

      double nloglike = 0; // negative loglikelihood
      if (useW2) {
         // apply the weight correction: the effective weight is error^2 / y
         // and the expected number of events in the bin is fval/weight
         // the correction can be applied only when y is not zero, otherwise the weight is undefined
         // (in the case of a weighted likelihood the constant term due to
         // the saturated model is irrelevant)

         // use the global weight for the empty bins
         double weight = 1.0;
         if (y != 0) {
            double error = data.Error(i);
            weight = (error * error) / y; // this is the bin effective weight
            nloglike += weight * y * ( ROOT::Math::Util::EvalLog(y) - ROOT::Math::Util::EvalLog(fval) );
         }
         else {
            // for an empty bin use the average weight computed from the total data weight
            weight = data.SumOfError2()/ data.SumOfContent();
         }
         if (extended) {
            nloglike += weight * ( fval - y);
         }

      } else {
         // standard case: no weights or iWeight=1
         // this is needed for the Poisson likelihood (which is extended, unlike the multinomial case)
         // the formula below includes the constant term due to the likelihood of the saturated model (f(x) = y)
         // (same formula as in the Baker-Cousins paper, page 439, except for a factor of 2)
         if (extended) nloglike = fval - y;

         if (y > 0) {
            nloglike += y * (ROOT::Math::Util::EvalLog(y) - ROOT::Math::Util::EvalLog(fval));
         }
      }
      return nloglike;
   };

#ifdef R__USE_IMT
   auto redFunction = [](const std::vector<double> &objs) {
      return std::accumulate(objs.begin(), objs.end(), double{});
   };
#else
   (void)nChunks;

   // If IMT is disabled, force the execution policy to the serial case
   if (executionPolicy == ROOT::Fit::ExecutionPolicy::kMultithread) {
      Warning("FitUtil::EvaluatePoissonLogL", "Multithread execution policy requires IMT, which is disabled. Changing "
                                              "to ROOT::Fit::ExecutionPolicy::kSerial.");
      executionPolicy = ROOT::Fit::ExecutionPolicy::kSerial;
   }
#endif

   double res{};
   if (executionPolicy == ROOT::Fit::ExecutionPolicy::kSerial) {
      for (unsigned int i = 0; i < n; ++i) {
         res += mapFunction(i);
      }
#ifdef R__USE_IMT
   } else if (executionPolicy == ROOT::Fit::ExecutionPolicy::kMultithread) {
      ROOT::TThreadExecutor pool;
      auto chunks = nChunks != 0 ? nChunks : setAutomaticChunking(data.Size());
      res = pool.MapReduce(mapFunction, ROOT::TSeq<unsigned>(0, n), redFunction, chunks);
#endif
   // } else if(executionPolicy == ROOT::Fit::kMultiProcess){
   //    ROOT::TProcessExecutor pool;
   //    res = pool.MapReduce(mapFunction, ROOT::TSeq<unsigned>(0, n), redFunction);
   } else {
      Error("FitUtil::EvaluatePoissonLogL",
            "Execution policy unknown. Available choices:\n ROOT::Fit::ExecutionPolicy::kSerial (default)\n ROOT::Fit::ExecutionPolicy::kMultithread (requires IMT)\n");
   }

#ifdef DEBUG
   std::cout << "Loglikelihood = " << res << std::endl;
#endif

   return res;
}
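
// Illustrative note: in the default extended case the sum computed above is
//    -logL(p) = Sum_i [ f(x_i; p) - y_i + y_i * log( y_i / f(x_i; p) ) ]
// i.e. one half of the Baker-Cousins chi2, so the result is directly
// comparable to a least-squares fit.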

void FitUtil::EvaluatePoissonLogLGradient(const IModelFunction &f, const BinData &data, const double *p, double *grad,
                                          unsigned int &, ROOT::Fit::ExecutionPolicy executionPolicy, unsigned nChunks)
{
   // evaluate the gradient of the Poisson log likelihood function

   const IGradModelFunction *fg = dynamic_cast<const IGradModelFunction *>(&f);
   assert(fg != nullptr); // must be called by a gradient function

   const IGradModelFunction &func = *fg;

#ifdef USE_PARAMCACHE
   (const_cast<IGradModelFunction &>(func)).SetParameters(p);
#endif

   const DataOptions &fitOpt = data.Opt();
   bool useBinIntegral = fitOpt.fIntegral && data.HasBinEdges();
   bool useBinVolume = (fitOpt.fBinVolume && data.HasBinEdges());

   double wrefVolume = 1.0;
   if (useBinVolume && fitOpt.fNormBinVolume)
      wrefVolume /= data.RefVolume();

   IntegralEvaluator<> igEval(func, p, useBinIntegral);

   unsigned int npar = func.NPar();
   unsigned initialNPoints = data.Size();

   auto mapFunction = [&](const unsigned int i) {
      // set all vector values to zero
      std::vector<double> gradFunc(npar);
      std::vector<double> pointContribution(npar);

      const auto x1 = data.GetCoordComponent(i, 0);
      const auto y = data.Value(i);
      auto invError = data.Error(i);

      invError = (invError != 0.0) ? 1.0 / invError : 1;

      double fval = 0;

      const double *x = nullptr;
      std::vector<double> xc;

      unsigned ndim = data.NDim();
      double binVolume = 1.0;
      if (useBinVolume) {
         const double *x2 = data.BinUpEdge(i);

         xc.resize(ndim);
         for (unsigned int j = 0; j < ndim; ++j) {
            auto x1_j = *data.GetCoordComponent(i, j);
            binVolume *= std::abs(x2[j] - x1_j);
            xc[j] = 0.5 * (x2[j] + x1_j);
         }

         x = xc.data();

         // normalize the bin volume using a reference value
         binVolume *= wrefVolume;
      } else if (ndim > 1) {
         xc.resize(ndim);
         xc[0] = *x1;
         for (unsigned int j = 1; j < ndim; ++j)
            xc[j] = *data.GetCoordComponent(i, j);
         x = xc.data();
      } else {
         x = x1;
      }

      if (!useBinIntegral) {
         fval = func(x, p);
         func.ParameterGradient(x, p, &gradFunc[0]);
      } else {
         // calculate the integral (normalized by the bin volume)
         // need to set the function and parameters here in case the loop is parallelized
         auto x2 = data.BinUpEdge(i);
         fval = igEval(x, x2);
         CalculateGradientIntegral(func, x, x2, p, &gradFunc[0]);
      }
      if (useBinVolume)
         fval *= binVolume;

#ifdef DEBUG
      {
         R__LOCKGUARD(gROOTMutex);
         if (i < 5 || (i > data.Size()-5) ) {
            if (data.NDim() > 1) std::cout << i << " x " << x[0] << " y " << x[1] << " func " << fval
                                           << " gradient " << gradFunc[0] << " " << gradFunc[1] << " " << gradFunc[3] << std::endl;
            else std::cout << i << " x " << x[0] << " gradient " << gradFunc[0] << " " << gradFunc[1] << " " << gradFunc[3] << std::endl;
         }
      }
#endif

      // correct the gradient
      for (unsigned int ipar = 0; ipar < npar; ++ipar) {

         // correct the gradient for the bin volumes
         if (useBinVolume)
            gradFunc[ipar] *= binVolume;

         // df/dp * (1. - y/f )
         if (fval > 0)
            pointContribution[ipar] = gradFunc[ipar] * (1. - y / fval);
         else if (gradFunc[ipar] != 0) {
            const double kdmax1 = std::sqrt(std::numeric_limits<double>::max());
            const double kdmax2 = std::numeric_limits<double>::max() / (4 * initialNPoints);
            double gg = kdmax1 * gradFunc[ipar];
            if (gg > 0)
               gg = std::min(gg, kdmax2);
            else
               gg = std::max(gg, -kdmax2);
            pointContribution[ipar] = -gg;
         }
      }


      return pointContribution;
   };
1681
1682 // Vertically reduce the set of vectors by summing its equally-indexed components
1683 auto redFunction = [&](const std::vector<std::vector<double>> &pointContributions) {
1684 std::vector<double> result(npar);
1685
1686 for (auto const &pointContribution : pointContributions) {
1687 for (unsigned int parameterIndex = 0; parameterIndex < npar; parameterIndex++)
1688 result[parameterIndex] += pointContribution[parameterIndex];
1689 }
1690
1691 return result;
1692 };
1693
1694 std::vector<double> g(npar);
1695
1696#ifndef R__USE_IMT
1697 // If IMT is disabled, force the execution policy to the serial case
1698 if (executionPolicy == ROOT::Fit::ExecutionPolicy::kMultithread) {
1699 Warning("FitUtil::EvaluatePoissonLogLGradient",
1700 "Multithread execution policy requires IMT, which is disabled. Changing "
1701 "to ROOT::Fit::ExecutionPolicy::kSerial.");
1702 executionPolicy = ROOT::Fit::ExecutionPolicy::kSerial;
1703 }
1704#endif
1705
1706 if (executionPolicy == ROOT::Fit::ExecutionPolicy::kSerial) {
1707 std::vector<std::vector<double>> allGradients(initialNPoints);
1708 for (unsigned int i = 0; i < initialNPoints; ++i) {
1709 allGradients[i] = mapFunction(i);
1710 }
1711 g = redFunction(allGradients);
1712 }
1713#ifdef R__USE_IMT
1714 else if (executionPolicy == ROOT::Fit::ExecutionPolicy::kMultithread) {
1715 ROOT::TThreadExecutor pool;
1716 auto chunks = nChunks != 0 ? nChunks : setAutomaticChunking(initialNPoints);
1717 g = pool.MapReduce(mapFunction, ROOT::TSeq<unsigned>(0, initialNPoints), redFunction, chunks);
1718 }
1719#endif
1720
1721 // else if(executionPolicy == ROOT::Fit::kMultiprocess){
1722 // ROOT::TProcessExecutor pool;
1723 // g = pool.MapReduce(mapFunction, ROOT::TSeq<unsigned>(0, n), redFunction);
1724 // }
1725 else {
1726 Error("FitUtil::EvaluatePoissonLogLGradient",
1727 "Execution policy unknown. Available choices:\n 0: Serial (default)\n 1: MultiThread (requires IMT)\n");
1728 }
1729
1730#ifndef R__USE_IMT
1731 //to fix compiler warning
1732 (void)nChunks;
1733#endif
1734
1735 // copy result
1736 std::copy(g.begin(), g.end(), grad);
1737
1738#ifdef DEBUG
1739 std::cout << "***** Final gradient : ";
1740 for (unsigned int ii = 0; ii< npar; ++ii) std::cout << grad[ii] << " ";
1741 std::cout << "\n";
1742#endif
1743
1744}
1745
1746
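// heuristic for the number of chunks passed to TThreadExecutor::MapReduce:
// one chunk per pool thread for small data sets, otherwise chunks of roughly
// 1000 events each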
1747unsigned FitUtil::setAutomaticChunking(unsigned nEvents){
1748 auto ncpu = ROOT::GetImplicitMTPoolSize();
1749 if (nEvents/ncpu < 1000) return ncpu;
1750 return nEvents/1000;
1751 //return ((nEvents/ncpu + 1) % 1000) *40 ; //arbitrary formula
1752}
1753
1754}
1755
1756} // end namespace ROOT
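As a rough, self-contained illustration (not part of FitUtil.cxx itself), the sketch below drives EvaluatePoissonLogLGradient directly. A TF1 wrapped in ROOT::Math::WrappedMultiTF1 supplies the parameter gradients the implementation requires, and a small BinData set provides the bin contents. The model formula, the toy bin contents and the starting parameter values are invented for the example.

// Usage sketch (assumptions noted above); compile against ROOT, e.g.
//   g++ example.cxx $(root-config --cflags --libs)
#include "TF1.h"
#include "Math/WrappedMultiTF1.h"
#include "Fit/BinData.h"
#include "Fit/FitUtil.h"
#include <iostream>

int main() {
   // hypothetical 1D model p0 * exp(-p1 * x); the wrapper exposes TF1's
   // parameter derivatives through ParameterGradient
   TF1 f1("f1", "[0]*exp(-[1]*x)", 0., 3.);
   ROOT::Math::WrappedMultiTF1 wf(f1, 1);

   // toy binned data set: three 1D points (bin centre, content)
   ROOT::Fit::BinData data(3, 1);
   data.Add(0.5, 90.);
   data.Add(1.5, 35.);
   data.Add(2.5, 12.);

   double p[2] = {100., 1.};  // parameter values at which to evaluate
   double grad[2] = {0., 0.}; // filled with d(-lnL)/dp0, d(-lnL)/dp1
   unsigned int nPoints = 0;

   ROOT::Fit::FitUtil::EvaluatePoissonLogLGradient(wf, data, p, grad, nPoints);

   std::cout << "d(-lnL)/dp0 = " << grad[0]
             << "  d(-lnL)/dp1 = " << grad[1] << std::endl;
   return 0;
}

In a real fit this entry point is called internally by the fitting classes rather than by user code, but invoking it standalone in this way can be a convenient cross-check of parameter gradients.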