
namespace TMath

Encapsulate math routines.


Function Members (Methods)

public:
Short_t Abs(Short_t d)
Int_t Abs(Int_t d)
Long_t Abs(Long_t d)
Long64_t Abs(Long64_t d)
Float_t Abs(Float_t d)
Double_t Abs(Double_t d)
Double_t ACos(Double_t x)
Double_t ACosH(Double_t)
Bool_t AreEqualAbs(Double_t af, Double_t bf, Double_t epsilon)
Bool_t AreEqualRel(Double_t af, Double_t bf, Double_t relPrec)
Double_t ASin(Double_t x)
Double_t ASinH(Double_t)
Double_t ATan(Double_t x)
Double_t ATan2(Double_t y, Double_t x)
Double_t ATanH(Double_t)
Double_t BesselI(Int_t n, Double_t x)
Double_t BesselI0(Double_t x)
Double_t BesselI1(Double_t x)
Double_t BesselJ0(Double_t x)
Double_t BesselJ1(Double_t x)
Double_t BesselK(Int_t n, Double_t x)
Double_t BesselK0(Double_t x)
Double_t BesselK1(Double_t x)
Double_t BesselY0(Double_t x)
Double_t BesselY1(Double_t x)
Double_t Beta(Double_t p, Double_t q)
Double_t BetaCf(Double_t x, Double_t a, Double_t b)
Double_t BetaDist(Double_t x, Double_t p, Double_t q)
Double_t BetaDistI(Double_t x, Double_t p, Double_t q)
Double_t BetaIncomplete(Double_t x, Double_t a, Double_t b)
Long64_t BinarySearch(Long64_t n, const short* array, short value)
Long64_t BinarySearch(Long64_t n, const short** array, short value)
Long64_t BinarySearch(Long64_t n, const int* array, int value)
Long64_t BinarySearch(Long64_t n, const int** array, int value)
Long64_t BinarySearch(Long64_t n, const float* array, float value)
Long64_t BinarySearch(Long64_t n, const float** array, float value)
Long64_t BinarySearch(Long64_t n, const double* array, double value)
Long64_t BinarySearch(Long64_t n, const double** array, double value)
Long64_t BinarySearch(Long64_t n, const long* array, long value)
Long64_t BinarySearch(Long64_t n, const long** array, long value)
Long64_t BinarySearch(Long64_t n, const long long* array, long long value)
Long64_t BinarySearch(Long64_t n, const long long** array, long long value)
Double_t Binomial(Int_t n, Int_t k)
Double_t BinomialI(Double_t p, Int_t n, Int_t k)
Double_t BreitWigner(Double_t x, Double_t mean = 0, Double_t gamma = 1)
void BubbleHigh(Int_t Narr, Double_t* arr1, Int_t* arr2)
void BubbleLow(Int_t Narr, Double_t* arr1, Int_t* arr2)
Double_t C()
Double_t CauchyDist(Double_t x, Double_t t = 0, Double_t s = 1)
Double_t Ccgs()
Double_t Ceil(Double_t x)
Int_t CeilNint(Double_t x)
Double_t ChisquareQuantile(Double_t p, Double_t ndf)
Double_t Cos(Double_t x)
Double_t CosH(Double_t x)
float* Cross(const float* v1, const float* v2, float* out)
double* Cross(const double* v1, const double* v2, double* out)
Double_t CUncertainty()
Double_t DegToRad()
Double_t DiLog(Double_t x)
Double_t E()
Double_t Erf(Double_t x)
Double_t Erfc(Double_t x)
Double_t ErfcInverse(Double_t x)
Double_t ErfInverse(Double_t x)
Double_t EulerGamma()
Bool_t Even(Long_t a)
Double_t Exp(Double_t x)
Double_t Factorial(Int_t i)
Double_t FDist(Double_t F, Double_t N, Double_t M)
Double_t FDistI(Double_t F, Double_t N, Double_t M)
Int_t Finite(Double_t x)
Double_t Floor(Double_t x)
Int_t FloorNint(Double_t x)
Double_t Freq(Double_t x)
Double_t G()
Double_t Gamma(Double_t z)
Double_t Gamma(Double_t a, Double_t x)
Double_t GammaDist(Double_t x, Double_t gamma, Double_t mu = 0, Double_t beta = 1)
Double_t Gaus(Double_t x, Double_t mean = 0, Double_t sigma = 1, Bool_t norm = kFALSE)
Double_t Gcgs()
Double_t GeomMean(Long64_t n, const short* a)
Double_t GeomMean(Long64_t n, const int* a)
Double_t GeomMean(Long64_t n, const float* a)
Double_t GeomMean(Long64_t n, const double* a)
Double_t GeomMean(Long64_t n, const long* a)
Double_t GeomMean(Long64_t n, const long long* a)
Double_t GhbarC()
Double_t GhbarCUncertainty()
Double_t Gn()
Double_t GnUncertainty()
Double_t GUncertainty()
Double_t H()
ULong_t Hash(const char* str)
ULong_t Hash(const void* txt, Int_t ntxt)
Double_t Hbar()
Double_t Hbarcgs()
Double_t HbarUncertainty()
Double_t HC()
Double_t HCcgs()
Double_t Hcgs()
Double_t HUncertainty()
Double_t Hypot(Double_t x, Double_t y)
Long_t Hypot(Long_t x, Long_t y)
Double_t Infinity()
Double_t InvPi()
Bool_t IsInside(float xp, float yp, Int_t np, float* x, float* y)
Bool_t IsInside(int xp, int yp, Int_t np, int* x, int* y)
Int_t IsNaN(Double_t x)
Double_t K()
Double_t Kcgs()
Double_t KolmogorovProb(Double_t z)
Double_t KolmogorovTest(Int_t na, const Double_t* a, Int_t nb, const Double_t* b, Option_t* option)
short KOrdStat(long long n, const short* a, long long k, long long* work = 0)
int KOrdStat(long long n, const int* a, long long k, long long* work = 0)
float KOrdStat(long long n, const float* a, long long k, long long* work = 0)
double KOrdStat(long long n, const double* a, long long k, long long* work = 0)
long KOrdStat(long long n, const long* a, long long k, long long* work = 0)
long long KOrdStat(long long n, const long long* a, long long k, long long* work = 0)
Double_t KUncertainty()
Double_t Landau(Double_t x, Double_t mpv = 0, Double_t sigma = 1, Bool_t norm = kFALSE)
Double_t LandauI(Double_t x)
Double_t LaplaceDist(Double_t x, Double_t alpha = 0, Double_t beta = 1)
Double_t LaplaceDistI(Double_t x, Double_t alpha = 0, Double_t beta = 1)
Double_t Ldexp(Double_t x, Int_t exp)
Double_t Ln10()
Double_t LnGamma(Double_t z)
Long64_t LocMax(Long64_t n, const short* a)
Long64_t LocMax(Long64_t n, const int* a)
Long64_t LocMax(Long64_t n, const float* a)
Long64_t LocMax(Long64_t n, const double* a)
Long64_t LocMax(Long64_t n, const long* a)
Long64_t LocMax(Long64_t n, const long long* a)
Long64_t LocMin(Long64_t n, const short* a)
Long64_t LocMin(Long64_t n, const int* a)
Long64_t LocMin(Long64_t n, const float* a)
Long64_t LocMin(Long64_t n, const double* a)
Long64_t LocMin(Long64_t n, const long* a)
Long64_t LocMin(Long64_t n, const long long* a)
Double_t Log(Double_t x)
Double_t Log10(Double_t x)
Double_t Log2(Double_t x)
Double_t LogE()
Double_t LogNormal(Double_t x, Double_t sigma, Double_t theta = 0, Double_t m = 1)
Short_t Max(Short_t a, Short_t b)
UShort_t Max(UShort_t a, UShort_t b)
Int_t Max(Int_t a, Int_t b)
UInt_t Max(UInt_t a, UInt_t b)
Long_t Max(Long_t a, Long_t b)
ULong_t Max(ULong_t a, ULong_t b)
Long64_t Max(Long64_t a, Long64_t b)
ULong64_t Max(ULong64_t a, ULong64_t b)
Float_t Max(Float_t a, Float_t b)
Double_t Max(Double_t a, Double_t b)
short MaxElement(Long64_t n, const short* a)
int MaxElement(Long64_t n, const int* a)
float MaxElement(Long64_t n, const float* a)
double MaxElement(Long64_t n, const double* a)
long MaxElement(Long64_t n, const long* a)
long long MaxElement(Long64_t n, const long long* a)
Double_t Mean(Long64_t n, const short* a, const Double_t* w = 0)
Double_t Mean(Long64_t n, const int* a, const Double_t* w = 0)
Double_t Mean(Long64_t n, const float* a, const Double_t* w = 0)
Double_t Mean(Long64_t n, const double* a, const Double_t* w = 0)
Double_t Mean(Long64_t n, const long* a, const Double_t* w = 0)
Double_t Mean(Long64_t n, const long long* a, const Double_t* w = 0)
Double_t Median(Long64_t n, const short* a, const Double_t* w = 0, Long64_t* work = 0)
Double_t Median(Long64_t n, const int* a, const Double_t* w = 0, Long64_t* work = 0)
Double_t Median(Long64_t n, const float* a, const Double_t* w = 0, Long64_t* work = 0)
Double_t Median(Long64_t n, const double* a, const Double_t* w = 0, Long64_t* work = 0)
Double_t Median(Long64_t n, const long* a, const Double_t* w = 0, Long64_t* work = 0)
Double_t Median(Long64_t n, const long long* a, const Double_t* w = 0, Long64_t* work = 0)
Short_t Min(Short_t a, Short_t b)
UShort_t Min(UShort_t a, UShort_t b)
Int_t Min(Int_t a, Int_t b)
UInt_t Min(UInt_t a, UInt_t b)
Long_t Min(Long_t a, Long_t b)
ULong_t Min(ULong_t a, ULong_t b)
Long64_t Min(Long64_t a, Long64_t b)
ULong64_t Min(ULong64_t a, ULong64_t b)
Float_t Min(Float_t a, Float_t b)
Double_t Min(Double_t a, Double_t b)
short MinElement(Long64_t n, const short* a)
int MinElement(Long64_t n, const int* a)
float MinElement(Long64_t n, const float* a)
double MinElement(Long64_t n, const double* a)
long MinElement(Long64_t n, const long* a)
long long MinElement(Long64_t n, const long long* a)
Double_t MWair()
Double_t Na()
Double_t NaUncertainty()
Long_t NextPrime(Long_t x)
Int_t Nint(Float_t x)
Int_t Nint(Double_t x)
float* Normal2Plane(const float* p1, const float* p2, const float* p3, float* normal)
double* Normal2Plane(const double* p1, const double* p2, const double* p3, double* normal)
Float_t Normalize(Float_t* v)
Double_t Normalize(Double_t* v)
float NormCross(const float* v1, const float* v2, float* out)
double NormCross(const double* v1, const double* v2, double* out)
Double_t NormQuantile(Double_t p)
Bool_t Odd(Long_t a)
Bool_t Permute(Int_t n, Int_t* a)
Double_t Pi()
Double_t PiOver2()
Double_t PiOver4()
Double_t Poisson(Double_t x, Double_t par)
Double_t PoissonI(Double_t x, Double_t par)
Double_t Power(Double_t x, Double_t y)
Double_t Prob(Double_t chi2, Int_t ndf)
Double_t Qe()
Double_t QeUncertainty()
void Quantiles(Int_t n, Int_t nprob, Double_t* x, Double_t* quantiles, Double_t* prob, Bool_t isSorted = kTRUE, Int_t* index = 0, Int_t type = 7)
Double_t QuietNaN()
Double_t R()
Double_t RadToDeg()
Short_t Range(Short_t lb, Short_t ub, Short_t x)
Int_t Range(Int_t lb, Int_t ub, Int_t x)
Long_t Range(Long_t lb, Long_t ub, Long_t x)
ULong_t Range(ULong_t lb, ULong_t ub, ULong_t x)
Double_t Range(Double_t lb, Double_t ub, Double_t x)
Double_t Rgair()
Double_t RMS(Long64_t n, const short* a)
Double_t RMS(Long64_t n, const int* a)
Double_t RMS(Long64_t n, const float* a)
Double_t RMS(Long64_t n, const double* a)
Double_t RMS(Long64_t n, const long* a)
Double_t RMS(Long64_t n, const long long* a)
Bool_t RootsCubic(const Double_t* coef, Double_t& a, Double_t& b, Double_t& c)
Double_t RUncertainty()
Double_t Sigma()
Double_t SigmaUncertainty()
Short_t Sign(Short_t a, Short_t b)
Int_t Sign(Int_t a, Int_t b)
Long_t Sign(Long_t a, Long_t b)
Long64_t Sign(Long64_t a, Long64_t b)
Float_t Sign(Float_t a, Float_t b)
Double_t Sign(Double_t a, Double_t b)
Double_t SignalingNaN()
Double_t Sin(Double_t x)
Double_t SinH(Double_t x)
void Sort(long long n, const short* a, long long* index, Bool_t down = kTRUE)
void Sort(long long n, const int* a, long long* index, Bool_t down = kTRUE)
void Sort(long long n, const float* a, long long* index, Bool_t down = kTRUE)
void Sort(long long n, const double* a, long long* index, Bool_t down = kTRUE)
void Sort(long long n, const long* a, long long* index, Bool_t down = kTRUE)
void Sort(long long n, const long long* a, long long* index, Bool_t down = kTRUE)
void Sort(int n, const short* a, int* index, Bool_t down = kTRUE)
void Sort(int n, const int* a, int* index, Bool_t down = kTRUE)
void Sort(int n, const float* a, int* index, Bool_t down = kTRUE)
void Sort(int n, const double* a, int* index, Bool_t down = kTRUE)
void Sort(int n, const long* a, int* index, Bool_t down = kTRUE)
void Sort(int n, const long long* a, int* index, Bool_t down = kTRUE)
Double_t Sqrt(Double_t x)
Double_t Sqrt2()
Double_t StruveH0(Double_t x)
Double_t StruveH1(Double_t x)
Double_t StruveL0(Double_t x)
Double_t StruveL1(Double_t x)
Double_t Student(Double_t T, Double_t ndf)
Double_t StudentI(Double_t T, Double_t ndf)
Double_t StudentQuantile(Double_t p, Double_t ndf, Bool_t lower_tail = kTRUE)
Double_t Tan(Double_t x)
Double_t TanH(Double_t x)
Double_t TwoPi()
Double_t Vavilov(Double_t x, Double_t kappa, Double_t beta2)
Double_t VavilovI(Double_t x, Double_t kappa, Double_t beta2)
Double_t Voigt(Double_t x, Double_t sigma, Double_t lg, Int_t R = 4)


Function documentation

Long_t Hypot(Long_t x, Long_t y)
Double_t Hypot(Double_t x, Double_t y)
Double_t ASinH(Double_t )
Double_t ACosH(Double_t )
Double_t ATanH(Double_t )
Double_t Log2(Double_t x)
Int_t Nint(Float_t x)
 Round to nearest integer. Rounds half integers to the nearest
 even integer.
Int_t Nint(Double_t x)
 Round to nearest integer. Rounds half integers to the nearest
 even integer.
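 A minimal usage sketch of the round-half-to-even rule stated above (the macro name and the
 input values are illustrative, not part of the original documentation):

    // nintExample.C -- half-integers go to the nearest even integer
    #include "TMath.h"
    #include <cstdio>
    void nintExample() {
       printf("%d %d %d\n",
              TMath::Nint(2.5),    // -> 2 (down to the even neighbour)
              TMath::Nint(3.5),    // -> 4 (up to the even neighbour)
              TMath::Nint(2.4));   // -> 2 (ordinary rounding)
    }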
Double_t DiLog(Double_t x)
 The DiLogarithm function
 Code translated by R.Brun from CERNLIB DILOG function C332
Double_t Erf(Double_t x)
 Computation of the error function erf(x).
 Erf(x) = (2/sqrt(pi)) Integral(exp(-t^2))dt between 0 and x
Double_t Erfc(Double_t x)
 Compute the complementary error function erfc(x).
 Erfc(x) = (2/sqrt(pi)) Integral(exp(-t^2))dt between x and infinity

Double_t ErfInverse(Double_t x)
 Returns the inverse error function.
 x must satisfy -1 < x < 1.
Double_t ErfcInverse(Double_t x)
 Returns the inverse of the complementary error function.
 x must satisfy 0 < x < 2.
 Implemented using the quantile of the normal distribution
 instead of ErfInverse for better numerical precision for large x.
Double_t Factorial(Int_t i)
 Compute factorial(n).
Double_t Freq(Double_t x)
 Computation of the normal frequency function freq(x).
 Freq(x) = (1/sqrt(2pi)) Integral(exp(-t^2/2))dt between -infinity and x.

 Translated from CERNLIB C300 by Rene Brun.
Double_t Gamma(Double_t z)
 Computation of gamma(z) for all z.

 C.Lanczos, SIAM Journal of Numerical Analysis B1 (1964), 86.

Double_t Gamma(Double_t a, Double_t x)
 Computation of the normalized lower incomplete gamma function P(a,x) as defined in the
 Handbook of Mathematical Functions by Abramowitz and Stegun, formula 6.5.1 on page 260 .
 Its normalization is such that TMath::Gamma(a,+infinity) = 1 .


 P(a,x) = (1/Gamma(a)) Integral(t^(a-1) exp(-t))dt between 0 and x


--- NvE 14-nov-1998 UU-SAP Utrecht
Double_t BreitWigner(Double_t x, Double_t mean = 0, Double_t gamma = 1)
 Calculate a Breit Wigner function with mean and gamma.
Double_t Gaus(Double_t x, Double_t mean = 0, Double_t sigma = 1, Bool_t norm = kFALSE)
 Calculate a gaussian function with mean and sigma.
 If norm=kTRUE (default is kFALSE) the result is divided
 by sqrt(2*Pi)*sigma.
Double_t Landau(Double_t x, Double_t mpv = 0, Double_t sigma = 1, Bool_t norm = kFALSE)
 The LANDAU function.
 mpv is a location parameter and corresponds approximately to the most probable value,
 and sigma is a scale parameter (not the sigma of the full distribution, which is not defined).
 Note that for mpv=0 and sigma=1 (default values) the exact location of the maximum
 of the distribution (most probable value) is at x = -0.22278.
 This function has been adapted from the CERNLIB routine G110 denlan.
 If norm=kTRUE (default is kFALSE) the result is divided by sigma
Double_t LnGamma(Double_t z)
 Computation of ln[gamma(z)] for all z.

 C.Lanczos, SIAM Journal of Numerical Analysis B1 (1964), 86.

 The accuracy of the result is better than 2e-10.

--- NvE 14-nov-1998 UU-SAP Utrecht
Float_t Normalize(Float_t v[3])
 Normalize a vector v in place.
 Returns the norm of the original vector.
Double_t Normalize(Double_t v[3])
 Normalize a vector v in place.
 Returns the norm of the original vector.
 This implementation (thanks Kevin Lynch <krlynch@bu.edu>) is protected
 against possible overflows.
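 A short sketch of the in-place normalization (illustrative values; the macro name is hypothetical):

    // normalizeExample.C -- normalize a 3-vector and keep its original norm
    #include "TMath.h"
    #include <cstdio>
    void normalizeExample() {
       Double_t v[3] = {3., 0., 4.};
       Double_t norm = TMath::Normalize(v);   // norm = 5, v becomes (0.6, 0, 0.8)
       printf("norm = %g, v = (%g, %g, %g)\n", norm, v[0], v[1], v[2]);
    }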
Double_t Poisson(Double_t x, Double_t par)
 compute the Poisson distribution function for (x,par)
 The Poisson PDF is implemented by means of Euler's Gamma-function
 (for the factorial), so for all integer arguments it is correct.
 BUT for non-integer values it IS NOT equal to the Poisson distribution.
 see TMath::PoissonI to get a non-smooth function.
 Note that for large values of par, it is better to call
     TMath::Gaus(x,par,sqrt(par),kTRUE)

Double_t PoissonI(Double_t x, Double_t par)
 compute the Poisson distribution function for (x,par)
 This is a non-smooth function.
 This function is equivalent to ROOT::Math::poisson_pdf

Double_t Prob(Double_t chi2, Int_t ndf)
 Computation of the probability for a certain Chi-squared (chi2)
 and number of degrees of freedom (ndf).

 Calculations are based on the incomplete gamma function P(a,x),
 where a=ndf/2 and x=chi2/2.

 P(a,x) represents the probability that the observed Chi-squared
 for a correct model should be less than the value chi2.

 The returned probability corresponds to 1-P(a,x),
 which denotes the probability that an observed Chi-squared exceeds
 the value chi2 by chance, even for a correct model.

--- NvE 14-nov-1998 UU-SAP Utrecht
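 A minimal sketch of a goodness-of-fit check with Prob (the chi2 and ndf values are arbitrary,
 and the macro name is illustrative):

    // probExample.C -- p-value for an observed chi-squared
    #include "TMath.h"
    #include <cstdio>
    void probExample() {
       Double_t chi2 = 12.3;
       Int_t    ndf  = 10;
       // probability that a correct model yields a chi-squared >= 12.3 by chance
       printf("P(chi2 >= %g | ndf = %d) = %g\n", chi2, ndf, TMath::Prob(chi2, ndf));
    }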
Double_t KolmogorovProb(Double_t z)
 Calculates the Kolmogorov distribution function,

 which gives the probability that Kolmogorov's test statistic will exceed
 the value z assuming the null hypothesis. This gives a very powerful
 test for comparing two one-dimensional distributions.
 see, for example, Eadie et al., "Statistical Methods in Experimental
 Physics", pp. 269-270.

 This function returns the confidence level for the null hypothesis, where:
   z = dn*sqrt(n), and
   dn  is the maximum deviation between a hypothetical distribution
       function and an experimental distribution with
   n    events

 NOTE: To compare two experimental distributions with m and n events,
       use z = sqrt(m*n/(m+n))*dn

 Accuracy: The function is far too accurate for any imaginable application.
           Probabilities less than 10^-15 are returned as zero.
           However, remember that the formula is only valid for "large" n.
 Theta function inversion formula is used for z <= 1

 This function was translated by Rene Brun from PROBKL in CERNLIB.
Double_t KolmogorovTest(Int_t na, const Double_t* a, Int_t nb, const Double_t* b, Option_t* option)
  Statistical test whether two one-dimensional sets of points are compatible
  with coming from the same parent distribution, using the Kolmogorov test.
  That is, it is used to compare two experimental distributions of unbinned data.

  Input:
  a,b: One-dimensional arrays of length na, nb, respectively.
       The elements of a and b must be given in ascending order.
  option is a character string to specify options
         "D" Put out a line of "Debug" printout
         "M" Return the Maximum Kolmogorov distance instead of prob

  Output:
 The returned value prob is a calculated confidence level which gives a
 statistical test for compatibility of a and b.
 Values of prob close to zero are taken as indicating a small probability
 of compatibility. For two point sets drawn randomly from the same parent
 distribution, the value of prob should be uniformly distributed between
 zero and one.
   In case of error the function returns -1.
   If the 2 sets have a different number of points, the minimum of
   the two sets is used.

 Method:
 The Kolmogorov test is used. The test statistic is the maximum deviation
 between the two integrated distribution functions, multiplied by the
 normalizing factor rdmax*sqrt(na*nb/(na+nb)).

  Code adapted by Rene Brun from CERNLIB routine TKOLMO (Fred James)
   (W.T. Eadie, D. Drijard, F.E. James, M. Roos and B. Sadoulet,
      Statistical Methods in Experimental Physics, (North-Holland,
      Amsterdam 1971) 269-271)

  Method Improvement by Jason A Detwiler (JADetwiler@lbl.gov)

   The nuts-and-bolts of the TMath::KolmogorovTest() algorithm is a for-loop
   over the two sorted arrays a and b representing empirical distribution
   functions. The for-loop handles 3 cases: when the next points to be
   evaluated satisfy a>b, a<b, or a=b:

      for (Int_t i=0;i<na+nb;i++) {
         if (a[ia-1] < b[ib-1]) {
            rdiff -= sa;
            ia++;
            if (ia > na) {ok = kTRUE; break;}
         } else if (a[ia-1] > b[ib-1]) {
            rdiff += sb;
            ib++;
            if (ib > nb) {ok = kTRUE; break;}
         } else {
            rdiff += sb - sa;
            ia++;
            ib++;
            if (ia > na) {ok = kTRUE; break;}
            if (ib > nb) {ok = kTRUE; break;}
        }
         rdmax = TMath::Max(rdmax,TMath::Abs(rdiff));
      }

   For the last case, a=b, the algorithm advances each array by one index in an
   attempt to move through the equality. However, this is incorrect when one or
   the other of a or b (or both) have a repeated value, call it x. For the KS
   statistic to be computed properly, rdiff needs to be calculated after all of
   the a and b at x have been tallied (this is due to the definition of the
   empirical distribution function; another way to convince yourself that the
   old CERNLIB method is wrong is that it implies that the function defined as the
   difference between a and b is multi-valued at x -- besides being ugly, this
   would invalidate Kolmogorov's theorem).

   The solution is to just add while-loops into the equality-case handling to
   perform the tally:

         } else {
            double x = a[ia-1];
            while(ia <= na && a[ia-1] == x) {
              rdiff -= sa;
              ia++;
            }
            while(ib <= nb && b[ib-1] == x) {
              rdiff += sb;
              ib++;
            }
            if (ia > na) {ok = kTRUE; break;}
            if (ib > nb) {ok = kTRUE; break;}
         }


  NOTE1
  A good description of the Kolmogorov test can be seen at:
    http://www.itl.nist.gov/div898/handbook/eda/section3/eda35g.htm
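 A minimal sketch comparing two small sorted samples (the sample values are illustrative,
 and the macro name is hypothetical):

    // kolmogorovExample.C -- compatibility test for two unbinned samples
    #include "TMath.h"
    #include <cstdio>
    void kolmogorovExample() {
       const Double_t a[5] = {0.1, 0.4, 0.7, 1.2, 1.9};         // must be ascending
       const Double_t b[6] = {0.2, 0.3, 0.8, 1.1, 1.5, 2.3};    // must be ascending
       Double_t prob = TMath::KolmogorovTest(5, a, 6, b, "");   // confidence level
       Double_t dmax = TMath::KolmogorovTest(5, a, 6, b, "M");  // maximum distance instead
       printf("prob = %g, max distance = %g\n", prob, dmax);
    }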
Double_t Voigt(Double_t x, Double_t sigma, Double_t lg, Int_t R = 4)
 Computation of Voigt function (normalised).
 Voigt is a convolution of
 gauss(xx) = 1/(sqrt(2*pi)*sigma) * exp(-xx*xx/(2*sigma*sigma))
 and
 lorentz(xx) = (1/pi) * (lg/2) / (xx*xx + lg*lg/4)
 functions.

 The Voigt function is known to be the real part of Faddeeva function also
 called complex error function [2].

 The algorithm was developed by J. Humlicek [1].
 This code is based on Fortran code presented by R. J. Wells [2].
 Translated and adapted by Miha D. Puc

 To calculate the Faddeeva function with relative error less than 10^(-R).
 R can be set by the user subject to the constraints 2 <= R <= 5.

 [1] J. Humlicek, JQSRT, 21, 437 (1982).
 [2] R.J. Wells "Rapid Approximation to the Voigt/Faddeeva Function and its
 Derivatives" JQSRT 62 (1999), pp 29-48.
 http://www-atm.physics.ox.ac.uk/user/wells/voigt.html
Bool_t RootsCubic(const Double_t* coef, Double_t& a, Double_t& b, Double_t& c)
 Calculates roots of polynomial of 3rd order a*x^3 + b*x^2 + c*x + d, where
 a == coef[3], b == coef[2], c == coef[1], d == coef[0]
 coef[3] must be different from 0.
 If the boolean returned by the method is false:
    ==> there are 3 real roots a,b,c
 If the boolean returned by the method is true:
    ==> there is one real root a and 2 complex conjugates roots (b+i*c,b-i*c)
 Author: Francois-Xavier Gentit
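 A short sketch with an arbitrary cubic, 2x^3 - 3x^2 - 11x + 6, passing the coefficients in
 the coef[0]=d ... coef[3]=a order described above (macro name illustrative):

    // rootsCubicExample.C
    #include "TMath.h"
    #include <cstdio>
    void rootsCubicExample() {
       Double_t coef[4] = {6., -11., -3., 2.};   // d, c, b, a
       Double_t a, b, c;
       if (TMath::RootsCubic(coef, a, b, c))
          printf("one real root %g, complex pair %g +- i*%g\n", a, b, c);
       else
          printf("three real roots: %g %g %g\n", a, b, c);
    }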
void Quantiles(Int_t n, Int_t nprob, Double_t* x, Double_t* quantiles, Double_t* prob, Bool_t isSorted = kTRUE, Int_t* index = 0, Int_t type = 7)
Computes sample quantiles, corresponding to the given probabilities
Parameters:
  x - the data sample
  n - its size
  quantiles - computed quantiles are returned in this array
  prob - probabilities where to compute quantiles
  nprob - size of prob array
  isSorted - is the input array x sorted?
  Note that when the input is not sorted, an array of integers of size n needs
        to be allocated. It can be passed by the user in parameter index,
        or, if not passed, it will be allocated inside the function.

  type - method to compute (from 1 to 9). Following types are provided:
  Discontinuous:
    type=1 - inverse of the empirical distribution function
    type=2 - like type 1, but with averaging at discontinuities
    type=3 - SAS definition: nearest even order statistic
  Piecewise linear continuous:
    In this case, sample quantiles can be obtained by linear interpolation
    between the k-th order statistic and p(k).
    type=4 - linear interpolation of empirical cdf, p(k)=k/n;
    type=5 - a very popular definition, p(k) = (k-0.5)/n;
    type=6 - used by Minitab and SPSS, p(k) = k/(n+1);
    type=7 - used by S-Plus and R, p(k) = (k-1)/(n-1);
    type=8 - resulting sample quantiles are approximately median unbiased
             regardless of the distribution of x. p(k) = (k-1/3)/(n+1/3);
    type=9 - resulting sample quantiles are approximately unbiased, when
             the sample comes from Normal distribution. p(k)=(k-3/8)/(n+1/4);

    default type = 7

 References:
 1) Hyndman, R.J and Fan, Y, (1996) "Sample quantiles in statistical packages"
                                     American Statistician, 50, 361-365
 2) R Project documentation for the function quantile of package {stats}
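 A minimal sketch computing the quartiles of an unsorted sample with the default type 7
 (the sample values and macro name are illustrative):

    // quantilesExample.C
    #include "TMath.h"
    #include <cstdio>
    void quantilesExample() {
       Double_t x[7]    = {7., 1., 3., 9., 5., 2., 8.};
       Double_t prob[3] = {0.25, 0.5, 0.75};
       Double_t q[3];
       // isSorted = kFALSE: an index array is allocated internally
       TMath::Quantiles(7, 3, x, q, prob, kFALSE);
       printf("Q1 = %g, median = %g, Q3 = %g\n", q[0], q[1], q[2]);
    }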
void BubbleHigh(Int_t Narr, Double_t* arr1, Int_t* arr2)
 Bubble sort variant to obtain the order of an array's elements into
 an index in order to do more useful things than the standard built-in
 functions.
 *arr1 is unchanged;
 *arr2 is the array of indices corresponding to the descending values
 of arr1, with arr2[0] corresponding to the largest arr1 value and
 arr2[Narr-1] to the smallest.

  Author: Adrian Bevan (bevan@slac.stanford.edu)
void BubbleLow(Int_t Narr, Double_t* arr1, Int_t* arr2)
 Opposite ordering of the array arr2[] to that of BubbleHigh.

  Author: Adrian Bevan (bevan@slac.stanford.edu)
ULong_t Hash(const void* txt, Int_t ntxt)
 Calculates hash index from any char string.
 Based on precalculated table of 256 specially selected numbers.
 These numbers are selected in such a way that for string
 length == 4 (integer number) the hash is unambiguous, i.e.
 from the hash value we can recalculate the input (no degeneration).

 The quality of hash method is good enough, that
 "random" numbers made as R = Hash(1), Hash(2), ...Hash(N)
 tested by <R>, <R*R>, <Ri*Ri+1> gives the same result
 as for libc rand().

 For string:  i = TMath::Hash(string,nstring);
 For int:     i = TMath::Hash(&intword,sizeof(int));
 For pointer: i = TMath::Hash(&pointer,sizeof(void*));

              V.Perev
 This function is kept for backward compatibility. The code previously in this function
 has been moved to the static function TString::Hash.
ULong_t Hash(const char* str)
Double_t BesselI0(Double_t x)
 Compute the modified Bessel function I_0(x) for any real x.

--- NvE 12-mar-2000 UU-SAP Utrecht
Double_t BesselK0(Double_t x)
 Compute the modified Bessel function K_0(x) for positive real x.

  M.Abramowitz and I.A.Stegun, Handbook of Mathematical Functions,
     Applied Mathematics Series vol. 55 (1964), Washington.

--- NvE 12-mar-2000 UU-SAP Utrecht
Double_t BesselI1(Double_t x)
 Compute the modified Bessel function I_1(x) for any real x.

  M.Abramowitz and I.A.Stegun, Handbook of Mathematical Functions,
     Applied Mathematics Series vol. 55 (1964), Washington.

--- NvE 12-mar-2000 UU-SAP Utrecht
Double_t BesselK1(Double_t x)
 Compute the modified Bessel function K_1(x) for positive real x.

  M.Abramowitz and I.A.Stegun, Handbook of Mathematical Functions,
     Applied Mathematics Series vol. 55 (1964), Washington.

--- NvE 12-mar-2000 UU-SAP Utrecht
Double_t BesselK(Int_t n, Double_t x)
 Compute the Integer Order Modified Bessel function K_n(x)
 for n=0,1,2,... and positive real x.

--- NvE 12-mar-2000 UU-SAP Utrecht
Double_t BesselI(Int_t n, Double_t x)
 Compute the Integer Order Modified Bessel function I_n(x)
 for n=0,1,2,... and any real x.

--- NvE 12-mar-2000 UU-SAP Utrecht
Double_t BesselJ0(Double_t x)
 Returns the Bessel function J0(x) for any real x.
Double_t BesselJ1(Double_t x)
 Returns the Bessel function J1(x) for any real x.
Double_t BesselY0(Double_t x)
 Returns the Bessel function Y0(x) for positive x.
Double_t BesselY1(Double_t x)
 Returns the Bessel function Y1(x) for positive x.
Double_t StruveH0(Double_t x)
 Struve Functions of Order 0

 Converted from CERNLIB M342 by Rene Brun.
Double_t StruveH1(Double_t x)
 Struve Functions of Order 1

 Converted from CERNLIB M342 by Rene Brun.
Double_t StruveL0(Double_t x)
 Modified Struve Function of Order 0.
 By Kirill Filimonov.
Double_t StruveL1(Double_t x)
 Modified Struve Function of Order 1.
 By Kirill Filimonov.
Double_t Beta(Double_t p, Double_t q)
 Calculates Beta-function Gamma(p)*Gamma(q)/Gamma(p+q).
Double_t BetaCf(Double_t x, Double_t a, Double_t b)
 Continued fraction evaluation by modified Lentz's method
 used in calculation of incomplete Beta function.
Double_t BetaDist(Double_t x, Double_t p, Double_t q)
 Computes the probability density function of the Beta distribution
 (the distribution function is computed in BetaDistI).
 The first argument is the point, where the function will be
 computed, second and third are the function parameters.
 Since the Beta distribution is bounded on both sides, it's often
 used to represent processes with natural lower and upper limits.
Double_t BetaDistI(Double_t x, Double_t p, Double_t q)
 Computes the distribution function of the Beta distribution.
 The first argument is the point, where the function will be
 computed, second and third are the function parameters.
 Since the Beta distribution is bounded on both sides, it's often
 used to represent processes with natural lower and upper limits.
Double_t BetaIncomplete(Double_t x, Double_t a, Double_t b)
 Calculates the incomplete Beta-function.
Double_t Binomial(Int_t n, Int_t k)
 Calculate the binomial coefficient n over k.
Double_t BinomialI(Double_t p, Int_t n, Int_t k)
 Suppose an event occurs with probability _p_ per trial.
 Then the probability P of its occurring _k_ or more times
 in _n_ trials is termed a cumulative binomial probability;
 the formula is P = sum_from_j=k_to_n (TMath::Binomial(n, j)*
 TMath::Power(p, j)*TMath::Power(1-p, n-j)).
 For _n_ larger than 12, BetaIncomplete is a much better way
 to evaluate the sum than the straightforward sum calculation;
 for _n_ smaller than 12 either method is acceptable
 ("Numerical Recipes")
     --implementation by Anna Kreshuk
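 A short numerical sketch: the probability of at least 7 heads in 10 fair coin tosses
 (illustrative values; the exact cumulative sum is 176/1024 = 0.171875, which the call
 should reproduce to printed precision):

    // binomialExample.C
    #include "TMath.h"
    #include <cstdio>
    void binomialExample() {
       Double_t p = TMath::BinomialI(0.5, 10, 7);   // P(k >= 7 | n = 10, p = 0.5)
       printf("P(k >= 7) = %g\n", p);
    }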
Double_t CauchyDist(Double_t x, Double_t t = 0, Double_t s = 1)
 Computes the density of Cauchy distribution at point x
 by default, standard Cauchy distribution is used (t=0, s=1)
    t is the location parameter
    s is the scale parameter
 The Cauchy distribution, also called Lorentzian distribution,
 is a continuous distribution describing resonance behavior
 The mean and standard deviation of the Cauchy distribution are undefined.
 The practical meaning of this is that collecting 1,000 data points gives
 no more accurate an estimate of the mean and standard deviation than
 does a single point.
 The formula was taken from "Engineering Statistics Handbook" on site
 http://www.itl.nist.gov/div898/handbook/eda/section3/eda3663.htm
 Implementation by Anna Kreshuk.
 Example:
    TF1* fc = new TF1("fc", "TMath::CauchyDist(x, [0], [1])", -5, 5);
    fc->SetParameters(0, 1);
    fc->Draw();
Double_t ChisquareQuantile(Double_t p, Double_t ndf)
 Evaluate the quantiles of the chi-squared probability distribution function.
 Algorithm AS 91   Appl. Statist. (1975) Vol.24, P.35
 implemented by Anna Kreshuk.
 Incorporates the suggested changes in AS R85 (vol.40(1), pp.233-5, 1991)
 Parameters:
   p   - the probability value, at which the quantile is computed
   ndf - number of degrees of freedom
Double_t FDist(Double_t F, Double_t N, Double_t M)
 Computes the density function of F-distribution
 (probability function, integral of density, is computed in FDistI).

 Parameters N and M stand for the degrees of freedom of the two chi-squared
 distributions mentioned below. Parameter F is the actual variable x of the
 density function p(x), i.e. the point at which the density function
 is calculated.

 About F distribution:
 F-distribution arises in testing whether two random samples
 have the same variance. It is the ratio of two chi-square
 distributions, with N and M degrees of freedom respectively,
 where each chi-square is first divided by its number of degrees
 of freedom.
 Implementation by Anna Kreshuk.
Double_t FDistI(Double_t F, Double_t N, Double_t M)
 Calculates the cumulative distribution function of F-distribution,
 this function occurs in the statistical test of whether two observed
 samples have the same variance. For this test a certain statistic F,
 the ratio of observed dispersion of the first sample to that of the
 second sample, is calculated. N and M stand for the numbers of degrees
 of freedom in the samples. 1-FDistI() is the significance level at
 which the hypothesis "1 has smaller variance than 2" can be rejected.
 A small numerical value of 1 - FDistI() implies a very significant
 rejection, in turn implying high confidence in the hypothesis
 "1 has variance greater than 2".
 Implementation by Anna Kreshuk.
Double_t GammaDist(Double_t x, Double_t gamma, Double_t mu = 0, Double_t beta = 1)
 Computes the density function of Gamma distribution at point x.
   gamma - shape parameter
   mu    - location parameter
   beta  - scale parameter

 The definition can be found in "Engineering Statistics Handbook" on site
 http://www.itl.nist.gov/div898/handbook/eda/section3/eda366b.htm
 Now implemented using ROOT::Math::gamma_pdf.

Double_t LaplaceDist(Double_t x, Double_t alpha = 0, Double_t beta = 1)
 Computes the probability density function of Laplace distribution
 at point x, with location parameter alpha and shape parameter beta.
 By default, alpha=0, beta=1
 This distribution is known under different names; the most common is
 the double exponential distribution, but it also appears as
 the two-tailed exponential or the bilateral exponential distribution.
Double_t LaplaceDistI(Double_t x, Double_t alpha = 0, Double_t beta = 1)
 Computes the distribution function of Laplace distribution
 at point x, with location parameter alpha and shape parameter beta.
 By default, alpha=0, beta=1
 This distribution is known under different names; the most common is
 the double exponential distribution, but it also appears as
 the two-tailed exponential or the bilateral exponential distribution.
Double_t LogNormal(Double_t x, Double_t sigma, Double_t theta = 0, Double_t m = 1)
 Computes the density of LogNormal distribution at point x.
 Variable X has lognormal distribution if Y=Ln(X) has normal distribution
 sigma is the shape parameter
 theta is the location parameter
 m is the scale parameter
 The formula was taken from "Engineering Statistics Handbook" on site
 http://www.itl.nist.gov/div898/handbook/eda/section3/eda3669.htm
 Implementation using ROOT::Math::lognormal_pdf

Double_t NormQuantile(Double_t p)
 Computes quantiles for standard normal distribution N(0, 1)
 at probability p
 ALGORITHM AS241  APPL. STATIST. (1988) VOL. 37, NO. 3, 477-484.
Bool_t Permute(Int_t n, Int_t* a)
 Simple recursive algorithm to find the permutations of
 n natural numbers, not necessarily all distinct
 adapted from CERNLIB routine PERMU.
 The input array has to be initialised with a non-descending
 sequence. The method returns kFALSE when
 all combinations are exhausted.
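 A minimal sketch looping over all permutations of {1,2,3} with the do-while pattern implied
 by the return value (macro name illustrative):

    // permuteExample.C
    #include "TMath.h"
    #include <cstdio>
    void permuteExample() {
       Int_t a[3] = {1, 2, 3};              // must start as a non-descending sequence
       do {
          printf("%d %d %d\n", a[0], a[1], a[2]);
       } while (TMath::Permute(3, a));      // kFALSE once all permutations are exhausted
    }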
Double_t Student(Double_t T, Double_t ndf)
 Computes the density function for Student's t-distribution
 (the probability function (integral of density) is computed in StudentI).

 First parameter stands for x - the actual variable of the
 density function p(x) and the point at which the density is calculated.
 Second parameter stands for number of degrees of freedom.

 About Student distribution:
 Student's t-distribution is used for many significance tests, for example,
 for the Student's t-tests for the statistical significance of difference
 between two sample means and for confidence intervals for the difference
 between two population means.

 Example: suppose we have a random sample of size n drawn from normal
 distribution with mean Mu and st.deviation Sigma. Then the variable

   t = (sample_mean - Mu)/(sample_deviation / sqrt(n))

 has Student's t-distribution with n-1 degrees of freedom.

 NOTE that this function's second argument is number of degrees of freedom,
 not the sample size.

 As the number of degrees of freedom grows, t-distribution approaches
 Normal(0,1) distribution.
 Implementation by Anna Kreshuk.
Double_t StudentI(Double_t T, Double_t ndf)
 Calculates the cumulative distribution function of Student's
 t-distribution. The second parameter stands for the number of degrees
 of freedom, not for the number of samples.
 If x has Student's t-distribution, the function returns the probability
 of x being less than T.
 Implementation by Anna Kreshuk.
Double_t StudentQuantile(Double_t p, Double_t ndf, Bool_t lower_tail = kTRUE)
 Computes quantiles of the Student's t-distribution
 1st argument is the probability, at which the quantile is computed
 2nd argument - the number of degrees of freedom of the
 Student distribution
 When the 3rd argument lower_tail is kTRUE (default),
 the algorithm returns such x0 that
   P(x < x0)=p
 For the upper tail (lower_tail is kFALSE), the algorithm returns such x0 that
   P(x > x0)=p
 the algorithm was taken from
   G.W.Hill, "Algorithm 396, Student's t-quantiles"
             "Communications of the ACM", 13(10), October 1970
Double_t Vavilov(Double_t x, Double_t kappa, Double_t beta2)
Returns the value of the Vavilov density function
Parameters: 1st - the point where the density function is evaluated
            2nd - value of kappa (distribution parameter)
            3rd - value of beta2 (distribution parameter)
The algorithm was taken from the CernLib function vavden(G115)
Reference: A.Rotondi and P.Montagna, Fast Calculation of Vavilov distribution
Nucl.Instr. and Meth. B47(1990), 215-224
Accuracy: quote from the reference above:
"The results of our code have been compared with the values of the Vavilov
density function computed numerically in an accurate way: our approximation
shows a difference of less than 3% around the peak of the density function, slowly
increasing going towards the extreme tails to the right and to the left"

Double_t VavilovI(Double_t x, Double_t kappa, Double_t beta2)
Returns the value of the Vavilov distribution function
Parameters: 1st - the point where the distribution function is evaluated
            2nd - value of kappa (distribution parameter)
            3rd - value of beta2 (distribution parameter)
The algorithm was taken from the CernLib function vavden(G115)
Reference: A.Rotondi and P.Montagna, Fast Calculation of Vavilov distribution
Nucl.Instr. and Meth. B47(1990), 215-224
Accuracy: quote from the reference above:
"The results of our code have been compared with the values of the Vavilov
density function computed numerically in an accurate way: our approximation
shows a difference of less than 3% around the peak of the density function, slowly
increasing going towards the extreme tails to the right and to the left"
Double_t LandauI(Double_t x)
Returns the value of the Landau distribution function at point x.
The algorithm was taken from the Cernlib function dislan(G110)
Reference: K.S.Kolbig and B.Schorr, "A program package for the Landau
distribution", Computer Phys.Comm., 31(1984), 97-111
Double_t Sin(Double_t x)
{ return sin(x); }
Double_t Cos(Double_t x)
{ return cos(x); }
Double_t Tan(Double_t x)
{ return tan(x); }
Double_t SinH(Double_t x)
{ return sinh(x); }
Double_t CosH(Double_t x)
{ return cosh(x); }
Double_t TanH(Double_t x)
{ return tanh(x); }
Double_t ASin(Double_t x)
Double_t ACos(Double_t x)
Double_t ATan(Double_t x)
{ return atan(x); }
Double_t ATan2(Double_t y, Double_t x)
Double_t Sqrt(Double_t x)
{ return sqrt(x); }
Double_t Ceil(Double_t x)
{ return ceil(x); }
Int_t CeilNint(Double_t x)
{ return TMath::Nint(ceil(x)); }
Double_t Floor(Double_t x)
{ return floor(x); }
Int_t FloorNint(Double_t x)
{ return TMath::Nint(floor(x)); }
Double_t Exp(Double_t x)
{ return exp(x); }
Double_t Ldexp(Double_t x, Int_t exp)
{ return ldexp(x, exp); }
Double_t Power(Double_t x, Double_t y)
{ return pow(x, y); }
Double_t Log(Double_t x)
{ return log(x); }
Double_t Log10(Double_t x)
{ return log10(x); }
Int_t Finite(Double_t x)
{ return isfinite(x); }
Int_t IsNaN(Double_t x)
 from math.h
{ return isnan(x); }
Double_t QuietNaN()
 Returns a quiet NaN as defined by the IEEE standard (wrapper around std::numeric_limits).

Double_t SignalingNaN()
 Returns a signaling NaN as defined by the IEEE standard (wrapper around std::numeric_limits).
Double_t Infinity()
 Returns an infinity as defined by the IEEE standard.
template <typename T> inline T NormCross(const T v1[3],const T v2[3],T out[3])
 Calculate the Normalized Cross Product of two vectors
T MinElement(Long64_t n, const T *a)
 Return minimum of array a of length n.
T MaxElement(Long64_t n, const T *a)
 Return maximum of array a of length n.
Long64_t LocMin(Long64_t n, const T *a)
 Return index of array with the minimum element.
 If more than one element is minimum returns first found.
Iterator LocMin(Iterator first, Iterator last)
 Return index of array with the minimum element.
 If more than one element is minimum returns first found.
Long64_t LocMax(Long64_t n, const T *a)
 Return index of array with the maximum element.
 If more than one element is maximum returns first found.
Iterator LocMax(Iterator first, Iterator last)
 Return index of array with the maximum element.
 If more than one element is maximum returns first found.
Double_t Mean(Iterator first, Iterator last)
 Return the mean of an array defined by the iterators.
Double_t Mean(Iterator first, Iterator last, WeightIterator w)
 Return the weighted mean of an array defined by the first and
 last iterators. The w iterator should point to the first element
 of a vector of weights of the same size as the main array.
Double_t Mean(Long64_t n, const T *a, const Double_t *w)
 Return the weighted mean of an array a with length n.
Double_t GeomMean(Iterator first, Iterator last)
 Return the geometric mean of an array defined by the iterators.
 geometric_mean = (Prod_i=0,n-1 |a[i]|)^1/n
Double_t GeomMean(Long64_t n, const T *a)
 Return the geometric mean of an array a of size n.
 geometric_mean = (Prod_i=0,n-1 |a[i]|)^1/n
Double_t RMS(Iterator first, Iterator last)
 Return the Standard Deviation of an array defined by the iterators.
 Note that this function returns the sigma(standard deviation) and
 not the root mean square of the array.
Double_t RMS(Long64_t n, const T *a)
 Return the Standard Deviation of an array a with length n.
 Note that this function returns the sigma(standard deviation) and
 not the root mean square of the array.
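 A short sketch of the array statistics above (values and macro name are illustrative; as
 stated, RMS returns the standard deviation):

    // statsExample.C
    #include "TMath.h"
    #include <cstdio>
    void statsExample() {
       const Double_t a[4] = {2., 4., 4., 6.};
       printf("mean      = %g\n", TMath::Mean(4LL, a));       // arithmetic mean -> 4
       printf("geom mean = %g\n", TMath::GeomMean(4LL, a));   // (2*4*4*6)^(1/4)
       printf("sigma     = %g\n", TMath::RMS(4LL, a));        // standard deviation, not sqrt(<a^2>)
    }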
Iterator BinarySearch(Iterator first, Iterator last, Element value)
 Binary search in an array defined by its iterators.

 The values in the iterators range are supposed to be sorted
 prior to this call.  If match is found, function returns
 position of element.  If no match found, function gives nearest
 element smaller than value.
template <typename T> Long64_t BinarySearch(Long64_t n, const T *array, T value)
 Binary search in an array of n values to locate value.

 Array is supposed  to be sorted prior to this call.
 If match is found, function returns position of element.
 If no match found, function gives nearest element smaller than value.
template <typename T> Long64_t BinarySearch(Long64_t n, const T **array, T value)
 Binary search in an array of n values to locate value.

 Array is supposed  to be sorted prior to this call.
 If match is found, function returns position of element.
 If no match found, function gives nearest element smaller than value.
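 A minimal sketch of both the hit and the miss behaviour on a sorted array (values and
 macro name are illustrative):

    // binarySearchExample.C
    #include "TMath.h"
    #include <cstdio>
    void binarySearchExample() {
       const Double_t a[6] = {1., 2., 3., 5., 8., 13.};
       Long64_t iHit  = TMath::BinarySearch(6LL, a, 5.);   // exact match -> index 3
       Long64_t iMiss = TMath::BinarySearch(6LL, a, 6.);   // no match -> index 3 (nearest smaller, 5)
       printf("hit: %lld  miss: %lld\n", iHit, iMiss);
    }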
template <typename Element, typename Index> void Sort(Index n, const Element* a, Index* index, Bool_t down)
 Sort the n elements of the  array a of generic templated type Element.
 In output the array index of type Index contains the indices of the sorted array.
 If down is false sort in increasing order (default is decreasing order).
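 A short sketch of the index sort; the data array itself is left untouched (values and
 macro name are illustrative):

    // sortExample.C
    #include "TMath.h"
    #include <cstdio>
    void sortExample() {
       const Int_t n = 5;
       Double_t a[n] = {3., 1., 4., 1.5, 9.};
       Int_t index[n];
       TMath::Sort(n, a, index);            // down = kTRUE: index[0] points to the largest value
       for (Int_t i = 0; i < n; ++i)
          printf("%g ", a[index[i]]);       // prints 9 4 3 1.5 1
       printf("\n");
    }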
template <typename T> T * Cross(const T v1[3],const T v2[3], T out[3])
 Calculate the Cross Product of two vectors:
         out = [v1 x v2]
template <typename T> T * Normal2Plane(const T p1[3],const T p2[3],const T p3[3], T normal[3])
 Calculate a normal vector of a plane.

  Input:
     Float_t *p1,*p2,*p3  -  three 3D points belonging to the plane that define it.

  Return:
     Pointer to 3D normal vector (normalized)
template <typename T> Bool_t IsInside(T xp, T yp, Int_t np, T *x, T *y)
 Function which returns kTRUE if point xp,yp lies inside the
 polygon defined by the np points in arrays x and y, kFALSE otherwise.
 Note that the polygon may be open or closed.
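 A minimal sketch testing two points against a unit square given as an open polygon
 (coordinates and macro name are illustrative):

    // isInsideExample.C
    #include "TMath.h"
    #include <cstdio>
    void isInsideExample() {
       Double_t x[4] = {0., 1., 1., 0.};
       Double_t y[4] = {0., 0., 1., 1.};
       printf("(0.5,0.5) inside: %d\n", TMath::IsInside(0.5, 0.5, 4, x, y));   // expected 1
       printf("(1.5,0.5) inside: %d\n", TMath::IsInside(1.5, 0.5, 4, x, y));   // expected 0
    }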
template <typename T> Double_t Median(Long64_t n, const T *a, const Double_t *w, Long64_t *work)
 Return the median of the array a where each entry i has weight w[i] .
 Both arrays have a length of at least n . The median is a number obtained
 from the sorted array a through

 median = (a[jl]+a[jh])/2.  where (using also the sorted index on the array w)

 sum_i=0,jl w[i] <= sumTot/2
 sum_i=0,jh w[i] >= sumTot/2
 sumTot = sum_i=0,n w[i]

 If w=0, the algorithm defaults to the median definition where it is
 a number that divides the sorted sequence into 2 halves.
 When n is odd or n > 1000, the median is the k-th element with k = (n + 1) / 2.
 When n is even and n < 1000, the median is the mean of the elements k = n/2 and k = n/2 + 1.

 If the weights are supplied (w not 0) all weights must be >= 0

 If work is supplied, it is used to store the sorting index and assumed to be
 >= n . If work=0, local storage is used, either on the stack if n < kWorkMax
 or on the heap for n >= kWorkMax .
Element KOrdStat(Size n, const Element *a, Size k, Size *work)
 Returns k_th order statistic of the array a of size n
 (k_th smallest element out of n elements).

 C-convention is used for array indexing, so if you want
 the second smallest element, call KOrdStat(n, a, 1).

 If work is supplied, it is used to store the sorting index and
 assumed to be >= n. If work=0, local storage is used, either on
 the stack if n < kWorkMax or on the heap for n >= kWorkMax.

 Taken from "Numerical Recipes in C++" without the index array
 implemented by Anna Khreshuk.

 See also the declarations at the top of this file
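 A short sketch of the C-style indexing convention (values and macro name are illustrative):

    // kOrdStatExample.C
    #include "TMath.h"
    #include <cstdio>
    void kOrdStatExample() {
       const Double_t a[6] = {7., 1., 4., 9., 2., 5.};
       Double_t second = TMath::KOrdStat(6LL, a, 1LL);   // k = 1 -> second smallest, i.e. 2
       printf("second smallest = %g\n", second);
    }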
Double_t Pi()
Fundamental constants

{ return 3.14159265358979323846; }
Double_t TwoPi()
{ return 2.0 * Pi(); }
Double_t PiOver2()
{ return Pi() / 2.0; }
Double_t PiOver4()
{ return Pi() / 4.0; }
Double_t InvPi()
{ return 1.0 / Pi(); }
Double_t RadToDeg()
{ return 180.0 / Pi(); }
Double_t DegToRad()
{ return Pi() / 180.0; }
Double_t Sqrt2()
{ return 1.4142135623730950488016887242097; }
Double_t E()
 e (base of natural log)
{ return 2.71828182845904523536; }
Double_t Ln10()
 natural log of 10 (to convert log to ln)
{ return 2.30258509299404568402; }
Double_t LogE()
 base-10 log of e  (to convert ln to log)
{ return 0.43429448190325182765; }
Double_t C()
 velocity of light
{ return 2.99792458e8; }
Double_t Ccgs()
{ return 100.0 * C(); }
Double_t CUncertainty()
{ return 0.0; }
Double_t G()
 gravitational constant
{ return 6.673e-11; }
Double_t Gcgs()
{ return G() / 1000.0; }
Double_t GUncertainty()
{ return 0.010e-11; }
Double_t GhbarC()
 G over h-bar C
{ return 6.707e-39; }
Double_t GhbarCUncertainty()
{ return 0.010e-39; }
Double_t Gn()
 standard acceleration of gravity
{ return 9.80665; }
Double_t GnUncertainty()
{ return 0.0; }
Double_t H()
 Planck's constant
{ return 6.62606876e-34; }
Double_t Hcgs()
{ return 1.0e7 * H(); }
Double_t HUncertainty()
{ return 0.00000052e-34; }
Double_t Hbar()
 h-bar (h over 2 pi)
{ return 1.054571596e-34; }
Double_t Hbarcgs()
{ return 1.0e7 * Hbar(); }
Double_t HbarUncertainty()
{ return 0.000000082e-34; }
Double_t HC()
 hc (h * c)
{ return H() * C(); }
Double_t HCcgs()
{ return Hcgs() * Ccgs(); }
Double_t K()
 Boltzmann's constant
{ return 1.3806503e-23; }
Double_t Kcgs()
{ return 1.0e7 * K(); }
Double_t KUncertainty()
{ return 0.0000024e-23; }
Double_t Sigma()
 Stefan-Boltzmann constant
{ return 5.6704e-8; }
Double_t SigmaUncertainty()
{ return 0.000040e-8; }
Double_t Na()
 Avogadro constant (Avogadro's Number)
{ return 6.02214199e+23; }
Double_t NaUncertainty()
{ return 0.00000047e+23; }
Double_t R()
 universal gas constant (Na * K)
 http://scienceworld.wolfram.com/physics/UniversalGasConstant.html
{ return K() * Na(); }
Double_t RUncertainty()
{ return R()*((KUncertainty()/K()) + (NaUncertainty()/Na())); }
Double_t MWair()
 Molecular weight of dry air
 1976 US Standard Atmosphere,
 also see http://atmos.nmsu.edu/jsdap/encyclopediawork.html
{ return 28.9644; }
Double_t Rgair()
 Dry Air Gas Constant (R / MWair)
 http://atmos.nmsu.edu/education_and_outreach/encyclopedia/gas_constant.htm
{ return (1000.0 * R()) / MWair(); }
Double_t EulerGamma()
 Euler-Mascheroni Constant
{ return 0.577215664901532860606512090082402431042; }
Double_t Qe()
 Elementary charge
{ return 1.602176462e-19; }
Double_t QeUncertainty()
{ return 0.000000063e-19; }
T Min()
T Max()
Bool_t AreEqualAbs(Double_t af, Double_t bf, Double_t epsilon)
 Comparing floating points.
 Returns kTRUE if the absolute difference between af and bf is less than epsilon.
{ return TMath::Abs(af-bf) < epsilon; }
Bool_t AreEqualRel(Double_t af, Double_t bf, Double_t relPrec)
 Returns kTRUE if the relative difference between af and bf is less than relPrec.
{ return TMath::Abs(af-bf) <= 0.5*relPrec*(TMath::Abs(af)+TMath::Abs(bf)); }
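 A minimal sketch of tolerant floating-point comparison (the tolerances and macro name
 are illustrative):

    // areEqualExample.C
    #include "TMath.h"
    #include <cstdio>
    void areEqualExample() {
       Double_t x = 0.1 + 0.2;                                     // not exactly 0.3 in binary
       printf("==  : %d\n", x == 0.3);                             // usually 0
       printf("abs : %d\n", TMath::AreEqualAbs(x, 0.3, 1e-12));    // expected 1
       printf("rel : %d\n", TMath::AreEqualRel(x, 0.3, 1e-12));    // expected 1
    }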
template <typename T> T MinElement(Long64_t n, const T *a)
Array Algorithms

 Min, Max of an array
template <typename T> T MaxElement(Long64_t n, const T *a)
template <typename T> Long64_t LocMin(Long64_t n, const T *a)
 Locate Min, Max element number in an array
template <typename Iterator> Iterator LocMin(Iterator first, Iterator last)
template <typename T> Long64_t LocMax(Long64_t n, const T *a)
template <typename Iterator> Iterator LocMax(Iterator first, Iterator last)
template <typename T> Long64_t BinarySearch(Long64_t n, const T *array, T value)
 Binary search
template <typename T> Long64_t BinarySearch(Long64_t n, const T **array, T value)
template <typename Iterator, typename Element> Iterator BinarySearch(Iterator first, Iterator last, Element value)
void Sort(Index n, const Element* a, Index* index, Bool_t down=kTRUE)
 Sorting
template <typename T> Bool_t IsInside(T xp, T yp, Int_t np, T *x, T *y)
template <typename T> T * Cross(const T v1[3],const T v2[3], T out[3])
 Calculate the Cross Product of two vectors
template <typename T> inline T NormCross(const T v1[3],const T v2[3],T out[3])
Calculate the Normalized Cross Product of two vectors
template <typename T> T * Normal2Plane(const T v1[3],const T v2[3],const T v3[3], T normal[3])
 Calculate a normal vector of a plane
template <typename T> Double_t Mean(Long64_t n, const T *a, const Double_t *w=0)
Statistics over arrays

Mean, Geometric Mean, Median, RMS(sigma)
template <typename Iterator> Double_t Mean(Iterator first, Iterator last)
template <typename Iterator, typename WeightIterator> Double_t Mean(Iterator first, Iterator last, WeightIterator w)
template <typename T> Double_t GeomMean(Long64_t n, const T *a)
template <typename Iterator> Double_t GeomMean(Iterator first, Iterator last)
template <typename T> Double_t RMS(Long64_t n, const T *a)
template <typename Iterator> Double_t RMS(Iterator first, Iterator last)
template <typename T> Double_t Median(Long64_t n, const T *a, const Double_t *w=0, Long64_t *work=0)
template <class Element, typename Size> Element KOrdStat(Size n, const Element *a, Size k, Size *work = 0)
k-th order statistic
T TMath::Limits<T> Min()
 returns the minimum representation for type T
T TMath::Limits<T> Max()
 returns the maximum representation for type T