Logo ROOT   6.10/09
Reference Guide
TUnfold.cxx
Go to the documentation of this file.
1 // @(#)root/unfold:$Id$
2 // Author: Stefan Schmitt DESY, 13/10/08
3 
4 /** \class TUnfold
5 \ingroup Unfold
6 
7 An algorithm to unfold distributions from detector to truth level
8 
9 TUnfold is used to decompose a measurement y into several sources x,
10 given the measurement uncertainties and a matrix of migrations A.
11 The method can be applied to a large number of problems,
12 where the measured distribution y is a linear superposition
13 of several Monte Carlo shapes. Beyond such a simple template fit,
14 TUnfold has an adjustable regularisation term and also supports an
15 optional constraint on the total number of events.
16 
17 <b>For most applications, it is better to use the derived class
18 TUnfoldDensity instead of TUnfold</b>. TUnfoldDensity adds various
19 features to TUnfold, such as:
20 background subtraction, propagation of systematic uncertainties,
21 complex multidimensional arrangements of the bins. For innocent
22 users, the most notable improvement of TUnfoldDensity over TUnfold are
23 the getter functions. For TUnfold, histograms have to be booked by the
24 user and the getter functions fill the histogram bins. TUnfoldDensity
25 simply returns a new, already filled histogram.
26 
27 If you use this software, please consider the following citation
28 
29 <b>S.Schmitt, JINST 7 (2012) T10003 [arXiv:1205.6201]</b>
30 
31 Detailed documentation and updates are available on
32 http://www.desy.de/~sschmitt
33 
34 Brief recipe to use TUnfold:
35 
36  - a matrix (truth,reconstructed) is given as a two-dimensional histogram
37  as argument to the constructor of TUnfold
38  - a vector of measurements is given as one-dimensional histogram using
39  the SetInput() method
40  - The unfolding is performed
41 
42  - either once with a fixed parameter tau, method DoUnfold(tau)
43  - or multiple times in a scan to determine the best choice of tau,
44  method ScanLCurve()
45 
46  - Unfolding results are retrieved using various GetXXX() methods
47 
48 Basic formulae:
49 \f[
50 \chi^{2}_{A}=(Ax-y)^{T}V_{yy}^{-1}(Ax-y) \\
51 \chi^{2}_{L}=(x-f*x_{0})^{T}L^{T}L(x-f*x_{0}) \\
52 \chi^{2}_{unf}=\chi^{2}_{A}+\tau^{2}\chi^{2}_{L}+\lambda\Sigma_{i}(Ax-y)_{i}
53 \f]
54 
55  - \f$ x \f$:result,
56  - \f$ A \f$:probabilities,
57  - \f$ y \f$:data,
58  - \f$ V_{yy} \f$:data covariance,
59  - \f$ f \f$:bias scale,
60  - \f$ x_{0} \f$:bias,
61  - \f$ L \f$:regularisation conditions,
62  - \f$ \tau \f$:regularisation strength,
63  - \f$ \lambda \f$:Lagrangian multiplier.
64 
65  Without area constraint, \f$ \lambda \f$ is set to zero, and
66 \f$ \chi^{2}_{unf} \f$ is minimized to determine \f$ x \f$.
67 With area constraint, both \f$ x \f$ and \f$ \lambda \f$ are determined.
68 
69 --------------------------------------------------------------------------------
70 This file is part of TUnfold.
71 
72 TUnfold is free software: you can redistribute it and/or modify
73 it under the terms of the GNU General Public License as published by
74 the Free Software Foundation, either version 3 of the License, or
75 (at your option) any later version.
76 
77 TUnfold is distributed in the hope that it will be useful,
78 but WITHOUT ANY WARRANTY; without even the implied warranty of
79 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
80 GNU General Public License for more details.
81 
82 You should have received a copy of the GNU General Public License
83 along with TUnfold. If not, see <http://www.gnu.org/licenses/>.
84 
85 <b>Version 17.6, updated doxygen-style comments, add one argument for scanLCurve </b>
86 
87 #### History:
88  - Version 17.5, fix memory leak with fVyyInv, bugs in GetInputInverseEmatrix(), GetInput(), bug in MultiplyMSparseMSparseTranspVector
89  - Version 17.4, in parallel to changes in TUnfoldBinning
90  - Version 17.3, in parallel to changes in TUnfoldBinning
91  - Version 17.2, bug fix with GetProbabilityMatrix
92  - Version 17.1, bug fixes in GetFoldedOutput, GetOutput
93  - Version 17.0, option to specify an error matrix with SetInput(), new ScanRho() method
94  - Version 16.2, in parallel to bug-fix in TUnfoldSys
95  - Version 16.1, fix bug with error matrix in case kEConstraintArea is used
96  - Version 16.0, fix calculation of global correlations, improved error messages
97  - Version 15, simplified L-curve scan, new tau definition, new error calc., area preservation
98  - Version 14, with changes in TUnfoldSys.cxx
99  - Version 13, new methods for derived classes and small bug fix
100  - Version 12, report singular matrices
101  - Version 11, reduce the amount of printout
102  - Version 10, more correct definition of the L curve, update references
103  - Version 9, faster matrix inversion and skip edge points for L-curve scan
104  - Version 8, replace all TMatrixSparse matrix operations by private code
105  - Version 7, fix problem with TMatrixDSparse,TMatrixD multiplication
106  - Version 6, replace class XY by std::pair
107  - Version 5, replace run-time dynamic arrays by new and delete[]
108  - Version 4, fix new bug from V3 with initial regularisation condition
109  - Version 3, fix bug with initial regularisation condition
110  - Version 2, with improved ScanLcurve() algorithm
111  - Version 1, added ScanLcurve() method
112  - Version 0, stable version of basic unfolding algorithm
113 */
114 
115 #include <iostream>
116 #include <TMatrixD.h>
117 #include <TMatrixDSparse.h>
118 #include <TMatrixDSym.h>
119 #include <TMatrixDSymEigen.h>
120 #include <TMath.h>
121 #include "TUnfold.h"
122 
123 #include <map>
124 #include <vector>
125 
126 //#define DEBUG
127 //#define DEBUG_DETAIL
128 //#define FORCE_EIGENVALUE_DECOMPOSITION
129 
131 
// NOTE(review): the function signature (doc line 132, presumably the
// destructor TUnfold::~TUnfold()) is missing from this extraction --
// confirm against the original TUnfold.cxx.
133 {
134  // delete all data members
135 
136  DeleteMatrix(&fA);
137  DeleteMatrix(&fL);
138  DeleteMatrix(&fVyy);
139  DeleteMatrix(&fY);
140  DeleteMatrix(&fX0);
141  DeleteMatrix(&fVyyInv);
142 
// free all result matrices (fX, fVxx, derivatives, ...) as well
143  ClearResults();
144 }
145 
146 ////////////////////////////////////////////////////////////////////////////////
147 /// Initialize data members, for use in constructors.
148 
// NOTE(review): the signature line (doc line 149, presumably
// void TUnfold::InitTUnfold()) is missing from this extraction.
150 {
151  // reset all data members
152  fXToHist.Set(0);
153  fHistToX.Set(0);
154  fSumOverY.Set(0);
// all matrix pointers start as null; DeleteMatrix() tolerates that state
155  fA = 0;
156  fL = 0;
157  fVyy = 0;
158  fY = 0;
159  fX0 = 0;
160  fTauSquared = 0.0;
161  fBiasScale = 0.0;
// NOTE(review): doc lines 162-163 are missing here (presumably further
// member initialisations, e.g. constraint/regularisation mode) -- verify.
164  // output
165  fX = 0;
166  fVyyInv = 0;
167  fVxx = 0;
168  fVxxInv = 0;
169  fAx = 0;
170  fChi2A = 0.0;
171  fLXsquared = 0.0;
// sentinel values: rho_max=999 / rho_avg=-1 flag "no valid result yet"
172  fRhoMax = 999.0;
173  fRhoAvg = -1.0;
174  fNdf = 0;
175  fDXDAM[0] = 0;
176  fDXDAZ[0] = 0;
177  fDXDAM[1] = 0;
178  fDXDAZ[1] = 0;
179  fDXDtauSquared = 0;
180  fDXDY = 0;
181  fEinv = 0;
182  fE = 0;
// numerical tolerance used in matrix operations
183  fEpsMatrix=1.E-13;
184  fIgnoredBins=0;
185 }
186 
187 ////////////////////////////////////////////////////////////////////////////////
188 /// Delete matrix and invalidate pointer.
189 ///
190 /// \param[inout] m pointer to a matrix-pointer
191 ///
192 /// If the matrix pointer is non-zero, the matrix is deleted. The matrix pointer
193 /// is set to zero.
194 
// NOTE(review): the signature line (doc line 195, presumably
// void TUnfold::DeleteMatrix(TMatrixD **m)) is missing from this extraction.
196 {
// delete the matrix if present, then null the pointer so a later
// DeleteMatrix() on the same slot is safe
197  if(*m) delete *m;
198  *m=0;
199 }
200 
201 ////////////////////////////////////////////////////////////////////////////////
202 /// Delete sparse matrix and invalidate pointer
203 ///
204 /// \param[inout] m pointer to a matrix-pointer
205 ///
206 /// If the matrix pointer is non-zero, the matrix is deleted. The matrix pointer
207 /// is set to zero.
208 
// NOTE(review): the signature line (doc line 209, presumably
// void TUnfold::DeleteMatrix(TMatrixDSparse **m)) is missing from this
// extraction; sparse-matrix overload of DeleteMatrix above.
210 {
// delete the matrix if present, then null the pointer (idempotent)
211  if(*m) delete *m;
212  *m=0;
213 }
214 
215 ////////////////////////////////////////////////////////////////////////////////
216 /// Reset all results.
217 
// NOTE(review): the signature line (doc line 218, presumably
// void TUnfold::ClearResults()) is missing from this extraction.
219 {
220  // delete old results (if any)
221  // this function is virtual, so derived classes may implement their own
222  // method to flag results as non-valid
223 
224  // note: the inverse of the input covariance is not cleared
225  // because it does not change until the input is changed
226 
227  DeleteMatrix(&fVxx);
228  DeleteMatrix(&fX);
229  DeleteMatrix(&fAx);
// clear both halves of the dx/dA derivative pairs
230  for(Int_t i=0;i<2;i++) {
231  DeleteMatrix(fDXDAM+i);
232  DeleteMatrix(fDXDAZ+i);
233  }
// NOTE(review): doc lines 234-236 and 238 are missing here (presumably
// DeleteMatrix calls for fDXDtauSquared, fDXDY, fEinv, fVxxInv) -- verify
// against the original source.
237  DeleteMatrix(&fE);
// reset scalar results to their "invalid" sentinel values
239  fChi2A = 0.0;
240  fLXsquared = 0.0;
241  fRhoMax = 999.0;
242  fRhoAvg = -1.0;
243 }
244 
245 ////////////////////////////////////////////////////////////////////////////////
246 /// Only for use by root streamer or derived classes.
247 
// NOTE(review): the signature line (doc line 248, presumably the default
// constructor TUnfold::TUnfold()) is missing from this extraction.
249 {
250  // set all matrix pointers to zero
251  InitTUnfold();
252 }
253 
254 ////////////////////////////////////////////////////////////////////////////////
255 /// Core unfolding algorithm.
256 ///
257 /// Main unfolding algorithm. Declared virtual, because other algorithms
258 /// could be implemented
259 ///
260 /// Purpose: unfold y -> x
261 ///
262 /// - Data members required:
263 /// - fA: matrix to relate x and y
264 /// - fY: measured data points
265 /// - fX0: bias on x
266 /// - fBiasScale: scale factor for fX0
267 /// - fVyy: covariance matrix for y
268 /// - fL: regularisation conditions
269 /// - fTauSquared: regularisation strength
270 /// - fConstraint: whether the constraint is applied
271 /// - Data members modified:
272 /// - fVyyInv: inverse of input data covariance matrix
273 /// - fNdf: number of degrees of freedom
274 /// - fEinv: inverse of the matrix needed for unfolding calculations
275 /// - fE: the matrix needed for unfolding calculations
276 /// - fX: unfolded data points
277 /// - fDXDY: derivative of x wrt y (for error propagation)
278 /// - fVxx: error matrix (covariance matrix) on x
279 /// - fAx: estimate of distribution y from unfolded data
280 /// - fChi2A: contribution to chi**2 from y-Ax
281 /// - fChi2L: contribution to chi**2 from L*(x-x0)
282 /// - fDXDtauSquared: derivative of x wrt tau
283 /// - fDXDAM[0,1]: matrix parts of derivative x wrt A
284 /// - fDXDAZ[0,1]: vector parts of derivative x wrt A
285 /// - fRhoMax: maximum global correlation coefficient
286 /// - fRhoAvg: average global correlation coefficient
287 /// - Return code:
288 /// - fRhoMax if(fRhoMax>=1.0) then the unfolding has failed!
289 
// NOTE(review): this is a lossy doxygen rendering. The signature line
// (doc line 290, presumably Double_t TUnfold::DoUnfold(void)) and a number
// of interior statements (doc lines 296-297, 306, 313, 342, 350, 352, 402,
// 412-413, 444, 450, 474-477, 503, 505-506, 518-519, 530) are missing.
// Several names used below (AtVyyinv, lSquared, xSparse, dy, temp, temp1,
// temp2) are declared on those missing lines -- confirm against the
// original TUnfold.cxx before relying on this listing.
291 {
// invalidate any previous result before recomputing
292  ClearResults();
293 
294  // get pseudo-inverse matrix Vyyinv and NDF
295  if(!fVyyInv) {
// NOTE(review): missing lines here presumably invert fVyy and initialise
// fNdf; only the constraint-related decrement survived the extraction.
298  fNdf--;
299  }
300  }
301  //
302  // get matrix
303  // T
304  // fA fV = mAt_V
305  //
// NOTE(review): the statement creating AtVyyinv (doc line 306) is missing.
307  //
308  // get
309  // T
310  // fA fVyyinv fY + fTauSquared fBiasScale Lsquared fX0 = rhs
311  //
312  TMatrixDSparse *rhs=MultiplyMSparseM(AtVyyinv,fY);
// NOTE(review): the statement creating lSquared (doc line 313) is missing.
// add the regularisation bias term tau^2 * f * L^T L * x0 to the rhs
314  if (fBiasScale != 0.0) {
315  TMatrixDSparse *rhs2=MultiplyMSparseM(lSquared,fX0);
316  AddMSparse(rhs, fTauSquared * fBiasScale ,rhs2);
317  DeleteMatrix(&rhs2);
318  }
319 
320  //
321  // get matrix
322  // T
323  // (fA fV)fA + fTauSquared*fLsquared = fEinv
324  fEinv=MultiplyMSparseMSparse(AtVyyinv,fA);
325  AddMSparse(fEinv,fTauSquared,lSquared);
326 
327  //
328  // get matrix
329  // -1
330  // fEinv = fE
331  //
332  Int_t rank=0;
333  fE = InvertMSparseSymmPos(fEinv,&rank);
// a rank deficit means the unfolding problem is (near-)singular
334  if(rank != GetNx()) {
335  Warning("DoUnfold","rank of matrix E %d expect %d",rank,GetNx());
336  }
337 
338  //
339  // get result
340  // fE rhs = x
341  //
// NOTE(review): the statement creating xSparse (doc line 342) is missing.
343  fX = new TMatrixD(*xSparse);
344  DeleteMatrix(&rhs);
345  DeleteMatrix(&xSparse);
346 
347  // additional correction for constraint
348  Double_t lambda_half=0.0;
349  Double_t one_over_epsEeps=0.0;
// NOTE(review): doc lines 350/352 are missing here (presumably the
// declaration of epsilon and the if(fConstraint...) guard that opens
// the block closed at doc line 394).
351  TMatrixDSparse *Eepsilon=0;
353  // calculate epsilon: vector of efficiencies
354  const Int_t *A_rows=fA->GetRowIndexArray();
355  const Int_t *A_cols=fA->GetColIndexArray();
356  const Double_t *A_data=fA->GetMatrixArray();
// epsilon_j = sum_i A_ij : column sums of the probability matrix
357  TMatrixD epsilonNosparse(fA->GetNcols(),1);
358  for(Int_t i=0;i<A_rows[fA->GetNrows()];i++) {
359  epsilonNosparse(A_cols[i],0) += A_data[i];
360  }
361  epsilon=new TMatrixDSparse(epsilonNosparse);
362  // calculate vector EE*epsilon
363  Eepsilon=MultiplyMSparseMSparse(fE,epsilon);
364  // calculate scalar product epsilon#*Eepsilon
365  TMatrixDSparse *epsilonEepsilon=MultiplyMSparseTranspMSparse(epsilon,
366  Eepsilon);
367  // if epsilonEepsilon is zero, nothing works...
368  if(epsilonEepsilon->GetRowIndexArray()[1]==1) {
369  one_over_epsEeps=1./epsilonEepsilon->GetMatrixArray()[0];
370  } else {
371  Fatal("Unfold","epsilon#Eepsilon has dimension %d != 1",
372  epsilonEepsilon->GetRowIndexArray()[1]);
373  }
374  DeleteMatrix(&epsilonEepsilon);
375  // calculate sum(Y)
376  Double_t y_minus_epsx=0.0;
377  for(Int_t iy=0;iy<fY->GetNrows();iy++) {
378  y_minus_epsx += (*fY)(iy,0);
379  }
380  // calculate sum(Y)-epsilon#*X
381  for(Int_t ix=0;ix<epsilonNosparse.GetNrows();ix++) {
382  y_minus_epsx -= epsilonNosparse(ix,0) * (*fX)(ix,0);
383  }
384  // calculate lambda_half
385  lambda_half=y_minus_epsx*one_over_epsEeps;
386  // calculate final vector X
387  const Int_t *EEpsilon_rows=Eepsilon->GetRowIndexArray();
388  const Double_t *EEpsilon_data=Eepsilon->GetMatrixArray();
// shift x by lambda/2 * E*epsilon, enforcing the area constraint
389  for(Int_t ix=0;ix<Eepsilon->GetNrows();ix++) {
390  if(EEpsilon_rows[ix]<EEpsilon_rows[ix+1]) {
391  (*fX)(ix,0) += lambda_half * EEpsilon_data[EEpsilon_rows[ix]];
392  }
393  }
394  }
395  //
396  // get derivative dx/dy
397  // for error propagation
398  // dx/dy = E A# Vyy^-1 ( = B )
399  fDXDY = MultiplyMSparseMSparse(fE,AtVyyinv);
400 
401  // additional correction for constraint
// NOTE(review): doc lines 402 and 412-413 are missing (presumably the
// constraint guard and the CreateSparseMatrix call assigning "temp").
403  // transposed vector of dimension GetNy() all elements equal 1/epseEeps
404  Int_t *rows=new Int_t[GetNy()];
405  Int_t *cols=new Int_t[GetNy()];
406  Double_t *data=new Double_t[GetNy()];
407  for(Int_t i=0;i<GetNy();i++) {
408  rows[i]=0;
409  cols[i]=i;
410  data[i]=one_over_epsEeps;
411  }
413  (1,GetNy(),GetNy(),rows,cols,data);
414  delete[] data;
415  delete[] rows;
416  delete[] cols;
417  // B# * epsilon
418  TMatrixDSparse *epsilonB=MultiplyMSparseTranspMSparse(epsilon,fDXDY);
419  // temp- one_over_epsEeps*Bepsilon
420  AddMSparse(temp, -one_over_epsEeps, epsilonB);
421  DeleteMatrix(&epsilonB);
422  // correction matrix
423  TMatrixDSparse *corr=MultiplyMSparseMSparse(Eepsilon,temp);
424  DeleteMatrix(&temp);
425  // determine new derivative
426  AddMSparse(fDXDY,1.0,corr);
427  DeleteMatrix(&corr);
428  }
429 
430  DeleteMatrix(&AtVyyinv);
431 
432  //
433  // get error matrix on x
434  // fDXDY * Vyy * fDXDY#
435  TMatrixDSparse *fDXDYVyy = MultiplyMSparseMSparse(fDXDY,fVyy);
436  fVxx = MultiplyMSparseMSparseTranspVector(fDXDYVyy,fDXDY,0);
437 
438  DeleteMatrix(&fDXDYVyy);
439 
440  //
441  // get result
442  // fA x = fAx
443  //
// NOTE(review): the statement computing fAx (doc line 444) is missing.
445 
446  //
447  // calculate chi**2 etc
448 
449  // chi**2 contribution from (y-Ax)V(y-Ax)
// NOTE(review): the declaration of the residual vector dy (doc line 450)
// is missing.
451  TMatrixDSparse *VyyinvDy=MultiplyMSparseM(fVyyInv,&dy);
452 
453  const Int_t *VyyinvDy_rows=VyyinvDy->GetRowIndexArray();
454  const Double_t *VyyinvDy_data=VyyinvDy->GetMatrixArray();
455  fChi2A=0.0;
456  for(Int_t iy=0;iy<VyyinvDy->GetNrows();iy++) {
457  if(VyyinvDy_rows[iy]<VyyinvDy_rows[iy+1]) {
458  fChi2A += VyyinvDy_data[VyyinvDy_rows[iy]]*dy(iy,0);
459  }
460  }
// chi**2 contribution from the regularisation term: (f*x0-x)# L#L (f*x0-x)
461  TMatrixD dx( fBiasScale * (*fX0), TMatrixD::kMinus,(*fX));
462  TMatrixDSparse *LsquaredDx=MultiplyMSparseM(lSquared,&dx);
463  const Int_t *LsquaredDx_rows=LsquaredDx->GetRowIndexArray();
464  const Double_t *LsquaredDx_data=LsquaredDx->GetMatrixArray();
465  fLXsquared = 0.0;
466  for(Int_t ix=0;ix<LsquaredDx->GetNrows();ix++) {
467  if(LsquaredDx_rows[ix]<LsquaredDx_rows[ix+1]) {
468  fLXsquared += LsquaredDx_data[LsquaredDx_rows[ix]]*dx(ix,0);
469  }
470  }
471 
472  //
473  // get derivative dx/dtau
// NOTE(review): doc lines 474-477 are missing (presumably the computation
// of fDXDtauSquared, a constraint guard, and the "temp" product used below).
475 
478  Double_t f=0.0;
479  if(temp->GetRowIndexArray()[1]==1) {
480  f=temp->GetMatrixArray()[0]*one_over_epsEeps;
481  } else if(temp->GetRowIndexArray()[1]>1) {
482  Fatal("Unfold",
483  "epsilon#fDXDtauSquared has dimension %d != 1",
484  temp->GetRowIndexArray()[1]);
485  }
486  if(f!=0.0) {
487  AddMSparse(fDXDtauSquared, -f,Eepsilon);
488  }
489  DeleteMatrix(&temp);
490  }
491  DeleteMatrix(&epsilon);
492 
493  DeleteMatrix(&LsquaredDx);
494  DeleteMatrix(&lSquared);
495 
496  // calculate/store matrices defining the derivatives dx/dA
497  fDXDAM[0]=new TMatrixDSparse(*fE);
498  fDXDAM[1]=new TMatrixDSparse(*fDXDY); // create a copy
499  fDXDAZ[0]=VyyinvDy; // instead of deleting VyyinvDy
500  VyyinvDy=0;
501  fDXDAZ[1]=new TMatrixDSparse(*fX); // create a copy
502 
// NOTE(review): doc lines 503 and 505-506 are missing (presumably the
// constraint guard and the statement assigning temp1).
504  // add correction to fDXDAM[0]
506  (Eepsilon,Eepsilon,0);
507  AddMSparse(fDXDAM[0], -one_over_epsEeps,temp1);
508  DeleteMatrix(&temp1);
509  // add correction to fDXDAZ[0]
510  Int_t *rows=new Int_t[GetNy()];
511  Int_t *cols=new Int_t[GetNy()];
512  Double_t *data=new Double_t[GetNy()];
513  for(Int_t i=0;i<GetNy();i++) {
514  rows[i]=i;
515  cols[i]=0;
516  data[i]=lambda_half;
517  }
// NOTE(review): doc line 518 is missing (presumably the CreateSparseMatrix
// call assigning temp2).
519  (GetNy(),1,GetNy(),rows,cols,data);
520  delete[] data;
521  delete[] rows;
522  delete[] cols;
523  AddMSparse(fDXDAZ[0],1.0,temp2);
524  DeleteMatrix(&temp2);
525  }
526 
527  DeleteMatrix(&Eepsilon);
528 
529  rank=0;
// NOTE(review): doc line 530 is missing (presumably the inversion of fVxx
// into fVxxInv, which sets "rank").
531  if(rank != GetNx()) {
532  Warning("DoUnfold","rank of output covariance is %d expect %d",
533  rank,GetNx());
534  }
535 
// extract the diagonal of Vxx^-1, needed for the global correlations
536  TVectorD VxxInvDiag(fVxxInv->GetNrows());
537  const Int_t *VxxInv_rows=fVxxInv->GetRowIndexArray();
538  const Int_t *VxxInv_cols=fVxxInv->GetColIndexArray();
539  const Double_t *VxxInv_data=fVxxInv->GetMatrixArray();
540  for (int ix = 0; ix < fVxxInv->GetNrows(); ix++) {
541  for(int ik=VxxInv_rows[ix];ik<VxxInv_rows[ix+1];ik++) {
542  if(ix==VxxInv_cols[ik]) {
543  VxxInvDiag(ix)=VxxInv_data[ik];
544  }
545  }
546  }
547 
548  // maximum global correlation coefficient
549  const Int_t *Vxx_rows=fVxx->GetRowIndexArray();
550  const Int_t *Vxx_cols=fVxx->GetColIndexArray();
551  const Double_t *Vxx_data=fVxx->GetMatrixArray();
552 
553  Double_t rho_squared_max = 0.0;
554  Double_t rho_sum = 0.0;
555  Int_t n_rho=0;
// rho_i^2 = 1 - 1/((Vxx^-1)_ii * (Vxx)_ii); track maximum and average
556  for (int ix = 0; ix < fVxx->GetNrows(); ix++) {
557  for(int ik=Vxx_rows[ix];ik<Vxx_rows[ix+1];ik++) {
558  if(ix==Vxx_cols[ik]) {
559  Double_t rho_squared =
560  1. - 1. / (VxxInvDiag(ix) * Vxx_data[ik]);
561  if (rho_squared > rho_squared_max)
562  rho_squared_max = rho_squared;
563  if(rho_squared>0.0) {
564  rho_sum += TMath::Sqrt(rho_squared);
565  n_rho++;
566  }
567  break;
568  }
569  }
570  }
571  fRhoMax = TMath::Sqrt(rho_squared_max);
572  fRhoAvg = (n_rho>0) ? (rho_sum/n_rho) : -1.0;
573 
// per the class documentation, fRhoMax>=1.0 signals a failed unfolding
574  return fRhoMax;
575 }
576 
577 ////////////////////////////////////////////////////////////////////////////////
578 /// Create a sparse matrix, given the nonzero elements.
579 ///
580 /// \param[in] nrow number of rows
581 /// \param[in] ncol number of columns
582 /// \param[in] nel number of non-zero elements
583 /// \param[in] row row indexes of non-zero elements
584 /// \param[in] col column indexes of non-zero elements
585 /// \param[in] data non-zero elements data
586 ///
587 /// return pointer to a new sparse matrix
588 ///
589 /// shortcut to new TMatrixDSparse() followed by SetMatrixArray().
590 
// NOTE(review): the first signature line (doc line 591, presumably
// TMatrixDSparse *TUnfold::CreateSparseMatrix) is missing from this
// extraction; only the parameter list below survived.
592 (Int_t nrow,Int_t ncol,Int_t nel,Int_t *row,Int_t *col,Double_t *data) const
593 {
594  // create a sparse matrix
595  // nrow,ncol : dimension of the matrix
596  // nel: number of non-zero elements
597  // row[nel],col[nel],data[nel] : indices and data of the non-zero elements
598  TMatrixDSparse *A=new TMatrixDSparse(nrow,ncol);
// SetMatrixArray with nel==0 is skipped; an all-zero matrix is returned
599  if(nel>0) {
600  A->SetMatrixArray(nel,row,col,data);
601  }
// caller takes ownership of the returned matrix
602  return A;
603 }
604 
605 ////////////////////////////////////////////////////////////////////////////////
606 /// Multiply two sparse matrices.
607 ///
608 /// \param[in] a sparse matrix
609 /// \param[in] b sparse matrix
610 ///
611 /// returns a new sparse matrix a*b.
612 ///
613 /// A replacement for:
614 /// new TMatrixDSparse(a,TMatrixDSparse::kMult,b)
615 /// the root implementation had problems in older versions of root.
616 
// NOTE(review): the first signature line (doc line 617, presumably
// TMatrixDSparse *TUnfold::MultiplyMSparseMSparse(const TMatrixDSparse *a,)
// is missing from this extraction.
618  const TMatrixDSparse *b) const
619 {
// dimensions must be compatible for the product a*b
620  if(a->GetNcols()!=b->GetNrows()) {
621  Fatal("MultiplyMSparseMSparse",
622  "inconsistent matrix col/ matrix row %d !=%d",
623  a->GetNcols(),b->GetNrows());
624  }
625 
// NOTE(review): doc line 626 is missing (presumably the allocation of the
// result matrix "r" returned at the end).
627  const Int_t *a_rows=a->GetRowIndexArray();
628  const Int_t *a_cols=a->GetColIndexArray();
629  const Double_t *a_data=a->GetMatrixArray();
630  const Int_t *b_rows=b->GetRowIndexArray();
631  const Int_t *b_cols=b->GetColIndexArray();
632  const Double_t *b_data=b->GetMatrixArray();
633  // maximum size of the output matrix
// each non-empty a-row can contribute at most one full row of b columns
634  int nMax=0;
635  for (Int_t irow = 0; irow < a->GetNrows(); irow++) {
636  if(a_rows[irow+1]>a_rows[irow]) nMax += b->GetNcols();
637  }
638  if((nMax>0)&&(a_cols)&&(b_cols)) {
639  Int_t *r_rows=new Int_t[nMax];
640  Int_t *r_cols=new Int_t[nMax];
641  Double_t *r_data=new Double_t[nMax];
// dense scratch buffer for one output row at a time
642  Double_t *row_data=new Double_t[b->GetNcols()];
643  Int_t n=0;
644  for (Int_t irow = 0; irow < a->GetNrows(); irow++) {
// skip rows of a with no non-zero elements
645  if(a_rows[irow+1]<=a_rows[irow]) continue;
646  // clear row data
647  for(Int_t icol=0;icol<b->GetNcols();icol++) {
648  row_data[icol]=0.0;
649  }
650  // loop over a-columns in this a-row
651  for(Int_t ia=a_rows[irow];ia<a_rows[irow+1];ia++) {
652  Int_t k=a_cols[ia];
653  // loop over b-columns in b-row k
654  for(Int_t ib=b_rows[k];ib<b_rows[k+1];ib++) {
655  row_data[b_cols[ib]] += a_data[ia]*b_data[ib];
656  }
657  }
658  // store nonzero elements
659  for(Int_t icol=0;icol<b->GetNcols();icol++) {
660  if(row_data[icol] != 0.0) {
661  r_rows[n]=irow;
662  r_cols[n]=icol;
663  r_data[n]=row_data[icol];
664  n++;
665  }
666  }
667  }
668  if(n>0) {
669  r->SetMatrixArray(n,r_rows,r_cols,r_data);
670  }
671  delete[] r_rows;
672  delete[] r_cols;
673  delete[] r_data;
674  delete[] row_data;
675  }
676 
// caller takes ownership of the returned matrix
677  return r;
678 }
679 
680 ////////////////////////////////////////////////////////////////////////////////
681 /// Multiply a transposed Sparse matrix with another sparse matrix,
682 ///
683 /// \param[in] a sparse matrix (to be transposed)
684 /// \param[in] b sparse matrix
685 ///
686 /// returns a new sparse matrix a^{T}*b
687 ///
688 /// this is a replacement for the root constructors
689 /// new TMatrixDSparse(TMatrixDSparse(TMatrixDSparse::kTransposed,*a),
690 /// TMatrixDSparse::kMult,*b)
691 
// NOTE(review): the first signature line (doc line 692, presumably
// TMatrixDSparse *TUnfold::MultiplyMSparseTranspMSparse) is missing from
// this extraction; only the parameter list below survived.
693 (const TMatrixDSparse *a,const TMatrixDSparse *b) const
694 {
// for a^T * b both inputs must have the same number of rows
695  if(a->GetNrows() != b->GetNrows()) {
696  Fatal("MultiplyMSparseTranspMSparse",
697  "inconsistent matrix row numbers %d!=%d",
698  a->GetNrows(),b->GetNrows());
699  }
700 
// NOTE(review): doc line 701 is missing (presumably the allocation of the
// result matrix "r" returned at the end).
702  const Int_t *a_rows=a->GetRowIndexArray();
703  const Int_t *a_cols=a->GetColIndexArray();
704  const Double_t *a_data=a->GetMatrixArray();
705  const Int_t *b_rows=b->GetRowIndexArray();
706  const Int_t *b_cols=b->GetColIndexArray();
707  const Double_t *b_data=b->GetMatrixArray();
708  // maximum size of the output matrix
709 
710  // matrix multiplication
// accumulate the product in a nested map: outer key = result row,
// inner key = result column; this keeps only non-zero entries
711  typedef std::map<Int_t,Double_t> MMatrixRow_t;
712  typedef std::map<Int_t, MMatrixRow_t > MMatrix_t;
713  MMatrix_t matrix;
714 
// (a^T b)_(a_col,b_col) = sum over shared row index iRowAB
715  for(Int_t iRowAB=0;iRowAB<a->GetNrows();iRowAB++) {
716  for(Int_t ia=a_rows[iRowAB];ia<a_rows[iRowAB+1];ia++) {
717  for(Int_t ib=b_rows[iRowAB];ib<b_rows[iRowAB+1];ib++) {
718  // this creates a new row if necessary
719  MMatrixRow_t &row=matrix[a_cols[ia]];
720  MMatrixRow_t::iterator icol=row.find(b_cols[ib]);
721  if(icol!=row.end()) {
722  // update existing row
723  (*icol).second += a_data[ia]*b_data[ib];
724  } else {
725  // create new row
726  row[b_cols[ib]] = a_data[ia]*b_data[ib];
727  }
728  }
729  }
730  }
731 
// count the total number of non-zero entries
732  Int_t n=0;
733  for(MMatrix_t::const_iterator irow=matrix.begin();
734  irow!=matrix.end();irow++) {
735  n += (*irow).second.size();
736  }
737  if(n>0) {
738  // pack matrix into arrays
739  Int_t *r_rows=new Int_t[n];
740  Int_t *r_cols=new Int_t[n];
741  Double_t *r_data=new Double_t[n];
742  n=0;
743  for(MMatrix_t::const_iterator irow=matrix.begin();
744  irow!=matrix.end();irow++) {
745  for(MMatrixRow_t::const_iterator icol=(*irow).second.begin();
746  icol!=(*irow).second.end();icol++) {
747  r_rows[n]=(*irow).first;
748  r_cols[n]=(*icol).first;
749  r_data[n]=(*icol).second;
750  n++;
751  }
752  }
753  // pack arrays into TMatrixDSparse
754  if(n>0) {
755  r->SetMatrixArray(n,r_rows,r_cols,r_data);
756  }
757  delete[] r_rows;
758  delete[] r_cols;
759  delete[] r_data;
760  }
761 
// caller takes ownership of the returned matrix
762  return r;
763 }
764 
765 ////////////////////////////////////////////////////////////////////////////////
766 /// Multiply sparse matrix and a non-sparse matrix.
767 ///
768 /// \param[in] a sparse matrix
769 /// \param[in] b matrix
770 ///
771 /// returns a new sparse matrix a*b.
772 /// A replacement for:
773 /// new TMatrixDSparse(a,TMatrixDSparse::kMult,b)
774 /// the root implementation had problems in older versions of root.
775 
// NOTE(review): the first signature line (doc line 776, presumably
// TMatrixDSparse *TUnfold::MultiplyMSparseM(const TMatrixDSparse *a,) is
// missing from this extraction.
777  const TMatrixD *b) const
778 {
// dimensions must be compatible for the product a*b
779  if(a->GetNcols()!=b->GetNrows()) {
780  Fatal("MultiplyMSparseM","inconsistent matrix col /matrix row %d!=%d",
781  a->GetNcols(),b->GetNrows());
782  }
783 
// NOTE(review): doc line 784 is missing (presumably the allocation of the
// result matrix "r" returned at the end).
785  const Int_t *a_rows=a->GetRowIndexArray();
786  const Int_t *a_cols=a->GetColIndexArray();
787  const Double_t *a_data=a->GetMatrixArray();
788  // maximum size of the output matrix
// each non-empty a-row can fill at most one full row of b columns
789  int nMax=0;
790  for (Int_t irow = 0; irow < a->GetNrows(); irow++) {
791  if(a_rows[irow+1]-a_rows[irow]>0) nMax += b->GetNcols();
792  }
793  if(nMax>0) {
794  Int_t *r_rows=new Int_t[nMax];
795  Int_t *r_cols=new Int_t[nMax];
796  Double_t *r_data=new Double_t[nMax];
797 
798  Int_t n=0;
799  // fill matrix r
800  for (Int_t irow = 0; irow < a->GetNrows(); irow++) {
// skip rows of a with no non-zero elements
801  if(a_rows[irow+1]-a_rows[irow]<=0) continue;
802  for(Int_t icol=0;icol<b->GetNcols();icol++) {
803  r_rows[n]=irow;
804  r_cols[n]=icol;
805  r_data[n]=0.0;
806  for(Int_t i=a_rows[irow];i<a_rows[irow+1];i++) {
807  Int_t j=a_cols[i];
808  r_data[n] += a_data[i]*(*b)(j,icol);
809  }
// keep the entry only if it is non-zero
810  if(r_data[n]!=0.0) n++;
811  }
812  }
813  if(n>0) {
814  r->SetMatrixArray(n,r_rows,r_cols,r_data);
815  }
816  delete[] r_rows;
817  delete[] r_cols;
818  delete[] r_data;
819  }
// caller takes ownership of the returned matrix
820  return r;
821 }
822 
823 
824 ////////////////////////////////////////////////////////////////////////////////
825 /// Calculate a sparse matrix product\f$ M1*V*M2^{T} \f$ where the diagonal matrix V is
826 /// given by a vector.
827 ///
828 /// \param[in] m1 pointer to sparse matrix with dimension I*K
829 /// \param[in] m2 pointer to sparse matrix with dimension J*K
830 /// \param[in] v pointer to vector (matrix) with dimension K*1
831 ///
832 /// returns a sparse matrix R with elements
833 /// \f$ r_{ij}=\Sigma_{k}M1_{ik}V_{k}M2_{jk} \f$
834 
// NOTE(review): the first signature line (doc line 835, presumably
// TMatrixDSparse *TUnfold::MultiplyMSparseMSparseTranspVector) is missing
// from this extraction; only the parameter list below survived.
836 (const TMatrixDSparse *m1,const TMatrixDSparse *m2,
837  const TMatrixTBase<Double_t> *v) const
838 {
// m1 and m2 must share the column dimension K; if v is given it must be
// a K x 1 column vector
839  if((m1->GetNcols() != m2->GetNcols())||
840  (v && ((m1->GetNcols()!=v->GetNrows())||(v->GetNcols()!=1)))) {
841  if(v) {
842  Fatal("MultiplyMSparseMSparseTranspVector",
843  "matrix cols/vector rows %d!=%d!=%d or vector rows %d!=1\n",
844  m1->GetNcols(),m2->GetNcols(),v->GetNrows(),v->GetNcols());
845  } else {
846  Fatal("MultiplyMSparseMSparseTranspVector",
847  "matrix cols %d!=%d\n",m1->GetNcols(),m2->GetNcols());
848  }
849  }
850  const Int_t *rows_m1=m1->GetRowIndexArray();
851  const Int_t *cols_m1=m1->GetColIndexArray();
852  const Double_t *data_m1=m1->GetMatrixArray();
// count non-empty rows of m1 (upper bound on result rows)
853  Int_t num_m1=0;
854  for(Int_t i=0;i<m1->GetNrows();i++) {
855  if(rows_m1[i]<rows_m1[i+1]) num_m1++;
856  }
857  const Int_t *rows_m2=m2->GetRowIndexArray();
858  const Int_t *cols_m2=m2->GetColIndexArray();
859  const Double_t *data_m2=m2->GetMatrixArray();
// count non-empty rows of m2 (upper bound on result columns)
860  Int_t num_m2=0;
861  for(Int_t j=0;j<m2->GetNrows();j++) {
862  if(rows_m2[j]<rows_m2[j+1]) num_m2++;
863  }
// if v is itself sparse, access its data directly for speed
864  const TMatrixDSparse *v_sparse=dynamic_cast<const TMatrixDSparse *>(v);
865  const Int_t *v_rows=0;
866  const Double_t *v_data=0;
867  if(v_sparse) {
868  v_rows=v_sparse->GetRowIndexArray();
869  v_data=v_sparse->GetMatrixArray();
870  }
871  Int_t num_r=num_m1*num_m2+1;
872  Int_t *row_r=new Int_t[num_r];
873  Int_t *col_r=new Int_t[num_r];
874  Double_t *data_r=new Double_t[num_r];
875  num_r=0;
// r_ij = sum_k m1_ik * v_k * m2_jk ; merge the two sorted column lists
876  for(Int_t i=0;i<m1->GetNrows();i++) {
877  for(Int_t j=0;j<m2->GetNrows();j++) {
878  data_r[num_r]=0.0;
879  Int_t index_m1=rows_m1[i];
880  Int_t index_m2=rows_m2[j];
881  while((index_m1<rows_m1[i+1])&&(index_m2<rows_m2[j+1])) {
882  Int_t k1=cols_m1[index_m1];
883  Int_t k2=cols_m2[index_m2];
884  if(k1<k2) {
885  index_m1++;
886  } else if(k1>k2) {
887  index_m2++;
888  } else {
// matching column index k1==k2: accumulate, weighted by v if present
889  if(v_sparse) {
890  Int_t v_index=v_rows[k1];
891  if(v_index<v_rows[k1+1]) {
892  data_r[num_r] += data_m1[index_m1] * data_m2[index_m2]
893  * v_data[v_index];
894  }
895  } else if(v) {
896  data_r[num_r] += data_m1[index_m1] * data_m2[index_m2]
897  * (*v)(k1,0);
898  } else {
// no vector given: plain product m1 * m2^T
899  data_r[num_r] += data_m1[index_m1] * data_m2[index_m2];
900  }
901  index_m1++;
902  index_m2++;
903  }
904  }
// keep the entry only if it is non-zero
905  if(data_r[num_r] !=0.0) {
906  row_r[num_r]=i;
907  col_r[num_r]=j;
908  num_r++;
909  }
910  }
911  }
// NOTE(review): doc line 912 is missing (presumably the CreateSparseMatrix
// call building the result "r" from row_r/col_r/data_r).
913  num_r,row_r,col_r,data_r);
914  delete[] row_r;
915  delete[] col_r;
916  delete[] data_r;
917  return r;
918 }
919 
920 ////////////////////////////////////////////////////////////////////////////////
921 /// Add a sparse matrix, scaled by a factor, to another scaled matrix.
922 ///
923 /// \param[inout] dest destination matrix
924 /// \param[in] f scaling factor
925 /// \param[in] src matrix to be added to dest
926 ///
927 /// a replacement for
928 /// ~~~
929 /// (*dest) += f * (*src)
930 /// ~~~
931 /// which suffered from a bug in old root versions.
932 
// NOTE(review): the first signature line (doc line 933, presumably
// void TUnfold::AddMSparse(TMatrixDSparse *dest,Double_t f,) is missing
// from this extraction.
934  const TMatrixDSparse *src) const
935 {
936  const Int_t *dest_rows=dest->GetRowIndexArray();
937  const Int_t *dest_cols=dest->GetColIndexArray();
938  const Double_t *dest_data=dest->GetMatrixArray();
939  const Int_t *src_rows=src->GetRowIndexArray();
940  const Int_t *src_cols=src->GetColIndexArray();
941  const Double_t *src_data=src->GetMatrixArray();
942 
// both matrices must have identical dimensions for element-wise addition
943  if((dest->GetNrows()!=src->GetNrows())||
944  (dest->GetNcols()!=src->GetNcols())) {
945  Fatal("AddMSparse","inconsistent matrix rows %d!=%d OR cols %d!=%d",
946  src->GetNrows(),dest->GetNrows(),src->GetNcols(),dest->GetNcols());
947  }
// worst case: the sum is fully dense
948  Int_t nmax=dest->GetNrows()*dest->GetNcols();
949  Double_t *result_data=new Double_t[nmax];
950  Int_t *result_rows=new Int_t[nmax];
951  Int_t *result_cols=new Int_t[nmax];
952  Int_t n=0;
// merge the two sorted sparse rows, computing dest + f*src on the fly
953  for(Int_t row=0;row<dest->GetNrows();row++) {
954  Int_t i_dest=dest_rows[row];
955  Int_t i_src=src_rows[row];
956  while((i_dest<dest_rows[row+1])||(i_src<src_rows[row+1])) {
// GetNcols() acts as an "past the end" sentinel for an exhausted side
957  Int_t col_dest=(i_dest<dest_rows[row+1]) ?
958  dest_cols[i_dest] : dest->GetNcols();
959  Int_t col_src =(i_src <src_rows[row+1] ) ?
960  src_cols [i_src] : src->GetNcols();
961  result_rows[n]=row;
962  if(col_dest<col_src) {
963  result_cols[n]=col_dest;
964  result_data[n]=dest_data[i_dest++];
965  } else if(col_dest>col_src) {
966  result_cols[n]=col_src;
967  result_data[n]=f*src_data[i_src++];
968  } else {
969  result_cols[n]=col_dest;
970  result_data[n]=dest_data[i_dest++]+f*src_data[i_src++];
971  }
// keep only non-zero results; abort on NaN/Inf
972  if(result_data[n] !=0.0) {
973  if(!TMath::Finite(result_data[n])) {
974  Fatal("AddMSparse","Nan detected %d %d %d",
975  row,i_dest,i_src);
976  }
977  n++;
978  }
979  }
980  }
// an all-zero sum still needs one (zero) element for SetMatrixArray
981  if(n<=0) {
982  n=1;
983  result_rows[0]=0;
984  result_cols[0]=0;
985  result_data[0]=0.0;
986  }
// overwrite dest in place with the merged result
987  dest->SetMatrixArray(n,result_rows,result_cols,result_data);
988  delete[] result_data;
989  delete[] result_rows;
990  delete[] result_cols;
991 }
992 
993 ////////////////////////////////////////////////////////////////////////////////
994 /// Get the inverse or pseudo-inverse of a positive, sparse matrix.
995 ///
996 /// \param[in] A the sparse matrix to be inverted, has to be positive
997 /// \param[inout] rankPtr if zero, suppress calculation of pseudo-inverse
998 /// otherwise the rank of the matrix is returned in *rankPtr
999 ///
1000 /// return value: 0 or a new sparse matrix
1001 ///
1002 /// - if(rankPtr==0) return the inverse if it exists, or return 0
1003 /// - else return a (pseudo-)inverse and store the rank of the matrix in
1004 /// *rankPtr
1005 ///
1006 ///
1007 /// the matrix inversion is optimized in performance for the case
1008 /// where a large submatrix of A is diagonal
1009 
1011 (const TMatrixDSparse *A,Int_t *rankPtr) const
1012 {
1013 
 // the algorithm requires a square (symmetric, positive) input matrix
1014  if(A->GetNcols()!=A->GetNrows()) {
1015  Fatal("InvertMSparseSymmPos","inconsistent matrix row/col %d!=%d",
1016  A->GetNcols(),A->GetNrows());
1017  }
1018 
1019  Bool_t *isZero=new Bool_t[A->GetNrows()];
1020  const Int_t *a_rows=A->GetRowIndexArray();
1021  const Int_t *a_cols=A->GetColIndexArray();
1022  const Double_t *a_data=A->GetMatrixArray();
1023 
1024  // determine diagonal elements
1025  // Aii: diagonals of A
1026  Int_t iDiagonal=0;
1027  Int_t iBlock=A->GetNrows();
1028  Bool_t isDiagonal=kTRUE;
1029  TVectorD aII(A->GetNrows());
1030  Int_t nError=0;
1031  for(Int_t iA=0;iA<A->GetNrows();iA++) {
1032  for(Int_t indexA=a_rows[iA];indexA<a_rows[iA+1];indexA++) {
1033  Int_t jA=a_cols[indexA];
1034  if(iA==jA) {
 // the !(x>=0) test also catches NaN diagonal elements
1035  if(!(a_data[indexA]>=0.0)) nError++;
1036  aII(iA)=a_data[indexA];
1037  }
1038  }
1039  }
1040  if(nError>0) {
1041  Fatal("InvertMSparseSymmPos",
1042  "Matrix has %d negative elements on the diagonal", nError);
 // NOTE(review): Fatal() normally aborts; the cleanup below only runs
 // if the ROOT error handler has been configured to return
1043  delete[] isZero;
1044  return 0;
1045  }
1046 
1047  // reorder matrix such that the largest block of zeros is swapped
1048  // to the lowest indices
1049  // the result of this ordering:
1050  // swap[i] : for index i point to the row in A
1051  // swapBack[iA] : for index iA in A point to the swapped index i
1052  // the indices are grouped into three groups
1053  // 0..iDiagonal-1 : these rows only have diagonal elements
1054  // iDiagonal..iBlock : these rows contain a diagonal block matrix
1055  //
1056  Int_t *swap=new Int_t[A->GetNrows()];
1057  for(Int_t j=0;j<A->GetNrows();j++) swap[j]=j;
1058  for(Int_t iStart=0;iStart<iBlock;iStart++) {
 // select the row with the largest number of zero elements
1059  Int_t nZero=0;
1060  for(Int_t i=iStart;i<iBlock;i++) {
1061  Int_t iA=swap[i];
1062  Int_t n=A->GetNrows()-(a_rows[iA+1]-a_rows[iA]);
1063  if(n>nZero) {
1064  Int_t tmp=swap[iStart];
1065  swap[iStart]=swap[i];
1066  swap[i]=tmp;
1067  nZero=n;
1068  }
1069  }
1070  for(Int_t i=0;i<A->GetNrows();i++) isZero[i]=kTRUE;
1071  Int_t iA=swap[iStart];
1072  for(Int_t indexA=a_rows[iA];indexA<a_rows[iA+1];indexA++) {
1073  Int_t jA=a_cols[indexA];
1074  isZero[jA]=kFALSE;
1075  if(iA!=jA) {
1076  isDiagonal=kFALSE;
1077  }
1078  }
1079  if(isDiagonal) {
1080  iDiagonal=iStart+1;
1081  } else {
 // move rows which are coupled to the selected row to the end
1082  for(Int_t i=iStart+1;i<iBlock;) {
1083  if(isZero[swap[i]]) {
1084  i++;
1085  } else {
1086  iBlock--;
1087  Int_t tmp=swap[iBlock];
1088  swap[iBlock]=swap[i];
1089  swap[i]=tmp;
1090  }
1091  }
1092  }
1093  }
1094 
1095  // for tests uncomment this:
1096  // iBlock=iDiagonal;
1097 
1098 
1099  // conditioning of the iBlock part
1100 #ifdef CONDITION_BLOCK_PART
 // shell sort of the non-diagonal rows by ascending diagonal element
1101  Int_t nn=A->GetNrows()-iBlock;
1102  for(int inc=(nn+1)/2;inc;inc /= 2) {
1103  for(int i=inc;i<nn;i++) {
1104  int itmp=swap[i+iBlock];
1105  int j;
1106  for(j=i;(j>=inc)&&(aII(swap[j-inc+iBlock])<aII(itmp));j -=inc) {
1107  swap[j+iBlock]=swap[j-inc+iBlock];
1108  }
1109  swap[j+iBlock]=itmp;
1110  }
1111  }
1112 #endif
1113  // construct array for swapping back
1114  Int_t *swapBack=new Int_t[A->GetNrows()];
1115  for(Int_t i=0;i<A->GetNrows();i++) {
1116  swapBack[swap[i]]=i;
1117  }
1118 #ifdef DEBUG_DETAIL
1119  for(Int_t i=0;i<A->GetNrows();i++) {
1120  std::cout<<" "<<i<<" "<<swap[i]<<" "<<swapBack[i]<<"\n";
1121  }
1122  std::cout<<"after sorting\n";
1123  for(Int_t i=0;i<A->GetNrows();i++) {
1124  if(i==iDiagonal) std::cout<<"iDiagonal="<<i<<"\n";
1125  if(i==iBlock) std::cout<<"iBlock="<<i<<"\n";
1126  std::cout<<" "<<swap[i]<<" "<<aII(swap[i])<<"\n";
1127  }
1128  {
1129  // sanity check:
1130  // (1) sub-matrix swapped[0]..swapped[iDiagonal]
1131  // must not contain off-diagonals
1132  // (2) sub-matrix swapped[0]..swapped[iBlock-1] must be diagonal
1133  Int_t nDiag=0;
1134  Int_t nError1=0;
1135  Int_t nError2=0;
1136  Int_t nBlock=0;
1137  Int_t nNonzero=0;
1138  for(Int_t i=0;i<A->GetNrows();i++) {
1139  Int_t row=swap[i];
1140  for(Int_t indexA=a_rows[row];indexA<a_rows[row+1];indexA++) {
1141  Int_t j=swapBack[a_cols[indexA]];
1142  if(i==j) nDiag++;
1143  else if((i<iDiagonal)||(j<iDiagonal)) nError1++;
1144  else if((i<iBlock)&&(j<iBlock)) nError2++;
1145  else if((i<iBlock)||(j<iBlock)) nBlock++;
1146  else nNonzero++;
1147  }
1148  }
1149  if(nError1+nError2>0) {
1150  Fatal("InvertMSparseSymmPos","sparse matrix analysis failed %d %d %d %d %d",
1151  iDiagonal,iBlock,A->GetNrows(),nError1,nError2);
1152  }
1153  }
1154 #endif
1155 #ifdef DEBUG
1156  Info("InvertMSparseSymmPos","iDiagonal=%d iBlock=%d nRow=%d",
1157  iDiagonal,iBlock,A->GetNrows());
1158 #endif
1159 
1160  //=============================================
1161  // matrix inversion starts here
1162  //
1163  // the matrix is split into several parts
1164  // D1 0 0
1165  // A = ( 0 D2 B# )
1166  // 0 B C
1167  //
1168  // D1,D2 are diagonal matrices
1169  //
1170  // first, the D1 part is inverted by calculating 1/D1
1171  // dim(D1)= iDiagonal
1172  // next, the other parts are inverted using Schur complement
1173  //
1174  // 1/D1 0 0
1175  // Ainv = ( 0 E G# )
1176  // 0 G F^-1
1177  //
1178  // where F = C + (-B/D2) B#
1179  // G = (F^-1) (-B/D2)
1180  // E = 1/D2 + (-B#/D2) G)
1181  //
1182  // the inverse of F is calculated using a Cholesky decomposition
1183  //
1184  // Error handling:
1185  // (a) if rankPtr==0: user requests inverse
1186  //
1187  // if D1 is not strictly positive, return NULL
1188  // if D2 is not strictly positive, return NULL
1189  // if F is not strictly positive (Cholesky decomposition failed)
1190  // return NULL
1191  // else
1192  // return Ainv as defined above
1193  //
1194  // (b) if rankPtr !=0: user requests pseudo-inverse
1195  // if D2 is not strictly positive or F is not strictly positive
1196  // calculate singular-value decomposition of
1197  // D2 B#
1198  // ( ) = O D O^-1
1199  // B C
1200  // if some eigenvalues are negative, return NULL
1201  // else calculate pseudo-inverse
1202  // *rankPtr = rank(D1)+rank(D)
1203  // else
1204  // calculate pseudo-inverse of D1 (D1_ii==0) ? 0 : 1/D1_ii
1205  // *rankPtr=rank(D1)+nrow(D2)+nrow(C)
1206  // return Ainv as defined above
1207 
 // storage for the non-zero elements of the result (worst case: dense)
1208  Double_t *rEl_data=new Double_t[A->GetNrows()*A->GetNrows()];
1209  Int_t *rEl_col=new Int_t[A->GetNrows()*A->GetNrows()];
1210  Int_t *rEl_row=new Int_t[A->GetNrows()*A->GetNrows()];
1211  Int_t rNumEl=0;
1212 
1213  //====================================================
1214  // invert D1
1215  Int_t rankD1=0;
1216  for(Int_t i=0;i<iDiagonal;++i) {
1217  Int_t iA=swap[i];
1218  if(aII(iA)>0.0) {
1219  rEl_col[rNumEl]=iA;
1220  rEl_row[rNumEl]=iA;
1221  rEl_data[rNumEl]=1./aII(iA);
1222  ++rankD1;
1223  ++rNumEl;
1224  }
1225  }
 // negative rNumEl values below flag which part of the inversion failed
1226  if((!rankPtr)&&(rankD1!=iDiagonal)) {
1227  Fatal("InvertMSparseSymmPos",
1228  "diagonal part 1 has rank %d != %d, matrix can not be inverted",
1229  rankD1,iDiagonal);
1230  rNumEl=-1;
1231  }
1232 
1233 
1234  //====================================================
1235  // invert D2
1236  Int_t nD2=iBlock-iDiagonal;
1237  TMatrixDSparse *D2inv=0;
1238  if((rNumEl>=0)&&(nD2>0)) {
1239  Double_t *D2inv_data=new Double_t[nD2];
1240  Int_t *D2inv_col=new Int_t[nD2];
1241  Int_t *D2inv_row=new Int_t[nD2];
1242  Int_t D2invNumEl=0;
1243  for(Int_t i=0;i<nD2;++i) {
1244  Int_t iA=swap[i+iDiagonal];
1245  if(aII(iA)>0.0) {
1246  D2inv_col[D2invNumEl]=i;
1247  D2inv_row[D2invNumEl]=i;
1248  D2inv_data[D2invNumEl]=1./aII(iA);
1249  ++D2invNumEl;
1250  }
1251  }
1252  if(D2invNumEl==nD2) {
1253  D2inv=CreateSparseMatrix(nD2,nD2,D2invNumEl,D2inv_row,D2inv_col,
1254  D2inv_data);
1255  } else if(!rankPtr) {
1256  Fatal("InvertMSparseSymmPos",
1257  "diagonal part 2 has rank %d != %d, matrix can not be inverted",
1258  D2invNumEl,nD2);
1259  rNumEl=-2;
1260  }
1261  delete [] D2inv_data;
1262  delete [] D2inv_col;
1263  delete [] D2inv_row;
1264  }
1265 
1266  //====================================================
1267  // invert F
1268 
1269  Int_t nF=A->GetNrows()-iBlock;
1270  TMatrixDSparse *Finv=0;
1271  TMatrixDSparse *B=0;
1272  TMatrixDSparse *minusBD2inv=0;
1273  if((rNumEl>=0)&&(nF>0)&&((nD2==0)||D2inv)) {
1274  // construct matrices F and B
1275  Int_t nFmax=nF*nF;
1276  Double_t epsilonF2=fEpsMatrix;
1277  Double_t *F_data=new Double_t[nFmax];
1278  Int_t *F_col=new Int_t[nFmax];
1279  Int_t *F_row=new Int_t[nFmax];
1280  Int_t FnumEl=0;
1281 
1282  Int_t nBmax=nF*(nD2+1);
1283  Double_t *B_data=new Double_t[nBmax];
1284  Int_t *B_col=new Int_t[nBmax];
1285  Int_t *B_row=new Int_t[nBmax];
1286  Int_t BnumEl=0;
1287 
 // split the non-diagonal rows of A into the C part (-> F) and the
 // coupling to the D2 part (-> B)
1288  for(Int_t i=0;i<nF;i++) {
1289  Int_t iA=swap[i+iBlock];
1290  for(Int_t indexA=a_rows[iA];indexA<a_rows[iA+1];indexA++) {
1291  Int_t jA=a_cols[indexA];
1292  Int_t j0=swapBack[jA];
1293  if(j0>=iBlock) {
1294  Int_t j=j0-iBlock;
1295  F_row[FnumEl]=i;
1296  F_col[FnumEl]=j;
1297  F_data[FnumEl]=a_data[indexA];
1298  FnumEl++;
1299  } else if(j0>=iDiagonal) {
1300  Int_t j=j0-iDiagonal;
1301  B_row[BnumEl]=i;
1302  B_col[BnumEl]=j;
1303  B_data[BnumEl]=a_data[indexA];
1304  BnumEl++;
1305  }
1306  }
1307  }
1308  TMatrixDSparse *F=0;
1309  if(FnumEl) {
1310 #ifndef FORCE_EIGENVALUE_DECOMPOSITION
1311  F=CreateSparseMatrix(nF,nF,FnumEl,F_row,F_col,F_data);
1312 #endif
1313  }
1314  delete [] F_data;
1315  delete [] F_col;
1316  delete [] F_row;
1317  if(BnumEl) {
1318  B=CreateSparseMatrix(nF,nD2,BnumEl,B_row,B_col,B_data);
1319  }
1320  delete [] B_data;
1321  delete [] B_col;
1322  delete [] B_row;
1323 
 // minusBD2inv = -(B/D2), the building block of the Schur complement
1324  if(B && D2inv) {
1325  minusBD2inv=MultiplyMSparseMSparse(B,D2inv);
1326  if(minusBD2inv) {
1327  Int_t mbd2_nMax=minusBD2inv->GetRowIndexArray()
1328  [minusBD2inv->GetNrows()];
1329  Double_t *mbd2_data=minusBD2inv->GetMatrixArray();
1330  for(Int_t i=0;i<mbd2_nMax;i++) {
1331  mbd2_data[i] = - mbd2_data[i];
1332  }
1333  }
1334  }
 // F = C + (-B/D2) B#
1335  if(minusBD2inv && F) {
1336  TMatrixDSparse *minusBD2invBt=
1337  MultiplyMSparseMSparseTranspVector(minusBD2inv,B,0);
1338  AddMSparse(F,1.,minusBD2invBt);
1339  DeleteMatrix(&minusBD2invBt);
1340  }
1341 
1342  if(F) {
1343  // cholesky decomposition with preconditioning
1344  const Int_t *f_rows=F->GetRowIndexArray();
1345  const Int_t *f_cols=F->GetColIndexArray();
1346  const Double_t *f_data=F->GetMatrixArray();
1347  // cholesky-type decomposition of F
1348  TMatrixD c(nF,nF);
1349  Int_t nErrorF=0;
1350  for(Int_t i=0;i<nF;i++) {
1351  for(Int_t indexF=f_rows[i];indexF<f_rows[i+1];indexF++) {
1352  if(f_cols[indexF]>=i) c(f_cols[indexF],i)=f_data[indexF];
1353  }
1354  // calculate diagonal element
1355  Double_t c_ii=c(i,i);
1356  for(Int_t j=0;j<i;j++) {
1357  Double_t c_ij=c(i,j);
1358  c_ii -= c_ij*c_ij;
1359  }
 // non-positive pivot: F is not strictly positive, give up
1360  if(c_ii<=0.0) {
1361  nErrorF++;
1362  break;
1363  }
1364  c_ii=TMath::Sqrt(c_ii);
1365  c(i,i)=c_ii;
1366  // off-diagonal elements
1367  for(Int_t j=i+1;j<nF;j++) {
1368  Double_t c_ji=c(j,i);
1369  for(Int_t k=0;k<i;k++) {
1370  c_ji -= c(i,k)*c(j,k);
1371  }
1372  c(j,i) = c_ji/c_ii;
1373  }
1374  }
1375  // check condition of dInv
1376  if(!nErrorF) {
1377  Double_t dmin=c(0,0);
1378  Double_t dmax=dmin;
1379  for(Int_t i=1;i<nF;i++) {
1380  dmin=TMath::Min(dmin,c(i,i));
1381  dmax=TMath::Max(dmax,c(i,i));
1382  }
1383 #ifdef DEBUG
1384  std::cout<<"dmin,dmax: "<<dmin<<" "<<dmax<<"\n";
1385 #endif
1386  if(dmin/dmax<epsilonF2*nF) nErrorF=2;
1387  }
1388  if(!nErrorF) {
1389  // here: F = c c#
1390  // construct inverse of c
1391  TMatrixD cinv(nF,nF);
1392  for(Int_t i=0;i<nF;i++) {
1393  cinv(i,i)=1./c(i,i);
1394  }
1395  for(Int_t i=0;i<nF;i++) {
1396  for(Int_t j=i+1;j<nF;j++) {
1397  Double_t tmp=-c(j,i)*cinv(i,i);
1398  for(Int_t k=i+1;k<j;k++) {
1399  tmp -= cinv(k,i)*c(j,k);
1400  }
1401  cinv(j,i)=tmp*cinv(j,j);
1402  }
1403  }
1404  TMatrixDSparse cInvSparse(cinv);
1406  (&cInvSparse,&cInvSparse);
1407  }
1408  DeleteMatrix(&F);
1409  }
1410  }
1411 
1412  // here:
1413  // rNumEl>=0: diagonal part has been inverted
1414  // (nD2==0)||D2inv : D2 part has been inverted or is empty
1415  // (nF==0)||Finv : F part has been inverted or is empty
1416 
1417  Int_t rankD2Block=0;
1418  if(rNumEl>=0) {
1419  if( ((nD2==0)||D2inv) && ((nF==0)||Finv) ) {
1420  // all matrix parts have been inverted, compose full matrix
1421  // 1/D1 0 0
1422  // Ainv = ( 0 E G# )
1423  // 0 G F^-1
1424  //
1425  // G = (F^-1) (-B/D2)
1426  // E = 1/D2 + (-B#/D2) G)
1427 
1428  TMatrixDSparse *G=0;
1429  if(Finv && minusBD2inv) {
1430  G=MultiplyMSparseMSparse(Finv,minusBD2inv);
1431  }
1432  TMatrixDSparse *E=0;
1433  if(D2inv) E=new TMatrixDSparse(*D2inv);
1434  if(G && minusBD2inv) {
1435  TMatrixDSparse *minusBD2invTransG=
1436  MultiplyMSparseTranspMSparse(minusBD2inv,G);
1437  if(E) {
1438  AddMSparse(E,1.,minusBD2invTransG);
1439  DeleteMatrix(&minusBD2invTransG);
1440  } else {
1441  E=minusBD2invTransG;
1442  }
1443  }
 // the swap[] table maps the sub-matrix indices back to rows/columns
 // of the original matrix while packing the result
1444  // pack matrix E to r
1445  if(E) {
1446  const Int_t *e_rows=E->GetRowIndexArray();
1447  const Int_t *e_cols=E->GetColIndexArray();
1448  const Double_t *e_data=E->GetMatrixArray();
1449  for(Int_t iE=0;iE<E->GetNrows();iE++) {
1450  Int_t iA=swap[iE+iDiagonal];
1451  for(Int_t indexE=e_rows[iE];indexE<e_rows[iE+1];++indexE) {
1452  Int_t jE=e_cols[indexE];
1453  Int_t jA=swap[jE+iDiagonal];
1454  rEl_col[rNumEl]=iA;
1455  rEl_row[rNumEl]=jA;
1456  rEl_data[rNumEl]=e_data[indexE];
1457  ++rNumEl;
1458  }
1459  }
1460  DeleteMatrix(&E);
1461  }
1462  // pack matrix G to r
1463  if(G) {
1464  const Int_t *g_rows=G->GetRowIndexArray();
1465  const Int_t *g_cols=G->GetColIndexArray();
1466  const Double_t *g_data=G->GetMatrixArray();
1467  for(Int_t iG=0;iG<G->GetNrows();iG++) {
1468  Int_t iA=swap[iG+iBlock];
1469  for(Int_t indexG=g_rows[iG];indexG<g_rows[iG+1];++indexG) {
1470  Int_t jG=g_cols[indexG];
1471  Int_t jA=swap[jG+iDiagonal];
1472  // G
1473  rEl_col[rNumEl]=iA;
1474  rEl_row[rNumEl]=jA;
1475  rEl_data[rNumEl]=g_data[indexG];
1476  ++rNumEl;
1477  // G#
1478  rEl_col[rNumEl]=jA;
1479  rEl_row[rNumEl]=iA;
1480  rEl_data[rNumEl]=g_data[indexG];
1481  ++rNumEl;
1482  }
1483  }
1484  DeleteMatrix(&G);
1485  }
1486  if(Finv) {
1487  // pack matrix Finv to r
1488  const Int_t *finv_rows=Finv->GetRowIndexArray();
1489  const Int_t *finv_cols=Finv->GetColIndexArray();
1490  const Double_t *finv_data=Finv->GetMatrixArray();
1491  for(Int_t iF=0;iF<Finv->GetNrows();iF++) {
1492  Int_t iA=swap[iF+iBlock];
1493  for(Int_t indexF=finv_rows[iF];indexF<finv_rows[iF+1];++indexF) {
1494  Int_t jF=finv_cols[indexF];
1495  Int_t jA=swap[jF+iBlock];
1496  rEl_col[rNumEl]=iA;
1497  rEl_row[rNumEl]=jA;
1498  rEl_data[rNumEl]=finv_data[indexF];
1499  ++rNumEl;
1500  }
1501  }
1502  }
1503  rankD2Block=nD2+nF;
1504  } else if(!rankPtr) {
1505  rNumEl=-3;
1506  Fatal("InvertMSparseSymmPos",
1507  "non-trivial part has rank < %d, matrix can not be inverted",
1508  nF);
1509  } else {
1510  // part of the matrix could not be inverted, get eigenvalue
1511  // decomposition
1512  Int_t nEV=nD2+nF;
1513  Double_t epsilonEV2=fEpsMatrix /* *nEV*nEV */;
1514  Info("InvertMSparseSymmPos",
1515  "cholesky-decomposition failed, try eigenvalue analysis");
1516 #ifdef DEBUG
1517  std::cout<<"nEV="<<nEV<<" iDiagonal="<<iDiagonal<<"\n";
1518 #endif
 // build the dense symmetric sub-matrix (D2 B# / B C)
1519  TMatrixDSym EV(nEV);
1520  for(Int_t i=0;i<nEV;i++) {
1521  Int_t iA=swap[i+iDiagonal];
1522  for(Int_t indexA=a_rows[iA];indexA<a_rows[iA+1];indexA++) {
1523  Int_t jA=a_cols[indexA];
1524  Int_t j0=swapBack[jA];
1525  if(j0>=iDiagonal) {
1526  Int_t j=j0-iDiagonal;
1527 #ifdef DEBUG
1528  if((i<0)||(j<0)||(i>=nEV)||(j>=nEV)) {
1529  std::cout<<" error "<<nEV<<" "<<i<<" "<<j<<"\n";
1530  }
1531 #endif
1532  if(!TMath::Finite(a_data[indexA])) {
1533  Fatal("InvertMSparseSymmPos",
1534  "non-finite number detected element %d %d\n",
1535  iA,jA);
1536  }
1537  EV(i,j)=a_data[indexA];
1538  }
1539  }
1540  }
1541  // EV.Print();
1542  TMatrixDSymEigen Eigen(EV);
1543 #ifdef DEBUG
1544  std::cout<<"Eigenvalues\n";
1545  // Eigen.GetEigenValues().Print();
1546 #endif
 // eigenvalues are sorted in decreasing order; check that the largest
 // one is positive and that the smallest is not too negative
1547  Int_t errorEV=0;
1548  Double_t condition=1.0;
1549  if(Eigen.GetEigenValues()(0)<0.0) {
1550  errorEV=1;
1551  } else if(Eigen.GetEigenValues()(0)>0.0) {
1552  condition=Eigen.GetEigenValues()(nEV-1)/Eigen.GetEigenValues()(0);
1553  }
1554  if(condition<-epsilonEV2) {
1555  errorEV=2;
1556  }
1557  if(errorEV) {
1558  if(errorEV==1) {
1559  Error("InvertMSparseSymmPos",
1560  "Largest Eigenvalue is negative %f",
1561  Eigen.GetEigenValues()(0));
1562  } else {
1563  Error("InvertMSparseSymmPos",
1564  "Some Eigenvalues are negative (EV%d/EV0=%g epsilon=%g)",
1565  nEV-1,
1566  Eigen.GetEigenValues()(nEV-1)/Eigen.GetEigenValues()(0),
1567  epsilonEV2);
1568  }
1569  }
1570  // compose inverse matrix
 // pseudo-inverse: invert only eigenvalues above the threshold,
 // their count defines the rank of this block
1571  rankD2Block=0;
1572  Double_t setToZero=epsilonEV2*Eigen.GetEigenValues()(0);
1573  TMatrixD inverseEV(nEV,1);
1574  for(Int_t i=0;i<nEV;i++) {
1575  Double_t x=Eigen.GetEigenValues()(i);
1576  if(x>setToZero) {
1577  inverseEV(i,0)=1./x;
1578  ++rankD2Block;
1579  }
1580  }
1581  TMatrixDSparse V(Eigen.GetEigenVectors());
1583  (&V,&V,&inverseEV);
1584 
1585  // pack matrix VDVt to r
1586  const Int_t *vdvt_rows=VDVt->GetRowIndexArray();
1587  const Int_t *vdvt_cols=VDVt->GetColIndexArray();
1588  const Double_t *vdvt_data=VDVt->GetMatrixArray();
1589  for(Int_t iVDVt=0;iVDVt<VDVt->GetNrows();iVDVt++) {
1590  Int_t iA=swap[iVDVt+iDiagonal];
1591  for(Int_t indexVDVt=vdvt_rows[iVDVt];
1592  indexVDVt<vdvt_rows[iVDVt+1];++indexVDVt) {
1593  Int_t jVDVt=vdvt_cols[indexVDVt];
1594  Int_t jA=swap[jVDVt+iDiagonal];
1595  rEl_col[rNumEl]=iA;
1596  rEl_row[rNumEl]=jA;
1597  rEl_data[rNumEl]=vdvt_data[indexVDVt];
1598  ++rNumEl;
1599  }
1600  }
1601  DeleteMatrix(&VDVt);
1602  }
1603  }
1604  if(rankPtr) {
1605  *rankPtr=rankD1+rankD2Block;
1606  }
1607 
1608 
1609  DeleteMatrix(&D2inv);
1610  DeleteMatrix(&Finv);
1611  DeleteMatrix(&B);
1612  DeleteMatrix(&minusBD2inv);
1613 
1614  delete [] swap;
1615  delete [] swapBack;
1616  delete [] isZero;
1617 
 // rNumEl<0 indicates that some part of the inversion failed
1618  TMatrixDSparse *r=(rNumEl>=0) ?
1619  CreateSparseMatrix(A->GetNrows(),A->GetNrows(),rNumEl,
1620  rEl_row,rEl_col,rEl_data) : 0;
1621  delete [] rEl_data;
1622  delete [] rEl_col;
1623  delete [] rEl_row;
1624 
1625 #ifdef DEBUG_DETAIL
1626  // sanity test
1627  if(r) {
1631 
1632  TMatrixD ar(*Ar);
1633  TMatrixD a(*A);
1634  TMatrixD ara(*ArA);
1635  TMatrixD R(*r);
1636  TMatrixD rar(*rAr);
1637 
1638  DeleteMatrix(&Ar);
1639  DeleteMatrix(&ArA);
1640  DeleteMatrix(&rAr);
1641 
1642  Double_t epsilonA2=fEpsMatrix /* *a.GetNrows()*a.GetNcols() */;
1643  for(Int_t i=0;i<a.GetNrows();i++) {
1644  for(Int_t j=0;j<a.GetNcols();j++) {
1645  // ar should be symmetric
1646  if(TMath::Abs(ar(i,j)-ar(j,i))>
1647  epsilonA2*(TMath::Abs(ar(i,j))+TMath::Abs(ar(j,i)))) {
1648  std::cout<<"Ar is not symmetric Ar("<<i<<","<<j<<")="<<ar(i,j)
1649  <<" Ar("<<j<<","<<i<<")="<<ar(j,i)<<"\n";
1650  }
1651  // ara should be equal a
1652  if(TMath::Abs(ara(i,j)-a(i,j))>
1653  epsilonA2*(TMath::Abs(ara(i,j))+TMath::Abs(a(i,j)))) {
1654  std::cout<<"ArA is not equal A ArA("<<i<<","<<j<<")="<<ara(i,j)
1655  <<" A("<<i<<","<<j<<")="<<a(i,j)<<"\n";
1656  }
1657  // rar should be equal r
1658  if(TMath::Abs(rar(i,j)-R(i,j))>
1659  epsilonA2*(TMath::Abs(rar(i,j))+TMath::Abs(R(i,j)))) {
1660  std::cout<<"rAr is not equal r rAr("<<i<<","<<j<<")="<<rar(i,j)
1661  <<" r("<<i<<","<<j<<")="<<R(i,j)<<"\n";
1662  }
1663  }
1664  }
1665  if(rankPtr) std::cout<<"rank="<<*rankPtr<<"\n";
1666  } else {
1667  std::cout<<"Matrix is not positive\n";
1668  }
1669 #endif
1670  return r;
1671 
1672 }
1673 
1674 ////////////////////////////////////////////////////////////////////////////////
1675 /// Get bin name of an output bin.
1676 ///
1677 /// \param[in] iBinX bin number
1678 ///
1679 /// Return value: name of the bin
1680 ///
1681 /// For TUnfold and TUnfoldSys, this function simply returns the bin
1682 /// number as a string. This function really only makes sense in the
1683 /// context of TUnfoldDensity, where binning schemes are implemented
1684 /// using the class TUnfoldBinning, and non-trivial bin names are
1685 /// returned.
1686 
1688 {
 // default implementation for TUnfold/TUnfoldSys: format the bin number
 // as "#<bin>"; derived classes (TUnfoldDensity) override this
1689  return TString::Format("#%d",iBinX);
1690 }
1691 
1692 ////////////////////////////////////////////////////////////////////////////////
1693 /// Set up response matrix and regularisation scheme.
1694 ///
1695 /// \param[in] hist_A matrix of MC events that describes the migrations
1696 /// \param[in] histmap mapping of the histogram axes
1697 /// \param[in] regmode (default=kRegModeSize) global regularisation mode
1698 /// \param[in] constraint (default=kEConstraintArea) type of constraint
1699 ///
1700 /// Treatment of overflow bins in the matrix hist_A
1701 ///
1702 /// - Events reconstructed in underflow or overflow bins are counted
1703 /// as inefficiency. They have to be filled properly.
1704 /// - Events where the truth level is in underflow or overflow bins are
1705 /// treated as a part of the generator level distribution.
1706 /// The full truth level distribution (including underflow and
1707 /// overflow) is unfolded.
1708 ///
1709 /// If unsure, do the following:
1710 ///
1711 /// - store events where the truth is in underflow or overflow
1712 /// (sometimes called "fakes") in a separate TH1. Ensure that the
1713 /// truth-level underflow and overflow bins of hist_A are all zero.
1714 /// - the fakes are background to the
1715 /// measurement. Use the classes TUnfoldSys and TUnfoldDensity instead
1716 /// of the plain TUnfold for subtracting background.
1717 
1718 TUnfold::TUnfold(const TH2 *hist_A, EHistMap histmap, ERegMode regmode,
1719  EConstraint constraint)
1720 {
1721  // data members initialized to something different from zero:
1722  // fA: filled from hist_A
1723  // fDA: filled from hist_A
1724  // fX0: filled from hist_A
1725  // fL: filled depending on the regularisation scheme
1726  InitTUnfold();
1727  SetConstraint(constraint);
1728  Int_t nx0, nx, ny;
1729  if (histmap == kHistMapOutputHoriz) {
1730  // include underflow and overflow bins on the X axis
1731  nx0 = hist_A->GetNbinsX()+2;
1732  ny = hist_A->GetNbinsY();
1733  } else {
1734  // include underflow and overflow bins on the Y axis
1735  nx0 = hist_A->GetNbinsY()+2;
1736  ny = hist_A->GetNbinsX();
1737  }
1738  nx = 0;
1739  // fNx is expected to be nx0, but the input matrix may be ill-formed
1740  // -> all columns with zero events have to be removed
1741  // (because y does not contain any information on that bin in x)
1742  fSumOverY.Set(nx0);
1743  fXToHist.Set(nx0);
1744  fHistToX.Set(nx0);
1745  Int_t nonzeroA=0;
1746  // loop
1747  // - calculate bias distribution
1748  // sum_over_y
1749  // - count those generator bins which can be unfolded
1750  // fNx
1751  // - histogram bins are added to the lookup-tables
1752  // fHistToX[] and fXToHist[]
1753  // these convert histogram bins to matrix indices and vice versa
1754  // - the number of non-zero elements is counted
1755  // nonzeroA
1756  Int_t nskipped=0;
1757  for (Int_t ix = 0; ix < nx0; ix++) {
1758  // calculate sum over all detector bins
1759  // excluding the overflow bins
1760  Double_t sum = 0.0;
1761  Int_t nonZeroY = 0;
1762  for (Int_t iy = 0; iy < ny; iy++) {
1763  Double_t z;
1764  if (histmap == kHistMapOutputHoriz) {
1765  z = hist_A->GetBinContent(ix, iy + 1);
1766  } else {
1767  z = hist_A->GetBinContent(iy + 1, ix);
1768  }
1769  if (z != 0.0) {
1770  nonzeroA++;
1771  nonZeroY++;
1772  sum += z;
1773  }
1774  }
1775  // check whether there is any sensitivity to this generator bin
1776 
1777  if (nonZeroY) {
1778  // update mapping tables to convert bin number to matrix index
1779  fXToHist[nx] = ix;
1780  fHistToX[ix] = nx;
1781  // add overflow bins -> important to keep track of the
1782  // non-reconstructed events
1783  fSumOverY[nx] = sum;
1784  if (histmap == kHistMapOutputHoriz) {
1785  fSumOverY[nx] +=
1786  hist_A->GetBinContent(ix, 0) +
1787  hist_A->GetBinContent(ix, ny + 1);
1788  } else {
1789  fSumOverY[nx] +=
1790  hist_A->GetBinContent(0, ix) +
1791  hist_A->GetBinContent(ny + 1, ix);
1792  }
1793  nx++;
1794  } else {
1795  nskipped++;
1796  // histogram bin pointing to -1 (non-existing matrix entry)
1797  fHistToX[ix] = -1;
1798  }
1799  }
 // determine whether the mapped truth bins include the underflow
 // and/or overflow bin of hist_A
1800  Int_t underflowBin=0,overflowBin=0;
1801  for (Int_t ix = 0; ix < nx; ix++) {
1802  Int_t ibinx = fXToHist[ix];
1803  if(ibinx<1) underflowBin=1;
1804  if (histmap == kHistMapOutputHoriz) {
1805  if(ibinx>hist_A->GetNbinsX()) overflowBin=1;
1806  } else {
1807  if(ibinx>hist_A->GetNbinsY()) overflowBin=1;
1808  }
1809  }
 // report the skipped (disconnected) bins, compressing consecutive
 // bin numbers into ranges like "3-7"
1810  if(nskipped) {
1811  Int_t nprint=0;
1812  Int_t ixfirst=-1,ixlast=-1;
1813  TString binlist;
1814  int nDisconnected=0;
1815  for (Int_t ix = 0; ix < nx0; ix++) {
1816  if(fHistToX[ix]<0) {
1817  nprint++;
1818  if(ixlast<0) {
1819  binlist +=" ";
1820  binlist +=ix;
1821  ixfirst=ix;
1822  }
1823  ixlast=ix;
1824  nDisconnected++;
1825  }
1826  if(((fHistToX[ix]>=0)&&(ixlast>=0))||
1827  (nprint==nskipped)) {
1828  if(ixlast>ixfirst) {
1829  binlist += "-";
1830  binlist += ixlast;
1831  }
1832  ixfirst=-1;
1833  ixlast=-1;
1834  }
1835  if(nprint==nskipped) {
1836  break;
1837  }
1838  }
 // skipping exactly the unused underflow/overflow bins is expected
 // and harmless; anything else deserves a warning
1839  if(nskipped==(2-underflowBin-overflowBin)) {
1840  Info("TUnfold","underflow and overflow bin "
1841  "do not depend on the input data");
1842  } else {
1843  Warning("TUnfold","%d output bins "
1844  "do not depend on the input data %s",nDisconnected,
1845  static_cast<const char *>(binlist));
1846  }
1847  }
1848  // store bias as matrix
1849  fX0 = new TMatrixD(nx, 1, fSumOverY.GetArray());
1850  // store non-zero elements in sparse matrix fA
1851  // construct sparse matrix fA
1852  Int_t *rowA = new Int_t[nonzeroA];
1853  Int_t *colA = new Int_t[nonzeroA];
1854  Double_t *dataA = new Double_t[nonzeroA];
1855  Int_t index=0;
1856  for (Int_t iy = 0; iy < ny; iy++) {
1857  for (Int_t ix = 0; ix < nx; ix++) {
1858  Int_t ibinx = fXToHist[ix];
1859  Double_t z;
1860  if (histmap == kHistMapOutputHoriz) {
1861  z = hist_A->GetBinContent(ibinx, iy + 1);
1862  } else {
1863  z = hist_A->GetBinContent(iy + 1, ibinx);
1864  }
1865  if (z != 0.0) {
1866  rowA[index]=iy;
 // normalise each column to the truth-bin total, so fA holds
 // migration probabilities rather than event counts
1867  colA[index] = ix;
1868  dataA[index] = z / fSumOverY[ix];
1869  index++;
1870  }
1871  }
1872  }
1873  if(underflowBin && overflowBin) {
1874  Info("TUnfold","%d input bins and %d output bins (includes 2 underflow/overflow bins)",ny,nx);
1875  } else if(underflowBin) {
1876  Info("TUnfold","%d input bins and %d output bins (includes 1 underflow bin)",ny,nx);
1877  } else if(overflowBin) {
1878  Info("TUnfold","%d input bins and %d output bins (includes 1 overflow bin)",ny,nx);
1879  } else {
1880  Info("TUnfold","%d input bins and %d output bins",ny,nx);
1881  }
1882  fA = CreateSparseMatrix(ny,nx,index,rowA,colA,dataA);
 // fewer measurement bins than truth bins -> the system is
 // under-constrained and can not be unfolded
1883  if(ny<nx) {
1884  Error("TUnfold","too few (ny=%d) input bins for nx=%d output bins",ny,nx);
1885  } else if(ny==nx) {
1886  Warning("TUnfold","too few (ny=%d) input bins for nx=%d output bins",ny,nx);
1887  }
1888  delete[] rowA;
1889  delete[] colA;
1890  delete[] dataA;
1891  // regularisation conditions squared
1892  fL = new TMatrixDSparse(1, GetNx());
1893  if (regmode != kRegModeNone) {
1894  Int_t nError=RegularizeBins(0, 1, nx0, regmode);
1895  if(nError>0) {
1896  if(nError>1) {
1897  Info("TUnfold",
1898  "%d regularisation conditions have been skipped",nError);
1899  } else {
1900  Info("TUnfold",
1901  "One regularisation condition has been skipped");
1902  }
1903  }
1904  }
1905 }
1906 
1907 ////////////////////////////////////////////////////////////////////////////////
1908 /// Set bias vector.
1909 ///
1910 /// \param[in] bias histogram with new bias vector
1911 ///
1912 /// the initial bias vector is determined from the response matrix
1913 /// but may be changed by using this method
1914 
1915 void TUnfold::SetBias(const TH1 *bias)
1916 {
1917  DeleteMatrix(&fX0);
1918  fX0 = new TMatrixD(GetNx(), 1);
1919  for (Int_t i = 0; i < GetNx(); i++) {
1920  (*fX0) (i, 0) = bias->GetBinContent(fXToHist[i]);
1921  }
1922 }
1923 
1924 ////////////////////////////////////////////////////////////////////////////////
1925 /// Add a row of regularisation conditions to the matrix L.
1926 ///
1927 /// \param[in] i0 truth histogram bin number
1928 /// \param[in] f0 entry in the matrix L, column i0
1929 /// \param[in] i1 truth histogram bin number
1930 /// \param[in] f1 entry in the matrix L, column i1
1931 /// \param[in] i2 truth histogram bin number
1932 /// \param[in] f2 entry in the matrix L, column i2
1933 ///
1934 /// the arguments are used to form one row (k) of the matrix L, where
1935 /// \f$ L_{k,i0}=f0 \f$ and \f$ L_{k,i1}=f1 \f$ and \f$ L_{k,i2}=f2 \f$
1936 /// negative indexes i0,i1,i2 are ignored.
1937 
1940 {
1941  Int_t indices[3];
1942  Double_t data[3];
1943  Int_t nEle=0;
1944 
 // collect the valid (non-negative) bin indices together with their
 // coefficients; negative indices are ignored as documented above
1945  if(i2>=0) {
1946  data[nEle]=f2;
1947  indices[nEle]=i2;
1948  nEle++;
1949  }
1950  if(i1>=0) {
1951  data[nEle]=f1;
1952  indices[nEle]=i1;
1953  nEle++;
1954  }
1955  if(i0>=0) {
1956  data[nEle]=f0;
1957  indices[nEle]=i0;
1958  nEle++;
1959  }
 // delegate to the generic n-element overload, which appends one row to L
1960  return AddRegularisationCondition(nEle,indices,data);
1961 }
1962 
1963 ////////////////////////////////////////////////////////////////////////////////
1964 /// Add a row of regularisation conditions to the matrix L.
1965 ///
1966 /// \param[in] nEle number of valid entries in indices and rowData
1967 /// \param[in] indices column numbers of L to fill
1968 /// \param[in] rowData data to fill into the new row of L
1969 ///
1970 /// returns true if a row was added, false otherwise
1971 ///
1972 /// A new row k is added to the matrix L, its dimension is expanded.
1973 /// The new elements \f$ L_{ki} \f$ are filled from the array rowData[]
1974 /// where the indices i which are taken from the array indices[].
1975 
1977 (Int_t nEle,const Int_t *indices,const Double_t *rowData)
1978 {
1979  Bool_t r=kTRUE;
1980  const Int_t *l0_rows=fL->GetRowIndexArray();
1981  const Int_t *l0_cols=fL->GetColIndexArray();
1982  const Double_t *l0_data=fL->GetMatrixArray();
1983 
 // upper bound on the number of non-zero elements of the enlarged L
1984  Int_t nF=l0_rows[fL->GetNrows()]+nEle;
1985  Int_t *l_row=new Int_t[nF];
1986  Int_t *l_col=new Int_t[nF];
1987  Double_t *l_data=new Double_t[nF];
1988  // decode original matrix
1989  nF=0;
1990  for(Int_t row=0;row<fL->GetNrows();row++) {
1991  for(Int_t k=l0_rows[row];k<l0_rows[row+1];k++) {
1992  l_row[nF]=row;
1993  l_col[nF]=l0_cols[k];
1994  l_data[nF]=l0_data[k];
1995  nF++;
1996  }
1997  }
1998 
1999  // if the original matrix does not have any data, reset its row count
2000  Int_t rowMax=0;
2001  if(nF>0) {
2002  rowMax=fL->GetNrows();
2003  }
2004 
2005  // add the new regularisation conditions
2006  for(Int_t i=0;i<nEle;i++) {
 // map histogram bin to matrix column; -1 means the bin can not be
 // unfolded, so the whole condition is rejected
2007  Int_t col=fHistToX[indices[i]];
2008  if(col<0) {
2009  r=kFALSE;
2010  break;
2011  }
2012  l_row[nF]=rowMax;
2013  l_col[nF]=col;
2014  l_data[nF]=rowData[i];
2015  nF++;
2016  }
2017 
2018  // replace the old matrix fL
2019  if(r) {
2020  DeleteMatrix(&fL);
2021  fL=CreateSparseMatrix(rowMax+1,GetNx(),nF,l_row,l_col,l_data);
2022  }
2023  delete [] l_row;
2024  delete [] l_col;
2025  delete [] l_data;
2026  return r;
2027 }
2028 
2029 ////////////////////////////////////////////////////////////////////////////////
2030 /// Add a regularisation condition on the magnitude of a truth bin.
2031 ///
2032 /// \param[in] bin bin number
2033 /// \param[in] scale (default=1) scale factor
2034 ///
2035 /// this adds one row to L, where the element <b>bin</b> takes the
2036 /// value <b>scale</b>
2037 ///
2038 /// return value: 0 if ok, 1 if the condition has not been
2039 /// added. Conditions which are not added typically correspond to bin
2040 /// numbers where the truth can not be unfolded (either response
2041 /// matrix is empty or the data do not constrain).
2042 ///
2043 /// The RegularizeXXX() methods can be used to set up a custom matrix
2044 /// of regularisation conditions. In this case, start with an empty
2045 /// matrix L (argument regmode=kRegModeNone in the constructor)
2046 
2048 {
2049  // add regularisation on the size of bin i
2050  // bin: bin number
2051  // scale: size of the regularisation
2052  // return value: number of conditions which have been skipped
2053  // modifies data member fL
2054 
2057 
 // AddRegularisationCondition() returns kTRUE on success, so a
 // successfully added condition yields 0 (none skipped), failure yields 1
2058  return AddRegularisationCondition(bin,scale) ? 0 : 1;
2059 }
2060 
2061 ////////////////////////////////////////////////////////////////////////////////
2062 /// Add a regularisation condition on the difference of two truth bin.
2063 ///
2064 /// \param[in] left_bin bin number
2065 /// \param[in] right_bin bin number
2066 /// \param[in] scale (default=1) scale factor
2067 ///
2068 /// this adds one row to L, where the element <b>left_bin</b> takes the
2069 /// value <b>-scale</b> and the element <b>right_bin</b> takes the
2070 /// value <b>+scale</b>
2071 ///
2072 /// return value: 0 if ok, 1 if the condition has not been
2073 /// added. Conditions which are not added typically correspond to bin
2074 /// numbers where the truth can not be unfolded (either response
2075 /// matrix is empty or the data do not constrain).
2076 ///
2077 /// The RegularizeXXX() methods can be used to set up a custom matrix
2078 /// of regularisation conditions. In this case, start with an empty
2079 /// matrix L (argument regmode=kRegModeNone in the constructor)
2080 
2081 Int_t TUnfold::RegularizeDerivative(int left_bin, int right_bin,
2082  Double_t scale)
2083 {
2084  // add regularisation on the difference of two bins
2085  // left_bin: 1st bin
2086  // right_bin: 2nd bin
2087  // scale: size of the regularisation
2088  // return value: number of conditions which have been skipped
2089  // modifies data member fL
2090 
2093 
2094  return AddRegularisationCondition(left_bin,-scale,right_bin,scale) ? 0 : 1;
2095 }
2096 
2097 ////////////////////////////////////////////////////////////////////////////////
2098 /// Add a regularisation condition on the curvature of three truth bin.
2099 ///
2100 /// \param[in] left_bin bin number
2101 /// \param[in] center_bin bin number
2102 /// \param[in] right_bin bin number
2103 /// \param[in] scale_left (default=1) scale factor
2104 /// \param[in] scale_right (default=1) scale factor
2105 ///
2106 /// this adds one row to L, where the element <b>left_bin</b> takes the
2107 /// value <b>-scale_left</b>, the element <b>right_bin</b> takes the
2108 /// value <b>-scale_right</b> and the element <b>center_bin</b> takes
2109 /// the value <b>scale_left+scale_right</b>
2110 ///
2111 /// return value: 0 if ok, 1 if the condition has not been
2112 /// added. Conditions which are not added typically correspond to bin
2113 /// numbers where the truth can not be unfolded (either response
2114 /// matrix is empty or the data do not constrain).
2115 ///
2116 /// The RegularizeXXX() methods can be used to set up a custom matrix
2117 /// of regularisation conditions. In this case, start with an empty
2118 /// matrix L (argument regmode=kRegModeNone in the constructor)
2119 
2120 Int_t TUnfold::RegularizeCurvature(int left_bin, int center_bin,
2121  int right_bin,
2122  Double_t scale_left,
2123  Double_t scale_right)
2124 {
2125  // add regularisation on the curvature through 3 bins (2nd derivative)
2126  // left_bin: 1st bin
2127  // center_bin: 2nd bin
2128  // right_bin: 3rd bin
2129  // scale_left: scale factor on center-left difference
2130  // scale_right: scale factor on right-center difference
2131  // return value: number of conditions which have been skipped
2132  // modifies data member fL
2133 
2136 
2138  (left_bin,-scale_left,
2139  center_bin,scale_left+scale_right,
2140  right_bin,-scale_right)
2141  ? 0 : 1;
2142 }
2143 
2144 ////////////////////////////////////////////////////////////////////////////////
2145 /// Add regularisation conditions for a group of bins.
2146 ///
2147 /// \param[in] start first bin number
2148 /// \param[in] step step size
2149 /// \param[in] nbin number of bins
2150 /// \param[in] regmode regularisation mode (one of: kRegModeSize,
2151 /// kRegModeDerivative, kRegModeCurvature)
2152 ///
2153 /// add regularisation conditions for a group of equidistant
2154 /// bins. There are <b>nbin</b> bins, starting with bin <b>start</b>
2155 /// and with a distance of <b>step</b> between bins.
2156 ///
2157 /// Return value: number of regularisation conditions which could not
2158 /// be added.
2159 ///
2160 /// Conditions which are not added typically correspond to bin
2161 /// numbers where the truth can not be unfolded (either response
2162 /// matrix is empty or the data do not constrain).
2163 
2164 Int_t TUnfold::RegularizeBins(int start, int step, int nbin,
2165  ERegMode regmode)
2166 {
2167  // set regulatisation on a 1-dimensional curve
2168  // start: first bin
2169  // step: distance between neighbouring bins
2170  // nbin: total number of bins
2171  // regmode: regularisation mode
2172  // return value:
2173  // number of errors (i.e. conditions which have been skipped)
2174  // modifies data member fL
2175 
2176  Int_t i0, i1, i2;
2177  i0 = start;
2178  i1 = i0 + step;
2179  i2 = i1 + step;
2180  Int_t nSkip = 0;
2181  Int_t nError= 0;
2182  if (regmode == kRegModeDerivative) {
2183  nSkip = 1;
2184  } else if (regmode == kRegModeCurvature) {
2185  nSkip = 2;
2186  } else if(regmode != kRegModeSize) {
2187  Error("RegularizeBins","regmode = %d is not valid",regmode);
2188  }
2189  for (Int_t i = nSkip; i < nbin; i++) {
2190  if (regmode == kRegModeSize) {
2191  nError += RegularizeSize(i0);
2192  } else if (regmode == kRegModeDerivative) {
2193  nError += RegularizeDerivative(i0, i1);
2194  } else if (regmode == kRegModeCurvature) {
2195  nError += RegularizeCurvature(i0, i1, i2);
2196  }
2197  i0 = i1;
2198  i1 = i2;
2199  i2 += step;
2200  }
2201  return nError;
2202 }
2203 
2204 ////////////////////////////////////////////////////////////////////////////////
2205 /// Add regularisation conditions for 2d unfolding.
2206 ///
2207 /// \param[in] start_bin first bin number
2208 /// \param[in] step1 step size, 1st dimension
2209 /// \param[in] nbin1 number of bins, 1st dimension
2210 /// \param[in] step2 step size, 2nd dimension
2211 /// \param[in] nbin2 number of bins, 2nd dimension
2212 /// \param[in] regmode regularisation mode (one of: kRegModeSize,
2213 /// kRegModeDerivative, kRegModeCurvature)
2214 ///
2215 /// add regularisation conditions for a grid of bins. The start bin is
2216 /// <b>start_bin</b>. Along the first (second) dimension, there are
2217 /// <b>nbin1</b> (<b>nbin2</b>) bins and adjacent bins are spaced by
2218 /// <b>step1</b> (<b>step2</b>) units.
2219 ///
2220 /// Return value: number of regularisation conditions which could not
2221 /// be added. Conditions which are not added typically correspond to bin
2222 /// numbers where the truth can not be unfolded (either response
2223 /// matrix is empty or the data do not constrain).
2224 
2225 Int_t TUnfold::RegularizeBins2D(int start_bin, int step1, int nbin1,
2226  int step2, int nbin2, ERegMode regmode)
2227 {
2228  // set regularisation on a 2-dimensional grid of bins
2229  // start_bin: first bin
2230  // step1: distance between bins in 1st direction
2231  // nbin1: number of bins in 1st direction
2232  // step2: distance between bins in 2nd direction
2233  // nbin2: number of bins in 2nd direction
2234  // return value:
2235  // number of errors (i.e. conditions which have been skipped)
2236  // modifies data member fL
2237 
2238  Int_t nError = 0;
2239  for (Int_t i1 = 0; i1 < nbin1; i1++) {
2240  nError += RegularizeBins(start_bin + step1 * i1, step2, nbin2, regmode);
2241  }
2242  for (Int_t i2 = 0; i2 < nbin2; i2++) {
2243  nError += RegularizeBins(start_bin + step2 * i2, step1, nbin1, regmode);
2244  }
2245  return nError;
2246 }
2247 
2248 ////////////////////////////////////////////////////////////////////////////////
2249 /// Perform the unfolding for a given input and regularisation.
2250 ///
2251 /// \param[in] tau_reg regularisation parameter
2252 /// \param[in] input input distribution with uncertainties
2253 /// \param[in] scaleBias (default=0.0) scale factor applied to the bias
2254 ///
2255 /// This is a shortcut for `{ SetInput(input,scaleBias); DoUnfold(tau); }`
2256 ///
2257 /// Data members required:
2258 /// - fA, fX0, fL
2259 /// Data members modified:
2260 /// - those documented in SetInput()
2261 /// and those documented in DoUnfold(Double_t)
2262 /// Return value:
2263 /// - maximum global correlation coefficient
2264 /// NOTE!!! return value >=1.0 means error, and the result is junk
2265 ///
2266 /// Overflow bins of the input distribution are ignored!
2267 
2269  Double_t scaleBias)
2270 {
2271 
2272  SetInput(input,scaleBias);
2273  return DoUnfold(tau_reg);
2274 }
2275 
2276 ////////////////////////////////////////////////////////////////////////////////
2277 /// Define input data for subsequent calls to DoUnfold(tau).
2278 ///
2279 /// \param[in] input input distribution with uncertainties
2280 /// \param[in] scaleBias (default=0) scale factor applied to the bias
2281 /// \param[in] oneOverZeroError (default=0) for bins with zero error, this number defines 1/error.
2282 /// \param[in] hist_vyy (default=0) if non-zero, this defines the data covariance matrix
2283 /// \param[in] hist_vyy_inv (default=0) if non-zero and hist_vyy is
2284 /// set, defines the inverse of the data covariance matrix. This
2285 /// feature can be useful for repeated unfoldings in cases where the
2286 /// inversion of the input covariance matrix is lengthy
2287 ///
2288 /// Return value: nError1+10000*nError2
2289 ///
2290 /// - nError1: number of bins where the uncertainty is zero.
2291 /// these bins either are not used for the unfolding (if
2292 /// oneOverZeroError==0) or 1/uncertainty is set to oneOverZeroError.
2293 /// - nError2: return values>10000 are fatal errors, because the
2294 /// unfolding can not be done. The number nError2 corresponds to the
2295 /// number of truth bins which are not constrained by data points.
2296 ///
2297 /// Data members modified:
2298 /// - fY, fVyy, , fBiasScale
2299 /// Data members cleared
2300 /// - fVyyInv, fNdf
2301 /// - + see ClearResults
2302 
2303 Int_t TUnfold::SetInput(const TH1 *input, Double_t scaleBias,
2304  Double_t oneOverZeroError,const TH2 *hist_vyy,
2305  const TH2 *hist_vyy_inv)
2306 {
2308  fNdf=0;
2309 
2310  fBiasScale = scaleBias;
2311 
2312  // delete old results (if any)
2313  ClearResults();
2314 
2315  // construct error matrix and inverted error matrix of measured quantities
2316  // from errors of input histogram or use error matrix
2317 
2318  Int_t *rowVyyN=new Int_t[GetNy()*GetNy()+1];
2319  Int_t *colVyyN=new Int_t[GetNy()*GetNy()+1];
2320  Double_t *dataVyyN=new Double_t[GetNy()*GetNy()+1];
2321 
2322  Int_t *rowVyy1=new Int_t[GetNy()];
2323  Int_t *colVyy1=new Int_t[GetNy()];
2324  Double_t *dataVyy1=new Double_t[GetNy()];
2325  Double_t *dataVyyDiag=new Double_t[GetNy()];
2326 
2327  Int_t nVarianceZero=0;
2328  Int_t nVarianceForced=0;
2329  Int_t nVyyN=0;
2330  Int_t nVyy1=0;
2331  for (Int_t iy = 0; iy < GetNy(); iy++) {
2332  // diagonals
2333  Double_t dy2;
2334  if(!hist_vyy) {
2335  Double_t dy = input->GetBinError(iy + 1);
2336  dy2=dy*dy;
2337  if (dy2 <= 0.0) {
2338  if(oneOverZeroError>0.0) {
2339  dy2 = 1./ ( oneOverZeroError*oneOverZeroError);
2340  nVarianceForced++;
2341  } else {
2342  nVarianceZero++;
2343  }
2344  }
2345  } else {
2346  dy2 = hist_vyy->GetBinContent(iy+1,iy+1);
2347  if (dy2 <= 0.0) {
2348  nVarianceZero++;
2349  }
2350  }
2351  rowVyyN[nVyyN] = iy;
2352  colVyyN[nVyyN] = iy;
2353  rowVyy1[nVyy1] = iy;
2354  colVyy1[nVyy1] = 0;
2355  dataVyyDiag[iy] = dy2;
2356  if(dy2>0.0) {
2357  dataVyyN[nVyyN++] = dy2;
2358  dataVyy1[nVyy1++] = dy2;
2359  }
2360  }
2361  if(hist_vyy) {
2362  // non-diagonal elements
2363  Int_t nOffDiagNonzero=0;
2364  for (Int_t iy = 0; iy < GetNy(); iy++) {
2365  // ignore rows where the diagonal is zero
2366  if(dataVyyDiag[iy]<=0.0) {
2367  for (Int_t jy = 0; jy < GetNy(); jy++) {
2368  if(hist_vyy->GetBinContent(iy+1,jy+1)!=0.0) {
2369  nOffDiagNonzero++;
2370  }
2371  }
2372  continue;
2373  }
2374  for (Int_t jy = 0; jy < GetNy(); jy++) {
2375  // skip diagonal elements
2376  if(iy==jy) continue;
2377  // ignore columns where the diagonal is zero
2378  if(dataVyyDiag[jy]<=0.0) continue;
2379 
2380  rowVyyN[nVyyN] = iy;
2381  colVyyN[nVyyN] = jy;
2382  dataVyyN[nVyyN]= hist_vyy->GetBinContent(iy+1,jy+1);
2383  if(dataVyyN[nVyyN] == 0.0) continue;
2384  nVyyN ++;
2385  }
2386  }
2387  if(hist_vyy_inv) {
2388  Warning("SetInput",
2389  "inverse of input covariance is taken from user input");
2390  Int_t *rowVyyInv=new Int_t[GetNy()*GetNy()+1];
2391  Int_t *colVyyInv=new Int_t[GetNy()*GetNy()+1];
2392  Double_t *dataVyyInv=new Double_t[GetNy()*GetNy()+1];
2393  Int_t nVyyInv=0;
2394  for (Int_t iy = 0; iy < GetNy(); iy++) {
2395  for (Int_t jy = 0; jy < GetNy(); jy++) {
2396  rowVyyInv[nVyyInv] = iy;
2397  colVyyInv[nVyyInv] = jy;
2398  dataVyyInv[nVyyInv]= hist_vyy_inv->GetBinContent(iy+1,jy+1);
2399  if(dataVyyInv[nVyyInv] == 0.0) continue;
2400  nVyyInv ++;
2401  }
2402  }
2404  (GetNy(),GetNy(),nVyyInv,rowVyyInv,colVyyInv,dataVyyInv);
2405  delete [] rowVyyInv;
2406  delete [] colVyyInv;
2407  delete [] dataVyyInv;
2408  } else {
2409  if(nOffDiagNonzero) {
2410  Error("SetInput",
2411  "input covariance has elements C(X,Y)!=0 where V(X)==0");
2412  }
2413  }
2414  }
2415  DeleteMatrix(&fVyy);
2417  (GetNy(),GetNy(),nVyyN,rowVyyN,colVyyN,dataVyyN);
2418 
2419  delete[] rowVyyN;
2420  delete[] colVyyN;
2421  delete[] dataVyyN;
2422 
2424  (GetNy(),1,nVyy1,rowVyy1,colVyy1, dataVyy1);
2425 
2426  delete[] rowVyy1;
2427  delete[] colVyy1;
2428  delete[] dataVyy1;
2429 
2430  //
2431  // get input vector
2432  DeleteMatrix(&fY);
2433  fY = new TMatrixD(GetNy(), 1);
2434  for (Int_t i = 0; i < GetNy(); i++) {
2435  (*fY) (i, 0) = input->GetBinContent(i + 1);
2436  }
2437  // simple check whether unfolding is possible, given the matrices fA and fV
2439  DeleteMatrix(&vecV);
2440  Int_t nError2=0;
2441  for (Int_t i = 0; i <mAtV->GetNrows();i++) {
2442  if(mAtV->GetRowIndexArray()[i]==
2443  mAtV->GetRowIndexArray()[i+1]) {
2444  nError2 ++;
2445  }
2446  }
2447  if(nVarianceForced) {
2448  if(nVarianceForced>1) {
2449  Warning("SetInput","%d/%d input bins have zero error,"
2450  " 1/error set to %lf.",
2451  nVarianceForced,GetNy(),oneOverZeroError);
2452  } else {
2453  Warning("SetInput","One input bin has zero error,"
2454  " 1/error set to %lf.",oneOverZeroError);
2455  }
2456  }
2457  if(nVarianceZero) {
2458  if(nVarianceZero>1) {
2459  Warning("SetInput","%d/%d input bins have zero error,"
2460  " and are ignored.",nVarianceZero,GetNy());
2461  } else {
2462  Warning("SetInput","One input bin has zero error,"
2463  " and is ignored.");
2464  }
2465  fIgnoredBins=nVarianceZero;
2466  }
2467  if(nError2>0) {
2468  // check whether data points with zero error are responsible
2469  if(oneOverZeroError<=0.0) {
2470  //const Int_t *a_rows=fA->GetRowIndexArray();
2471  //const Int_t *a_cols=fA->GetColIndexArray();
2472  for (Int_t col = 0; col <mAtV->GetNrows();col++) {
2473  if(mAtV->GetRowIndexArray()[col]==
2474  mAtV->GetRowIndexArray()[col+1]) {
2475  TString binlist("no data to constrain output bin ");
2476  binlist += GetOutputBinName(fXToHist[col]);
2477  /* binlist +=" depends on ignored input bins ";
2478  for(Int_t row=0;row<fA->GetNrows();row++) {
2479  if(dataVyyDiag[row]>0.0) continue;
2480  for(Int_t i=a_rows[row];i<a_rows[row+1];i++) {
2481  if(a_cols[i]!=col) continue;
2482  binlist +=" ";
2483  binlist +=row;
2484  }
2485  } */
2486  Warning("SetInput","%s",(char const *)binlist);
2487  }
2488  }
2489  }
2490  if(nError2>1) {
2491  Error("SetInput","%d/%d output bins are not constrained by any data.",
2492  nError2,mAtV->GetNrows());
2493  } else {
2494  Error("SetInput","One output bins is not constrained by any data.");
2495  }
2496  }
2497  DeleteMatrix(&mAtV);
2498 
2499  delete[] dataVyyDiag;
2500 
2501  return nVarianceForced+nVarianceZero+10000*nError2;
2502 }
2503 
2504 ////////////////////////////////////////////////////////////////////////////////
2505 /// Perform the unfolding for a given regularisation parameter tau.
2506 ///
2507 /// \param[in] tau regularisation parameter
2508 ///
2509 /// This method sets tau and then calls the core unfolding algorithm
2510 /// required data members:
2511 /// - fA: matrix to relate x and y
2512 /// - fY: measured data points
2513 /// - fX0: bias on x
2514 /// - fBiasScale: scale factor for fX0
2515 /// - fV: inverse of covariance matrix for y
2516 /// - fL: regularisation conditions
2517 /// modified data members:
2518 /// - fTauSquared and those documented in DoUnfold(void)
2519 
2521 {
2522  fTauSquared=tau*tau;
2523  return DoUnfold();
2524 }
2525 
2526 ////////////////////////////////////////////////////////////////////////////////
2527 /// Scan the L curve, determine tau and unfold at the final value of tau.
2528 ///
2529 /// \param[in] nPoint number of points used for the scan
2530 /// \param[in] tauMin smallest tau value to study
2531 /// \param[in] tauMax largest tau value to study. If tauMin=tauMax=0,
2532 /// a scan interval is determined automatically.
2533 /// \param[out] lCurve if nonzero, a new TGraph is returned,
2534 /// containing the L-curve
2535 /// \param[out] logTauX if nonzero, a new TSpline is returned, to
2536 /// parameterize the L-curve's x-coordinates as a function of log10(tau)
2537 /// \param[out] logTauY if nonzero, a new TSpline is returned, to
2538 /// parameterize the L-curve's y-coordinates as a function of log10(tau)
2539 /// \param[out] logTauCurvature if nonzero, a new TSpline is returned
2540 /// of the L-curve curvature as a function of log10(tau)
2541 ///
2542 /// return value: the coordinate number in the logTauX,logTauY graphs
2543 /// corresponding to the "final" choice of tau
2544 ///
2545 /// Recommendation: always check <b>logTauCurvature</b>, it
2546 /// should be a peaked function (similar to a Gaussian), the maximum
2547 /// corresponding to the final choice of tau. Also, check the <b>lCurve</b>
2548 /// it should be approximately L-shaped. If in doubt, adjust tauMin
2549 /// and tauMax until the results are satisfactory.
2550 
2552  Double_t tauMin,Double_t tauMax,
2553  TGraph **lCurve,TSpline **logTauX,
2554  TSpline **logTauY,TSpline **logTauCurvature)
2555 {
2556  typedef std::map<Double_t,std::pair<Double_t,Double_t> > XYtau_t;
2557  XYtau_t curve;
2558 
2559  //==========================================================
2560  // algorithm:
2561  // (1) do the unfolding for nPoint-1 points
2562  // and store the results in the map
2563  // curve
2564  // (1a) store minimum and maximum tau to curve
2565  // (1b) insert additional points, until nPoint-1 values
2566  // have been calculated
2567  //
2568  // (2) determine the best choice of tau
2569  // do the unfolding for this point
2570  // and store the result in
2571  // curve
2572  // (3) return the result in
2573  // lCurve logTauX logTauY
2574 
2575  //==========================================================
2576  // (1) do the unfolding for nPoint-1 points
2577  // and store the results in
2578  // curve
2579  // (1a) store minimum and maximum tau to curve
2580 
2581  if((tauMin<=0)||(tauMax<=0.0)||(tauMin>=tauMax)) {
2582  // here no range is given, has to be determined automatically
2583  // the maximum tau is determined from the chi**2 values
2584  // observed from unfolding without regulatisation
2585 
2586  // first unfolding, without regularisation
2587  DoUnfold(0.0);
2588 
2589  // if the number of degrees of freedom is too small, create an error
2590  if(GetNdf()<=0) {
2591  Error("ScanLcurve","too few input bins, NDF<=0 %d",GetNdf());
2592  }
2593 
2594  Double_t x0=GetLcurveX();
2595  Double_t y0=GetLcurveY();
2596  Info("ScanLcurve","logtau=-Infinity X=%lf Y=%lf",x0,y0);
2597  if(!TMath::Finite(x0)) {
2598  Fatal("ScanLcurve","problem (too few input bins?) X=%f",x0);
2599  }
2600  if(!TMath::Finite(y0)) {
2601  Fatal("ScanLcurve","problem (missing regularisation?) Y=%f",y0);
2602  }
2603  {
2604  // unfolding guess maximum tau and store it
2605  Double_t logTau=
2606  0.5*(TMath::Log10(fChi2A+3.*TMath::Sqrt(GetNdf()+1.0))
2607  -GetLcurveY());
2608  DoUnfold(TMath::Power(10.,logTau));
2609  if((!TMath::Finite(GetLcurveX())) ||(!TMath::Finite(GetLcurveY()))) {
2610  Fatal("ScanLcurve","problem (missing regularisation?) X=%f Y=%f",
2611  GetLcurveX(),GetLcurveY());
2612  }
2613  curve[logTau]=std::make_pair(GetLcurveX(),GetLcurveY());
2614  Info("ScanLcurve","logtau=%lf X=%lf Y=%lf",
2615  logTau,GetLcurveX(),GetLcurveY());
2616  }
2617  if((*curve.begin()).second.first<x0) {
2618  // if the point at tau==0 seems numerically unstable,
2619  // try to find the minimum chi**2 as start value
2620  //
2621  // "unstable" means that there is a finite tau where the
2622  // unfolding chi**2 is smaller than for the case of no
2623  // regularisation. Ideally this should never happen
2624  do {
2625  x0=GetLcurveX();
2626  Double_t logTau=(*curve.begin()).first-0.5;
2627  DoUnfold(TMath::Power(10.,logTau));
2628  if((!TMath::Finite(GetLcurveX())) ||(!TMath::Finite(GetLcurveY()))) {
2629  Fatal("ScanLcurve","problem (missing regularisation?) X=%f Y=%f",
2630  GetLcurveX(),GetLcurveY());
2631  }
2632  curve[logTau]=std::make_pair(GetLcurveX(),GetLcurveY());
2633  Info("ScanLcurve","logtau=%lf X=%lf Y=%lf",
2634  logTau,GetLcurveX(),GetLcurveY());
2635  }
2636  while(((int)curve.size()<(nPoint-1)/2)&&
2637  ((*curve.begin()).second.first<x0));
2638  } else {
2639  // minimum tau is chosen such that is less than
2640  // 1% different from the case of no regularization
2641  // log10(1.01) = 0.00432
2642 
2643  // here, more than one point are inserted if necessary
2644  while(((int)curve.size()<nPoint-1)&&
2645  (((*curve.begin()).second.first-x0>0.00432)||
2646  ((*curve.begin()).second.second-y0>0.00432)||
2647  (curve.size()<2))) {
2648  Double_t logTau=(*curve.begin()).first-0.5;
2649  DoUnfold(TMath::Power(10.,logTau));
2650  if((!TMath::Finite(GetLcurveX())) ||(!TMath::Finite(GetLcurveY()))) {
2651  Fatal("ScanLcurve","problem (missing regularisation?) X=%f Y=%f",
2652  GetLcurveX(),GetLcurveY());
2653  }
2654  curve[logTau]=std::make_pair(GetLcurveX(),GetLcurveY());
2655  Info("ScanLcurve","logtau=%lf X=%lf Y=%lf",
2656  logTau,GetLcurveX(),GetLcurveY());
2657  }
2658  }
2659  } else {
2660  Double_t logTauMin=TMath::Log10(tauMin);
2661  Double_t logTauMax=TMath::Log10(tauMax);
2662  if(nPoint>1) {
2663  // insert maximum tau
2664  DoUnfold(TMath::Power(10.,logTauMax));
2665  if((!TMath::Finite(GetLcurveX())) ||(!TMath::Finite(GetLcurveY()))) {
2666  Fatal("ScanLcurve","problem (missing regularisation?) X=%f Y=%f",
2667  GetLcurveX(),GetLcurveY());
2668  }
2669  Info("ScanLcurve","logtau=%lf X=%lf Y=%lf",
2670  logTauMax,GetLcurveX(),GetLcurveY());
2671  curve[logTauMax]=std::make_pair(GetLcurveX(),GetLcurveY());
2672  }
2673  // insert minimum tau
2674  DoUnfold(TMath::Power(10.,logTauMin));
2675  if((!TMath::Finite(GetLcurveX())) ||(!TMath::Finite(GetLcurveY()))) {
2676  Fatal("ScanLcurve","problem (missing regularisation?) X=%f Y=%f",
2677  GetLcurveX(),GetLcurveY());
2678  }
2679  Info("ScanLcurve","logtau=%lf X=%lf Y=%lf",
2680  logTauMin,GetLcurveX(),GetLcurveY());
2681  curve[logTauMin]=std::make_pair(GetLcurveX(),GetLcurveY());
2682  }
2683 
2684 
2685  //==========================================================
2686  // (1b) insert additional points, until nPoint-1 values
2687  // have been calculated
2688 
2689  while(int(curve.size())<nPoint-1) {
2690  // insert additional points, such that the sizes of the delta(XY) vectors
2691  // are getting smaller and smaller
2692  XYtau_t::const_iterator i0,i1;
2693  i0=curve.begin();
2694  i1=i0;
2695  Double_t logTau=(*i0).first;
2696  Double_t distMax=0.0;
2697  for(i1++;i1!=curve.end();i1++) {
2698  const std::pair<Double_t,Double_t> &xy0=(*i0).second;
2699  const std::pair<Double_t,Double_t> &xy1=(*i1).second;
2700  Double_t dx=xy1.first-xy0.first;
2701  Double_t dy=xy1.second-xy0.second;
2702  Double_t d=TMath::Sqrt(dx*dx+dy*dy);
2703  if(d>=distMax) {
2704  distMax=d;
2705  logTau=0.5*((*i0).first+(*i1).first);
2706  }
2707  i0=i1;
2708  }
2709  DoUnfold(TMath::Power(10.,logTau));
2710  if((!TMath::Finite(GetLcurveX())) ||(!TMath::Finite(GetLcurveY()))) {
2711  Fatal("ScanLcurve","problem (missing regularisation?) X=%f Y=%f",
2712  GetLcurveX(),GetLcurveY());
2713  }
2714  Info("ScanLcurve","logtau=%lf X=%lf Y=%lf",logTau,GetLcurveX(),GetLcurveY());
2715  curve[logTau]=std::make_pair(GetLcurveX(),GetLcurveY());
2716  }
2717 
2718  //==========================================================
2719  // (2) determine the best choice of tau
2720  // do the unfolding for this point
2721  // and store the result in
2722  // curve
2723  XYtau_t::const_iterator i0,i1;
2724  i0=curve.begin();
2725  i1=i0;
2726  i1++;
2727  Double_t logTauFin=(*i0).first;
2728  if( ((int)curve.size())<nPoint) {
2729  // set up splines and determine (x,y) curvature in each point
2730  Double_t *cTi=new Double_t[curve.size()-1]();
2731  Double_t *cCi=new Double_t[curve.size()-1]();
2732  Int_t n=0;
2733  {
2734  Double_t *lXi=new Double_t[curve.size()]();
2735  Double_t *lYi=new Double_t[curve.size()]();
2736  Double_t *lTi=new Double_t[curve.size()]();
2737  for( XYtau_t::const_iterator i=curve.begin();i!=curve.end();i++) {
2738  lXi[n]=(*i).second.first;
2739  lYi[n]=(*i).second.second;
2740  lTi[n]=(*i).first;
2741  n++;
2742  }
2743  TSpline3 *splineX=new TSpline3("x vs tau",lTi,lXi,n);
2744  TSpline3 *splineY=new TSpline3("y vs tau",lTi,lYi,n);
2745  // calculate (x,y) curvature for all points
2746  // the curvature is stored in the array cCi[] as a function of cTi[]
2747  for(Int_t i=0;i<n-1;i++) {
2748  Double_t ltau,xy,bi,ci,di;
2749  splineX->GetCoeff(i,ltau,xy,bi,ci,di);
2750  Double_t tauBar=0.5*(lTi[i]+lTi[i+1]);
2751  Double_t dTau=0.5*(lTi[i+1]-lTi[i]);
2752  Double_t dx1=bi+dTau*(2.*ci+3.*di*dTau);
2753  Double_t dx2=2.*ci+6.*di*dTau;
2754  splineY->GetCoeff(i,ltau,xy,bi,ci,di);
2755  Double_t dy1=bi+dTau*(2.*ci+3.*di*dTau);
2756  Double_t dy2=2.*ci+6.*di*dTau;
2757  cTi[i]=tauBar;
2758  cCi[i]=(dy2*dx1-dy1*dx2)/TMath::Power(dx1*dx1+dy1*dy1,1.5);
2759  }
2760  delete splineX;
2761  delete splineY;
2762  delete[] lXi;
2763  delete[] lYi;
2764  delete[] lTi;
2765  }
2766  // create curvature Spline
2767  TSpline3 *splineC=new TSpline3("L curve curvature",cTi,cCi,n-1);
2768  // find the maximum of the curvature
2769  // if the parameter iskip is non-zero, then iskip points are
2770  // ignored when looking for the largest curvature
2771  // (there are problems with the curvature determined from the first
2772  // few points of splineX,splineY in the algorithm above)
2773  Int_t iskip=0;
2774  if(n>4) iskip=1;
2775  if(n>7) iskip=2;
2776  Double_t cCmax=cCi[iskip];
2777  Double_t cTmax=cTi[iskip];
2778  for(Int_t i=iskip;i<n-2-iskip;i++) {
2779  // find maximum on this spline section
2780  // check boundary conditions for x[i+1]
2781  Double_t xMax=cTi[i+1];
2782  Double_t yMax=cCi[i+1];
2783  if(cCi[i]>yMax) {
2784  yMax=cCi[i];
2785  xMax=cTi[i];
2786  }
2787  // find maximum for x[i]<x<x[i+1]
2788  // get spline coefficients and solve equation
2789  // derivative(x)==0
2790  Double_t x,y,b,c,d;
2791  splineC->GetCoeff(i,x,y,b,c,d);
2792  // coefficients of quadratic equation
2793  Double_t m_p_half=-c/(3.*d);
2794  Double_t q=b/(3.*d);
2795  Double_t discr=m_p_half*m_p_half-q;
2796  if(discr>=0.0) {
2797  // solution found
2798  discr=TMath::Sqrt(discr);
2799  Double_t xx;
2800  if(m_p_half>0.0) {
2801  xx = m_p_half + discr;
2802  } else {
2803  xx = m_p_half - discr;
2804  }
2805  Double_t dx=cTi[i+1]-x;
2806  // check first solution
2807  if((xx>0.0)&&(xx<dx)) {
2808  y=splineC->Eval(x+xx);
2809  if(y>yMax) {
2810  yMax=y;
2811  xMax=x+xx;
2812  }
2813  }
2814  // second solution
2815  if(xx !=0.0) {
2816  xx= q/xx;
2817  } else {
2818  xx=0.0;
2819  }
2820  // check second solution
2821  if((xx>0.0)&&(xx<dx)) {
2822  y=splineC->Eval(x+xx);
2823  if(y>yMax) {
2824  yMax=y;
2825  xMax=x+xx;
2826  }
2827  }
2828  }
2829  // check whether this local minimum is a global minimum
2830  if(yMax>cCmax) {
2831  cCmax=yMax;
2832  cTmax=xMax;
2833  }
2834  }
2835  if(logTauCurvature) {
2836  *logTauCurvature=splineC;
2837  } else {
2838  delete splineC;
2839  }
2840  delete[] cTi;
2841  delete[] cCi;
2842  logTauFin=cTmax;
2843  DoUnfold(TMath::Power(10.,logTauFin));
2844  if((!TMath::Finite(GetLcurveX())) ||(!TMath::Finite(GetLcurveY()))) {
2845  Fatal("ScanLcurve","problem (missing regularisation?) X=%f Y=%f",
2846  GetLcurveX(),GetLcurveY());
2847  }
2848  Info("ScanLcurve","Result logtau=%lf X=%lf Y=%lf",
2849  logTauFin,GetLcurveX(),GetLcurveY());
2850  curve[logTauFin]=std::make_pair(GetLcurveX(),GetLcurveY());
2851  }
2852 
2853 
2854  //==========================================================
2855  // (3) return the result in
2856  // lCurve logTauX logTauY
2857 
2858  Int_t bestChoice=-1;
2859  if(curve.size()>0) {
2860  Double_t *x=new Double_t[curve.size()]();
2861  Double_t *y=new Double_t[curve.size()]();
2862  Double_t *logT=new Double_t[curve.size()]();
2863  int n=0;
2864  for( XYtau_t::const_iterator i=curve.begin();i!=curve.end();i++) {
2865  if(logTauFin==(*i).first) {
2866  bestChoice=n;
2867  }
2868  x[n]=(*i).second.first;
2869  y[n]=(*i).second.second;
2870  logT[n]=(*i).first;
2871  n++;
2872  }
2873  if(lCurve) {
2874  (*lCurve)=new TGraph(n,x,y);
2875  (*lCurve)->SetTitle("L curve");
2876  }
2877  if(logTauX) (*logTauX)=new TSpline3("log(chi**2)%log(tau)",logT,x,n);
2878  if(logTauY) (*logTauY)=new TSpline3("log(reg.cond)%log(tau)",logT,y,n);
2879  delete[] x;
2880  delete[] y;
2881  delete[] logT;
2882  }
2883 
2884  return bestChoice;
2885 }
2886 
2887 ////////////////////////////////////////////////////////////////////////////////
2888 /// Histogram of truth bins, determined from summing over the response matrix.
2889 ///
2890 /// \param[out] out histogram to store the truth bins. The bin contents
2891 /// are overwritten
2892 /// \param[in] binMap (default=0) array for mapping truth bins to histogram bins
2893 ///
2894 /// This vector is also used to initialize the bias
2895 /// x_{0}. However, the bias vector may be changed using the
2896 /// SetBias() method.
2897 ///
2898 /// The use of <b>binMap</b> is explained with the documentation of
2899 /// the GetOutput() method.
2900 
2901 void TUnfold::GetNormalisationVector(TH1 *out,const Int_t *binMap) const
2902 {
2903 
2904  ClearHistogram(out);
2905  for (Int_t i = 0; i < GetNx(); i++) {
2906  int dest=binMap ? binMap[fXToHist[i]] : fXToHist[i];
2907  if(dest>=0) {
2908  out->SetBinContent(dest, fSumOverY[i] + out->GetBinContent(dest));
2909  }
2910  }
2911 }
2912 
2913 ////////////////////////////////////////////////////////////////////////////////
2914 /// Get bias vector including bias scale.
2915 ///
2916 /// \param[out] out histogram to store the scaled bias vector. The bin
2917 /// contents are overwritten
2918 /// \param[in] binMap (default=0) array for mapping truth bins to histogram bins
2919 ///
2920 /// This method returns the bias vector times scaling factor, f*x_{0}
2921 ///
2922 /// The use of <b>binMap</b> is explained with the documentation of
2923 /// the GetOutput() method
2924 
2925 void TUnfold::GetBias(TH1 *out,const Int_t *binMap) const
2926 {
2927 
2928  ClearHistogram(out);
2929  for (Int_t i = 0; i < GetNx(); i++) {
2930  int dest=binMap ? binMap[fXToHist[i]] : fXToHist[i];
2931  if(dest>=0) {
2932  out->SetBinContent(dest, fBiasScale*(*fX0) (i, 0) +
2933  out->GetBinContent(dest));
2934  }
2935  }
2936 }
2937 
2938 ////////////////////////////////////////////////////////////////////////////////
2939 /// Get unfolding result on detector level.
2940 ///
2941 /// \param[out] out histogram to store the correlation coefficients. The bin
2942 /// contents and errors are overwritten.
2943 /// \param[in] binMap (default=0) array for mapping truth bins to histogram bins
2944 ///
2945 /// This method returns the unfolding output folded by the response
2946 /// matrix, i.e. the vector Ax.
2947 ///
2948 /// The use of <b>binMap</b> is explained with the documentation of
2949 /// the GetOutput() method
2950 
2951 void TUnfold::GetFoldedOutput(TH1 *out,const Int_t *binMap) const
2952 {
2953  ClearHistogram(out);
2954 
2956 
2957  const Int_t *rows_A=fA->GetRowIndexArray();
2958  const Int_t *cols_A=fA->GetColIndexArray();
2959  const Double_t *data_A=fA->GetMatrixArray();
2960  const Int_t *rows_AVxx=AVxx->GetRowIndexArray();
2961  const Int_t *cols_AVxx=AVxx->GetColIndexArray();
2962  const Double_t *data_AVxx=AVxx->GetMatrixArray();
2963 
2964  for (Int_t i = 0; i < GetNy(); i++) {
2965  Int_t destI = binMap ? binMap[i+1] : i+1;
2966  if(destI<0) continue;
2967 
2968  out->SetBinContent(destI, (*fAx) (i, 0)+ out->GetBinContent(destI));
2969  Double_t e2=0.0;
2970  Int_t index_a=rows_A[i];
2971  Int_t index_av=rows_AVxx[i];
2972  while((index_a<rows_A[i+1])&&(index_av<rows_AVxx[i+1])) {
2973  Int_t j_a=cols_A[index_a];
2974  Int_t j_av=cols_AVxx[index_av];
2975  if(j_a<j_av) {
2976  index_a++;
2977  } else if(j_a>j_av) {
2978  index_av++;
2979  } else {
2980  e2 += data_AVxx[index_av] * data_A[index_a];
2981  index_a++;
2982  index_av++;
2983  }
2984  }
2985  out->SetBinError(destI,TMath::Sqrt(e2));
2986  }
2987  DeleteMatrix(&AVxx);
2988 }
2989 
2990 ////////////////////////////////////////////////////////////////////////////////
2991 /// Get matrix of probabilities.
2992 ///
2993 /// \param[out] A two-dimensional histogram to store the
2994 /// probabilities (normalized response matrix). The bin contents are
2995 /// overwritten
2996 /// \param[in] histmap specify axis along which the truth bins are
2997 /// oriented
2998 
3000 {
3001  // retrieve matrix of probabilities
3002  // histmap: on which axis to arrange the input/output vector
3003  // A: histogram to store the probability matrix
3004  const Int_t *rows_A=fA->GetRowIndexArray();
3005  const Int_t *cols_A=fA->GetColIndexArray();
3006  const Double_t *data_A=fA->GetMatrixArray();
3007  for (Int_t iy = 0; iy <fA->GetNrows(); iy++) {
3008  for(Int_t indexA=rows_A[iy];indexA<rows_A[iy+1];indexA++) {
3009  Int_t ix = cols_A[indexA];
3010  Int_t ih=fXToHist[ix];
3011  if (histmap == kHistMapOutputHoriz) {
3012  A->SetBinContent(ih, iy+1,data_A[indexA]);
3013  } else {
3014  A->SetBinContent(iy+1, ih,data_A[indexA]);
3015  }
3016  }
3017  }
3018 }
3019 
3020 ////////////////////////////////////////////////////////////////////////////////
3021 /// Input vector of measurements
3022 ///
3023 /// \param[out] out histogram to store the measurements. Bin content
3024 /// and bin errors are overwrite.
3025 /// \param[in] binMap (default=0) array for mapping truth bins to histogram bins
3026 ///
3027 /// Bins which had an uncertainty of zero in the call to SetInput()
3028 /// may acquire bin contents or bin errors different from the
3029 /// original settings in SetInput().
3030 ///
3031 /// The use of <b>binMap</b> is explained with the documentation of
3032 /// the GetOutput() method
3033 
3034 void TUnfold::GetInput(TH1 *out,const Int_t *binMap) const
3035 {
3036  ClearHistogram(out);
3037 
3038  const Int_t *rows_Vyy=fVyy->GetRowIndexArray();
3039  const Int_t *cols_Vyy=fVyy->GetColIndexArray();
3040  const Double_t *data_Vyy=fVyy->GetMatrixArray();
3041 
3042  for (Int_t i = 0; i < GetNy(); i++) {
3043  Int_t destI=binMap ? binMap[i+1] : i+1;
3044  if(destI<0) continue;
3045 
3046  out->SetBinContent(destI, (*fY) (i, 0)+out->GetBinContent(destI));
3047 
3048  Double_t e=0.0;
3049  for(int index=rows_Vyy[i];index<rows_Vyy[i+1];index++) {
3050  if(cols_Vyy[index]==i) {
3051  e=TMath::Sqrt(data_Vyy[index]);
3052  }
3053  }
3054  out->SetBinError(destI, e);
3055  }
3056 }
3057 
3058 ////////////////////////////////////////////////////////////////////////////////
3059 /// Get inverse of the measurement's covariance matrix.
3060 ///
3061 /// \param[out] out histogram to store the inverted covariance
3062 
3064 {
3065  // calculate the inverse of the contribution to the error matrix
3066  // corresponding to the input data
3067  if(!fVyyInv) {
3068  Int_t rank=0;
3070  // and count number of degrees of freedom
3071  fNdf = rank-GetNpar();
3072 
3073  if(rank<GetNy()-fIgnoredBins) {
3074  Warning("GetInputInverseEmatrix","input covariance matrix has rank %d expect %d",
3075  rank,GetNy());
3076  }
3077  if(fNdf<0) {
3078  Error("GetInputInverseEmatrix","number of parameters %d > %d (rank of input covariance). Problem can not be solved",GetNpar(),rank);
3079  } else if(fNdf==0) {
3080  Warning("GetInputInverseEmatrix","number of parameters %d = input rank %d. Problem is ill posed",GetNpar(),rank);
3081  }
3082  }
3083  if(out) {
3084  // return matrix as histogram
3085  const Int_t *rows_VyyInv=fVyyInv->GetRowIndexArray();
3086  const Int_t *cols_VyyInv=fVyyInv->GetColIndexArray();
3087  const Double_t *data_VyyInv=fVyyInv->GetMatrixArray();
3088 
3089  for(int i=0;i<=out->GetNbinsX()+1;i++) {
3090  for(int j=0;j<=out->GetNbinsY()+1;j++) {
3091  out->SetBinContent(i,j,0.);
3092  }
3093  }
3094 
3095  for (Int_t i = 0; i < fVyyInv->GetNrows(); i++) {
3096  for(int index=rows_VyyInv[i];index<rows_VyyInv[i+1];index++) {
3097  Int_t j=cols_VyyInv[index];
3098  out->SetBinContent(i+1,j+1,data_VyyInv[index]);
3099  }
3100  }
3101  }
3102 }
3103 
3104 ////////////////////////////////////////////////////////////////////////////////
3105 /// Get matrix of regularisation conditions squared.
3106 ///
3107 /// \param[out] out histogram to store the squared matrix of
3108 /// regularisation conditions. the bin contents are overwritten
3109 ///
3110 /// This returns the square matrix L^{T}L as a histogram
3111 ///
3112 /// The histogram should have dimension nx times nx, where nx
3113 /// corresponds to the number of histogram bins in the response matrix
3114 /// along the truth axis.
3115 
3116 void TUnfold::GetLsquared(TH2 *out) const
3117 {
3118  // retrieve matrix of regularisation conditions squared
3119  // out: pre-booked matrix
3120 
3122  // loop over sparse matrix
3123  const Int_t *rows=lSquared->GetRowIndexArray();
3124  const Int_t *cols=lSquared->GetColIndexArray();
3125  const Double_t *data=lSquared->GetMatrixArray();
3126  for (Int_t i = 0; i < GetNx(); i++) {
3127  for (Int_t cindex = rows[i]; cindex < rows[i+1]; cindex++) {
3128  Int_t j=cols[cindex];
3129  out->SetBinContent(fXToHist[i], fXToHist[j],data[cindex]);
3130  }
3131  }
3132  DeleteMatrix(&lSquared);
3133 }
3134 
3135 ////////////////////////////////////////////////////////////////////////////////
3136 /// Get number of regularisation conditions.
3137 ///
3138 /// This returns the number of regularisation conditions, useful for
3139 /// booking a histogram for a subsequent call of GetL().
3140 
3141 Int_t TUnfold::GetNr(void) const {
3142  return fL->GetNrows();
3143 }
3144 
3145 ////////////////////////////////////////////////////////////////////////////////
3146 /// Get matrix of regularisation conditions.
3147 ///
3148 /// \param[out] out histogram to store the regularisation conditions.
3149 /// the bin contents are overwritten
3150 ///
3151 /// The histogram should have dimension nr (x-axis) times nx (y-axis).
3152 /// nr corresponds to the number of regularisation conditions, it can
3153 /// be obtained using the method GetNr(). nx corresponds to the number
3154 /// of histogram bins in the response matrix along the truth axis.
3155 
3156 void TUnfold::GetL(TH2 *out) const
3157 {
3158  // loop over sparse matrix
3159  const Int_t *rows=fL->GetRowIndexArray();
3160  const Int_t *cols=fL->GetColIndexArray();
3161  const Double_t *data=fL->GetMatrixArray();
3162  for (Int_t row = 0; row < GetNr(); row++) {
3163  for (Int_t cindex = rows[row]; cindex < rows[row+1]; cindex++) {
3164  Int_t col=cols[cindex];
3165  Int_t indexH=fXToHist[col];
3166  out->SetBinContent(indexH,row+1,data[cindex]);
3167  }
3168  }
3169 }
3170 
3171 ////////////////////////////////////////////////////////////////////////////////
3172 /// Set type of area constraint.
3173 ///
3174 /// results of a previous unfolding are reset
3175 
3177 {
3178  // set type of constraint for the next unfolding
3179  if(fConstraint !=constraint) ClearResults();
3180  fConstraint=constraint;
3181  Info("SetConstraint","fConstraint=%d",fConstraint);
3182 }
3183 
3184 
3185 ////////////////////////////////////////////////////////////////////////////////
3186 /// Return regularisation parameter.
3187 
3189 {
3190  // return regularisation parameter
3191  return TMath::Sqrt(fTauSquared);
3192 }
3193 
3194 ////////////////////////////////////////////////////////////////////////////////
3195 /// Get \f$ chi^{2}_{L} \f$ contribution determined in recent unfolding.
3196 
3198 {
3199  // return chi**2 contribution from regularisation conditions
3200  return fLXsquared*fTauSquared;
3201 }
3202 
3203 ////////////////////////////////////////////////////////////////////////////////
3204 /// Get number of truth parameters determined in recent unfolding.
3205 ///
3206 /// empty bins of the response matrix or bins which can not be
3207 /// unfolded due to rank deficits are not counted
3208 
3210 {
3211  return GetNx();
3212 }
3213 
3214 ////////////////////////////////////////////////////////////////////////////////
3215 /// Get value on x-axis of L-curve determined in recent unfolding.
3216 ///
3217 /// \f$ x=log_{10}(GetChi2A()) \f$
3218 
3220 {
3221  return TMath::Log10(fChi2A);
3222 }
3223 
3224 ////////////////////////////////////////////////////////////////////////////////
3225 /// Get value on y-axis of L-curve determined in recent unfolding.
3226 ///
3227 /// \f$ y=log_{10}(GetChi2L()) \f$
3228 
3230 {
3231  return TMath::Log10(fLXsquared);
3232 }
3233 
////////////////////////////////////////////////////////////////////////////////
/// Get output distribution, possibly cumulated over several bins.
///
/// \param[out] output existing output histogram. content and errors
/// will be updated.
/// \param[in] binMap (default=0) array for mapping truth bins to histogram bins
///
/// If nonzero, the array <b>binMap</b> must have dimension n+2, where n
/// corresponds to the number of bins on the truth axis of the response
/// matrix (the histogram specified with the TUnfold
/// constructor). The indexes of <b>binMap</b> correspond to the truth
/// bins (including underflow and overflow) of the response matrix.
/// The element binMap[i] specifies the histogram number in
/// <b>output</b> where the corresponding truth bin will be stored. It is
/// possible to specify the same <b>output</b> bin number for multiple
/// indexes, in which case these bins are added. Set binMap[i]=-1 to
/// ignore an unfolded truth bin. The uncertainties are
/// calculated from the corresponding parts of the covariance matrix,
/// properly taking care of added truth bins.
///
/// If the pointer <b>binMap</b> is zero, the bins are mapped
/// one-to-one. Truth bin zero (underflow) is stored in the
/// <b>output</b> underflow, truth bin 1 is stored in bin number 1, etc.
///
/// - output: output histogram
/// - binMap: for each bin of the original output distribution
/// specify the destination bin. A value of -1 means that the bin
/// is discarded. 0 means underflow bin, 1 first bin, ...
/// - binMap[0] : destination of underflow bin
/// - binMap[1] : destination of first bin
/// ...

void TUnfold::GetOutput(TH1 *output,const Int_t *binMap) const
{
   ClearHistogram(output);
   // old array-based implementation, kept for reference:
   /* Int_t nbin=output->GetNbinsX();
      Double_t *c=new Double_t[nbin+2];
      Double_t *e2=new Double_t[nbin+2];
      for(Int_t i=0;i<nbin+2;i++) {
      c[i]=0.0;
      e2[i]=0.0;
      } */

   // accumulated squared errors, keyed by destination histogram bin
   std::map<Int_t,Double_t> e2;

   // sparse (compressed-row) representation of the covariance matrix Vxx
   const Int_t *rows_Vxx=fVxx->GetRowIndexArray();
   const Int_t *cols_Vxx=fVxx->GetColIndexArray();
   const Double_t *data_Vxx=fVxx->GetMatrixArray();

   Int_t binMapSize = fHistToX.GetSize();
   for(Int_t i=0;i<binMapSize;i++) {
      Int_t destBinI=binMap ? binMap[i] : i; // histogram bin
      Int_t srcBinI=fHistToX[i]; // matrix row index
      // skip discarded destinations (<0) and truth bins that were not unfolded
      if((destBinI>=0)&&(srcBinI>=0)) {
         // accumulate the unfolded result x into the destination bin
         output->SetBinContent
            (destBinI, (*fX)(srcBinI,0)+ output->GetBinContent(destBinI));
         // here we loop over the columns of the error matrix
         // j: counts histogram bins
         // index: counts sparse matrix index
         // the algorithm makes use of the fact that fHistToX is ordered
         Int_t j=0; // histogram bin
         Int_t index_vxx=rows_Vxx[srcBinI];
         //Double_t e2=TMath::Power(output->GetBinError(destBinI),2.);
         // merge-scan: advance j (histogram bins) and index_vxx (sparse
         // columns of row srcBinI) in parallel, summing only the matrix
         // elements whose row and column map to the SAME destination bin
         while((j<binMapSize)&&(index_vxx<rows_Vxx[srcBinI+1])) {
            Int_t destBinJ=binMap ? binMap[j] : j; // histogram bin
            if(destBinI!=destBinJ) {
               // only diagonal elements are calculated
               j++;
            } else {
               Int_t srcBinJ=fHistToX[j]; // matrix column index
               if(srcBinJ<0) {
                  // bin was not unfolded, check next bin
                  j++;
               } else {
                  if(cols_Vxx[index_vxx]<srcBinJ) {
                     // index is too small
                     index_vxx++;
                  } else if(cols_Vxx[index_vxx]>srcBinJ) {
                     // index is too large, skip bin
                     j++;
                  } else {
                     // add this bin
                     e2[destBinI] += data_Vxx[index_vxx];
                     j++;
                     index_vxx++;
                  }
               }
            }
         }
         //output->SetBinError(destBinI,TMath::Sqrt(e2));
      }
   }
   // finally convert the accumulated variances to bin errors
   for(std::map<Int_t,Double_t>::const_iterator i=e2.begin();
       i!=e2.end();i++) {
      //cout<<(*i).first<<" "<<(*i).second<<"\n";
      output->SetBinError((*i).first,TMath::Sqrt((*i).second));
   }
}
3332 
3333 ////////////////////////////////////////////////////////////////////////////////
3334 /// Add up an error matrix, also respecting the bin mapping.
3335 ///
3336 /// \param[inout] ematrix error matrix histogram
3337 /// \param[in] emat error matrix stored with internal mapping (member fXToHist)
3338 /// \param[in] binMap mapping of histogram bins
3339 /// \param[in] doClear if true, ematrix is cleared prior to adding
3340 /// elements of emat to it.
3341 ///
3342 /// the array <b>binMap</b> is explained with the method GetOutput(). The
3343 /// matrix emat must have dimension NxN where N=fXToHist.size()
3344 /// The flag <b>doClear</b> may be used to add covariance matrices from
3345 /// several uncertainty sources.
3346 
3348  const Int_t *binMap,Bool_t doClear) const
3349 {
3350  Int_t nbin=ematrix->GetNbinsX();
3351  if(doClear) {
3352  for(Int_t i=0;i<nbin+2;i++) {
3353  for(Int_t j=0;j<nbin+2;j++) {
3354  ematrix->SetBinContent(i,j,0.0);
3355  ematrix->SetBinError(i,j,0.0);
3356  }
3357  }
3358  }
3359 
3360  if(emat) {
3361  const Int_t *rows_emat=emat->GetRowIndexArray();
3362  const Int_t *cols_emat=emat->GetColIndexArray();
3363  const Double_t *data_emat=emat->GetMatrixArray();
3364 
3365  Int_t binMapSize = fHistToX.GetSize();
3366  for(Int_t i=0;i<binMapSize;i++) {
3367  Int_t destBinI=binMap ? binMap[i] : i;
3368  Int_t srcBinI=fHistToX[i];
3369  if((destBinI>=0)&&(destBinI<nbin+2)&&(srcBinI>=0)) {
3370  // here we loop over the columns of the source matrix
3371  // j: counts histogram bins
3372  // index: counts sparse matrix index
3373  // the algorithm makes use of the fact that fHistToX is ordered
3374  Int_t j=0;
3375  Int_t index_vxx=rows_emat[srcBinI];
3376  while((j<binMapSize)&&(index_vxx<rows_emat[srcBinI+1])) {
3377  Int_t destBinJ=binMap ? binMap[j] : j;
3378  Int_t srcBinJ=fHistToX[j];
3379  if((destBinJ<0)||(destBinJ>=nbin+2)||(srcBinJ<0)) {
3380  // check next bin
3381  j++;
3382  } else {
3383  if(cols_emat[index_vxx]<srcBinJ) {
3384  // index is too small
3385  index_vxx++;
3386  } else if(cols_emat[index_vxx]>srcBinJ) {
3387  // index is too large, skip bin
3388  j++;
3389  } else {
3390  // add this bin
3391  Double_t e2= ematrix->GetBinContent(destBinI,destBinJ)
3392  + data_emat[index_vxx];
3393  ematrix->SetBinContent(destBinI,destBinJ,e2);
3394  j++;
3395  index_vxx++;
3396  }
3397  }
3398  }
3399  }
3400  }
3401  }
3402 }
3403 
3404 ////////////////////////////////////////////////////////////////////////////////
3405 /// Get output covariance matrix, possibly cumulated over several bins.
3406 ///
3407 /// \param[out] ematrix histogram to store the covariance. The bin
3408 /// contents are overwritten.
3409 /// \param[in] binMap (default=0) array for mapping truth bins to histogram bins
3410 ///
3411 /// The use of <b>binMap</b> is explained with the documentation of
3412 /// the GetOutput() method
3413 
3414 void TUnfold::GetEmatrix(TH2 *ematrix,const Int_t *binMap) const
3415 {
3416  ErrorMatrixToHist(ematrix,fVxx,binMap,kTRUE);
3417 }
3418 
3419 ////////////////////////////////////////////////////////////////////////////////
3420 /// Get correlation coefficients, possibly cumulated over several bins.
3421 ///
3422 /// \param[out] rhoij histogram to store the correlation coefficients. The bin
3423 /// contents are overwritten.
3424 /// \param[in] binMap (default=0) array for mapping truth bins to histogram bins
3425 ///
3426 /// The use of <b>binMap</b> is explained with the documentation of
3427 /// the GetOutput() method
3428 
3429 void TUnfold::GetRhoIJ(TH2 *rhoij,const Int_t *binMap) const
3430 {
3431  GetEmatrix(rhoij,binMap);
3432  Int_t nbin=rhoij->GetNbinsX();
3433  Double_t *e=new Double_t[nbin+2];
3434  for(Int_t i=0;i<nbin+2;i++) {
3435  e[i]=TMath::Sqrt(rhoij->GetBinContent(i,i));
3436  }
3437  for(Int_t i=0;i<nbin+2;i++) {
3438  for(Int_t j=0;j<nbin+2;j++) {
3439  if((e[i]>0.0)&&(e[j]>0.0)) {
3440  rhoij->SetBinContent(i,j,rhoij->GetBinContent(i,j)/e[i]/e[j]);
3441  } else {
3442  rhoij->SetBinContent(i,j,0.0);
3443  }
3444  }
3445  }
3446  delete[] e;
3447 }
3448 
////////////////////////////////////////////////////////////////////////////////
/// Get global correlation coefficients, possibly cumulated over several bins.
///
/// \param[out] rhoi histogram to store the global correlation
/// coefficients. The bin contents are overwritten.
/// \param[in] binMap (default=0) array for mapping truth bins to
/// histogram bins
/// \param[out] invEmat (default=0) histogram to store the inverted
/// covariance matrix
///
/// for a given bin, the global correlation coefficient is defined
/// as \f$ \rho_{i} = \sqrt{1-\frac{1}{(V_{ii}*V^{-1}_{ii})}} \f$
///
/// such that the calculation of global correlation coefficients
/// possibly involves the inversion of a covariance matrix.
///
/// return value: maximum global correlation coefficient
///
/// The use of <b>binMap</b> is explained with the documentation of
/// the GetOutput() method

Double_t TUnfold::GetRhoI(TH1 *rhoi,const Int_t *binMap,TH2 *invEmat) const
{
   // bins not touched below keep the init value -1
   ClearHistogram(rhoi,-1.);

   if(binMap) {
      // if there is a bin map, the matrix needs to be inverted
      // otherwise, use the existing inverse, such that
      // no matrix inversion is needed
      return GetRhoIFromMatrix(rhoi,fVxx,binMap,invEmat);
   } else {
      Double_t rhoMax=0.0;

      // sparse (compressed-row) views of Vxx and its cached inverse
      const Int_t *rows_Vxx=fVxx->GetRowIndexArray();
      const Int_t *cols_Vxx=fVxx->GetColIndexArray();
      const Double_t *data_Vxx=fVxx->GetMatrixArray();

      const Int_t *rows_VxxInv=fVxxInv->GetRowIndexArray();
      const Int_t *cols_VxxInv=fVxxInv->GetColIndexArray();
      const Double_t *data_VxxInv=fVxxInv->GetMatrixArray();

      for(Int_t i=0;i<GetNx();i++) {
         Int_t destI=fXToHist[i];
         // locate the diagonal elements V_ii and (V^-1)_ii in the
         // sparse rows (0.0 if the diagonal element is not stored)
         Double_t e_ii=0.0,einv_ii=0.0;
         for(int index_vxx=rows_Vxx[i];index_vxx<rows_Vxx[i+1];index_vxx++) {
            if(cols_Vxx[index_vxx]==i) {
               e_ii=data_Vxx[index_vxx];
               break;
            }
         }
         for(int index_vxxinv=rows_VxxInv[i];index_vxxinv<rows_VxxInv[i+1];
             index_vxxinv++) {
            if(cols_VxxInv[index_vxxinv]==i) {
               einv_ii=data_VxxInv[index_vxxinv];
               break;
            }
         }
         // rho = sqrt(1 - 1/(V_ii * (V^-1)_ii)); a negative argument
         // (numerical problem) is mapped to -sqrt(-rho^2)
         Double_t rho=1.0;
         if((einv_ii>0.0)&&(e_ii>0.0)) rho=1.-1./(einv_ii*e_ii);
         if(rho>=0.0) rho=TMath::Sqrt(rho);
         else rho=-TMath::Sqrt(-rho);
         if(rho>rhoMax) {
            rhoMax = rho;
         }
         rhoi->SetBinContent(destI,rho);
      }
      return rhoMax;
   }
}
3518 
3519 ////////////////////////////////////////////////////////////////////////////////
3520 /// Get global correlation coefficients with arbitrary min map.
3521 ///
3522 /// - rhoi: global correlation histogram
3523 /// - emat: error matrix
3524 /// - binMap: for each bin of the original output distribution
3525 /// specify the destination bin. A value of -1 means that the bin
3526 /// is discarded. 0 means underflow bin, 1 first bin, ...
3527 /// - binMap[0] : destination of underflow bin
3528 /// - binMap[1] : destination of first bin
3529 /// ...
3530 /// return value: maximum global correlation
3531 
3533  const Int_t *binMap,TH2 *invEmat) const
3534 {
3535  Double_t rhoMax=0.;
3536  // original number of bins:
3537  // fHistToX.GetSize()
3538  // loop over binMap and find number of used bins
3539 
3540  Int_t binMapSize = fHistToX.GetSize();
3541 
3542  // histToLocalBin[iBin] points to a local index
3543  // only bins iBin of the histogram rhoi whih are referenced
3544  // in the bin map have a local index
3545  std::map<int,int> histToLocalBin;
3546  Int_t nBin=0;
3547  for(Int_t i=0;i<binMapSize;i++) {
3548  Int_t mapped_i=binMap ? binMap[i] : i;
3549  if(mapped_i>=0) {
3550  if(histToLocalBin.find(mapped_i)==histToLocalBin.end()) {
3551  histToLocalBin[mapped_i]=nBin;
3552  nBin++;
3553  }
3554  }
3555  }
3556  // number of local indices: nBin
3557  if(nBin>0) {
3558  // construct inverse mapping function
3559  // local index -> histogram bin
3560  Int_t *localBinToHist=new Int_t[nBin];
3561  for(std::map<int,int>::const_iterator i=histToLocalBin.begin();
3562  i!=histToLocalBin.end();i++) {
3563  localBinToHist[(*i).second]=(*i).first;
3564  }
3565 
3566  const Int_t *rows_eOrig=eOrig->GetRowIndexArray();
3567  const Int_t *cols_eOrig=eOrig->GetColIndexArray();
3568  const Double_t *data_eOrig=eOrig->GetMatrixArray();
3569 
3570  // remap error matrix
3571  // matrix row i -> origI (fXToHist[i])
3572  // origI -> destI (binMap)
3573  // destI -> ematBinI (histToLocalBin)
3574  TMatrixD eRemap(nBin,nBin);
3575  // i: row of the matrix eOrig
3576  for(Int_t i=0;i<GetNx();i++) {
3577  // origI: pointer in output histogram with all bins
3578  Int_t origI=fXToHist[i];
3579  // destI: pointer in the histogram rhoi
3580  Int_t destI=binMap ? binMap[origI] : origI;
3581  if(destI<0) continue;
3582  Int_t ematBinI=histToLocalBin[destI];
3583  for(int index_eOrig=rows_eOrig[i];index_eOrig<rows_eOrig[i+1];
3584  index_eOrig++) {
3585  // j: column of the matrix fVxx
3586  Int_t j=cols_eOrig[index_eOrig];
3587  // origJ: pointer in output histogram with all bins
3588  Int_t origJ=fXToHist[j];
3589  // destJ: pointer in the histogram rhoi
3590  Int_t destJ=binMap ? binMap[origJ] : origJ;
3591  if(destJ<0) continue;
3592  Int_t ematBinJ=histToLocalBin[destJ];
3593  eRemap(ematBinI,ematBinJ) += data_eOrig[index_eOrig];
3594  }
3595  }
3596  // invert remapped error matrix
3597  TMatrixDSparse eSparse(eRemap);
3598  Int_t rank=0;
3599  TMatrixDSparse *einvSparse=InvertMSparseSymmPos(&eSparse,&rank);
3600  if(rank!=einvSparse->GetNrows()) {
3601  Warning("GetRhoIFormMatrix","Covariance matrix has rank %d expect %d",
3602  rank,einvSparse->GetNrows());
3603  }
3604  // fill to histogram
3605  const Int_t *rows_eInv=einvSparse->GetRowIndexArray();
3606  const Int_t *cols_eInv=einvSparse->GetColIndexArray();
3607  const Double_t *data_eInv=einvSparse->GetMatrixArray();
3608 
3609  for(Int_t i=0;i<einvSparse->GetNrows();i++) {
3610  Double_t e_ii=eRemap(i,i);
3611  Double_t einv_ii=0.0;
3612  for(Int_t index_einv=rows_eInv[i];index_einv<rows_eInv[i+1];
3613  index_einv++) {
3614  Int_t j=cols_eInv[index_einv];
3615  if(j==i) {
3616  einv_ii=data_eInv[index_einv];
3617  }
3618  if(invEmat) {
3619  invEmat->SetBinContent(localBinToHist[i],localBinToHist[j],
3620  data_eInv[index_einv]);
3621  }
3622  }
3623  Double_t rho=1.0;
3624  if((einv_ii>0.0)&&(e_ii>0.0)) rho=1.-1./(einv_ii*e_ii);
3625  if(rho>=0.0) rho=TMath::Sqrt(rho);
3626  else rho=-TMath::Sqrt(-rho);
3627  if(rho>rhoMax) {
3628  rhoMax = rho;
3629  }
3630  //std::cout<<i<<" "<<localBinToHist[i]<<" "<<rho<<"\n";
3631  rhoi->SetBinContent(localBinToHist[i],rho);
3632  }
3633 
3634  DeleteMatrix(&einvSparse);
3635  delete [] localBinToHist;
3636  }
3637  return rhoMax;
3638 }
3639 
3640 ////////////////////////////////////////////////////////////////////////////////
3641 /// Initialize bin contents and bin errors for a given histogram.
3642 ///
3643 /// \param[out] h histogram
3644 /// \param[in] x new histogram content
3645 ///
3646 /// all histgram errors are set to zero, all contents are set to <b>x</b>
3647 
3649 {
3650  Int_t nxyz[3];
3651  nxyz[0]=h->GetNbinsX()+1;
3652  nxyz[1]=h->GetNbinsY()+1;
3653  nxyz[2]=h->GetNbinsZ()+1;
3654  for(int i=h->GetDimension();i<3;i++) nxyz[i]=0;
3655  Int_t ixyz[3];
3656  for(int i=0;i<3;i++) ixyz[i]=0;
3657  while((ixyz[0]<=nxyz[0])&&
3658  (ixyz[1]<=nxyz[1])&&
3659  (ixyz[2]<=nxyz[2])){
3660  Int_t ibin=h->GetBin(ixyz[0],ixyz[1],ixyz[2]);
3661  h->SetBinContent(ibin,x);
3662  h->SetBinError(ibin,0.0);
3663  for(Int_t i=0;i<3;i++) {
3664  ixyz[i] += 1;
3665  if(ixyz[i]<=nxyz[i]) break;
3666  if(i<2) ixyz[i]=0;
3667  }
3668  }
3669 }
3670 
3672  // set accuracy for matrix inversion
3673  if((eps>0.0)&&(eps<1.0)) fEpsMatrix=eps;
3674 }
3675 
3676 ////////////////////////////////////////////////////////////////////////////////
3677 /// Return a string describing the TUnfold version.
3678 ///
3679 /// The version is reported in the form Vmajor.minor
3680 /// Changes of the minor version number typically correspond to
3681 /// bug-fixes. Changes of the major version may result in adding or
3682 /// removing data attributes, such that the streamer methods are not
3683 /// compatible between different major versions.
3684 
3686 {
3687  return TUnfold_VERSION;
3688 }
3689 
const int nx
Definition: kalman.C:16
void GetNormalisationVector(TH1 *s, const Int_t *binMap=0) const
Histogram of truth bins, determined from summing over the response matrix.
Definition: TUnfold.cxx:2901
virtual const Element * GetMatrixArray() const
void SetBias(const TH1 *bias)
Set bias vector.
Definition: TUnfold.cxx:1915
EConstraint fConstraint
type of constraint to use for the unfolding
Definition: TUnfold.h:171
static double B[]
static long int sum(long int i)
Definition: Factory.cxx:2162
TMatrixDSparse * InvertMSparseSymmPos(const TMatrixDSparse *A, Int_t *rank) const
Get the inverse or pseudo-inverse of a positive, sparse matrix.
Definition: TUnfold.cxx:1011
virtual void Info(const char *method, const char *msgfmt,...) const
Issue info message.
Definition: TObject.cxx:847
static void DeleteMatrix(TMatrixD **m)
delete matrix and invalidate pointer
Definition: TUnfold.cxx:195
Int_t GetNdf(void) const
get number of degrees of freedom determined in recent unfolding
Definition: TUnfold.h:324
TUnfold(void)
Only for use by root streamer or derived classes.
Definition: TUnfold.cxx:248
virtual const Int_t * GetRowIndexArray() const
virtual Int_t ScanLcurve(Int_t nPoint, Double_t tauMin, Double_t tauMax, TGraph **lCurve, TSpline **logTauX=0, TSpline **logTauY=0, TSpline **logTauCurvature=0)
Scan the L curve, determine tau and unfold at the final value of tau.
Definition: TUnfold.cxx:2551
void swap(TDirectoryEntry &e1, TDirectoryEntry &e2) noexcept
Int_t fNdf
number of degrees of freedom
Definition: TUnfold.h:198
const Double_t * GetArray() const
Definition: TArrayD.h:43
Double_t GetRhoIFromMatrix(TH1 *rhoi, const TMatrixDSparse *eOrig, const Int_t *binMap, TH2 *invEmat) const
Get global correlation coefficients with arbitrary min map.
Definition: TUnfold.cxx:3532
TArrayI fHistToX
mapping of histogram bins to matrix indices
Definition: TUnfold.h:167
Int_t GetNcols() const
Definition: TMatrixTBase.h:125
TH1 * h
Definition: legend2.C:5
Base class for spline implementation containing the Draw/Paint methods.
Definition: TSpline.h:20
virtual Double_t GetBinContent(Int_t bin) const
Return content of bin number bin.
Definition: TH1.cxx:4639
TMatrixD * fX
unfolding result x
Definition: TUnfold.h:180
virtual Int_t GetNbinsZ() const
Definition: TH1.h:279
no regularisation, or defined later by RegularizeXXX() methods
Definition: TUnfold.h:123
Basic string class.
Definition: TString.h:129
virtual TMatrixTBase< Element > & SetMatrixArray(const Element *data, Option_t *="")
Copy array data to matrix .
Short_t Min(Short_t a, Short_t b)
Definition: TMathBase.h:168
int Int_t
Definition: RtypesCore.h:41
bool Bool_t
Definition: RtypesCore.h:59
TArc * a
Definition: textangle.C:12
TMatrixDSparse * fDXDtauSquared
derivative of the result wrt tau squared
Definition: TUnfold.h:204
Int_t RegularizeBins2D(int start_bin, int step1, int nbin1, int step2, int nbin2, ERegMode regmode)
Add regularisation conditions for 2d unfolding.
Definition: TUnfold.cxx:2225
TMatrixDSparse * MultiplyMSparseMSparse(const TMatrixDSparse *a, const TMatrixDSparse *b) const
Multiply two sparse matrices.
Definition: TUnfold.cxx:617
TMatrixDSparse * MultiplyMSparseM(const TMatrixDSparse *a, const TMatrixD *b) const
Multiply sparse matrix and a non-sparse matrix.
Definition: TUnfold.cxx:776
TMatrixDSparse * MultiplyMSparseMSparseTranspVector(const TMatrixDSparse *m1, const TMatrixDSparse *m2, const TMatrixTBase< Double_t > *v) const
Calculate a sparse matrix product where the diagonal matrix V is given by a vector.
Definition: TUnfold.cxx:836
TMatrixD * fX0
bias vector x0
Definition: TUnfold.h:161
void GetInputInverseEmatrix(TH2 *ematrix)
Get inverse of the measurement&#39;s covariance matrix.
Definition: TUnfold.cxx:3063
TMatrixDSparse * CreateSparseMatrix(Int_t nrow, Int_t ncol, Int_t nele, Int_t *row, Int_t *col, Double_t *data) const
Create a sparse matrix, given the nonzero elements.
Definition: TUnfold.cxx:592
TMatrixDSparse * fDXDAZ[2]
vector contribution to the of derivative dx_k/dA_ij
Definition: TUnfold.h:202
static double A[]
Short_t Abs(Short_t d)
Definition: TMathBase.h:108
#define G(x, y, z)
LongDouble_t Power(LongDouble_t x, LongDouble_t y)
Definition: TMath.h:628
TMatrixDSymEigen.
Class to create third splines to interpolate knots Arbitrary conditions can be introduced for first a...
Definition: TSpline.h:191
TMatrixT.
Definition: TMatrixDfwd.h:22
TMatrixDSparse * fL
regularisation conditions L
Definition: TUnfold.h:153
Double_t GetRhoI(TH1 *rhoi, const Int_t *binMap=0, TH2 *invEmat=0) const
Get global correlation coefficients, possibly cumulated over several bins.
Definition: TUnfold.cxx:3470
virtual Int_t GetDimension() const
Definition: TH1.h:263
Bool_t AddRegularisationCondition(Int_t i0, Double_t f0, Int_t i1=-1, Double_t f1=0., Int_t i2=-1, Double_t f2=0.)
Add a row of regularisation conditions to the matrix L.
Definition: TUnfold.cxx:1939
Double_t x[n]
Definition: legend1.C:17
ERegMode fRegMode
type of regularisation
Definition: TUnfold.h:173
static TString Format(const char *fmt,...)
Static method which formats a string using a printf style format descriptor and return a TString...
Definition: TString.cxx:2345
truth level on x-axis of the response matrix
Definition: TUnfold.h:143
you should not use this method at all Int_t Int_t Double_t Double_t Double_t Int_t Double_t Double_t Double_t tau
Definition: TRolke.cxx:630
TMatrixDSparse * fDXDAM[2]
matrix contribution to the of derivative dx_k/dA_ij
Definition: TUnfold.h:200
TArrayD fSumOverY
truth vector calculated from the non-normalized response matrix
Definition: TUnfold.h:169
regularise the amplitude of the output distribution
Definition: TUnfold.h:126
const int ny
Definition: kalman.C:17
EHistMap
arrangement of axes for the response matrix (TH2 histogram)
Definition: TUnfold.h:140
EConstraint
type of extra constraint
Definition: TUnfold.h:110
Double_t Log10(Double_t x)
Definition: TMath.h:652
virtual Double_t GetLcurveY(void) const
Get value on y-axis of L-curve determined in recent unfolding.
Definition: TUnfold.cxx:3229
TMatrixDSparse * MultiplyMSparseTranspMSparse(const TMatrixDSparse *a, const TMatrixDSparse *b) const
Multiply a transposed Sparse matrix with another sparse matrix,.
Definition: TUnfold.cxx:693
Int_t RegularizeCurvature(int left_bin, int center_bin, int right_bin, Double_t scale_left=1.0, Double_t scale_right=1.0)
Add a regularisation condition on the curvature of three truth bin.
Definition: TUnfold.cxx:2120
Int_t Finite(Double_t x)
Definition: TMath.h:655
TArrayI fXToHist
mapping of matrix indices to histogram bins
Definition: TUnfold.h:165
Double_t fChi2A
chi**2 contribution from (y-Ax)Vyy-1(y-Ax)
Definition: TUnfold.h:190
void Set(Int_t n)
Set size of this array to n ints.
Definition: TArrayI.cxx:105
void SetEpsMatrix(Double_t eps)
set numerical accuracy for Eigenvalue analysis when inverting matrices with rank problems ...
Definition: TUnfold.cxx:3671
virtual void SetBinError(Int_t bin, Double_t error)
See convention for numbering bins in TH1::GetBin.
Definition: TH1.cxx:8311
TMatrixTSparse< Double_t > TMatrixDSparse
ERegMode
choice of regularisation scheme
Definition: TUnfold.h:120
TMatrixT< Double_t > TMatrixD
Definition: TMatrixDfwd.h:22
#define F(x, y, z)
TMatrixDSparse * fVxxInv
inverse of covariance matrix Vxx-1
Definition: TUnfold.h:184
void GetBias(TH1 *bias, const Int_t *binMap=0) const
Get bias vector including bias scale.
Definition: TUnfold.cxx:2925
Double_t fRhoMax
maximum global correlation coefficient
Definition: TUnfold.h:194
TMatrixDSparse * fAx
result x folded back A*x
Definition: TUnfold.h:188
virtual Int_t SetInput(const TH1 *hist_y, Double_t scaleBias=0.0, Double_t oneOverZeroError=0.0, const TH2 *hist_vyy=0, const TH2 *hist_vyy_inv=0)
Define input data for subsequent calls to DoUnfold(tau).
Definition: TUnfold.cxx:2303
TMatrixDSparse * fVyyInv
inverse of the input covariance matrix Vyy-1
Definition: TUnfold.h:186
virtual TString GetOutputBinName(Int_t iBinX) const
Get bin name of an output bin.
Definition: TUnfold.cxx:1687
virtual Double_t DoUnfold(void)
Core unfolding algorithm.
Definition: TUnfold.cxx:290
TMatrixDSparse * fDXDY
derivative of the result wrt dx/dy
Definition: TUnfold.h:206
TRandom2 r(17)
Service class for 2-Dim histogram classes.
Definition: TH2.h:30
Int_t GetSize() const
Definition: TArray.h:47
SVector< double, 2 > v
Definition: Dict.h:5
XPoint xy[kMAXMK]
Definition: TGX11.cxx:122
void GetLsquared(TH2 *lsquared) const
Get matrix of regularisation conditions squared.
Definition: TUnfold.cxx:3116
TMatrixTSparse.
Double_t fBiasScale
scale factor for the bias
Definition: TUnfold.h:159
virtual void SetBinContent(Int_t bin, Double_t content)
Set bin content see convention for numbering bins in TH1::GetBin In case the bin number is greater th...
Definition: TH1.cxx:8325
void ClearHistogram(TH1 *h, Double_t x=0.) const
Initialize bin contents and bin errors for a given histogram.
Definition: TUnfold.cxx:3648
virtual Double_t GetLcurveX(void) const
Get value on x-axis of L-curve determined in recent unfolding.
Definition: TUnfold.cxx:3219
TMarker * m
Definition: textangle.C:8
void InitTUnfold(void)
Initialize data members, for use in constructors.
Definition: TUnfold.cxx:149
virtual void Error(const char *method, const char *msgfmt,...) const
Issue error message.
Definition: TObject.cxx:873
static const char * GetTUnfoldVersion(void)
Return a string describing the TUnfold version.
Definition: TUnfold.cxx:3685
TMatrixD * fY
input (measured) data y
Definition: TUnfold.h:155
regularize the 1st derivative of the output distribution
Definition: TUnfold.h:129
virtual Int_t GetBin(Int_t binx, Int_t biny=0, Int_t binz=0) const
Return Global bin number corresponding to binx,y,z.
Definition: TH1.cxx:4541
const TMatrixD & GetEigenVectors() const
Double_t fTauSquared
regularisation parameter tau squared
Definition: TUnfold.h:163
Linear Algebra Package.
REAL epsilon
Definition: triangle.c:617
constexpr Double_t E()
Definition: TMath.h:74
TMatrixDSparse * fVyy
covariance matrix Vyy corresponding to y
Definition: TUnfold.h:157
virtual const Int_t * GetColIndexArray() const
Int_t GetNrows() const
Definition: TMatrixTBase.h:122
const Bool_t kFALSE
Definition: RtypesCore.h:92
Int_t GetNx(void) const
returns internal number of output (truth) matrix rows
Definition: TUnfold.h:227
const TVectorD & GetEigenValues() const
Int_t GetNr(void) const
Get number of regularisation conditions.
Definition: TUnfold.cxx:3141
mixed regularisation pattern
Definition: TUnfold.h:136
#define ClassImp(name)
Definition: Rtypes.h:336
double f(double x)
double Double_t
Definition: RtypesCore.h:55
TMatrixDSparse * fEinv
matrix E^(-1)
Definition: TUnfold.h:208
virtual void ClearResults(void)
Reset all results.
Definition: TUnfold.cxx:218
void GetProbabilityMatrix(TH2 *A, EHistMap histmap) const
Get matrix of probabilities.
Definition: TUnfold.cxx:2999
Double_t fRhoAvg
average global correlation coefficient
Definition: TUnfold.h:196
void SetConstraint(EConstraint constraint)
Set type of area constraint.
Definition: TUnfold.cxx:3176
Double_t fLXsquared
chi**2 contribution from (x-s*x0)TLTL(x-s*x0)
Definition: TUnfold.h:192
void GetFoldedOutput(TH1 *folded, const Int_t *binMap=0) const
Get unfolding result on detector level.
Definition: TUnfold.cxx:2951
Double_t y[n]
Definition: legend1.C:17
The TH1 histogram class.
Definition: TH1.h:56
you should not use this method at all Int_t Int_t Double_t Double_t Double_t e
Definition: TRolke.cxx:630
void GetInput(TH1 *inputData, const Int_t *binMap=0) const
Input vector of measurements.
Definition: TUnfold.cxx:3034
Int_t RegularizeBins(int start, int step, int nbin, ERegMode regmode)
Add regularisation conditions for a group of bins.
Definition: TUnfold.cxx:2164
you should not use this method at all Int_t Int_t z
Definition: TRolke.cxx:630
use no extra constraint
Definition: TUnfold.h:113
TMatrixDSparse * fA
response matrix A
Definition: TUnfold.h:151
void GetCoeff(Int_t i, Double_t &x, Double_t &y, Double_t &b, Double_t &c, Double_t &d)
Definition: TSpline.h:231
void GetRhoIJ(TH2 *rhoij, const Int_t *binMap=0) const
Get correlation coefficients, possibly cumulated over several bins.
Definition: TUnfold.cxx:3429
Int_t fIgnoredBins
number of input bins which are dropped because they have error=0
Definition: TUnfold.h:176
Int_t RegularizeDerivative(int left_bin, int right_bin, Double_t scale=1.0)
Add a regularisation condition on the difference of two truth bin.
Definition: TUnfold.cxx:2081
An algorithm to unfold distributions from detector to truth level.
Definition: TUnfold.h:104
double f2(const double *x)
#define dest(otri, vertexptr)
Definition: triangle.c:1040
Short_t Max(Short_t a, Short_t b)
Definition: TMathBase.h:200
Double_t fEpsMatrix
machine accuracy used to determine matrix rank after eigenvalue analysis
Definition: TUnfold.h:178
void GetOutput(TH1 *output, const Int_t *binMap=0) const
Get output distribution, possibly cumulated over several bins.
Definition: TUnfold.cxx:3266
TMatrixDSparse * fE
matrix E
Definition: TUnfold.h:210
A Graph is a graphics object made of two arrays X and Y with npoints each.
Definition: TGraph.h:41
TMatrixDSparse * fVxx
covariance matrix Vxx
Definition: TUnfold.h:182
void ErrorMatrixToHist(TH2 *ematrix, const TMatrixDSparse *emat, const Int_t *binMap, Bool_t doClear) const
Add up an error matrix, also respecting the bin mapping.
Definition: TUnfold.cxx:3347
TF1 * f1
Definition: legend1.C:11
you should not use this method at all Int_t Int_t Double_t Double_t Double_t Int_t Double_t Double_t Double_t Double_t b
Definition: TRolke.cxx:630
Int_t GetNy(void) const
returns the number of measurement bins
Definition: TUnfold.h:235
virtual Double_t GetBinContent(Int_t bin) const
Return content of bin number bin.
Definition: TH2.h:84
void AddMSparse(TMatrixDSparse *dest, Double_t f, const TMatrixDSparse *src) const
Add a sparse matrix, scaled by a factor, to another scaled matrix.
Definition: TUnfold.cxx:933
Double_t GetChi2L(void) const
Get contribution determined in recent unfolding.
Definition: TUnfold.cxx:3197
virtual void SetBinContent(Int_t bin, Double_t content)
Set bin content.
Definition: TH2.cxx:2471
void GetL(TH2 *l) const
Get matrix of regularisation conditions.
Definition: TUnfold.cxx:3156
Definition: first.py:1
const int nn
virtual Int_t GetNbinsX() const
Definition: TH1.h:277
Double_t Sqrt(Double_t x)
Definition: TMath.h:591
virtual void Fatal(const char *method, const char *msgfmt,...) const
Issue fatal error message.
Definition: TObject.cxx:901
regularize the 2nd derivative of the output distribution
Definition: TUnfold.h:132
float * q
Definition: THbookFile.cxx:87
const Bool_t kTRUE
Definition: RtypesCore.h:91
Double_t Eval(Double_t x) const
Eval this spline at x.
Definition: TSpline.cxx:782
Int_t RegularizeSize(int bin, Double_t scale=1.0)
Add a regularisation condition on the magnitude of a truth bin.
Definition: TUnfold.cxx:2047
Int_t GetNpar(void) const
Get number of truth parameters determined in recent unfolding.
Definition: TUnfold.cxx:3209
void Set(Int_t n)
Set size of this array to n doubles.
Definition: TArrayD.cxx:105
const Int_t n
Definition: legend1.C:16
void GetEmatrix(TH2 *ematrix, const Int_t *binMap=0) const
Get output covariance matrix, possibly cumulated over several bins.
Definition: TUnfold.cxx:3414
TRandom3 R
a TMatrixD.
Definition: testIO.cxx:28
Double_t GetTau(void) const
Return regularisation parameter.
Definition: TUnfold.cxx:3188
virtual void Warning(const char *method, const char *msgfmt,...) const
Issue warning message.
Definition: TObject.cxx:859
virtual Int_t GetNbinsY() const
Definition: TH1.h:278
virtual Double_t GetBinError(Int_t bin) const
Return value of error associated to bin number bin.
Definition: TH1.cxx:8175
#define TUnfold_VERSION
Definition: TUnfold.h:100