MPIProcess.cxx
// @(#)root/minuit2:$Id$
// Author: A. Lazzaro 2009
/***************************************************************************
 * Package: Minuit2                                                        *
 *    File: $Id$                                                           *
 *  Author: Alfio Lazzaro, alfio.lazzaro@mi.infn.it                        *
 *                                                                         *
 * Copyright: (C) 2008 by Universita' and INFN, Milan                      *
 ***************************************************************************/

#include "Minuit2/MPIProcess.h"

#include <iostream>

namespace ROOT {

namespace Minuit2 {

   unsigned int MPIProcess::fgGlobalSize = 1;
   unsigned int MPIProcess::fgGlobalRank = 0;

   // By default all procs are for X
   unsigned int MPIProcess::fgCartSizeX = 0;
   unsigned int MPIProcess::fgCartSizeY = 0;
   unsigned int MPIProcess::fgCartDimension = 0;
   bool MPIProcess::fgNewCart = true;

#ifdef MPIPROC
   MPI::Intracomm* MPIProcess::fgCommunicator = 0;
   int MPIProcess::fgIndexComm = -1; // -1 for no-initialization
   MPI::Intracomm* MPIProcess::fgCommunicators[2] = {0};
   unsigned int MPIProcess::fgIndecesComm[2] = {0};
#endif

   MPIProcess::MPIProcess(unsigned int nelements, unsigned int indexComm) :
      fNelements(nelements), fSize(1), fRank(0)
   {

      // check the requested index for the local communicator; valid values are 0 and 1
      indexComm = (indexComm==0) ? 0 : 1;

#ifdef MPIPROC

      StartMPI();

      if (fgGlobalSize==fgCartDimension &&
          fgCartSizeX!=fgCartDimension && fgCartSizeY!=fgCartDimension) {
         // declare the cartesian topology

         if (fgCommunicator==0 && fgIndexComm<0 && fgNewCart) {
            // first call, declare the topology
            std::cout << "Info --> MPIProcess::MPIProcess: Declare cartesian Topology ("
                      << fgCartSizeX << "x" << fgCartSizeY << ")" << std::endl;

            int color = fgGlobalRank / fgCartSizeY;
            int key = fgGlobalRank % fgCartSizeY;

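            // note: MPI's Split groups ranks by its first argument and orders them
            // within each group by the second, so the two calls below carve
            // COMM_WORLD into one set of subcommunicators per grid direction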
            fgCommunicators[0] = new MPI::Intracomm(MPI::COMM_WORLD.Split(key,color));  // rows for Minuit
            fgCommunicators[1] = new MPI::Intracomm(MPI::COMM_WORLD.Split(color,key));  // columns for NLL

            fgNewCart = false;

         }

         fgIndexComm++;

         if (fgIndexComm>1 || fgCommunicator==(&(MPI::COMM_WORLD))) { // Remember, no more than 2 dimensions in the topology!
            std::cerr << "Error --> MPIProcess::MPIProcess: Requiring more than 2 dimensions in the topology!" << std::endl;
            MPI::COMM_WORLD.Abort(-1);
         }

         // requiring columns as the first call; in this case use all nodes
         if (((unsigned int)fgIndexComm)<indexComm)
            fgCommunicator = &(MPI::COMM_WORLD);
         else {
            fgIndecesComm[fgIndexComm] = indexComm;
            fgCommunicator = fgCommunicators[fgIndecesComm[fgIndexComm]];
         }

      }
      else {
         // no cartesian topology
         if (fgCartDimension!=0 && fgGlobalSize!=fgCartDimension) {
            std::cout << "Warning --> MPIProcess::MPIProcess: Cartesian dimension doesn't correspond to # total procs!" << std::endl;
            std::cout << "Warning --> MPIProcess::MPIProcess: Ignoring topology, use all procs for X." << std::endl;
            std::cout << "Warning --> MPIProcess::MPIProcess: Resetting topology..." << std::endl;
            fgCartSizeX = fgGlobalSize;
            fgCartSizeY = 1;
            fgCartDimension = fgGlobalSize;
         }

         if (fgIndexComm<0) {
            if (fgCartSizeX==fgCartDimension) {
               fgCommunicators[0] = &(MPI::COMM_WORLD);
               fgCommunicators[1] = 0;
            }
            else {
               fgCommunicators[0] = 0;
               fgCommunicators[1] = &(MPI::COMM_WORLD);
            }
         }

         fgIndexComm++;

         if (fgIndexComm>1) { // Remember, no more than 2 nested MPI calls!
            std::cerr << "Error --> MPIProcess::MPIProcess: More than 2 nested MPI calls!" << std::endl;
            MPI::COMM_WORLD.Abort(-1);
         }

         fgIndecesComm[fgIndexComm] = indexComm;

         // require 2 nested communicators
         if (fgCommunicator!=0 && fgCommunicators[indexComm]!=0) {
            std::cout << "Warning --> MPIProcess::MPIProcess: Requiring 2 nested MPI calls!" << std::endl;
            std::cout << "Warning --> MPIProcess::MPIProcess: Ignoring second call." << std::endl;
            fgIndecesComm[fgIndexComm] = (indexComm==0) ? 1 : 0;
         }

         fgCommunicator = fgCommunicators[fgIndecesComm[fgIndexComm]];

      }

      // set size and rank
      if (fgCommunicator!=0) {
         fSize = fgCommunicator->Get_size();
         fRank = fgCommunicator->Get_rank();
      }
      else {
         // no MPI calls
         fSize = 1;
         fRank = 0;
      }

      if (fSize>fNelements) {
         std::cerr << "Error --> MPIProcess::MPIProcess: more processors than elements!" << std::endl;
         MPI::COMM_WORLD.Abort(-1);
      }

#endif

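      // partition the elements across the ranks: fNelements/fSize elements each,
      // with the remainder spread one per rank (see NumElements4Job in MPIProcess.h)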
      fNumElements4JobIn = fNelements / fSize;
      fNumElements4JobOut = fNelements % fSize;

   }

   MPIProcess::~MPIProcess()
   {
      // destructor
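      // fgIndexComm counts the nesting level of live MPIProcess objects (at most 2);
      // when one level remains after this release, its communicator becomes current again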
#ifdef MPIPROC
      fgCommunicator = 0;
      fgIndexComm--;
      if (fgIndexComm==0)
         fgCommunicator = fgCommunicators[fgIndecesComm[fgIndexComm]];

#endif

   }

   bool MPIProcess::SyncVector(ROOT::Minuit2::MnAlgebraicVector &mnvector)
   {

      // in case of just one job, no sync is needed; just return
      if (fSize<2)
         return false;

      if (mnvector.size()!=fNelements) {
         std::cerr << "Error --> MPIProcess::SyncVector: # defined elements different from # requested elements!" << std::endl;
         std::cerr << "Error --> MPIProcess::SyncVector: no MPI synchronization is possible!" << std::endl;
         exit(-1);
      }

#ifdef MPIPROC
      unsigned int numElements4ThisJob = NumElements4Job(fRank);
      unsigned int startElementIndex = StartElementIndex();
      unsigned int endElementIndex = EndElementIndex();

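      // pack the slice of the vector owned by this rank, then gather all slices
      // so that every rank ends up with the complete, updated vector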
      double dvectorJob[numElements4ThisJob];
      for (unsigned int i = startElementIndex; i<endElementIndex; i++)
         dvectorJob[i-startElementIndex] = mnvector(i);

      double dvector[fNelements];
      MPISyncVector(dvectorJob,numElements4ThisJob,dvector);

      for (unsigned int i = 0; i<fNelements; i++) {
         mnvector(i) = dvector[i];
      }

      return true;

#else

      std::cerr << "Error --> MPIProcess::SyncVector: no MPI synchronization is possible!" << std::endl;
      exit(-1);

#endif

   }

   bool MPIProcess::SyncSymMatrixOffDiagonal(ROOT::Minuit2::MnAlgebraicSymMatrix &mnmatrix)
   {

      // in case of just one job, no sync is needed; just return
      if (fSize<2)
         return false;

      if (mnmatrix.size()-mnmatrix.Nrow()!=fNelements) {
         std::cerr << "Error --> MPIProcess::SyncSymMatrixOffDiagonal: # defined elements different from # requested elements!" << std::endl;
         std::cerr << "Error --> MPIProcess::SyncSymMatrixOffDiagonal: no MPI synchronization is possible!" << std::endl;
         exit(-1);
      }

#ifdef MPIPROC
      unsigned int numElements4ThisJob = NumElements4Job(fRank);
      unsigned int startElementIndex = StartElementIndex();
      unsigned int endElementIndex = EndElementIndex();
      unsigned int nrow = mnmatrix.Nrow();

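      // the fNelements off-diagonal entries are enumerated row by row:
      // (0,1)..(0,nrow-1), (1,2)..(1,nrow-1), ..., (nrow-2,nrow-1);
      // offsetVect corrects the linear index i so that i+offsetVect follows this
      // strictly-upper-triangular pattern, skipping the diagonal entries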
      unsigned int offsetVect = 0;
      for (unsigned int i = 0; i<startElementIndex; i++)
         if ((i+offsetVect)%(nrow-1)==0) offsetVect += (i+offsetVect)/(nrow-1);

      double dvectorJob[numElements4ThisJob];
      for (unsigned int i = startElementIndex; i<endElementIndex; i++) {

         int x = (i+offsetVect)/(nrow-1);
         if ((i+offsetVect)%(nrow-1)==0) offsetVect += x;
         int y = (i+offsetVect)%(nrow-1)+1;

         dvectorJob[i-startElementIndex] = mnmatrix(x,y);

      }

      double dvector[fNelements];
      MPISyncVector(dvectorJob,numElements4ThisJob,dvector);

      offsetVect = 0;
      for (unsigned int i = 0; i<fNelements; i++) {

         int x = (i+offsetVect)/(nrow-1);
         if ((i+offsetVect)%(nrow-1)==0) offsetVect += x;
         int y = (i+offsetVect)%(nrow-1)+1;

         mnmatrix(x,y) = dvector[i];

      }

      return true;

#else

      std::cerr << "Error --> MPIProcess::SyncSymMatrixOffDiagonal: no MPI synchronization is possible!" << std::endl;
      exit(-1);

#endif

   }

#ifdef MPIPROC
   void MPIProcess::MPISyncVector(double *ivector, int svector, double *ovector)
   {
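      // receive counts and displacements for Allgatherv: rank i contributes
      // NumElements4Job(i) elements, which land at offsets[i] in the output vector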
      int offsets[fSize];
      int nconts[fSize];
      nconts[0] = NumElements4Job(0);
      offsets[0] = 0;
      for (unsigned int i = 1; i<fSize; i++) {
         nconts[i] = NumElements4Job(i);
         offsets[i] = nconts[i-1] + offsets[i-1];
      }

      fgCommunicator->Allgatherv(ivector,svector,MPI::DOUBLE,
                                 ovector,nconts,offsets,MPI::DOUBLE);

   }

   bool MPIProcess::SetCartDimension(unsigned int dimX, unsigned int dimY)
   {
      if (fgCommunicator!=0 || fgIndexComm>=0) {
         std::cout << "Warning --> MPIProcess::SetCartDimension: MPIProcess already declared! Ignoring command..." << std::endl;
         return false;
      }
      if (dimX*dimY<=0) {
         std::cout << "Warning --> MPIProcess::SetCartDimension: Invalid topology! Ignoring command..." << std::endl;
         return false;
      }

      StartMPI();

      if (fgGlobalSize!=dimX*dimY) {
         std::cout << "Warning --> MPIProcess::SetCartDimension: Cartesian dimension doesn't correspond to # total procs!" << std::endl;
         std::cout << "Warning --> MPIProcess::SetCartDimension: Ignoring command..." << std::endl;
         return false;
      }

      if (fgCartSizeX!=dimX || fgCartSizeY!=dimY) {
         fgCartSizeX = dimX; fgCartSizeY = dimY;
         fgCartDimension = fgCartSizeX*fgCartSizeY;
         fgNewCart = true;

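         // a new grid invalidates the row/column communicators built for the old one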
         if (fgCommunicators[0]!=0 && fgCommunicators[1]!=0) {
            delete fgCommunicators[0]; fgCommunicators[0] = 0; fgIndecesComm[0] = 0;
            delete fgCommunicators[1]; fgCommunicators[1] = 0; fgIndecesComm[1] = 0;
         }
      }

      return true;

   }

   bool MPIProcess::SetDoFirstMPICall(bool doFirstMPICall)
   {

      StartMPI();

      bool ret;
      if (doFirstMPICall)
         ret = SetCartDimension(fgGlobalSize,1);
      else
         ret = SetCartDimension(1,fgGlobalSize);

      return ret;

   }

#endif

#ifdef MPIPROC
   MPITerminate dummyMPITerminate = MPITerminate();
#endif

} // namespace Minuit2

} // namespace ROOT
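
A minimal usage sketch (not part of the file above), assuming a build with MPIPROC defined and exactly four MPI processes; the 2x2 grid, the function name FillAndSync, and the zero-fill loop are illustrative placeholders only:

   #include "Minuit2/MPIProcess.h"

   void FillAndSync(ROOT::Minuit2::MnAlgebraicVector &grad)
   {
      using ROOT::Minuit2::MPIProcess;

      // fix the process grid once, before the first MPIProcess is constructed
      MPIProcess::SetCartDimension(2, 2);

      // communicator index 0: the first of the two allowed nesting levels
      MPIProcess mpiProc(grad.size(), 0);

      // each rank computes only its own slice of the vector ...
      for (unsigned int i = mpiProc.StartElementIndex(); i < mpiProc.EndElementIndex(); i++)
         grad(i) = 0.0; // placeholder for the real per-element computation

      // ... and SyncVector gathers the slices so every rank sees the full vector
      mpiProc.SyncVector(grad);
   }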