Nektar++
CommMpi.cpp
///////////////////////////////////////////////////////////////////////////////
//
// File CommMpi.cpp
//
// For more information, please see: http://www.nektar.info
//
// The MIT License
//
// Copyright (c) 2006 Division of Applied Mathematics, Brown University (USA),
// Department of Aeronautics, Imperial College London (UK), and Scientific
// Computing and Imaging Institute, University of Utah (USA).
//
// License for the specific language governing rights and limitations under
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//
// Description: MPI communication implementation
//
///////////////////////////////////////////////////////////////////////////////

#ifdef NEKTAR_USING_PETSC
#include "petscsys.h"
#endif

#include <LibUtilities/Communication/CommMpi.h>
#include <LibUtilities/BasicUtils/ErrorUtil.hpp>

namespace Nektar
{
namespace LibUtilities
{
std::string CommMpi::className = GetCommFactory().RegisterCreatorFunction(
    "ParallelMPI", CommMpi::create, "Parallel communication using MPI.");

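/*
 * Usage sketch (illustrative; not part of the original file): the
 * registration above exposes this class through the communication factory
 * under the key "ParallelMPI". Assuming the factory's usual CreateInstance
 * interface and command-line arguments argc/argv, a communicator would
 * typically be obtained as:
 *
 * \code
 * CommSharedPtr vComm =
 *     GetCommFactory().CreateInstance("ParallelMPI", argc, argv);
 * \endcode
 */
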
/**
 * Default constructor: initialises MPI (and PETSc, where enabled) and wraps
 * MPI_COMM_WORLD.
 */
CommMpi::CommMpi(int narg, char *arg[]) : Comm(narg, arg)
{
    int init = 0;
    MPI_Initialized(&init);
    ASSERTL0(!init, "MPI has already been initialised.");

    int retval = MPI_Init(&narg, &arg);
    if (retval != MPI_SUCCESS)
    {
        ASSERTL0(false, "Failed to initialise MPI");
    }

    m_comm = MPI_COMM_WORLD;
    MPI_Comm_size(m_comm, &m_size);
    MPI_Comm_rank(m_comm, &m_rank);

#ifdef NEKTAR_USING_PETSC
    PetscInitializeNoArguments();
#endif

    m_type = "Parallel MPI";
}

/**
 * Wraps an existing MPI communicator; MPI is assumed to be initialised
 * already.
 */
CommMpi::CommMpi(MPI_Comm pComm) : Comm()
{
    m_comm = pComm;
    MPI_Comm_size(m_comm, &m_size);
    MPI_Comm_rank(m_comm, &m_rank);

    m_type = "Parallel MPI";
}

/**
 * Default destructor.
 */
CommMpi::~CommMpi()
{
}

/**
 * Returns the underlying MPI communicator.
 */
MPI_Comm CommMpi::GetComm()
{
    return m_comm;
}

/**
 * Finalises PETSc (where enabled) and MPI, unless MPI has already been
 * finalised.
 */
void CommMpi::v_Finalise()
{
#ifdef NEKTAR_USING_PETSC
    PetscFinalize();
#endif
    int flag;
    MPI_Finalized(&flag);
    if (!flag)
    {
        MPI_Finalize();
    }
}

/**
 * Returns the rank of this process within the communicator.
 */
int CommMpi::v_GetRank()
{
    return m_rank;
}

/**
 * Returns true if this process should be treated as rank zero, i.e. if it
 * is rank zero of the communicator.
 */
bool CommMpi::v_TreatAsRankZero(void)
{
    if (m_rank == 0)
    {
        return true;
    }
    else
    {
        return false;
    }
}

/**
 * Blocks until all processes in the communicator have reached this point.
 */
void CommMpi::v_Block()
{
    MPI_Barrier(m_comm);
}

/**
 * Returns the MPI wall-clock time.
 */
double CommMpi::v_Wtime()
{
    return MPI_Wtime();
}

/**
 * Sends data to the given destination rank, using a synchronous send when
 * MPISYNC is enabled.
 */
void CommMpi::v_Send(void *buf, int count, CommDataType dt, int dest)
{
    if (MPISYNC)
    {
        MPI_Ssend(buf, count, dt, dest, 0, m_comm);
    }
    else
    {
        MPI_Send(buf, count, dt, dest, 0, m_comm);
    }
}

/**
 * Receives data from the given source rank.
 */
void CommMpi::v_Recv(void *buf, int count, CommDataType dt, int source)
{
    MPI_Recv(buf, count, dt, source, 0, m_comm, MPI_STATUS_IGNORE);
    // ASSERTL0(status.MPI_ERROR == MPI_SUCCESS,
    //          "MPI error receiving data.");
}

/**
 * Performs a combined send to dest and receive from source.
 */
void CommMpi::v_SendRecv(void *sendbuf, int sendcount, CommDataType sendtype,
                         int dest, void *recvbuf, int recvcount,
                         CommDataType recvtype, int source)
{
    MPI_Status status;
    int retval = MPI_Sendrecv(sendbuf, sendcount, sendtype, dest, 0, recvbuf,
                              recvcount, recvtype, source, 0, m_comm, &status);

    ASSERTL0(retval == MPI_SUCCESS,
             "MPI error performing send-receive of data.");
}

/**
 * Sends data to pRecvProc and receives data from pSendProc, reusing the
 * same buffer.
 */
void CommMpi::v_SendRecvReplace(void *buf, int count, CommDataType dt,
                                int pSendProc, int pRecvProc)
{
    MPI_Status status;
    int retval = MPI_Sendrecv_replace(buf, count, dt, pRecvProc, 0, pSendProc,
                                      0, m_comm, &status);

    ASSERTL0(retval == MPI_SUCCESS,
             "MPI error performing Send-Receive-Replace of data.");
}

/**
 * Reduces data across all processes in place, using the requested reduction
 * operation.
 */
void CommMpi::v_AllReduce(void *buf, int count, CommDataType dt,
                          enum ReduceOperator pOp)
{
    if (GetSize() == 1)
    {
        return;
    }

    MPI_Op vOp;
    switch (pOp)
    {
        case ReduceMax:
            vOp = MPI_MAX;
            break;
        case ReduceMin:
            vOp = MPI_MIN;
            break;
        case ReduceSum:
        default:
            vOp = MPI_SUM;
            break;
    }
    int retval = MPI_Allreduce(MPI_IN_PLACE, buf, count, dt, vOp, m_comm);

    ASSERTL0(retval == MPI_SUCCESS, "MPI error performing All-reduce.");
}

/**
 * Performs an all-to-all exchange of fixed-size blocks between all
 * processes.
 */
void CommMpi::v_AlltoAll(void *sendbuf, int sendcount, CommDataType sendtype,
                         void *recvbuf, int recvcount, CommDataType recvtype)
{
    int retval = MPI_Alltoall(sendbuf, sendcount, sendtype, recvbuf, recvcount,
                              recvtype, m_comm);

    ASSERTL0(retval == MPI_SUCCESS, "MPI error performing All-to-All.");
}

/**
 * Performs an all-to-all exchange with per-process counts and displacements.
 */
void CommMpi::v_AlltoAllv(void *sendbuf, int sendcounts[], int sdispls[],
                          CommDataType sendtype, void *recvbuf,
                          int recvcounts[], int rdispls[],
                          CommDataType recvtype)
{
    int retval = MPI_Alltoallv(sendbuf, sendcounts, sdispls, sendtype, recvbuf,
                               recvcounts, rdispls, recvtype, m_comm);

    ASSERTL0(retval == MPI_SUCCESS, "MPI error performing All-to-All-v.");
}

void CommMpi::v_Bcast(void *buffer, int count, CommDataType dt, int root)
{
    int retval = MPI_Bcast(buffer, count, dt, root, m_comm);
    ASSERTL0(retval == MPI_SUCCESS, "MPI error performing Bcast-v.");
}

void CommMpi::v_Exscan(Array<OneD, unsigned long long> &pData,
                       const enum ReduceOperator pOp,
                       Array<OneD, unsigned long long> &ans)
{
    int n = pData.num_elements();
    ASSERTL0(n == ans.num_elements(), "Array sizes differ in Exscan");

    MPI_Op vOp;
    switch (pOp)
    {
        case ReduceMax:
            vOp = MPI_MAX;
            break;
        case ReduceMin:
            vOp = MPI_MIN;
            break;
        case ReduceSum:
        default:
            vOp = MPI_SUM;
            break;
    }

    int retval = MPI_Exscan(pData.get(), ans.get(), n, MPI_UNSIGNED_LONG_LONG,
                            vOp, m_comm);
    ASSERTL0(retval == MPI_SUCCESS, "MPI error performing Exscan-v.");
}

void CommMpi::v_Gather(void *sendbuf, int sendcount, CommDataType sendtype,
                       void *recvbuf, int recvcount, CommDataType recvtype,
                       int root)
{
    int retval = MPI_Gather(sendbuf, sendcount, sendtype, recvbuf, recvcount,
                            recvtype, root, m_comm);

    ASSERTL0(retval == MPI_SUCCESS, "MPI error performing Gather.");
}

void CommMpi::v_Scatter(void *sendbuf, int sendcount, CommDataType sendtype,
                        void *recvbuf, int recvcount, CommDataType recvtype,
                        int root)
{
    int retval = MPI_Scatter(sendbuf, sendcount, sendtype, recvbuf, recvcount,
                             recvtype, root, m_comm);
    ASSERTL0(retval == MPI_SUCCESS, "MPI error performing Scatter.");
}

/**
 * Processes are considered as a grid of size pRows*pColumns. Comm
 * objects are created corresponding to the rows and columns of this
 * grid. The row and column to which this process belongs is stored in
 * #m_commRow and #m_commColumn.
 */
void CommMpi::v_SplitComm(int pRows, int pColumns)
{
    ASSERTL0(pRows * pColumns == m_size,
             "Rows/Columns do not match comm size.");

    MPI_Comm newComm;

    // Compute row and column in grid.
    int myCol = m_rank % pColumns;
    int myRow = (m_rank - myCol) / pColumns;

    // Split Comm into rows - all processes with same myRow are put in
    // the same communicator. The rank within this communicator is the
    // column index.
    MPI_Comm_split(m_comm, myRow, myCol, &newComm);
    m_commRow = boost::shared_ptr<Comm>(new CommMpi(newComm));

    // Split Comm into columns - all processes with same myCol are put
    // in the same communicator. The rank within this communicator is
    // the row index.
    MPI_Comm_split(m_comm, myCol, myRow, &newComm);
    m_commColumn = boost::shared_ptr<Comm>(new CommMpi(newComm));
}

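/*
 * Worked example (illustrative; not part of the original file): with six
 * processes and a split equivalent to v_SplitComm(2, 3), the ranks form the
 * grid
 *
 *            col 0    col 1    col 2
 *   row 0    rank 0   rank 1   rank 2
 *   row 1    rank 3   rank 4   rank 5
 *
 * Rank 4 computes myCol = 4 % 3 = 1 and myRow = (4 - 1) / 3 = 1, so its row
 * communicator m_commRow contains ranks {3, 4, 5} (in which it takes rank 1,
 * its column index) and its column communicator m_commColumn contains ranks
 * {1, 4} (in which it takes rank 1, its row index).
 */
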
/**
 * Create a new communicator if the flag is non-zero.
 */
CommSharedPtr CommMpi::v_CommCreateIf(int flag)
{
    MPI_Comm newComm;
    // color == MPI_UNDEFINED => not in the new communicator.
    // key == 0 on all => use the existing rank to order them; OpenMPI, at
    // least, suggests this is faster than ordering them ourselves.
    MPI_Comm_split(m_comm, flag ? 0 : MPI_UNDEFINED, 0, &newComm);

    if (flag == 0)
    {
        // flag == 0 => we get back MPI_COMM_NULL; return a null pointer
        // instead.
        return boost::shared_ptr<Comm>();
    }
    else
    {
        // Return a real communicator.
        return boost::shared_ptr<Comm>(new CommMpi(newComm));
    }
}
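
/*
 * Usage sketch (illustrative; not part of the original file): to build a
 * communicator over only the even-ranked processes, every rank makes the
 * call with a flag derived from its own rank; ranks passing a zero flag get
 * a null pointer back. Here vComm is assumed to be an existing CommSharedPtr,
 * and CommCreateIf/GetRank/Block the public wrappers declared in Comm.h.
 *
 * \code
 * CommSharedPtr evenComm = vComm->CommCreateIf(vComm->GetRank() % 2 == 0);
 * if (evenComm)
 * {
 *     // Collective operation over the even ranks only.
 *     evenComm->Block();
 * }
 * \endcode
 */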
}
}