Nektar++
CommMpi.cpp
///////////////////////////////////////////////////////////////////////////////
//
// File CommMpi.cpp
//
// For more information, please see: http://www.nektar.info
//
// The MIT License
//
// Copyright (c) 2006 Division of Applied Mathematics, Brown University (USA),
// Department of Aeronautics, Imperial College London (UK), and Scientific
// Computing and Imaging Institute, University of Utah (USA).
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//
// Description: MPI communication implementation
//
///////////////////////////////////////////////////////////////////////////////

#ifdef NEKTAR_USING_PETSC
#include "petscsys.h"
#endif

#include <LibUtilities/Communication/CommMpi.h>

namespace Nektar
{
    namespace LibUtilities
    {
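        // Register this implementation with the communication factory so
        // that requesting the "ParallelMPI" key returns a CommMpi instance
        // built via CommMpi::create.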
        std::string CommMpi::className
            = GetCommFactory().RegisterCreatorFunction(
                "ParallelMPI",
                CommMpi::create,
                "Parallel communication using MPI.");

        /**
         *
         */
        CommMpi::CommMpi(int narg, char* arg[])
            : Comm(narg, arg)
        {
            int init = 0;
            MPI_Initialized(&init);
            ASSERTL0(!init, "MPI has already been initialised.");

            int retval = MPI_Init(&narg, &arg);
            if (retval != MPI_SUCCESS)
            {
                ASSERTL0(false, "Failed to initialise MPI");
            }

            m_comm = MPI_COMM_WORLD;
            MPI_Comm_size(m_comm, &m_size);
            MPI_Comm_rank(m_comm, &m_rank);

#ifdef NEKTAR_USING_PETSC
            PetscInitializeNoArguments();
#endif

            m_type = "Parallel MPI";
        }


        /**
         *
         */
        CommMpi::CommMpi(MPI_Comm pComm)
            : Comm()
        {
            m_comm = pComm;
            MPI_Comm_size(m_comm, &m_size);
            MPI_Comm_rank(m_comm, &m_rank);

            m_type = "Parallel MPI";
        }


        /**
         *
         */
        CommMpi::~CommMpi()
        {

        }


        /**
         *
         */
        MPI_Comm CommMpi::GetComm()
        {
            return m_comm;
        }


        /**
         *
         */
        void CommMpi::v_Finalise()
        {
#ifdef NEKTAR_USING_PETSC
            PetscFinalize();
#endif
            MPI_Finalize();
        }


        /**
         *
         */
        int CommMpi::v_GetRank()
        {
            return m_rank;
        }

        /**
         *
         */
        bool CommMpi::v_TreatAsRankZero(void)
        {
            return m_rank == 0;
        }

        /**
         *
         */
        void CommMpi::v_Block()
        {
            MPI_Barrier(m_comm);
        }


        /**
         *
         */
        void CommMpi::v_Send(int pProc, Array<OneD, NekDouble>& pData)
        {
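            // When MPISYNC is enabled (see CommMpi.h) a synchronous send
            // (MPI_Ssend) is used, which completes only once the matching
            // receive has started; otherwise a standard MPI_Send is used.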
            if (MPISYNC)
            {
                MPI_Ssend(pData.get(),
                          (int) pData.num_elements(),
                          MPI_DOUBLE,
                          pProc,
                          0,
                          m_comm);
            }
            else
            {
                MPI_Send(pData.get(),
                         (int) pData.num_elements(),
                         MPI_DOUBLE,
                         pProc,
                         0,
                         m_comm);
            }
        }


        /**
         *
         */
        void CommMpi::v_Recv(int pProc, Array<OneD, NekDouble>& pData)
        {
            MPI_Status status;
            MPI_Recv(pData.get(),
                     (int) pData.num_elements(),
                     MPI_DOUBLE,
                     pProc,
                     0,
                     m_comm,
                     &status);

            //ASSERTL0(status.MPI_ERROR == MPI_SUCCESS,
            //         "MPI error receiving data.");
        }


        /**
         *
         */
        void CommMpi::v_Send(int pProc, Array<OneD, int>& pData)
        {
            if (MPISYNC)
            {
                MPI_Ssend(pData.get(),
                          (int) pData.num_elements(),
                          MPI_INT,
                          pProc,
                          0,
                          m_comm);
            }
            else
            {
                MPI_Send(pData.get(),
                         (int) pData.num_elements(),
                         MPI_INT,
                         pProc,
                         0,
                         m_comm);
            }
        }


        /**
         *
         */
        void CommMpi::v_Recv(int pProc, Array<OneD, int>& pData)
        {
            MPI_Status status;
            MPI_Recv(pData.get(),
                     (int) pData.num_elements(),
                     MPI_INT,
                     pProc,
                     0,
                     m_comm,
                     &status);

            //ASSERTL0(status.MPI_ERROR == MPI_SUCCESS,
            //         "MPI error receiving data.");
        }


        /**
         *
         */
        void CommMpi::v_Send(int pProc, std::vector<unsigned int>& pData)
        {
            if (MPISYNC)
            {
                MPI_Ssend(&pData[0],
                          (int) pData.size(),
                          MPI_UNSIGNED,
                          pProc,
                          0,
                          m_comm);
            }
            else
            {
                MPI_Send(&pData[0],
                         (int) pData.size(),
                         MPI_UNSIGNED,
                         pProc,
                         0,
                         m_comm);
            }
        }


        /**
         *
         */
        void CommMpi::v_Recv(int pProc, std::vector<unsigned int>& pData)
        {
            MPI_Status status;
            MPI_Recv(&pData[0],
                     (int) pData.size(),
                     MPI_UNSIGNED,
                     pProc,
                     0,
                     m_comm,
                     &status);

            //ASSERTL0(status.MPI_ERROR == MPI_SUCCESS,
            //         "MPI error receiving data.");
        }


        /**
         *
         */
        void CommMpi::v_SendRecv(int pSendProc,
                                 Array<OneD, NekDouble>& pSendData,
                                 int pRecvProc,
                                 Array<OneD, NekDouble>& pRecvData)
        {
            MPI_Status status;
            int retval = MPI_Sendrecv(pSendData.get(),
                                      (int) pSendData.num_elements(),
                                      MPI_DOUBLE,
                                      pSendProc,
                                      0,
                                      pRecvData.get(),
                                      (int) pRecvData.num_elements(),
                                      MPI_DOUBLE,
                                      pRecvProc,
                                      0,
                                      m_comm,
                                      &status);

            ASSERTL0(retval == MPI_SUCCESS,
                     "MPI error performing send-receive of data.");
        }


        /**
         *
         */
        void CommMpi::v_SendRecv(int pSendProc,
                                 Array<OneD, int>& pSendData,
                                 int pRecvProc,
                                 Array<OneD, int>& pRecvData)
        {
            MPI_Status status;
            int retval = MPI_Sendrecv(pSendData.get(),
                                      (int) pSendData.num_elements(),
                                      MPI_INT,
                                      pRecvProc,
                                      0,
                                      pRecvData.get(),
                                      (int) pRecvData.num_elements(),
                                      MPI_INT,
                                      pSendProc,
                                      0,
                                      m_comm,
                                      &status);

            ASSERTL0(retval == MPI_SUCCESS,
                     "MPI error performing send-receive of data.");
        }

        /**
         *
         */
        void CommMpi::v_SendRecvReplace(int pSendProc,
                                        int pRecvProc,
                                        Array<OneD, NekDouble>& pSendData)
        {
            MPI_Status status;
            int retval = MPI_Sendrecv_replace(pSendData.get(),
                                              (int) pSendData.num_elements(),
                                              MPI_DOUBLE,
                                              pRecvProc,
                                              0,
                                              pSendProc,
                                              0,
                                              m_comm,
                                              &status);

            ASSERTL0(retval == MPI_SUCCESS,
                     "MPI error performing Send-Receive-Replace of data.");
        }


        /**
         *
         */
        void CommMpi::v_SendRecvReplace(int pSendProc,
                                        int pRecvProc,
                                        Array<OneD, int>& pSendData)
        {
            MPI_Status status;
            int retval = MPI_Sendrecv_replace(pSendData.get(),
                                              (int) pSendData.num_elements(),
                                              MPI_INT,
                                              pRecvProc,
                                              0,
                                              pSendProc,
                                              0,
                                              m_comm,
                                              &status);

            ASSERTL0(retval == MPI_SUCCESS,
                     "MPI error performing Send-Receive-Replace of data.");
        }


        /**
         *
         */
        void CommMpi::v_AllReduce(NekDouble& pData, enum ReduceOperator pOp)
        {
            if (GetSize() == 1)
            {
                return;
            }

            MPI_Op vOp;
            switch (pOp)
            {
                case ReduceMax: vOp = MPI_MAX; break;
                case ReduceMin: vOp = MPI_MIN; break;
                case ReduceSum:
                default:        vOp = MPI_SUM; break;
            }
            int retval = MPI_Allreduce(MPI_IN_PLACE,
                                       &pData,
                                       1,
                                       MPI_DOUBLE,
                                       vOp,
                                       m_comm);

            ASSERTL0(retval == MPI_SUCCESS,
                     "MPI error performing All-reduce.");
        }


        /**
         *
         */
        void CommMpi::v_AllReduce(int& pData, enum ReduceOperator pOp)
        {
            if (GetSize() == 1)
            {
                return;
            }

            MPI_Op vOp;
            switch (pOp)
            {
                case ReduceMax: vOp = MPI_MAX; break;
                case ReduceMin: vOp = MPI_MIN; break;
                case ReduceSum:
                default:        vOp = MPI_SUM; break;
            }
            int retval = MPI_Allreduce(MPI_IN_PLACE,
                                       &pData,
                                       1,
                                       MPI_INT,
                                       vOp,
                                       m_comm);

            ASSERTL0(retval == MPI_SUCCESS,
                     "MPI error performing All-reduce.");
        }


        /**
         *
         */
        void CommMpi::v_AllReduce(Array<OneD, NekDouble>& pData,
                                  enum ReduceOperator pOp)
        {
            if (GetSize() == 1)
            {
                return;
            }

            MPI_Op vOp;
            switch (pOp)
            {
                case ReduceMax: vOp = MPI_MAX; break;
                case ReduceMin: vOp = MPI_MIN; break;
                case ReduceSum:
                default:        vOp = MPI_SUM; break;
            }
            int retval = MPI_Allreduce(MPI_IN_PLACE,
                                       pData.get(),
                                       (int) pData.num_elements(),
                                       MPI_DOUBLE,
                                       vOp,
                                       m_comm);

            ASSERTL0(retval == MPI_SUCCESS,
                     "MPI error performing All-reduce.");
        }


        /**
         *
         */
        void CommMpi::v_AllReduce(Array<OneD, int>& pData,
                                  enum ReduceOperator pOp)
        {
            if (GetSize() == 1)
            {
                return;
            }

            MPI_Op vOp;
            switch (pOp)
            {
                case ReduceMax: vOp = MPI_MAX; break;
                case ReduceMin: vOp = MPI_MIN; break;
                case ReduceSum:
                default:        vOp = MPI_SUM; break;
            }
            int retval = MPI_Allreduce(MPI_IN_PLACE,
                                       pData.get(),
                                       (int) pData.num_elements(),
                                       MPI_INT,
                                       vOp,
                                       m_comm);

            ASSERTL0(retval == MPI_SUCCESS,
                     "MPI error performing All-reduce.");
        }


        /**
         *
         */
        void CommMpi::v_AllReduce(std::vector<unsigned int>& pData,
                                  enum ReduceOperator pOp)
        {
            if (GetSize() == 1)
            {
                return;
            }

            MPI_Op vOp;
            switch (pOp)
            {
                case ReduceMax: vOp = MPI_MAX; break;
                case ReduceMin: vOp = MPI_MIN; break;
                case ReduceSum:
                default:        vOp = MPI_SUM; break;
            }
            // Use MPI_UNSIGNED to match the element type, consistent with
            // the vector<unsigned int> Send/Recv overloads above.
            int retval = MPI_Allreduce(MPI_IN_PLACE,
                                       &pData[0],
                                       (int) pData.size(),
                                       MPI_UNSIGNED,
                                       vOp,
                                       m_comm);

            ASSERTL0(retval == MPI_SUCCESS,
                     "MPI error performing All-reduce.");
        }


        /**
         *
         */
        void CommMpi::v_AlltoAll(Array<OneD, NekDouble>& pSendData,
                                 Array<OneD, NekDouble>& pRecvData)
        {
            int retval = MPI_Alltoall(pSendData.get(),
                                      (int) pSendData.num_elements()/GetSize(),
                                      MPI_DOUBLE,
                                      pRecvData.get(),
                                      (int) pRecvData.num_elements()/GetSize(),
                                      MPI_DOUBLE,
                                      m_comm);

            ASSERTL0(retval == MPI_SUCCESS,
                     "MPI error performing All-to-All.");
        }


        /**
         *
         */
        void CommMpi::v_AlltoAll(Array<OneD, int>& pSendData,
                                 Array<OneD, int>& pRecvData)
        {
            int retval = MPI_Alltoall(pSendData.get(),
                                      (int) pSendData.num_elements()/GetSize(),
                                      MPI_INT,
                                      pRecvData.get(),
                                      (int) pRecvData.num_elements()/GetSize(),
                                      MPI_INT,
                                      m_comm);

            ASSERTL0(retval == MPI_SUCCESS,
                     "MPI error performing All-to-All.");
        }


        /**
         *
         */
        void CommMpi::v_AlltoAllv(Array<OneD, NekDouble>& pSendData,
                                  Array<OneD, int>& pSendDataSizeMap,
                                  Array<OneD, int>& pSendDataOffsetMap,
                                  Array<OneD, NekDouble>& pRecvData,
                                  Array<OneD, int>& pRecvDataSizeMap,
                                  Array<OneD, int>& pRecvDataOffsetMap)
        {
            int retval = MPI_Alltoallv(pSendData.get(),
                                       pSendDataSizeMap.get(),
                                       pSendDataOffsetMap.get(),
                                       MPI_DOUBLE,
                                       pRecvData.get(),
                                       pRecvDataSizeMap.get(),
                                       pRecvDataOffsetMap.get(),
                                       MPI_DOUBLE,
                                       m_comm);

            ASSERTL0(retval == MPI_SUCCESS,
                     "MPI error performing All-to-All-v.");
        }

        /**
         *
         */
        void CommMpi::v_AlltoAllv(Array<OneD, int>& pSendData,
                                  Array<OneD, int>& pSendDataSizeMap,
                                  Array<OneD, int>& pSendDataOffsetMap,
                                  Array<OneD, int>& pRecvData,
                                  Array<OneD, int>& pRecvDataSizeMap,
                                  Array<OneD, int>& pRecvDataOffsetMap)
        {
            int retval = MPI_Alltoallv(pSendData.get(),
                                       pSendDataSizeMap.get(),
                                       pSendDataOffsetMap.get(),
                                       MPI_INT,
                                       pRecvData.get(),
                                       pRecvDataSizeMap.get(),
                                       pRecvDataOffsetMap.get(),
                                       MPI_INT,
                                       m_comm);

            ASSERTL0(retval == MPI_SUCCESS,
                     "MPI error performing All-to-All-v.");
        }


        /**
         * Processes are considered as a grid of size pRows*pColumns. Comm
         * objects are created corresponding to the rows and columns of this
         * grid. The row and column to which this process belongs is stored
         * in #m_commRow and #m_commColumn.
         */
        void CommMpi::v_SplitComm(int pRows, int pColumns)
        {
            ASSERTL0(pRows*pColumns == m_size,
                     "Rows/Columns do not match comm size.");

            MPI_Comm newComm;

            // Compute row and column in grid.
            int myCol = m_rank % pColumns;
            int myRow = (m_rank - myCol) / pColumns;

            // Split Comm into rows - all processes with same myRow are put
            // in the same communicator. The rank within this communicator
            // is the column index.
            MPI_Comm_split(m_comm, myRow, myCol, &newComm);
            m_commRow = boost::shared_ptr<Comm>(new CommMpi(newComm));

            // Split Comm into columns - all processes with same myCol are
            // put in the same communicator. The rank within this
            // communicator is the row index.
            MPI_Comm_split(m_comm, myCol, myRow, &newComm);
            m_commColumn = boost::shared_ptr<Comm>(new CommMpi(newComm));
        }
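
        /*
         * Illustrative example (not part of the library source): with six
         * processes and SplitComm(2, 3), ranks are laid out as
         *
         *            col 0   col 1   col 2
         *    row 0:    0       1       2
         *    row 1:    3       4       5
         *
         * so rank 4 computes myRow = 1 and myCol = 1; its row communicator
         * then contains ranks {3, 4, 5} and its column communicator
         * contains ranks {1, 4}.
         */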
    }
}