Nektar++
CommMpi.cpp
///////////////////////////////////////////////////////////////////////////////
//
// File CommMpi.cpp
//
// For more information, please see: http://www.nektar.info
//
// The MIT License
//
// Copyright (c) 2006 Division of Applied Mathematics, Brown University (USA),
// Department of Aeronautics, Imperial College London (UK), and Scientific
// Computing and Imaging Institute, University of Utah (USA).
//
// License for the specific language governing rights and limitations under
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//
// Description: MPI communication implementation
//
///////////////////////////////////////////////////////////////////////////////

#include <LibUtilities/Communication/CommMpi.h>
namespace Nektar
{
    namespace LibUtilities
    {
        std::string CommMpi::className
            = GetCommFactory().RegisterCreatorFunction(
                "ParallelMPI",
                CommMpi::create,
                "Parallel communication using MPI.");
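        // The registration above exposes this class through the Nektar++ comm
        // factory under the key "ParallelMPI". A minimal creation sketch,
        // assuming the standard factory interface (the actual call site lives
        // in the session/driver setup code, not in this file):
        //
        //     CommSharedPtr comm =
        //         GetCommFactory().CreateInstance("ParallelMPI", argc, argv);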

        /**
         * Default constructor: checks that MPI has not already been
         * initialised, calls MPI_Init and attaches to MPI_COMM_WORLD.
         */
        CommMpi::CommMpi(int narg, char* arg[])
            : Comm(narg, arg)
        {
            int init = 0;
            MPI_Initialized(&init);
            ASSERTL0(!init, "MPI has already been initialised.");

            int retval = MPI_Init(&narg, &arg);
            if (retval != MPI_SUCCESS)
            {
                ASSERTL0(false, "Failed to initialise MPI");
            }

            m_comm = MPI_COMM_WORLD;
            MPI_Comm_size(m_comm, &m_size);
            MPI_Comm_rank(m_comm, &m_rank);

            m_type = "Parallel MPI";
        }


        /**
         * Wraps an existing MPI communicator without initialising MPI.
         */
        CommMpi::CommMpi(MPI_Comm pComm)
            : Comm()
        {
            m_comm = pComm;
            MPI_Comm_size(m_comm, &m_size);
            MPI_Comm_rank(m_comm, &m_rank);

            m_type = "Parallel MPI";
        }


        /**
         * Destructor.
         */
        CommMpi::~CommMpi()
        {
        }


        /**
         * Returns the underlying MPI communicator.
         */
        MPI_Comm CommMpi::GetComm()
        {
            return m_comm;
        }


        /**
         * Finalises the MPI environment.
         */
        void CommMpi::v_Finalise()
        {
            MPI_Finalize();
        }


        /**
         * Returns the rank of this process within the communicator.
         */
        int CommMpi::v_GetRank()
        {
            return m_rank;
        }

        /**
         * Returns true if this process should be treated as rank zero.
         */
        bool CommMpi::v_TreatAsRankZero()
        {
            if (m_rank == 0)
            {
                return true;
            }
            else
            {
                return false;
            }
        }

        /**
         * Blocks until all processes in the communicator reach this point.
         */
        void CommMpi::v_Block()
        {
            MPI_Barrier(m_comm);
        }


        /**
         * Sends an array of doubles to process pProc (synchronous send if
         * MPISYNC is set).
         */
        void CommMpi::v_Send(int pProc, Array<OneD, NekDouble>& pData)
        {
            if (MPISYNC)
            {
                MPI_Ssend( pData.get(),
                           (int) pData.num_elements(),
                           MPI_DOUBLE,
                           pProc,
                           0,
                           m_comm);
            }
            else
            {
                MPI_Send( pData.get(),
                          (int) pData.num_elements(),
                          MPI_DOUBLE,
                          pProc,
                          0,
                          m_comm);
            }
        }


        /**
         * Receives an array of doubles from process pProc (blocking).
         */
        void CommMpi::v_Recv(int pProc, Array<OneD, NekDouble>& pData)
        {
            MPI_Status status;
            MPI_Recv( pData.get(),
                      (int) pData.num_elements(),
                      MPI_DOUBLE,
                      pProc,
                      0,
                      m_comm,
                      &status);

            //ASSERTL0(status.MPI_ERROR == MPI_SUCCESS,
            //         "MPI error receiving data.");
        }


        /**
         * Sends an array of ints to process pProc (synchronous send if
         * MPISYNC is set).
         */
        void CommMpi::v_Send(int pProc, Array<OneD, int>& pData)
        {
            if (MPISYNC)
            {
                MPI_Ssend( pData.get(),
                           (int) pData.num_elements(),
                           MPI_INT,
                           pProc,
                           0,
                           m_comm);
            }
            else
            {
                MPI_Send( pData.get(),
                          (int) pData.num_elements(),
                          MPI_INT,
                          pProc,
                          0,
                          m_comm);
            }
        }


        /**
         * Receives an array of ints from process pProc (blocking).
         */
        void CommMpi::v_Recv(int pProc, Array<OneD, int>& pData)
        {
            MPI_Status status;
            MPI_Recv( pData.get(),
                      (int) pData.num_elements(),
                      MPI_INT,
                      pProc,
                      0,
                      m_comm,
                      &status);

            //ASSERTL0(status.MPI_ERROR == MPI_SUCCESS,
            //         "MPI error receiving data.");
        }


        /**
         * Sends a vector of unsigned ints to process pProc (synchronous send
         * if MPISYNC is set).
         */
        void CommMpi::v_Send(int pProc, std::vector<unsigned int>& pData)
        {
            if (MPISYNC)
            {
                MPI_Ssend( &pData[0],
                           (int) pData.size(),
                           MPI_UNSIGNED,
                           pProc,
                           0,
                           m_comm);
            }
            else
            {
                MPI_Send( &pData[0],
                          (int) pData.size(),
                          MPI_UNSIGNED,
                          pProc,
                          0,
                          m_comm);
            }
        }


        /**
         * Receives a vector of unsigned ints from process pProc (blocking).
         */
        void CommMpi::v_Recv(int pProc, std::vector<unsigned int>& pData)
        {
            MPI_Status status;
            MPI_Recv( &pData[0],
                      (int) pData.size(),
                      MPI_UNSIGNED,
                      pProc,
                      0,
                      m_comm,
                      &status);

            //ASSERTL0(status.MPI_ERROR == MPI_SUCCESS,
            //         "MPI error receiving data.");
        }


        /**
         * Combined send/receive of arrays of doubles.
         */
        void CommMpi::v_SendRecv(int pSendProc,
                                 Array<OneD, NekDouble>& pSendData,
                                 int pRecvProc,
                                 Array<OneD, NekDouble>& pRecvData)
        {
            MPI_Status status;
            int retval = MPI_Sendrecv(pSendData.get(),
                                      (int) pSendData.num_elements(),
                                      MPI_DOUBLE,
                                      pRecvProc,
                                      0,
                                      pRecvData.get(),
                                      (int) pRecvData.num_elements(),
                                      MPI_DOUBLE,
                                      pSendProc,
                                      0,
                                      m_comm,
                                      &status);

            ASSERTL0(retval == MPI_SUCCESS,
                     "MPI error performing send-receive of data.");
        }


        /**
         * Combined send/receive of arrays of ints.
         */
        void CommMpi::v_SendRecv(int pSendProc,
                                 Array<OneD, int>& pSendData,
                                 int pRecvProc,
                                 Array<OneD, int>& pRecvData)
        {
            MPI_Status status;
            int retval = MPI_Sendrecv(pSendData.get(),
                                      (int) pSendData.num_elements(),
                                      MPI_INT,
                                      pRecvProc,
                                      0,
                                      pRecvData.get(),
                                      (int) pRecvData.num_elements(),
                                      MPI_INT,
                                      pSendProc,
                                      0,
                                      m_comm,
                                      &status);

            ASSERTL0(retval == MPI_SUCCESS,
                     "MPI error performing send-receive of data.");
        }

        /**
         * In-place send/receive of doubles: the contents of pSendData are
         * replaced by the received values.
         */
        void CommMpi::v_SendRecvReplace(int pSendProc,
                                        int pRecvProc,
                                        Array<OneD, NekDouble>& pSendData)
        {
            MPI_Status status;
            int retval = MPI_Sendrecv_replace(pSendData.get(),
                                              (int) pSendData.num_elements(),
                                              MPI_DOUBLE,
                                              pRecvProc,
                                              0,
                                              pSendProc,
                                              0,
                                              m_comm,
                                              &status);

            ASSERTL0(retval == MPI_SUCCESS,
                     "MPI error performing Send-Receive-Replace of data.");
        }


        /**
         * In-place send/receive of ints: the contents of pSendData are
         * replaced by the received values.
         */
        void CommMpi::v_SendRecvReplace(int pSendProc,
                                        int pRecvProc,
                                        Array<OneD, int>& pSendData)
        {
            MPI_Status status;
            int retval = MPI_Sendrecv_replace(pSendData.get(),
                                              (int) pSendData.num_elements(),
                                              MPI_INT,
                                              pRecvProc,
                                              0,
                                              pSendProc,
                                              0,
                                              m_comm,
                                              &status);

            ASSERTL0(retval == MPI_SUCCESS,
                     "MPI error performing Send-Receive-Replace of data.");
        }


        /**
         * In-place all-reduce of a single double across all processes.
         */
        void CommMpi::v_AllReduce(NekDouble& pData, enum ReduceOperator pOp)
        {
            if (GetSize() == 1)
            {
                return;
            }

            MPI_Op vOp;
            switch (pOp)
            {
                case ReduceMax: vOp = MPI_MAX; break;
                case ReduceMin: vOp = MPI_MIN; break;
                case ReduceSum:
                default:        vOp = MPI_SUM; break;
            }
            int retval = MPI_Allreduce( MPI_IN_PLACE,
                                        &pData,
                                        1,
                                        MPI_DOUBLE,
                                        vOp,
                                        m_comm);

            ASSERTL0(retval == MPI_SUCCESS,
                     "MPI error performing All-reduce.");
        }


        /**
         * In-place all-reduce of a single int across all processes.
         */
        void CommMpi::v_AllReduce(int& pData, enum ReduceOperator pOp)
        {
            if (GetSize() == 1)
            {
                return;
            }

            MPI_Op vOp;
            switch (pOp)
            {
                case ReduceMax: vOp = MPI_MAX; break;
                case ReduceMin: vOp = MPI_MIN; break;
                case ReduceSum:
                default:        vOp = MPI_SUM; break;
            }
            int retval = MPI_Allreduce( MPI_IN_PLACE,
                                        &pData,
                                        1,
                                        MPI_INT,
                                        vOp,
                                        m_comm);

            ASSERTL0(retval == MPI_SUCCESS,
                     "MPI error performing All-reduce.");
        }


        /**
         * In-place all-reduce of an array of doubles across all processes.
         */
        void CommMpi::v_AllReduce(Array<OneD, NekDouble>& pData, enum ReduceOperator pOp)
        {
            if (GetSize() == 1)
            {
                return;
            }

            MPI_Op vOp;
            switch (pOp)
            {
                case ReduceMax: vOp = MPI_MAX; break;
                case ReduceMin: vOp = MPI_MIN; break;
                case ReduceSum:
                default:        vOp = MPI_SUM; break;
            }
            int retval = MPI_Allreduce( MPI_IN_PLACE,
                                        pData.get(),
                                        (int) pData.num_elements(),
                                        MPI_DOUBLE,
                                        vOp,
                                        m_comm);

            ASSERTL0(retval == MPI_SUCCESS,
                     "MPI error performing All-reduce.");
        }


        /**
         * In-place all-reduce of an array of ints across all processes.
         */
        void CommMpi::v_AllReduce(Array<OneD, int>& pData, enum ReduceOperator pOp)
        {
            if (GetSize() == 1)
            {
                return;
            }

            MPI_Op vOp;
            switch (pOp)
            {
                case ReduceMax: vOp = MPI_MAX; break;
                case ReduceMin: vOp = MPI_MIN; break;
                case ReduceSum:
                default:        vOp = MPI_SUM; break;
            }
            int retval = MPI_Allreduce( MPI_IN_PLACE,
                                        pData.get(),
                                        (int) pData.num_elements(),
                                        MPI_INT,
                                        vOp,
                                        m_comm);

            ASSERTL0(retval == MPI_SUCCESS,
                     "MPI error performing All-reduce.");
        }


        /**
         * In-place all-reduce of a vector of unsigned ints across all
         * processes.
         */
        void CommMpi::v_AllReduce(std::vector<unsigned int>& pData, enum ReduceOperator pOp)
        {
            if (GetSize() == 1)
            {
                return;
            }

            MPI_Op vOp;
            switch (pOp)
            {
                case ReduceMax: vOp = MPI_MAX; break;
                case ReduceMin: vOp = MPI_MIN; break;
                case ReduceSum:
                default:        vOp = MPI_SUM; break;
            }
            int retval = MPI_Allreduce( MPI_IN_PLACE,
                                        &pData[0],
                                        (int) pData.size(),
                                        MPI_UNSIGNED,
                                        vOp,
                                        m_comm);

            ASSERTL0(retval == MPI_SUCCESS,
                     "MPI error performing All-reduce.");
        }


        /**
         * All-to-all exchange of equal-sized blocks of doubles.
         */
        void CommMpi::v_AlltoAll(Array<OneD, NekDouble>& pSendData, Array<OneD, NekDouble>& pRecvData)
        {
            int retval = MPI_Alltoall(pSendData.get(),
                                      (int) pSendData.num_elements()/GetSize(),
                                      MPI_DOUBLE,
                                      pRecvData.get(),
                                      (int) pRecvData.num_elements()/GetSize(),
                                      MPI_DOUBLE,
                                      m_comm);

            ASSERTL0(retval == MPI_SUCCESS,
                     "MPI error performing All-to-All.");
        }


        /**
         * All-to-all exchange of equal-sized blocks of ints.
         */
        void CommMpi::v_AlltoAll(Array<OneD, int>& pSendData, Array<OneD, int>& pRecvData)
        {
            int retval = MPI_Alltoall(pSendData.get(),
                                      (int) pSendData.num_elements()/GetSize(),
                                      MPI_INT,
                                      pRecvData.get(),
                                      (int) pRecvData.num_elements()/GetSize(),
                                      MPI_INT,
                                      m_comm);

            ASSERTL0(retval == MPI_SUCCESS,
                     "MPI error performing All-to-All.");
        }


        /**
         * All-to-all exchange of doubles with per-process counts and offsets.
         */
        void CommMpi::v_AlltoAllv(Array<OneD, NekDouble>& pSendData,
                                  Array<OneD, int>& pSendDataSizeMap,
                                  Array<OneD, int>& pSendDataOffsetMap,
                                  Array<OneD, NekDouble>& pRecvData,
                                  Array<OneD, int>& pRecvDataSizeMap,
                                  Array<OneD, int>& pRecvDataOffsetMap)
        {
            int retval = MPI_Alltoallv(pSendData.get(),
                                       pSendDataSizeMap.get(),
                                       pSendDataOffsetMap.get(),
                                       MPI_DOUBLE,
                                       pRecvData.get(),
                                       pRecvDataSizeMap.get(),
                                       pRecvDataOffsetMap.get(),
                                       MPI_DOUBLE,
                                       m_comm);

            ASSERTL0(retval == MPI_SUCCESS,
                     "MPI error performing All-to-All-v.");
        }

        /**
         * All-to-all exchange of ints with per-process counts and offsets.
         */
        void CommMpi::v_AlltoAllv(Array<OneD, int>& pSendData,
                                  Array<OneD, int>& pSendDataSizeMap,
                                  Array<OneD, int>& pSendDataOffsetMap,
                                  Array<OneD, int>& pRecvData,
                                  Array<OneD, int>& pRecvDataSizeMap,
                                  Array<OneD, int>& pRecvDataOffsetMap)
        {
            int retval = MPI_Alltoallv(pSendData.get(),
                                       pSendDataSizeMap.get(),
                                       pSendDataOffsetMap.get(),
                                       MPI_INT,
                                       pRecvData.get(),
                                       pRecvDataSizeMap.get(),
                                       pRecvDataOffsetMap.get(),
                                       MPI_INT,
                                       m_comm);

            ASSERTL0(retval == MPI_SUCCESS,
                     "MPI error performing All-to-All-v.");
        }


        /**
         * Processes are considered as a grid of size pRows*pColumns. Comm
         * objects are created corresponding to the rows and columns of this
         * grid. The row and column to which this process belongs is stored in
         * #m_commRow and #m_commColumn.
         */
        void CommMpi::v_SplitComm(int pRows, int pColumns)
        {
            ASSERTL0(pRows*pColumns == m_size,
                     "Rows/Columns do not match comm size.");

            MPI_Comm newComm;

            // Compute row and column in grid.
            int myCol = m_rank % pColumns;
            int myRow = (m_rank - myCol) / pColumns;

            // Split Comm into rows - all processes with same myRow are put in
            // the same communicator. The rank within this communicator is the
            // column index.
            MPI_Comm_split(m_comm, myRow, myCol, &newComm);
            m_commRow = boost::shared_ptr<Comm>(new CommMpi(newComm));

            // Split Comm into columns - all processes with same myCol are put
            // in the same communicator. The rank within this communicator is
            // the row index.
            MPI_Comm_split(m_comm, myCol, myRow, &newComm);
            m_commColumn = boost::shared_ptr<Comm>(new CommMpi(newComm));
        }
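
        // Worked example of the mapping used in v_SplitComm above (illustrative
        // numbers, not taken from the code): with m_size = 6 split as pRows = 2
        // and pColumns = 3, rank 4 gives myCol = 4 % 3 = 1 and
        // myRow = (4 - 1) / 3 = 1. Rank 4 therefore joins the row communicator
        // containing ranks 3, 4, 5 (with rank equal to its column index, 1) and
        // the column communicator containing ranks 1 and 4 (with rank equal to
        // its row index, 1).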
    }
}