Nektar++
CommMpi.cpp
1 ///////////////////////////////////////////////////////////////////////////////
2 //
3 // File: CommMpi.cpp
4 //
5 // For more information, please see: http://www.nektar.info
6 //
7 // The MIT License
8 //
9 // Copyright (c) 2006 Division of Applied Mathematics, Brown University (USA),
10 // Department of Aeronautics, Imperial College London (UK), and Scientific
11 // Computing and Imaging Institute, University of Utah (USA).
12 //
13 // Permission is hereby granted, free of charge, to any person obtaining a
14 // copy of this software and associated documentation files (the "Software"),
15 // to deal in the Software without restriction, including without limitation
16 // the rights to use, copy, modify, merge, publish, distribute, sublicense,
17 // and/or sell copies of the Software, and to permit persons to whom the
18 // Software is furnished to do so, subject to the following conditions:
19 //
20 // The above copyright notice and this permission notice shall be included
21 // in all copies or substantial portions of the Software.
22 //
23 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
24 // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
26 // THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
28 // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
29 // DEALINGS IN THE SOFTWARE.
30 //
31 // Description: MPI communication implementation
32 //
33 ///////////////////////////////////////////////////////////////////////////////
34 
35 #ifdef NEKTAR_USING_PETSC
36 #include "petscsys.h"
37 #endif
38 
39 #include <LibUtilities/Communication/CommMpi.h>
40 #include <boost/core/ignore_unused.hpp>
41 
42 namespace Nektar
43 {
44 namespace LibUtilities
45 {
46 
47 std::string CommMpi::className = GetCommFactory().RegisterCreatorFunction(
48  "ParallelMPI", CommMpi::create, "Parallel communication using MPI.");
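This registration makes the MPI backend available through the communication factory under the key "ParallelMPI". A minimal usage sketch, assuming the factory's usual CreateInstance call and the public Comm wrappers (the call site below is illustrative, not taken from this file):

#include <LibUtilities/Communication/Comm.h>
#include <iostream>

using namespace Nektar::LibUtilities;

int main(int argc, char *argv[])
{
    // Look up the backend registered above; the factory invokes
    // CommMpi::create, which constructs CommMpi(argc, argv).
    CommSharedPtr comm =
        GetCommFactory().CreateInstance("ParallelMPI", argc, argv);

    if (comm->GetRank() == 0)
    {
        std::cout << "Running on " << comm->GetSize() << " ranks" << std::endl;
    }

    comm->Finalise(); // finalises MPI only if Nektar++ initialised it
    return 0;
}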
49 
50 /**
51  *
52  */
53 CommMpi::CommMpi(int narg, char *arg[]) : Comm(narg, arg)
54 {
55  int init = 0;
56  MPI_Initialized(&init);
57 
58  if (!init)
59  {
60  ASSERTL0(MPI_Init(&narg, &arg) == MPI_SUCCESS,
61  "Failed to initialise MPI");
62  // store bool to indicate that Nektar++ is in charge of finalizing MPI.
63  m_controls_mpi = true;
64  }
65  else
66  {
67  // Another code is in charge of finalizing MPI and this is not the
68  // responsibility of Nektar++
69  m_controls_mpi = false;
70  }
71 
72  m_comm = MPI_COMM_WORLD;
73  MPI_Comm_size(m_comm, &m_size);
74  MPI_Comm_rank(m_comm, &m_rank);
75 
76 #ifdef NEKTAR_USING_PETSC
77  PetscInitializeNoArguments();
78 #endif
79 
80  m_type = "Parallel MPI";
81 }
82 
83 /**
84  *
85  */
86 CommMpi::CommMpi(MPI_Comm pComm) : Comm()
87 {
88  m_comm = pComm;
89  MPI_Comm_size(m_comm, &m_size);
90  MPI_Comm_rank(m_comm, &m_rank);
91 
92  m_type = "Parallel MPI";
93 }
94 
95 /**
96  *
97  */
98 CommMpi::~CommMpi()
99 {
100  int flag;
101  MPI_Finalized(&flag);
102  if (!flag && m_comm != MPI_COMM_WORLD)
103  {
104  MPI_Comm_free(&m_comm);
105  }
106 }
107 
108 /**
109  *
110  */
111 MPI_Comm CommMpi::GetComm()
112 {
113  return m_comm;
114 }
115 
116 /**
117  *
118  */
119 void CommMpi::v_Finalise()
120 {
121 #ifdef NEKTAR_USING_PETSC
122  PetscFinalize();
123 #endif
124  int flag;
125  MPI_Finalized(&flag);
126  if ((!flag) && m_controls_mpi)
127  {
128  MPI_Finalize();
129  }
130 }
131 
132 /**
133  *
134  */
135 int CommMpi::v_GetRank()
136 {
137  return m_rank;
138 }
139 
140 /**
141  *
142  */
143 bool CommMpi::v_TreatAsRankZero()
144 {
145  return m_rank == 0;
146 }
147 
148 /**
149  *
150  */
151 bool CommMpi::v_IsSerial()
152 {
153  return m_size == 1;
154 }
155 
156 std::tuple<int, int, int> CommMpi::v_GetVersion()
157 {
158  int version, subversion;
159  int retval = MPI_Get_version(&version, &subversion);
160 
161  ASSERTL0(retval == MPI_SUCCESS, "MPI error performing GetVersion.");
162 
163  return std::make_tuple(version, subversion, 0);
164 }
165 
166 /**
167  *
168  */
169 void CommMpi::v_Block()
170 {
171  MPI_Barrier(m_comm);
172 }
173 
174 /**
175  *
176  */
177 double CommMpi::v_Wtime()
178 {
179  return MPI_Wtime();
180 }
181 
182 /**
183  *
184  */
185 void CommMpi::v_Send(void *buf, int count, CommDataType dt, int dest)
186 {
187  if (MPISYNC)
188  {
189  MPI_Ssend(buf, count, dt, dest, 0, m_comm);
190  }
191  else
192  {
193  MPI_Send(buf, count, dt, dest, 0, m_comm);
194  }
195 }
196 
197 /**
198  *
199  */
200 void CommMpi::v_Recv(void *buf, int count, CommDataType dt, int source)
201 {
202  MPI_Recv(buf, count, dt, source, 0, m_comm, MPI_STATUS_IGNORE);
203  // ASSERTL0(status.MPI_ERROR == MPI_SUCCESS,
204  // "MPI error receiving data.");
205 }
206 
207 /**
208  *
209  */
210 void CommMpi::v_SendRecv(void *sendbuf, int sendcount, CommDataType sendtype,
211  int dest, void *recvbuf, int recvcount,
212  CommDataType recvtype, int source)
213 {
214  MPI_Status status;
215  int retval = MPI_Sendrecv(sendbuf, sendcount, sendtype, dest, 0, recvbuf,
216  recvcount, recvtype, source, 0, m_comm, &status);
217 
218  ASSERTL0(retval == MPI_SUCCESS,
219  "MPI error performing send-receive of data.");
220 }
221 
222 /**
223  *
224  */
225 void CommMpi::v_SendRecvReplace(void *buf, int count, CommDataType dt,
226  int pSendProc, int pRecvProc)
227 {
228  MPI_Status status;
229  int retval = MPI_Sendrecv_replace(buf, count, dt, pRecvProc, 0, pSendProc,
230  0, m_comm, &status);
231 
232  ASSERTL0(retval == MPI_SUCCESS,
233  "MPI error performing Send-Receive-Replace of data.");
234 }
235 
236 /**
237  *
238  */
239 void CommMpi::v_AllReduce(void *buf, int count, CommDataType dt,
240  enum ReduceOperator pOp)
241 {
242  if (GetSize() == 1)
243  {
244  return;
245  }
246 
247  MPI_Op vOp;
248  switch (pOp)
249  {
250  case ReduceMax:
251  vOp = MPI_MAX;
252  break;
253  case ReduceMin:
254  vOp = MPI_MIN;
255  break;
256  case ReduceSum:
257  default:
258  vOp = MPI_SUM;
259  break;
260  }
261  int retval = MPI_Allreduce(MPI_IN_PLACE, buf, count, dt, vOp, m_comm);
262 
263  ASSERTL0(retval == MPI_SUCCESS, "MPI error performing All-reduce.");
264 }
265 
266 /**
267  *
268  */
269 void CommMpi::v_AlltoAll(void *sendbuf, int sendcount, CommDataType sendtype,
270  void *recvbuf, int recvcount, CommDataType recvtype)
271 {
272  int retval = MPI_Alltoall(sendbuf, sendcount, sendtype, recvbuf, recvcount,
273  recvtype, m_comm);
274 
275  ASSERTL0(retval == MPI_SUCCESS, "MPI error performing All-to-All.");
276 }
277 
278 /**
279  *
280  */
281 void CommMpi::v_AlltoAllv(void *sendbuf, int sendcounts[], int sdispls[],
282  CommDataType sendtype, void *recvbuf,
283  int recvcounts[], int rdispls[],
284  CommDataType recvtype)
285 {
286  int retval = MPI_Alltoallv(sendbuf, sendcounts, sdispls, sendtype, recvbuf,
287  recvcounts, rdispls, recvtype, m_comm);
288 
289  ASSERTL0(retval == MPI_SUCCESS, "MPI error performing All-to-All-v.");
290 }
291 
292 /**
293  *
294  */
295 void CommMpi::v_AllGather(void *sendbuf, int sendcount, CommDataType sendtype,
296  void *recvbuf, int recvcount, CommDataType recvtype)
297 {
298  int retval = MPI_Allgather(sendbuf, sendcount, sendtype, recvbuf, recvcount,
299  recvtype, m_comm);
300 
301  ASSERTL0(retval == MPI_SUCCESS, "MPI error performing Allgather.");
302 }
303 
304 void CommMpi::v_AllGatherv(void *sendbuf, int sendcount, CommDataType sendtype,
305  void *recvbuf, int recvcounts[], int rdispls[],
306  CommDataType recvtype)
307 {
308  int retval = MPI_Allgatherv(sendbuf, sendcount, sendtype, recvbuf,
309  recvcounts, rdispls, recvtype, m_comm);
310 
311  ASSERTL0(retval == MPI_SUCCESS, "MPI error performing Allgather.");
312 }
313 
314 void CommMpi::v_AllGatherv(void *recvbuf, int recvcounts[], int rdispls[],
315  CommDataType recvtype)
316 {
317  int retval = MPI_Allgatherv(MPI_IN_PLACE, 0, MPI_DATATYPE_NULL, recvbuf,
318  recvcounts, rdispls, recvtype, m_comm);
319 
320  ASSERTL0(retval == MPI_SUCCESS, "MPI error performing Allgatherv.");
321 }
322 
323 void CommMpi::v_Bcast(void *buffer, int count, CommDataType dt, int root)
324 {
325  int retval = MPI_Bcast(buffer, count, dt, root, m_comm);
326  ASSERTL0(retval == MPI_SUCCESS, "MPI error performing Bcast-v.");
327 }
328 
329 void CommMpi::v_Exscan(Array<OneD, unsigned long long> &pData,
330  const enum ReduceOperator pOp,
331  Array<OneD, unsigned long long> &ans)
332 {
333  int n = pData.size();
334  ASSERTL0(n == ans.size(), "Array sizes differ in Exscan");
335 
336  MPI_Op vOp;
337  switch (pOp)
338  {
339  case ReduceMax:
340  vOp = MPI_MAX;
341  break;
342  case ReduceMin:
343  vOp = MPI_MIN;
344  break;
345  case ReduceSum:
346  default:
347  vOp = MPI_SUM;
348  break;
349  }
350 
351  int retval = MPI_Exscan(pData.get(), ans.get(), n, MPI_UNSIGNED_LONG_LONG,
352  vOp, m_comm);
353  ASSERTL0(retval == MPI_SUCCESS, "MPI error performing Exscan-v.");
354 }
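MPI_Exscan performs an exclusive prefix reduction: with ReduceSum, rank r receives the combined contribution of ranks 0..r-1, and the receive buffer on rank 0 is left untouched. A minimal raw-MPI sketch of the same pattern, with illustrative values and names:

#include <mpi.h>
#include <cstdio>

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    // Each rank contributes one value; Exscan returns the sum of the
    // contributions of all lower-ranked processes (undefined on rank 0).
    unsigned long long mine = 1, before = 0;
    MPI_Exscan(&mine, &before, 1, MPI_UNSIGNED_LONG_LONG, MPI_SUM,
               MPI_COMM_WORLD);
    std::printf("rank %d: %llu ranks before me\n", rank, before);

    MPI_Finalize();
    return 0;
}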
355 
356 void CommMpi::v_Gather(void *sendbuf, int sendcount, CommDataType sendtype,
357  void *recvbuf, int recvcount, CommDataType recvtype,
358  int root)
359 {
360  int retval = MPI_Gather(sendbuf, sendcount, sendtype, recvbuf, recvcount,
361  recvtype, root, m_comm);
362 
363  ASSERTL0(retval == MPI_SUCCESS, "MPI error performing Gather.");
364 }
365 
366 void CommMpi::v_Scatter(void *sendbuf, int sendcount, CommDataType sendtype,
367  void *recvbuf, int recvcount, CommDataType recvtype,
368  int root)
369 {
370  int retval = MPI_Scatter(sendbuf, sendcount, sendtype, recvbuf, recvcount,
371  recvtype, root, m_comm);
372  ASSERTL0(retval == MPI_SUCCESS, "MPI error performing Scatter.");
373 }
374 
375 void CommMpi::v_DistGraphCreateAdjacent(int indegree, const int sources[],
376  const int sourceweights[], int reorder)
377 {
378 #if MPI_VERSION < 3
379  boost::ignore_unused(indegree, sources, sourceweights, reorder);
380  ASSERTL0(false, "MPI_Dist_graph_create_adjacent is not supported in your "
381  "installed MPI version.");
382 #else
383  int retval = MPI_Dist_graph_create_adjacent(
384  m_comm, indegree, sources, sourceweights, indegree, sources,
385  sourceweights, MPI_INFO_NULL, reorder, &m_comm);
386 
387  ASSERTL0(retval == MPI_SUCCESS,
388  "MPI error performing Dist_graph_create_adjacent.")
389 #endif
390 }
391 
392 void CommMpi::v_NeighborAlltoAllv(void *sendbuf, int sendcounts[],
393  int sdispls[], CommDataType sendtype,
394  void *recvbuf, int recvcounts[],
395  int rdispls[], CommDataType recvtype)
396 {
397 #if MPI_VERSION < 3
398  boost::ignore_unused(sendbuf, sendcounts, sdispls, sendtype, recvbuf,
399  recvcounts, rdispls, recvtype);
400  ASSERTL0(false, "MPI_Neighbor_alltoallv is not supported in your "
401  "installed MPI version.");
402 #else
403  int retval =
404  MPI_Neighbor_alltoallv(sendbuf, sendcounts, sdispls, sendtype, recvbuf,
405  recvcounts, rdispls, recvtype, m_comm);
406 
407  ASSERTL0(retval == MPI_SUCCESS, "MPI error performing NeighborAllToAllV.");
408 #endif
409 }
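v_DistGraphCreateAdjacent and v_NeighborAlltoAllv are intended to be used as a pair: the first attaches a distributed-graph topology describing each rank's neighbours, and the second then exchanges data only along those graph edges. A minimal MPI-3 sketch of that pairing, with illustrative neighbour lists and counts:

#include <mpi.h>
#include <vector>

// Illustrative sketch: declare the same neighbours as sources and
// destinations, then exchange one int with each of them (requires MPI-3).
void neighbour_exchange(MPI_Comm comm, const std::vector<int> &nbrs)
{
    const int n = static_cast<int>(nbrs.size());

    MPI_Comm graph;
    MPI_Dist_graph_create_adjacent(comm, n, nbrs.data(), MPI_UNWEIGHTED, n,
                                   nbrs.data(), MPI_UNWEIGHTED, MPI_INFO_NULL,
                                   0, &graph);

    std::vector<int> sendbuf(n, 1), recvbuf(n, 0);
    std::vector<int> counts(n, 1), displs(n);
    for (int i = 0; i < n; ++i)
    {
        displs[i] = i; // one element per neighbour, packed contiguously
    }

    MPI_Neighbor_alltoallv(sendbuf.data(), counts.data(), displs.data(),
                           MPI_INT, recvbuf.data(), counts.data(),
                           displs.data(), MPI_INT, graph);
    MPI_Comm_free(&graph);
}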
410 
411 void CommMpi::v_Irsend(void *buf, int count, CommDataType dt, int dest,
412  CommRequestSharedPtr request, int loc)
413 {
414  CommRequestMpiSharedPtr req =
415  std::static_pointer_cast<CommRequestMpi>(request);
416  MPI_Irsend(buf, count, dt, dest, 0, m_comm, req->GetRequest(loc));
417 }
418 
419 void CommMpi::v_Isend(void *buf, int count, CommDataType dt, int dest,
420  CommRequestSharedPtr request, int loc)
421 {
422  CommRequestMpiSharedPtr req =
423  std::static_pointer_cast<CommRequestMpi>(request);
424  MPI_Isend(buf, count, dt, dest, 0, m_comm, req->GetRequest(loc));
425 }
426 
427 void CommMpi::v_SendInit(void *buf, int count, CommDataType dt, int dest,
428  CommRequestSharedPtr request, int loc)
429 {
430  CommRequestMpiSharedPtr req =
431  std::static_pointer_cast<CommRequestMpi>(request);
432  MPI_Send_init(buf, count, dt, dest, 0, m_comm, req->GetRequest(loc));
433 }
434 
435 void CommMpi::v_Irecv(void *buf, int count, CommDataType dt, int source,
436  CommRequestSharedPtr request, int loc)
437 {
438  CommRequestMpiSharedPtr req =
439  std::static_pointer_cast<CommRequestMpi>(request);
440  MPI_Irecv(buf, count, dt, source, 0, m_comm, req->GetRequest(loc));
441 }
442 
443 void CommMpi::v_RecvInit(void *buf, int count, CommDataType dt, int source,
444  CommRequestSharedPtr request, int loc)
445 {
446  CommRequestMpiSharedPtr req =
447  std::static_pointer_cast<CommRequestMpi>(request);
448  MPI_Recv_init(buf, count, dt, source, 0, m_comm, req->GetRequest(loc));
449 }
450 
451 void CommMpi::v_StartAll(CommRequestSharedPtr request)
452 {
453  CommRequestMpiSharedPtr req =
454  std::static_pointer_cast<CommRequestMpi>(request);
455  if (req->GetNumRequest() != 0)
456  {
457  MPI_Startall(req->GetNumRequest(), req->GetRequest(0));
458  }
459 }
460 
461 void CommMpi::v_WaitAll(CommRequestSharedPtr request)
462 {
463  CommRequestMpiSharedPtr req =
464  std::static_pointer_cast<CommRequestMpi>(request);
465  if (req->GetNumRequest() != 0)
466  {
467  MPI_Waitall(req->GetNumRequest(), req->GetRequest(0),
468  MPI_STATUSES_IGNORE);
469  }
470 }
471 
472 CommRequestSharedPtr CommMpi::v_CreateRequest(int num)
473 {
474  return std::shared_ptr<CommRequest>(new CommRequestMpi(num));
475 }
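The request-based methods above (v_Isend, v_Irecv, v_SendInit/v_RecvInit, v_StartAll, v_WaitAll) wrap the usual non-blocking MPI pattern of posting requests and then waiting on them as a batch. A minimal raw-MPI sketch of that pattern, with illustrative names:

#include <mpi.h>

// Illustrative sketch of the non-blocking pattern wrapped above: post a
// send and a receive, then wait on both requests together.
void exchange(MPI_Comm comm, int neighbour, double send, double *recv)
{
    MPI_Request reqs[2];
    MPI_Isend(&send, 1, MPI_DOUBLE, neighbour, 0, comm, &reqs[0]);
    MPI_Irecv(recv, 1, MPI_DOUBLE, neighbour, 0, comm, &reqs[1]);
    MPI_Waitall(2, reqs, MPI_STATUSES_IGNORE);
}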
476 
477 /**
478  * Processes are considered as a grid of size pRows*pColumns. Comm
479  * objects are created corresponding to the rows and columns of this
480  * grid. The row and column to which this process belongs is stored in
481  * #m_commRow and #m_commColumn.
482  */
483 void CommMpi::v_SplitComm(int pRows, int pColumns, int pTime)
484 {
485  ASSERTL0(pRows * pColumns * pTime == m_size,
486  "Rows/Columns do not match comm size.");
487 
488  MPI_Comm newComm;
489  MPI_Comm gridComm;
490  if (pTime == 1)
491  {
492  // There is a bug in OpenMPI 3.1.3. This bug causes some cases to fail in
493  // buster-full-build-and-test. Failed cases are:
494  //
495  // IncNavierStokesSolver_ChanFlow_3DH1D_FlowrateExplicit_MVM_par
496  // IncNavierStokesSolver_ChanFlow_3DH1D_FlowrateExplicit_MVM_par_hybrid
497 
498  // See: https://github.com/open-mpi/ompi/issues/6522
499 
500  // Compute row and column in grid.
501  int myCol = m_rank % pColumns;
502  int myRow = (m_rank - myCol) / pColumns;
503 
504  // Split Comm into rows - all processes with same myRow are put in
505  // the same communicator. The rank within this communicator is the
506  // column index.
507  MPI_Comm_split(m_comm, myRow, myCol, &newComm);
508  m_commRow = std::shared_ptr<Comm>(new CommMpi(newComm));
509 
510  // Split Comm into columns - all processes with same myCol are put
511  // in the same communicator. The rank within this communicator is
512  // the row index.
513  MPI_Comm_split(m_comm, myCol, myRow, &newComm);
514  m_commColumn = std::shared_ptr<Comm>(new CommMpi(newComm));
515  }
516  else
517  {
518  constexpr int dims = 3;
519  const int sizes[dims] = {pRows, pColumns, pTime};
520  const int periods[dims] = {0, 0, 0};
521  constexpr int reorder = 1;
522 
523  MPI_Cart_create(m_comm, dims, sizes, periods, reorder, &gridComm);
524 
525  constexpr int keepRow[dims] = {0, 1, 0};
526  MPI_Cart_sub(gridComm, keepRow, &newComm);
527  m_commRow = std::shared_ptr<Comm>(new CommMpi(newComm));
528 
529  constexpr int keepCol[dims] = {1, 0, 0};
530  MPI_Cart_sub(gridComm, keepCol, &newComm);
531  m_commColumn = std::shared_ptr<Comm>(new CommMpi(newComm));
532 
533  constexpr int keepTime[dims] = {0, 0, 1};
534  MPI_Cart_sub(gridComm, keepTime, &newComm);
535  m_commTime = std::shared_ptr<Comm>(new CommMpi(newComm));
536 
537  constexpr int keepSpace[dims] = {1, 1, 0};
538  MPI_Cart_sub(gridComm, keepSpace, &newComm);
539  m_commSpace = std::shared_ptr<Comm>(new CommMpi(newComm));
540  }
541 }
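For example, with pRows = 2, pColumns = 3 and pTime = 1, rank 4 has column 4 % 3 = 1 and row (4 - 1) / 3 = 1. A minimal raw-MPI sketch of the pTime == 1 branch, with illustrative names:

#include <mpi.h>

// Illustrative sketch: split 'comm' into row and column communicators for
// a pRows x pColumns process grid.
void split_grid(MPI_Comm comm, int pColumns, MPI_Comm *row, MPI_Comm *col)
{
    int rank;
    MPI_Comm_rank(comm, &rank);

    int myCol = rank % pColumns;           // rank within the row communicator
    int myRow = (rank - myCol) / pColumns; // rank within the column communicator

    MPI_Comm_split(comm, myRow, myCol, row); // same row    -> same communicator
    MPI_Comm_split(comm, myCol, myRow, col); // same column -> same communicator
}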
542 
543 /**
544  * Create a new communicator if the flag is non-zero.
545  */
546 CommSharedPtr CommMpi::v_CommCreateIf(int flag)
547 {
548  MPI_Comm newComm;
549  // color == MPI_UNDEFINED => not in the new communicator
550  // key == 0 on all => use rank to order them. OpenMPI, at least,
551  // implies this is faster than ordering them ourselves.
552  MPI_Comm_split(m_comm, flag ? flag : MPI_UNDEFINED, 0, &newComm);
553 
554  if (flag == 0)
555  {
556  // flag == 0 => get back MPI_COMM_NULL, return a null ptr instead.
557  return std::shared_ptr<Comm>();
558  }
559  else
560  {
561  // Return a real communicator
562  return std::shared_ptr<Comm>(new CommMpi(newComm));
563  }
564 }
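The key point is that MPI_Comm_split returns MPI_COMM_NULL on any rank that passes MPI_UNDEFINED as its colour, which v_CommCreateIf then surfaces as a null CommSharedPtr. A minimal raw-MPI sketch of that behaviour, with illustrative names:

#include <mpi.h>

// Illustrative sketch: ranks passing MPI_UNDEFINED as the colour are left
// out of the split and receive MPI_COMM_NULL instead of a new communicator.
MPI_Comm create_if(MPI_Comm comm, int flag)
{
    MPI_Comm newComm;
    MPI_Comm_split(comm, flag ? flag : MPI_UNDEFINED, 0, &newComm);
    return newComm; // MPI_COMM_NULL when flag == 0
}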
565 
566 std::pair<CommSharedPtr, CommSharedPtr> CommMpi::v_SplitCommNode()
567 {
568  std::pair<CommSharedPtr, CommSharedPtr> ret;
569 
570 #if MPI_VERSION < 3
571  ASSERTL0(false, "Not implemented for non-MPI-3 versions.");
572 #else
573  // Create an intra-node communicator.
574  MPI_Comm nodeComm;
575  MPI_Comm_split_type(MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, m_rank,
576  MPI_INFO_NULL, &nodeComm);
577 
578  // For rank 0 of the intra-node communicator, split the main
579  // communicator. Everyone else will get a null communicator.
580  ret.first = std::shared_ptr<Comm>(new CommMpi(nodeComm));
581  ret.second = CommMpi::v_CommCreateIf(ret.first->GetRank() == 0);
582  if (ret.first->GetRank() == 0)
583  {
584  ret.second->SplitComm(1, ret.second->GetSize());
585  }
586 #endif
587 
588  return ret;
589 }
590 
591 } // namespace LibUtilities
592 } // namespace Nektar