Nektar++
CommMpi.cpp
///////////////////////////////////////////////////////////////////////////////
//
// File: CommMpi.cpp
//
// For more information, please see: http://www.nektar.info
//
// The MIT License
//
// Copyright (c) 2006 Division of Applied Mathematics, Brown University (USA),
// Department of Aeronautics, Imperial College London (UK), and Scientific
// Computing and Imaging Institute, University of Utah (USA).
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//
// Description: MPI communication implementation
//
///////////////////////////////////////////////////////////////////////////////

#ifdef NEKTAR_USING_PETSC
#include "petscsys.h"
#endif

#include <LibUtilities/Communication/CommMpi.h>

namespace Nektar
{
namespace LibUtilities
{

std::string CommMpi::className = GetCommFactory().RegisterCreatorFunction(
    "ParallelMPI", CommMpi::create, "Parallel communication using MPI.");

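// Illustrative usage sketch (not part of this file): the "ParallelMPI" key
// registered above is how a CommMpi instance is normally obtained through the
// communication factory. The call site below is an assumption based on the
// usual NekFactory interface, not code taken from this file:
//
//     CommSharedPtr comm =
//         GetCommFactory().CreateInstance("ParallelMPI", argc, argv);
//
// All subsequent communication then goes through the virtual Comm interface
// implemented by the methods below.
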
/**
 * Construct a communicator over MPI_COMM_WORLD from the command-line
 * arguments, initialising MPI if no other code has done so already.
 */
CommMpi::CommMpi(int narg, char *arg[]) : Comm(narg, arg)
{
    int init = 0;
    MPI_Initialized(&init);

    if (!init)
    {
        ASSERTL0(MPI_Init(&narg, &arg) == MPI_SUCCESS,
                 "Failed to initialise MPI");
        // Store a flag to indicate that Nektar++ is in charge of finalizing
        // MPI.
        m_controls_mpi = true;
    }
    else
    {
        // Another code is in charge of finalizing MPI; it is not the
        // responsibility of Nektar++.
        m_controls_mpi = false;
    }

    m_comm = MPI_COMM_WORLD;
    MPI_Comm_size(m_comm, &m_size);
    MPI_Comm_rank(m_comm, &m_rank);

#ifdef NEKTAR_USING_PETSC
    PetscInitializeNoArguments();
#endif

    m_type = "Parallel MPI";
}

/**
 * Construct a communicator that wraps an existing MPI communicator.
 */
CommMpi::CommMpi(MPI_Comm pComm) : Comm()
{
    m_comm = pComm;
    MPI_Comm_size(m_comm, &m_size);
    MPI_Comm_rank(m_comm, &m_rank);

    m_type = "Parallel MPI";
}

/**
 * Destructor: free the communicator, unless it is MPI_COMM_WORLD or MPI has
 * already been finalised.
 */
CommMpi::~CommMpi()
{
    int flag;
    MPI_Finalized(&flag);
    if (!flag && m_comm != MPI_COMM_WORLD)
    {
        MPI_Comm_free(&m_comm);
    }
}

/**
 * Return the underlying MPI communicator.
 */
MPI_Comm CommMpi::GetComm()
{
    return m_comm;
}

/**
 * Finalise PETSc (if enabled) and MPI. MPI is only finalised if it was
 * initialised by this class and has not already been finalised elsewhere.
 */
void CommMpi::v_Finalise()
{
#ifdef NEKTAR_USING_PETSC
    PetscFinalize();
#endif
    int flag;
    MPI_Finalized(&flag);
    if ((!flag) && m_controls_mpi)
    {
        MPI_Finalize();
    }
}

/**
 * Return the rank of this process in the communicator.
 */
int CommMpi::v_GetRank()
{
    return m_rank;
}

/**
 * Return true if this process should be treated as rank zero.
 */
bool CommMpi::v_TreatAsRankZero()
{
    return m_rank == 0;
}

/**
 * Return true if the communicator contains only a single process.
 */
bool CommMpi::v_IsSerial()
{
    return m_size == 1;
}

/**
 * Return the supported MPI standard version as a (major, minor, patch)
 * tuple; the patch number is always zero.
 */
std::tuple<int, int, int> CommMpi::v_GetVersion()
{
    int version, subversion;
    int retval = MPI_Get_version(&version, &subversion);

    ASSERTL0(retval == MPI_SUCCESS, "MPI error performing GetVersion.");

    return std::make_tuple(version, subversion, 0);
}

/**
 * Block until all processes in the communicator reach this point.
 */
void CommMpi::v_Block()
{
    MPI_Barrier(m_comm);
}

/**
 * Return the MPI wall-clock time.
 */
double CommMpi::v_Wtime()
{
    return MPI_Wtime();
}

/**
 * Send data to another process, using a synchronous send if MPISYNC is set.
 */
void CommMpi::v_Send(void *buf, int count, CommDataType dt, int dest)
{
    if (MPISYNC)
    {
        MPI_Ssend(buf, count, dt, dest, 0, m_comm);
    }
    else
    {
        MPI_Send(buf, count, dt, dest, 0, m_comm);
    }
}

/**
 * Receive data from another process.
 */
void CommMpi::v_Recv(void *buf, int count, CommDataType dt, int source)
{
    MPI_Recv(buf, count, dt, source, 0, m_comm, MPI_STATUS_IGNORE);
}

/**
 * Send data to one process while receiving data from another.
 */
void CommMpi::v_SendRecv(void *sendbuf, int sendcount, CommDataType sendtype,
                         int dest, void *recvbuf, int recvcount,
                         CommDataType recvtype, int source)
{
    MPI_Status status;
    int retval = MPI_Sendrecv(sendbuf, sendcount, sendtype, dest, 0, recvbuf,
                              recvcount, recvtype, source, 0, m_comm, &status);

    ASSERTL0(retval == MPI_SUCCESS,
             "MPI error performing send-receive of data.");
}

/**
 * Reduce data across all processes in place, using the requested reduction
 * operator (maximum, minimum or sum).
 */
void CommMpi::v_AllReduce(void *buf, int count, CommDataType dt,
                          enum ReduceOperator pOp)
{
    if (GetSize() == 1)
    {
        return;
    }

    MPI_Op vOp;
    switch (pOp)
    {
        case ReduceMax:
            vOp = MPI_MAX;
            break;
        case ReduceMin:
            vOp = MPI_MIN;
            break;
        case ReduceSum:
        default:
            vOp = MPI_SUM;
            break;
    }
    int retval = MPI_Allreduce(MPI_IN_PLACE, buf, count, dt, vOp, m_comm);

    ASSERTL0(retval == MPI_SUCCESS, "MPI error performing All-reduce.");
}

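// Illustrative sketch (an assumption about the calling convention, not code
// from this file): the reduction above operates in place, so a caller passes
// its local value and reads the reduced result back from the same variable.
// The templated Comm::AllReduce wrapper used below is assumed to forward to
// v_AllReduce with the matching CommDataType:
//
//     NekDouble local = ComputeLocalContribution(); // hypothetical helper
//     comm->AllReduce(local, ReduceSum);            // local now holds the
//                                                   // global sum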
/**
 * Exchange a fixed amount of data between every pair of processes.
 */
void CommMpi::v_AlltoAll(void *sendbuf, int sendcount, CommDataType sendtype,
                         void *recvbuf, int recvcount, CommDataType recvtype)
{
    int retval = MPI_Alltoall(sendbuf, sendcount, sendtype, recvbuf, recvcount,
                              recvtype, m_comm);

    ASSERTL0(retval == MPI_SUCCESS, "MPI error performing All-to-All.");
}

/**
 * Exchange a variable amount of data between every pair of processes.
 */
void CommMpi::v_AlltoAllv(void *sendbuf, int sendcounts[], int sdispls[],
                          CommDataType sendtype, void *recvbuf,
                          int recvcounts[], int rdispls[],
                          CommDataType recvtype)
{
    int retval = MPI_Alltoallv(sendbuf, sendcounts, sdispls, sendtype, recvbuf,
                               recvcounts, rdispls, recvtype, m_comm);

    ASSERTL0(retval == MPI_SUCCESS, "MPI error performing All-to-All-v.");
}

/**
 * Gather a fixed amount of data from every process onto every process.
 */
void CommMpi::v_AllGather(void *sendbuf, int sendcount, CommDataType sendtype,
                          void *recvbuf, int recvcount, CommDataType recvtype)
{
    int retval = MPI_Allgather(sendbuf, sendcount, sendtype, recvbuf, recvcount,
                               recvtype, m_comm);

    ASSERTL0(retval == MPI_SUCCESS, "MPI error performing Allgather.");
}

/**
 * Gather a variable amount of data from every process onto every process.
 */
void CommMpi::v_AllGatherv(void *sendbuf, int sendcount, CommDataType sendtype,
                           void *recvbuf, int recvcounts[], int rdispls[],
                           CommDataType recvtype)
{
    int retval = MPI_Allgatherv(sendbuf, sendcount, sendtype, recvbuf,
                                recvcounts, rdispls, recvtype, m_comm);

    ASSERTL0(retval == MPI_SUCCESS, "MPI error performing Allgatherv.");
}

/**
 * In-place variant of AllGatherv: each process contributes the portion of
 * the receive buffer corresponding to its own rank.
 */
void CommMpi::v_AllGatherv(void *recvbuf, int recvcounts[], int rdispls[],
                           CommDataType recvtype)
{
    int retval = MPI_Allgatherv(MPI_IN_PLACE, 0, MPI_DATATYPE_NULL, recvbuf,
                                recvcounts, rdispls, recvtype, m_comm);

    ASSERTL0(retval == MPI_SUCCESS, "MPI error performing Allgatherv.");
}

/**
 * Broadcast data from the root process to all other processes.
 */
void CommMpi::v_Bcast(void *buffer, int count, CommDataType dt, int root)
{
    int retval = MPI_Bcast(buffer, count, dt, root, m_comm);

    ASSERTL0(retval == MPI_SUCCESS, "MPI error performing Bcast.");
}

/**
 * Gather data from all processes onto the root process.
 */
void CommMpi::v_Gather(void *sendbuf, int sendcount, CommDataType sendtype,
                       void *recvbuf, int recvcount, CommDataType recvtype,
                       int root)
{
    int retval = MPI_Gather(sendbuf, sendcount, sendtype, recvbuf, recvcount,
                            recvtype, root, m_comm);

    ASSERTL0(retval == MPI_SUCCESS, "MPI error performing Gather.");
}

/**
 * Scatter data from the root process to all processes.
 */
void CommMpi::v_Scatter(void *sendbuf, int sendcount, CommDataType sendtype,
                        void *recvbuf, int recvcount, CommDataType recvtype,
                        int root)
{
    int retval = MPI_Scatter(sendbuf, sendcount, sendtype, recvbuf, recvcount,
                             recvtype, root, m_comm);

    ASSERTL0(retval == MPI_SUCCESS, "MPI error performing Scatter.");
}

/**
 * Replace the communicator with a distributed graph topology built from the
 * adjacency information of this process. Requires MPI 3.0 or later.
 */
void CommMpi::v_DistGraphCreateAdjacent(int indegree, const int sources[],
                                        const int sourceweights[], int reorder)
{
#if MPI_VERSION < 3
    boost::ignore_unused(indegree, sources, sourceweights, reorder);

    ASSERTL0(false, "MPI_Dist_graph_create_adjacent is not supported in your "
                    "installed MPI version.");
#else
    int retval = MPI_Dist_graph_create_adjacent(
        m_comm, indegree, sources, sourceweights, indegree, sources,
        sourceweights, MPI_INFO_NULL, reorder, &m_comm);

    ASSERTL0(retval == MPI_SUCCESS,
             "MPI error performing Dist_graph_create_adjacent.");
#endif
}

/**
 * Exchange a variable amount of data with the neighbouring processes of the
 * distributed graph topology. Requires MPI 3.0 or later.
 */
void CommMpi::v_NeighborAlltoAllv(void *sendbuf, int sendcounts[],
                                  int sdispls[], CommDataType sendtype,
                                  void *recvbuf, int recvcounts[],
                                  int rdispls[], CommDataType recvtype)
{
#if MPI_VERSION < 3
    boost::ignore_unused(sendbuf, sendcounts, sdispls, sendtype, recvbuf,
                         recvcounts, rdispls, recvtype);
    ASSERTL0(false, "MPI_Neighbor_alltoallv is not supported in your "
                    "installed MPI version.");
#else
    int retval =
        MPI_Neighbor_alltoallv(sendbuf, sendcounts, sdispls, sendtype, recvbuf,
                               recvcounts, rdispls, recvtype, m_comm);

    ASSERTL0(retval == MPI_SUCCESS, "MPI error performing NeighborAllToAllV.");
#endif
}

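// Illustrative sketch (assumed calling sequence, not code from this file):
// the two routines above are designed to be used together. A caller first
// declares its neighbours, then exchanges variable-sized data with exactly
// those neighbours. The public Comm wrappers and their exact argument lists
// are assumptions based on the virtual methods implemented here:
//
//     std::vector<int> neighbours = {left, right}; // hypothetical ranks
//     std::vector<int> weights(neighbours.size(), 1);
//     comm->DistGraphCreateAdjacent(neighbours.size(), neighbours.data(),
//                                   weights.data(), 0);
//     // sendcounts/sdispls and recvcounts/rdispls are sized per neighbour.
//     comm->NeighborAlltoAllv(sendbuf, sendcounts, sdispls, recvbuf,
//                             recvcounts, rdispls);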
/**
 * Start a non-blocking ready-mode send, storing the MPI request at position
 * loc of the given request object.
 */
void CommMpi::v_Irsend(void *buf, int count, CommDataType dt, int dest,
                       CommRequestSharedPtr request, int loc)
{
    CommRequestMpiSharedPtr req =
        std::static_pointer_cast<CommRequestMpi>(request);
    MPI_Irsend(buf, count, dt, dest, 0, m_comm, req->GetRequest(loc));
}

/**
 * Start a non-blocking send, storing the MPI request at position loc of the
 * given request object.
 */
void CommMpi::v_Isend(void *buf, int count, CommDataType dt, int dest,
                      CommRequestSharedPtr request, int loc)
{
    CommRequestMpiSharedPtr req =
        std::static_pointer_cast<CommRequestMpi>(request);
    MPI_Isend(buf, count, dt, dest, 0, m_comm, req->GetRequest(loc));
}

/**
 * Create a persistent send request at position loc of the given request
 * object; the transfer is started later with StartAll.
 */
void CommMpi::v_SendInit(void *buf, int count, CommDataType dt, int dest,
                         CommRequestSharedPtr request, int loc)
{
    CommRequestMpiSharedPtr req =
        std::static_pointer_cast<CommRequestMpi>(request);
    MPI_Send_init(buf, count, dt, dest, 0, m_comm, req->GetRequest(loc));
}

/**
 * Start a non-blocking receive, storing the MPI request at position loc of
 * the given request object.
 */
void CommMpi::v_Irecv(void *buf, int count, CommDataType dt, int source,
                      CommRequestSharedPtr request, int loc)
{
    CommRequestMpiSharedPtr req =
        std::static_pointer_cast<CommRequestMpi>(request);
    MPI_Irecv(buf, count, dt, source, 0, m_comm, req->GetRequest(loc));
}

/**
 * Create a persistent receive request at position loc of the given request
 * object; the transfer is started later with StartAll.
 */
void CommMpi::v_RecvInit(void *buf, int count, CommDataType dt, int source,
                         CommRequestSharedPtr request, int loc)
{
    CommRequestMpiSharedPtr req =
        std::static_pointer_cast<CommRequestMpi>(request);
    MPI_Recv_init(buf, count, dt, source, 0, m_comm, req->GetRequest(loc));
}

/**
 * Start all persistent requests held by the given request object.
 */
void CommMpi::v_StartAll(CommRequestSharedPtr request)
{
    CommRequestMpiSharedPtr req =
        std::static_pointer_cast<CommRequestMpi>(request);
    if (req->GetNumRequest() != 0)
    {
        MPI_Startall(req->GetNumRequest(), req->GetRequest(0));
    }
}

/**
 * Wait for all requests held by the given request object to complete.
 */
void CommMpi::v_WaitAll(CommRequestSharedPtr request)
{
    CommRequestMpiSharedPtr req =
        std::static_pointer_cast<CommRequestMpi>(request);
    if (req->GetNumRequest() != 0)
    {
        MPI_Waitall(req->GetNumRequest(), req->GetRequest(0),
                    MPI_STATUSES_IGNORE);
    }
}

/**
 * Create a request object capable of holding num MPI requests.
 */
CommRequestSharedPtr CommMpi::v_CreateRequest(int num)
{
    return std::shared_ptr<CommRequest>(new CommRequestMpi(num));
}

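// Illustrative sketch (assumed calling sequence, not code from this file):
// the request-based routines above are intended to be combined. A caller
// creates one request object sized for all of its outstanding operations,
// posts sends and receives into separate slots, and then waits for the whole
// set. The public Comm wrappers (CreateRequest, Irecv, Isend, WaitAll) and
// their argument order are assumptions based on the virtual methods here:
//
//     CommRequestSharedPtr req = comm->CreateRequest(2);
//     comm->Irecv(otherRank, recvArray, nVals, req, 0);
//     comm->Isend(otherRank, sendArray, nVals, req, 1);
//     // ... overlap local computation here ...
//     comm->WaitAll(req);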
/**
 * Processes are considered as a grid of size pRows*pColumns. Comm
 * objects are created corresponding to the rows and columns of this
 * grid. The row and column to which this process belongs is stored in
 * #m_commRow and #m_commColumn. A worked example of the rank-to-grid
 * mapping follows the function body.
 */
void CommMpi::v_SplitComm(int pRows, int pColumns, int pTime)
{
    ASSERTL0(pRows * pColumns * pTime == m_size,
             "Rows/Columns/Time do not match comm size.");

    MPI_Comm newComm;
    MPI_Comm gridComm;
    if (pTime == 1)
    {
        // There is a bug in OpenMPI 3.1.3 which causes some cases to fail in
        // buster-full-build-and-test. Failed cases are:
        //
        // IncNavierStokesSolver_ChanFlow_3DH1D_FlowrateExplicit_MVM_par
        // IncNavierStokesSolver_ChanFlow_3DH1D_FlowrateExplicit_MVM_par_hybrid
        //
        // See: https://github.com/open-mpi/ompi/issues/6522

        // Compute row and column in grid.
        int myCol = m_rank % pColumns;
        int myRow = (m_rank - myCol) / pColumns;

        // Split Comm into rows - all processes with same myRow are put in
        // the same communicator. The rank within this communicator is the
        // column index.
        MPI_Comm_split(m_comm, myRow, myCol, &newComm);
        m_commRow = std::shared_ptr<Comm>(new CommMpi(newComm));

        // Split Comm into columns - all processes with same myCol are put
        // in the same communicator. The rank within this communicator is
        // the row index.
        MPI_Comm_split(m_comm, myCol, myRow, &newComm);
        m_commColumn = std::shared_ptr<Comm>(new CommMpi(newComm));
    }
    else
    {
        constexpr int dims = 3;
        const int sizes[dims] = {pRows, pColumns, pTime};
        const int periods[dims] = {0, 0, 0};
        constexpr int reorder = 1;

        MPI_Cart_create(m_comm, dims, sizes, periods, reorder, &gridComm);

        constexpr int keepRow[dims] = {0, 1, 0};
        MPI_Cart_sub(gridComm, keepRow, &newComm);
        m_commRow = std::shared_ptr<Comm>(new CommMpi(newComm));

        constexpr int keepCol[dims] = {1, 0, 0};
        MPI_Cart_sub(gridComm, keepCol, &newComm);
        m_commColumn = std::shared_ptr<Comm>(new CommMpi(newComm));

        constexpr int keepTime[dims] = {0, 0, 1};
        MPI_Cart_sub(gridComm, keepTime, &newComm);
        m_commTime = std::shared_ptr<Comm>(new CommMpi(newComm));

        constexpr int keepSpace[dims] = {1, 1, 0};
        MPI_Cart_sub(gridComm, keepSpace, &newComm);
        m_commSpace = std::shared_ptr<Comm>(new CommMpi(newComm));
    }
}

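// Worked example (illustrative only): with m_size = 6 split as pRows = 2,
// pColumns = 3 and pTime = 1, rank 4 computes myCol = 4 % 3 = 1 and
// myRow = (4 - 1) / 3 = 1. It therefore joins the row communicator holding
// world ranks {3, 4, 5}, where its rank is its column index 1, and the
// column communicator holding world ranks {1, 4}, where its rank is its row
// index 1.
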
/**
 * Create a new communicator if the flag is non-zero.
 */
CommSharedPtr CommMpi::v_CommCreateIf(int flag)
{
    MPI_Comm newComm;
    // color == MPI_UNDEFINED => not in the new communicator.
    // key == 0 on all => use the existing rank to order the processes;
    // OpenMPI, at least, suggests this is faster than ordering them
    // ourselves.
    MPI_Comm_split(m_comm, flag ? flag : MPI_UNDEFINED, 0, &newComm);

    if (flag == 0)
    {
        // flag == 0 => we get back MPI_COMM_NULL, so return a null pointer
        // instead.
        return std::shared_ptr<Comm>();
    }
    else
    {
        // Return a real communicator.
        return std::shared_ptr<Comm>(new CommMpi(newComm));
    }
}

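// Illustrative sketch (assumed usage, not code from this file): a non-zero
// flag acts as the colour that groups processes, while a zero flag opts a
// process out and yields a null pointer. For example, to build a
// sub-communicator containing only the even-ranked processes:
//
//     CommSharedPtr evenComm = comm->CommCreateIf(comm->GetRank() % 2 == 0);
//     if (evenComm) // odd ranks receive a null pointer
//     {
//         // collective calls restricted to even ranks go here
//     }
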
/**
 * Split the communicator into an intra-node communicator and, on the leading
 * rank of each node, a communicator spanning the node leaders.
 */
std::pair<CommSharedPtr, CommSharedPtr> CommMpi::v_SplitCommNode()
{
    std::pair<CommSharedPtr, CommSharedPtr> ret;

#if MPI_VERSION < 3
    ASSERTL0(false, "Not implemented for non-MPI-3 versions.");
#else
    // Create an intra-node communicator.
    MPI_Comm nodeComm;
    MPI_Comm_split_type(MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, m_rank,
                        MPI_INFO_NULL, &nodeComm);

    // For rank 0 of the intra-node communicator, split the main
    // communicator. Everyone else will get a null communicator.
    ret.first = std::shared_ptr<Comm>(new CommMpi(nodeComm));
    ret.second = CommMpi::v_CommCreateIf(ret.first->GetRank() == 0);
    if (ret.first->GetRank() == 0)
    {
        ret.second->SplitComm(1, ret.second->GetSize());
    }
#endif

    return ret;
}

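// Illustrative example (not code from this file): on a job with 2 nodes and
// 4 ranks per node, every rank receives ret.first, an intra-node communicator
// of size 4. Only the first rank on each node receives a non-null ret.second,
// a communicator of size 2 connecting the two node leaders; all other ranks
// get a null pointer, matching the behaviour of v_CommCreateIf above.
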
} // namespace LibUtilities
} // namespace Nektar