Nektar++
CommMpi.cpp
1///////////////////////////////////////////////////////////////////////////////
2//
3// File: CommMpi.cpp
4//
5// For more information, please see: http://www.nektar.info
6//
7// The MIT License
8//
9// Copyright (c) 2006 Division of Applied Mathematics, Brown University (USA),
10// Department of Aeronautics, Imperial College London (UK), and Scientific
11// Computing and Imaging Institute, University of Utah (USA).
12//
13// Permission is hereby granted, free of charge, to any person obtaining a
14// copy of this software and associated documentation files (the "Software"),
15// to deal in the Software without restriction, including without limitation
16// the rights to use, copy, modify, merge, publish, distribute, sublicense,
17// and/or sell copies of the Software, and to permit persons to whom the
18// Software is furnished to do so, subject to the following conditions:
19//
20// The above copyright notice and this permission notice shall be included
21// in all copies or substantial portions of the Software.
22//
23// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
24// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
26// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
28// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
29// DEALINGS IN THE SOFTWARE.
30//
31// Description: MPI communication implementation
32//
33///////////////////////////////////////////////////////////////////////////////
34
35#ifdef NEKTAR_USING_PETSC
36#include "petscsys.h"
37#endif
38
39#include <LibUtilities/Communication/CommMpi.h>
40
41namespace Nektar::LibUtilities
42{
43
45 "ParallelMPI", CommMpi::create, "Parallel communication using MPI.");
46
47/**
48 *
49 */
50CommMpi::CommMpi(int narg, char *arg[]) : Comm(narg, arg)
51{
52 int init = 0;
53 MPI_Initialized(&init);
54
55 if (!init)
56 {
57 int thread_support = 0;
58 if (MPI_Init_thread(&narg, &arg, MPI_THREAD_MULTIPLE,
59 &thread_support) != MPI_SUCCESS)
60 {
61 NEKERROR(
62 ErrorUtil::ewarning,
63 "Initializing MPI with MPI_Init; if Scotch version > 6 is "
64 "compiled with multi-threading, this might cause deadlocks.")
65 ASSERTL0(MPI_Init(&narg, &arg) == MPI_SUCCESS,
66 "Failed to initialise MPI");
67 }
68 // Store a flag to indicate that Nektar++ is in charge of finalizing MPI.
69 m_controls_mpi = true;
70 }
71 else
72 {
73 // Another library or application is in charge of finalizing MPI; this
74 // is not the responsibility of Nektar++.
75 m_controls_mpi = false;
76 }
77
78 m_comm = MPI_COMM_WORLD;
79 MPI_Comm_size(m_comm, &m_size);
80 MPI_Comm_rank(m_comm, &m_rank);
81
82#ifdef NEKTAR_USING_PETSC
83 PetscInitializeNoArguments();
84#endif
85
86 m_type = "Parallel MPI";
87}
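// Usage sketch (illustrative only): a CommMpi object is normally obtained
// through the communication factory under the "ParallelMPI" key registered
// above, rather than constructed directly. The CreateInstance call assumes
// the usual NekFactory interface.
static CommSharedPtr ExampleCreateParallelComm(int argc, char *argv[])
{
    // On first use this reaches the constructor above, which initialises MPI
    // if no other library has done so already.
    CommSharedPtr comm =
        GetCommFactory().CreateInstance("ParallelMPI", argc, argv);

    // Rank and size are then available through the Comm base class.
    int rank = comm->GetRank();
    int size = comm->GetSize();
    (void)rank;
    (void)size;
    return comm;
}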
88
89/**
90 *
91 */
92CommMpi::CommMpi(MPI_Comm pComm) : Comm()
93{
94 m_comm = pComm;
95 MPI_Comm_size(m_comm, &m_size);
96 MPI_Comm_rank(m_comm, &m_rank);
97
98 m_type = "Parallel MPI";
99}
100
101/**
102 *
103 */
104CommMpi::~CommMpi()
105{
106 int flag;
107 MPI_Finalized(&flag);
108 if (!flag && m_comm != MPI_COMM_WORLD)
109 {
110 MPI_Comm_free(&m_comm);
111 }
112}
113
114/**
115 *
116 */
117MPI_Comm CommMpi::GetComm()
118{
119 return m_comm;
120}
121
122/**
123 *
124 */
125void CommMpi::v_Finalise()
126{
127#ifdef NEKTAR_USING_PETSC
128 PetscFinalize();
129#endif
130 int flag;
131 MPI_Finalized(&flag);
132 if ((!flag) && m_controls_mpi)
133 {
134 MPI_Finalize();
135 }
136}
137
138/**
139 *
140 */
141int CommMpi::v_GetRank()
142{
143 return m_rank;
144}
145
146/**
147 *
148 */
149bool CommMpi::v_TreatAsRankZero()
150{
151 return m_rank == 0;
152}
153
154/**
155 *
156 */
157bool CommMpi::v_IsSerial()
158{
159 return m_size == 1;
160}
161
162/**
163 *
164 */
165std::tuple<int, int, int> CommMpi::v_GetVersion()
166{
167 int version, subversion;
168 int retval = MPI_Get_version(&version, &subversion);
169
170 ASSERTL0(retval == MPI_SUCCESS, "MPI error performing GetVersion.");
171
172 return std::make_tuple(version, subversion, 0);
173}
174
175/**
176 *
177 */
178void CommMpi::v_Block()
179{
180 MPI_Barrier(m_comm);
181}
182
183/**
184 *
185 */
186double CommMpi::v_Wtime()
187{
188 return MPI_Wtime();
189}
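// Usage sketch (illustrative only): a common timing pattern built on the
// Block() and Wtime() wrappers of the Comm base class. The barrier before
// each timestamp keeps the ranks loosely synchronised so that the measured
// interval reflects the slowest rank.
static void ExampleTimedRegion(CommSharedPtr comm)
{
    comm->Block();              // synchronise before starting the clock
    auto start = comm->Wtime(); // wall-clock time in seconds

    // ... work to be timed ...

    comm->Block();
    auto elapsed = comm->Wtime() - start;
    (void)elapsed;
}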
190
191/**
192 *
193 */
194void CommMpi::v_Send(const void *buf, int count, CommDataType dt, int dest)
195{
196 if (MPISYNC)
197 {
198 MPI_Ssend(buf, count, dt, dest, 0, m_comm);
199 }
200 else
201 {
202 MPI_Send(buf, count, dt, dest, 0, m_comm);
203 }
204}
205
206/**
207 *
208 */
209void CommMpi::v_Recv(void *buf, int count, CommDataType dt, int source)
210{
211 MPI_Recv(buf, count, dt, source, 0, m_comm, MPI_STATUS_IGNORE);
212}
213
214/**
215 *
216 */
217void CommMpi::v_SendRecv(const void *sendbuf, int sendcount,
218 CommDataType sendtype, int dest, void *recvbuf,
219 int recvcount, CommDataType recvtype, int source)
220{
221 MPI_Status status;
222 int retval = MPI_Sendrecv(sendbuf, sendcount, sendtype, dest, 0, recvbuf,
223 recvcount, recvtype, source, 0, m_comm, &status);
224
225 ASSERTL0(retval == MPI_SUCCESS,
226 "MPI error performing send-receive of data.");
227}
228
229/**
230 *
231 */
232void CommMpi::v_AllReduce(void *buf, int count, CommDataType dt,
233 enum ReduceOperator pOp)
234{
235 if (GetSize() == 1)
236 {
237 return;
238 }
239
240 MPI_Op vOp;
241 switch (pOp)
242 {
243 case ReduceMax:
244 vOp = MPI_MAX;
245 break;
246 case ReduceMin:
247 vOp = MPI_MIN;
248 break;
249 case ReduceSum:
250 default:
251 vOp = MPI_SUM;
252 break;
253 }
254 int retval = MPI_Allreduce(MPI_IN_PLACE, buf, count, dt, vOp, m_comm);
255
256 ASSERTL0(retval == MPI_SUCCESS, "MPI error performing All-reduce.");
257}
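// Usage sketch (illustrative only): the virtual call above is normally
// reached through the templated Comm::AllReduce wrapper, which reduces a
// value in place across all ranks and deduces the CommDataType from the
// argument type.
static void ExampleGlobalSum(CommSharedPtr comm)
{
    // Each rank contributes its rank index; afterwards every rank holds the
    // sum 0 + 1 + ... + (size - 1).
    int localValue = comm->GetRank();
    comm->AllReduce(localValue, ReduceSum);
}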
258
259/**
260 *
261 */
262void CommMpi::v_AlltoAll(const void *sendbuf, int sendcount,
263 CommDataType sendtype, void *recvbuf, int recvcount,
264 CommDataType recvtype)
265{
266 int retval = MPI_Alltoall(sendbuf, sendcount, sendtype, recvbuf, recvcount,
267 recvtype, m_comm);
268
269 ASSERTL0(retval == MPI_SUCCESS, "MPI error performing All-to-All.");
270}
271
272/**
273 *
274 */
275void CommMpi::v_AlltoAllv(const void *sendbuf, const int *sendcounts,
276 const int *senddispls, CommDataType sendtype,
277 void *recvbuf, const int *recvcounts,
278 const int *recvdispls, CommDataType recvtype)
279{
280 int retval =
281 MPI_Alltoallv(sendbuf, sendcounts, senddispls, sendtype, recvbuf,
282 recvcounts, recvdispls, recvtype, m_comm);
283
284 ASSERTL0(retval == MPI_SUCCESS, "MPI error performing All-to-All-v.");
285}
286
287/**
288 *
289 */
290void CommMpi::v_AllGather(const void *sendbuf, int sendcount,
291 CommDataType sendtype, void *recvbuf, int recvcount,
292 CommDataType recvtype)
293{
294 int retval = MPI_Allgather(sendbuf, sendcount, sendtype, recvbuf, recvcount,
295 recvtype, m_comm);
296
297 ASSERTL0(retval == MPI_SUCCESS, "MPI error performing Allgather.");
298}
299
300/**
301 *
302 */
303void CommMpi::v_AllGatherv(const void *sendbuf, int sendcount,
304 CommDataType sendtype, void *recvbuf,
305 const int *recvcounts, const int *recvdispls,
306 CommDataType recvtype)
307{
308 int retval = MPI_Allgatherv(sendbuf, sendcount, sendtype, recvbuf,
309 recvcounts, recvdispls, recvtype, m_comm);
310
311 ASSERTL0(retval == MPI_SUCCESS, "MPI error performing Allgatherv.");
312}
313
314/**
315 *
316 */
317void CommMpi::v_AllGatherv(void *recvbuf, const int *recvcounts,
318 const int *recvdispls, CommDataType recvtype)
319{
320 int retval = MPI_Allgatherv(MPI_IN_PLACE, 0, MPI_DATATYPE_NULL, recvbuf,
321 recvcounts, recvdispls, recvtype, m_comm);
322
323 ASSERTL0(retval == MPI_SUCCESS, "MPI error performing Allgatherv.");
324}
325
326/**
327 *
328 */
329void CommMpi::v_Bcast(void *buffer, int count, CommDataType dt, int root)
330{
331 int retval = MPI_Bcast(buffer, count, dt, root, m_comm);
332
333 ASSERTL0(retval == MPI_SUCCESS, "MPI error performing Bcast.");
334}
335
336/**
337 *
338 */
339void CommMpi::v_Gather(const void *sendbuf, int sendcount,
340 CommDataType sendtype, void *recvbuf, int recvcount,
341 CommDataType recvtype, int root)
342{
343 int retval = MPI_Gather(sendbuf, sendcount, sendtype, recvbuf, recvcount,
344 recvtype, root, m_comm);
345
346 ASSERTL0(retval == MPI_SUCCESS, "MPI error performing Gather.");
347}
348
349/**
350 *
351 */
352void CommMpi::v_Scatter(const void *sendbuf, int sendcount,
353 CommDataType sendtype, void *recvbuf, int recvcount,
354 CommDataType recvtype, int root)
355{
356 int retval = MPI_Scatter(sendbuf, sendcount, sendtype, recvbuf, recvcount,
357 recvtype, root, m_comm);
358
359 ASSERTL0(retval == MPI_SUCCESS, "MPI error performing Scatter.");
360}
361
362/**
363 *
364 */
365void CommMpi::v_DistGraphCreateAdjacent(
366 [[maybe_unused]] int indegree, [[maybe_unused]] const int *sources,
367 [[maybe_unused]] const int *sourceweights, [[maybe_unused]] int reorder)
368{
369#if MPI_VERSION < 3
370 ASSERTL0(false, "MPI_Dist_graph_create_adjacent is not supported in your "
371 "installed MPI version.");
372#else
373 int retval = MPI_Dist_graph_create_adjacent(
374 m_comm, indegree, sources, sourceweights, indegree, sources,
375 sourceweights, MPI_INFO_NULL, reorder, &m_comm);
376
377 ASSERTL0(retval == MPI_SUCCESS,
378 "MPI error performing Dist_graph_create_adjacent.")
379#endif
380}
381
382/**
383 *
384 */
385void CommMpi::v_NeighborAlltoAllv([[maybe_unused]] const void *sendbuf,
386 [[maybe_unused]] const int *sendcounts,
387 [[maybe_unused]] const int *senddispls,
388 [[maybe_unused]] CommDataType sendtype,
389 [[maybe_unused]] void *recvbuf,
390 [[maybe_unused]] const int *recvcounts,
391 [[maybe_unused]] const int *recvdispls,
392 [[maybe_unused]] CommDataType recvtype)
393{
394#if MPI_VERSION < 3
395 ASSERTL0(false, "MPI_Neighbor_alltoallv is not supported in your "
396 "installed MPI version.");
397#else
398 int retval = MPI_Neighbor_alltoallv(sendbuf, sendcounts, senddispls,
399 sendtype, recvbuf, recvcounts,
400 recvdispls, recvtype, m_comm);
401
402 ASSERTL0(retval == MPI_SUCCESS, "MPI error performing NeighborAllToAllV.");
403#endif
404}
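// Sketch of the two-step protocol implemented by v_DistGraphCreateAdjacent
// and v_NeighborAlltoAllv (MPI-3 only): a distributed graph topology with
// identical in- and out-neighbours is attached first, and variable-length
// data is then exchanged with exactly those neighbours. The raw MPI
// equivalent, with illustrative buffers and counts, looks roughly like this:
static void ExampleNeighbourExchange(MPI_Comm comm, int nNbrs, const int *nbrs,
                                     const int *sendcounts, const int *sdispls,
                                     const double *sendbuf,
                                     const int *recvcounts, const int *rdispls,
                                     double *recvbuf)
{
    // Symmetric neighbourhood: sources and destinations are the same list.
    MPI_Comm graphComm;
    MPI_Dist_graph_create_adjacent(comm, nNbrs, nbrs, MPI_UNWEIGHTED, nNbrs,
                                   nbrs, MPI_UNWEIGHTED, MPI_INFO_NULL,
                                   0 /* reorder */, &graphComm);

    // Exchange sendcounts[i] doubles with neighbour i, receiving
    // recvcounts[i] doubles in return.
    MPI_Neighbor_alltoallv(sendbuf, sendcounts, sdispls, MPI_DOUBLE, recvbuf,
                           recvcounts, rdispls, MPI_DOUBLE, graphComm);

    MPI_Comm_free(&graphComm);
}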
405
406/**
407 *
408 */
409void CommMpi::v_Irsend(const void *buf, int count, CommDataType dt, int dest,
410 CommRequestSharedPtr request, int loc)
411{
412 CommRequestMpiSharedPtr req =
413 std::static_pointer_cast<CommRequestMpi>(request);
414 MPI_Irsend(buf, count, dt, dest, 0, m_comm, req->GetRequest(loc));
415}
416
417/**
418 *
419 */
420void CommMpi::v_Isend(const void *buf, int count, CommDataType dt, int dest,
421 CommRequestSharedPtr request, int loc)
422{
423 CommRequestMpiSharedPtr req =
424 std::static_pointer_cast<CommRequestMpi>(request);
425 MPI_Isend(buf, count, dt, dest, 0, m_comm, req->GetRequest(loc));
426}
427
428/**
429 *
430 */
431void CommMpi::v_SendInit(const void *buf, int count, CommDataType dt, int dest,
432 CommRequestSharedPtr request, int loc)
433{
434 CommRequestMpiSharedPtr req =
435 std::static_pointer_cast<CommRequestMpi>(request);
436 MPI_Send_init(buf, count, dt, dest, 0, m_comm, req->GetRequest(loc));
437}
438
439/**
440 *
441 */
442void CommMpi::v_Irecv(void *buf, int count, CommDataType dt, int source,
443 CommRequestSharedPtr request, int loc)
444{
445 CommRequestMpiSharedPtr req =
446 std::static_pointer_cast<CommRequestMpi>(request);
447 MPI_Irecv(buf, count, dt, source, 0, m_comm, req->GetRequest(loc));
448}
449
450/**
451 *
452 */
453void CommMpi::v_RecvInit(void *buf, int count, CommDataType dt, int source,
454 CommRequestSharedPtr request, int loc)
455{
456 CommRequestMpiSharedPtr req =
457 std::static_pointer_cast<CommRequestMpi>(request);
458 MPI_Recv_init(buf, count, dt, source, 0, m_comm, req->GetRequest(loc));
459}
460
461/**
462 *
463 */
464void CommMpi::v_StartAll(CommRequestSharedPtr request)
465{
466 CommRequestMpiSharedPtr req =
467 std::static_pointer_cast<CommRequestMpi>(request);
468 if (req->GetNumRequest() != 0)
469 {
470 MPI_Startall(req->GetNumRequest(), req->GetRequest(0));
471 }
472}
473
474/**
475 *
476 */
477void CommMpi::v_WaitAll(CommRequestSharedPtr request)
478{
479 CommRequestMpiSharedPtr req =
480 std::static_pointer_cast<CommRequestMpi>(request);
481 if (req->GetNumRequest() != 0)
482 {
483 MPI_Waitall(req->GetNumRequest(), req->GetRequest(0),
484 MPI_STATUSES_IGNORE);
485 }
486}
487
488/**
489 *
490 */
491CommRequestSharedPtr CommMpi::v_CreateRequest(int num)
492{
493 return std::shared_ptr<CommRequest>(new CommRequestMpi(num));
494}
495
496/**
497 * Processes are considered as a grid of size pRows*pColumns. Comm
498 * objects are created corresponding to the rows and columns of this
499 * grid. The row and column to which this process belongs is stored in
500 * #m_commRow and #m_commColumn.
501 */
502void CommMpi::v_SplitComm(int pRows, int pColumns, int pTime)
503{
504 if (pTime == 0)
505 {
506 ASSERTL0(pRows * pColumns == m_size,
507 "Rows/Columns do not match comm size.");
508
509 MPI_Comm newComm;
510
511 // Compute row and column in grid.
512 int myCol = m_rank % pColumns;
513 int myRow = (m_rank - myCol) / pColumns;
514
515 // Split Comm into rows - all processes with same myRow are put in
516 // the same communicator. The rank within this communicator is the
517 // column index.
518 MPI_Comm_split(m_comm, myRow, myCol, &newComm);
519 m_commRow = std::shared_ptr<Comm>(new CommMpi(newComm));
520
521 // Split Comm into columns - all processes with same myCol are put
522 // in the same communicator. The rank within this communicator is
523 // the row index.
524 MPI_Comm_split(m_comm, myCol, myRow, &newComm);
525 m_commColumn = std::shared_ptr<Comm>(new CommMpi(newComm));
526 }
527 else
528 {
529 ASSERTL0(pRows * pColumns * pTime == m_size,
530 "Rows/Columns/Time do not match comm size.");
531
532 MPI_Comm newComm;
533
534 // Compute row and column in grid.
535 int mySpace = m_rank % (pRows * pColumns);
536 int myTime = (m_rank - mySpace) / (pRows * pColumns);
537 int myCol = mySpace % pColumns;
538 int myRow = (mySpace - myCol) / pColumns;
539
540 // Split Comm - all processes with same mySpace are put in
541 // the same communicator. The rank within this communicator is the
542 // time index.
543 MPI_Comm_split(m_comm, mySpace, myTime, &newComm);
544 m_commTime = std::shared_ptr<Comm>(new CommMpi(newComm));
545
546 // Split Comm - all processes with same myTime are put in
547 // the same communicator. The rank within this communicator is the
548 // spatial index.
549 MPI_Comm_split(m_comm, myTime, mySpace, &newComm);
550 m_commSpace = std::shared_ptr<Comm>(new CommMpi(newComm));
551
552 // Split Comm into rows - all processes with same myRow are put in
553 // the same communicator. The rank within this communicator is the
554 // column index.
555 MPI_Comm_split(m_comm, myRow + pRows * pColumns * myTime, myCol,
556 &newComm);
557 m_commRow = std::shared_ptr<Comm>(new CommMpi(newComm));
558
559 // Split Comm into columns - all processes with same myCol are put
560 // in the same communicator. The rank within this communicator is
561 // the row index.
562 MPI_Comm_split(m_comm, myCol + pRows * pColumns * myTime, myRow,
563 &newComm);
564 m_commColumn = std::shared_ptr<Comm>(new CommMpi(newComm));
565 }
566}
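// Worked example of the grid decomposition above (illustrative only): with
// six ranks arranged as pRows = 2 by pColumns = 3, rank 4 lands in row 1,
// column 1, so it shares its row communicator with ranks {3, 4, 5} and its
// column communicator with ranks {1, 4}.
static void ExampleGridIndices()
{
    int size = 6, pRows = 2, pColumns = 3, rank = 4;
    int myCol = rank % pColumns;           // 4 % 3 == 1
    int myRow = (rank - myCol) / pColumns; // (4 - 1) / 3 == 1

    ASSERTL0(pRows * pColumns == size, "Rows/Columns do not match comm size.");
    ASSERTL0(myCol == 1 && myRow == 1, "Unexpected grid indices.");
}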
567
568/**
569 * Create a new communicator if the flag is non-zero.
570 */
571CommSharedPtr CommMpi::v_CommCreateIf(int flag)
572{
573 MPI_Comm newComm;
574 // color == MPI_UNDEFINED => this rank is not in the new communicator.
575 // key == 0 on all ranks => order by rank in the parent communicator;
576 // Open MPI, at least, suggests this is faster than ordering them ourselves.
577 MPI_Comm_split(m_comm, flag ? flag : MPI_UNDEFINED, 0, &newComm);
578
579 if (flag == 0)
580 {
581 // flag == 0 => get back MPI_COMM_NULL, return a null ptr instead.
582 return std::shared_ptr<Comm>();
583 }
584 else
585 {
586 // Return a real communicator
587 return std::shared_ptr<Comm>(new CommMpi(newComm));
588 }
589}
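// Usage sketch (illustrative only): splitting off a sub-communicator that
// contains only the ranks passing some predicate, via the public
// CommCreateIf wrapper of the Comm base class. Ranks passing a non-zero flag
// share the new communicator; the others receive a null pointer and simply
// skip the collective work.
static void ExampleEvenRankSubComm(CommSharedPtr comm)
{
    CommSharedPtr evenComm = comm->CommCreateIf(comm->GetRank() % 2 == 0);
    if (evenComm)
    {
        // Only even ranks reach this point; collectives on evenComm involve
        // just those ranks.
        int nEven = evenComm->GetSize();
        (void)nEven;
    }
}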
590
591/**
592 *
593 */
594std::pair<CommSharedPtr, CommSharedPtr> CommMpi::v_SplitCommNode()
595{
596 std::pair<CommSharedPtr, CommSharedPtr> ret;
597
598#if MPI_VERSION < 3
599 ASSERTL0(false, "Not implemented for non-MPI-3 versions.");
600#else
601 // Create an intra-node communicator.
602 MPI_Comm nodeComm;
603 MPI_Comm_split_type(MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, m_rank,
604 MPI_INFO_NULL, &nodeComm);
605
606 // For rank 0 of the intra-node communicator, split the main
607 // communicator. Everyone else will get a null communicator.
608 ret.first = std::shared_ptr<Comm>(new CommMpi(nodeComm));
609 ret.second = CommMpi::v_CommCreateIf(ret.first->GetRank() == 0);
610 if (ret.first->GetRank() == 0)
611 {
612 ret.second->SplitComm(1, ret.second->GetSize());
613 }
614#endif
615
616 return ret;
617}
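// Usage sketch (illustrative only, assuming a public SplitCommNode wrapper on
// the Comm base class): the first communicator of the returned pair spans the
// ranks sharing a compute node, while the second is non-null only on each
// node's local rank 0 and connects those node leaders.
static void ExampleNodeSplit(CommSharedPtr comm)
{
    std::pair<CommSharedPtr, CommSharedPtr> nodeComms = comm->SplitCommNode();
    CommSharedPtr intraNode   = nodeComms.first;  // all ranks on this node
    CommSharedPtr nodeLeaders = nodeComms.second; // null except on node rank 0

    if (nodeLeaders)
    {
        // One rank per node takes part in inter-node coordination here.
    }
    (void)intraNode;
}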
618
619} // namespace Nektar::LibUtilities
#define MPISYNC
Definition CommMpi.h:45
#define ASSERTL0(condition, msg)
#define NEKERROR(type, msg)
Assert Level 0 – Fundamental assert which is used whether in FULLDEBUG, DEBUG or OPT compilation mode...
Comm
Base communications class.
Definition Comm.h:88
CommSharedPtr m_commColumn
Column communicator.
Definition Comm.h:178
CommSharedPtr m_commRow
Row communicator.
Definition Comm.h:177
int GetSize() const
Returns number of processes.
Definition Comm.h:269
CommSharedPtr m_commTime
Definition Comm.h:179
int m_size
Number of processes.
Definition Comm.h:175
std::string m_type
Type of communication.
Definition Comm.h:176
CommSharedPtr m_commSpace
Definition Comm.h:180
CommMpi
MPI communication implementation.
Definition CommMpi.h:90
void v_WaitAll(CommRequestSharedPtr request) final
Definition CommMpi.cpp:477
void v_Gather(const void *sendbuf, int sendcount, CommDataType sendtype, void *recvbuf, int recvcount, CommDataType recvtype, int root) final
Definition CommMpi.cpp:339
void v_SendInit(const void *buf, int count, CommDataType dt, int dest, CommRequestSharedPtr request, int loc) final
Definition CommMpi.cpp:431
CommRequestSharedPtr v_CreateRequest(int num) final
Definition CommMpi.cpp:491
CommMpi(int narg, char *arg[])
Definition CommMpi.cpp:50
void v_AllGather(const void *sendbuf, int sendcount, CommDataType sendtype, void *recvbuf, int recvcount, CommDataType recvtype) final
Definition CommMpi.cpp:290
void v_Isend(const void *buf, int count, CommDataType dt, int dest, CommRequestSharedPtr request, int loc) final
Definition CommMpi.cpp:420
void v_Bcast(void *buffer, int count, CommDataType dt, int root) final
Definition CommMpi.cpp:329
void v_Scatter(const void *sendbuf, int sendcount, CommDataType sendtype, void *recvbuf, int recvcount, CommDataType recvtype, int root) final
Definition CommMpi.cpp:352
void v_AllGatherv(const void *sendbuf, int sendcount, CommDataType sendtype, void *recvbuf, const int *recvcounts, const int *recvdispls, CommDataType recvtype) final
Definition CommMpi.cpp:303
CommSharedPtr v_CommCreateIf(int flag) final
Definition CommMpi.cpp:571
void v_Send(const void *buf, int count, CommDataType dt, int dest) final
Definition CommMpi.cpp:194
std::pair< CommSharedPtr, CommSharedPtr > v_SplitCommNode() final
Definition CommMpi.cpp:594
void v_SendRecv(const void *sendbuf, int sendcount, CommDataType sendtype, int dest, void *recvbuf, int recvcount, CommDataType recvtype, int source) final
Definition CommMpi.cpp:217
void v_NeighborAlltoAllv(const void *sendbuf, const int *sendcounts, const int *senddispls, CommDataType sendtype, void *recvbuf, const int *recvcounts, const int *recvdispls, CommDataType recvtype) final
Definition CommMpi.cpp:385
void v_AlltoAllv(const void *sendbuf, const int *sendcounts, const int *senddispls, CommDataType sendtype, void *recvbuf, const int *recvcounts, const int *recvdispls, CommDataType recvtype) final
Definition CommMpi.cpp:275
void v_Recv(void *buf, int count, CommDataType dt, int source) final
Definition CommMpi.cpp:209
void v_StartAll(CommRequestSharedPtr request) final
Definition CommMpi.cpp:464
void v_Irsend(const void *buf, int count, CommDataType dt, int dest, CommRequestSharedPtr request, int loc) final
Definition CommMpi.cpp:409
void v_DistGraphCreateAdjacent(int indegree, const int *sources, const int *sourceweights, int reorder) final
Definition CommMpi.cpp:365
void v_AllReduce(void *buf, int count, CommDataType dt, enum ReduceOperator pOp) final
Definition CommMpi.cpp:232
void v_Irecv(void *buf, int count, CommDataType dt, int source, CommRequestSharedPtr request, int loc) final
Definition CommMpi.cpp:442
static std::string className
Name of class.
Definition CommMpi.h:99
void v_RecvInit(void *buf, int count, CommDataType dt, int source, CommRequestSharedPtr request, int loc) final
Definition CommMpi.cpp:453
static CommSharedPtr create(int narg, char *arg[])
Creates an instance of this class.
Definition CommMpi.h:93
void v_AlltoAll(const void *sendbuf, int sendcount, CommDataType sendtype, void *recvbuf, int recvcount, CommDataType recvtype) final
Definition CommMpi.cpp:262
void v_SplitComm(int pRows, int pColumns, int pTime) override
Definition CommMpi.cpp:502
std::tuple< int, int, int > v_GetVersion() final
Definition CommMpi.cpp:165
CommRequestMpi
Class for communicator request type.
Definition CommMpi.h:60
tKey RegisterCreatorFunction(tKey idKey, CreatorFunction classCreator, std::string pDesc="")
Register a class with the factory.
unsigned int CommDataType
std::shared_ptr< CommRequest > CommRequestSharedPtr
Definition Comm.h:84
std::shared_ptr< CommRequestMpi > CommRequestMpiSharedPtr
Definition CommMpi.h:86
CommFactory & GetCommFactory()
ReduceOperator
Type of operation to perform in AllReduce.
Definition Comm.h:65
std::shared_ptr< Comm > CommSharedPtr
Pointer to a Communicator object.
Definition Comm.h:55