35#ifdef NEKTAR_USING_PETSC
53 MPI_Initialized(&init);
57 int thread_support = 0;
58 if (MPI_Init_thread(&narg, &arg, MPI_THREAD_MULTIPLE,
59 &thread_support) != MPI_SUCCESS)
63 "Initializing MPI using MPI_Init, if scotch version > 6 and is "
64 "compiled with multi-threading, it might cause deadlocks.")
65 ASSERTL0(MPI_Init(&narg, &arg) == MPI_SUCCESS,
66 "Failed to initialise MPI");
82#ifdef NEKTAR_USING_PETSC
83 PetscInitializeNoArguments();
107 MPI_Finalized(&flag);
108 if (!flag &&
m_comm != MPI_COMM_WORLD)
127#ifdef NEKTAR_USING_PETSC
131 MPI_Finalized(&flag);
167 int version, subversion;
168 int retval = MPI_Get_version(&version, &subversion);
170 ASSERTL0(retval == MPI_SUCCESS,
"MPI error performing GetVersion.");
172 return std::make_tuple(version, subversion, 0);
198 MPI_Ssend(buf, count, dt, dest, 0,
m_comm);
202 MPI_Send(buf, count, dt, dest, 0,
m_comm);
211 MPI_Recv(buf, count, dt, source, 0,
m_comm, MPI_STATUS_IGNORE);
218 int dest,
void *recvbuf,
int recvcount,
222 int retval = MPI_Sendrecv(sendbuf, sendcount, sendtype, dest, 0, recvbuf,
223 recvcount, recvtype, source, 0,
m_comm, &status);
226 "MPI error performing send-receive of data.");
254 int retval = MPI_Allreduce(MPI_IN_PLACE, buf, count, dt, vOp,
m_comm);
256 ASSERTL0(retval == MPI_SUCCESS,
"MPI error performing All-reduce.");
265 int retval = MPI_Alltoall(sendbuf, sendcount, sendtype, recvbuf, recvcount,
268 ASSERTL0(retval == MPI_SUCCESS,
"MPI error performing All-to-All.");
276 int recvcounts[],
int rdispls[],
279 int retval = MPI_Alltoallv(sendbuf, sendcounts, sdispls, sendtype, recvbuf,
280 recvcounts, rdispls, recvtype,
m_comm);
282 ASSERTL0(retval == MPI_SUCCESS,
"MPI error performing All-to-All-v.");
291 int retval = MPI_Allgather(sendbuf, sendcount, sendtype, recvbuf, recvcount,
294 ASSERTL0(retval == MPI_SUCCESS,
"MPI error performing Allgather.");
301 void *recvbuf,
int recvcounts[],
int rdispls[],
304 int retval = MPI_Allgatherv(sendbuf, sendcount, sendtype, recvbuf,
305 recvcounts, rdispls, recvtype,
m_comm);
307 ASSERTL0(retval == MPI_SUCCESS,
"MPI error performing Allgatherv.");
316 int retval = MPI_Allgatherv(MPI_IN_PLACE, 0, MPI_DATATYPE_NULL, recvbuf,
317 recvcounts, rdispls, recvtype,
m_comm);
319 ASSERTL0(retval == MPI_SUCCESS,
"MPI error performing Allgatherv.");
327 int retval = MPI_Bcast(
buffer, count, dt, root,
m_comm);
329 ASSERTL0(retval == MPI_SUCCESS,
"MPI error performing Bcast-v.");
339 int retval = MPI_Gather(sendbuf, sendcount, sendtype, recvbuf, recvcount,
342 ASSERTL0(retval == MPI_SUCCESS,
"MPI error performing Gather.");
352 int retval = MPI_Scatter(sendbuf, sendcount, sendtype, recvbuf, recvcount,
355 ASSERTL0(retval == MPI_SUCCESS,
"MPI error performing Scatter.");
362 [[maybe_unused]]
int indegree, [[maybe_unused]]
const int sources[],
363 [[maybe_unused]]
const int sourceweights[], [[maybe_unused]]
int reorder)
366 ASSERTL0(
false,
"MPI_Dist_graph_create_adjacent is not supported in your "
367 "installed MPI version.");
369 int retval = MPI_Dist_graph_create_adjacent(
370 m_comm, indegree, sources, sourceweights, indegree, sources,
371 sourceweights, MPI_INFO_NULL, reorder, &
m_comm);
374 "MPI error performing Dist_graph_create_adjacent.")
382 [[maybe_unused]]
void *sendbuf, [[maybe_unused]]
int sendcounts[],
383 [[maybe_unused]]
int sdispls[], [[maybe_unused]]
CommDataType sendtype,
384 [[maybe_unused]]
void *recvbuf, [[maybe_unused]]
int recvcounts[],
385 [[maybe_unused]]
int rdispls[], [[maybe_unused]]
CommDataType recvtype)
388 ASSERTL0(
false,
"MPI_Neighbor_alltoallv is not supported in your "
389 "installed MPI version.");
392 MPI_Neighbor_alltoallv(sendbuf, sendcounts, sdispls, sendtype, recvbuf,
393 recvcounts, rdispls, recvtype,
m_comm);
395 ASSERTL0(retval == MPI_SUCCESS,
"MPI error performing NeighborAllToAllV.");
406 std::static_pointer_cast<CommRequestMpi>(request);
407 MPI_Irsend(buf, count, dt, dest, 0,
m_comm, req->GetRequest(
loc));
417 std::static_pointer_cast<CommRequestMpi>(request);
418 MPI_Isend(buf, count, dt, dest, 0,
m_comm, req->GetRequest(
loc));
428 std::static_pointer_cast<CommRequestMpi>(request);
429 MPI_Send_init(buf, count, dt, dest, 0,
m_comm, req->GetRequest(
loc));
439 std::static_pointer_cast<CommRequestMpi>(request);
440 MPI_Irecv(buf, count, dt, source, 0,
m_comm, req->GetRequest(
loc));
450 std::static_pointer_cast<CommRequestMpi>(request);
451 MPI_Recv_init(buf, count, dt, source, 0,
m_comm, req->GetRequest(
loc));
460 std::static_pointer_cast<CommRequestMpi>(request);
461 if (req->GetNumRequest() != 0)
463 MPI_Startall(req->GetNumRequest(), req->GetRequest(0));
473 std::static_pointer_cast<CommRequestMpi>(request);
474 if (req->GetNumRequest() != 0)
476 MPI_Waitall(req->GetNumRequest(), req->GetRequest(0),
477 MPI_STATUSES_IGNORE);
498 "Rows/Columns/Time do not match comm size.");
505 int myCol =
m_rank % pColumns;
506 int myRow = (
m_rank - myCol) / pColumns;
511 MPI_Comm_split(
m_comm, myRow, myCol, &newComm);
517 MPI_Comm_split(
m_comm, myCol, myRow, &newComm);
522 constexpr int dims = 3;
523 const int sizes[dims] = {pRows, pColumns, pTime};
524 const int periods[dims] = {0, 0, 0};
525 constexpr int reorder = 1;
527 MPI_Cart_create(
m_comm, dims, sizes, periods, reorder, &gridComm);
529 constexpr int keepRow[dims] = {0, 1, 0};
530 MPI_Cart_sub(gridComm, keepRow, &newComm);
533 constexpr int keepCol[dims] = {1, 0, 0};
534 MPI_Cart_sub(gridComm, keepCol, &newComm);
537 constexpr int keepTime[dims] = {0, 0, 1};
538 MPI_Cart_sub(gridComm, keepTime, &newComm);
541 constexpr int keepSpace[dims] = {1, 1, 0};
542 MPI_Cart_sub(gridComm, keepSpace, &newComm);
556 MPI_Comm_split(
m_comm, flag ? flag : MPI_UNDEFINED, 0, &newComm);
561 return std::shared_ptr<Comm>();
566 return std::shared_ptr<Comm>(
new CommMpi(newComm));
575 std::pair<CommSharedPtr, CommSharedPtr> ret;
578 ASSERTL0(
false,
"Not implemented for non-MPI-3 versions.");
582 MPI_Comm_split_type(MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED,
m_rank,
583 MPI_INFO_NULL, &nodeComm);
587 ret.first = std::shared_ptr<Comm>(
new CommMpi(nodeComm));
589 if (ret.first->GetRank() == 0)
591 ret.second->SplitComm(1, ret.second->GetSize());
#define ASSERTL0(condition, msg)
#define NEKERROR(type, msg)
Assert Level 0 – Fundamental assert which is used whether in FULLDEBUG, DEBUG or OPT compilation mode...
Base communications class.
CommSharedPtr m_commColumn
Column communicator.
CommSharedPtr m_commRow
Row communicator.
int GetSize() const
Returns number of processes.
int m_size
Number of processes.
std::string m_type
Type of communication.
CommSharedPtr m_commSpace
void v_WaitAll(CommRequestSharedPtr request) final
void v_AlltoAll(void *sendbuf, int sendcount, CommDataType sendtype, void *recvbuf, int recvcount, CommDataType recvtype) final
CommRequestSharedPtr v_CreateRequest(int num) final
CommMpi(int narg, char *arg[])
void v_DistGraphCreateAdjacent(int indegree, const int sources[], const int sourceweights[], int reorder) final
void v_Bcast(void *buffer, int count, CommDataType dt, int root) final
void v_AllGatherv(void *sendbuf, int sendcount, CommDataType sendtype, void *recvbuf, int recvcounts[], int rdispls[], CommDataType recvtype) final
CommSharedPtr v_CommCreateIf(int flag) final
std::pair< CommSharedPtr, CommSharedPtr > v_SplitCommNode() final
void v_SendRecv(void *sendbuf, int sendcount, CommDataType sendtype, int dest, void *recvbuf, int recvcount, CommDataType recvtype, int source) final
void v_NeighborAlltoAllv(void *sendbuf, int sendcounts[], int sdispls[], CommDataType sendtype, void *recvbuf, int recvcounts[], int rdispls[], CommDataType recvtype) final
void v_AllGather(void *sendbuf, int sendcount, CommDataType sendtype, void *recvbuf, int recvcount, CommDataType recvtype) final
void v_Recv(void *buf, int count, CommDataType dt, int source) final
void v_StartAll(CommRequestSharedPtr request) final
void v_AllReduce(void *buf, int count, CommDataType dt, enum ReduceOperator pOp) final
void v_Finalise() override
void v_Irecv(void *buf, int count, CommDataType dt, int source, CommRequestSharedPtr request, int loc) final
void v_Irsend(void *buf, int count, CommDataType dt, int dest, CommRequestSharedPtr request, int loc) final
void v_Gather(void *sendbuf, int sendcount, CommDataType sendtype, void *recvbuf, int recvcount, CommDataType recvtype, int root) final
static std::string className
Name of class.
void v_RecvInit(void *buf, int count, CommDataType dt, int source, CommRequestSharedPtr request, int loc) final
static CommSharedPtr create(int narg, char *arg[])
Creates an instance of this class.
void v_Isend(void *buf, int count, CommDataType dt, int dest, CommRequestSharedPtr request, int loc) final
void v_AlltoAllv(void *sendbuf, int sendcounts[], int sdispls[], CommDataType sendtype, void *recvbuf, int recvcounts[], int rdispls[], CommDataType recvtype) final
bool v_TreatAsRankZero() final
void v_Send(void *buf, int count, CommDataType dt, int dest) final
void v_SplitComm(int pRows, int pColumns, int pTime) override
void v_Scatter(void *sendbuf, int sendcount, CommDataType sendtype, void *recvbuf, int recvcount, CommDataType recvtype, int root) final
std::tuple< int, int, int > v_GetVersion() final
void v_SendInit(void *buf, int count, CommDataType dt, int dest, CommRequestSharedPtr request, int loc) final
Class for communicator request type.
tKey RegisterCreatorFunction(tKey idKey, CreatorFunction classCreator, std::string pDesc="")
Register a class with the factory.
unsigned int CommDataType
std::shared_ptr< CommRequest > CommRequestSharedPtr
std::shared_ptr< CommRequestMpi > CommRequestMpiSharedPtr
CommFactory & GetCommFactory()
ReduceOperator
Type of operation to perform in AllReduce.
std::shared_ptr< Comm > CommSharedPtr
Pointer to a Communicator object.