
/* -*- Mode: C; c-basic-offset:2 ; indent-tabs-mode:nil -*- */
/*
 * Copyright (c) 2006      The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation. All rights reserved.
 * Copyright (c) 2006      The Technical University of Chemnitz. All
 *                         rights reserved.
 * Copyright (c) 2014-2017 Research Organization for Information Science
 *                         and Technology (RIST). All rights reserved.
 * Copyright (c) 2015      Los Alamos National Security, LLC. All rights
 *                         reserved.
 * Copyright (c) 2017      IBM Corporation. All rights reserved.
 * Copyright (c) 2018      FUJITSU LIMITED. All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * Author(s): Torsten Hoefler <htor@cs.indiana.edu>
 *
 */
#include "nbc_internal.h"

/* cannot cache schedules because one cannot check locally if the pattern is the same!! */
#undef NBC_CACHE_SCHEDULE

#ifdef NBC_CACHE_SCHEDULE
/* tree comparison function for schedule cache */
int NBC_Ineighbor_allgather_args_compare(NBC_Ineighbor_allgather_args *a, NBC_Ineighbor_allgather_args *b, void *param) {
  if ((a->sbuf == b->sbuf) &&
      (a->scount == b->scount) &&
      (a->stype == b->stype) &&
      (a->rbuf == b->rbuf) &&
      (a->rcount == b->rcount) &&
      (a->rtype == b->rtype)) {
    return 0;
  }

  if (a->sbuf < b->sbuf) {
    return -1;
  }

  return 1;
}
#endif

static int nbc_neighbor_allgather_init(const void *sbuf, int scount, MPI_Datatype stype, void *rbuf,
                                       int rcount, MPI_Datatype rtype, struct ompi_communicator_t *comm,
                                       ompi_request_t ** request,
                                       struct mca_coll_base_module_2_3_0_t *module, bool persistent) {
  int res, indegree, outdegree, *srcs, *dsts;
  MPI_Aint rcvext;
  ompi_coll_libnbc_module_t *libnbc_module = (ompi_coll_libnbc_module_t*) module;
  NBC_Schedule *schedule;

  res = ompi_datatype_type_extent (rtype, &rcvext);
  if (MPI_SUCCESS != res) {
    NBC_Error("MPI Error in ompi_datatype_type_extent() (%i)", res);
    return res;
  }

#ifdef NBC_CACHE_SCHEDULE
  NBC_Ineighbor_allgather_args *args, *found, search;

  /* search schedule in communicator specific tree */
  search.sbuf = sbuf;
  search.scount = scount;
  search.stype = stype;
  search.rbuf = rbuf;
  search.rcount = rcount;
  search.rtype = rtype;
  found = (NBC_Ineighbor_allgather_args *) hb_tree_search ((hb_tree *) libnbc_module->NBC_Dict[NBC_NEIGHBOR_ALLGATHER],
                                                           &search);
  if (NULL == found) {
#endif
    schedule = OBJ_NEW(NBC_Schedule);
    if (OPAL_UNLIKELY(NULL == schedule)) {
      return OMPI_ERR_OUT_OF_RESOURCE;
    }

    res = NBC_Comm_neighbors (comm, &srcs, &indegree, &dsts, &outdegree);
    if (OPAL_UNLIKELY(OMPI_SUCCESS != res)) {
      OBJ_RELEASE(schedule);
      return res;
    }

    for (int i = 0 ; i < indegree ; ++i) {
      if (MPI_PROC_NULL != srcs[i]) {
        res = NBC_Sched_recv ((char *) rbuf + i * rcount * rcvext, true, rcount, rtype, srcs[i], schedule, false);
        if (OPAL_UNLIKELY(OMPI_SUCCESS != res)) {
          break;
        }
      }
    }

    free (srcs);

    if (OPAL_UNLIKELY(OMPI_SUCCESS != res)) {
      OBJ_RELEASE(schedule);
      free (dsts);
      return res;
    }

    for (int i = 0 ; i < outdegree ; ++i) {
      if (MPI_PROC_NULL != dsts[i]) {
        res = NBC_Sched_send ((char *) sbuf, false, scount, stype, dsts[i], schedule, false);
        if (OPAL_UNLIKELY(OMPI_SUCCESS != res)) {
          break;
        }
      }
    }

    free (dsts);

    if (OPAL_UNLIKELY(OMPI_SUCCESS != res)) {
      OBJ_RELEASE(schedule);
      return res;
    }

    res = NBC_Sched_commit (schedule);
    if (OPAL_UNLIKELY(OMPI_SUCCESS != res)) {
      OBJ_RELEASE(schedule);
      return res;
    }

#ifdef NBC_CACHE_SCHEDULE
    /* save schedule to tree */
    args = (NBC_Ineighbor_allgather_args *) malloc (sizeof (*args));
    if (NULL != args) {
      args->sbuf = sbuf;
      args->scount = scount;
      args->stype = stype;
      args->rbuf = rbuf;
      args->rcount = rcount;
      args->rtype = rtype;
      args->schedule = schedule;
      res = hb_tree_insert ((hb_tree *) libnbc_module->NBC_Dict[NBC_NEIGHBOR_ALLGATHER], args, args, 0);
      if (0 == res) {
        OBJ_RETAIN(schedule);

        /* increase number of elements for A2A */
        if (++libnbc_module->NBC_Dict_size[NBC_NEIGHBOR_ALLGATHER] > NBC_SCHED_DICT_UPPER) {
          NBC_SchedCache_dictwipe ((hb_tree *) libnbc_module->NBC_Dict[NBC_NEIGHBOR_ALLGATHER],
                                   &libnbc_module->NBC_Dict_size[NBC_NEIGHBOR_ALLGATHER]);
        }
      } else {
        NBC_Error("error in dict_insert() (%i)", res);
        free (args);
      }
    }
  } else {
    /* found schedule */
    schedule = found->schedule;
    OBJ_RETAIN(schedule);
  }
#endif

  res = NBC_Schedule_request(schedule, comm, libnbc_module, persistent, request, NULL);
  if (OPAL_UNLIKELY(OMPI_SUCCESS != res)) {
    OBJ_RELEASE(schedule);
    return res;
  }

  return OMPI_SUCCESS;
}
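
/*
 * Illustrative sketch, assuming a periodic 1-D cartesian communicator of
 * 4 ranks (not part of the component logic): NBC_Comm_neighbors() then
 * reports indegree = outdegree = 2 for every rank, and rank 1 would
 * schedule, roughly,
 *
 *   recv from srcs[0] = 0 into (char *) rbuf + 0 * rcount * rcvext
 *   recv from srcs[1] = 2 into (char *) rbuf + 1 * rcount * rcvext
 *   send sbuf to dsts[0] = 0 and to dsts[1] = 2
 *
 * so rbuf must provide indegree * rcount elements of rtype.  The neighbor
 * ordering is whatever NBC_Comm_neighbors() returns for the topology; the
 * offsets follow directly from the loops above.
 */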

int ompi_coll_libnbc_ineighbor_allgather(const void *sbuf, int scount, MPI_Datatype stype, void *rbuf,
                                         int rcount, MPI_Datatype rtype, struct ompi_communicator_t *comm,
                                         ompi_request_t ** request, struct mca_coll_base_module_2_3_0_t *module) {
  int res = nbc_neighbor_allgather_init(sbuf, scount, stype, rbuf, rcount, rtype,
                                        comm, request, module, false);
  if (OPAL_UNLIKELY(OMPI_SUCCESS != res)) {
    return res;
  }

  res = NBC_Start(*(ompi_coll_libnbc_request_t **)request);
  if (OPAL_UNLIKELY(OMPI_SUCCESS != res)) {
    NBC_Return_handle (*(ompi_coll_libnbc_request_t **)request);
    *request = &ompi_request_null.request;
    return res;
  }

  return OMPI_SUCCESS;
}
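
/*
 * Minimal user-level sketch of the non-blocking path above, assuming a 2x2
 * periodic cartesian communicator cart_comm (so indegree = 4); illustrative
 * only, not part of this file:
 *
 *   int sval = rank, rvals[4];
 *   MPI_Request req;
 *   MPI_Ineighbor_allgather(&sval, 1, MPI_INT, rvals, 1, MPI_INT,
 *                           cart_comm, &req);
 *   ... overlap independent computation here ...
 *   MPI_Wait(&req, MPI_STATUS_IGNORE);
 */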

/* better binomial bcast
 * working principle:
 * - each node gets a virtual rank vrank
 * - the 'root' node gets vrank 0
 * - node 0 gets the vrank of the 'root'
 * - all other ranks stay identical (they do not matter)
 *
 * Algorithm:
 * - in round r, each node with 2^r <= vrank < 2^(r+1) receives from node
 *   vrank - 2^r (vrank=1 receives from 0, vrank 0 never receives)
 * - in each round r, a node sends to node vrank + 2^r
 * - a node stops sending once 2^r > commsize
 */
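/*
 * Worked example for the algorithm above (p = 8, root = 0, so maxr = 3):
 *   round 0: vrank 0 sends to vrank 1
 *   round 1: vranks 0,1 send to vranks 2,3
 *   round 2: vranks 0-3 send to vranks 4-7
 * Every vrank v > 0 receives exactly once, in round r = floor(log2(v)),
 * from vrank v - 2^r, which matches the receive condition 2^r <= v < 2^(r+1)
 * in bcast_sched_binomial() below.
 */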
#define RANK2VRANK(rank, vrank, root) \
{ \
  vrank = rank; \
  if (rank == 0) vrank = root; \
  if (rank == root) vrank = 0; \
}
#define VRANK2RANK(rank, vrank, root) \
{ \
  rank = vrank; \
  if (vrank == 0) rank = root; \
  if (vrank == root) rank = 0; \
}
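/* Example of the mapping: with root = 3, RANK2VRANK sends rank 0 to vrank 3,
 * rank 3 to vrank 0, and leaves every other rank unchanged; VRANK2RANK is the
 * inverse of the same swap. */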

static inline int bcast_sched_binomial(int rank, int p, int root, NBC_Schedule *schedule, void *buffer, int count, MPI_Datatype datatype) {
  int maxr, vrank, peer, res;

  maxr = (int)ceil((log((double)p)/LOG2));

  RANK2VRANK(rank, vrank, root);

  /* receive from the right hosts */
  if (vrank != 0) {
    for (int r = 0 ; r < maxr ; ++r) {
      if ((vrank >= (1 << r)) && (vrank < (1 << (r + 1)))) {
        VRANK2RANK(peer, vrank - (1 << r), root);
        res = NBC_Sched_recv (buffer, false, count, datatype, peer, schedule, false);
        if (OPAL_UNLIKELY(OMPI_SUCCESS != res)) {
          return res;
        }
      }
    }

    res = NBC_Sched_barrier (schedule);
    if (OPAL_UNLIKELY(OMPI_SUCCESS != res)) {
      return res;
    }
  }

  /* now send to the right hosts */
  for (int r = 0 ; r < maxr ; ++r) {
    if (((vrank + (1 << r) < p) && (vrank < (1 << r))) || (vrank == 0)) {
      VRANK2RANK(peer, vrank + (1 << r), root);
      res = NBC_Sched_send (buffer, false, count, datatype, peer, schedule, false);
      if (OPAL_UNLIKELY(OMPI_SUCCESS != res)) {
        return res;
      }
    }
  }

  return OMPI_SUCCESS;
}

/* simple linear MPI_Ibcast */
static inline int bcast_sched_linear(int rank, int p, int root, NBC_Schedule *schedule, void *buffer, int count, MPI_Datatype datatype) {
  int res;

  /* send to all others */
  if (rank == root) {
    for (int peer = 0 ; peer < p ; ++peer) {
      if (peer != root) {
        /* send msg to peer */
        res = NBC_Sched_send (buffer, false, count, datatype, peer, schedule, false);
        if (OPAL_UNLIKELY(OMPI_SUCCESS != res)) {
          return res;
        }
      }
    }
  } else {
    /* recv msg from root */
    res = NBC_Sched_recv (buffer, false, count, datatype, root, schedule, false);
    if (OPAL_UNLIKELY(OMPI_SUCCESS != res)) {
      return res;
    }
  }

  return OMPI_SUCCESS;
}

/* simple chained MPI_Ibcast */
static inline int bcast_sched_chain(int rank, int p, int root, NBC_Schedule *schedule, void *buffer, int count, MPI_Datatype datatype, int fragsize, size_t size) {
  int res, vrank, rpeer, speer, numfrag, fragcount, thiscount;
  MPI_Aint ext;
  char *buf;

  RANK2VRANK(rank, vrank, root);
  VRANK2RANK(rpeer, vrank-1, root);
  VRANK2RANK(speer, vrank+1, root);
  res = ompi_datatype_type_extent(datatype, &ext);
  if (MPI_SUCCESS != res) {
    NBC_Error("MPI Error in ompi_datatype_type_extent() (%i)", res);
    return res;
  }

  if (count == 0) {
    return OMPI_SUCCESS;
  }

  numfrag = count * size/fragsize;
  if ((count * size) % fragsize != 0) {
    numfrag++;
  }

  fragcount = count/numfrag;
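
  /* Numeric illustration (values chosen for this comment only): with
   * count = 1001 elements of a 4-byte datatype (size = 4, 4004 bytes total)
   * and fragsize = 1024, numfrag becomes 4 and fragcount = 1001 / 4 = 250;
   * the last fragment then carries thiscount = 1001 - 3 * 250 = 251 elements
   * via the adjustment in the loop below. */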

  for (int fragnum = 0 ; fragnum < numfrag ; ++fragnum) {
    buf = (char *) buffer + fragnum * fragcount * ext;
    thiscount = fragcount;
    if (fragnum == numfrag-1) {
      /* last fragment may not be full */
      thiscount = count - fragcount * fragnum;
    }

    /* root does not receive */
    if (vrank != 0) {
      res = NBC_Sched_recv (buf, false, thiscount, datatype, rpeer, schedule, true);
      if (OPAL_UNLIKELY(OMPI_SUCCESS != res)) {
        return res;
      }
    }

    /* last rank does not send */
    if (vrank != p-1) {
      res = NBC_Sched_send (buf, false, thiscount, datatype, speer, schedule, false);
      if (OPAL_UNLIKELY(OMPI_SUCCESS != res)) {
        return res;
      }

      /* this barrier here seems awkward but isn't!!!! */
      if (vrank == 0) {
        res = NBC_Sched_barrier (schedule);
        if (OPAL_UNLIKELY(OMPI_SUCCESS != res)) {
          return res;
        }
      }
    }
  }

  return OMPI_SUCCESS;
}

int ompi_coll_libnbc_neighbor_allgather_init(const void *sbuf, int scount, MPI_Datatype stype, void *rbuf,
                                             int rcount, MPI_Datatype rtype, struct ompi_communicator_t *comm,
                                             MPI_Info info, ompi_request_t ** request, struct mca_coll_base_module_2_3_0_t *module) {
  int res = nbc_neighbor_allgather_init(sbuf, scount, stype, rbuf, rcount, rtype,
                                        comm, request, module, true);
  if (OPAL_UNLIKELY(OMPI_SUCCESS != res)) {
    return res;
  }

  return OMPI_SUCCESS;
}
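
/*
 * Illustrative user-level sketch of the persistent path set up by
 * ompi_coll_libnbc_neighbor_allgather_init(); graph_comm and niters are
 * placeholder names, and the receive buffer assumes indegree = 2:
 *
 *   int sval = rank, rvals[2];
 *   MPI_Request req;
 *   MPI_Neighbor_allgather_init(&sval, 1, MPI_INT, rvals, 1, MPI_INT,
 *                               graph_comm, MPI_INFO_NULL, &req);
 *   for (int iter = 0; iter < niters; ++iter) {
 *     MPI_Start(&req);
 *     MPI_Wait(&req, MPI_STATUS_IGNORE);
 *   }
 *   MPI_Request_free(&req);
 *
 * The schedule is built once with persistent = true and only started when
 * MPI_Start() is called on the returned request.
 */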