
Update the basic module to dynamically allocate the right number of requests.

Remove fields that are no longer needed.
This commit is contained in:
George Bosilca 2015-09-22 03:32:24 +02:00
parent a324602174
Commit 01b32caf98
19 changed files with 363 additions and 324 deletions
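Before the per-file diffs, a minimal, self-contained sketch of the allocation pattern this commit introduces may help: grow a cached request array with realloc only when it is too small, and initialize every newly exposed slot to MPI_REQUEST_NULL. The req_t and req_cache_t names below are stand-ins for ompi_request_t and mca_coll_base_comm_t, and the failure-checked realloc plus the capacity update are assumptions where the hunks below are truncated.

#include <stdlib.h>

/* Stand-ins for ompi_request_t and MPI_REQUEST_NULL. */
typedef struct req req_t;
#define REQ_NULL ((req_t *) 0)

typedef struct {
    req_t **reqs;      /* cached array, like mcct_reqs         */
    int     num_reqs;  /* current capacity, like mcct_num_reqs */
} req_cache_t;

/* Grow-on-demand helper in the spirit of coll_base_comm_get_reqs(). */
static req_t **cache_get_reqs(req_cache_t *data, int nreqs)
{
    if (0 == nreqs) return NULL;

    if (data->num_reqs < nreqs) {
        req_t **tmp = (req_t **) realloc(data->reqs, sizeof(req_t *) * nreqs);
        if (NULL == tmp) return NULL;  /* keep the old array on failure */
        data->reqs = tmp;
        /* Mark new slots as null requests so they are safe to free later. */
        for (int i = data->num_reqs; i < nreqs; i++)
            data->reqs[i] = REQ_NULL;
        data->num_reqs = nreqs;        /* assumption: capacity is updated */
    }
    return data->reqs;
}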

View file

@ -119,9 +119,11 @@ OBJ_CLASS_INSTANCE(mca_coll_base_comm_t, opal_object_t,
ompi_request_t** coll_base_comm_get_reqs(mca_coll_base_comm_t* data, int nreqs)
{
if( data->mcct_num_reqs <= nreqs ) {
if( 0 == nreqs ) return NULL;
if( data->mcct_num_reqs <= nreqs )
data->mcct_reqs = (ompi_request_t**)realloc(data->mcct_reqs, sizeof(ompi_request_t*) * nreqs);
}
if( NULL != data->mcct_reqs ) {
for( int i = data->mcct_num_reqs; i < nreqs; i++ )
data->mcct_reqs[i] = MPI_REQUEST_NULL;

View file

@ -353,9 +353,11 @@ OMPI_DECLSPEC OBJ_CLASS_DECLARATION(mca_coll_base_comm_t);
static inline void ompi_coll_base_free_reqs(ompi_request_t **reqs, int count)
{
int i;
for (i = 0; i < count; ++i)
if( MPI_REQUEST_NULL != reqs[i] )
for (i = 0; i < count; ++i) {
if( MPI_REQUEST_NULL != reqs[i] ) {
ompi_request_free(&reqs[i]);
}
}
}
/**

View file

@ -283,17 +283,6 @@ BEGIN_C_DECLS
int mca_coll_basic_ft_event(int status);
/* Utility functions */
static inline void mca_coll_basic_free_reqs(ompi_request_t ** reqs,
int count)
{
int i;
for (i = 0; i < count; ++i)
ompi_request_free(&reqs[i]);
}
struct mca_coll_basic_module_t {
mca_coll_base_module_t super;
@ -303,6 +292,23 @@ struct mca_coll_basic_module_t {
typedef struct mca_coll_basic_module_t mca_coll_basic_module_t;
OBJ_CLASS_DECLARATION(mca_coll_basic_module_t);
/* Utility functions */
static inline void mca_coll_basic_free_reqs(ompi_request_t ** reqs, int count)
{
int i;
for (i = 0; i < count; ++i)
if( MPI_REQUEST_NULL != reqs[i] ) {
ompi_request_free(&reqs[i]);
}
}
/**
* Return the array of requests on the data. If the array was not initialized
or if its size was too small, allocate it to fit the requested size.
*/
ompi_request_t** mca_coll_basic_get_reqs(mca_coll_basic_module_t* data, int nreqs);
END_C_DECLS
#endif /* MCA_COLL_BASIC_EXPORT_H */
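The header diff above pairs the allocator with a null-guarded release. In the stand-in types from the sketch after the commit summary, the same idea looks like this; release_req is a hypothetical placeholder for ompi_request_free():

void release_req(req_t **req);  /* hypothetical stand-in for ompi_request_free() */

/* Null-guarded release mirroring the new mca_coll_basic_free_reqs():
 * skipping null slots makes it safe on a partially posted array after
 * an error, or after a successful wait that reset the requests. */
static void free_reqs(req_t **reqs, int n)
{
    for (int i = 0; i < n; i++) {
        if (REQ_NULL != reqs[i]) {
            release_req(&reqs[i]);
        }
    }
}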

View file

@ -47,12 +47,12 @@ mca_coll_basic_allgather_inter(const void *sbuf, int scount,
struct ompi_communicator_t *comm,
mca_coll_base_module_t *module)
{
int rank, root = 0, size, rsize, err, i;
int rank, root = 0, size, rsize, err, i, line;
char *tmpbuf = NULL, *ptmp;
ptrdiff_t rlb, slb, rextent, sextent, incr;
ompi_request_t *req;
mca_coll_basic_module_t *basic_module = (mca_coll_basic_module_t*) module;
ompi_request_t **reqs = basic_module->mccb_reqs;
ompi_request_t **reqs = NULL;
rank = ompi_comm_rank(comm);
size = ompi_comm_size(comm);
@ -71,35 +71,29 @@ mca_coll_basic_allgather_inter(const void *sbuf, int scount,
err = MCA_PML_CALL(send(sbuf, scount, sdtype, root,
MCA_COLL_BASE_TAG_ALLGATHER,
MCA_PML_BASE_SEND_STANDARD, comm));
if (OMPI_SUCCESS != err) {
return err;
}
if (OMPI_SUCCESS != err) { line = __LINE__; goto exit; }
} else {
/* receive a msg. from all other procs. */
err = ompi_datatype_get_extent(rdtype, &rlb, &rextent);
if (OMPI_SUCCESS != err) {
return err;
}
if (OMPI_SUCCESS != err) { line = __LINE__; goto exit; }
err = ompi_datatype_get_extent(sdtype, &slb, &sextent);
if (OMPI_SUCCESS != err) {
return err;
}
if (OMPI_SUCCESS != err) { line = __LINE__; goto exit; }
/* Get a request array of the right size */
reqs = mca_coll_basic_get_reqs(basic_module, rsize + 1);
if( NULL == reqs ) { line = __LINE__; goto exit; }
/* Do a send-recv between the two root procs. to avoid deadlock */
err = MCA_PML_CALL(isend(sbuf, scount, sdtype, 0,
MCA_COLL_BASE_TAG_ALLGATHER,
MCA_PML_BASE_SEND_STANDARD,
comm, &reqs[rsize]));
if (OMPI_SUCCESS != err) {
return err;
}
if (OMPI_SUCCESS != err) { line = __LINE__; goto exit; }
err = MCA_PML_CALL(irecv(rbuf, rcount, rdtype, 0,
MCA_COLL_BASE_TAG_ALLGATHER, comm,
&reqs[0]));
if (OMPI_SUCCESS != err) {
return err;
}
if (OMPI_SUCCESS != err) { line = __LINE__; goto exit; }
incr = rextent * rcount;
ptmp = (char *) rbuf + incr;
@ -107,45 +101,33 @@ mca_coll_basic_allgather_inter(const void *sbuf, int scount,
err = MCA_PML_CALL(irecv(ptmp, rcount, rdtype, i,
MCA_COLL_BASE_TAG_ALLGATHER,
comm, &reqs[i]));
if (MPI_SUCCESS != err) {
return err;
}
if (MPI_SUCCESS != err) { line = __LINE__; goto exit; }
}
err = ompi_request_wait_all(rsize + 1, reqs, MPI_STATUSES_IGNORE);
if (OMPI_SUCCESS != err) {
return err;
}
if (OMPI_SUCCESS != err) { line = __LINE__; goto exit; }
/* Step 2: exchange the results between the root processes */
tmpbuf = (char *) malloc(scount * size * sextent);
if (NULL == tmpbuf) {
return err;
}
if (NULL == tmpbuf) { line = __LINE__; goto exit; }
err = MCA_PML_CALL(isend(rbuf, rsize * rcount, rdtype, 0,
MCA_COLL_BASE_TAG_ALLGATHER,
MCA_PML_BASE_SEND_STANDARD, comm, &req));
if (OMPI_SUCCESS != err) {
goto exit;
}
if (OMPI_SUCCESS != err) { line = __LINE__; goto exit; }
err = MCA_PML_CALL(recv(tmpbuf, size * scount, sdtype, 0,
MCA_COLL_BASE_TAG_ALLGATHER, comm,
MPI_STATUS_IGNORE));
if (OMPI_SUCCESS != err) {
goto exit;
}
if (OMPI_SUCCESS != err) { line = __LINE__; goto exit; }
err = ompi_request_wait( &req, MPI_STATUS_IGNORE);
if (OMPI_SUCCESS != err) {
goto exit;
}
if (OMPI_SUCCESS != err) { line = __LINE__; goto exit; }
}
/* Step 3: bcast the data to the remote group. This
* happens in both groups simultaniously, thus we can
* happens in both groups simultaneously, thus we can
* not use coll_bcast (this would deadlock).
*/
if (rank != root) {
@ -153,9 +135,7 @@ mca_coll_basic_allgather_inter(const void *sbuf, int scount,
err = MCA_PML_CALL(recv(rbuf, rsize * rcount, rdtype, 0,
MCA_COLL_BASE_TAG_ALLGATHER, comm,
MPI_STATUS_IGNORE));
if (OMPI_SUCCESS != err) {
goto exit;
}
if (OMPI_SUCCESS != err) { line = __LINE__; goto exit; }
} else {
/* Send the data to every other process in the remote group
@ -165,19 +145,19 @@ mca_coll_basic_allgather_inter(const void *sbuf, int scount,
MCA_COLL_BASE_TAG_ALLGATHER,
MCA_PML_BASE_SEND_STANDARD,
comm, &reqs[i - 1]));
if (OMPI_SUCCESS != err) {
goto exit;
}
if (OMPI_SUCCESS != err) { line = __LINE__; goto exit; }
}
err = ompi_request_wait_all(rsize - 1, reqs, MPI_STATUSES_IGNORE);
if (OMPI_SUCCESS != err) {
goto exit;
}
if (OMPI_SUCCESS != err) { line = __LINE__; goto exit; }
}
exit:
if( MPI_SUCCESS != err ) {
OPAL_OUTPUT( (ompi_coll_base_framework.framework_output,"%s:%4d\tError occurred %d, rank %2d",
__FILE__, line, err, rank) );
if( NULL != reqs ) mca_coll_basic_free_reqs(reqs, rsize+1);
}
if (NULL != tmpbuf) {
free(tmpbuf);
}
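Taken together, the allgather changes above show the control flow every basic collective now follows: fetch a right-sized array from the module cache, post the nonblocking operations into it, wait on all of them, and free the requests only on the error path. Below is a hedged sketch of that shape, reusing the stand-ins from the earlier blocks; post_recv and wait_all are hypothetical placeholders for MCA_PML_CALL(irecv(...)) and ompi_request_wait_all().

int post_recv(int peer, req_t **req);  /* hypothetical PML placeholder  */
int wait_all(int n, req_t **reqs);     /* hypothetical wait placeholder */

/* Single exit label, as in mca_coll_basic_allgather_inter(): on failure
 * free the outstanding requests; on success wait_all() has already
 * completed them. */
static int gather_from_peers(req_cache_t *cache, int npeers)
{
    int err = 0, i;
    req_t **reqs = cache_get_reqs(cache, npeers);
    if (NULL == reqs) { err = -1; goto exit; }  /* out of resources */

    for (i = 0; i < npeers; i++) {
        err = post_recv(i, &reqs[i]);
        if (0 != err) goto exit;
    }
    err = wait_all(npeers, reqs);

exit:
    if (0 != err && NULL != reqs)
        free_reqs(reqs, npeers);
    return err;
}

Freeing only on the error path works because a successful wait releases the requests and hands back MPI_REQUEST_NULL in every slot, so the null-guarded free would be a no-op there anyway; the unconditional free the old code performed was redundant.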

View file

@ -2,7 +2,7 @@
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2014 The University of Tennessee and The University
* Copyright (c) 2004-2015 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
@ -80,13 +80,13 @@ mca_coll_basic_allreduce_inter(const void *sbuf, void *rbuf, int count,
struct ompi_communicator_t *comm,
mca_coll_base_module_t *module)
{
int err, i, rank, root = 0, rsize;
int err, i, rank, root = 0, rsize, line;
ptrdiff_t lb, extent;
ptrdiff_t true_lb, true_extent;
char *tmpbuf = NULL, *pml_buffer = NULL;
ompi_request_t *req[2];
mca_coll_basic_module_t *basic_module = (mca_coll_basic_module_t*) module;
ompi_request_t **reqs = basic_module->mccb_reqs;
ompi_request_t **reqs = NULL;
rank = ompi_comm_rank(comm);
rsize = ompi_comm_remote_size(comm);
@ -111,41 +111,33 @@ mca_coll_basic_allreduce_inter(const void *sbuf, void *rbuf, int count,
}
tmpbuf = (char *) malloc(true_extent + (count - 1) * extent);
if (NULL == tmpbuf) {
return OMPI_ERR_OUT_OF_RESOURCE;
}
if (NULL == tmpbuf) { err = OMPI_ERR_OUT_OF_RESOURCE; line = __LINE__; goto exit; }
pml_buffer = tmpbuf - true_lb;
reqs = mca_coll_basic_get_reqs(basic_module, rsize - 1);
if( NULL == reqs ) { err = OMPI_ERR_OUT_OF_RESOURCE; line = __LINE__; goto exit; }
/* Do a send-recv between the two root procs. to avoid deadlock */
err = MCA_PML_CALL(irecv(rbuf, count, dtype, 0,
MCA_COLL_BASE_TAG_ALLREDUCE, comm,
&(req[0])));
if (OMPI_SUCCESS != err) {
goto exit;
}
if (OMPI_SUCCESS != err) { line = __LINE__; goto exit; }
err = MCA_PML_CALL(isend(sbuf, count, dtype, 0,
MCA_COLL_BASE_TAG_ALLREDUCE,
MCA_PML_BASE_SEND_STANDARD,
comm, &(req[1])));
if (OMPI_SUCCESS != err) {
goto exit;
}
if (OMPI_SUCCESS != err) { line = __LINE__; goto exit; }
err = ompi_request_wait_all(2, req, MPI_STATUSES_IGNORE);
if (OMPI_SUCCESS != err) {
goto exit;
}
if (OMPI_SUCCESS != err) { line = __LINE__; goto exit; }
/* Loop receiving and calling reduction function (C or Fortran). */
for (i = 1; i < rsize; i++) {
err = MCA_PML_CALL(recv(pml_buffer, count, dtype, i,
MCA_COLL_BASE_TAG_ALLREDUCE, comm,
MPI_STATUS_IGNORE));
if (MPI_SUCCESS != err) {
goto exit;
}
if (OMPI_SUCCESS != err) { line = __LINE__; goto exit; }
/* Perform the reduction */
ompi_op_reduce(op, pml_buffer, rbuf, count, dtype);
@ -155,9 +147,7 @@ mca_coll_basic_allreduce_inter(const void *sbuf, void *rbuf, int count,
err = MCA_PML_CALL(send(sbuf, count, dtype, root,
MCA_COLL_BASE_TAG_ALLREDUCE,
MCA_PML_BASE_SEND_STANDARD, comm));
if (OMPI_SUCCESS != err) {
goto exit;
}
if (OMPI_SUCCESS != err) { line = __LINE__; goto exit; }
}
@ -171,21 +161,16 @@ mca_coll_basic_allreduce_inter(const void *sbuf, void *rbuf, int count,
err = MCA_PML_CALL(irecv(pml_buffer, count, dtype, 0,
MCA_COLL_BASE_TAG_ALLREDUCE,
comm, &(req[1])));
if (OMPI_SUCCESS != err) {
goto exit;
}
if (OMPI_SUCCESS != err) { line = __LINE__; goto exit; }
err = MCA_PML_CALL(isend(rbuf, count, dtype, 0,
MCA_COLL_BASE_TAG_ALLREDUCE,
MCA_PML_BASE_SEND_STANDARD, comm,
&(req[0])));
if (OMPI_SUCCESS != err) {
goto exit;
}
if (OMPI_SUCCESS != err) { line = __LINE__; goto exit; }
err = ompi_request_wait_all(2, req, MPI_STATUSES_IGNORE);
if (OMPI_SUCCESS != err) {
goto exit;
}
if (OMPI_SUCCESS != err) { line = __LINE__; goto exit; }
/* distribute the data to other processes in remote group.
* Note that we start from 1 (not from zero), since zero
@ -198,17 +183,13 @@ mca_coll_basic_allreduce_inter(const void *sbuf, void *rbuf, int count,
MCA_COLL_BASE_TAG_ALLREDUCE,
MCA_PML_BASE_SEND_STANDARD, comm,
&reqs[i - 1]));
if (OMPI_SUCCESS != err) {
goto exit;
}
if (OMPI_SUCCESS != err) { line = __LINE__; goto exit; }
}
err =
ompi_request_wait_all(rsize - 1, reqs,
MPI_STATUSES_IGNORE);
if (OMPI_SUCCESS != err) {
goto exit;
}
if (OMPI_SUCCESS != err) { line = __LINE__; goto exit; }
}
} else {
err = MCA_PML_CALL(recv(rbuf, count, dtype, root,
@ -217,10 +198,14 @@ mca_coll_basic_allreduce_inter(const void *sbuf, void *rbuf, int count,
}
exit:
if( MPI_SUCCESS != err ) {
OPAL_OUTPUT((ompi_coll_base_framework.framework_output,"%s:%4d\tError occurred %d, rank %2d", __FILE__,
line, err, rank));
mca_coll_basic_free_reqs(reqs, rsize - 1);
}
if (NULL != tmpbuf) {
free(tmpbuf);
}
return err;
}

View file

@ -57,11 +57,7 @@ mca_coll_basic_alltoall_inter(const void *sbuf, int scount,
MPI_Aint sndinc;
MPI_Aint rcvinc;
ompi_request_t **req;
ompi_request_t **sreq;
ompi_request_t **rreq;
mca_coll_basic_module_t *basic_module = (mca_coll_basic_module_t*) module;
ompi_request_t **req, **sreq, **rreq;
/* Initialize. */
@ -81,7 +77,7 @@ mca_coll_basic_alltoall_inter(const void *sbuf, int scount,
/* Initiate all send/recv to/from others. */
nreqs = size * 2;
req = rreq = basic_module->mccb_reqs;
req = rreq = mca_coll_basic_get_reqs( (mca_coll_basic_module_t*) module, nreqs);
sreq = rreq + size;
prcv = (char *) rbuf;
@ -92,6 +88,7 @@ mca_coll_basic_alltoall_inter(const void *sbuf, int scount,
err = MCA_PML_CALL(irecv(prcv + (i * rcvinc), rcount, rdtype, i,
MCA_COLL_BASE_TAG_ALLTOALL, comm, rreq));
if (OMPI_SUCCESS != err) {
mca_coll_basic_free_reqs(req, nreqs);
return err;
}
}
@ -102,6 +99,7 @@ mca_coll_basic_alltoall_inter(const void *sbuf, int scount,
MCA_COLL_BASE_TAG_ALLTOALL,
MCA_PML_BASE_SEND_STANDARD, comm, sreq));
if (OMPI_SUCCESS != err) {
mca_coll_basic_free_reqs(req, nreqs);
return err;
}
}
@ -113,6 +111,9 @@ mca_coll_basic_alltoall_inter(const void *sbuf, int scount,
* So free them anyway -- even if there was an error, and return
* the error after we free everything. */
err = ompi_request_wait_all(nreqs, req, MPI_STATUSES_IGNORE);
if (OMPI_SUCCESS != err) {
mca_coll_basic_free_reqs(req, nreqs);
}
/* All done */
return err;

View file

@ -57,8 +57,7 @@ mca_coll_basic_alltoallv_inter(const void *sbuf, const int *scounts, const int *
MPI_Aint sndextent;
MPI_Aint rcvextent;
mca_coll_basic_module_t *basic_module = (mca_coll_basic_module_t*) module;
ompi_request_t **preq = basic_module->mccb_reqs;
ompi_request_t **preq;
/* Initialize. */
@ -69,6 +68,7 @@ mca_coll_basic_alltoallv_inter(const void *sbuf, const int *scounts, const int *
/* Initiate all send/recv to/from others. */
nreqs = rsize * 2;
preq = mca_coll_basic_get_reqs((mca_coll_basic_module_t*) module, nreqs);
/* Post all receives first */
/* A simple optimization: do not send and recv msgs of length zero */
@ -79,10 +79,9 @@ mca_coll_basic_alltoallv_inter(const void *sbuf, const int *scounts, const int *
i, MCA_COLL_BASE_TAG_ALLTOALLV, comm,
&preq[i]));
if (MPI_SUCCESS != err) {
mca_coll_basic_free_reqs(preq, i);
return err;
}
} else {
preq[i] = MPI_REQUEST_NULL;
}
}
@ -95,14 +94,16 @@ mca_coll_basic_alltoallv_inter(const void *sbuf, const int *scounts, const int *
MCA_PML_BASE_SEND_STANDARD, comm,
&preq[rsize + i]));
if (MPI_SUCCESS != err) {
mca_coll_basic_free_reqs(preq, rsize + i);
return err;
}
} else {
preq[rsize + i] = MPI_REQUEST_NULL;
}
}
err = ompi_request_wait_all(nreqs, preq, MPI_STATUSES_IGNORE);
if (MPI_SUCCESS != err) {
mca_coll_basic_free_reqs(preq, nreqs);
}
/* All done */
return err;

View file

@ -3,7 +3,7 @@
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* Copyright (c) 2004-2015 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
@ -41,9 +41,8 @@ mca_coll_basic_alltoallw_intra_inplace(const void *rbuf, const int *rcounts, con
struct ompi_communicator_t *comm,
mca_coll_base_module_t *module)
{
mca_coll_basic_module_t *basic_module = (mca_coll_basic_module_t*) module;
int i, j, size, rank, err=MPI_SUCCESS, max_size;
MPI_Request *preq;
MPI_Request *preq, *reqs = NULL;
char *tmp_buffer;
ptrdiff_t ext;
@ -71,6 +70,7 @@ mca_coll_basic_alltoallw_intra_inplace(const void *rbuf, const int *rcounts, con
return OMPI_ERR_OUT_OF_RESOURCE;
}
reqs = mca_coll_basic_get_reqs( (mca_coll_basic_module_t*) module, 2);
/* in-place alltoallw slow algorithm (but works) */
for (i = 0 ; i < size ; ++i) {
size_t msg_size_i;
@ -82,7 +82,7 @@ mca_coll_basic_alltoallw_intra_inplace(const void *rbuf, const int *rcounts, con
msg_size_j *= rcounts[j];
/* Initiate all send/recv to/from others. */
preq = basic_module->mccb_reqs;
preq = reqs;
if (i == rank && msg_size_j != 0) {
/* Copy the data into the temporary buffer */
@ -119,17 +119,19 @@ mca_coll_basic_alltoallw_intra_inplace(const void *rbuf, const int *rcounts, con
}
/* Wait for the requests to complete */
err = ompi_request_wait_all (2, basic_module->mccb_reqs, MPI_STATUSES_IGNORE);
err = ompi_request_wait_all (2, reqs, MPI_STATUSES_IGNORE);
if (MPI_SUCCESS != err) { goto error_hndl; }
/* Free the requests. */
mca_coll_basic_free_reqs(basic_module->mccb_reqs, 2);
}
}
error_hndl:
/* Free the temporary buffer */
free (tmp_buffer);
if( MPI_SUCCESS != err ) { /* Free the requests. */
if( NULL != reqs ) {
mca_coll_basic_free_reqs(reqs, 2);
}
}
/* All done */
@ -159,8 +161,7 @@ mca_coll_basic_alltoallw_intra(const void *sbuf, const int *scounts, const int *
char *psnd;
char *prcv;
int nreqs;
MPI_Request *preq;
mca_coll_basic_module_t *basic_module = (mca_coll_basic_module_t*) module;
MPI_Request *preq, *reqs;
/* Initialize. */
if (MPI_IN_PLACE == sbuf) {
@ -191,7 +192,7 @@ mca_coll_basic_alltoallw_intra(const void *sbuf, const int *scounts, const int *
/* Initiate all send/recv to/from others. */
nreqs = 0;
preq = basic_module->mccb_reqs;
reqs = preq = mca_coll_basic_get_reqs((mca_coll_basic_module_t*) module, 2 * size);
/* Post all receives first -- a simple optimization */
@ -209,8 +210,7 @@ mca_coll_basic_alltoallw_intra(const void *sbuf, const int *scounts, const int *
preq++));
++nreqs;
if (MPI_SUCCESS != err) {
mca_coll_basic_free_reqs(basic_module->mccb_reqs,
nreqs);
mca_coll_basic_free_reqs(reqs, nreqs);
return err;
}
}
@ -232,15 +232,14 @@ mca_coll_basic_alltoallw_intra(const void *sbuf, const int *scounts, const int *
preq++));
++nreqs;
if (MPI_SUCCESS != err) {
mca_coll_basic_free_reqs(basic_module->mccb_reqs,
nreqs);
mca_coll_basic_free_reqs(reqs, nreqs);
return err;
}
}
/* Start your engines. This will never return an error. */
MCA_PML_CALL(start(nreqs, basic_module->mccb_reqs));
MCA_PML_CALL(start(nreqs, reqs));
/* Wait for them all. If there's an error, note that we don't care
* what the error was -- just that there *was* an error. The PML
@ -249,13 +248,11 @@ mca_coll_basic_alltoallw_intra(const void *sbuf, const int *scounts, const int *
* So free them anyway -- even if there was an error, and return the
* error after we free everything. */
err = ompi_request_wait_all(nreqs, basic_module->mccb_reqs,
MPI_STATUSES_IGNORE);
err = ompi_request_wait_all(nreqs, reqs, MPI_STATUSES_IGNORE);
/* Free the requests. */
mca_coll_basic_free_reqs(basic_module->mccb_reqs, nreqs);
if( MPI_SUCCESS != err ) {
mca_coll_basic_free_reqs(reqs, nreqs);
}
/* All done */
return err;
@ -283,15 +280,14 @@ mca_coll_basic_alltoallw_inter(const void *sbuf, const int *scounts, const int *
char *psnd;
char *prcv;
int nreqs;
MPI_Request *preq;
mca_coll_basic_module_t *basic_module = (mca_coll_basic_module_t*) module;
MPI_Request *preq, *reqs;
/* Initialize. */
size = ompi_comm_remote_size(comm);
/* Initiate all send/recv to/from others. */
nreqs = 0;
preq = basic_module->mccb_reqs;
reqs = preq = mca_coll_basic_get_reqs((mca_coll_basic_module_t*) module, 2 * size);
/* Post all receives first -- a simple optimization */
for (i = 0; i < size; ++i) {
@ -308,8 +304,7 @@ mca_coll_basic_alltoallw_inter(const void *sbuf, const int *scounts, const int *
comm, preq++));
++nreqs;
if (OMPI_SUCCESS != err) {
mca_coll_basic_free_reqs(basic_module->mccb_reqs,
nreqs);
mca_coll_basic_free_reqs(reqs, nreqs);
return err;
}
}
@ -330,14 +325,13 @@ mca_coll_basic_alltoallw_inter(const void *sbuf, const int *scounts, const int *
preq++));
++nreqs;
if (OMPI_SUCCESS != err) {
mca_coll_basic_free_reqs(basic_module->mccb_reqs,
nreqs);
mca_coll_basic_free_reqs(reqs, nreqs);
return err;
}
}
/* Start your engines. This will never return an error. */
MCA_PML_CALL(start(nreqs, basic_module->mccb_reqs));
MCA_PML_CALL(start(nreqs, reqs));
/* Wait for them all. If there's an error, note that we don't care
* what the error was -- just that there *was* an error. The PML
@ -345,11 +339,11 @@ mca_coll_basic_alltoallw_inter(const void *sbuf, const int *scounts, const int *
* i.e., by the end of this call, all the requests are free-able.
* So free them anyway -- even if there was an error, and return the
* error after we free everything. */
err = ompi_request_wait_all(nreqs, basic_module->mccb_reqs,
MPI_STATUSES_IGNORE);
err = ompi_request_wait_all(nreqs, reqs, MPI_STATUSES_IGNORE);
/* Free the requests. */
mca_coll_basic_free_reqs(basic_module->mccb_reqs, nreqs);
if (OMPI_SUCCESS != err) { /* Free the requests. */
mca_coll_basic_free_reqs(reqs, nreqs);
}
/* All done */
return err;

View file

@ -53,9 +53,7 @@ mca_coll_basic_bcast_log_intra(void *buff, int count,
int mask;
int err;
int nreqs;
ompi_request_t **preq;
mca_coll_basic_module_t *basic_module = (mca_coll_basic_module_t*) module;
ompi_request_t **reqs = basic_module->mccb_reqs;
ompi_request_t **preq, **reqs;
size = ompi_comm_size(comm);
rank = ompi_comm_rank(comm);
@ -83,6 +81,8 @@ mca_coll_basic_bcast_log_intra(void *buff, int count,
/* Send data to the children. */
reqs = mca_coll_basic_get_reqs((mca_coll_basic_module_t*) module, size);
err = MPI_SUCCESS;
preq = reqs;
nreqs = 0;
@ -119,12 +119,11 @@ mca_coll_basic_bcast_log_intra(void *buff, int count,
* error, and return the error after we free everything. */
err = ompi_request_wait_all(nreqs, reqs, MPI_STATUSES_IGNORE);
/* Free the reqs */
mca_coll_basic_free_reqs(reqs, nreqs);
}
if( MPI_SUCCESS != err ) {
mca_coll_basic_free_reqs(reqs, nreqs);
}
/* All done */
return err;
@ -147,8 +146,7 @@ mca_coll_basic_bcast_lin_inter(void *buff, int count,
int i;
int rsize;
int err;
mca_coll_basic_module_t *basic_module = (mca_coll_basic_module_t*) module;
ompi_request_t **reqs = basic_module->mccb_reqs;
ompi_request_t **reqs = NULL;
rsize = ompi_comm_remote_size(comm);
@ -161,6 +159,7 @@ mca_coll_basic_bcast_lin_inter(void *buff, int count,
MCA_COLL_BASE_TAG_BCAST, comm,
MPI_STATUS_IGNORE));
} else {
reqs = mca_coll_basic_get_reqs((mca_coll_basic_module_t*) module, rsize);
/* root section */
for (i = 0; i < rsize; i++) {
err = MCA_PML_CALL(isend(buff, count, datatype, i,
@ -168,10 +167,14 @@ mca_coll_basic_bcast_lin_inter(void *buff, int count,
MCA_PML_BASE_SEND_STANDARD,
comm, &(reqs[i])));
if (OMPI_SUCCESS != err) {
mca_coll_basic_free_reqs(reqs, rsize);
return err;
}
}
err = ompi_request_wait_all(rsize, reqs, MPI_STATUSES_IGNORE);
if (OMPI_SUCCESS != err) {
mca_coll_basic_free_reqs(reqs, rsize);
}
}

View file

@ -2,7 +2,7 @@
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* Copyright (c) 2004-2015 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
@ -120,8 +120,7 @@ mca_coll_basic_gatherv_inter(const void *sbuf, int scount,
int i, size, err;
char *ptmp;
ptrdiff_t lb, extent;
mca_coll_basic_module_t *basic_module = (mca_coll_basic_module_t*) module;
ompi_request_t **reqs = basic_module->mccb_reqs;
ompi_request_t **reqs = NULL;
size = ompi_comm_remote_size(comm);
@ -143,17 +142,22 @@ mca_coll_basic_gatherv_inter(const void *sbuf, int scount,
return OMPI_ERROR;
}
reqs = mca_coll_basic_get_reqs((mca_coll_basic_module_t*) module, size);
for (i = 0; i < size; ++i) {
ptmp = ((char *) rbuf) + (extent * disps[i]);
err = MCA_PML_CALL(irecv(ptmp, rcounts[i], rdtype, i,
MCA_COLL_BASE_TAG_GATHERV,
comm, &reqs[i]));
if (OMPI_SUCCESS != err) {
mca_coll_basic_free_reqs(reqs, size);
return err;
}
}
err = ompi_request_wait_all(size, reqs, MPI_STATUSES_IGNORE);
if (OMPI_SUCCESS != err) {
mca_coll_basic_free_reqs(reqs, size);
}
}
/* All done */

View file

@ -101,9 +101,6 @@ mca_coll_basic_comm_query(struct ompi_communicator_t *comm,
size = dist_graph_size;
}
}
basic_module->mccb_num_reqs = size;
basic_module->mccb_reqs = (ompi_request_t**)
malloc(sizeof(ompi_request_t *) * basic_module->mccb_num_reqs);
/* Choose whether to use [intra|inter], and [linear|log]-based
* algorithms. */

View file

@ -3,7 +3,7 @@
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2006 The University of Tennessee and The University
* Copyright (c) 2004-2015 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
@ -42,7 +42,6 @@ mca_coll_basic_neighbor_allgather_cart(const void *sbuf, int scount,
struct ompi_communicator_t *comm,
mca_coll_base_module_t *module)
{
mca_coll_basic_module_t *basic_module = (mca_coll_basic_module_t *) module;
const mca_topo_base_comm_cart_2_2_0_t *cart = comm->c_topo->mtc.cart;
const int rank = ompi_comm_rank (comm);
ompi_request_t **reqs;
@ -51,9 +50,10 @@ mca_coll_basic_neighbor_allgather_cart(const void *sbuf, int scount,
ompi_datatype_get_extent(rdtype, &lb, &extent);
reqs = mca_coll_basic_get_reqs( (mca_coll_basic_module_t *) module, 4 * cart->ndims );
/* The ordering is defined as -1 then +1 in each dimension in
* order of dimension. */
for (dim = 0, reqs = basic_module->mccb_reqs, nreqs = 0 ; dim < cart->ndims ; ++dim) {
for (dim = 0, nreqs = 0 ; dim < cart->ndims ; ++dim) {
int srank = MPI_PROC_NULL, drank = MPI_PROC_NULL;
if (cart->dims[dim] > 1) {
@ -63,6 +63,7 @@ mca_coll_basic_neighbor_allgather_cart(const void *sbuf, int scount,
}
if (MPI_PROC_NULL != srank) {
nreqs += 2;
rc = MCA_PML_CALL(irecv(rbuf, rcount, rdtype, srank,
MCA_COLL_BASE_TAG_ALLGATHER,
comm, reqs++));
@ -75,13 +76,12 @@ mca_coll_basic_neighbor_allgather_cart(const void *sbuf, int scount,
MCA_PML_BASE_SEND_STANDARD,
comm, reqs++));
if (OMPI_SUCCESS != rc) break;
nreqs += 2;
}
rbuf = (char *) rbuf + extent * rcount;
if (MPI_PROC_NULL != drank) {
nreqs += 2;
rc = MCA_PML_CALL(irecv(rbuf, rcount, rdtype, drank,
MCA_COLL_BASE_TAG_ALLGATHER,
comm, reqs++));
@ -93,19 +93,21 @@ mca_coll_basic_neighbor_allgather_cart(const void *sbuf, int scount,
MCA_PML_BASE_SEND_STANDARD,
comm, reqs++));
if (OMPI_SUCCESS != rc) break;
nreqs += 2;
}
rbuf = (char *) rbuf + extent * rcount;
}
if (OMPI_SUCCESS != rc) {
/* should probably try to clean up here */
mca_coll_basic_free_reqs(reqs, nreqs);
return rc;
}
return ompi_request_wait_all (nreqs, basic_module->mccb_reqs, MPI_STATUSES_IGNORE);
rc = ompi_request_wait_all (nreqs, reqs, MPI_STATUSES_IGNORE);
if (OMPI_SUCCESS != rc) {
mca_coll_basic_free_reqs(reqs, nreqs);
}
return rc;
}
static int
@ -115,12 +117,11 @@ mca_coll_basic_neighbor_allgather_graph(const void *sbuf, int scount,
struct ompi_communicator_t *comm,
mca_coll_base_module_t *module)
{
mca_coll_basic_module_t *basic_module = (mca_coll_basic_module_t *) module;
const mca_topo_base_comm_graph_2_2_0_t *graph = comm->c_topo->mtc.graph;
const int rank = ompi_comm_rank (comm);
const int *edges;
int degree;
ompi_request_t **reqs;
ompi_request_t **reqs, **preqs;
ptrdiff_t lb, extent;
int rc = MPI_SUCCESS, neighbor;
@ -132,10 +133,11 @@ mca_coll_basic_neighbor_allgather_graph(const void *sbuf, int scount,
}
ompi_datatype_get_extent(rdtype, &lb, &extent);
reqs = preqs = mca_coll_basic_get_reqs((mca_coll_basic_module_t *) module, 2 * degree);
for (neighbor = 0, reqs = basic_module->mccb_reqs ; neighbor < degree ; ++neighbor) {
for (neighbor = 0; neighbor < degree ; ++neighbor) {
rc = MCA_PML_CALL(irecv(rbuf, rcount, rdtype, edges[neighbor], MCA_COLL_BASE_TAG_ALLGATHER,
comm, reqs++));
comm, preqs++));
if (OMPI_SUCCESS != rc) break;
rbuf = (char *) rbuf + extent * rcount;
@ -143,16 +145,20 @@ mca_coll_basic_neighbor_allgather_graph(const void *sbuf, int scount,
* a const for the send buffer. */
rc = MCA_PML_CALL(isend((void *) sbuf, scount, sdtype, edges[neighbor],
MCA_COLL_BASE_TAG_ALLGATHER, MCA_PML_BASE_SEND_STANDARD,
comm, reqs++));
comm, preqs++));
if (OMPI_SUCCESS != rc) break;
}
if (OMPI_SUCCESS != rc) {
/* should probably try to clean up here */
mca_coll_basic_free_reqs( reqs, (2 * neighbor + 1));
return rc;
}
return ompi_request_wait_all (degree * 2, basic_module->mccb_reqs, MPI_STATUSES_IGNORE);
rc = ompi_request_wait_all (degree * 2, reqs, MPI_STATUSES_IGNORE);
if (OMPI_SUCCESS != rc) {
mca_coll_basic_free_reqs( reqs, degree * 2);
}
return rc;
}
static int
@ -162,11 +168,10 @@ mca_coll_basic_neighbor_allgather_dist_graph(const void *sbuf, int scount,
struct ompi_communicator_t *comm,
mca_coll_base_module_t *module)
{
mca_coll_basic_module_t *basic_module = (mca_coll_basic_module_t *) module;
const mca_topo_base_comm_dist_graph_2_2_0_t *dist_graph = comm->c_topo->mtc.dist_graph;
const int *inedges, *outedges;
int indegree, outdegree;
ompi_request_t **reqs;
ompi_request_t **reqs, **preqs;
ptrdiff_t lb, extent;
int rc = MPI_SUCCESS, neighbor;
@ -177,17 +182,18 @@ mca_coll_basic_neighbor_allgather_dist_graph(const void *sbuf, int scount,
outedges = dist_graph->out;
ompi_datatype_get_extent(rdtype, &lb, &extent);
reqs = preqs = mca_coll_basic_get_reqs((mca_coll_basic_module_t *) module, indegree + outdegree);
for (neighbor = 0, reqs = basic_module->mccb_reqs ; neighbor < indegree ; ++neighbor) {
for (neighbor = 0; neighbor < indegree ; ++neighbor) {
rc = MCA_PML_CALL(irecv(rbuf, rcount, rdtype, inedges[neighbor],
MCA_COLL_BASE_TAG_ALLGATHER,
comm, reqs++));
comm, preqs++));
if (OMPI_SUCCESS != rc) break;
rbuf = (char *) rbuf + extent * rcount;
}
if (OMPI_SUCCESS != rc) {
/* should probably try to clean up here */
mca_coll_basic_free_reqs(reqs, neighbor);
return rc;
}
@ -197,16 +203,20 @@ mca_coll_basic_neighbor_allgather_dist_graph(const void *sbuf, int scount,
rc = MCA_PML_CALL(isend((void *) sbuf, scount, sdtype, outedges[neighbor],
MCA_COLL_BASE_TAG_ALLGATHER,
MCA_PML_BASE_SEND_STANDARD,
comm, reqs++));
comm, preqs++));
if (OMPI_SUCCESS != rc) break;
}
if (OMPI_SUCCESS != rc) {
/* should probably try to clean up here */
mca_coll_basic_free_reqs(reqs, indegree + neighbor);
return rc;
}
return ompi_request_wait_all (indegree + outdegree, basic_module->mccb_reqs, MPI_STATUSES_IGNORE);
rc = ompi_request_wait_all (indegree + outdegree, reqs, MPI_STATUSES_IGNORE);
if (OMPI_SUCCESS != rc) {
mca_coll_basic_free_reqs(reqs, indegree + outdegree);
}
return rc;
}
int mca_coll_basic_neighbor_allgather(const void *sbuf, int scount,

View file

@ -3,7 +3,7 @@
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2006 The University of Tennessee and The University
* Copyright (c) 2004-2015 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
@ -41,16 +41,15 @@ mca_coll_basic_neighbor_allgatherv_cart(const void *sbuf, int scount, struct omp
struct ompi_datatype_t *rdtype, struct ompi_communicator_t *comm,
mca_coll_base_module_t *module)
{
mca_coll_basic_module_t *basic_module = (mca_coll_basic_module_t *) module;
const mca_topo_base_comm_cart_2_2_0_t *cart = comm->c_topo->mtc.cart;
const int rank = ompi_comm_rank (comm);
ompi_request_t **reqs;
ompi_request_t **reqs, **preqs;
ptrdiff_t lb, extent;
int rc = MPI_SUCCESS, dim, i, nreqs;
ompi_datatype_get_extent(rdtype, &lb, &extent);
reqs = basic_module->mccb_reqs;
reqs = preqs = mca_coll_basic_get_reqs( (mca_coll_basic_module_t *) module, 4 * cart->ndims);
/* The ordering is defined as -1 then +1 in each dimension in
* order of dimension. */
@ -64,36 +63,40 @@ mca_coll_basic_neighbor_allgatherv_cart(const void *sbuf, int scount, struct omp
}
if (MPI_PROC_NULL != srank) {
nreqs += 2;
rc = MCA_PML_CALL(irecv((char *) rbuf + disps[i] * extent, rcounts[i], rdtype, srank,
MCA_COLL_BASE_TAG_ALLGATHER, comm, reqs++));
MCA_COLL_BASE_TAG_ALLGATHER, comm, preqs++));
if (OMPI_SUCCESS != rc) break;
/* remove cast from const when the pml layer is updated to take
* a const for the send buffer. */
rc = MCA_PML_CALL(isend((void *) sbuf, scount, sdtype, srank, MCA_COLL_BASE_TAG_ALLGATHER,
MCA_PML_BASE_SEND_STANDARD, comm, reqs++));
MCA_PML_BASE_SEND_STANDARD, comm, preqs++));
if (OMPI_SUCCESS != rc) break;
nreqs += 2;
}
if (MPI_PROC_NULL != drank) {
nreqs += 2;
rc = MCA_PML_CALL(irecv((char *) rbuf + disps[i+1] * extent, rcounts[i+1], rdtype, drank,
MCA_COLL_BASE_TAG_ALLGATHER, comm, reqs++));
MCA_COLL_BASE_TAG_ALLGATHER, comm, preqs++));
if (OMPI_SUCCESS != rc) break;
rc = MCA_PML_CALL(isend((void *) sbuf, scount, sdtype, drank, MCA_COLL_BASE_TAG_ALLGATHER,
MCA_PML_BASE_SEND_STANDARD, comm, reqs++));
MCA_PML_BASE_SEND_STANDARD, comm, preqs++));
if (OMPI_SUCCESS != rc) break;
nreqs += 2;
}
}
if (OMPI_SUCCESS != rc) {
/* should probably try to clean up here */
mca_coll_basic_free_reqs( reqs, nreqs );
return rc;
}
return ompi_request_wait_all (nreqs, basic_module->mccb_reqs, MPI_STATUSES_IGNORE);
rc = ompi_request_wait_all (nreqs, reqs, MPI_STATUSES_IGNORE);
if (OMPI_SUCCESS != rc) {
mca_coll_basic_free_reqs( reqs, nreqs );
}
return rc;
}
static int
@ -102,14 +105,12 @@ mca_coll_basic_neighbor_allgatherv_graph(const void *sbuf, int scount, struct om
struct ompi_datatype_t *rdtype, struct ompi_communicator_t *comm,
mca_coll_base_module_t *module)
{
mca_coll_basic_module_t *basic_module = (mca_coll_basic_module_t *) module;
const mca_topo_base_comm_graph_2_2_0_t *graph = comm->c_topo->mtc.graph;
const int rank = ompi_comm_rank (comm);
const int *edges;
int degree;
ompi_request_t **reqs;
int rc = MPI_SUCCESS, neighbor, degree;
ompi_request_t **reqs, **preqs;
ptrdiff_t lb, extent;
int rc = MPI_SUCCESS, neighbor;
mca_topo_base_graph_neighbors_count (comm, rank, &degree);
@ -119,26 +120,31 @@ mca_coll_basic_neighbor_allgatherv_graph(const void *sbuf, int scount, struct om
}
ompi_datatype_get_extent(rdtype, &lb, &extent);
reqs = preqs = mca_coll_basic_get_reqs( (mca_coll_basic_module_t *) module, 2 * degree);
for (neighbor = 0, reqs = basic_module->mccb_reqs ; neighbor < degree ; ++neighbor) {
for (neighbor = 0; neighbor < degree ; ++neighbor) {
rc = MCA_PML_CALL(irecv((char *) rbuf + disps[neighbor] * extent, rcounts[neighbor],
rdtype, edges[neighbor], MCA_COLL_BASE_TAG_ALLGATHER, comm, reqs++));
rdtype, edges[neighbor], MCA_COLL_BASE_TAG_ALLGATHER, comm, preqs++));
if (OMPI_SUCCESS != rc) break;
/* remove cast from const when the pml layer is updated to take
* a const for the send buffer. */
rc = MCA_PML_CALL(isend((void *) sbuf, scount, sdtype, edges[neighbor],
MCA_COLL_BASE_TAG_ALLGATHER, MCA_PML_BASE_SEND_STANDARD,
comm, reqs++));
comm, preqs++));
if (OMPI_SUCCESS != rc) break;
}
if (OMPI_SUCCESS != rc) {
/* should probably try to clean up here */
mca_coll_basic_free_reqs( reqs, 2 * (neighbor + 1) );
return rc;
}
return ompi_request_wait_all (degree * 2, basic_module->mccb_reqs, MPI_STATUSES_IGNORE);
rc = ompi_request_wait_all (degree * 2, reqs, MPI_STATUSES_IGNORE);
if (OMPI_SUCCESS != rc) {
mca_coll_basic_free_reqs( reqs, 2 * degree );
}
return rc;
}
static int
@ -147,11 +153,10 @@ mca_coll_basic_neighbor_allgatherv_dist_graph(const void *sbuf, int scount, stru
struct ompi_datatype_t *rdtype, struct ompi_communicator_t *comm,
mca_coll_base_module_t *module)
{
mca_coll_basic_module_t *basic_module = (mca_coll_basic_module_t *) module;
const mca_topo_base_comm_dist_graph_2_2_0_t *dist_graph = comm->c_topo->mtc.dist_graph;
const int *inedges, *outedges;
int indegree, outdegree;
ompi_request_t **reqs;
ompi_request_t **reqs, **preqs;
ptrdiff_t lb, extent;
int rc = MPI_SUCCESS, neighbor;
@ -162,15 +167,16 @@ mca_coll_basic_neighbor_allgatherv_dist_graph(const void *sbuf, int scount, stru
outedges = dist_graph->out;
ompi_datatype_get_extent(rdtype, &lb, &extent);
reqs = preqs = mca_coll_basic_get_reqs( (mca_coll_basic_module_t *) module, indegree + outdegree);
for (neighbor = 0, reqs = basic_module->mccb_reqs ; neighbor < indegree ; ++neighbor) {
for (neighbor = 0; neighbor < indegree ; ++neighbor) {
rc = MCA_PML_CALL(irecv((char *) rbuf + disps[neighbor] * extent, rcounts[neighbor], rdtype,
inedges[neighbor], MCA_COLL_BASE_TAG_ALLGATHER, comm, reqs++));
inedges[neighbor], MCA_COLL_BASE_TAG_ALLGATHER, comm, preqs++));
if (OMPI_SUCCESS != rc) break;
}
if (OMPI_SUCCESS != rc) {
/* should probably try to clean up here */
mca_coll_basic_free_reqs(reqs, neighbor);
return rc;
}
@ -179,16 +185,20 @@ mca_coll_basic_neighbor_allgatherv_dist_graph(const void *sbuf, int scount, stru
* a const for the send buffer. */
rc = MCA_PML_CALL(isend((void *) sbuf, scount, sdtype, outedges[neighbor],
MCA_COLL_BASE_TAG_ALLGATHER, MCA_PML_BASE_SEND_STANDARD,
comm, reqs++));
comm, preqs++));
if (OMPI_SUCCESS != rc) break;
}
if (OMPI_SUCCESS != rc) {
/* should probably try to clean up here */
mca_coll_basic_free_reqs(reqs, indegree + neighbor);
return rc;
}
return ompi_request_wait_all (indegree + outdegree, basic_module->mccb_reqs, MPI_STATUSES_IGNORE);
rc = ompi_request_wait_all (indegree + outdegree, reqs, MPI_STATUSES_IGNORE);
if (OMPI_SUCCESS != rc) {
mca_coll_basic_free_reqs(reqs, indegree + outdegree);
}
return rc;
}
int mca_coll_basic_neighbor_allgatherv(const void *sbuf, int scount, struct ompi_datatype_t *sdtype,

View file

@ -3,7 +3,7 @@
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2006 The University of Tennessee and The University
* Copyright (c) 2004-2015 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
@ -40,18 +40,18 @@ mca_coll_basic_neighbor_alltoall_cart(const void *sbuf, int scount, struct ompi_
int rcount, struct ompi_datatype_t *rdtype, struct ompi_communicator_t *comm,
mca_coll_base_module_t *module)
{
mca_coll_basic_module_t *basic_module = (mca_coll_basic_module_t *) module;
const mca_topo_base_comm_cart_2_2_0_t *cart = comm->c_topo->mtc.cart;
const int rank = ompi_comm_rank (comm);
ompi_request_t **reqs;
ompi_request_t **reqs, **preqs;
ptrdiff_t lb, rdextent, sdextent;
int rc = MPI_SUCCESS, dim, nreqs;
ompi_datatype_get_extent(rdtype, &lb, &rdextent);
ompi_datatype_get_extent(sdtype, &lb, &sdextent);
reqs = preqs = mca_coll_basic_get_reqs( (mca_coll_basic_module_t *) module, 4 * cart->ndims);
/* post receives first */
for (dim = 0, nreqs = 0, reqs = basic_module->mccb_reqs ; dim < cart->ndims ; ++dim) {
for (dim = 0, nreqs = 0; dim < cart->ndims ; ++dim) {
int srank = MPI_PROC_NULL, drank = MPI_PROC_NULL;
if (cart->dims[dim] > 1) {
@ -61,28 +61,28 @@ mca_coll_basic_neighbor_alltoall_cart(const void *sbuf, int scount, struct ompi_
}
if (MPI_PROC_NULL != srank) {
nreqs++;
rc = MCA_PML_CALL(irecv(rbuf, rcount, rdtype, srank,
MCA_COLL_BASE_TAG_ALLTOALL,
comm, reqs++));
comm, preqs++));
if (OMPI_SUCCESS != rc) break;
nreqs++;
}
rbuf = (char *) rbuf + rdextent * rcount;
if (MPI_PROC_NULL != drank) {
nreqs++;
rc = MCA_PML_CALL(irecv(rbuf, rcount, rdtype, drank,
MCA_COLL_BASE_TAG_ALLTOALL,
comm, reqs++));
comm, preqs++));
if (OMPI_SUCCESS != rc) break;
nreqs++;
}
rbuf = (char *) rbuf + rdextent * rcount;
}
if (OMPI_SUCCESS != rc) {
/* should probably try to clean up here */
mca_coll_basic_free_reqs( reqs, nreqs);
return rc;
}
@ -98,34 +98,38 @@ mca_coll_basic_neighbor_alltoall_cart(const void *sbuf, int scount, struct ompi_
if (MPI_PROC_NULL != srank) {
/* remove cast from const when the pml layer is updated to take
* a const for the send buffer. */
nreqs++;
rc = MCA_PML_CALL(isend((void *) sbuf, scount, sdtype, srank,
MCA_COLL_BASE_TAG_ALLTOALL,
MCA_PML_BASE_SEND_STANDARD,
comm, reqs++));
comm, preqs++));
if (OMPI_SUCCESS != rc) break;
nreqs++;
}
sbuf = (const char *) sbuf + sdextent * scount;
if (MPI_PROC_NULL != drank) {
nreqs++;
rc = MCA_PML_CALL(isend((void *) sbuf, scount, sdtype, drank,
MCA_COLL_BASE_TAG_ALLTOALL,
MCA_PML_BASE_SEND_STANDARD,
comm, reqs++));
comm, preqs++));
if (OMPI_SUCCESS != rc) break;
nreqs++;
}
sbuf = (const char *) sbuf + sdextent * scount;
}
if (OMPI_SUCCESS != rc) {
/* should probably try to clean up here */
mca_coll_basic_free_reqs( reqs, nreqs);
return rc;
}
return ompi_request_wait_all (nreqs, basic_module->mccb_reqs, MPI_STATUSES_IGNORE);
rc = ompi_request_wait_all (nreqs, reqs, MPI_STATUSES_IGNORE);
if (OMPI_SUCCESS != rc) {
mca_coll_basic_free_reqs( reqs, nreqs);
}
return rc;
}
static int
@ -133,12 +137,11 @@ mca_coll_basic_neighbor_alltoall_graph(const void *sbuf, int scount, struct ompi
int rcount, struct ompi_datatype_t *rdtype, struct ompi_communicator_t *comm,
mca_coll_base_module_t *module)
{
mca_coll_basic_module_t *basic_module = (mca_coll_basic_module_t *) module;
const mca_topo_base_comm_graph_2_2_0_t *graph = comm->c_topo->mtc.graph;
const int rank = ompi_comm_rank (comm);
int rc = MPI_SUCCESS, neighbor, degree;
ptrdiff_t lb, rdextent, sdextent;
ompi_request_t **reqs;
ompi_request_t **reqs, **preqs;
const int *edges;
mca_topo_base_graph_neighbors_count (comm, rank, &degree);
@ -150,31 +153,40 @@ mca_coll_basic_neighbor_alltoall_graph(const void *sbuf, int scount, struct ompi
ompi_datatype_get_extent(rdtype, &lb, &rdextent);
ompi_datatype_get_extent(sdtype, &lb, &sdextent);
reqs = preqs = mca_coll_basic_get_reqs( (mca_coll_basic_module_t *) module, 2 * degree);
/* post receives first */
for (neighbor = 0, reqs = basic_module->mccb_reqs ; neighbor < degree ; ++neighbor) {
for (neighbor = 0; neighbor < degree ; ++neighbor) {
rc = MCA_PML_CALL(irecv(rbuf, rcount, rdtype, edges[neighbor], MCA_COLL_BASE_TAG_ALLTOALL,
comm, reqs++));
comm, preqs++));
if (OMPI_SUCCESS != rc) break;
rbuf = (char *) rbuf + rdextent * rcount;
}
if( MPI_SUCCESS != rc ) {
mca_coll_basic_free_reqs( reqs, neighbor );
return rc;
}
for (neighbor = 0 ; neighbor < degree ; ++neighbor) {
/* remove cast from const when the pml layer is updated to take
* a const for the send buffer. */
rc = MCA_PML_CALL(isend((void *) sbuf, scount, sdtype, edges[neighbor],
MCA_COLL_BASE_TAG_ALLTOALL, MCA_PML_BASE_SEND_STANDARD,
comm, reqs++));
comm, preqs++));
if (OMPI_SUCCESS != rc) break;
sbuf = (const char *) sbuf + sdextent * scount;
}
if (OMPI_SUCCESS != rc) {
/* should probably try to clean up here */
if( MPI_SUCCESS != rc ) {
mca_coll_basic_free_reqs( reqs, degree + neighbor );
return rc;
}
return ompi_request_wait_all (degree * 2, basic_module->mccb_reqs, MPI_STATUSES_IGNORE);
rc = ompi_request_wait_all (degree * 2, reqs, MPI_STATUSES_IGNORE);
if( MPI_SUCCESS != rc ) {
mca_coll_basic_free_reqs( reqs, 2 * degree );
}
return rc;
}
static int
@ -182,13 +194,12 @@ mca_coll_basic_neighbor_alltoall_dist_graph(const void *sbuf, int scount,struct
int rcount, struct ompi_datatype_t *rdtype, struct ompi_communicator_t *comm,
mca_coll_base_module_t *module)
{
mca_coll_basic_module_t *basic_module = (mca_coll_basic_module_t *) module;
const mca_topo_base_comm_dist_graph_2_2_0_t *dist_graph = comm->c_topo->mtc.dist_graph;
ptrdiff_t lb, rdextent, sdextent;
int rc = MPI_SUCCESS, neighbor;
const int *inedges, *outedges;
int indegree, outdegree;
ompi_request_t **reqs;
ompi_request_t **reqs, **preqs;
indegree = dist_graph->indegree;
outdegree = dist_graph->outdegree;
@ -198,36 +209,41 @@ mca_coll_basic_neighbor_alltoall_dist_graph(const void *sbuf, int scount,struct
ompi_datatype_get_extent(rdtype, &lb, &rdextent);
ompi_datatype_get_extent(sdtype, &lb, &sdextent);
reqs = preqs = mca_coll_basic_get_reqs( (mca_coll_basic_module_t *) module, indegree + outdegree);
/* post receives first */
for (neighbor = 0, reqs = basic_module->mccb_reqs ; neighbor < indegree ; ++neighbor, ++reqs) {
for (neighbor = 0; neighbor < indegree ; ++neighbor) {
rc = MCA_PML_CALL(irecv(rbuf, rcount, rdtype, inedges[neighbor],
MCA_COLL_BASE_TAG_ALLTOALL,
comm, reqs));
comm, preqs++));
if (OMPI_SUCCESS != rc) break;
rbuf = (char *) rbuf + rdextent * rcount;
}
if (OMPI_SUCCESS != rc) {
/* should probably try to clean up here */
mca_coll_basic_free_reqs(reqs, neighbor);
return rc;
}
for (neighbor = 0 ; neighbor < outdegree ; ++neighbor, ++reqs) {
for (neighbor = 0 ; neighbor < outdegree ; ++neighbor) {
/* remove cast from const when the pml layer is updated to take a const for the send buffer */
rc = MCA_PML_CALL(isend((void *) sbuf, scount, sdtype, outedges[neighbor],
MCA_COLL_BASE_TAG_ALLTOALL, MCA_PML_BASE_SEND_STANDARD,
comm, reqs));
comm, preqs++));
if (OMPI_SUCCESS != rc) break;
sbuf = (char *) sbuf + sdextent * scount;
}
if (OMPI_SUCCESS != rc) {
/* should probably try to clean up here */
mca_coll_basic_free_reqs(reqs, indegree + neighbor);
return rc;
}
return ompi_request_wait_all (indegree + outdegree, basic_module->mccb_reqs, MPI_STATUSES_IGNORE);
rc = ompi_request_wait_all (indegree + outdegree, reqs, MPI_STATUSES_IGNORE);
if (OMPI_SUCCESS != rc) {
mca_coll_basic_free_reqs(reqs, indegree + outdegree);
}
return rc;
}
int mca_coll_basic_neighbor_alltoall(const void *sbuf, int scount, struct ompi_datatype_t *sdtype, void *rbuf,

View file

@ -3,7 +3,7 @@
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2006 The University of Tennessee and The University
* Copyright (c) 2004-2015 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
@ -41,18 +41,18 @@ mca_coll_basic_neighbor_alltoallv_cart(const void *sbuf, const int scounts[], co
const int rdisps[], struct ompi_datatype_t *rdtype,
struct ompi_communicator_t *comm, mca_coll_base_module_t *module)
{
mca_coll_basic_module_t *basic_module = (mca_coll_basic_module_t *) module;
const mca_topo_base_comm_cart_2_2_0_t *cart = comm->c_topo->mtc.cart;
const int rank = ompi_comm_rank (comm);
int rc = MPI_SUCCESS, dim, i, nreqs;
ptrdiff_t lb, rdextent, sdextent;
ompi_request_t **reqs;
ompi_request_t **reqs, **preqs;
ompi_datatype_get_extent(rdtype, &lb, &rdextent);
ompi_datatype_get_extent(sdtype, &lb, &sdextent);
reqs = preqs = mca_coll_basic_get_reqs( (mca_coll_basic_module_t *) module, 4 * cart->ndims );
/* post receives first */
for (dim = 0, nreqs = 0, i = 0, reqs = basic_module->mccb_reqs ; dim < cart->ndims ; ++dim, i += 2) {
for (dim = 0, nreqs = 0, i = 0; dim < cart->ndims ; ++dim, i += 2) {
int srank = MPI_PROC_NULL, drank = MPI_PROC_NULL;
if (cart->dims[dim] > 1) {
@ -62,22 +62,22 @@ mca_coll_basic_neighbor_alltoallv_cart(const void *sbuf, const int scounts[], co
}
if (MPI_PROC_NULL != srank) {
rc = MCA_PML_CALL(irecv((char *) rbuf + rdisps[i] * rdextent, rcounts[i], rdtype, srank,
MCA_COLL_BASE_TAG_ALLTOALL, comm, reqs++));
if (OMPI_SUCCESS != rc) break;
nreqs++;
rc = MCA_PML_CALL(irecv((char *) rbuf + rdisps[i] * rdextent, rcounts[i], rdtype, srank,
MCA_COLL_BASE_TAG_ALLTOALL, comm, preqs++));
if (OMPI_SUCCESS != rc) break;
}
if (MPI_PROC_NULL != drank) {
rc = MCA_PML_CALL(irecv((char *) rbuf + rdisps[i+1] * rdextent, rcounts[i+1], rdtype, drank,
MCA_COLL_BASE_TAG_ALLTOALL, comm, reqs++));
if (OMPI_SUCCESS != rc) break;
nreqs++;
rc = MCA_PML_CALL(irecv((char *) rbuf + rdisps[i+1] * rdextent, rcounts[i+1], rdtype, drank,
MCA_COLL_BASE_TAG_ALLTOALL, comm, preqs++));
if (OMPI_SUCCESS != rc) break;
}
}
if (OMPI_SUCCESS != rc) {
/* should probably try to clean up here */
mca_coll_basic_free_reqs( reqs, nreqs );
return rc;
}
@ -91,27 +91,31 @@ mca_coll_basic_neighbor_alltoallv_cart(const void *sbuf, const int scounts[], co
}
if (MPI_PROC_NULL != srank) {
nreqs++;
/* remove cast from const when the pml layer is updated to take a const for the send buffer */
rc = MCA_PML_CALL(isend((char *) sbuf + sdisps[i] * sdextent, scounts[i], sdtype, srank,
MCA_COLL_BASE_TAG_ALLTOALL, MCA_PML_BASE_SEND_STANDARD, comm, reqs++));
MCA_COLL_BASE_TAG_ALLTOALL, MCA_PML_BASE_SEND_STANDARD, comm, preqs++));
if (OMPI_SUCCESS != rc) break;
nreqs++;
}
if (MPI_PROC_NULL != drank) {
rc = MCA_PML_CALL(isend((char *) sbuf + sdisps[i+1] * sdextent, scounts[i+1], sdtype, drank,
MCA_COLL_BASE_TAG_ALLTOALL, MCA_PML_BASE_SEND_STANDARD, comm, reqs++));
if (OMPI_SUCCESS != rc) break;
nreqs++;
rc = MCA_PML_CALL(isend((char *) sbuf + sdisps[i+1] * sdextent, scounts[i+1], sdtype, drank,
MCA_COLL_BASE_TAG_ALLTOALL, MCA_PML_BASE_SEND_STANDARD, comm, preqs++));
if (OMPI_SUCCESS != rc) break;
}
}
if (OMPI_SUCCESS != rc) {
/* should probably try to clean up here */
mca_coll_basic_free_reqs( reqs, nreqs );
return rc;
}
return ompi_request_wait_all (nreqs, basic_module->mccb_reqs, MPI_STATUSES_IGNORE);
rc = ompi_request_wait_all (nreqs, reqs, MPI_STATUSES_IGNORE);
if (OMPI_SUCCESS != rc) {
mca_coll_basic_free_reqs( reqs, nreqs );
}
return rc;
}
static int
@ -120,12 +124,11 @@ mca_coll_basic_neighbor_alltoallv_graph(const void *sbuf, const int scounts[], c
const int rdisps[], struct ompi_datatype_t *rdtype,
struct ompi_communicator_t *comm, mca_coll_base_module_t *module)
{
mca_coll_basic_module_t *basic_module = (mca_coll_basic_module_t *) module;
const mca_topo_base_comm_graph_2_2_0_t *graph = comm->c_topo->mtc.graph;
int rc = MPI_SUCCESS, neighbor, degree;
const int rank = ompi_comm_rank (comm);
ptrdiff_t lb, rdextent, sdextent;
ompi_request_t **reqs;
ompi_request_t **reqs, **preqs;
const int *edges;
mca_topo_base_graph_neighbors_count (comm, rank, &degree);
@ -137,16 +140,17 @@ mca_coll_basic_neighbor_alltoallv_graph(const void *sbuf, const int scounts[], c
ompi_datatype_get_extent(rdtype, &lb, &rdextent);
ompi_datatype_get_extent(sdtype, &lb, &sdextent);
reqs = preqs = mca_coll_basic_get_reqs( (mca_coll_basic_module_t *) module, 2 * degree );
/* post all receives first */
for (neighbor = 0, reqs = basic_module->mccb_reqs ; neighbor < degree ; ++neighbor) {
for (neighbor = 0; neighbor < degree ; ++neighbor) {
rc = MCA_PML_CALL(irecv((char *) rbuf + rdisps[neighbor] * rdextent, rcounts[neighbor], rdtype,
edges[neighbor], MCA_COLL_BASE_TAG_ALLTOALL, comm, reqs++));
edges[neighbor], MCA_COLL_BASE_TAG_ALLTOALL, comm, preqs++));
if (OMPI_SUCCESS != rc) break;
}
if (OMPI_SUCCESS != rc) {
/* should probably try to clean up here */
mca_coll_basic_free_reqs( reqs, neighbor );
return rc;
}
@ -154,16 +158,20 @@ mca_coll_basic_neighbor_alltoallv_graph(const void *sbuf, const int scounts[], c
/* remove cast from const when the pml layer is updated to take a const for the send buffer */
rc = MCA_PML_CALL(isend((char *) sbuf + sdisps[neighbor] * sdextent, scounts[neighbor], sdtype,
edges[neighbor], MCA_COLL_BASE_TAG_ALLTOALL, MCA_PML_BASE_SEND_STANDARD,
comm, reqs++));
comm, preqs++));
if (OMPI_SUCCESS != rc) break;
}
if (OMPI_SUCCESS != rc) {
/* should probably try to clean up here */
mca_coll_basic_free_reqs( reqs, degree + neighbor );
return rc;
}
return ompi_request_wait_all (degree * 2, basic_module->mccb_reqs, MPI_STATUSES_IGNORE);
rc = ompi_request_wait_all (degree * 2, reqs, MPI_STATUSES_IGNORE);
if (OMPI_SUCCESS != rc) {
mca_coll_basic_free_reqs( reqs, degree * 2);
}
return rc;
}
static int
@ -172,13 +180,12 @@ mca_coll_basic_neighbor_alltoallv_dist_graph(const void *sbuf, const int scounts
const int rdisps[], struct ompi_datatype_t *rdtype,
struct ompi_communicator_t *comm, mca_coll_base_module_t *module)
{
mca_coll_basic_module_t *basic_module = (mca_coll_basic_module_t *) module;
const mca_topo_base_comm_dist_graph_2_2_0_t *dist_graph = comm->c_topo->mtc.dist_graph;
ptrdiff_t lb, rdextent, sdextent;
int rc = MPI_SUCCESS, neighbor;
const int *inedges, *outedges;
int indegree, outdegree;
ompi_request_t **reqs;
ompi_request_t **reqs, **preqs;
indegree = dist_graph->indegree;
outdegree = dist_graph->outdegree;
@ -188,16 +195,17 @@ mca_coll_basic_neighbor_alltoallv_dist_graph(const void *sbuf, const int scounts
ompi_datatype_get_extent(rdtype, &lb, &rdextent);
ompi_datatype_get_extent(sdtype, &lb, &sdextent);
reqs = preqs = mca_coll_basic_get_reqs((mca_coll_basic_module_t *) module, indegree + outdegree);
/* post all receives first */
for (neighbor = 0, reqs = basic_module->mccb_reqs ; neighbor < indegree ; ++neighbor) {
for (neighbor = 0; neighbor < indegree ; ++neighbor) {
rc = MCA_PML_CALL(irecv((char *) rbuf + rdisps[neighbor] * rdextent, rcounts[neighbor], rdtype,
inedges[neighbor], MCA_COLL_BASE_TAG_ALLTOALL, comm, reqs++));
inedges[neighbor], MCA_COLL_BASE_TAG_ALLTOALL, comm, preqs++));
if (OMPI_SUCCESS != rc) break;
}
if (OMPI_SUCCESS != rc) {
/* should probably try to clean up here */
mca_coll_basic_free_reqs( reqs, neighbor );
return rc;
}
@ -205,16 +213,20 @@ mca_coll_basic_neighbor_alltoallv_dist_graph(const void *sbuf, const int scounts
/* remove cast from const when the pml layer is updated to take a const for the send buffer */
rc = MCA_PML_CALL(isend((char *) sbuf + sdisps[neighbor] * sdextent, scounts[neighbor], sdtype,
outedges[neighbor], MCA_COLL_BASE_TAG_ALLTOALL, MCA_PML_BASE_SEND_STANDARD,
comm, reqs++));
comm, preqs++));
if (OMPI_SUCCESS != rc) break;
}
if (OMPI_SUCCESS != rc) {
/* should probably try to clean up here */
mca_coll_basic_free_reqs( reqs, indegree + neighbor );
return rc;
}
return ompi_request_wait_all (indegree + outdegree, basic_module->mccb_reqs, MPI_STATUSES_IGNORE);
rc = ompi_request_wait_all (indegree + outdegree, reqs, MPI_STATUSES_IGNORE);
if (OMPI_SUCCESS != rc) {
mca_coll_basic_free_reqs( reqs, indegree + outdegree );
}
return rc;
}
int mca_coll_basic_neighbor_alltoallv(const void *sbuf, const int scounts[], const int sdisps[],

View file

@ -3,7 +3,7 @@
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2006 The University of Tennessee and The University
* Copyright (c) 2004-2015 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
@ -41,15 +41,15 @@ mca_coll_basic_neighbor_alltoallw_cart(const void *sbuf, const int scounts[], co
const MPI_Aint rdisps[], struct ompi_datatype_t * const *rdtypes,
struct ompi_communicator_t *comm, mca_coll_base_module_t *module)
{
mca_coll_basic_module_t *basic_module = (mca_coll_basic_module_t *) module;
const mca_topo_base_comm_cart_2_2_0_t *cart = comm->c_topo->mtc.cart;
const int rank = ompi_comm_rank (comm);
int rc = MPI_SUCCESS, dim, i, nreqs;
ompi_request_t **reqs;
ompi_request_t **reqs, **preqs;
reqs = preqs = mca_coll_basic_get_reqs( (mca_coll_basic_module_t *) module, 4 * cart->ndims );
/* post receives first */
for (dim = 0, i = 0, nreqs = 0, reqs = basic_module->mccb_reqs ; dim < cart->ndims ; ++dim, i += 2) {
for (dim = 0, i = 0, nreqs = 0; dim < cart->ndims ; ++dim, i += 2) {
int srank = MPI_PROC_NULL, drank = MPI_PROC_NULL;
if (cart->dims[dim] > 1) {
@ -59,22 +59,22 @@ mca_coll_basic_neighbor_alltoallw_cart(const void *sbuf, const int scounts[], co
}
if (MPI_PROC_NULL != srank) {
rc = MCA_PML_CALL(irecv((char *) rbuf + rdisps[i], rcounts[i], rdtypes[i], srank,
MCA_COLL_BASE_TAG_ALLTOALL, comm, reqs++));
if (OMPI_SUCCESS != rc) break;
nreqs++;
rc = MCA_PML_CALL(irecv((char *) rbuf + rdisps[i], rcounts[i], rdtypes[i], srank,
MCA_COLL_BASE_TAG_ALLTOALL, comm, preqs++));
if (OMPI_SUCCESS != rc) break;
}
if (MPI_PROC_NULL != drank) {
rc = MCA_PML_CALL(irecv((char *) rbuf + rdisps[i+1], rcounts[i+1], rdtypes[i+1], drank,
MCA_COLL_BASE_TAG_ALLTOALL, comm, reqs++));
if (OMPI_SUCCESS != rc) break;
nreqs++;
rc = MCA_PML_CALL(irecv((char *) rbuf + rdisps[i+1], rcounts[i+1], rdtypes[i+1], drank,
MCA_COLL_BASE_TAG_ALLTOALL, comm, preqs++));
if (OMPI_SUCCESS != rc) break;
}
}
if (OMPI_SUCCESS != rc) {
/* should probably try to clean up here */
mca_coll_basic_free_reqs( reqs, nreqs );
return rc;
}
@ -88,27 +88,31 @@ mca_coll_basic_neighbor_alltoallw_cart(const void *sbuf, const int scounts[], co
}
if (MPI_PROC_NULL != srank) {
nreqs++;
/* remove cast from const when the pml layer is updated to take a const for the send buffer */
rc = MCA_PML_CALL(isend((char *) sbuf + sdisps[i], scounts[i], sdtypes[i], srank,
MCA_COLL_BASE_TAG_ALLTOALL, MCA_PML_BASE_SEND_STANDARD, comm, reqs++));
MCA_COLL_BASE_TAG_ALLTOALL, MCA_PML_BASE_SEND_STANDARD, comm, preqs++));
if (OMPI_SUCCESS != rc) break;
nreqs++;
}
if (MPI_PROC_NULL != drank) {
rc = MCA_PML_CALL(isend((char *) sbuf + sdisps[i+1], scounts[i+1], sdtypes[i+1], drank,
MCA_COLL_BASE_TAG_ALLTOALL, MCA_PML_BASE_SEND_STANDARD, comm, reqs++));
if (OMPI_SUCCESS != rc) break;
nreqs++;
rc = MCA_PML_CALL(isend((char *) sbuf + sdisps[i+1], scounts[i+1], sdtypes[i+1], drank,
MCA_COLL_BASE_TAG_ALLTOALL, MCA_PML_BASE_SEND_STANDARD, comm, preqs++));
if (OMPI_SUCCESS != rc) break;
}
}
if (OMPI_SUCCESS != rc) {
/* should probably try to clean up here */
mca_coll_basic_free_reqs( reqs, nreqs );
return rc;
}
return ompi_request_wait_all (nreqs, basic_module->mccb_reqs, MPI_STATUSES_IGNORE);
rc = ompi_request_wait_all (nreqs, reqs, MPI_STATUSES_IGNORE);
if (OMPI_SUCCESS != rc) {
mca_coll_basic_free_reqs( reqs, nreqs );
}
return rc;
}
static int
@ -117,14 +121,14 @@ mca_coll_basic_neighbor_alltoallw_graph(const void *sbuf, const int scounts[], c
const MPI_Aint rdisps[], struct ompi_datatype_t * const rdtypes[],
struct ompi_communicator_t *comm, mca_coll_base_module_t *module)
{
mca_coll_basic_module_t *basic_module = (mca_coll_basic_module_t *) module;
const mca_topo_base_comm_graph_2_2_0_t *graph = comm->c_topo->mtc.graph;
int rc = MPI_SUCCESS, neighbor, degree;
const int rank = ompi_comm_rank (comm);
ompi_request_t **reqs;
ompi_request_t **reqs, **preqs;
const int *edges;
mca_topo_base_graph_neighbors_count (comm, rank, &degree);
reqs = preqs = mca_coll_basic_get_reqs( (mca_coll_basic_module_t *) module, 2 * degree );
edges = graph->edges;
if (rank > 0) {
@ -132,14 +136,14 @@ mca_coll_basic_neighbor_alltoallw_graph(const void *sbuf, const int scounts[], c
}
/* post all receives first */
for (neighbor = 0, reqs = basic_module->mccb_reqs ; neighbor < degree ; ++neighbor) {
for (neighbor = 0; neighbor < degree ; ++neighbor) {
rc = MCA_PML_CALL(irecv((char *) rbuf + rdisps[neighbor], rcounts[neighbor], rdtypes[neighbor],
edges[neighbor], MCA_COLL_BASE_TAG_ALLTOALL, comm, reqs++));
edges[neighbor], MCA_COLL_BASE_TAG_ALLTOALL, comm, preqs++));
if (OMPI_SUCCESS != rc) break;
}
if (OMPI_SUCCESS != rc) {
/* should probably try to clean up here */
mca_coll_basic_free_reqs( reqs, neighbor );
return rc;
}
@ -147,16 +151,20 @@ mca_coll_basic_neighbor_alltoallw_graph(const void *sbuf, const int scounts[], c
/* remove cast from const when the pml layer is updated to take a const for the send buffer */
rc = MCA_PML_CALL(isend((char *) sbuf + sdisps[neighbor], scounts[neighbor], sdtypes[neighbor],
edges[neighbor], MCA_COLL_BASE_TAG_ALLTOALL, MCA_PML_BASE_SEND_STANDARD,
comm, reqs++));
comm, preqs++));
if (OMPI_SUCCESS != rc) break;
}
if (OMPI_SUCCESS != rc) {
/* should probably try to clean up here */
mca_coll_basic_free_reqs( reqs, neighbor + degree );
return rc;
}
return ompi_request_wait_all (degree * 2, basic_module->mccb_reqs, MPI_STATUSES_IGNORE);
rc = ompi_request_wait_all (degree * 2, reqs, MPI_STATUSES_IGNORE);
if (OMPI_SUCCESS != rc) {
mca_coll_basic_free_reqs( reqs, degree * 2 );
}
return rc;
}
static int
@ -165,12 +173,11 @@ mca_coll_basic_neighbor_alltoallw_dist_graph(const void *sbuf, const int scounts
const MPI_Aint rdisps[], struct ompi_datatype_t * const *rdtypes,
struct ompi_communicator_t *comm, mca_coll_base_module_t *module)
{
mca_coll_basic_module_t *basic_module = (mca_coll_basic_module_t *) module;
const mca_topo_base_comm_dist_graph_2_2_0_t *dist_graph = comm->c_topo->mtc.dist_graph;
int rc = MPI_SUCCESS, neighbor;
const int *inedges, *outedges;
int indegree, outdegree;
ompi_request_t **reqs;
ompi_request_t **reqs, **preqs;
indegree = dist_graph->indegree;
outdegree = dist_graph->outdegree;
@ -178,15 +185,16 @@ mca_coll_basic_neighbor_alltoallw_dist_graph(const void *sbuf, const int scounts
inedges = dist_graph->in;
outedges = dist_graph->out;
reqs = preqs = mca_coll_basic_get_reqs( (mca_coll_basic_module_t *) module, indegree + outdegree );
/* post all receives first */
for (neighbor = 0, reqs = basic_module->mccb_reqs ; neighbor < indegree ; ++neighbor) {
for (neighbor = 0; neighbor < indegree ; ++neighbor) {
rc = MCA_PML_CALL(irecv((char *) rbuf + rdisps[neighbor], rcounts[neighbor], rdtypes[neighbor],
inedges[neighbor], MCA_COLL_BASE_TAG_ALLTOALL, comm, reqs++));
inedges[neighbor], MCA_COLL_BASE_TAG_ALLTOALL, comm, preqs++));
if (OMPI_SUCCESS != rc) break;
}
if (OMPI_SUCCESS != rc) {
/* should probably try to clean up here */
mca_coll_basic_free_reqs( reqs, neighbor );
return rc;
}
@ -194,16 +202,20 @@ mca_coll_basic_neighbor_alltoallw_dist_graph(const void *sbuf, const int scounts
/* remove cast from const when the pml layer is updated to take a const for the send buffer */
rc = MCA_PML_CALL(isend((char *) sbuf + sdisps[neighbor], scounts[neighbor], sdtypes[neighbor],
outedges[neighbor], MCA_COLL_BASE_TAG_ALLTOALL, MCA_PML_BASE_SEND_STANDARD,
comm, reqs++));
comm, preqs++));
if (OMPI_SUCCESS != rc) break;
}
if (OMPI_SUCCESS != rc) {
/* should probably try to clean up here */
mca_coll_basic_free_reqs( reqs, indegree + neighbor );
return rc;
}
return ompi_request_wait_all (indegree + outdegree, basic_module->mccb_reqs, MPI_STATUSES_IGNORE);
rc = ompi_request_wait_all (indegree + outdegree, reqs, MPI_STATUSES_IGNORE);
if (OMPI_SUCCESS != rc) {
mca_coll_basic_free_reqs( reqs, indegree + outdegree );
}
return rc;
}
int mca_coll_basic_neighbor_alltoallw(const void *sbuf, const int scounts[], const MPI_Aint sdisps[],

View file

@ -48,8 +48,7 @@ mca_coll_basic_scatter_inter(const void *sbuf, int scount,
int i, size, err;
char *ptmp;
ptrdiff_t lb, incr;
mca_coll_basic_module_t *basic_module = (mca_coll_basic_module_t*) module;
ompi_request_t **reqs = basic_module->mccb_reqs;
ompi_request_t **reqs;
/* Initialize */
size = ompi_comm_remote_size(comm);
@ -69,6 +68,8 @@ mca_coll_basic_scatter_inter(const void *sbuf, int scount,
return OMPI_ERROR;
}
reqs = mca_coll_basic_get_reqs((mca_coll_basic_module_t*) module, size);
incr *= scount;
for (i = 0, ptmp = (char *) sbuf; i < size; ++i, ptmp += incr) {
err = MCA_PML_CALL(isend(ptmp, scount, sdtype, i,
@ -76,13 +77,15 @@ mca_coll_basic_scatter_inter(const void *sbuf, int scount,
MCA_PML_BASE_SEND_STANDARD, comm,
reqs++));
if (OMPI_SUCCESS != err) {
mca_coll_basic_free_reqs(reqs, i);
return err;
}
}
err =
ompi_request_wait_all(size, basic_module->mccb_reqs,
MPI_STATUSES_IGNORE);
err = ompi_request_wait_all(size, reqs, MPI_STATUSES_IGNORE);
if (OMPI_SUCCESS != err) {
mca_coll_basic_free_reqs(reqs, size);
}
}
return err;

View file

@ -2,7 +2,7 @@
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* Copyright (c) 2004-2015 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
@ -121,8 +121,7 @@ mca_coll_basic_scatterv_inter(const void *sbuf, const int *scounts,
int i, size, err;
char *ptmp;
ptrdiff_t lb, extent;
mca_coll_basic_module_t *basic_module = (mca_coll_basic_module_t*) module;
ompi_request_t **reqs = basic_module->mccb_reqs;
ompi_request_t **reqs;
/* Initialize */
size = ompi_comm_remote_size(comm);
@ -145,6 +144,7 @@ mca_coll_basic_scatterv_inter(const void *sbuf, const int *scounts,
return OMPI_ERROR;
}
reqs = mca_coll_basic_get_reqs((mca_coll_basic_module_t*) module, size);
for (i = 0; i < size; ++i) {
ptmp = ((char *) sbuf) + (extent * disps[i]);
err = MCA_PML_CALL(isend(ptmp, scounts[i], sdtype, i,
@ -152,11 +152,15 @@ mca_coll_basic_scatterv_inter(const void *sbuf, const int *scounts,
MCA_PML_BASE_SEND_STANDARD, comm,
&(reqs[i])));
if (OMPI_SUCCESS != err) {
mca_coll_basic_free_reqs(reqs, i);
return err;
}
}
err = ompi_request_wait_all(size, reqs, MPI_STATUSES_IGNORE);
if (OMPI_SUCCESS != err) {
mca_coll_basic_free_reqs(reqs, size);
}
}
/* All done */

View file

@ -145,9 +145,6 @@ int mca_coll_self_ft_event(int state);
struct mca_coll_self_module_t {
mca_coll_base_module_t super;
ompi_request_t **mccb_reqs;
int mccb_num_reqs;
};
typedef struct mca_coll_self_module_t mca_coll_self_module_t;
OBJ_CLASS_DECLARATION(mca_coll_self_module_t);