Fix MPI_Alltoallv in coll/tuned.

Correctly handle the corner case in MPI_Alltoallv where some tasks have no data to transfer while others do. This case is covered by the ibm/collective/alltoallv_somezeros test in the ompi-tests repo.

cmr=v1.8.2:reviewer=bosilca

This commit was SVN r31985.
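For context, the failing scenario is an alltoallv exchange in which some ranks contribute zero-length counts. The actual ibm/collective/alltoallv_somezeros test is not reproduced here; the following is only a minimal sketch of that kind of pattern written against the public MPI API (the even/odd split and buffer layout are illustrative assumptions):

/* Minimal sketch: MPI_Alltoallv where some ranks send nothing.
 * Illustrative only; not the ibm/collective/alltoallv_somezeros source. */
#include <mpi.h>
#include <stdlib.h>

int main(int argc, char *argv[])
{
    int rank, size, i;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    int *scounts = calloc(size, sizeof(int));
    int *rcounts = calloc(size, sizeof(int));
    int *sdispls = calloc(size, sizeof(int));
    int *rdispls = calloc(size, sizeof(int));
    int *sbuf    = malloc(size * sizeof(int));
    int *rbuf    = malloc(size * sizeof(int));

    /* Even ranks send one int to every peer; odd ranks send nothing.
     * Receive counts mirror this, so every pairwise amount matches. */
    for (i = 0; i < size; i++) {
        scounts[i] = (0 == rank % 2) ? 1 : 0;
        rcounts[i] = (0 == i % 2) ? 1 : 0;
        sdispls[i] = i;
        rdispls[i] = i;
        sbuf[i]    = rank;
    }

    MPI_Alltoallv(sbuf, scounts, sdispls, MPI_INT,
                  rbuf, rcounts, rdispls, MPI_INT, MPI_COMM_WORLD);

    free(sbuf); free(rbuf);
    free(scounts); free(rcounts); free(sdispls); free(rdispls);
    MPI_Finalize();
    return 0;
}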
This commit is contained in:
parent b51a42aeca
commit 50256c62c5
@@ -9,6 +9,8 @@
  * University of Stuttgart.  All rights reserved.
  * Copyright (c) 2004-2005 The Regents of the University of California.
  *                         All rights reserved.
+ * Copyright (c) 2014      Research Organization for Information Science
+ *                         and Technology (RIST). All rights reserved.
  * $COPYRIGHT$
  *
  * Additional copyrights may follow
@@ -90,6 +92,82 @@ int ompi_coll_tuned_sendrecv_actual( void* sendbuf, size_t scount,
     return (err);
 }
 
+int ompi_coll_tuned_sendrecv_nonzero_actual( void* sendbuf, size_t scount,
+                                             ompi_datatype_t* sdatatype,
+                                             int dest, int stag,
+                                             void* recvbuf, size_t rcount,
+                                             ompi_datatype_t* rdatatype,
+                                             int source, int rtag,
+                                             struct ompi_communicator_t* comm,
+                                             ompi_status_public_t* status )
+
+{ /* post receive first, then send, then waitall... should be fast (I hope) */
+    int err, line = 0, nreqs = 0;
+    size_t typesize;
+    ompi_request_t* reqs[2], **req = reqs;
+    ompi_status_public_t statuses[2];
+
+    /* post new irecv */
+    ompi_datatype_type_size(rdatatype, &typesize);
+    if (0 != rcount && 0 != typesize) {
+        err = MCA_PML_CALL(irecv( recvbuf, rcount, rdatatype, source, rtag,
+                                  comm, req++));
+        ++nreqs;
+        if (err != MPI_SUCCESS) { line = __LINE__; goto error_handler; }
+    }
+
+    /* send data to children */
+    ompi_datatype_type_size(sdatatype, &typesize);
+    if (0 != scount && 0 != typesize) {
+        err = MCA_PML_CALL(isend( sendbuf, scount, sdatatype, dest, stag,
+                                  MCA_PML_BASE_SEND_STANDARD, comm, req++));
+        ++nreqs;
+        if (err != MPI_SUCCESS) { line = __LINE__; goto error_handler; }
+    }
+
+    if (0 != nreqs) {
+        err = ompi_request_wait_all( nreqs, reqs, statuses );
+        if (err != MPI_SUCCESS) { line = __LINE__; goto error_handler; }
+
+        if (MPI_STATUS_IGNORE != status) {
+            *status = statuses[0];
+        }
+    } else {
+        /* FIXME this is currently unsupported but unused */
+        assert (MPI_STATUS_IGNORE == status);
+    }
+
+    return (MPI_SUCCESS);
+
+ error_handler:
+    /* As we use wait_all we will get MPI_ERR_IN_STATUS which is not an error
+     * code that we can propagate up the stack. Instead, look for the real
+     * error code from the MPI_ERROR in the status.
+     */
+    if( MPI_ERR_IN_STATUS == err ) {
+        /* At least we know the error was detected during the wait_all */
+        int err_index = 0;
+        if( MPI_SUCCESS == statuses[0].MPI_ERROR ) {
+            err_index = 1;
+        }
+        if (MPI_STATUS_IGNORE != status) {
+            *status = statuses[err_index];
+        }
+        err = statuses[err_index].MPI_ERROR;
+        OPAL_OUTPUT ((ompi_coll_tuned_stream, "%s:%d: Error %d occurred (req index %d)\n",
+                      __FILE__, line, err, err_index));
+    } else {
+        /* Error discovered during the posting of the irecv or isend,
+         * and no status is available.
+         */
+        OPAL_OUTPUT ((ompi_coll_tuned_stream, "%s:%d: Error %d occurred\n",
+                      __FILE__, line, err));
+        if (MPI_STATUS_IGNORE != status) {
+            status->MPI_ERROR = err;
+        }
+    }
+    return (err);
+}
+
 /*
  * localcompleted version that makes sure the send has completed locally
  * Currently this is a sync call, but will change to locally completed
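The heart of the new helper is that it posts an irecv/isend only when both the count and the datatype size are non-zero, and then waits only on the requests it actually posted. For readers outside the Open MPI tree, the same skip-zero pattern can be expressed with the public MPI API roughly as follows (the function name sendrecv_skip_zero and the MPI_STATUSES_IGNORE shortcut are illustrative assumptions, not Open MPI internals):

/* Standalone illustration of the "post only non-zero requests" idea behind
 * ompi_coll_tuned_sendrecv_nonzero_actual, using public MPI calls only. */
#include <mpi.h>

static int sendrecv_skip_zero(void *sbuf, int scount, MPI_Datatype sdt,
                              int dest, int stag,
                              void *rbuf, int rcount, MPI_Datatype rdt,
                              int source, int rtag, MPI_Comm comm)
{
    MPI_Request reqs[2];
    int nreqs = 0, ssize, rsize;

    /* Post the receive only if there is really something to receive. */
    MPI_Type_size(rdt, &rsize);
    if (0 != rcount && 0 != rsize) {
        MPI_Irecv(rbuf, rcount, rdt, source, rtag, comm, &reqs[nreqs++]);
    }

    /* Likewise, skip the send entirely when the payload is empty. */
    MPI_Type_size(sdt, &ssize);
    if (0 != scount && 0 != ssize) {
        MPI_Isend(sbuf, scount, sdt, dest, stag, comm, &reqs[nreqs++]);
    }

    /* Wait only on what was actually posted; with zero requests this is a
     * no-op, which is the corner case the commit addresses for peers that
     * exchange no data. */
    return MPI_Waitall(nreqs, reqs, MPI_STATUSES_IGNORE);
}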
@@ -9,6 +9,8 @@
  * University of Stuttgart.  All rights reserved.
  * Copyright (c) 2004-2005 The Regents of the University of California.
  *                         All rights reserved.
+ * Copyright (c) 2014      Research Organization for Information Science
+ *                         and Technology (RIST). All rights reserved.
  * $COPYRIGHT$
  *
  * Additional copyrights may follow
@@ -39,6 +41,15 @@ int ompi_coll_tuned_sendrecv_actual( void* sendbuf, size_t scount,
                                      struct ompi_communicator_t* comm,
                                      ompi_status_public_t* status );
 
+int ompi_coll_tuned_sendrecv_nonzero_actual( void* sendbuf, size_t scount,
+                                             ompi_datatype_t* sdatatype,
+                                             int dest, int stag,
+                                             void* recvbuf, size_t rcount,
+                                             ompi_datatype_t* rdatatype,
+                                             int source, int rtag,
+                                             struct ompi_communicator_t* comm,
+                                             ompi_status_public_t* status );
+
 
 /* inline functions */
 
@@ -54,7 +65,7 @@ ompi_coll_tuned_sendrecv( void* sendbuf, size_t scount, ompi_datatype_t* sdataty
         return (int) ompi_datatype_sndrcv(sendbuf, (int32_t) scount, sdatatype,
                                           recvbuf, (int32_t) rcount, rdatatype);
     }
-    return ompi_coll_tuned_sendrecv_actual (sendbuf, scount, sdatatype,
+    return ompi_coll_tuned_sendrecv_nonzero_actual (sendbuf, scount, sdatatype,
                                             dest, stag,
                                             recvbuf, rcount, rdatatype,
                                             source, rtag, comm, status);