
coll/base: give a boost to ompi_coll_base_sendrecv_nonzero_actual()

Based on the current implementation, it is faster to use a blocking
send than the non-blocking version. Switch the exchange function
used in the barrier to a blocking send combined with a
non-blocking receive.

This is similar to open-mpi/ompi@223d75595d
This commit is contained in:
Gilles Gouaillardet 2016-08-04 13:30:12 +09:00
parent 3e17e2fb29
commit 60e91e890a
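The commit message describes the change at the level of Open MPI's internal PML calls. As a rough illustration only, the sketch below expresses the same exchange pattern against the public MPI API; the function name sendrecv_sketch is hypothetical and not part of this commit. Posting the receive before the blocking send keeps a symmetric exchange (as in a barrier) deadlock-free, because each peer's send matches a receive that is already posted on the other side.

/* Hypothetical sketch (not the commit's code): post the receive first,
 * use a blocking send, then wait only on the receive, mirroring the
 * irecv + send + wait structure of ompi_coll_base_sendrecv_nonzero_actual(). */
#include <mpi.h>

static int sendrecv_sketch(void *sendbuf, int scount, MPI_Datatype sdtype,
                           int dest, int stag,
                           void *recvbuf, int rcount, MPI_Datatype rdtype,
                           int source, int rtag,
                           MPI_Comm comm, MPI_Status *status)
{
    MPI_Request req;
    int err;

    /* Post the receive first so the peer's send can match immediately. */
    err = MPI_Irecv(recvbuf, rcount, rdtype, source, rtag, comm, &req);
    if (MPI_SUCCESS != err) return err;

    /* Blocking send: per the commit message, faster than the non-blocking
     * send with the current implementation. */
    err = MPI_Send(sendbuf, scount, sdtype, dest, stag, comm);
    if (MPI_SUCCESS != err) return err;

    /* Only the receive is outstanding, so a single wait suffices and the
     * MPI_ERR_IN_STATUS unpacking required after a waitall goes away. */
    return MPI_Wait(&req, status);
}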


@@ -39,54 +39,33 @@ int ompi_coll_base_sendrecv_nonzero_actual( void* sendbuf, size_t scount,
                                             ompi_status_public_t* status )
 
 { /* post receive first, then send, then waitall... should be fast (I hope) */
-    int err, line = 0, nreqs = 0;
-    size_t typesize;
-    ompi_request_t* reqs[2], **req = reqs;
-    ompi_status_public_t statuses[2];
+    int err, line = 0;
+    size_t rtypesize, stypesize;
+    ompi_request_t *req;
+    ompi_status_public_t rstatus;
 
     /* post new irecv */
-    ompi_datatype_type_size(rdatatype, &typesize);
-    if (0 != rcount && 0 != typesize) {
+    ompi_datatype_type_size(rdatatype, &rtypesize);
+    if (0 != rcount && 0 != rtypesize) {
         err = MCA_PML_CALL(irecv( recvbuf, rcount, rdatatype, source, rtag,
-                                  comm, req++));
-        ++nreqs;
+                                  comm, &req));
         if (err != MPI_SUCCESS) { line = __LINE__; goto error_handler; }
     }
 
     /* send data to children */
-    ompi_datatype_type_size(sdatatype, &typesize);
-    if (0 != scount && 0 != typesize) {
-        err = MCA_PML_CALL(isend( sendbuf, scount, sdatatype, dest, stag,
-                                  MCA_PML_BASE_SEND_STANDARD, comm, req++));
-        ++nreqs;
+    ompi_datatype_type_size(sdatatype, &stypesize);
+    if (0 != scount && 0 != stypesize) {
+        err = MCA_PML_CALL(send( sendbuf, scount, sdatatype, dest, stag,
+                                 MCA_PML_BASE_SEND_STANDARD, comm));
         if (err != MPI_SUCCESS) { line = __LINE__; goto error_handler; }
     }
 
-    if (0 != nreqs) {
-        err = ompi_request_wait_all( nreqs, reqs, statuses );
-        if( MPI_ERR_IN_STATUS == err ) { line = __LINE__;
-            /* As we use wait_all we will get MPI_ERR_IN_STATUS which is not an error
-             * code that we can propagate up the stack. Instead, look for the real
-             * error code from the MPI_ERROR in the status.
-             */
-            int err_index = 0;
-            if( MPI_SUCCESS == statuses[0].MPI_ERROR
-                || MPI_ERR_PENDING == statuses[0].MPI_ERROR ) {
-                err_index = 1;
-            }
-            if (MPI_STATUS_IGNORE != status) {
-                *status = statuses[err_index];
-            }
-            err = statuses[err_index].MPI_ERROR;
-            OPAL_OUTPUT ((ompi_coll_base_framework.framework_output, "%s:%d: Error %d occurred in the %s"
-                          " stage of ompi_coll_base_sendrecv_zero\n",
-                          __FILE__, line, err, (0 == err_index ? "receive" : "send")));
-            return err;
-        }
+    if (0 != rcount && 0 != rtypesize) {
+        err = ompi_request_wait( &req, &rstatus);
+        if (err != MPI_SUCCESS) { line = __LINE__; goto error_handler; }
         if (MPI_STATUS_IGNORE != status) {
-            *status = statuses[0];
+            *status = rstatus;
         }
     } else {
         if( MPI_STATUS_IGNORE != status )
@@ -96,7 +75,7 @@ int ompi_coll_base_sendrecv_nonzero_actual( void* sendbuf, size_t scount,
     return (MPI_SUCCESS);
 
  error_handler:
-    /* Error discovered during the posting of the irecv or isend,
+    /* Error discovered during the posting of the irecv or send,
      * and no status is available.
      */
     OPAL_OUTPUT ((ompi_coll_base_framework.framework_output, "%s:%d: Error %d occurred\n",