Cleanups.
This commit was SVN r24458.
parent cdd1928edf
commit 87f3109df4
--- a/ompi/mca/coll/tuned/coll_tuned_allgather.c
+++ b/ompi/mca/coll/tuned/coll_tuned_allgather.c
@@ -83,7 +83,7 @@ int ompi_coll_tuned_allgather_intra_bruck(void *sbuf, int scount,
                                           void* rbuf, int rcount,
                                           struct ompi_datatype_t *rdtype,
                                           struct ompi_communicator_t *comm,
-                                           mca_coll_base_module_t *module)
+                                          mca_coll_base_module_t *module)
 {
     int line = -1;
     int rank, size;
@@ -158,7 +158,7 @@ int ompi_coll_tuned_allgather_intra_bruck(void *sbuf, int scount,
 
     /* Finalization step:
        On all nodes except 0, data needs to be shifted locally:
-       - create temprary shift buffer,
+       - create temporary shift buffer,
          see discussion in coll_basic_reduce.c about the size and begining
          of temporary buffer.
        - copy blocks [0 .. (size - rank - 1)] in rbuf to shift buffer
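The comment touched by this hunk describes the local rotation that finishes the Bruck allgather: rank r exits the exchange rounds holding blocks in the order [r, r+1, ..., size-1, 0, ..., r-1], and rotating by (size - rank) blocks restores the canonical order. Below is a minimal sketch of that step, assuming contiguous blocks and hypothetical names (bruck_shift_blocks, block_bytes); the actual Open MPI code copies through the datatype engine and sizes the temporary buffer as discussed in coll_basic_reduce.c.

#include <stdlib.h>
#include <string.h>

/* Hedged sketch of the finalization shift, contiguous case only.
 * block_bytes is the size of one per-rank block in bytes. */
static int bruck_shift_blocks(char *rbuf, size_t block_bytes,
                              int rank, int size)
{
    if (0 == rank) return 0;              /* rank 0 is already in order */

    size_t head = (size_t)(size - rank) * block_bytes; /* blocks [0 .. size-rank-1] */
    size_t tail = (size_t)rank * block_bytes;          /* blocks [size-rank .. size-1] */
    char *shift_buf = (char*) malloc(head);            /* temporary shift buffer */
    if (NULL == shift_buf) return -1;

    memcpy(shift_buf, rbuf, head);         /* save the leading blocks        */
    memmove(rbuf, rbuf + head, tail);      /* slide the trailing blocks up   */
    memcpy(rbuf + tail, shift_buf, head);  /* place saved blocks at the end  */

    free(shift_buf);
    return 0;
}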
--- a/ompi/mca/coll/tuned/coll_tuned_util.c
+++ b/ompi/mca/coll/tuned/coll_tuned_util.c
@@ -52,7 +52,7 @@ int ompi_coll_tuned_sendrecv_actual( void* sendbuf, int scount,
     if (err != MPI_SUCCESS) { line = __LINE__; goto error_handler; }
 
     err = ompi_request_wait_all( 2, reqs, statuses );
-    if (err != MPI_SUCCESS) { line = __LINE__; goto error_handler_waitall; }
+    if (err != MPI_SUCCESS) { line = __LINE__; goto error_handler; }
 
     if (MPI_STATUS_IGNORE != status) {
         *status = statuses[0];
@@ -60,15 +60,15 @@ int ompi_coll_tuned_sendrecv_actual( void* sendbuf, int scount,
 
     return (MPI_SUCCESS);
 
- error_handler_waitall:
+ error_handler:
     /* As we use wait_all we will get MPI_ERR_IN_STATUS which is not an error
      * code that we can propagate up the stack. Instead, look for the real
      * error code from the MPI_ERROR in the status.
      */
     if( MPI_ERR_IN_STATUS == err ) {
-        /* At least we know he error was detected during the wait_all */
+        /* At least we know the error was detected during the wait_all */
         int err_index = 0;
-        if( MPI_SUCCESS != statuses[1].MPI_ERROR ) {
+        if( MPI_SUCCESS == statuses[0].MPI_ERROR ) {
             err_index = 1;
         }
         if (MPI_STATUS_IGNORE != status) {
@@ -78,7 +78,6 @@ int ompi_coll_tuned_sendrecv_actual( void* sendbuf, int scount,
         OPAL_OUTPUT ((ompi_coll_tuned_stream, "%s:%d: Error %d occurred (req index %d)\n",
                       __FILE__, line, err, err_index));
     } else {
- error_handler:
         /* Error discovered during the posting of the irecv or isend,
          * and no status is available.
          */
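The three hunks above fold the two labels (error_handler_waitall and the inner error_handler) into a single handler, and pick the failing request index out of the statuses array by testing statuses[0] instead of statuses[1]. A self-contained sketch of the same pattern against the public MPI API is shown below; the two hunks that follow apply the identical fix to the _localcompleted variant. It assumes the communicator uses MPI_ERRORS_RETURN; the function name, tag, datatype, and the fprintf stand-in for OPAL_OUTPUT are placeholders, not Open MPI code.

#include <mpi.h>
#include <stdio.h>

/* Sketch: one merged error handler for both the posting path and the
 * wait_all path. MPI_Waitall reports per-request failures by returning
 * MPI_ERR_IN_STATUS; the real error code must be recovered from the
 * MPI_ERROR field of the failing status. */
static int sendrecv_with_unified_handler(void *sbuf, void *rbuf, int count,
                                         int peer, MPI_Comm comm,
                                         MPI_Status *status)
{
    MPI_Request reqs[2];
    MPI_Status statuses[2];
    int line = -1, err;

    err = MPI_Irecv(rbuf, count, MPI_BYTE, peer, 0, comm, &reqs[0]);
    if (MPI_SUCCESS != err) { line = __LINE__; goto error_handler; }
    /* Note: if this post fails, the pending recv is abandoned for brevity. */
    err = MPI_Isend(sbuf, count, MPI_BYTE, peer, 0, comm, &reqs[1]);
    if (MPI_SUCCESS != err) { line = __LINE__; goto error_handler; }

    err = MPI_Waitall(2, reqs, statuses);
    if (MPI_SUCCESS != err) { line = __LINE__; goto error_handler; }

    if (MPI_STATUS_IGNORE != status) *status = statuses[0];
    return MPI_SUCCESS;

 error_handler:
    if (MPI_ERR_IN_STATUS == err) {
        /* Failure detected inside the wait_all: find the failing request. */
        int err_index = (MPI_SUCCESS == statuses[0].MPI_ERROR) ? 1 : 0;
        err = statuses[err_index].MPI_ERROR;
        if (MPI_STATUS_IGNORE != status) *status = statuses[err_index];
        fprintf(stderr, "%s:%d: error %d (req index %d)\n",
                __FILE__, line, err, err_index);
    } else {
        /* Failure while posting: no statuses were filled in, so propagate
         * the error code into the caller's status directly. */
        if (MPI_STATUS_IGNORE != status) status->MPI_ERROR = err;
        fprintf(stderr, "%s:%d: error %d\n", __FILE__, line, err);
    }
    return err;
}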
@@ -136,8 +135,9 @@ int ompi_coll_tuned_sendrecv_actual_localcompleted( void* sendbuf, int scount,
      * error code from the MPI_ERROR in the status.
      */
     if( MPI_ERR_IN_STATUS == err ) {
+        /* At least we know the error was detected during the wait_all */
         int err_index = 0;
-        if( MPI_SUCCESS != statuses[1].MPI_ERROR ) {
+        if( MPI_SUCCESS == statuses[0].MPI_ERROR ) {
             err_index = 1;
         }
         if (MPI_STATUS_IGNORE != status) {
@@ -147,8 +147,11 @@ int ompi_coll_tuned_sendrecv_actual_localcompleted( void* sendbuf, int scount,
         OPAL_OUTPUT ((ompi_coll_tuned_stream, "%s:%d: Error %d occurred (req index %d)\n",
                       __FILE__,line,err, err_index));
     } else {
         /* Error discovered during the posting of the irecv or isend,
          * and no status is available.
          */
         OPAL_OUTPUT ((ompi_coll_tuned_stream, "%s:%d: Error %d occurred\n",
-                      __FILE__,line,err));
+                      __FILE__, line, err));
+        if (MPI_STATUS_IGNORE != status) {
+            status->MPI_ERROR = err;
+        }
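The three lines added at the end of this last hunk close a small gap rather than just tidying whitespace: when the failure happens while posting the irecv or isend, the statuses array was never filled in, so a caller that passed a real status object would otherwise see stale contents. Writing err into status->MPI_ERROR, guarded by the same MPI_STATUS_IGNORE check used elsewhere in the file, gives the posting path the same error visibility as the wait_all path.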