
Adjust patch in r26172 to only set the MPI_ERROR field in the status object returned from MPI_Waitall instead of using the internal req_status object to carry it around.

Note that the previous patch allowed the following test to -pass-:
  ompi-tests/mpich_tester/mpich_pt2pt/truncmult.c

This patch makes that test -fail- because the test assumes that MPI_Wait will update the status.MPI_ERROR field. Open MPI does not do this, so the MPI_ERROR field being inspected remains set to MPI_ERR_PENDING. See the comments in req_wait.c for why we do this.

If the test is changed so that it does not inspect the MPI_ERROR field after a successful MPI_Wait, then it passes correctly with this patch.
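
As a rough illustration of the pattern at issue -- hypothetical code, not taken from the test; the helper name, the request count handling, and the assumption that the communicator's error handler is MPI_ERRORS_RETURN are all made up for this sketch:

    #include <assert.h>
    #include <mpi.h>

    /* Hypothetical helper: complete 'count' requests, then wait again on any
     * request that MPI_Waitall left marked MPI_ERR_PENDING because some
     * *other* request failed. */
    static int waitall_and_drain(int count, MPI_Request reqs[], MPI_Status stats[])
    {
        int rc = MPI_Waitall(count, reqs, stats);
        if (MPI_ERR_IN_STATUS != rc) {
            return rc;
        }
        for (int i = 0; i < count; ++i) {
            if (MPI_ERR_PENDING == stats[i].MPI_ERROR) {
                /* Neither failed nor completed: wait on it again and check
                 * MPI_Wait's return code.  Do not re-inspect
                 * stats[i].MPI_ERROR here: Open MPI does not update that
                 * field on MPI_Wait, so it still reads MPI_ERR_PENDING even
                 * after a successful completion. */
                int wrc = MPI_Wait(&reqs[i], &stats[i]);
                assert(MPI_SUCCESS == wrc);
            }
        }
        return rc;
    }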

This change was made per the discussion on the email thread below:
  http://www.open-mpi.org/community/lists/devel/2012/03/10753.php

This commit was SVN r26177.

The following SVN revision numbers were found above:
  r26172 --> open-mpi/ompi@03a33417d5
Josh Hursey 2012-03-22 14:09:19 +00:00
parent 3bf038bb1c
commit 5af13d0d86
4 changed files with 31 additions and 92 deletions

View file

@@ -142,14 +142,10 @@ int ompi_errhandler_request_invoke(int count,
     /* Since errors on requests cause them to not be freed (until we
        can examine them here), go through and free all requests with
        errors. We only invoke the exception on the *first* request
-       that had an error.
-       Make sure we do not free the reqeust if it is marked as pending
-       since the user will need to wait on it again for completion.
-    */
+       that had an error. */
     for (; i < count; ++i) {
         if (MPI_REQUEST_NULL != requests[i] &&
-            MPI_SUCCESS != requests[i]->req_status.MPI_ERROR &&
-            MPI_ERR_PENDING != requests[i]->req_status.MPI_ERROR ) {
+            MPI_SUCCESS != requests[i]->req_status.MPI_ERROR) {
             /* Ignore the error -- what are we going to do? We're
                already going to invoke an exception */
             ompi_request_free(&(requests[i]));

View file

@@ -47,13 +47,6 @@ int ompi_request_default_test( ompi_request_t ** rptr,
     }
     if (request->req_complete) {
-        /*
-         * Reset the error code of a previously pending request.
-         */
-        if( MPI_ERR_PENDING == request->req_status.MPI_ERROR ) {
-            request->req_status.MPI_ERROR = MPI_SUCCESS;
-        }
         OMPI_CRCP_REQUEST_COMPLETE(request);
         *completed = true;
@@ -125,13 +118,6 @@ int ompi_request_default_test_any(
         }
         if( request->req_complete ) {
-            /*
-             * Reset the error code of a previously pending request.
-             */
-            if( MPI_ERR_PENDING == request->req_status.MPI_ERROR ) {
-                request->req_status.MPI_ERROR = MPI_SUCCESS;
-            }
             OMPI_CRCP_REQUEST_COMPLETE(request);
             *index = i;
@@ -208,13 +194,6 @@ int ompi_request_default_test_all(
         if( request->req_state == OMPI_REQUEST_INACTIVE ||
             request->req_complete) {
-            /*
-             * Reset the error code of a previously pending request.
-             */
-            if( MPI_ERR_PENDING == request->req_status.MPI_ERROR ) {
-                request->req_status.MPI_ERROR = MPI_SUCCESS;
-            }
             OMPI_CRCP_REQUEST_COMPLETE(request);
             num_completed++;
         }
@@ -320,13 +299,6 @@ int ompi_request_default_test_some(
             continue;
         }
         if (true == request->req_complete) {
-            /*
-             * Reset the error code of a previously pending request.
-             */
-            if( MPI_ERR_PENDING == request->req_status.MPI_ERROR ) {
-                request->req_status.MPI_ERROR = MPI_SUCCESS;
-            }
             OMPI_CRCP_REQUEST_COMPLETE(request);
             indices[num_requests_done++] = i;
         }

View file

@@ -36,16 +36,6 @@ int ompi_request_default_wait(
 {
     ompi_request_t *req = *req_ptr;
-    /*
-     * Reset the error code of a previously pending request.
-     */
-    if( MPI_ERR_PENDING == req->req_status.MPI_ERROR ) {
-        req->req_status.MPI_ERROR = MPI_SUCCESS;
-        if( MPI_STATUS_IGNORE != status ) {
-            status->MPI_ERROR = MPI_SUCCESS;
-        }
-    }
     ompi_request_wait_completion(req);
 #if OPAL_ENABLE_FT_CR == 1
@@ -156,12 +146,6 @@ int ompi_request_default_wait_any(
             continue;
         }
         if (request->req_complete == true) {
-            /*
-             * Reset the error code of a previously pending request.
-             */
-            if( MPI_ERR_PENDING == request->req_status.MPI_ERROR ) {
-                request->req_status.MPI_ERROR = MPI_SUCCESS;
-            }
             completed = i;
             break;
         }
@@ -241,13 +225,7 @@ int ompi_request_default_wait_all( size_t count,
             request = *rptr++;
             if (request->req_complete == true) {
-                /*
-                 * Reset the error code of a previously pending request.
-                 */
-                if( MPI_ERR_PENDING == request->req_status.MPI_ERROR ) {
-                    request->req_status.MPI_ERROR = MPI_SUCCESS;
-                }
-                else if( MPI_SUCCESS != request->req_status.MPI_ERROR ) {
+                if( OPAL_UNLIKELY( MPI_SUCCESS != request->req_status.MPI_ERROR ) ) {
                     failed++;
                 }
                 completed++;
@@ -306,7 +284,7 @@ int ompi_request_default_wait_all( size_t count,
      * this operation completes in error marking the remaining
      * requests as PENDING.
      */
-    if( 0 < (ompi_request_failed - start_failed) ) {
+    if( OPAL_UNLIKELY( 0 < (ompi_request_failed - start_failed) ) ) {
         failed += (ompi_request_failed - start_failed);
         ompi_request_waiting--;
         OPAL_THREAD_UNLOCK(&ompi_request_lock);
@@ -351,16 +329,10 @@ int ompi_request_default_wait_all( size_t count,
             request = *rptr;
             /*
-             * Per MPI 2.2 p 60:
-             * Allows requests to be marked as MPI_ERR_PENDING if they are
-             * "neither failed nor completed." Which can only happen if
-             * there was an error in one of the other requests.
+             * Assert only if no requests were failed.
+             * Since some may still be pending.
              */
-            if( 0 < failed ) {
-                if( !request->req_complete ) {
-                    request->req_status.MPI_ERROR = MPI_ERR_PENDING;
-                }
-            } else {
+            if( 0 >= failed ) {
                 assert( true == request->req_complete );
             }
@@ -369,6 +341,19 @@ int ompi_request_default_wait_all( size_t count,
             }
             OMPI_STATUS_SET(&statuses[i], &request->req_status);
+            /*
+             * Per MPI 2.2 p 60:
+             * Allows requests to be marked as MPI_ERR_PENDING if they are
+             * "neither failed nor completed." Which can only happen if
+             * there was an error in one of the other requests.
+             */
+            if( OPAL_UNLIKELY(0 < failed) ) {
+                if( !request->req_complete ) {
+                    statuses[i].MPI_ERROR = MPI_ERR_PENDING;
+                    mpi_error = MPI_ERR_IN_STATUS;
+                    continue;
+                }
+            }
             if( request->req_persistent ) {
                 request->req_state = OMPI_REQUEST_INACTIVE;
@@ -395,18 +380,18 @@ int ompi_request_default_wait_all( size_t count,
             request = *rptr;
             /*
-             * Per MPI 2.2 p 60:
-             * Some requests are allowed to be pending if there was an error.
-             * However, without a status argument, it is difficult to tell
-             * the user. For completeness mark the status appropriately,
-             * even though it is not exposed to the user.
+             * Assert only if no requests were failed.
+             * Since some may still be pending.
              */
-            if( 0 < failed ) {
-                if( !request->req_complete ) {
-                    request->req_status.MPI_ERROR = MPI_ERR_PENDING;
-                }
-            } else {
+            if( 0 >= failed ) {
                 assert( true == request->req_complete );
+            } else {
+                /* If the request is still pending due to a failed request
+                 * then skip it in this loop.
+                 */
+                if( !request->req_complete ) {
+                    continue;
+                }
             }
             /* Per note above, we have to call gen request query_fn
@@ -482,13 +467,6 @@ int ompi_request_default_wait_some(
             continue;
         }
         if (true == request->req_complete) {
-            /*
-             * Reset the error code of a previously pending request.
-             */
-            if( MPI_ERR_PENDING == request->req_status.MPI_ERROR ) {
-                request->req_status.MPI_ERROR = MPI_SUCCESS;
-            }
             indices[i] = 1;
             num_requests_done++;
         }
@@ -524,13 +502,6 @@ int ompi_request_default_wait_some(
             continue;
         }
         if (request->req_complete == true) {
-            /*
-             * Reset the error code of a previously pending request.
-             */
-            if( MPI_ERR_PENDING == request->req_status.MPI_ERROR ) {
-                request->req_status.MPI_ERROR = MPI_SUCCESS;
-            }
             indices[i] = 1;
             num_requests_done++;
         }
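
The loop above is the path taken when the caller passed no status array (the removed comment's "without a status argument" case). There is then no per-request MPI_ERROR field for the caller to read, so the only user-visible signal is the error return code of MPI_Waitall itself. A minimal caller-side sketch -- hypothetical, with 'count' and 'reqs' assumed to be set up and the error handler assumed to be MPI_ERRORS_RETURN:

    int rc = MPI_Waitall(count, reqs, MPI_STATUSES_IGNORE);
    if (MPI_SUCCESS != rc) {
        /* No statuses were requested, so there is no MPI_ERR_PENDING marker
         * to inspect; simply wait again on every entry.  MPI_Wait on
         * MPI_REQUEST_NULL returns immediately, which covers requests that
         * already completed and were freed. */
        for (int i = 0; i < count; ++i) {
            MPI_Wait(&reqs[i], MPI_STATUS_IGNORE);
        }
    }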

View file

@@ -402,7 +402,7 @@ static inline int ompi_request_complete(ompi_request_t* request, bool with_signal)
     }
     ompi_request_completed++;
     request->req_complete = true;
-    if( MPI_SUCCESS != request->req_status.MPI_ERROR ) {
+    if( OPAL_UNLIKELY(MPI_SUCCESS != request->req_status.MPI_ERROR) ) {
         ompi_request_failed++;
     }
     if(with_signal && ompi_request_waiting) {