- According to MPI-1.2, sec. 3.2.5, p. 22, the single-request functions MPI_Test, MPI_Testany, MPI_Wait, and MPI_Waitany must not reset the status.MPI_ERROR field passed in by the user.
- This required implementing MPI_Waitsome and MPI_Testsome.

This commit was SVN r10980.
This commit is contained in:
parent 31c66d92aa
commit ee27f7e2c7
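
For context, a minimal caller-side sketch of the semantic this commit enforces (illustrative only, not part of the commit; the sentinel value 42 and the use of MPI_REQUEST_NULL are assumptions chosen for the demo): a single-request completion call such as MPI_Test must leave the MPI_ERROR field of the user-supplied status exactly as the caller set it.

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    MPI_Request req = MPI_REQUEST_NULL;
    MPI_Status  status;
    int flag;

    MPI_Init(&argc, &argv);

    status.MPI_ERROR = 42;           /* sentinel written by the caller */
    MPI_Test(&req, &flag, &status);  /* single-request completion call */

    /* Per MPI-1.2, sec 3.2.5, p. 22, MPI_Test may not reset the
     * MPI_ERROR field, so the sentinel must survive the call. */
    printf("flag=%d, status.MPI_ERROR=%d (expect 42)\n",
           flag, status.MPI_ERROR);

    MPI_Finalize();
    return 0;
}

Only the array variants (MPI_Testsome, MPI_Waitsome, and the *all functions) write MPI_ERROR, signalling failures through MPI_ERR_IN_STATUS.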
--- a/ompi/mpi/c/testsome.c
+++ b/ompi/mpi/c/testsome.c
@@ -5,7 +5,7 @@
  * Copyright (c) 2004-2006 The University of Tennessee and The University
  *                         of Tennessee Research Foundation.  All rights
  *                         reserved.
- * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
+ * Copyright (c) 2004-2006 High Performance Computing Center Stuttgart,
  *                         University of Stuttgart.  All rights reserved.
  * Copyright (c) 2004-2005 The Regents of the University of California.
  *                         All rights reserved.
@@ -33,12 +33,11 @@
 static const char FUNC_NAME[] = "MPI_Testsome";
 
 
-int MPI_Testsome(int incount, MPI_Request requests[],
-                 int *outcount, int indices[],
-                 MPI_Status statuses[])
+int MPI_Testsome(int incount, MPI_Request *requests,
+                 int *outcount, int *indices,
+                 MPI_Status *statuses)
 {
-    int rc, index, completed;
-    ompi_status_public_t *pstatus;
+    int rc;
 
     if ( MPI_PARAM_CHECK ) {
         int rc = MPI_SUCCESS;
@@ -46,30 +45,13 @@ int MPI_Testsome(int incount, MPI_Request requests[],
         if( 0 != incount ) {
             if( NULL == requests) {
                 rc = MPI_ERR_REQUEST;
-            } else if (NULL == indices) {
+            } else if ((NULL == outcount) || (NULL == indices)) {
                 rc = MPI_ERR_ARG;
             }
         }
         OMPI_ERRHANDLER_CHECK(rc, MPI_COMM_WORLD, rc, FUNC_NAME);
     }
 
-    if( MPI_STATUSES_IGNORE != statuses ) {
-        pstatus = statuses;
-    } else {
-        pstatus = MPI_STATUS_IGNORE;
-    }
-    /* optimize this in the future */
-    rc = ompi_request_test_any(incount, requests, &index, &completed, pstatus);
-    if(completed) {
-        if( MPI_UNDEFINED == index ) {
-            *outcount = MPI_UNDEFINED;
-        } else {
-            *outcount = 1;
-            indices[0] = index;
-        }
-    } else {
-        *outcount = 0;
-    }
+    rc = ompi_request_test_some(incount, requests, outcount, indices, statuses);
     OMPI_ERRHANDLER_RETURN(rc, MPI_COMM_WORLD, rc, FUNC_NAME);
 }
--- a/ompi/mpi/c/waitsome.c
+++ b/ompi/mpi/c/waitsome.c
@@ -7,7 +7,7 @@
  * Copyright (c) 2004-2006 The University of Tennessee and The University
  *                         of Tennessee Research Foundation.  All rights
  *                         reserved.
- * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
+ * Copyright (c) 2004-2006 High Performance Computing Center Stuttgart,
  *                         University of Stuttgart.  All rights reserved.
  * Copyright (c) 2004-2005 The Regents of the University of California.
  *                         All rights reserved.
@@ -39,31 +39,21 @@ int MPI_Waitsome(int incount, MPI_Request *requests,
                  int *outcount, int *indices,
                  MPI_Status *statuses)
 {
-    int index, rc;
-    MPI_Status *pstatus;
+    int rc;
 
     if ( MPI_PARAM_CHECK ) {
         int rc = MPI_SUCCESS;
         OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
-        if( (requests == NULL) && (0 != incount) ) {
+        if ((0 != incount) && (NULL == requests)) {
             rc = MPI_ERR_REQUEST;
         }
+        if ((NULL == outcount) || (NULL == indices)) {
+            rc = MPI_ERR_ARG;
+        }
         OMPI_ERRHANDLER_CHECK(rc, MPI_COMM_WORLD, rc, FUNC_NAME);
     }
 
-    if( MPI_STATUSES_IGNORE != statuses ) {
-        pstatus = statuses;
-    } else {
-        pstatus = MPI_STATUS_IGNORE;
-    }
-    /* optimize this in the future */
-    rc = ompi_request_wait_any( incount, requests, &index, pstatus );
-    if( MPI_UNDEFINED == index ) {
-        *outcount = MPI_UNDEFINED;
-    } else {
-        *outcount = 1;
-        indices[0] = index;
-    }
+    rc = ompi_request_wait_some( incount, requests, outcount, indices, statuses );
     OMPI_ERRHANDLER_RETURN(rc, MPI_COMM_WORLD, rc, FUNC_NAME);
 }
--- a/ompi/request/req_test.c
+++ b/ompi/request/req_test.c
@@ -46,7 +46,10 @@ int ompi_request_test_any(
             *index = i;
             *completed = true;
             if (MPI_STATUS_IGNORE != status) {
+                /* See MPI-1.2, sec 3.2.5, p.22 */
+                int old_error = status->MPI_ERROR;
                 *status = request->req_status;
+                status->MPI_ERROR = old_error;
             }
             if( request->req_persistent ) {
                 request->req_state = OMPI_REQUEST_INACTIVE;
@@ -146,3 +149,79 @@ int ompi_request_test_all(
     return OMPI_SUCCESS;
 }
 
+
+int ompi_request_test_some(
+    size_t count,
+    ompi_request_t ** requests,
+    int * outcount,
+    int * indices,
+    ompi_status_public_t * statuses)
+{
+    size_t i, num_requests_null_inactive=0, num_requests_done = 0;
+    int rc = OMPI_SUCCESS;
+    ompi_request_t **rptr;
+    ompi_request_t *request;
+
+    opal_atomic_mb();
+    rptr = requests;
+    for (i = 0; i < count; i++, rptr++) {
+        request = *rptr;
+        if (request->req_state == OMPI_REQUEST_INACTIVE) {
+            num_requests_null_inactive++;
+            continue;
+        }
+        if (true == request->req_complete) {
+            indices[num_requests_done++] = i;
+        }
+    }
+
+    /*
+     * If there are no active requests, no need to progress
+     */
+    if (num_requests_null_inactive == count) {
+        *outcount = MPI_UNDEFINED;
+        return OMPI_SUCCESS;
+    }
+
+    *outcount = num_requests_done;
+
+    if (num_requests_done == 0) {
+#if OMPI_ENABLE_PROGRESS_THREADS == 0
+        opal_progress();
+#endif
+        return OMPI_SUCCESS;
+    }
+
+    /* fill out completion status and free request if required */
+    for( i = 0; i < num_requests_done; i++) {
+        request = requests[indices[i]];
+
+        if (MPI_STATUSES_IGNORE != statuses) {
+            statuses[i] = request->req_status;
+        }
+
+        rc += request->req_status.MPI_ERROR;
+
+        if( request->req_persistent ) {
+            request->req_state = OMPI_REQUEST_INACTIVE;
+        } else {
+            int tmp;
+            /* return request to pool */
+            tmp = ompi_request_free(&(requests[indices[i]]));
+            /*
+             * If it fails, we are screwed. We cannot put the
+             * request_free return code into the status, possibly
+             * overwriting some other important error; therefore just quit.
+             */
+            if (OMPI_SUCCESS != tmp) {
+                return tmp;
+            }
+        }
+    }
+
+    if (OMPI_SUCCESS != rc) {
+        rc = MPI_ERR_IN_STATUS;
+    }
+
+    return rc;
+}
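
The rc += request->req_status.MPI_ERROR accumulation above is what turns any per-request failure into an MPI_ERR_IN_STATUS return from the *some variants. A hedged caller-side sketch of how that surfaces (hypothetical helper, not part of this commit; assumes the requests belong to a communicator whose error handler is MPI_ERRORS_RETURN, so errors come back as return codes instead of aborting):

#include <mpi.h>
#include <stdio.h>

/* Hypothetical helper: test an array of requests and report which of the
 * completed ones failed. */
static void report_failures(int n, MPI_Request reqs[],
                            int indices[], MPI_Status statuses[])
{
    int outcount;
    int rc = MPI_Testsome(n, reqs, &outcount, indices, statuses);

    if (MPI_ERR_IN_STATUS == rc && MPI_UNDEFINED != outcount) {
        for (int i = 0; i < outcount; i++) {
            /* statuses[i] belongs to reqs[indices[i]] */
            if (MPI_SUCCESS != statuses[i].MPI_ERROR) {
                fprintf(stderr, "request %d failed with error %d\n",
                        indices[i], statuses[i].MPI_ERROR);
            }
        }
    }
}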
--- a/ompi/request/req_wait.c
+++ b/ompi/request/req_wait.c
@@ -52,7 +52,10 @@ finished:
 
     /* return status */
     if( MPI_STATUS_IGNORE != status ) {
+        /* See MPI-1.2, sec 3.2.5, p.22 */
+        int old_error = status->MPI_ERROR;
         *status = req->req_status;
+        status->MPI_ERROR = old_error;
     }
     if( req->req_state == OMPI_REQUEST_INACTIVE ) {
         return OMPI_SUCCESS;
@@ -160,7 +163,10 @@ finished:
     assert( true == request->req_complete );
     /* return status */
     if (MPI_STATUS_IGNORE != status) {
+        /* See MPI-1.2, sec 3.2.5, p.22 */
+        int old_error = status->MPI_ERROR;
         *status = request->req_status;
+        status->MPI_ERROR = old_error;
     }
     if( request->req_persistent ) {
         request->req_state = OMPI_REQUEST_INACTIVE;
@@ -265,7 +271,7 @@ int ompi_request_wait_all(
                 (void)ompi_request_free(rptr);
             }
             if( statuses[i].MPI_ERROR != OMPI_SUCCESS) {
-                mpi_error = OMPI_ERR_IN_ERRNO;
+                mpi_error = MPI_ERR_IN_STATUS;
             }
         }
     } else {
@@ -293,3 +299,139 @@ int ompi_request_wait_all(
     return mpi_error;
 }
 
+
+int ompi_request_wait_some(
+    size_t count,
+    ompi_request_t ** requests,
+    int * outcount,
+    int * indices,
+    ompi_status_public_t * statuses)
+{
+#if OMPI_ENABLE_PROGRESS_THREADS
+    int c;
+#endif
+    size_t i, num_requests_null_inactive=0, num_requests_done=0;
+    int rc = OMPI_SUCCESS;
+    ompi_request_t **rptr=NULL;
+    ompi_request_t *request=NULL;
+
+    *outcount = 0;
+    for (i = 0; i < count; i++){
+        indices[i] = 0;
+    }
+
+#if OMPI_ENABLE_PROGRESS_THREADS
+    /* poll for completion */
+    OPAL_THREAD_ADD32(&opal_progress_thread_count,1);
+    for (c = 0; c < opal_progress_spin_count; c++) {
+        rptr = requests;
+        num_requests_null_inactive = 0;
+        num_requests_done = 0;
+        for (i = 0; i < count; i++, rptr++) {
+            request = *rptr;
+            /*
+             * Check for null or completed persistent request.
+             * For MPI_REQUEST_NULL, the req_state is always OMPI_REQUEST_INACTIVE
+             */
+            if (request->req_state == OMPI_REQUEST_INACTIVE ) {
+                num_requests_null_inactive++;
+                continue;
+            }
+            if (true == request->req_complete) {
+                indices[i] = 1;
+                num_requests_done++;
+            }
+        }
+        if (num_requests_null_inactive == count ||
+            num_requests_done > 0) {
+            OPAL_THREAD_ADD32(&opal_progress_thread_count,-1);
+            goto finished;
+        }
+        opal_progress();
+    }
+    OPAL_THREAD_ADD32(&opal_progress_thread_count,-1);
+#endif
+
+    /*
+     * We only get here when outcount still is 0.
+     * give up and sleep until completion
+     */
+    OPAL_THREAD_LOCK(&ompi_request_lock);
+    ompi_request_waiting++;
+    do {
+        rptr = requests;
+        num_requests_null_inactive = 0;
+        num_requests_done = 0;
+        for (i = 0; i < count; i++, rptr++) {
+            request = *rptr;
+            /*
+             * Check for null or completed persistent request.
+             * For MPI_REQUEST_NULL, the req_state is always OMPI_REQUEST_INACTIVE.
+             */
+            if( request->req_state == OMPI_REQUEST_INACTIVE ) {
+                num_requests_null_inactive++;
+                continue;
+            }
+            if (request->req_complete == true) {
+                indices[i] = 1;
+                num_requests_done++;
+            }
+        }
+        if (num_requests_null_inactive == count ||
+            num_requests_done > 0)
+            break;
+        opal_condition_wait(&ompi_request_cond, &ompi_request_lock);
+    } while (1);
+    ompi_request_waiting--;
+    OPAL_THREAD_UNLOCK(&ompi_request_lock);
+
+#if OMPI_ENABLE_PROGRESS_THREADS
+finished:
+#endif  /* OMPI_ENABLE_PROGRESS_THREADS */
+
+    if(num_requests_null_inactive == count) {
+        *outcount = MPI_UNDEFINED;
+    } else {
+        /*
+         * Compress the index array.
+         */
+        for (i = 0, num_requests_done = 0; i < count; i++) {
+            if (0 != indices[i]) {
+                indices[num_requests_done++] = i;
+            }
+        }
+
+        *outcount = num_requests_done;
+
+        for (i = 0; i < num_requests_done; i++) {
+            request = requests[indices[i]];
+            assert( true == request->req_complete );
+            /* return status */
+            if (MPI_STATUSES_IGNORE != statuses) {
+                statuses[i] = request->req_status;
+            }
+
+            rc += request->req_status.MPI_ERROR;
+
+            if( request->req_persistent ) {
+                request->req_state = OMPI_REQUEST_INACTIVE;
+            } else {
+                int tmp;
+                /* return request to pool */
+                tmp = ompi_request_free(&(requests[indices[i]]));
+                /*
+                 * If it fails, we are screwed. We cannot put the
+                 * request_free return code into the status, possibly
+                 * overwriting some other important error; therefore just quit.
+                 */
+                if (OMPI_SUCCESS != tmp) {
+                    return tmp;
+                }
+            }
+        }
+        if (OMPI_SUCCESS != rc) {
+            rc = MPI_ERR_IN_STATUS;
+        }
+    }
+    return rc;
+}
--- a/ompi/request/request.h
+++ b/ompi/request/request.h
@@ -231,7 +231,10 @@ static inline int ompi_request_test( ompi_request_t ** rptr,
     if (request->req_complete) {
         *completed = true;
         if (MPI_STATUS_IGNORE != status) {
+            /* See MPI-1.2, sec 3.2.5, p.22 */
+            int old_error = status->MPI_ERROR;
             *status = request->req_status;
+            status->MPI_ERROR = old_error;
         }
         if( request->req_persistent ) {
             request->req_state = OMPI_REQUEST_INACTIVE;
@@ -302,6 +305,27 @@ OMPI_DECLSPEC int ompi_request_test_all(
     ompi_status_public_t * statuses);
 
 
+
+/**
+ * Non-blocking test for some of N requests to complete.
+ *
+ * @param count (IN)      Number of requests
+ * @param requests (INOUT) Array of requests
+ * @param outcount (OUT)  Number of finished requests
+ * @param indices (OUT)   Indices of the finished requests
+ * @param statuses (OUT)  Array of completion statuses.
+ * @return OMPI_SUCCESS, OMPI_ERR_IN_STATUS or failure status.
+ *
+ */
+
+OMPI_DECLSPEC int ompi_request_test_some(
+    size_t count,
+    ompi_request_t ** requests,
+    int * outcount,
+    int * indices,
+    ompi_status_public_t * statuses);
+
 
 /**
  * Wait (blocking-mode) for one requests to complete.
  *
@@ -348,6 +372,26 @@ OMPI_DECLSPEC int ompi_request_wait_all(
     ompi_status_public_t * statuses);
 
 
+/**
+ * Wait (blocking-mode) for some of N requests to complete.
+ *
+ * @param count (IN)      Number of requests
+ * @param requests (INOUT) Array of requests
+ * @param outcount (OUT)  Number of finished requests
+ * @param indices (OUT)   Indices of the finished requests
+ * @param statuses (OUT)  Array of completion statuses.
+ * @return OMPI_SUCCESS, OMPI_ERR_IN_STATUS or failure status.
+ *
+ */
+
+OMPI_DECLSPEC int ompi_request_wait_some(
+    size_t count,
+    ompi_request_t ** requests,
+    int * outcount,
+    int * indices,
+    ompi_status_public_t * statuses);
+
+
 #if defined(c_plusplus) || defined(__cplusplus)
 }
 #endif
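
Finally, a small usage sketch (hypothetical buffers and request setup, not part of the commit) of the completion loop the new functions enable: MPI_Waitsome blocks until at least one active request finishes, reports how many completed in outcount, identifies them via indices, and returns MPI_UNDEFINED in outcount once no active requests remain.

#include <mpi.h>

#define NREQ 4

/* Hypothetical drain loop: process receives as they complete instead of
 * waiting for all of them. reqs[] is assumed to hold NREQ posted
 * non-persistent receives into bufs[]. */
static void drain(MPI_Request reqs[NREQ], int bufs[NREQ])
{
    int done = 0;
    while (done < NREQ) {
        int outcount, indices[NREQ];
        MPI_Status statuses[NREQ];

        MPI_Waitsome(NREQ, reqs, &outcount, indices, statuses);
        if (MPI_UNDEFINED == outcount) {
            break;  /* every request is null or inactive */
        }
        for (int i = 0; i < outcount; i++) {
            int idx = indices[i];
            /* bufs[idx] is now safe to read; MPI_Waitsome set the
             * non-persistent reqs[idx] back to MPI_REQUEST_NULL. */
            (void)bufs[idx];
        }
        done += outcount;
    }
}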