
Attach the buffer in start and detach it when the request completes. This adds some overhead to the critical path, but it ensures that buffer reference counts are released even when requests are not released by the application.

This commit was SVN r5311.
This commit is contained in:
Tim Woodall 2005-04-13 17:28:06 +00:00
parent 682af1ee0f
commit 55f9f800b1
7 changed files with 16 additions and 76 deletions
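The pattern the commit message describes (take a reference on the attached send buffer when a request is started, drop it when the request completes) can be illustrated independently of the PML code. Below is a minimal, self-contained C sketch of that idea; bsend_pool_t, bsend_request_t, bsend_request_start() and bsend_request_complete() are hypothetical stand-ins for the real mca_pml_bsend bookkeeping, not the Open MPI API.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for the attached buffered-send pool. */
typedef struct {
    pthread_mutex_t lock;
    void  *base;       /* buffer supplied by MPI_Buffer_attach       */
    size_t size;       /* total size of the attached buffer          */
    int    pending;    /* number of started-but-incomplete requests  */
} bsend_pool_t;

typedef struct {
    bsend_pool_t *pool;
    void         *packed;   /* region of the pool holding packed data */
} bsend_request_t;

/* "start": take a reference on the pool while the data is in flight. */
static int bsend_request_start(bsend_pool_t *pool, bsend_request_t *req)
{
    pthread_mutex_lock(&pool->lock);
    if (pool->base == NULL) {           /* no buffer attached */
        pthread_mutex_unlock(&pool->lock);
        return -1;
    }
    pool->pending++;                    /* reference taken on the pool */
    pthread_mutex_unlock(&pool->lock);
    req->pool = pool;
    req->packed = pool->base;           /* real code would allocate and pack here */
    return 0;
}

/* "completion": drop the reference, independent of request_free(). */
static void bsend_request_complete(bsend_request_t *req)
{
    bsend_pool_t *pool = req->pool;
    pthread_mutex_lock(&pool->lock);
    pool->pending--;                    /* buffer space may be reclaimed now */
    pthread_mutex_unlock(&pool->lock);
    req->pool = NULL;
    req->packed = NULL;
}

int main(void)
{
    bsend_pool_t pool = { PTHREAD_MUTEX_INITIALIZER, malloc(1024), 1024, 0 };
    bsend_request_t req;

    if (bsend_request_start(&pool, &req) == 0) {
        /* ... the data would be sent here ... */
        bsend_request_complete(&req);   /* runs at completion, not at free */
    }
    printf("pending after completion: %d\n", pool.pending);
    free(pool.base);
    return 0;
}

Because the reference is dropped at completion time rather than in request_free(), the pool drains even if the application never frees its request handles, which is exactly the guarantee the commit message calls out.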

View file

@@ -174,16 +174,20 @@ int mca_pml_base_bsend_detach(void* addr, int* size)
 }
 
 /*
- * Initialize a request for use w/ buffered send
+ * pack send buffer into buffer
  */
-int mca_pml_base_bsend_request_init(ompi_request_t* request, bool persistent)
+int mca_pml_base_bsend_request_start(ompi_request_t* request)
 {
     mca_pml_base_send_request_t* sendreq = (mca_pml_base_send_request_t*)request;
+    /* alloc buffer and pack user data */
+    struct iovec iov;
+    unsigned int max_data, iov_count;
+    int rc, freeAfter;
 
     if(sendreq->req_count > 0) {
         /* has a buffer been provided */
         OMPI_THREAD_LOCK(&mca_pml_bsend_mutex);
         if(NULL == mca_pml_bsend_addr) {
             sendreq->req_addr = NULL;
@@ -208,25 +212,7 @@ int mca_pml_base_bsend_request_init(ompi_request_t* request, bool persistent)
         /* increment count of pending requests */
         mca_pml_bsend_count++;
         OMPI_THREAD_UNLOCK(&mca_pml_bsend_mutex);
-    }
-
-    /* set flag indicating mpi layer is done */
-    sendreq->req_base.req_persistent = persistent;
-    return OMPI_SUCCESS;
-}
-
-/*
- * pack send buffer into buffer
- */
-int mca_pml_base_bsend_request_start(ompi_request_t* request)
-{
-    mca_pml_base_send_request_t* sendreq = (mca_pml_base_send_request_t*)request;
-    struct iovec iov;
-    unsigned int max_data, iov_count;
-    int rc, freeAfter;
-
-    if(sendreq->req_count > 0) {
 
         /* setup convertor to point to app buffer */
         ompi_convertor_init_for_send( &sendreq->req_convertor,
             0,
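The merged mca_pml_base_bsend_request_start() above both allocates space from the attached buffer and packs the caller's data into it, which is why the iovec and convertor declarations move into this function. The following rough C sketch shows the shape of that pack step under simplifying assumptions: buffer_alloc(), bsend_pack() and the fixed-size pool are hypothetical, and a plain memcpy stands in for ompi_convertor_pack() driving an iovec.

#include <string.h>
#include <sys/uio.h>   /* struct iovec, as used by the convertor interface */
#include <stdio.h>

/* Hypothetical bump allocator standing in for the bsend buffer pool. */
static char   pool[4096];
static size_t pool_used;

static void *buffer_alloc(size_t len)
{
    if (pool_used + len > sizeof(pool))
        return NULL;                    /* real code would report OMPI_ERR_BUFFER */
    void *p = pool + pool_used;
    pool_used += len;
    return p;
}

/* Pack the user's data into freshly allocated buffer space.  The real code
 * drives the datatype convertor with an iovec; memcpy is the trivial case of
 * a contiguous datatype. */
static int bsend_pack(const void *user_buf, size_t len, struct iovec *out)
{
    void *dst = buffer_alloc(len);
    if (dst == NULL)
        return -1;
    memcpy(dst, user_buf, len);
    out->iov_base = dst;
    out->iov_len  = len;
    return 0;
}

int main(void)
{
    const char msg[] = "buffered payload";
    struct iovec iov;

    if (bsend_pack(msg, sizeof(msg), &iov) == 0)
        printf("packed %zu bytes at %p\n", iov.iov_len, iov.iov_base);
    return 0;
}

A real implementation would return unused space on failure and handle non-contiguous datatypes through the convertor rather than a single memcpy.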

View file

@@ -30,7 +30,6 @@ OMPI_DECLSPEC int mca_pml_base_bsend_fini(void);
 OMPI_DECLSPEC int mca_pml_base_bsend_attach(void* addr, int size);
 OMPI_DECLSPEC int mca_pml_base_bsend_detach(void* addr, int* size);
-OMPI_DECLSPEC int mca_pml_base_bsend_request_init(ompi_request_t*, bool persistent);
 OMPI_DECLSPEC int mca_pml_base_bsend_request_start(ompi_request_t*);
 OMPI_DECLSPEC int mca_pml_base_bsend_request_fini(ompi_request_t*);
 
 #if defined(c_plusplus) || defined(__cplusplus)

View file

@@ -180,7 +180,11 @@ void mca_pml_teg_send_request_progress(
             }
         } else if (req->req_base.req_free_called) {
             MCA_PML_TEG_FREE((ompi_request_t**)&req);
-        }
+        } else if (req->req_base.req_persistent) {
+            if(req->req_base.req_type == MCA_PML_BASE_SEND_BUFFERED) {
+                mca_pml_base_bsend_request_fini((ompi_request_t*)req);
+            }
+        }
         /* test to see if we have scheduled the entire request */
     } else if (req->req_offset < req->req_bytes_packed)
         schedule = true;

View file

@@ -69,9 +69,6 @@ int mca_pml_teg_start(size_t count, ompi_request_t** requests)
                 sendmode,
                 pml_request->req_comm,
                 &request);
-            if (sendmode == MCA_PML_BASE_SEND_BUFFERED) {
-                mca_pml_base_bsend_request_init(request, true);
-            }
             break;
         }
         case MCA_PML_REQUEST_RECV:

View file

@@ -36,8 +36,6 @@ static const char FUNC_NAME[] = "MPI_Bsend";
 int MPI_Bsend(void *buf, int count, MPI_Datatype type, int dest, int tag, MPI_Comm comm)
 {
     int rc;
-    ompi_request_t* request;
-
     if (dest == MPI_PROC_NULL) {
         return MPI_SUCCESS;
     }
@@ -59,29 +57,7 @@ int MPI_Bsend(void *buf, int count, MPI_Datatype type, int dest, int tag, MPI_Co
         OMPI_ERRHANDLER_CHECK(rc, comm, rc, FUNC_NAME);
     }
 
-    rc = MCA_PML_CALL(isend_init(buf, count, type, dest, tag, MCA_PML_BASE_SEND_BUFFERED, comm, &request));
-    if(OMPI_SUCCESS != rc)
-        goto error_return;
-
-    rc = mca_pml_base_bsend_request_init(request, false);
-    if(OMPI_SUCCESS != rc) {
-        ompi_request_free(&request);
-        goto error_return;
-    }
-
-    rc = MCA_PML_CALL(start(1, &request));
-    if(OMPI_SUCCESS != rc) {
-        ompi_request_free(&request);
-        goto error_return;
-    }
-
-    rc = ompi_request_wait(&request, MPI_STATUS_IGNORE);
-    if(OMPI_SUCCESS != rc) {
-        ompi_request_free(&request);
-        return rc;
-    }
-
-error_return:
+    rc = MCA_PML_CALL(send(buf, count, type, dest, tag, MCA_PML_BASE_SEND_BUFFERED, comm));
     OMPI_ERRHANDLER_RETURN(rc, comm, rc, FUNC_NAME);
 }
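With buffering handled inside the PML start path, MPI_Bsend reduces to a single blocking send call into the PML, as the hunk above shows. Application-level usage is unchanged; for reference, a minimal standard-MPI example of a buffered send (run with at least two ranks, e.g. mpirun -np 2):

#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
    int rank, data = 42;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /* Attach a user buffer for buffered sends. */
    int bufsize = MPI_BSEND_OVERHEAD + sizeof(int);
    void *buf = malloc(bufsize);
    MPI_Buffer_attach(buf, bufsize);

    if (rank == 0) {
        /* Returns as soon as the data is packed into the attached buffer. */
        MPI_Bsend(&data, 1, MPI_INT, 1, 0, MPI_COMM_WORLD);
    } else if (rank == 1) {
        MPI_Recv(&data, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        printf("received %d\n", data);
    }

    /* Detach blocks until all buffered messages have been delivered. */
    int size;
    MPI_Buffer_detach(&buf, &size);
    free(buf);

    MPI_Finalize();
    return 0;
}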

View file

@@ -61,16 +61,6 @@ int MPI_Bsend_init(void *buf, int count, MPI_Datatype type,
     }
 
     rc = MCA_PML_CALL(isend_init(buf,count,type,dest,tag,MCA_PML_BASE_SEND_BUFFERED,comm,request));
-    if(OMPI_SUCCESS != rc)
-        goto error_return;
-
-    rc = mca_pml_base_bsend_request_init(*request, true);
-    if(OMPI_SUCCESS != rc) {
-        ompi_request_free(request);
-        goto error_return;
-    }
-
-error_return:
     OMPI_ERRHANDLER_RETURN(rc, comm, rc, FUNC_NAME);
 }

View file

@@ -61,19 +61,7 @@ int MPI_Ibsend(void *buf, int count, MPI_Datatype type, int dest,
         OMPI_ERRHANDLER_CHECK(rc, comm, rc, FUNC_NAME);
     }
 
-    rc = MCA_PML_CALL(isend_init(buf, count, type, dest, tag, MCA_PML_BASE_SEND_BUFFERED, comm, request));
-    if(OMPI_SUCCESS != rc)
-        goto error_return;
-
-    rc = mca_pml_base_bsend_request_init(*request, false);
-    if(OMPI_SUCCESS != rc)
-        goto error_return;
-
-    rc = MCA_PML_CALL(start(1, request));
-    if(OMPI_SUCCESS != rc)
-        goto error_return;
-
-error_return:
+    rc = MCA_PML_CALL(isend(buf, count, type, dest, tag, MCA_PML_BASE_SEND_BUFFERED, comm, request));
     OMPI_ERRHANDLER_RETURN(rc, comm, rc, FUNC_NAME);
 }