
Specialize the MCA_PML_OB1_FREE macro. At the point where this macro is invoked we already know what kind of request we are dealing with (send or receive), so there is no need for an extra switch inside the macro that only makes the code bigger. There are now two versions: MCA_PML_OB1_SEND_REQUEST_FREE and MCA_PML_OB1_RECV_REQUEST_FREE.

This commit was SVN r8945.
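
To illustrate the idea (this sketch is not part of the commit), here is a minimal, self-contained example of the pattern being applied: a generic free macro that switches on the runtime request type versus specialized macros used where the caller already knows the type. All names below are illustrative stand-ins for the real mca_pml_ob1_* types and macros.

#include <stdio.h>

/* Illustrative stand-ins for the PML request types. */
typedef enum { REQ_SEND, REQ_RECV } req_type_t;
typedef struct { req_type_t req_type; } request_t;

/* Generic form: every call site pays for a switch on the runtime type. */
#define REQUEST_FREE(req)                                   \
    do {                                                    \
        switch ((req)->req_type) {                          \
        case REQ_SEND: puts("return send request"); break;  \
        case REQ_RECV: puts("return recv request"); break;  \
        }                                                   \
    } while (0)

/* Specialized forms: the caller already knows the type, so no switch. */
#define SEND_REQUEST_FREE(req) puts("return send request")
#define RECV_REQUEST_FREE(req) puts("return recv request")

int main(void)
{
    request_t sendreq = { REQ_SEND };
    REQUEST_FREE(&sendreq);       /* generic: branches at run time        */
    SEND_REQUEST_FREE(&sendreq);  /* specialized: no run-time type switch */
    return 0;
}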
This commit is contained in:
George Bosilca 2006-02-08 22:42:00 +00:00
parent 1c71ab73d1
commit 83f83e5730
6 changed files with 44 additions and 43 deletions

View file

@@ -210,37 +210,6 @@ extern int mca_pml_ob1_start(
     }
 #endif
-#define MCA_PML_OB1_FREE(request) \
-{ \
-    mca_pml_base_request_t* pml_request = *(mca_pml_base_request_t**)(request); \
-    pml_request->req_free_called = true; \
-    if( pml_request->req_pml_complete == true) \
-    { \
-        switch(pml_request->req_type) { \
-        case MCA_PML_REQUEST_SEND: \
-            { \
-            mca_pml_ob1_send_request_t* sendreq = (mca_pml_ob1_send_request_t*)pml_request; \
-            if(sendreq->req_send.req_send_mode == MCA_PML_BASE_SEND_BUFFERED && \
-               sendreq->req_send.req_addr != sendreq->req_send.req_base.req_addr) { \
-                mca_pml_base_bsend_request_fini((ompi_request_t*)sendreq); \
-            } \
-            MCA_PML_OB1_SEND_REQUEST_RETURN(sendreq); \
-            break; \
-            } \
-        case MCA_PML_REQUEST_RECV: \
-            { \
-            mca_pml_ob1_recv_request_t* recvreq = (mca_pml_ob1_recv_request_t*)pml_request; \
-            MCA_PML_OB1_RECV_REQUEST_RETURN(recvreq); \
-            break; \
-            } \
-        default: \
-            break; \
-        } \
-    } \
-    *(request) = MPI_REQUEST_NULL; \
-}
 #define MCA_PML_OB1_DES_ALLOC(bml_btl, des, size) \
     MCA_BML_BASE_BTL_DES_ALLOC(bml_btl, des, \
         sizeof(mca_pml_ob1_hdr_t) + (sizeof(mca_btl_base_segment_t) << 4), size)

View file

@@ -103,7 +103,7 @@ int mca_pml_ob1_send(void *buf,
     MCA_PML_OB1_SEND_REQUEST_START(sendreq, rc);
     if (rc != OMPI_SUCCESS) {
-        MCA_PML_OB1_FREE((ompi_request_t **) & sendreq);
+        MCA_PML_OB1_SEND_REQUEST_FREE( sendreq );
         return rc;
     }
@@ -125,7 +125,7 @@ int mca_pml_ob1_send(void *buf,
     }
     /* return request to pool */
-    MCA_PML_OB1_FREE((ompi_request_t **) & sendreq);
+    MCA_PML_OB1_SEND_REQUEST_FREE( sendreq );
     return OMPI_SUCCESS;
 }

View file

@@ -39,19 +39,19 @@ static int mca_pml_ob1_recv_request_fini(struct ompi_request_t** request)
     mca_pml_ob1_recv_request_t* recvreq = *(mca_pml_ob1_recv_request_t**)request;
     if(recvreq->req_recv.req_base.req_persistent) {
         if(recvreq->req_recv.req_base.req_free_called) {
-            MCA_PML_OB1_FREE(request);
+            MCA_PML_OB1_RECV_REQUEST_FREE(recvreq);
         } else {
             recvreq->req_recv.req_base.req_ompi.req_state = OMPI_REQUEST_INACTIVE;
         }
     } else {
-        MCA_PML_OB1_FREE(request);
+        MCA_PML_OB1_RECV_REQUEST_FREE(recvreq);
     }
     return OMPI_SUCCESS;
 }
 static int mca_pml_ob1_recv_request_free(struct ompi_request_t** request)
 {
-    MCA_PML_OB1_FREE(request);
+    MCA_PML_OB1_RECV_REQUEST_FREE( *(mca_pml_ob1_recv_request_t**)request );
     return OMPI_SUCCESS;
 }

View file

@@ -121,6 +121,19 @@ do {
     OMPI_FREE_LIST_RETURN(&mca_pml_ob1.recv_requests, (opal_list_item_t*)(recvreq)); \
 } while(0)
+/*
+ * Free the PML receive request
+ */
+#define MCA_PML_OB1_RECV_REQUEST_FREE(recvreq) \
+{ \
+    mca_pml_base_request_t* pml_request = (mca_pml_base_request_t*)(recvreq); \
+    pml_request->req_free_called = true; \
+    if( pml_request->req_pml_complete == true) { \
+        MCA_PML_OB1_RECV_REQUEST_RETURN((recvreq)); \
+    } \
+    (recvreq) = (mca_pml_ob1_recv_request_t*)MPI_REQUEST_NULL; \
+}
 /**
  * Attempt to match the request against the unexpected fragment list
  * for all source ranks w/in the communicator.

View file

@@ -40,7 +40,7 @@ static int mca_pml_ob1_send_request_fini(struct ompi_request_t** request)
     mca_pml_ob1_send_request_t* sendreq = *(mca_pml_ob1_send_request_t**)(request);
     if(sendreq->req_send.req_base.req_persistent) {
         if(sendreq->req_send.req_base.req_free_called) {
-            MCA_PML_OB1_FREE(request);
+            MCA_PML_OB1_SEND_REQUEST_FREE(sendreq);
         } else {
             sendreq->req_send.req_base.req_ompi.req_state = OMPI_REQUEST_INACTIVE;
             /* rewind convertor */
@@ -55,14 +55,14 @@ static int mca_pml_ob1_send_request_fini(struct ompi_request_t** request)
             }
         }
     } else {
-        MCA_PML_OB1_FREE(request);
+        MCA_PML_OB1_SEND_REQUEST_FREE(sendreq);
     }
     return OMPI_SUCCESS;
 }
 static int mca_pml_ob1_send_request_free(struct ompi_request_t** request)
 {
-    MCA_PML_OB1_FREE(request);
+    MCA_PML_OB1_SEND_REQUEST_FREE( *(mca_pml_ob1_send_request_t**)request );
     return OMPI_SUCCESS;
 }
@@ -263,6 +263,8 @@ static void mca_pml_ob1_frag_completion(
         req_bytes_delivered );
     if (OPAL_THREAD_ADD_SIZE_T(&sendreq->req_pipeline_depth,-1) == 0 &&
         req_bytes_delivered == sendreq->req_send.req_bytes_packed) {
+    /*if( OPAL_THREAD_ADD_SIZE_T( &sendreq->req_bytes_delivered, req_bytes_delivered )
+        == sendreq->req_send.req_bytes_packed) {*/
         OPAL_THREAD_LOCK(&ompi_request_lock);
         MCA_PML_OB1_SEND_REQUEST_PML_COMPLETE(sendreq);
         OPAL_THREAD_UNLOCK(&ompi_request_lock);
@@ -863,12 +865,12 @@ static void mca_pml_ob1_put_completion(
     }
     /* check for request completion */
-    OPAL_THREAD_LOCK(&ompi_request_lock);
-    sendreq->req_bytes_delivered += frag->rdma_length;
-    if(sendreq->req_bytes_delivered >= sendreq->req_send.req_bytes_packed) {
+    if( OPAL_THREAD_ADD_SIZE_T(&sendreq->req_bytes_delivered, frag->rdma_length)
+        >= sendreq->req_send.req_bytes_packed) {
+        OPAL_THREAD_LOCK(&ompi_request_lock);
         MCA_PML_OB1_SEND_REQUEST_PML_COMPLETE(sendreq);
+        OPAL_THREAD_UNLOCK(&ompi_request_lock);
     }
-    OPAL_THREAD_UNLOCK(&ompi_request_lock);
     /* allocate descriptor for fin control message - note that
      * the rdma descriptor cannot be reused as it points directly
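
The put_completion hunk above also changes the completion accounting: instead of taking ompi_request_lock around the byte-count update, the new code adds the fragment length with OPAL_THREAD_ADD_SIZE_T and only takes the lock once the whole message has been delivered. A minimal sketch of that pattern using C11 atomics follows; the names are illustrative stand-ins, not the Open MPI API.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative request: delivered bytes are updated atomically,
 * and the mutex stands in for ompi_request_lock. */
typedef struct {
    atomic_size_t   bytes_delivered;
    size_t          bytes_packed;
    pthread_mutex_t lock;
    bool            complete;
} request_t;

/* Completion callback: accumulate without the lock, and only lock
 * when the whole message has been delivered. */
static void fragment_delivered(request_t *req, size_t frag_len)
{
    size_t delivered = atomic_fetch_add(&req->bytes_delivered, frag_len) + frag_len;
    if (delivered >= req->bytes_packed) {
        pthread_mutex_lock(&req->lock);
        req->complete = true;   /* stands in for SEND_REQUEST_PML_COMPLETE */
        pthread_mutex_unlock(&req->lock);
    }
}

int main(void)
{
    request_t req = { 0, 2048, PTHREAD_MUTEX_INITIALIZER, false };
    fragment_delivered(&req, 1024);
    fragment_delivered(&req, 1024);   /* second fragment completes the request */
    printf("complete: %d\n", req.complete);
    return 0;
}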

View file

@@ -308,6 +308,23 @@ do {
         &mca_pml_ob1.send_requests, (opal_list_item_t*)sendreq); \
 }
+/*
+ * Free a send request
+ */
+#define MCA_PML_OB1_SEND_REQUEST_FREE(sendreq) \
+{ \
+    mca_pml_base_request_t* pml_request = (mca_pml_base_request_t*)(sendreq); \
+    pml_request->req_free_called = true; \
+    if( pml_request->req_pml_complete == true) { \
+        if((sendreq)->req_send.req_send_mode == MCA_PML_BASE_SEND_BUFFERED && \
+           (sendreq)->req_send.req_addr != (sendreq)->req_send.req_base.req_addr) { \
+            mca_pml_base_bsend_request_fini((ompi_request_t*)(sendreq)); \
+        } \
+        MCA_PML_OB1_SEND_REQUEST_RETURN(sendreq); \
+    } \
+    (sendreq) = (mca_pml_ob1_send_request_t*)MPI_REQUEST_NULL; \
+}
 /*
  * Attempt to process any pending requests
  */