
Redo of r12871, without the preconnect code change:

Move the req_mtl structure back to the end of each of the structures in
the CM PML. The req_mtl structure is cast into an mtl_*_request structure
for each MTL, which is larger than req_mtl itself. The cast therefore lets
the *_request overwrite parts of the heavy requests if req_mtl isn't the
*LAST* field in each structure (hence the comment). It had been moved
earlier as an optimization, which caused buffered sends to fail...

Refs trac:669

This commit was SVN r12873.
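
As an illustration of the layout requirement described above (a simplified
sketch with made-up names, not the actual OMPI definitions): the MTL casts
the trailing req_mtl member to its own, larger request type and uses extra
bytes reserved past the end of the PML request, so any field placed after
req_mtl would be overwritten.

/* Simplified sketch, hypothetical names -- not the real OMPI structures. */
#include <stdlib.h>

typedef struct {                   /* generic handle handed to the MTL      */
    void *ompi_req;
    void (*completion_callback)(void *);
} mtl_request_t;

typedef struct {                   /* MTL-specific view, larger than the    */
    mtl_request_t super;           /* generic handle it is cast from        */
    char hw_context[64];           /* extra, MTL-private state              */
} mtl_foo_request_t;

typedef struct {
    int   req_tag;                 /* PML bookkeeping ...                   */
    void *req_buff;
    mtl_request_t req_mtl;         /* must stay LAST: the MTL writes more   */
                                   /* than sizeof(mtl_request_t) bytes here */
} pml_request_t;

int main(void)
{
    /* The allocator reserves room for the MTL's larger view by
       over-allocating past the end of the PML request. */
    pml_request_t *req = malloc(sizeof(*req)
                                + sizeof(mtl_foo_request_t)
                                - sizeof(mtl_request_t));
    if (NULL == req) return 1;

    /* The MTL casts the trailing member to its own type.  Because req_mtl
       is the last field, the extra bytes land in the slack space allocated
       above rather than on top of other PML fields. */
    mtl_foo_request_t *mtl_req = (mtl_foo_request_t *) &req->req_mtl;
    mtl_req->hw_context[0] = 1;    /* safe only while req_mtl is last */

    free(req);
    return 0;
}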

The following SVN revision numbers were found above:
  r12871 --> open-mpi/ompi@597598b712

The following Trac tickets were found above:
  Ticket 669 --> https://svn.open-mpi.org/trac/ompi/ticket/669
This commit is contained in:
Brian Barrett 2006-12-15 17:54:14 +00:00
parent bdf0b231b2
commit 01e8fc5f91
6 changed files with 20 additions and 17 deletions

View file

@@ -31,7 +31,7 @@ mca_pml_cm_cancel(struct ompi_request_t *ompi_req, int flag)
 {
 mca_pml_cm_hvy_send_request_t *request =
 (mca_pml_cm_hvy_send_request_t*) base_request;
-mtl_req = &request->req_send.req_base.req_mtl;
+mtl_req = &request->req_mtl;
 }
 break;
@@ -39,7 +39,7 @@ mca_pml_cm_cancel(struct ompi_request_t *ompi_req, int flag)
 {
 mca_pml_cm_thin_send_request_t *request =
 (mca_pml_cm_thin_send_request_t*) base_request;
-mtl_req = &request->req_send.req_base.req_mtl;
+mtl_req = &request->req_mtl;
 }
 break;
@@ -47,7 +47,7 @@ mca_pml_cm_cancel(struct ompi_request_t *ompi_req, int flag)
 {
 mca_pml_cm_hvy_recv_request_t *request =
 (mca_pml_cm_hvy_recv_request_t*) base_request;
-mtl_req = &request->req_base.req_mtl;
+mtl_req = &request->req_mtl;
 }
 break;
@@ -55,7 +55,7 @@ mca_pml_cm_cancel(struct ompi_request_t *ompi_req, int flag)
 {
 mca_pml_cm_thin_recv_request_t *request =
 (mca_pml_cm_thin_recv_request_t*) base_request;
-mtl_req = &request->req_base.req_mtl;
+mtl_req = &request->req_mtl;
 }
 break;

View file

@@ -64,8 +64,8 @@ mca_pml_cm_hvy_recv_request_free(struct ompi_request_t** request)
 static void
 mca_pml_cm_thin_recv_request_construct(mca_pml_cm_thin_recv_request_t* recvreq)
 {
-recvreq->req_base.req_mtl.ompi_req = (ompi_request_t*) recvreq;
-recvreq->req_base.req_mtl.completion_callback = mca_pml_cm_thin_recv_request_completion;
+recvreq->req_mtl.ompi_req = (ompi_request_t*) recvreq;
+recvreq->req_mtl.completion_callback = mca_pml_cm_thin_recv_request_completion;
 recvreq->req_base.req_ompi.req_free = mca_pml_cm_thin_recv_request_free;
 recvreq->req_base.req_ompi.req_cancel = mca_pml_cm_cancel;
@@ -77,8 +77,8 @@ mca_pml_cm_thin_recv_request_construct(mca_pml_cm_thin_recv_request_t* recvreq)
 static void
 mca_pml_cm_hvy_recv_request_construct(mca_pml_cm_hvy_recv_request_t* recvreq)
 {
-recvreq->req_base.req_mtl.ompi_req = (ompi_request_t*) recvreq;
-recvreq->req_base.req_mtl.completion_callback = mca_pml_cm_hvy_recv_request_completion;
+recvreq->req_mtl.ompi_req = (ompi_request_t*) recvreq;
+recvreq->req_mtl.completion_callback = mca_pml_cm_hvy_recv_request_completion;
 recvreq->req_base.req_ompi.req_free = mca_pml_cm_hvy_recv_request_free;
 recvreq->req_base.req_ompi.req_cancel = mca_pml_cm_cancel;

View file

@@ -25,6 +25,7 @@
 struct mca_pml_cm_thin_recv_request_t {
 mca_pml_cm_request_t req_base;
+mca_mtl_request_t req_mtl; /**< the mtl specific memory. This field should be the last in the struct */
 };
 typedef struct mca_pml_cm_thin_recv_request_t mca_pml_cm_thin_recv_request_t;
 OMPI_DECLSPEC OBJ_CLASS_DECLARATION(mca_pml_cm_thin_recv_request_t);
@@ -38,6 +39,7 @@ struct mca_pml_cm_hvy_recv_request_t {
 void *req_buff; /**< pointer to send buffer - may not be application buffer */
 size_t req_bytes_packed; /**< packed size of a message given the datatype and count */
 bool req_blocking;
+mca_mtl_request_t req_mtl; /**< the mtl specific memory. This field should be the last in the struct */
 };
 typedef struct mca_pml_cm_hvy_recv_request_t mca_pml_cm_hvy_recv_request_t;
@@ -171,7 +173,7 @@ do { \
 src, \
 tag, \
 &recvreq->req_base.req_convertor, \
-&recvreq->req_base.req_mtl)); \
+&recvreq->req_mtl)); \
 } while (0)
@@ -195,7 +197,7 @@ do { \
 request->req_peer, \
 request->req_tag, \
 &recvreq->req_base.req_convertor, \
-&recvreq->req_base.req_mtl)); \
+&recvreq->req_mtl)); \
 } while (0)

View file

@@ -46,7 +46,6 @@ struct mca_pml_cm_request_t {
 struct ompi_communicator_t *req_comm; /**< communicator pointer */
 struct ompi_datatype_t *req_datatype; /**< pointer to data type */
 ompi_convertor_t req_convertor; /**< always need the convertor */
-mca_mtl_request_t req_mtl; /**< the mtl specific memory. This field should be the last in the struct */
 };
 typedef struct mca_pml_cm_request_t mca_pml_cm_request_t;
 OMPI_DECLSPEC OBJ_CLASS_DECLARATION(mca_pml_cm_request_t);

View file

@@ -48,8 +48,8 @@ OBJ_CLASS_INSTANCE(mca_pml_cm_hvy_send_request_t,
 static void mca_pml_cm_thin_send_request_construct(mca_pml_cm_thin_send_request_t* sendreq)
 {
 /* no need to reinit for every send -- never changes */
-sendreq->req_send.req_base.req_mtl.ompi_req = (ompi_request_t*) sendreq;
-sendreq->req_send.req_base.req_mtl.completion_callback = mca_pml_cm_thin_send_request_completion;
+sendreq->req_mtl.ompi_req = (ompi_request_t*) sendreq;
+sendreq->req_mtl.completion_callback = mca_pml_cm_thin_send_request_completion;
 sendreq->req_send.req_base.req_ompi.req_free = mca_pml_cm_thin_send_request_free;
 sendreq->req_send.req_base.req_ompi.req_cancel = mca_pml_cm_cancel;
 sendreq->req_send.req_base.req_pml_type = MCA_PML_CM_REQUEST_SEND_THIN;
@@ -59,8 +59,8 @@ static void mca_pml_cm_thin_send_request_construct(mca_pml_cm_thin_send_request_
 static void mca_pml_cm_hvy_send_request_construct(mca_pml_cm_hvy_send_request_t* sendreq)
 {
 /* no need to reinit for every send -- never changes */
-sendreq->req_send.req_base.req_mtl.ompi_req = (ompi_request_t*) sendreq;
-sendreq->req_send.req_base.req_mtl.completion_callback = mca_pml_cm_hvy_send_request_completion;
+sendreq->req_mtl.ompi_req = (ompi_request_t*) sendreq;
+sendreq->req_mtl.completion_callback = mca_pml_cm_hvy_send_request_completion;
 sendreq->req_send.req_base.req_ompi.req_free = mca_pml_cm_hvy_send_request_free;
 sendreq->req_send.req_base.req_ompi.req_cancel = mca_pml_cm_cancel;
 sendreq->req_send.req_base.req_pml_type = MCA_PML_CM_REQUEST_SEND_HEAVY;

View file

@@ -35,6 +35,7 @@ OMPI_DECLSPEC OBJ_CLASS_DECLARATION(mca_pml_cm_send_request_t);
 struct mca_pml_cm_thin_send_request_t {
 mca_pml_cm_send_request_t req_send;
+mca_mtl_request_t req_mtl; /**< the mtl specific memory. This field should be the last in the struct */
 };
 typedef struct mca_pml_cm_thin_send_request_t mca_pml_cm_thin_send_request_t;
 OMPI_DECLSPEC OBJ_CLASS_DECLARATION(mca_pml_cm_thin_send_request_t);
@@ -48,6 +49,7 @@ struct mca_pml_cm_hvy_send_request_t {
 int32_t req_tag; /**< user defined tag */
 void *req_buff; /**< pointer to send buffer - may not be application buffer */
 bool req_blocking;
+mca_mtl_request_t req_mtl; /**< the mtl specific memory. This field should be the last in the struct */
 };
 typedef struct mca_pml_cm_hvy_send_request_t mca_pml_cm_hvy_send_request_t;
 OMPI_DECLSPEC OBJ_CLASS_DECLARATION(mca_pml_cm_hvy_send_request_t);
@@ -203,7 +205,7 @@ do { \
 &sendreq->req_send.req_base.req_convertor, \
 sendmode, \
 blocking, \
-&sendreq->req_send.req_base.req_mtl)); \
+&sendreq->req_mtl)); \
 } while (0)
 #define MCA_PML_CM_HVY_SEND_REQUEST_BSEND_ALLOC(sendreq, ret) \
@@ -247,7 +249,7 @@ do { \
 &sendreq->req_send.req_base.req_convertor, \
 sendreq->req_send.req_send_mode, \
 sendreq->req_blocking, \
-&sendreq->req_send.req_base.req_mtl)); \
+&sendreq->req_mtl)); \
 if(OMPI_SUCCESS == ret && \
 sendreq->req_send.req_send_mode == MCA_PML_BASE_SEND_BUFFERED) { \
 MCA_PML_BASE_REQUEST_MPI_COMPLETE(&(sendreq)->req_send.req_base.req_ompi); \
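
Not part of this commit, but one way to keep the "req_mtl must be last"
invariant from silently regressing again is a compile-time check (C11
_Static_assert, hypothetical names) that the member ends exactly at the end
of its enclosing struct; this assumes req_mtl carries the strictest
alignment among the members, so no tail padding follows it.

/* Hypothetical guard, not in the OMPI tree. */
#include <stddef.h>

typedef struct {
    void *ompi_req;
    void (*completion_callback)(void *);
} mtl_request_t;

typedef struct {
    int   req_tag;
    void *req_buff;
    mtl_request_t req_mtl;   /* must remain the last member */
} pml_request_t;

/* Fails to compile if another member is ever added after req_mtl
   (valid as long as no tail padding follows req_mtl). */
_Static_assert(offsetof(pml_request_t, req_mtl) + sizeof(mtl_request_t)
                   == sizeof(pml_request_t),
               "req_mtl must be the last member of pml_request_t");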