1
1

Move the req_mtl structure back to the end of each of the structures in the

CM PML.  The req_mtl structure is cast into a mtl_*_request_structure for
each MTL, which is larger than the req_mtl itself.  The cast will cause
the *_request to overwrite parts of the heavy requests if the req_mtl
isn't the *LAST* thing on each structure (hence the comment).  This was
moved as an optimization at some point, which caused buffered sends to
fail...

Refs trac:669

This commit was SVN r12871.

The following Trac tickets were found above:
  Ticket 669 --> https://svn.open-mpi.org/trac/ompi/ticket/669
This commit is contained in:
Brian Barrett 2006-12-15 17:46:53 +00:00
parent 1e1d0e8a89
commit 597598b712
7 changed files with 50 additions and 40 deletions

View file

@ -31,7 +31,7 @@ mca_pml_cm_cancel(struct ompi_request_t *ompi_req, int flag)
{
mca_pml_cm_hvy_send_request_t *request =
(mca_pml_cm_hvy_send_request_t*) base_request;
mtl_req = &request->req_send.req_base.req_mtl;
mtl_req = &request->req_mtl;
}
break;
@ -39,7 +39,7 @@ mca_pml_cm_cancel(struct ompi_request_t *ompi_req, int flag)
{
mca_pml_cm_thin_send_request_t *request =
(mca_pml_cm_thin_send_request_t*) base_request;
mtl_req = &request->req_send.req_base.req_mtl;
mtl_req = &request->req_mtl;
}
break;
@ -47,7 +47,7 @@ mca_pml_cm_cancel(struct ompi_request_t *ompi_req, int flag)
{
mca_pml_cm_hvy_recv_request_t *request =
(mca_pml_cm_hvy_recv_request_t*) base_request;
mtl_req = &request->req_base.req_mtl;
mtl_req = &request->req_mtl;
}
break;
@ -55,7 +55,7 @@ mca_pml_cm_cancel(struct ompi_request_t *ompi_req, int flag)
{
mca_pml_cm_thin_recv_request_t *request =
(mca_pml_cm_thin_recv_request_t*) base_request;
mtl_req = &request->req_base.req_mtl;
mtl_req = &request->req_mtl;
}
break;

View file

@ -64,8 +64,8 @@ mca_pml_cm_hvy_recv_request_free(struct ompi_request_t** request)
static void
mca_pml_cm_thin_recv_request_construct(mca_pml_cm_thin_recv_request_t* recvreq)
{
recvreq->req_base.req_mtl.ompi_req = (ompi_request_t*) recvreq;
recvreq->req_base.req_mtl.completion_callback = mca_pml_cm_thin_recv_request_completion;
recvreq->req_mtl.ompi_req = (ompi_request_t*) recvreq;
recvreq->req_mtl.completion_callback = mca_pml_cm_thin_recv_request_completion;
recvreq->req_base.req_ompi.req_free = mca_pml_cm_thin_recv_request_free;
recvreq->req_base.req_ompi.req_cancel = mca_pml_cm_cancel;
@ -77,8 +77,8 @@ mca_pml_cm_thin_recv_request_construct(mca_pml_cm_thin_recv_request_t* recvreq)
static void
mca_pml_cm_hvy_recv_request_construct(mca_pml_cm_hvy_recv_request_t* recvreq)
{
recvreq->req_base.req_mtl.ompi_req = (ompi_request_t*) recvreq;
recvreq->req_base.req_mtl.completion_callback = mca_pml_cm_hvy_recv_request_completion;
recvreq->req_mtl.ompi_req = (ompi_request_t*) recvreq;
recvreq->req_mtl.completion_callback = mca_pml_cm_hvy_recv_request_completion;
recvreq->req_base.req_ompi.req_free = mca_pml_cm_hvy_recv_request_free;
recvreq->req_base.req_ompi.req_cancel = mca_pml_cm_cancel;

View file

@ -25,6 +25,7 @@
struct mca_pml_cm_thin_recv_request_t {
mca_pml_cm_request_t req_base;
mca_mtl_request_t req_mtl; /**< the mtl specific memory. This field should be the last in the struct */
};
typedef struct mca_pml_cm_thin_recv_request_t mca_pml_cm_thin_recv_request_t;
OMPI_DECLSPEC OBJ_CLASS_DECLARATION(mca_pml_cm_thin_recv_request_t);
@ -38,6 +39,7 @@ struct mca_pml_cm_hvy_recv_request_t {
void *req_buff; /**< pointer to send buffer - may not be application buffer */
size_t req_bytes_packed; /**< packed size of a message given the datatype and count */
bool req_blocking;
mca_mtl_request_t req_mtl; /**< the mtl specific memory. This field should be the last in the struct */
};
typedef struct mca_pml_cm_hvy_recv_request_t mca_pml_cm_hvy_recv_request_t;
@ -171,7 +173,7 @@ do { \
src, \
tag, \
&recvreq->req_base.req_convertor, \
&recvreq->req_base.req_mtl)); \
&recvreq->req_mtl)); \
} while (0)
@ -195,7 +197,7 @@ do { \
request->req_peer, \
request->req_tag, \
&recvreq->req_base.req_convertor, \
&recvreq->req_base.req_mtl)); \
&recvreq->req_mtl)); \
} while (0)

View file

@ -46,7 +46,6 @@ struct mca_pml_cm_request_t {
struct ompi_communicator_t *req_comm; /**< communicator pointer */
struct ompi_datatype_t *req_datatype; /**< pointer to data type */
ompi_convertor_t req_convertor; /**< always need the convertor */
mca_mtl_request_t req_mtl; /**< the mtl specific memory. This field should be the last in the struct */
};
typedef struct mca_pml_cm_request_t mca_pml_cm_request_t;
OMPI_DECLSPEC OBJ_CLASS_DECLARATION(mca_pml_cm_request_t);

View file

@ -48,8 +48,8 @@ OBJ_CLASS_INSTANCE(mca_pml_cm_hvy_send_request_t,
static void mca_pml_cm_thin_send_request_construct(mca_pml_cm_thin_send_request_t* sendreq)
{
/* no need to reinit for every send -- never changes */
sendreq->req_send.req_base.req_mtl.ompi_req = (ompi_request_t*) sendreq;
sendreq->req_send.req_base.req_mtl.completion_callback = mca_pml_cm_thin_send_request_completion;
sendreq->req_mtl.ompi_req = (ompi_request_t*) sendreq;
sendreq->req_mtl.completion_callback = mca_pml_cm_thin_send_request_completion;
sendreq->req_send.req_base.req_ompi.req_free = mca_pml_cm_thin_send_request_free;
sendreq->req_send.req_base.req_ompi.req_cancel = mca_pml_cm_cancel;
sendreq->req_send.req_base.req_pml_type = MCA_PML_CM_REQUEST_SEND_THIN;
@ -59,8 +59,8 @@ static void mca_pml_cm_thin_send_request_construct(mca_pml_cm_thin_send_request_
static void mca_pml_cm_hvy_send_request_construct(mca_pml_cm_hvy_send_request_t* sendreq)
{
/* no need to reinit for every send -- never changes */
sendreq->req_send.req_base.req_mtl.ompi_req = (ompi_request_t*) sendreq;
sendreq->req_send.req_base.req_mtl.completion_callback = mca_pml_cm_hvy_send_request_completion;
sendreq->req_mtl.ompi_req = (ompi_request_t*) sendreq;
sendreq->req_mtl.completion_callback = mca_pml_cm_hvy_send_request_completion;
sendreq->req_send.req_base.req_ompi.req_free = mca_pml_cm_hvy_send_request_free;
sendreq->req_send.req_base.req_ompi.req_cancel = mca_pml_cm_cancel;
sendreq->req_send.req_base.req_pml_type = MCA_PML_CM_REQUEST_SEND_HEAVY;

View file

@ -35,6 +35,7 @@ OMPI_DECLSPEC OBJ_CLASS_DECLARATION(mca_pml_cm_send_request_t);
struct mca_pml_cm_thin_send_request_t {
mca_pml_cm_send_request_t req_send;
mca_mtl_request_t req_mtl; /**< the mtl specific memory. This field should be the last in the struct */
};
typedef struct mca_pml_cm_thin_send_request_t mca_pml_cm_thin_send_request_t;
OMPI_DECLSPEC OBJ_CLASS_DECLARATION(mca_pml_cm_thin_send_request_t);
@ -48,6 +49,7 @@ struct mca_pml_cm_hvy_send_request_t {
int32_t req_tag; /**< user defined tag */
void *req_buff; /**< pointer to send buffer - may not be application buffer */
bool req_blocking;
mca_mtl_request_t req_mtl; /**< the mtl specific memory. This field should be the last in the struct */
};
typedef struct mca_pml_cm_hvy_send_request_t mca_pml_cm_hvy_send_request_t;
OMPI_DECLSPEC OBJ_CLASS_DECLARATION(mca_pml_cm_hvy_send_request_t);
@ -203,7 +205,7 @@ do { \
&sendreq->req_send.req_base.req_convertor, \
sendmode, \
blocking, \
&sendreq->req_send.req_base.req_mtl)); \
&sendreq->req_mtl)); \
} while (0)
#define MCA_PML_CM_HVY_SEND_REQUEST_BSEND_ALLOC(sendreq, ret) \
@ -247,7 +249,7 @@ do { \
&sendreq->req_send.req_base.req_convertor, \
sendreq->req_send.req_send_mode, \
sendreq->req_blocking, \
&sendreq->req_send.req_base.req_mtl)); \
&sendreq->req_mtl)); \
if(OMPI_SUCCESS == ret && \
sendreq->req_send.req_send_mode == MCA_PML_BASE_SEND_BUFFERED) { \
MCA_PML_BASE_REQUEST_MPI_COMPLETE(&(sendreq)->req_send.req_base.req_ompi); \

View file

@ -29,35 +29,42 @@ int ompi_init_do_preconnect(void)
{
int comm_size = ompi_comm_size(MPI_COMM_WORLD);
int my_rank = ompi_comm_rank(MPI_COMM_WORLD);
int i, j, ret;
struct ompi_request_t **requests;
requests = (ompi_request_t**)malloc(comm_size * sizeof(struct ompi_request_t *));
if (NULL == requests) {
return OMPI_ERR_OUT_OF_RESOURCE;
}
for (i = j = 0; i < comm_size; ++i) {
if (i == my_rank) {
continue;
} else if (my_rank < i) {
ret = MCA_PML_CALL(isend(MPI_BOTTOM, 0, MPI_BYTE,
i, 1,
MCA_PML_BASE_SEND_STANDARD,
MPI_COMM_WORLD,
&requests[j++]));
int i, ret;
int next,prev;
struct ompi_request_t * requests[2];
if(comm_size == 2) {
if(my_rank){
ret = MCA_PML_CALL(send(MPI_BOTTOM, 0, MPI_BYTE,
0, 1,
MCA_PML_BASE_SEND_SYNCHRONOUS,
MPI_COMM_WORLD));
} else {
ret = MCA_PML_CALL(irecv(MPI_BOTTOM,0, MPI_BYTE, i,
1, MPI_COMM_WORLD,
&requests[j++]));
ret = MCA_PML_CALL(recv(MPI_BOTTOM,0, MPI_BYTE, 1,
1, MPI_COMM_WORLD,
MPI_STATUS_IGNORE));
}
} else {
for (i = 1; i < comm_size/2; ++i) {
next = (my_rank + i) % comm_size;
prev = (my_rank - i + comm_size) % comm_size;
ret = MCA_PML_CALL(irecv(MPI_BOTTOM,0, MPI_BYTE,
prev, 1,
MPI_COMM_WORLD,
&requests[0]));
ret = MCA_PML_CALL(isend(MPI_BOTTOM, 0, MPI_BYTE,
next, 1,
MCA_PML_BASE_SEND_STANDARD,
MPI_COMM_WORLD,
&requests[1]));
ret = ompi_request_wait_all(2, requests, MPI_STATUSES_IGNORE);
}
if (OMPI_SUCCESS != ret) {
return ret;
}
}
ret = ompi_request_wait_all(j, requests, MPI_STATUSES_IGNORE);
free(requests);
return ret;
}