
Back-end implementation of the collective MPI functions

This commit was SVN r1120.
This commit is contained in:
Vishal Sahay 2004-05-07 23:09:55 +00:00
parent 13b2738768
commit c01488ab28
4 changed files with 264 additions and 306 deletions

View file

@@ -25,15 +25,12 @@ int mca_coll_basic_allgather(void *sbuf, int scount,
                             int rcount, MPI_Datatype rdtype,
                             MPI_Comm comm)
 {
-#if 1
-  return LAM_ERR_NOT_IMPLEMENTED;
-#else
   int size;
   int err;
 
   /* Gather and broadcast. */
 
-  MPI_Comm_size(comm, &size);
+  size = lam_comm_size(comm);
   err = comm->c_coll.coll_gather_intra(sbuf, scount, sdtype, rbuf, rcount,
                                        rdtype, 0, comm);
@@ -43,5 +40,4 @@ int mca_coll_basic_allgather(void *sbuf, int scount,
   err = comm->c_coll.coll_bcast_intra(rbuf, rcount * size, rdtype,
                                       0, comm);
   return err;
-#endif
 }
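
For reference, the implementation above is the classic two-phase composition: gather every rank's contribution to rank 0 through the communicator's own gather routine, then broadcast the assembled buffer back out. A minimal standalone sketch of the same pattern using plain MPI collectives (illustrative only; the committed code dispatches through the comm->c_coll function pointers instead):

#include <mpi.h>

/* Allgather expressed as gather-to-root followed by broadcast -- the same
 * structure mca_coll_basic_allgather uses via the c_coll function table. */
int allgather_via_gather_bcast(void *sbuf, int scount, MPI_Datatype sdtype,
                               void *rbuf, int rcount, MPI_Datatype rdtype,
                               MPI_Comm comm)
{
    int size, err;

    MPI_Comm_size(comm, &size);

    /* Phase 1: collect every rank's block at rank 0. */
    err = MPI_Gather(sbuf, scount, sdtype, rbuf, rcount, rdtype, 0, comm);
    if (MPI_SUCCESS != err) {
        return err;
    }

    /* Phase 2: rank 0 broadcasts the assembled buffer to everyone. */
    return MPI_Bcast(rbuf, rcount * size, rdtype, 0, comm);
}

The trade-off is simplicity over performance: all data funnels through rank 0, which is acceptable for a baseline "basic" component.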

View file

@@ -14,12 +14,13 @@
 #include "mca/coll/coll.h"
 #include "mca/coll/base/coll_tags.h"
 #include "coll_basic.h"
+#include "mca/pml/pml.h"
 
 /*
  * alltoall
  *
- * Function: - MPI_Alltoall for non-lamd RPI's
+ * Function: - MPI_Alltoall
  * Accepts:  - same as MPI_Alltoall()
  * Returns:  - MPI_SUCCESS or an MPI error code
  */
@@ -28,9 +29,6 @@ int mca_coll_basic_alltoall(void *sbuf, int scount,
                            int rcount, MPI_Datatype rdtype,
                            MPI_Comm comm)
 {
-#if 1
-  return LAM_ERR_NOT_IMPLEMENTED;
-#else
   int i;
   int rank;
   int size;
@@ -38,18 +36,30 @@ int mca_coll_basic_alltoall(void *sbuf, int scount,
   int err;
   char *psnd;
   char *prcv;
+  MPI_Aint lb;
   MPI_Aint sndinc;
   MPI_Aint rcvinc;
-  MPI_Request *req;
-  MPI_Request *preq;
-  MPI_Request *qreq;
+  lam_request_t **req;
+  lam_request_t **sreq;
+  lam_request_t **rreq;
 
   /* Initialize. */
 
-  MPI_Comm_size(comm, &size);
-  MPI_Comm_rank(comm, &rank);
-  MPI_Type_extent(sdtype, &sndinc);
-  MPI_Type_extent(rdtype, &rcvinc);
+  size = lam_comm_size(comm);
+  rank = lam_comm_rank(comm);
+
+  err = lam_ddt_get_extent(sdtype, &lb, &sndinc);
+  if (0 != err) {
+    return LAM_ERROR;
+  }
+
+  err = lam_ddt_get_extent(rdtype, &lb, &rcvinc);
+  if (0 != err) {
+    return LAM_ERROR;
+  }
+
   sndinc *= scount;
   rcvinc *= rcount;
@@ -57,9 +67,8 @@ int mca_coll_basic_alltoall(void *sbuf, int scount,
   nreqs = 2 * (size - 1);
   if (nreqs > 0) {
-    req = malloc(nreqs * sizeof(MPI_Request));
+    req = malloc(nreqs * sizeof(lam_request_t *));
     if (NULL == req) {
-      free(req);
       return ENOMEM;
     }
   } else {
@@ -70,18 +79,16 @@ int mca_coll_basic_alltoall(void *sbuf, int scount,
   psnd = ((char *) sbuf) + (rank * sndinc);
   prcv = ((char *) rbuf) + (rank * rcvinc);
 
-#if 0
-  /* JMS: Need a lam_datatype_something() here that allows two
-     different datatypes */
-  err = lam_dtsndrcv(psnd, scount, sdtype,
-                     prcv, rcount, rdtype, BLKMPIALLTOALL, comm);
+  err = lam_ddt_sndrcv(psnd, scount, sdtype,
+                       prcv, rcount, rdtype,
+                       MCA_COLL_BASE_TAG_ALLTOALL, comm);
   if (MPI_SUCCESS != err) {
     if (NULL != req)
-      LAM_FREE(req);
-    lam_mkpt(comm);
+      free(req);
     return err;
   }
-#endif
 
   /* If only one process, we're done. */
@@ -91,38 +98,31 @@ int mca_coll_basic_alltoall(void *sbuf, int scount,
   /* Initiate all send/recv to/from others. */
 
-  preq = req;
-  qreq = req + size - 1;
+  rreq = req;
+  sreq = req + size - 1;
   prcv = (char*) rbuf;
   psnd = (char*) sbuf;
   for (i = (rank + 1) % size; i != rank;
-       i = (i + 1) % size, ++preq, ++qreq) {
-#if 0
-    /* JMS: Need to replace this with negative tags and and direct PML
-       calls */
-    err = MPI_Recv_init(prcv + (i * rcvinc), rcount, rdtype, i,
-                        BLKMPIALLTOALL, comm, preq);
+       i = (i + 1) % size, ++rreq, ++sreq) {
+
+    err = mca_pml.pml_irecv_init(prcv + (i * rcvinc), rcount, rdtype, i,
+                                 MCA_COLL_BASE_TAG_ALLTOALL, comm, rreq);
     if (MPI_SUCCESS != err) {
-      LAM_FREE(req);
+      free(req);
       return err;
     }
-#endif
 
-#if 0
-    /* JMS: Need to replace this with negative tags and and direct PML
-       calls */
-    err = MPI_Send_init(psnd + (i * sndinc), scount, sdtype, i,
-                        BLKMPIALLTOALL, comm, qreq);
+    err = mca_pml.pml_isend(psnd + (i * sndinc), scount, sdtype, i,
+                            MCA_COLL_BASE_TAG_ALLTOALL,
+                            MCA_PML_BASE_SEND_STANDARD, comm, sreq);
     if (MPI_SUCCESS != err) {
-      LAM_FREE(req);
+      free(req);
       return err;
     }
-#endif
   }
 
-  /* Start all the requests. */
-
-  err = MPI_Startall(nreqs, req);
   if (MPI_SUCCESS != err) {
     free(req);
     return err;
@@ -130,23 +130,20 @@ int mca_coll_basic_alltoall(void *sbuf, int scount,
   /* Wait for them all. */
 
-  err = MPI_Waitall(nreqs, req, MPI_STATUSES_IGNORE);
+  err = mca_pml.pml_wait_all(nreqs, req, MPI_STATUSES_IGNORE);
   if (MPI_SUCCESS != err) {
     free(req);
     return err;
   }
 
-  for (i = 0, preq = req; i < nreqs; ++i, ++preq) {
-    err = MPI_Request_free(preq);
-    if (MPI_SUCCESS != err) {
-      free(req);
-      return err;
-    }
+  /* Free the reqs */
+
+  for (i = 0, rreq = req; i < nreqs; ++i, ++rreq) {
+    mca_pml.pml_free(rreq);
   }
 
   /* All done */
 
   free(req);
   return MPI_SUCCESS;
-#endif
 }
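
The rewritten alltoall above follows a standard pairwise nonblocking scheme: copy the local block in place, post a receive and a send for every other rank (here through the PML with a reserved collective tag), then wait for everything to complete. A rough equivalent written against plain MPI point-to-point calls (illustrative only; tag 0 and the helper name are mine, and per-call error checks on the nonblocking posts are omitted since the default MPI error handler aborts on failure):

#include <stdlib.h>
#include <mpi.h>

/* Pairwise nonblocking alltoall: local copy, then one Irecv and one Isend
 * per remote peer, then a single Waitall. */
int alltoall_nonblocking(void *sbuf, int scount, MPI_Datatype sdtype,
                         void *rbuf, int rcount, MPI_Datatype rdtype,
                         MPI_Comm comm)
{
    int i, rank, size, err, nreqs;
    MPI_Aint lb, sext, rext;
    MPI_Request *reqs, *preq;

    MPI_Comm_size(comm, &size);
    MPI_Comm_rank(comm, &rank);
    MPI_Type_get_extent(sdtype, &lb, &sext);
    MPI_Type_get_extent(rdtype, &lb, &rext);
    sext *= scount;
    rext *= rcount;

    /* Move our own block from sbuf to rbuf (send-to-self is legal here
     * because the matching receive is posted by the same call). */
    err = MPI_Sendrecv((char *) sbuf + rank * sext, scount, sdtype, rank, 0,
                       (char *) rbuf + rank * rext, rcount, rdtype, rank, 0,
                       comm, MPI_STATUS_IGNORE);
    if (MPI_SUCCESS != err || size < 2) {
        return err;
    }

    nreqs = 2 * (size - 1);
    reqs = preq = malloc(nreqs * sizeof(MPI_Request));
    if (NULL == reqs) {
        return MPI_ERR_NO_MEM;
    }

    /* One receive and one send per remote peer. */
    for (i = (rank + 1) % size; i != rank; i = (i + 1) % size) {
        MPI_Irecv((char *) rbuf + i * rext, rcount, rdtype, i, 0, comm, preq++);
        MPI_Isend((char *) sbuf + i * sext, scount, sdtype, i, 0, comm, preq++);
    }

    err = MPI_Waitall(nreqs, reqs, MPI_STATUSES_IGNORE);
    free(reqs);
    return err;
}

Starting the loop at (rank + 1) % size, as the committed code does, staggers which peer each rank contacts first instead of having every rank hammer rank 0 at once.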

View file

@@ -7,9 +7,12 @@
 #include "constants.h"
 #include "mpi.h"
+#include "datatype/datatype.h"
 #include "mca/coll/coll.h"
 #include "mca/coll/base/coll_tags.h"
 #include "coll_basic.h"
+#include "mca/pml/pml.h"
+#include "util/hibit.h"
 
 /*
@@ -23,59 +26,41 @@ int mca_coll_basic_bcast_lin(void *buff, int count,
                             MPI_Datatype datatype, int root,
                             MPI_Comm comm)
 {
-#if 1
-  return LAM_ERR_NOT_IMPLEMENTED;
-#else
   int i;
   int size;
   int rank;
   int err;
-  MPI_Request *preq;
-  /* JMS: Need to define this somewhere */
-#define LAM_COLLMAXLIN 4
-  MPI_Request reqs[LAM_COLLMAXLIN];
+  lam_request_t **preq;
+  lam_request_t **reqs = comm->bcast_lin_reqs;
 
-  MPI_Comm_size(comm, &size);
-  MPI_Comm_rank(comm, &rank);
+  size = lam_comm_size(comm);
+  rank = lam_comm_rank(comm);
 
   /* Non-root receive the data. */
 
   if (rank != root) {
-#if 0
-    /* JMS: Need to replace this with negative tags and and direct PML
-       calls */
-    return MPI_Recv(buff, count, datatype, root,
-                    BLKMPIBCAST, comm, MPI_STATUS_IGNORE);
-#endif
+    return mca_pml.pml_recv(buff, count, datatype, root,
                            MCA_COLL_BASE_TAG_BCAST, comm,
                            MPI_STATUS_IGNORE);
   }
 
   /* Root sends data to all others. */
 
+  /* VPS: as per Tim's suggestion there is no advantage of having
     isend_init/start over normal isend. So just trying a normal isend */
+
   for (i = 0, preq = reqs; i < size; ++i) {
     if (i == rank)
       continue;
 
-#if 0
-    /* JMS: Need to replace this with negative tags and and direct PML
-       calls */
-    err = MPI_Send_init(buff, count, datatype, i, BLKMPIBCAST,
+    err = mca_pml.pml_isend(buff, count, datatype, i,
                            MCA_COLL_BASE_TAG_BCAST,
                            MCA_PML_BASE_SEND_STANDARD,
                            comm, preq++);
     if (MPI_SUCCESS != err) {
       return err;
     }
-#endif
-  }
-
-  /* Start and wait on all requests. */
-
-  err = MPI_Startall(size - 1, reqs);
-  if (MPI_SUCCESS != err) {
-    return err;
-  }
-
-  err = MPI_Waitall(size - 1, reqs, MPI_STATUSES_IGNORE);
-  if (MPI_SUCCESS != err) {
-    return err;
   }
 
   /* Free the requests. */
@@ -84,20 +69,17 @@ int mca_coll_basic_bcast_lin(void *buff, int count,
     if (i == rank)
       continue;
 
-    err = MPI_Request_free(preq);
-    if (MPI_SUCCESS != err)
-      return err;
+    err = mca_pml.pml_free(preq);
     ++preq;
   }
 
   /* All done */
 
   return MPI_SUCCESS;
-#endif
 }
 
 /*
  * bcast_log
  *
@@ -109,9 +91,6 @@ int mca_coll_basic_bcast_log(void *buff, int count,
                             MPI_Datatype datatype, int root,
                             MPI_Comm comm)
 {
-#if 1
-  return LAM_ERR_NOT_IMPLEMENTED;
-#else
   int i;
   int size;
   int rank;
@@ -122,34 +101,28 @@ int mca_coll_basic_bcast_log(void *buff, int count,
   int mask;
   int err;
   int nreqs;
-  MPI_Request *preq;
-  /* JMS: Need to define this somewhere */
-#define LAM_COLLMAXDIM 64
-  MPI_Request reqs[LAM_COLLMAXDIM];
+  lam_request_t **preq;
+  lam_request_t **reqs = comm->bcast_log_reqs;
 
-  MPI_Comm_rank(comm, &rank);
-  MPI_Comm_size(comm, &size);
+  size = lam_comm_size(comm);
+  rank = lam_comm_rank(comm);
   vrank = (rank + size - root) % size;
-#if 0
-  /* JMS Need to cache this somewhere */
   dim = comm->c_cube_dim;
   hibit = lam_hibit(vrank, dim);
-#endif
   --dim;
 
   /* Receive data from parent in the tree. */
 
   if (vrank > 0) {
     peer = ((vrank & ~(1 << hibit)) + root) % size;
-#if 0
-    /* JMS: Need to replace this with negative tags and and direct PML
-       calls */
-    err = MPI_Recv(buff, count, datatype, peer,
-                   BLKMPIBCAST, comm, MPI_STATUS_IGNORE);
+
+    err = mca_pml.pml_recv(buff, count, datatype, peer,
                            MCA_COLL_BASE_TAG_BCAST,
                            comm, MPI_STATUS_IGNORE);
     if (MPI_SUCCESS != err) {
       return err;
     }
-#endif
   }
 
   /* Send data to the children. */
@@ -162,41 +135,32 @@ int mca_coll_basic_bcast_log(void *buff, int count,
       peer = (peer + root) % size;
       ++nreqs;
 
-#if 0
-      /* JMS: Need to replace this with negative tags and and direct PML
-         calls */
-      err = MPI_Send_init(buff, count, datatype, peer, BLKMPIBCAST,
+      err = mca_pml.pml_isend(buff, count, datatype, peer,
                              MCA_COLL_BASE_TAG_BCAST,
                              MCA_PML_BASE_SEND_STANDARD,
                              comm, preq++);
       if (MPI_SUCCESS != err) {
         return err;
       }
-#endif
     }
   }
 
   /* Start and wait on all requests. */
 
   if (nreqs > 0) {
-    err = MPI_Startall(nreqs, reqs);
-    if (MPI_SUCCESS != err) {
-      return err;
-    }
-
-    err = MPI_Waitall(nreqs, reqs, MPI_STATUSES_IGNORE);
+    err = mca_pml.pml_wait_all(nreqs, reqs, MPI_STATUSES_IGNORE);
     if (MPI_SUCCESS != err) {
       return err;
     }
 
     for (i = 0, preq = reqs; i < nreqs; ++i, ++preq) {
-      err = MPI_Request_free(preq);
-      if (MPI_SUCCESS != err) {
-        return err;
-      }
+      mca_pml.pml_free(preq);
    }
  }
 
  /* All done */
 
  return MPI_SUCCESS;
-#endif
 }
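
Two broadcast variants are touched here: the linear one (the root posts an isend to every other rank while non-roots do a single receive) and the logarithmic one, which arranges the ranks in a binomial tree keyed off the highest set bit of the root-relative rank. A rough standalone sketch of that tree pattern with plain MPI calls (illustrative only; tag 0 and the helper name are mine, and the highest-bit search below stands in for what lam_hibit() presumably computes):

#include <mpi.h>

/* Binomial-tree broadcast: ranks are rotated so the root becomes virtual
 * rank 0; each process receives once from its parent (highest set bit of
 * its virtual rank cleared) and then forwards to its children (each higher
 * bit set in turn). */
int bcast_binomial(void *buff, int count, MPI_Datatype datatype,
                   int root, MPI_Comm comm)
{
    int size, rank, vrank, mask, peer, err;

    MPI_Comm_size(comm, &size);
    MPI_Comm_rank(comm, &rank);
    vrank = (rank + size - root) % size;

    /* Receive from the parent: clear the highest set bit of vrank. */
    if (vrank > 0) {
        for (mask = 1; mask * 2 <= vrank; mask *= 2)
            ;                       /* mask == highest power of two <= vrank */
        peer = ((vrank & ~mask) + root) % size;
        err = MPI_Recv(buff, count, datatype, peer, 0, comm,
                       MPI_STATUS_IGNORE);
        if (MPI_SUCCESS != err) {
            return err;
        }
        mask *= 2;                  /* children start above our own bit */
    } else {
        mask = 1;
    }

    /* Send to each child: set successive bits above our own highest bit. */
    for (; mask < size; mask *= 2) {
        if (vrank + mask < size) {
            peer = (vrank + mask + root) % size;
            err = MPI_Send(buff, count, datatype, peer, 0, comm);
            if (MPI_SUCCESS != err) {
                return err;
            }
        }
    }

    return MPI_SUCCESS;
}

Each rank receives once and forwards to at most log2(size) children, so the broadcast completes in roughly log2(size) rounds instead of the size - 1 sends of the linear version.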

View file

@@ -6,11 +6,12 @@
 #include "coll_basic.h"
 #include "constants.h"
+#include "coll_basic.h"
 #include "mpi.h"
+#include "datatype/datatype.h"
 #include "mca/coll/coll.h"
 #include "mca/coll/base/coll_tags.h"
-#include "coll_basic.h"
+#include "mca/pml/pml.h"
 
 /*
  * gather
@@ -23,9 +24,6 @@ int mca_coll_basic_gather(void *sbuf, int scount, MPI_Datatype sdtype,
                          void *rbuf, int rcount, MPI_Datatype rdtype,
                          int root, MPI_Comm comm)
 {
-#if 1
-  return LAM_ERR_NOT_IMPLEMENTED;
-#else
   int i;
   int err;
   int rank;
@@ -33,35 +31,39 @@ int mca_coll_basic_gather(void *sbuf, int scount, MPI_Datatype sdtype,
   char *ptmp;
   MPI_Aint incr;
   MPI_Aint extent;
+  MPI_Aint lb;
 
-  /* JMS: Need to replace lots things in this file: lam_dt* stuff with
-     lam_datatype_*() functions. Also need to replace lots of
-     MPI_Send/MPI_Recv with negative tags and PML entry points. */
-
-  MPI_Comm_rank(comm, &rank);
-  MPI_Comm_size(comm, &size);
+  size = lam_comm_size(comm);
+  rank = lam_comm_rank(comm);
 
   /* Everyone but root sends data and returns. */
 
   if (rank != root) {
-    err = MPI_Send(sbuf, scount, sdtype, root, BLKMPIGATHER, comm);
+    err = mca_pml.pml_send(sbuf, scount, sdtype, root,
                           MCA_COLL_BASE_TAG_GATHER,
                           MCA_PML_BASE_SEND_STANDARD, comm);
     return err;
   }
 
   /* I am the root, loop receiving the data. */
 
-  MPI_Type_extent(rdtype, &extent);
+  err = lam_ddt_get_extent(rdtype, &lb, &extent);
+  if (0 != err)
+    return LAM_ERROR;
+
   incr = extent * rcount;
   for (i = 0, ptmp = (char *) rbuf; i < size; ++i, ptmp += incr) {
 
     /* simple optimization */
 
     if (i == rank) {
-      err = lam_dtsndrcv(sbuf, scount, sdtype, ptmp,
-                         rcount, rdtype, BLKMPIGATHER, comm);
+      err = lam_ddt_sndrcv(sbuf, scount, sdtype, ptmp,
                           rcount, rdtype,
                           MCA_COLL_BASE_TAG_GATHER, comm);
     } else {
-      err = MPI_Recv(ptmp, rcount, rdtype, i,
-                     BLKMPIGATHER, comm, MPI_STATUS_IGNORE);
+      err = mca_pml.pml_recv(ptmp, rcount, rdtype, i,
                             MCA_COLL_BASE_TAG_GATHER,
                             comm, MPI_STATUS_IGNORE);
     }
     if (MPI_SUCCESS != err) {
       return err;
@@ -71,5 +73,4 @@ int mca_coll_basic_gather(void *sbuf, int scount, MPI_Datatype sdtype,
   /* All done */
 
   return MPI_SUCCESS;
-#endif
 }
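
The gather above is the straightforward linear form: every non-root rank sends its buffer directly to the root, and the root receives one block per rank into the matching offset of the receive buffer, handling its own contribution with a local datatype-aware copy. A rough plain-MPI sketch of the same structure (illustrative only; tag 0 and the helper name are mine, and a self send/receive stands in for the internal lam_ddt_sndrcv() copy):

#include <mpi.h>

/* Linear gather: non-roots send to the root; the root receives one block
 * per rank, in rank order, into rbuf. */
int gather_linear(void *sbuf, int scount, MPI_Datatype sdtype,
                  void *rbuf, int rcount, MPI_Datatype rdtype,
                  int root, MPI_Comm comm)
{
    int i, rank, size, err;
    MPI_Aint lb, extent;
    char *ptmp;

    MPI_Comm_size(comm, &size);
    MPI_Comm_rank(comm, &rank);

    /* Everyone but the root just sends and is done. */
    if (rank != root) {
        return MPI_Send(sbuf, scount, sdtype, root, 0, comm);
    }

    /* The root walks the receive buffer one slot per rank. */
    MPI_Type_get_extent(rdtype, &lb, &extent);
    for (i = 0, ptmp = (char *) rbuf; i < size; ++i, ptmp += extent * rcount) {
        if (i == rank) {
            /* Own contribution: a datatype-aware local copy via self
             * send/receive (both halves are posted by the same call). */
            err = MPI_Sendrecv(sbuf, scount, sdtype, rank, 0,
                               ptmp, rcount, rdtype, rank, 0,
                               comm, MPI_STATUS_IGNORE);
        } else {
            err = MPI_Recv(ptmp, rcount, rdtype, i, 0, comm,
                           MPI_STATUS_IGNORE);
        }
        if (MPI_SUCCESS != err) {
            return err;
        }
    }
    return MPI_SUCCESS;
}

This costs size - 1 receives serialized at the root, which matches the committed loop; smarter components typically replace it with tree-based gathers for large communicators.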