/*
 * Copyright (c) 2004-2005 The Trustees of Indiana University.
 *                         All rights reserved.
 * Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
 *                         All rights reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart.  All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */
#include "ompi_config.h"
#include "coll_basic.h"

#include "mpi.h"
#include "include/constants.h"
#include "communicator/communicator.h"
#include "mca/coll/coll.h"
#include "mca/coll/base/coll_tags.h"
/*
|
2004-06-29 04:02:25 +04:00
|
|
|
* allgather_intra
|
2004-01-12 00:26:55 +03:00
|
|
|
*
|
|
|
|
* Function: - allgather using other MPI collections
|
|
|
|
* Accepts: - same as MPI_Allgather()
|
|
|
|
* Returns: - MPI_SUCCESS or error code
|
|
|
|
*/
|
2004-06-29 04:02:25 +04:00
|
|
|
int mca_coll_basic_allgather_intra(void *sbuf, int scount,
|
|
|
|
struct ompi_datatype_t *sdtype, void *rbuf,
|
|
|
|
int rcount, struct ompi_datatype_t *rdtype,
|
|
|
|
struct ompi_communicator_t *comm)
|
2004-01-12 00:26:55 +03:00
|
|
|
{
|
|
|
|
int size;
|
|
|
|
int err;
|
2004-05-08 03:09:55 +04:00
|
|
|
|
2004-01-12 00:26:55 +03:00
|
|
|
/* Gather and broadcast. */
|
|
|
|
|
2004-06-07 19:33:53 +04:00
|
|
|
size = ompi_comm_size(comm);
|
2004-01-12 00:26:55 +03:00
|
|
|
|
2004-06-29 04:02:25 +04:00
|
|
|
err = comm->c_coll.coll_gather(sbuf, scount, sdtype, rbuf, rcount,
|
|
|
|
rdtype, 0, comm);
|
2004-01-12 00:26:55 +03:00
|
|
|
if (MPI_SUCCESS != err)
|
|
|
|
return err;
|
|
|
|
|
2004-06-29 04:02:25 +04:00
|
|
|
err = comm->c_coll.coll_bcast(rbuf, rcount * size, rdtype, 0, comm);
|
2004-01-12 00:26:55 +03:00
|
|
|
return err;
|
|
|
|
}
|
2004-06-29 04:02:25 +04:00
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* allgather_inter
|
|
|
|
*
|
|
|
|
* Function: - allgather using other MPI collections
|
|
|
|
* Accepts: - same as MPI_Allgather()
|
|
|
|
* Returns: - MPI_SUCCESS or error code
|
|
|
|
*/
|
|
|
|
int mca_coll_basic_allgather_inter(void *sbuf, int scount,
|
|
|
|
struct ompi_datatype_t *sdtype,
|
|
|
|
void *rbuf, int rcount,
|
|
|
|
struct ompi_datatype_t *rdtype,
|
|
|
|
struct ompi_communicator_t *comm)
|
|
|
|
{
|
2004-08-04 02:12:57 +04:00
|
|
|
int rank;
|
|
|
|
int root=0;
|
|
|
|
int size, rsize;
|
|
|
|
int err;
|
|
|
|
int i;
|
|
|
|
char *tmpbuf=NULL, *ptmp;
|
|
|
|
long rlb, slb, rextent, sextent;
|
|
|
|
long incr;
|
|
|
|
ompi_request_t *req;
|
|
|
|
ompi_request_t **reqs = comm->c_coll_basic_data->mccb_reqs;
|
2004-06-29 04:02:25 +04:00
|
|
|
|
2004-08-04 02:12:57 +04:00
|
|
|
rank = ompi_comm_rank ( comm );
|
|
|
|
size = ompi_comm_size (comm);
|
|
|
|
rsize = ompi_comm_remote_size (comm);
|
|
|
|
|
|
|
|
/* Algorithm:
|
|
|
|
- a gather to the root in remote group (simultaniously executed,
|
|
|
|
thats why we cannot use coll_gather).
|
|
|
|
- exchange the temp-results between two roots
|
|
|
|
- inter-bcast (again simultanious).
|
|
|
|
*/
|
|
|
|
|
|
|
|
/* Step one: gather operations: */
|
|
|
|
if ( rank != root ) {
|
|
|
|
/* send your data to root */
|
|
|
|
err = mca_pml.pml_send(sbuf, scount, sdtype, root,
|
|
|
|
MCA_COLL_BASE_TAG_ALLGATHER,
|
|
|
|
MCA_PML_BASE_SEND_STANDARD, comm);
|
|
|
|
if ( OMPI_SUCCESS != err ) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else {
|
2004-08-08 01:14:22 +04:00
|
|
|
/* receive a msg. from all other procs.*/
|
|
|
|
err = ompi_ddt_get_extent(rdtype, &rlb, &rextent);
|
|
|
|
if (OMPI_SUCCESS != err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
err = ompi_ddt_get_extent(sdtype, &slb, &sextent);
|
|
|
|
if (OMPI_SUCCESS != err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2004-08-04 02:12:57 +04:00
|
|
|
/* Do a send-recv between the two root procs. to avoid deadlock */
|
|
|
|
err = mca_pml.pml_isend (sbuf, scount, sdtype, 0,
|
|
|
|
MCA_COLL_BASE_TAG_ALLGATHER,
|
|
|
|
MCA_PML_BASE_SEND_STANDARD,
|
2004-08-08 01:14:22 +04:00
|
|
|
comm, &reqs[rsize] );
|
2004-08-04 02:12:57 +04:00
|
|
|
if ( OMPI_SUCCESS != err ) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2004-08-08 01:14:22 +04:00
|
|
|
err = mca_pml.pml_irecv(rbuf, rcount, rdtype, 0,
|
|
|
|
MCA_COLL_BASE_TAG_ALLGATHER, comm,
|
|
|
|
&reqs[0]);
|
2004-08-04 02:12:57 +04:00
|
|
|
if (OMPI_SUCCESS != err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
incr = rextent * rcount;
|
|
|
|
ptmp = (char *) rbuf + incr;
|
|
|
|
for (i = 1; i < rsize; ++i, ptmp += incr) {
|
|
|
|
err = mca_pml.pml_irecv(ptmp, rcount, rdtype, i,
|
|
|
|
MCA_COLL_BASE_TAG_ALLGATHER,
|
2004-08-08 01:14:22 +04:00
|
|
|
comm, &reqs[i]);
|
2004-08-04 02:12:57 +04:00
|
|
|
if (MPI_SUCCESS != err) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2004-10-12 19:50:01 +04:00
|
|
|
err = ompi_request_wait_all (rsize+1, reqs, MPI_STATUSES_IGNORE);
|
2004-08-04 02:12:57 +04:00
|
|
|
if ( OMPI_SUCCESS != err ) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Step 2: exchange the resuts between the root processes */
|
|
|
|
tmpbuf = (char *) malloc (scount * size *sextent);
|
|
|
|
if ( NULL == tmpbuf ) {
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
err = mca_pml.pml_isend (rbuf, rsize*rcount, rdtype, 0,
|
|
|
|
MCA_COLL_BASE_TAG_ALLGATHER,
|
|
|
|
MCA_PML_BASE_SEND_STANDARD,
|
|
|
|
comm, &req );
|
|
|
|
if ( OMPI_SUCCESS != err ) {
|
|
|
|
goto exit;
|
|
|
|
}
|
|
|
|
|
|
|
|
err = mca_pml.pml_recv(tmpbuf, size *scount, sdtype, 0,
|
|
|
|
MCA_COLL_BASE_TAG_ALLGATHER, comm,
|
|
|
|
MPI_STATUS_IGNORE);
|
|
|
|
if (OMPI_SUCCESS != err) {
|
|
|
|
goto exit;
|
|
|
|
}
|
|
|
|
|
2004-10-12 19:50:01 +04:00
|
|
|
err = ompi_request_wait_all(1, &req, MPI_STATUS_IGNORE);
|
2004-08-04 02:12:57 +04:00
|
|
|
if (OMPI_SUCCESS != err ) {
|
|
|
|
goto exit;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/* Step 3: bcast the data to the remote group. This
|
|
|
|
happens in both groups simultaniously, thus we can
|
|
|
|
not use coll_bcast (this would deadlock).
|
|
|
|
*/
|
|
|
|
if ( rank != root ) {
|
|
|
|
/* post the recv */
|
|
|
|
err = mca_pml.pml_recv (rbuf, size*rcount, rdtype, 0,
|
|
|
|
MCA_COLL_BASE_TAG_ALLGATHER, comm,
|
|
|
|
MPI_STATUS_IGNORE);
|
|
|
|
if ( OMPI_SUCCESS != err ) {
|
|
|
|
goto exit;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else {
|
|
|
|
/* Send the data to every other process in the remote group
|
|
|
|
except to rank zero. which has it already. */
|
|
|
|
for ( i=1; i<rsize; i++ ) {
|
|
|
|
err = mca_pml.pml_isend(tmpbuf, size*scount, sdtype, i,
|
|
|
|
MCA_COLL_BASE_TAG_ALLGATHER,
|
|
|
|
MCA_PML_BASE_SEND_STANDARD,
|
|
|
|
comm, &reqs[i-1] );
|
|
|
|
if ( OMPI_SUCCESS != err ) {
|
|
|
|
goto exit;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2004-10-12 19:50:01 +04:00
|
|
|
err = ompi_request_wait_all (rsize-1, reqs, MPI_STATUSES_IGNORE);
|
2004-08-04 02:12:57 +04:00
|
|
|
if ( OMPI_SUCCESS != err ) {
|
|
|
|
goto exit;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
exit:
|
|
|
|
if ( NULL != tmpbuf ) {
|
|
|
|
free ( tmpbuf);
|
|
|
|
}
|
|
|
|
|
|
|
|
return err;
|
2004-06-29 04:02:25 +04:00
|
|
|
}
|