openmpi/ompi/mca/coll/tuned/coll_tuned_util.c
Jelena Pjesivac-Grbovic 85192c01b0 Modifying util functionality:
- removing static qualification on ompi_coll_tuned_sendrecv 
- adding ompi_coll_tuned_isendrecv function which posts isend and irecv requests
These changes are separate from but necessary for new algorithms I am working on.

This commit was SVN r13161.
2007-01-17 21:29:13 +00:00

153 lines
6.0 KiB
C

/*
 * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation. All rights reserved.
 * Copyright (c) 2004-2005 The University of Tennessee and The University
 *                         of Tennessee Research Foundation. All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart. All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */

#include "ompi_config.h"
#include "coll_tuned.h"
#include "mpi.h"
#include "ompi/constants.h"
#include "ompi/datatype/datatype.h"
#include "ompi/communicator/communicator.h"
#include "ompi/mca/coll/coll.h"
#include "ompi/mca/coll/base/coll_tags.h"
#include "ompi/mca/pml/pml.h"
#include "coll_tuned_util.h"
int ompi_coll_tuned_sendrecv_actual( void* sendbuf, int scount,
                                     ompi_datatype_t* sdatatype,
                                     int dest, int stag,
                                     void* recvbuf, int rcount,
                                     ompi_datatype_t* rdatatype,
                                     int source, int rtag,
                                     struct ompi_communicator_t* comm,
                                     ompi_status_public_t* status )
{ /* post receive first, then send, then waitall... should be fast (I hope) */
    int err, line = 0;
    ompi_request_t* reqs[2];
    ompi_status_public_t statuses[2];

    /* post the irecv */
    err = MCA_PML_CALL(irecv( recvbuf, rcount, rdatatype, source, rtag,
                              comm, &reqs[0]));
    if (err != MPI_SUCCESS) { line = __LINE__; goto error_handler; }

    /* post the standard isend to the destination */
    err = MCA_PML_CALL(isend( sendbuf, scount, sdatatype, dest, stag,
                              MCA_PML_BASE_SEND_STANDARD, comm, &reqs[1]));
    if (err != MPI_SUCCESS) { line = __LINE__; goto error_handler; }

    err = ompi_request_wait_all( 2, reqs, statuses );
    if (err != MPI_SUCCESS) { line = __LINE__; goto error_handler; }

    if (MPI_STATUS_IGNORE != status) {
        *status = statuses[0];
    }

    return (MPI_SUCCESS);

 error_handler:
    OPAL_OUTPUT ((ompi_coll_tuned_stream, "%s:%d: Error %d occurred\n",
                  __FILE__, line, err));
    return (err);
}
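
/*
 * Convenience wrapper: when both peers are this rank the data is copied
 * locally with ompi_ddt_sndrcv and the PML is bypassed; otherwise the
 * exchange is delegated to ompi_coll_tuned_sendrecv_actual.
 */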
inline int
ompi_coll_tuned_sendrecv( void* sendbuf, int scount, ompi_datatype_t* sdatatype,
                          int dest, int stag,
                          void* recvbuf, int rcount, ompi_datatype_t* rdatatype,
                          int source, int rtag,
                          struct ompi_communicator_t* comm,
                          ompi_status_public_t* status, int myid )
{
    if ((dest == myid) && (source == myid)) {
        return (int) ompi_ddt_sndrcv(sendbuf, (int32_t) scount, sdatatype,
                                     recvbuf, (int32_t) rcount, rdatatype);
    }
    return ompi_coll_tuned_sendrecv_actual (sendbuf, scount, sdatatype,
                                            dest, stag,
                                            recvbuf, rcount, rdatatype,
                                            source, rtag, comm, status);
}

/*
 * localcompleted version that makes sure the send has completed locally.
 * Currently this is a sync call, but will change to a locally completed
 * version when available.
 */
int ompi_coll_tuned_sendrecv_actual_localcompleted( void* sendbuf, int scount,
                                                    ompi_datatype_t* sdatatype,
                                                    int dest, int stag,
                                                    void* recvbuf, int rcount,
                                                    ompi_datatype_t* rdatatype,
                                                    int source, int rtag,
                                                    struct ompi_communicator_t* comm,
                                                    ompi_status_public_t* status )
{ /* post receive first, then [local] sync send, then wait... should be fast (I hope) */
    int err, line = 0;
    ompi_request_t* req[2];
    ompi_status_public_t tmpstatus[2];

    /* post the irecv */
    err = MCA_PML_CALL(irecv( recvbuf, rcount, rdatatype, source, rtag,
                              comm, &(req[0])));
    if (err != MPI_SUCCESS) { line = __LINE__; goto error_handler; }

    /* post a synchronous isend (stands in for a locally completed send for now) */
    err = MCA_PML_CALL(isend( sendbuf, scount, sdatatype, dest, stag,
                              MCA_PML_BASE_SEND_SYNCHRONOUS, comm, &(req[1])));
    if (err != MPI_SUCCESS) { line = __LINE__; goto error_handler; }

    err = ompi_request_wait_all( 2, req, tmpstatus );
    if (err != MPI_SUCCESS) { line = __LINE__; goto error_handler; }

    if (MPI_STATUS_IGNORE != status) {
        *status = tmpstatus[0];
    }

    return (MPI_SUCCESS);

 error_handler:
    OPAL_OUTPUT ((ompi_coll_tuned_stream, "%s:%d: Error %d occurred\n",
                  __FILE__, line, err));
    return (err);
}
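
/*
 * Non-blocking variant added with this commit: it only posts the irecv and
 * the standard isend and hands the two requests back through *rreq and
 * *sreq; the caller is responsible for completing them later (e.g. with
 * ompi_request_wait_all).
 */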
inline int
ompi_coll_tuned_isendrecv( void* sendbuf, int scount, ompi_datatype_t* sdtype,
                           int dest, int stag, ompi_request_t** sreq,
                           void* recvbuf, int rcount, ompi_datatype_t* rdtype,
                           int source, int rtag, ompi_request_t** rreq,
                           struct ompi_communicator_t* comm ) {
    int ret, line;

    ret = MCA_PML_CALL(irecv(recvbuf, rcount, rdtype, source, rtag, comm, rreq));
    if (MPI_SUCCESS != ret) { line = __LINE__; goto error_handler; }

    ret = MCA_PML_CALL(isend(sendbuf, scount, sdtype, dest, stag,
                             MCA_PML_BASE_SEND_STANDARD, comm, sreq));
    if (MPI_SUCCESS != ret) { line = __LINE__; goto error_handler; }

    return MPI_SUCCESS;

 error_handler:
    OPAL_OUTPUT((ompi_coll_tuned_stream, "%s:%d\tError occurred %d\n",
                 __FILE__, line, ret));
    return ret;
}
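
A minimal caller sketch (not part of this file) of how the newly added ompi_coll_tuned_isendrecv could be paired with ompi_request_wait_all to overlap a pairwise exchange with computation; the helper name exchange_with_peer and its parameters are illustrative assumptions, not Open MPI API.

/* Illustrative sketch only: start a pairwise exchange, do unrelated work,
 * then complete both requests. */
static int exchange_with_peer( void* sbuf, void* rbuf, int count,
                               ompi_datatype_t* dtype, int peer, int tag,
                               struct ompi_communicator_t* comm )
{
    int err;
    ompi_request_t* reqs[2];
    ompi_status_public_t statuses[2];

    /* post both requests; neither has completed yet */
    err = ompi_coll_tuned_isendrecv( sbuf, count, dtype, peer, tag, &reqs[1],
                                     rbuf, count, dtype, peer, tag, &reqs[0],
                                     comm );
    if (MPI_SUCCESS != err) { return err; }

    /* ... computation that overlaps with the in-flight exchange ... */

    /* complete both the receive and the send */
    return ompi_request_wait_all( 2, reqs, statuses );
}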