
ompi: use ompi_coll_base_sendrecv_actual() whenever possible

Signed-off-by: Gilles Gouaillardet <gilles@rist.or.jp>
This commit is contained in:
Gilles Gouaillardet 2017-04-20 10:01:28 +09:00
parent 52551d96c1
commit ded63c5e0c
9 changed files with 56 additions and 130 deletions
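
The change is mechanical: every hand-rolled irecv/isend/request_wait_all exchange is collapsed into a single call to the existing ompi_coll_base_sendrecv_actual() helper. A minimal before/after sketch of the pattern, distilled from the hunks below (peer, sbuf, and rbuf are stand-ins for each call site's rank and buffers):

    /* Before: two non-blocking calls plus an explicit wait on both requests.
     * peer, sbuf, rbuf stand in for the per-call-site values. */
    ompi_request_t *reqs[2];
    ret = MCA_PML_CALL(irecv(rbuf, count, dtype, peer,
                             MCA_COLL_BASE_TAG_ALLREDUCE, comm, &reqs[0]));
    if (MPI_SUCCESS != ret) { goto error_hndl; }
    ret = MCA_PML_CALL(isend(sbuf, count, dtype, peer,
                             MCA_COLL_BASE_TAG_ALLREDUCE,
                             MCA_PML_BASE_SEND_STANDARD, comm, &reqs[1]));
    if (MPI_SUCCESS != ret) { goto error_hndl; }
    ret = ompi_request_wait_all(2, reqs, MPI_STATUSES_IGNORE);

    /* After: one helper call; send arguments first, then receive arguments */
    ret = ompi_coll_base_sendrecv_actual(sbuf, count, dtype, peer,
                                         MCA_COLL_BASE_TAG_ALLREDUCE,
                                         rbuf, count, dtype, peer,
                                         MCA_COLL_BASE_TAG_ALLREDUCE,
                                         comm, MPI_STATUS_IGNORE);
    if (MPI_SUCCESS != ret) { goto error_hndl; }

Besides being shorter, the helper returns a single error code and centralizes the zero-byte-message shortcut documented in coll_base_util.h.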

View file

@@ -135,7 +135,6 @@ ompi_coll_base_allreduce_intra_recursivedoubling(const void *sbuf, void *rbuf,
     int ret, line, rank, size, adjsize, remote, distance;
     int newrank, newremote, extra_ranks;
     char *tmpsend = NULL, *tmprecv = NULL, *tmpswap = NULL, *inplacebuf_free = NULL, *inplacebuf;
-    ompi_request_t *reqs[2] = {NULL, NULL};
     ptrdiff_t span, gap;
 
     size = ompi_comm_size(comm);
@@ -215,14 +214,11 @@ ompi_coll_base_allreduce_intra_recursivedoubling(const void *sbuf, void *rbuf,
                 (newremote * 2 + 1):(newremote + extra_ranks);
 
             /* Exchange the data */
-            ret = MCA_PML_CALL(irecv(tmprecv, count, dtype, remote,
-                                     MCA_COLL_BASE_TAG_ALLREDUCE, comm, &reqs[0]));
-            if (MPI_SUCCESS != ret) { line = __LINE__; goto error_hndl; }
-            ret = MCA_PML_CALL(isend(tmpsend, count, dtype, remote,
-                                     MCA_COLL_BASE_TAG_ALLREDUCE,
-                                     MCA_PML_BASE_SEND_STANDARD, comm, &reqs[1]));
-            if (MPI_SUCCESS != ret) { line = __LINE__; goto error_hndl; }
-            ret = ompi_request_wait_all(2, reqs, MPI_STATUSES_IGNORE);
+            ret = ompi_coll_base_sendrecv_actual(tmpsend, count, dtype, remote,
+                                                 MCA_COLL_BASE_TAG_ALLREDUCE,
+                                                 tmprecv, count, dtype, remote,
+                                                 MCA_COLL_BASE_TAG_ALLREDUCE,
+                                                 comm, MPI_STATUS_IGNORE);
             if (MPI_SUCCESS != ret) { line = __LINE__; goto error_hndl; }
 
             /* Apply operation */

View file

@@ -29,7 +29,7 @@
 #include "ompi/mca/pml/pml.h"
 #include "coll_base_util.h"
 
-int ompi_coll_base_sendrecv_actual( void* sendbuf, size_t scount,
+int ompi_coll_base_sendrecv_actual( const void* sendbuf, size_t scount,
                                     ompi_datatype_t* sdatatype,
                                     int dest, int stag,
                                     void* recvbuf, size_t rcount,

View file

@@ -36,7 +36,7 @@ BEGIN_C_DECLS
  * If one of the communications results in a zero-byte message the
  * communication is ignored, and no message will cross to the peer.
  */
-int ompi_coll_base_sendrecv_actual( void* sendbuf, size_t scount,
+int ompi_coll_base_sendrecv_actual( const void* sendbuf, size_t scount,
                                     ompi_datatype_t* sdatatype,
                                     int dest, int stag,
                                     void* recvbuf, size_t rcount,
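
The hunk above cuts off mid-prototype. Judging from the call sites elsewhere in this commit, the remaining parameters mirror the send side for the receive (datatype, source rank, receive tag) and finish with the communicator and a status pointer, with MPI_STATUS_IGNORE accepted. A hedged reconstruction; the receive-side parameter names are inferred, not shown in this diff:

    /* Receive-side names (rdatatype, source, rtag, status) are assumptions */
    int ompi_coll_base_sendrecv_actual( const void* sendbuf, size_t scount,
                                        ompi_datatype_t* sdatatype,
                                        int dest, int stag,
                                        void* recvbuf, size_t rcount,
                                        ompi_datatype_t* rdatatype,
                                        int source, int rtag,
                                        struct ompi_communicator_t* comm,
                                        ompi_status_public_t* status );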

View file

@@ -9,7 +9,7 @@
  *                         University of Stuttgart. All rights reserved.
  * Copyright (c) 2004-2005 The Regents of the University of California.
  *                         All rights reserved.
- * Copyright (c) 2015-2016 Research Organization for Information Science
+ * Copyright (c) 2015-2017 Research Organization for Information Science
  *                         and Technology (RIST). All rights reserved.
  * $COPYRIGHT$
  *
@@ -27,6 +27,7 @@
 #include "ompi/op/op.h"
 #include "ompi/mca/coll/coll.h"
 #include "ompi/mca/coll/base/coll_tags.h"
+#include "ompi/mca/coll/base/coll_base_util.h"
 #include "coll_basic.h"
 #include "ompi/mca/pml/pml.h"
@@ -83,7 +84,6 @@ mca_coll_basic_allreduce_inter(const void *sbuf, void *rbuf, int count,
     int err, i, rank, root = 0, rsize, line;
     ptrdiff_t extent, dsize, gap;
     char *tmpbuf = NULL, *pml_buffer = NULL;
-    ompi_request_t *req[2];
     ompi_request_t **reqs = NULL;
 
     rank = ompi_comm_rank(comm);
@@ -114,18 +114,11 @@ mca_coll_basic_allreduce_inter(const void *sbuf, void *rbuf, int count,
         }
 
         /* Do a send-recv between the two root procs. to avoid deadlock */
-        err = MCA_PML_CALL(irecv(rbuf, count, dtype, 0,
-                                 MCA_COLL_BASE_TAG_ALLREDUCE, comm,
-                                 &(req[0])));
-        if (OMPI_SUCCESS != err) { line = __LINE__; goto exit; }
-
-        err = MCA_PML_CALL(isend(sbuf, count, dtype, 0,
-                                 MCA_COLL_BASE_TAG_ALLREDUCE,
-                                 MCA_PML_BASE_SEND_STANDARD,
-                                 comm, &(req[1])));
-        if (OMPI_SUCCESS != err) { line = __LINE__; goto exit; }
-
-        err = ompi_request_wait_all(2, req, MPI_STATUSES_IGNORE);
+        err = ompi_coll_base_sendrecv_actual(sbuf, count, dtype, 0,
+                                             MCA_COLL_BASE_TAG_ALLREDUCE,
+                                             rbuf, count, dtype, 0,
+                                             MCA_COLL_BASE_TAG_ALLREDUCE,
+                                             comm, MPI_STATUS_IGNORE);
         if (OMPI_SUCCESS != err) { line = __LINE__; goto exit; }
 
         /* Loop receiving and calling reduction function (C or Fortran). */
@@ -154,18 +147,11 @@ mca_coll_basic_allreduce_inter(const void *sbuf, void *rbuf, int count,
     /***************************************************************************/
     if (rank == root) {
         /* sendrecv between the two roots */
-        err = MCA_PML_CALL(irecv(pml_buffer, count, dtype, 0,
-                                 MCA_COLL_BASE_TAG_ALLREDUCE,
-                                 comm, &(req[1])));
-        if (OMPI_SUCCESS != err) { line = __LINE__; goto exit; }
-
-        err = MCA_PML_CALL(isend(rbuf, count, dtype, 0,
-                                 MCA_COLL_BASE_TAG_ALLREDUCE,
-                                 MCA_PML_BASE_SEND_STANDARD, comm,
-                                 &(req[0])));
-        if (OMPI_SUCCESS != err) { line = __LINE__; goto exit; }
-
-        err = ompi_request_wait_all(2, req, MPI_STATUSES_IGNORE);
+        err = ompi_coll_base_sendrecv_actual(rbuf, count, dtype, 0,
+                                             MCA_COLL_BASE_TAG_ALLREDUCE,
+                                             pml_buffer, count, dtype, 0,
+                                             MCA_COLL_BASE_TAG_ALLREDUCE,
+                                             comm, MPI_STATUS_IGNORE);
         if (OMPI_SUCCESS != err) { line = __LINE__; goto exit; }
 
         /* distribute the data to other processes in remote group.

View file

@@ -10,7 +10,7 @@
  * Copyright (c) 2004-2005 The Regents of the University of California.
  *                         All rights reserved.
  * Copyright (c) 2006-2010 University of Houston. All rights reserved.
- * Copyright (c) 2015-2016 Research Organization for Information Science
+ * Copyright (c) 2015-2017 Research Organization for Information Science
  *                         and Technology (RIST). All rights reserved.
  * $COPYRIGHT$
  *
@@ -27,11 +27,11 @@
 #include "mpi.h"
 #include "ompi/constants.h"
 #include "ompi/datatype/ompi_datatype.h"
-#include "ompi/request/request.h"
 #include "ompi/communicator/communicator.h"
 #include "ompi/mca/coll/coll.h"
 #include "ompi/mca/pml/pml.h"
 #include "ompi/mca/coll/base/coll_tags.h"
+#include "ompi/mca/coll/base/coll_base_util.h"
 
 /*
  *	allgather_inter
@@ -51,7 +51,6 @@ mca_coll_inter_allgather_inter(const void *sbuf, int scount,
     int rank, root = 0, size, rsize, err = OMPI_SUCCESS;
     char *ptmp_free = NULL, *ptmp = NULL;
     ptrdiff_t gap, span;
-    ompi_request_t *req[2];
 
     rank = ompi_comm_rank(comm);
     size = ompi_comm_size(comm->c_local_comm);
@@ -77,22 +76,11 @@ mca_coll_inter_allgather_inter(const void *sbuf, int scount,
 
     if (rank == root) {
         /* Do a send-recv between the two root procs. to avoid deadlock */
-        err = MCA_PML_CALL(irecv(rbuf, rcount*rsize, rdtype, 0,
-                                 MCA_COLL_BASE_TAG_ALLGATHER, comm,
-                                 &(req[0])));
-        if (OMPI_SUCCESS != err) {
-            goto exit;
-        }
-
-        err = MCA_PML_CALL(isend(ptmp, scount*size, sdtype, 0,
-                                 MCA_COLL_BASE_TAG_ALLGATHER,
-                                 MCA_PML_BASE_SEND_STANDARD,
-                                 comm, &(req[1])));
-        if (OMPI_SUCCESS != err) {
-            goto exit;
-        }
-
-        err = ompi_request_wait_all(2, req, MPI_STATUSES_IGNORE);
+        err = ompi_coll_base_sendrecv_actual(ptmp, scount*size, sdtype, 0,
+                                             MCA_COLL_BASE_TAG_ALLGATHER,
+                                             rbuf, rcount*rsize, rdtype, 0,
+                                             MCA_COLL_BASE_TAG_ALLGATHER,
+                                             comm, MPI_STATUS_IGNORE);
         if (OMPI_SUCCESS != err) {
             goto exit;
         }

View file

@@ -10,7 +10,7 @@
  * Copyright (c) 2004-2005 The Regents of the University of California.
  *                         All rights reserved.
  * Copyright (c) 2006-2010 University of Houston. All rights reserved.
- * Copyright (c) 2015-2016 Research Organization for Information Science
+ * Copyright (c) 2015-2017 Research Organization for Information Science
  *                         and Technology (RIST). All rights reserved.
  * $COPYRIGHT$
  *
@@ -24,11 +24,11 @@
 #include "mpi.h"
 #include "ompi/datatype/ompi_datatype.h"
-#include "ompi/request/request.h"
 #include "ompi/communicator/communicator.h"
 #include "ompi/constants.h"
 #include "ompi/mca/coll/coll.h"
 #include "ompi/mca/coll/base/coll_tags.h"
+#include "ompi/mca/coll/base/coll_base_util.h"
 #include "ompi/mca/pml/pml.h"
@@ -51,7 +51,6 @@ mca_coll_inter_allgatherv_inter(const void *sbuf, int scount,
     int *count=NULL,*displace=NULL;
     char *ptmp_free=NULL, *ptmp=NULL;
     ompi_datatype_t *ndtype = NULL;
-    ompi_request_t *req[2];
 
     rank = ompi_comm_rank(comm);
     size_local = ompi_comm_size(comm->c_local_comm);
@@ -106,22 +105,11 @@ mca_coll_inter_allgatherv_inter(const void *sbuf, int scount,
 
     if (0 == rank) {
         /* Exchange data between roots */
-        err = MCA_PML_CALL(irecv(rbuf, 1, ndtype, 0,
-                                 MCA_COLL_BASE_TAG_ALLGATHERV, comm,
-                                 &(req[0])));
-        if (OMPI_SUCCESS != err) {
-            goto exit;
-        }
-
-        err = MCA_PML_CALL(isend(ptmp, total, sdtype, 0,
-                                 MCA_COLL_BASE_TAG_ALLGATHERV,
-                                 MCA_PML_BASE_SEND_STANDARD,
-                                 comm, &(req[1])));
-        if (OMPI_SUCCESS != err) {
-            goto exit;
-        }
-
-        err = ompi_request_wait_all(2, req, MPI_STATUSES_IGNORE);
+        err = ompi_coll_base_sendrecv_actual(ptmp, total, sdtype, 0,
+                                             MCA_COLL_BASE_TAG_ALLGATHERV,
+                                             rbuf, 1, ndtype, 0,
+                                             MCA_COLL_BASE_TAG_ALLGATHERV,
+                                             comm, MPI_STATUS_IGNORE);
         if (OMPI_SUCCESS != err) {
             goto exit;
         }

View file

@@ -11,7 +11,7 @@
  *                         All rights reserved.
  * Copyright (c) 2006-2007 University of Houston. All rights reserved.
  * Copyright (c) 2013      Cisco Systems, Inc. All rights reserved.
- * Copyright (c) 2015-2016 Research Organization for Information Science
+ * Copyright (c) 2015-2017 Research Organization for Information Science
  *                         and Technology (RIST). All rights reserved.
  * $COPYRIGHT$
  *
@@ -27,10 +27,10 @@
 #include "ompi/constants.h"
 #include "ompi/datatype/ompi_datatype.h"
 #include "ompi/communicator/communicator.h"
-#include "ompi/request/request.h"
 #include "ompi/op/op.h"
 #include "ompi/mca/coll/coll.h"
 #include "ompi/mca/coll/base/coll_tags.h"
+#include "ompi/mca/coll/base/coll_base_util.h"
 #include "ompi/mca/pml/pml.h"
 
 /*
@@ -49,7 +49,6 @@ mca_coll_inter_allreduce_inter(const void *sbuf, void *rbuf, int count,
 {
     int err, rank, root = 0;
     char *tmpbuf = NULL, *pml_buffer = NULL;
-    ompi_request_t *req[2];
     ptrdiff_t gap, span;
 
     rank = ompi_comm_rank(comm);
@@ -73,22 +72,11 @@ mca_coll_inter_allreduce_inter(const void *sbuf, void *rbuf, int count,
 
     if (rank == root) {
         /* Do a send-recv between the two root procs. to avoid deadlock */
-        err = MCA_PML_CALL(irecv(rbuf, count, dtype, 0,
-                                 MCA_COLL_BASE_TAG_ALLREDUCE, comm,
-                                 &(req[0])));
-        if (OMPI_SUCCESS != err) {
-            goto exit;
-        }
-
-        err = MCA_PML_CALL(isend(pml_buffer, count, dtype, 0,
-                                 MCA_COLL_BASE_TAG_ALLREDUCE,
-                                 MCA_PML_BASE_SEND_STANDARD,
-                                 comm, &(req[1])));
-        if (OMPI_SUCCESS != err) {
-            goto exit;
-        }
-
-        err = ompi_request_wait_all(2, req, MPI_STATUSES_IGNORE);
+        err = ompi_coll_base_sendrecv_actual(pml_buffer, count, dtype, 0,
+                                             MCA_COLL_BASE_TAG_ALLREDUCE,
+                                             rbuf, count, dtype, 0,
+                                             MCA_COLL_BASE_TAG_ALLREDUCE,
+                                             comm, MPI_STATUS_IGNORE);
         if (OMPI_SUCCESS != err) {
             goto exit;
         }

View file

@@ -22,6 +22,7 @@
 #include "opal/include/opal/sys/atomic.h"
 #include "ompi/mca/pml/pml.h"
 #include "ompi/patterns/net/netpatterns.h"
+#include "ompi/mca/coll/base/coll_base_util.h"
 #include "coll_ops.h"
 #include "commpatterns.h"
 
@@ -42,7 +43,6 @@ OMPI_DECLSPEC int comm_allreduce_pml(void *sbuf, void *rbuf, int count,
     char scratch_bufers[2][MAX_TMP_BUFFER];
     int send_buffer=0,recv_buffer=1;
     char *sbuf_current, *rbuf_current;
-    ompi_request_t *requests[2];
 
     /* get size of data needed - same layout as user data, so that
      *   we can apply the reudction routines directly on these buffers
@@ -165,11 +165,13 @@ OMPI_DECLSPEC int comm_allreduce_pml(void *sbuf, void *rbuf, int count,
             /* is the remote data read */
             pair_rank=my_exchange_node.rank_exchanges[exchange];
 
-            /* post non-blocking receive */
-            rc=MCA_PML_CALL(irecv(scratch_bufers[recv_buffer],
-                        count_this_stripe,dtype, ranks_in_comm[pair_rank],
-                        -OMPI_COMMON_TAG_ALLREDUCE,
-                        comm,&(requests[0])));
+            rc=ompi_coll_base_sendrecv_actual(scratch_bufers[send_buffer],
+                                              count_this_stripe,dtype, ranks_in_comm[pair_rank],
+                                              -OMPI_COMMON_TAG_ALLREDUCE,
+                                              scratch_bufers[recv_buffer],
+                                              count_this_stripe,dtype,ranks_in_comm[pair_rank],
+                                              -OMPI_COMMON_TAG_ALLREDUCE,
+                                              comm, MPI_STATUS_IGNORE);
             if( 0 > rc ) {
                 fprintf(stderr," irecv failed in comm_allreduce_pml at iterations %d \n",
                         exchange);
@@ -177,20 +179,6 @@ OMPI_DECLSPEC int comm_allreduce_pml(void *sbuf, void *rbuf, int count,
                 goto Error;
             }
 
-            /* post non-blocking send */
-            rc=MCA_PML_CALL(isend(scratch_bufers[send_buffer],
-                        count_this_stripe,dtype, ranks_in_comm[pair_rank],
-                        -OMPI_COMMON_TAG_ALLREDUCE,MCA_PML_BASE_SEND_STANDARD,
-                        comm,&(requests[1])));
-            if( 0 > rc ) {
-                fprintf(stderr," isend failed in comm_allreduce_pml at iterations %d \n",
-                        exchange);
-                fflush(stderr);
-                goto Error;
-            }
-
-            /* wait on send and receive completion */
-            ompi_request_wait_all(2,requests,MPI_STATUSES_IGNORE);
             /* reduce the data */
             if( 0 < count_this_stripe ) {
                 ompi_op_reduce(op,

View file

@@ -8,6 +8,8 @@
  * Copyright (c) 2007      Los Alamos National Security, LLC. All rights
  *                         reserved.
  * Copyright (c) 2016      Intel, Inc. All rights reserved.
+ * Copyright (c) 2017      Research Organization for Information Science
+ *                         and Technology (RIST). All rights reserved.
  * $COPYRIGHT$
  *
  * Additional copyrights may follow
@@ -22,8 +24,8 @@
 #include "ompi/constants.h"
 #include "ompi/mca/pml/pml.h"
 #include "ompi/communicator/communicator.h"
-#include "ompi/request/request.h"
 #include "ompi/runtime/mpiruntime.h"
+#include "ompi/mca/coll/base/coll_base_util.h"
 
 int
 ompi_init_preconnect_mpi(void)
@@ -31,7 +33,6 @@ ompi_init_preconnect_mpi(void)
     int comm_size = ompi_comm_size(MPI_COMM_WORLD);
     int comm_rank = ompi_comm_rank(MPI_COMM_WORLD);
     int param, next, prev, i, ret = OMPI_SUCCESS;
-    struct ompi_request_t * requests[2];
     char inbuf[1], outbuf[1];
     const bool *value = NULL;
 
@@ -58,20 +59,11 @@ ompi_init_preconnect_mpi(void)
         next = (comm_rank + i) % comm_size;
         prev = (comm_rank - i + comm_size) % comm_size;
 
-        ret = MCA_PML_CALL(isend(outbuf, 1, MPI_CHAR,
-                                 next, 1,
-                                 MCA_PML_BASE_SEND_COMPLETE,
-                                 MPI_COMM_WORLD,
-                                 &requests[1]));
-        if (OMPI_SUCCESS != ret) return ret;
-
-        ret = MCA_PML_CALL(irecv(inbuf, 1, MPI_CHAR,
-                                 prev, 1,
-                                 MPI_COMM_WORLD,
-                                 &requests[0]));
-        if(OMPI_SUCCESS != ret) return ret;
-
-        ret = ompi_request_wait_all(2, requests, MPI_STATUSES_IGNORE);
+        ret = ompi_coll_base_sendrecv_actual(outbuf, 1, MPI_CHAR,
+                                             next, 1,
+                                             inbuf, 1, MPI_CHAR,
+                                             prev, 1,
+                                             MPI_COMM_WORLD, MPI_STATUS_IGNORE);
         if(OMPI_SUCCESS != ret) return ret;
     }