1
1

Fixes *alltoall* collectives at top level

This commit:
 - Correctly retrieve the communicator size when
   checking memory and parameters
 - Ensure (sendtype,sendcount) and (recvtype,recvcount)
   match and return with MPI_ERR_TRUNCATE otherwise
 - Return with MPI_SUCCESS without invoking the low-level
   routine if no data is going to be transferred
 - Fixes trac:4506

cmr=v1.8.2:reviewer=bosilca

This commit was SVN r31815.

The following Trac tickets were found above:
  Ticket 4506 --> https://svn.open-mpi.org/trac/ompi/ticket/4506
Этот коммит содержится в:
Gilles Gouaillardet 2014-05-19 07:46:07 +00:00
родитель d531a2ccad
Коммит 8bafe06c57
14 изменённых файлов: 391 добавлений и 186 удалений

Просмотреть файл

@ -11,6 +11,8 @@
* Copyright (c) 2004-2006 The Regents of the University of California. * Copyright (c) 2004-2006 The Regents of the University of California.
* All rights reserved. * All rights reserved.
* Copyright (c) 2009 Oak Ridge National Labs. All rights reserved. * Copyright (c) 2009 Oak Ridge National Labs. All rights reserved.
* Copyright (c) 2014 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* $COPYRIGHT$ * $COPYRIGHT$
* *
* Additional copyrights may follow * Additional copyrights may follow
@ -50,8 +52,8 @@ int32_t ompi_datatype_sndrcv( void *sbuf, int32_t scount, const ompi_datatype_t*
size_t max_data; size_t max_data;
/* First check if we really have something to do */ /* First check if we really have something to do */
if (0 == rcount) { if (0 == rcount || 0 == rdtype->super.size) {
return ((0 == scount) ? MPI_SUCCESS : MPI_ERR_TRUNCATE); return ((0 == scount || 0 == sdtype->super.size) ? MPI_SUCCESS : MPI_ERR_TRUNCATE);
} }
/* If same datatypes used, just copy. */ /* If same datatypes used, just copy. */

Просмотреть файл

@ -14,6 +14,9 @@
* Copyright (c) 2013 Los Alamos National Security, LLC. All rights * Copyright (c) 2013 Los Alamos National Security, LLC. All rights
* reserved. * reserved.
* Copyright (c) 2013 FUJITSU LIMITED. All rights reserved. * Copyright (c) 2013 FUJITSU LIMITED. All rights reserved.
* Copyright (c) 2014 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* Copyright (c) 2014 Cisco Systems, Inc. All rights reserved.
* $COPYRIGHT$ * $COPYRIGHT$
* *
* Additional copyrights may follow * Additional copyrights may follow
@ -70,13 +73,18 @@ mca_coll_basic_alltoallw_intra_inplace(void *rbuf, int *rcounts, const int *rdis
/* in-place alltoallw slow algorithm (but works) */ /* in-place alltoallw slow algorithm (but works) */
for (i = 0 ; i < size ; ++i) { for (i = 0 ; i < size ; ++i) {
size_t msg_size_i;
ompi_datatype_type_size(rdtypes[i], &msg_size_i);
msg_size_i *= rcounts[i];
for (j = i+1 ; j < size ; ++j) { for (j = i+1 ; j < size ; ++j) {
ompi_datatype_type_extent (rdtypes[j], &ext); size_t msg_size_j;
ompi_datatype_type_size(rdtypes[j], &msg_size_j);
msg_size_j *= rcounts[j];
/* Initiate all send/recv to/from others. */ /* Initiate all send/recv to/from others. */
preq = basic_module->mccb_reqs; preq = basic_module->mccb_reqs;
if (i == rank && rcounts[j] != 0) { if (i == rank && msg_size_j != 0) {
/* Copy the data into the temporary buffer */ /* Copy the data into the temporary buffer */
err = ompi_datatype_copy_content_same_ddt (rdtypes[j], rcounts[j], err = ompi_datatype_copy_content_same_ddt (rdtypes[j], rcounts[j],
tmp_buffer, (char *) rbuf + rdisps[j]); tmp_buffer, (char *) rbuf + rdisps[j]);
@ -91,7 +99,7 @@ mca_coll_basic_alltoallw_intra_inplace(void *rbuf, int *rcounts, const int *rdis
j, MCA_COLL_BASE_TAG_ALLTOALLW, MCA_PML_BASE_SEND_STANDARD, j, MCA_COLL_BASE_TAG_ALLTOALLW, MCA_PML_BASE_SEND_STANDARD,
comm, preq++)); comm, preq++));
if (MPI_SUCCESS != err) { goto error_hndl; } if (MPI_SUCCESS != err) { goto error_hndl; }
} else if (j == rank && rcounts[i] != 0) { } else if (j == rank && msg_size_i != 0) {
/* Copy the data into the temporary buffer */ /* Copy the data into the temporary buffer */
err = ompi_datatype_copy_content_same_ddt (rdtypes[i], rcounts[i], err = ompi_datatype_copy_content_same_ddt (rdtypes[i], rcounts[i],
tmp_buffer, (char *) rbuf + rdisps[i]); tmp_buffer, (char *) rbuf + rdisps[i]);
@ -111,7 +119,7 @@ mca_coll_basic_alltoallw_intra_inplace(void *rbuf, int *rcounts, const int *rdis
} }
/* Wait for the requests to complete */ /* Wait for the requests to complete */
err = ompi_request_wait_all (2, basic_module->mccb_reqs, MPI_STATUS_IGNORE); err = ompi_request_wait_all (2, basic_module->mccb_reqs, MPI_STATUSES_IGNORE);
if (MPI_SUCCESS != err) { goto error_hndl; } if (MPI_SUCCESS != err) { goto error_hndl; }
/* Free the requests. */ /* Free the requests. */
@ -168,12 +176,10 @@ mca_coll_basic_alltoallw_intra(void *sbuf, int *scounts, int *sdisps,
psnd = ((char *) sbuf) + sdisps[rank]; psnd = ((char *) sbuf) + sdisps[rank];
prcv = ((char *) rbuf) + rdisps[rank]; prcv = ((char *) rbuf) + rdisps[rank];
if (0 != scounts[rank]) { err = ompi_datatype_sndrcv(psnd, scounts[rank], sdtypes[rank],
err = ompi_datatype_sndrcv(psnd, scounts[rank], sdtypes[rank], prcv, rcounts[rank], rdtypes[rank]);
prcv, rcounts[rank], rdtypes[rank]); if (MPI_SUCCESS != err) {
if (MPI_SUCCESS != err) { return err;
return err;
}
} }
/* If only one process, we're done. */ /* If only one process, we're done. */
@ -190,7 +196,11 @@ mca_coll_basic_alltoallw_intra(void *sbuf, int *scounts, int *sdisps,
/* Post all receives first -- a simple optimization */ /* Post all receives first -- a simple optimization */
for (i = 0; i < size; ++i) { for (i = 0; i < size; ++i) {
if (i == rank || 0 == rcounts[i]) size_t msg_size;
ompi_datatype_type_size(rdtypes[i], &msg_size);
msg_size *= rcounts[i];
if (i == rank || 0 == msg_size)
continue; continue;
prcv = ((char *) rbuf) + rdisps[i]; prcv = ((char *) rbuf) + rdisps[i];
@ -208,7 +218,11 @@ mca_coll_basic_alltoallw_intra(void *sbuf, int *scounts, int *sdisps,
/* Now post all sends */ /* Now post all sends */
for (i = 0; i < size; ++i) { for (i = 0; i < size; ++i) {
if (i == rank || 0 == scounts[i]) size_t msg_size;
ompi_datatype_type_size(sdtypes[i], &msg_size);
msg_size *= scounts[i];
if (i == rank || 0 == msg_size)
continue; continue;
psnd = ((char *) sbuf) + sdisps[i]; psnd = ((char *) sbuf) + sdisps[i];

Просмотреть файл

@ -13,6 +13,8 @@
* Copyright (c) 2007 Cisco Systems, Inc. All rights reserved. * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2013 Los Alamos National Security, LLC. All rights * Copyright (c) 2013 Los Alamos National Security, LLC. All rights
* reserved. * reserved.
* Copyright (c) 2014 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* $COPYRIGHT$ * $COPYRIGHT$
* *
* Additional copyrights may follow * Additional copyrights may follow
@ -45,6 +47,7 @@ int MPI_Alltoall(const void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int recvcount, MPI_Datatype recvtype, void *recvbuf, int recvcount, MPI_Datatype recvtype,
MPI_Comm comm) MPI_Comm comm)
{ {
size_t sendtype_size, recvtype_size;
int err; int err;
MEMCHECKER( MEMCHECKER(
@ -70,21 +73,32 @@ int MPI_Alltoall(const void *sendbuf, int sendcount, MPI_Datatype sendtype,
if (ompi_comm_invalid(comm)) { if (ompi_comm_invalid(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM, return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
FUNC_NAME); FUNC_NAME);
} else if (MPI_DATATYPE_NULL == recvtype || NULL == recvtype) {
err = MPI_ERR_TYPE;
} else if (recvcount < 0) {
err = MPI_ERR_COUNT;
} else if (MPI_IN_PLACE == recvbuf) { } else if (MPI_IN_PLACE == recvbuf) {
err = MPI_ERR_ARG; return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_ARG,
FUNC_NAME);
} else { } else {
OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcount); OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcount);
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
OMPI_CHECK_DATATYPE_FOR_RECV(err, recvtype, recvcount);
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
}
if (MPI_IN_PLACE != sendbuf && !OMPI_COMM_IS_INTER(comm)) {
ompi_datatype_type_size(sendtype, &sendtype_size);
ompi_datatype_type_size(recvtype, &recvtype_size);
if ((sendtype_size*sendcount) != (recvtype_size*recvcount)) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TRUNCATE, FUNC_NAME);
}
} }
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
} }
/* Do we need to do anything? */ /* Do we need to do anything? */
if (0 == sendcount && 0 == recvcount) { ompi_datatype_type_size(sendtype, &sendtype_size);
ompi_datatype_type_size(recvtype, &recvtype_size);
if (((MPI_IN_PLACE == sendbuf) ||
(0 == sendcount) || (0 == sendtype_size)) &&
(0 == recvcount) || (0 == recvtype_size)) {
return MPI_SUCCESS; return MPI_SUCCESS;
} }

Просмотреть файл

@ -13,6 +13,8 @@
* Copyright (c) 2007 Cisco Systems, Inc. All rights reserved. * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2012-2013 Los Alamos National Security, LLC. All rights * Copyright (c) 2012-2013 Los Alamos National Security, LLC. All rights
* reserved. * reserved.
* Copyright (c) 2014 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* $COPYRIGHT$ * $COPYRIGHT$
* *
* Additional copyrights may follow * Additional copyrights may follow
@ -47,19 +49,22 @@ int MPI_Alltoallv(const void *sendbuf, const int sendcounts[],
MPI_Datatype recvtype, MPI_Comm comm) MPI_Datatype recvtype, MPI_Comm comm)
{ {
int i, size, err; int i, size, err;
size_t sendtype_size, recvtype_size;
bool zerosend=true, zerorecv=true;
MEMCHECKER( MEMCHECKER(
ptrdiff_t recv_ext; ptrdiff_t recv_ext;
ptrdiff_t send_ext; ptrdiff_t send_ext;
size = ompi_comm_remote_size(comm); memchecker_datatype(sendtype);
memchecker_datatype(recvtype);
ompi_datatype_type_extent(recvtype, &recv_ext); ompi_datatype_type_extent(recvtype, &recv_ext);
ompi_datatype_type_extent(sendtype, &send_ext); ompi_datatype_type_extent(sendtype, &send_ext);
memchecker_datatype(sendtype);
memchecker_datatype(recvtype);
memchecker_comm(comm); memchecker_comm(comm);
size = OMPI_COMM_IS_INTER(comm)?ompi_comm_remote_size(comm):ompi_comm_size(comm);
for ( i = 0; i < size; i++ ) { for ( i = 0; i < size; i++ ) {
/* check if send chunks are defined. */ /* check if send chunks are defined. */
memchecker_call(&opal_memchecker_base_isdefined, memchecker_call(&opal_memchecker_base_isdefined,
@ -95,22 +100,49 @@ int MPI_Alltoallv(const void *sendbuf, const int sendcounts[],
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME); return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
} }
/* We always define the remote group to be the same as the local size = OMPI_COMM_IS_INTER(comm)?ompi_comm_remote_size(comm):ompi_comm_size(comm);
group in the case of an intracommunicator, so it's safe to
get the size of the remote group here for both intra- and
intercommunicators */
size = ompi_comm_remote_size(comm);
for (i = 0; i < size; ++i) { for (i = 0; i < size; ++i) {
if (recvcounts[i] < 0) { OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcounts[i]);
err = MPI_ERR_COUNT; OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
} else if (MPI_DATATYPE_NULL == recvtype || NULL == recvtype) { OMPI_CHECK_DATATYPE_FOR_RECV(err, recvtype, recvcounts[i]);
err = MPI_ERR_TYPE;
} else {
OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcounts[i]);
}
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME); OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
} }
if (MPI_IN_PLACE != sendbuf && !OMPI_COMM_IS_INTER(comm)) {
int me = ompi_comm_rank(comm);
ompi_datatype_type_size(sendtype, &sendtype_size);
ompi_datatype_type_size(recvtype, &recvtype_size);
if ((sendtype_size*sendcounts[me]) != (recvtype_size*recvcounts[me])) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TRUNCATE, FUNC_NAME);
}
}
}
/* Do we need to do anything? */
ompi_datatype_type_size(sendtype, &sendtype_size);
ompi_datatype_type_size(recvtype, &recvtype_size);
size = OMPI_COMM_IS_INTER(comm)?ompi_comm_remote_size(comm):ompi_comm_size(comm);
if (0 != recvtype_size) {
for (i = 0; i < size; ++i) {
if (0 != recvcounts[i]) {
zerorecv = false;
break;
}
}
}
if (MPI_IN_PLACE == sendbuf) {
zerosend = zerorecv;
} else if (0 != sendtype_size) {
for (i = 0; i < size; ++i) {
if (0 != sendcounts[i]) {
zerosend = false;
break;
}
}
}
if (zerosend && zerorecv) {
return MPI_SUCCESS;
} }
OPAL_CR_ENTER_LIBRARY(); OPAL_CR_ENTER_LIBRARY();

Просмотреть файл

@ -13,6 +13,8 @@
* Copyright (c) 2007 Cisco Systems, Inc. All rights reserved. * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2012-2013 Los Alamos National Security, LLC. All rights * Copyright (c) 2012-2013 Los Alamos National Security, LLC. All rights
* reserved. * reserved.
* Copyright (c) 2014 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* $COPYRIGHT$ * $COPYRIGHT$
* *
* Additional copyrights may follow * Additional copyrights may follow
@ -47,14 +49,16 @@ int MPI_Alltoallw(const void *sendbuf, const int sendcounts[],
const MPI_Datatype recvtypes[], MPI_Comm comm) const MPI_Datatype recvtypes[], MPI_Comm comm)
{ {
int i, size, err; int i, size, err;
size_t sendtype_size, recvtype_size;
bool zerosend=true, zerorecv=true;
MEMCHECKER( MEMCHECKER(
ptrdiff_t recv_ext; ptrdiff_t recv_ext;
ptrdiff_t send_ext; ptrdiff_t send_ext;
size = ompi_comm_remote_size(comm);
memchecker_comm(comm); memchecker_comm(comm);
size = OMPI_COMM_IS_INTER(comm)?ompi_comm_remote_size(comm):ompi_comm_size(comm);
for ( i = 0; i < size; i++ ) { for ( i = 0; i < size; i++ ) {
memchecker_datatype(sendtypes[i]); memchecker_datatype(sendtypes[i]);
memchecker_datatype(recvtypes[i]); memchecker_datatype(recvtypes[i]);
@ -94,22 +98,47 @@ int MPI_Alltoallw(const void *sendbuf, const int sendcounts[],
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME); return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
} }
/* We always define the remote group to be the same as the local size = OMPI_COMM_IS_INTER(comm)?ompi_comm_remote_size(comm):ompi_comm_size(comm);
group in the case of an intracommunicator, so it's safe to
get the size of the remote group here for both intra- and
intercommunicators */
size = ompi_comm_remote_size(comm);
for (i = 0; i < size; ++i) { for (i = 0; i < size; ++i) {
if (recvcounts[i] < 0) { OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtypes[i], sendcounts[i]);
err = MPI_ERR_COUNT; OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
} else if (MPI_DATATYPE_NULL == recvtypes[i] || NULL == recvtypes[i]) { OMPI_CHECK_DATATYPE_FOR_RECV(err, recvtypes[i], recvcounts[i]);
err = MPI_ERR_TYPE;
} else {
OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtypes[i], sendcounts[i]);
}
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME); OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
} }
if (MPI_IN_PLACE != sendbuf && !OMPI_COMM_IS_INTER(comm)) {
int me = ompi_comm_rank(comm);
ompi_datatype_type_size(sendtypes[me], &sendtype_size);
ompi_datatype_type_size(recvtypes[me], &recvtype_size);
if ((sendtype_size*sendcounts[me]) != (recvtype_size*recvcounts[me])) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TRUNCATE, FUNC_NAME);
}
}
}
/* Do we need to do anything? */
size = OMPI_COMM_IS_INTER(comm)?ompi_comm_remote_size(comm):ompi_comm_size(comm);
for (i = 0; i < size; ++i) {
ompi_datatype_type_size(recvtypes[i], &recvtype_size);
if (0 != recvtype_size && 0 != recvcounts[i]) {
zerorecv = false;
break;
}
}
if (MPI_IN_PLACE == sendbuf) {
zerosend = zerorecv;
} else {
for (i = 0; i < size; ++i) {
ompi_datatype_type_size(sendtypes[i], &sendtype_size);
if (0 != sendtype_size && 0 != sendcounts[i]) {
zerosend = false;
break;
}
}
}
if (zerosend && zerorecv) {
return MPI_SUCCESS;
} }
OPAL_CR_ENTER_LIBRARY(); OPAL_CR_ENTER_LIBRARY();

Просмотреть файл

@ -14,6 +14,8 @@
* Copyright (c) 2012 Oak Ridge National Laboratory. All rights reserved. * Copyright (c) 2012 Oak Ridge National Laboratory. All rights reserved.
* Copyright (c) 2013 Los Alamos National Security, LLC. All rights * Copyright (c) 2013 Los Alamos National Security, LLC. All rights
* reserved. * reserved.
* Copyright (c) 2014 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* $COPYRIGHT$ * $COPYRIGHT$
* *
* Additional copyrights may follow * Additional copyrights may follow
@ -46,6 +48,7 @@ int MPI_Ialltoall(const void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int recvcount, MPI_Datatype recvtype, void *recvbuf, int recvcount, MPI_Datatype recvtype,
MPI_Comm comm, MPI_Request *request) MPI_Comm comm, MPI_Request *request)
{ {
size_t sendtype_size, recvtype_size;
int err; int err;
MEMCHECKER( MEMCHECKER(
@ -58,24 +61,31 @@ int MPI_Ialltoall(const void *sendbuf, int sendcount, MPI_Datatype sendtype,
if (MPI_PARAM_CHECK) { if (MPI_PARAM_CHECK) {
/* Unrooted operation -- same checks for all ranks on both /* Unrooted operation -- same checks for all ranks on both
intracommunicators and intercommunicators */ intracommunicators and intercommunicators */
err = MPI_SUCCESS; err = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME); OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm)) { if (ompi_comm_invalid(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM, return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
FUNC_NAME); FUNC_NAME);
} else if (MPI_DATATYPE_NULL == recvtype || NULL == recvtype) { } else if (MPI_IN_PLACE == sendbuf || MPI_IN_PLACE == recvbuf) {
err = MPI_ERR_TYPE; return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_ARG,
} else if (recvcount < 0) { FUNC_NAME);
err = MPI_ERR_COUNT; } else {
} else if (MPI_IN_PLACE == sendbuf || MPI_IN_PLACE == recvbuf) { OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcount);
err = MPI_ERR_ARG; OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
} else { OMPI_CHECK_DATATYPE_FOR_RECV(err, recvtype, recvcount);
OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcount); OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
} }
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
if (MPI_IN_PLACE != sendbuf && !OMPI_COMM_IS_INTER(comm)) {
ompi_datatype_type_size(sendtype, &sendtype_size);
ompi_datatype_type_size(recvtype, &recvtype_size);
if ((sendtype_size*sendcount) != (recvtype_size*recvcount)) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TRUNCATE, FUNC_NAME);
}
}
} }
OPAL_CR_ENTER_LIBRARY(); OPAL_CR_ENTER_LIBRARY();

Просмотреть файл

@ -13,6 +13,8 @@
* Copyright (c) 2007 Cisco Systems, Inc. All rights reserved. * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2012-2013 Los Alamos National Security, LLC. All rights * Copyright (c) 2012-2013 Los Alamos National Security, LLC. All rights
* reserved. * reserved.
* Copyright (c) 2014 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* $COPYRIGHT$ * $COPYRIGHT$
* *
* Additional copyrights may follow * Additional copyrights may follow
@ -47,19 +49,22 @@ int MPI_Ialltoallv(const void *sendbuf, const int sendcounts[], const int sdispl
MPI_Request *request) MPI_Request *request)
{ {
int i, size, err; int i, size, err;
size_t sendtype_size, recvtype_size;
bool zerosend=true, zerorecv=true;
MEMCHECKER( MEMCHECKER(
ptrdiff_t recv_ext; ptrdiff_t recv_ext;
ptrdiff_t send_ext; ptrdiff_t send_ext;
size = ompi_comm_remote_size(comm); memchecker_datatype(sendtype);
memchecker_datatype(recvtype);
ompi_datatype_type_extent(recvtype, &recv_ext); ompi_datatype_type_extent(recvtype, &recv_ext);
ompi_datatype_type_extent(sendtype, &send_ext); ompi_datatype_type_extent(sendtype, &send_ext);
memchecker_datatype(sendtype);
memchecker_datatype(recvtype);
memchecker_comm(comm); memchecker_comm(comm);
size = OMPI_COMM_IS_INTER(comm)?ompi_comm_remote_size(comm):ompi_comm_size(comm);
for ( i = 0; i < size; i++ ) { for ( i = 0; i < size; i++ ) {
/* check if send chunks are defined. */ /* check if send chunks are defined. */
memchecker_call(&opal_memchecker_base_isdefined, memchecker_call(&opal_memchecker_base_isdefined,
@ -89,21 +94,21 @@ int MPI_Ialltoallv(const void *sendbuf, const int sendcounts[], const int sdispl
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME); return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
} }
/* We always define the remote group to be the same as the local size = OMPI_COMM_IS_INTER(comm)?ompi_comm_remote_size(comm):ompi_comm_size(comm);
group in the case of an intracommunicator, so it's safe to
get the size of the remote group here for both intra- and
intercommunicators */
size = ompi_comm_remote_size(comm);
for (i = 0; i < size; ++i) { for (i = 0; i < size; ++i) {
if (recvcounts[i] < 0) { OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcounts[i]);
err = MPI_ERR_COUNT;
} else if (MPI_DATATYPE_NULL == recvtype || NULL == recvtype) {
err = MPI_ERR_TYPE;
} else {
OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcounts[i]);
}
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME); OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
OMPI_CHECK_DATATYPE_FOR_RECV(err, recvtype, recvcounts[i]);
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
}
if (MPI_IN_PLACE != sendbuf && !OMPI_COMM_IS_INTER(comm)) {
int me = ompi_comm_rank(comm);
ompi_datatype_type_size(sendtype, &sendtype_size);
ompi_datatype_type_size(recvtype, &recvtype_size);
if ((sendtype_size*sendcounts[me]) != (recvtype_size*recvcounts[me])) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TRUNCATE, FUNC_NAME);
}
} }
} }

Просмотреть файл

@ -13,6 +13,8 @@
* Copyright (c) 2007 Cisco Systems, Inc. All rights reserved. * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2012-2013 Los Alamos National Security, LLC. All rights * Copyright (c) 2012-2013 Los Alamos National Security, LLC. All rights
* reserved. * reserved.
* Copyright (c) 2014 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* $COPYRIGHT$ * $COPYRIGHT$
* *
* Additional copyrights may follow * Additional copyrights may follow
@ -47,14 +49,17 @@ int MPI_Ialltoallw(const void *sendbuf, const int sendcounts[], const int sdispl
MPI_Request *request) MPI_Request *request)
{ {
int i, size, err; int i, size, err;
size_t sendtype_size, recvtype_size;
bool zerosend=true, zerorecv=true;
MEMCHECKER( MEMCHECKER(
ptrdiff_t recv_ext; ptrdiff_t recv_ext;
ptrdiff_t send_ext; ptrdiff_t send_ext;
size = ompi_comm_remote_size(comm);
memchecker_comm(comm); memchecker_comm(comm);
size = OMPI_COMM_IS_INTER(comm)?ompi_comm_remote_size(comm):ompi_comm_size(comm);
for ( i = 0; i < size; i++ ) { for ( i = 0; i < size; i++ ) {
memchecker_datatype(sendtypes[i]); memchecker_datatype(sendtypes[i]);
memchecker_datatype(recvtypes[i]); memchecker_datatype(recvtypes[i]);
@ -88,21 +93,21 @@ int MPI_Ialltoallw(const void *sendbuf, const int sendcounts[], const int sdispl
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME); return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
} }
/* We always define the remote group to be the same as the local size = OMPI_COMM_IS_INTER(comm)?ompi_comm_remote_size(comm):ompi_comm_size(comm);
group in the case of an intracommunicator, so it's safe to
get the size of the remote group here for both intra- and
intercommunicators */
size = ompi_comm_remote_size(comm);
for (i = 0; i < size; ++i) { for (i = 0; i < size; ++i) {
if (recvcounts[i] < 0) { OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtypes[i], sendcounts[i]);
err = MPI_ERR_COUNT;
} else if (MPI_DATATYPE_NULL == recvtypes[i] || NULL == recvtypes[i]) {
err = MPI_ERR_TYPE;
} else {
OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtypes[i], sendcounts[i]);
}
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME); OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
OMPI_CHECK_DATATYPE_FOR_RECV(err, recvtypes[i], recvcounts[i]);
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
}
if (MPI_IN_PLACE != sendbuf && !OMPI_COMM_IS_INTER(comm)) {
int me = ompi_comm_rank(comm);
ompi_datatype_type_size(sendtypes[me], &sendtype_size);
ompi_datatype_type_size(recvtypes[me], &recvtype_size);
if ((sendtype_size*sendcounts[me]) != (recvtype_size*recvcounts[me])) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TRUNCATE, FUNC_NAME);
}
} }
} }

Просмотреть файл

@ -14,6 +14,8 @@
* Copyright (c) 2012 Oak Ridge National Laboratory. All rights reserved. * Copyright (c) 2012 Oak Ridge National Laboratory. All rights reserved.
* Copyright (c) 2013 Los Alamos National Security, LLC. All rights * Copyright (c) 2013 Los Alamos National Security, LLC. All rights
* reserved. * reserved.
* Copyright (c) 2014 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* $COPYRIGHT$ * $COPYRIGHT$
* *
* Additional copyrights may follow * Additional copyrights may follow
@ -46,6 +48,7 @@ int MPI_Ineighbor_alltoall(const void *sendbuf, int sendcount, MPI_Datatype send
void *recvbuf, int recvcount, MPI_Datatype recvtype, void *recvbuf, int recvcount, MPI_Datatype recvtype,
MPI_Comm comm, MPI_Request *request) MPI_Comm comm, MPI_Request *request)
{ {
size_t sendtype_size, recvtype_size;
int err; int err;
MEMCHECKER( MEMCHECKER(
@ -58,25 +61,32 @@ int MPI_Ineighbor_alltoall(const void *sendbuf, int sendcount, MPI_Datatype send
if (MPI_PARAM_CHECK) { if (MPI_PARAM_CHECK) {
/* Unrooted operation -- same checks for all ranks on both /* Unrooted operation -- same checks for all ranks on both
intracommunicators and intercommunicators */ intracommunicators and intercommunicators */
err = MPI_SUCCESS; err = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME); OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm) || !(OMPI_COMM_IS_CART(comm) || OMPI_COMM_IS_GRAPH(comm) || if (ompi_comm_invalid(comm) || !(OMPI_COMM_IS_CART(comm) || OMPI_COMM_IS_GRAPH(comm) ||
OMPI_COMM_IS_DIST_GRAPH(comm))) { OMPI_COMM_IS_DIST_GRAPH(comm))) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM, return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
FUNC_NAME); FUNC_NAME);
} else if (MPI_DATATYPE_NULL == recvtype || NULL == recvtype) { } else if (MPI_IN_PLACE == recvbuf) {
err = MPI_ERR_TYPE; return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_ARG,
} else if (recvcount < 0) { FUNC_NAME);
err = MPI_ERR_COUNT; } else {
} else if (MPI_IN_PLACE == recvbuf) { OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcount);
err = MPI_ERR_ARG; OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
} else { OMPI_CHECK_DATATYPE_FOR_RECV(err, recvtype, recvcount);
OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcount); OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
} }
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
if (MPI_IN_PLACE != sendbuf && !OMPI_COMM_IS_INTER(comm)) {
ompi_datatype_type_size(sendtype, &sendtype_size);
ompi_datatype_type_size(recvtype, &recvtype_size);
if ((sendtype_size*sendcount) != (recvtype_size*recvcount)) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TRUNCATE, FUNC_NAME);
}
}
} }
OPAL_CR_ENTER_LIBRARY(); OPAL_CR_ENTER_LIBRARY();

Просмотреть файл

@ -13,6 +13,8 @@
* Copyright (c) 2007 Cisco Systems, Inc. All rights reserved. * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2012-2013 Los Alamos National Security, LLC. All rights * Copyright (c) 2012-2013 Los Alamos National Security, LLC. All rights
* reserved. * reserved.
* Copyright (c) 2014 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* $COPYRIGHT$ * $COPYRIGHT$
* *
* Additional copyrights may follow * Additional copyrights may follow
@ -47,19 +49,22 @@ int MPI_Ineighbor_alltoallv(const void *sendbuf, const int sendcounts[], const i
MPI_Request *request) MPI_Request *request)
{ {
int i, size, err; int i, size, err;
size_t sendtype_size, recvtype_size;
bool zerosend=true, zerorecv=true;
MEMCHECKER( MEMCHECKER(
ptrdiff_t recv_ext; ptrdiff_t recv_ext;
ptrdiff_t send_ext; ptrdiff_t send_ext;
size = ompi_comm_remote_size(comm); memchecker_datatype(sendtype);
memchecker_datatype(recvtype);
ompi_datatype_type_extent(recvtype, &recv_ext); ompi_datatype_type_extent(recvtype, &recv_ext);
ompi_datatype_type_extent(sendtype, &send_ext); ompi_datatype_type_extent(sendtype, &send_ext);
memchecker_datatype(sendtype);
memchecker_datatype(recvtype);
memchecker_comm(comm); memchecker_comm(comm);
size = OMPI_COMM_IS_INTER(comm)?ompi_comm_remote_size(comm):ompi_comm_size(comm);
for ( i = 0; i < size; i++ ) { for ( i = 0; i < size; i++ ) {
/* check if send chunks are defined. */ /* check if send chunks are defined. */
memchecker_call(&opal_memchecker_base_isdefined, memchecker_call(&opal_memchecker_base_isdefined,
@ -90,21 +95,21 @@ int MPI_Ineighbor_alltoallv(const void *sendbuf, const int sendcounts[], const i
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME); return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
} }
/* We always define the remote group to be the same as the local size = OMPI_COMM_IS_INTER(comm)?ompi_comm_remote_size(comm):ompi_comm_size(comm);
group in the case of an intracommunicator, so it's safe to
get the size of the remote group here for both intra- and
intercommunicators */
size = ompi_comm_remote_size(comm);
for (i = 0; i < size; ++i) { for (i = 0; i < size; ++i) {
if (recvcounts[i] < 0) { OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcounts[i]);
err = MPI_ERR_COUNT;
} else if (MPI_DATATYPE_NULL == recvtype || NULL == recvtype) {
err = MPI_ERR_TYPE;
} else {
OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcounts[i]);
}
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME); OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
OMPI_CHECK_DATATYPE_FOR_RECV(err, recvtype, recvcounts[i]);
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
}
if (MPI_IN_PLACE != sendbuf && !OMPI_COMM_IS_INTER(comm)) {
int me = ompi_comm_rank(comm);
ompi_datatype_type_size(sendtype, &sendtype_size);
ompi_datatype_type_size(recvtype, &recvtype_size);
if ((sendtype_size*sendcounts[me]) != (recvtype_size*recvcounts[me])) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TRUNCATE, FUNC_NAME);
}
} }
} }

Просмотреть файл

@ -13,6 +13,8 @@
* Copyright (c) 2007 Cisco Systems, Inc. All rights reserved. * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2012-2013 Los Alamos National Security, LLC. All rights * Copyright (c) 2012-2013 Los Alamos National Security, LLC. All rights
* reserved. * reserved.
* Copyright (c) 2014 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* $COPYRIGHT$ * $COPYRIGHT$
* *
* Additional copyrights may follow * Additional copyrights may follow
@ -47,14 +49,16 @@ int MPI_Ineighbor_alltoallw(const void *sendbuf, const int sendcounts[], const M
MPI_Request *request) MPI_Request *request)
{ {
int i, size, err; int i, size, err;
size_t sendtype_size, recvtype_size;
bool zerosend=true, zerorecv=true;
MEMCHECKER( MEMCHECKER(
ptrdiff_t recv_ext; ptrdiff_t recv_ext;
ptrdiff_t send_ext; ptrdiff_t send_ext;
size = ompi_comm_remote_size(comm);
memchecker_comm(comm); memchecker_comm(comm);
size = OMPI_COMM_IS_INTER(comm)?ompi_comm_remote_size(comm):ompi_comm_size(comm);
for ( i = 0; i < size; i++ ) { for ( i = 0; i < size; i++ ) {
memchecker_datatype(sendtypes[i]); memchecker_datatype(sendtypes[i]);
memchecker_datatype(recvtypes[i]); memchecker_datatype(recvtypes[i]);
@ -89,21 +93,21 @@ int MPI_Ineighbor_alltoallw(const void *sendbuf, const int sendcounts[], const M
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME); return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
} }
/* We always define the remote group to be the same as the local size = OMPI_COMM_IS_INTER(comm)?ompi_comm_remote_size(comm):ompi_comm_size(comm);
group in the case of an intracommunicator, so it's safe to
get the size of the remote group here for both intra- and
intercommunicators */
size = ompi_comm_remote_size(comm);
for (i = 0; i < size; ++i) { for (i = 0; i < size; ++i) {
if (recvcounts[i] < 0) { OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtypes[i], sendcounts[i]);
err = MPI_ERR_COUNT;
} else if (MPI_DATATYPE_NULL == recvtypes[i] || NULL == recvtypes[i]) {
err = MPI_ERR_TYPE;
} else {
OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtypes[i], sendcounts[i]);
}
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME); OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
OMPI_CHECK_DATATYPE_FOR_RECV(err, recvtypes[i], recvcounts[i]);
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
}
if (MPI_IN_PLACE != sendbuf && !OMPI_COMM_IS_INTER(comm)) {
int me = ompi_comm_rank(comm);
ompi_datatype_type_size(sendtypes[me], &sendtype_size);
ompi_datatype_type_size(recvtypes[me], &recvtype_size);
if ((sendtype_size*sendcounts[me]) != (recvtype_size*recvcounts[me])) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TRUNCATE, FUNC_NAME);
}
} }
} }

Просмотреть файл

@ -13,6 +13,8 @@
* Copyright (c) 2007 Cisco Systems, Inc. All rights reserved. * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2013 Los Alamos National Security, LLC. All rights * Copyright (c) 2013 Los Alamos National Security, LLC. All rights
* reserved. * reserved.
* Copyright (c) 2014 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* $COPYRIGHT$ * $COPYRIGHT$
* *
* Additional copyrights may follow * Additional copyrights may follow
@ -45,6 +47,7 @@ int MPI_Neighbor_alltoall(const void *sendbuf, int sendcount, MPI_Datatype sendt
void *recvbuf, int recvcount, MPI_Datatype recvtype, void *recvbuf, int recvcount, MPI_Datatype recvtype,
MPI_Comm comm) MPI_Comm comm)
{ {
size_t sendtype_size, recvtype_size;
int err; int err;
MEMCHECKER( MEMCHECKER(
@ -71,21 +74,32 @@ int MPI_Neighbor_alltoall(const void *sendbuf, int sendcount, MPI_Datatype sendt
OMPI_COMM_IS_DIST_GRAPH(comm))) { OMPI_COMM_IS_DIST_GRAPH(comm))) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM, return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
FUNC_NAME); FUNC_NAME);
} else if (MPI_DATATYPE_NULL == recvtype || NULL == recvtype) {
err = MPI_ERR_TYPE;
} else if (recvcount < 0) {
err = MPI_ERR_COUNT;
} else if (MPI_IN_PLACE == recvbuf) { } else if (MPI_IN_PLACE == recvbuf) {
err = MPI_ERR_ARG; return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_ARG,
FUNC_NAME);
} else { } else {
OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcount); OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcount);
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
OMPI_CHECK_DATATYPE_FOR_RECV(err, recvtype, recvcount);
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
}
if (MPI_IN_PLACE != sendbuf && !OMPI_COMM_IS_INTER(comm)) {
ompi_datatype_type_size(sendtype, &sendtype_size);
ompi_datatype_type_size(recvtype, &recvtype_size);
if ((sendtype_size*sendcount) != (recvtype_size*recvcount)) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TRUNCATE, FUNC_NAME);
}
} }
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
} }
/* Do we need to do anything? */ /* Do we need to do anything? */
if (0 == sendcount && 0 == recvcount) { ompi_datatype_type_size(sendtype, &sendtype_size);
ompi_datatype_type_size(recvtype, &recvtype_size);
if (((MPI_IN_PLACE == sendbuf) ||
(0 == sendcount || 0 == sendtype_size)) &&
(0 == recvcount || 0 == recvtype_size)) {
return MPI_SUCCESS; return MPI_SUCCESS;
} }

Просмотреть файл

@ -13,6 +13,8 @@
* Copyright (c) 2007 Cisco Systems, Inc. All rights reserved. * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2012-2013 Los Alamos National Security, LLC. All rights * Copyright (c) 2012-2013 Los Alamos National Security, LLC. All rights
* reserved. * reserved.
* Copyright (c) 2014 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* $COPYRIGHT$ * $COPYRIGHT$
* *
* Additional copyrights may follow * Additional copyrights may follow
@ -47,19 +49,22 @@ int MPI_Neighbor_alltoallv(const void *sendbuf, const int sendcounts[], const in
MPI_Datatype recvtype, MPI_Comm comm) MPI_Datatype recvtype, MPI_Comm comm)
{ {
int i, size, err; int i, size, err;
size_t sendtype_size, recvtype_size;
bool zerosend=true, zerorecv=true;
MEMCHECKER( MEMCHECKER(
ptrdiff_t recv_ext; ptrdiff_t recv_ext;
ptrdiff_t send_ext; ptrdiff_t send_ext;
size = ompi_comm_remote_size(comm); memchecker_datatype(sendtype);
memchecker_datatype(recvtype);
ompi_datatype_type_extent(recvtype, &recv_ext); ompi_datatype_type_extent(recvtype, &recv_ext);
ompi_datatype_type_extent(sendtype, &send_ext); ompi_datatype_type_extent(sendtype, &send_ext);
memchecker_datatype(sendtype);
memchecker_datatype(recvtype);
memchecker_comm(comm); memchecker_comm(comm);
size = OMPI_COMM_IS_INTER(comm)?ompi_comm_remote_size(comm):ompi_comm_size(comm);
for ( i = 0; i < size; i++ ) { for ( i = 0; i < size; i++ ) {
/* check if send chunks are defined. */ /* check if send chunks are defined. */
memchecker_call(&opal_memchecker_base_isdefined, memchecker_call(&opal_memchecker_base_isdefined,
@ -96,22 +101,49 @@ int MPI_Neighbor_alltoallv(const void *sendbuf, const int sendcounts[], const in
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME); return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
} }
/* We always define the remote group to be the same as the local size = OMPI_COMM_IS_INTER(comm)?ompi_comm_remote_size(comm):ompi_comm_size(comm);
group in the case of an intracommunicator, so it's safe to
get the size of the remote group here for both intra- and
intercommunicators */
size = ompi_comm_remote_size(comm);
for (i = 0; i < size; ++i) { for (i = 0; i < size; ++i) {
if (recvcounts[i] < 0) { OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcounts[i]);
err = MPI_ERR_COUNT; OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
} else if (MPI_DATATYPE_NULL == recvtype || NULL == recvtype) { OMPI_CHECK_DATATYPE_FOR_RECV(err, recvtype, recvcounts[i]);
err = MPI_ERR_TYPE;
} else {
OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcounts[i]);
}
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME); OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
} }
if (MPI_IN_PLACE != sendbuf && !OMPI_COMM_IS_INTER(comm)) {
int me = ompi_comm_rank(comm);
ompi_datatype_type_size(sendtype, &sendtype_size);
ompi_datatype_type_size(recvtype, &recvtype_size);
if ((sendtype_size*sendcounts[me]) != (recvtype_size*recvcounts[me])) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TRUNCATE, FUNC_NAME);
}
}
}
/* Do we need to do anything? */
ompi_datatype_type_size(sendtype, &sendtype_size);
ompi_datatype_type_size(recvtype, &recvtype_size);
size = OMPI_COMM_IS_INTER(comm)?ompi_comm_remote_size(comm):ompi_comm_size(comm);
if (0 != recvtype_size) {
for (i = 0; i < size; ++i) {
if (0 != recvcounts[i]) {
zerorecv = false;
break;
}
}
}
if (MPI_IN_PLACE == sendbuf) {
zerosend = zerorecv;
} else if (0 != sendtype_size) {
for (i = 0; i < size; ++i) {
if (0 != sendcounts[i]) {
zerosend = false;
break;
}
}
}
if (zerosend && zerorecv) {
return MPI_SUCCESS;
} }
OPAL_CR_ENTER_LIBRARY(); OPAL_CR_ENTER_LIBRARY();

Просмотреть файл

@ -13,6 +13,8 @@
* Copyright (c) 2007 Cisco Systems, Inc. All rights reserved. * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2012-2013 Los Alamos National Security, LLC. All rights * Copyright (c) 2012-2013 Los Alamos National Security, LLC. All rights
* reserved. * reserved.
* Copyright (c) 2014 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* $COPYRIGHT$ * $COPYRIGHT$
* *
* Additional copyrights may follow * Additional copyrights may follow
@ -47,14 +49,16 @@ int MPI_Neighbor_alltoallw(const void *sendbuf, const int sendcounts[], const MP
const MPI_Datatype recvtypes[], MPI_Comm comm) const MPI_Datatype recvtypes[], MPI_Comm comm)
{ {
int i, size, err; int i, size, err;
size_t sendtype_size, recvtype_size;
bool zerosend=true, zerorecv=true;
MEMCHECKER( MEMCHECKER(
ptrdiff_t recv_ext; ptrdiff_t recv_ext;
ptrdiff_t send_ext; ptrdiff_t send_ext;
size = ompi_comm_remote_size(comm);
memchecker_comm(comm); memchecker_comm(comm);
size = OMPI_COMM_IS_INTER(comm)?ompi_comm_remote_size(comm):ompi_comm_size(comm);
for ( i = 0; i < size; i++ ) { for ( i = 0; i < size; i++ ) {
memchecker_datatype(sendtypes[i]); memchecker_datatype(sendtypes[i]);
memchecker_datatype(recvtypes[i]); memchecker_datatype(recvtypes[i]);
@ -95,22 +99,47 @@ int MPI_Neighbor_alltoallw(const void *sendbuf, const int sendcounts[], const MP
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME); return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
} }
/* We always define the remote group to be the same as the local size = OMPI_COMM_IS_INTER(comm)?ompi_comm_remote_size(comm):ompi_comm_size(comm);
group in the case of an intracommunicator, so it's safe to
get the size of the remote group here for both intra- and
intercommunicators */
size = ompi_comm_remote_size(comm);
for (i = 0; i < size; ++i) { for (i = 0; i < size; ++i) {
if (recvcounts[i] < 0) { OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtypes[i], sendcounts[i]);
err = MPI_ERR_COUNT; OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
} else if (MPI_DATATYPE_NULL == recvtypes[i] || NULL == recvtypes[i]) { OMPI_CHECK_DATATYPE_FOR_RECV(err, recvtypes[i], recvcounts[i]);
err = MPI_ERR_TYPE;
} else {
OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtypes[i], sendcounts[i]);
}
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME); OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
} }
if (MPI_IN_PLACE != sendbuf && !OMPI_COMM_IS_INTER(comm)) {
int me = ompi_comm_rank(comm);
ompi_datatype_type_size(sendtypes[me], &sendtype_size);
ompi_datatype_type_size(recvtypes[me], &recvtype_size);
if ((sendtype_size*sendcounts[me]) != (recvtype_size*recvcounts[me])) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TRUNCATE, FUNC_NAME);
}
}
}
/* Do we need to do anything? */
size = OMPI_COMM_IS_INTER(comm)?ompi_comm_remote_size(comm):ompi_comm_size(comm);
for (i = 0; i < size; ++i) {
ompi_datatype_type_size(recvtypes[i], &recvtype_size);
if (0 != recvtype_size && 0 != recvcounts[i]) {
zerorecv = false;
break;
}
}
if (MPI_IN_PLACE == sendbuf) {
zerosend = zerorecv;
} else {
for (i = 0; i < size; ++i) {
ompi_datatype_type_size(sendtypes[i], &sendtype_size);
if (0 != sendtype_size && 0 != sendcounts[i]) {
zerosend = false;
break;
}
}
}
if (zerosend && zerorecv) {
return MPI_SUCCESS;
} }
OPAL_CR_ENTER_LIBRARY(); OPAL_CR_ENTER_LIBRARY();