
Similar to r26488, also update the param error checking for
REDUCE_SCATTER, SCATTERV, and GATHERV.  Thanks to the mpi4py community
for reporting the issue.

This commit was SVN r26489.

The following SVN revision numbers were found above:
  r26488 --> open-mpi/ompi@42793aa10f
This commit is contained in:
Jeff Squyres 2012-05-24 13:26:44 +00:00
parent 42793aa10f
commit 4bda5da63a
3 changed files with 17 additions and 7 deletions
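
For context, a minimal caller-side sketch (not part of this commit; the helper name and buffers are made up) of why the checks below bound their loops by the remote group size: for MPI_GATHERV on an intercommunicator, the root's recvcounts and displs arrays have one entry per process in the remote group, and for an intracommunicator the remote group is defined to be the local group, so the same bound covers both cases.

#include <mpi.h>
#include <stdlib.h>

/* Hypothetical helper: gather one int from every process in the remote
   group of an intercommunicator into recvbuf (which must hold at least
   remote-group-size ints). */
void gather_ints_from_remote_group(MPI_Comm intercomm, int *recvbuf)
{
    int rsize, i;
    int *recvcounts, *displs;

    MPI_Comm_remote_size(intercomm, &rsize);   /* size of the remote group */
    recvcounts = malloc(rsize * sizeof(int));
    displs     = malloc(rsize * sizeof(int));
    for (i = 0; i < rsize; ++i) {              /* one entry per remote rank */
        recvcounts[i] = 1;
        displs[i] = i;
    }

    /* The root passes MPI_ROOT on its side of the intercommunicator; its
       send arguments are not significant. */
    MPI_Gatherv(NULL, 0, MPI_INT,
                recvbuf, recvcounts, displs, MPI_INT,
                MPI_ROOT, intercomm);

    free(recvcounts);
    free(displs);
}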

View file

@@ -9,7 +9,7 @@
  * University of Stuttgart. All rights reserved.
  * Copyright (c) 2004-2005 The Regents of the University of California.
  * All rights reserved.
- * Copyright (c) 2006-2007 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2006-2012 Cisco Systems, Inc. All rights reserved.
  * $COPYRIGHT$
  *
  * Additional copyrights may follow
@@ -134,7 +134,12 @@ int MPI_Gatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
  return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT, FUNC_NAME);
  }
- size = ompi_comm_size(comm);
+ /* We always define the remote group to be the same as the
+    local group in the case of an intracommunicator, so
+    it's safe to get the size of the remote group here for
+    both intra- and intercommunicators */
+ size = ompi_comm_remote_size(comm);
  for (i = 0; i < size; ++i) {
  if (recvcounts[i] < 0) {
  return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT, FUNC_NAME);

View file

@@ -9,7 +9,7 @@
  * University of Stuttgart. All rights reserved.
  * Copyright (c) 2004-2005 The Regents of the University of California.
  * All rights reserved.
- * Copyright (c) 2006 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2006-2012 Cisco Systems, Inc. All rights reserved.
  * $COPYRIGHT$
  *
  * Additional copyrights may follow
@@ -100,7 +100,7 @@ int MPI_Reduce_scatter(void *sendbuf, void *recvbuf, int *recvcounts,
  safe to get the size of the remote group here for both
  intra- and intercommunicators */
- size = ompi_comm_size(comm);
+ size = ompi_comm_remote_size(comm);
  for (i = 0; i < size; ++i) {
  OMPI_CHECK_DATATYPE_FOR_SEND(err, datatype, recvcounts[i]);
  OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
@@ -111,7 +111,7 @@ int MPI_Reduce_scatter(void *sendbuf, void *recvbuf, int *recvcounts,
  element. But at least the Pallas benchmarks call MPI_REDUCE
  with a count of 0. So be sure to handle it. Grrr... */
- size = ompi_comm_size(comm);
+ size = ompi_comm_remote_size(comm);
  for (count = i = 0; i < size; ++i) {
  if (0 == recvcounts[i]) {
  ++count;

View file

@@ -9,7 +9,7 @@
  * University of Stuttgart. All rights reserved.
  * Copyright (c) 2004-2005 The Regents of the University of California.
  * All rights reserved.
- * Copyright (c) 2006-2007 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2006-2012 Cisco Systems, Inc. All rights reserved.
  * $COPYRIGHT$
  *
  * Additional copyrights may follow
@@ -133,7 +133,12 @@ int MPI_Scatterv(void *sendbuf, int *sendcounts, int *displs,
  return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT, FUNC_NAME);
  }
- size = ompi_comm_size(comm);
+ /* We always define the remote group to be the same as the
+    local group in the case of an intracommunicator, so
+    it's safe to get the size of the remote group here for
+    both intra- and intercommunicators */
+ size = ompi_comm_remote_size(comm);
  for (i = 0; i < size; ++i) {
  OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcounts[i]);
  OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
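
Similarly, a hypothetical root-side sketch for MPI_SCATTERV over an intercommunicator (the helper name and buffers are illustrative, not from the commit): sendcounts and displs carry one entry per remote-group process, which matches the loop bound used by the updated check.

#include <mpi.h>
#include <stdlib.h>

/* Hypothetical helper: scatter one int from sendbuf (at least
   remote-group-size ints) to every process in the remote group of an
   intercommunicator. */
void scatter_ints_to_remote_group(MPI_Comm intercomm, int *sendbuf)
{
    int rsize, i;
    int *sendcounts, *displs;

    MPI_Comm_remote_size(intercomm, &rsize);   /* size of the remote group */
    sendcounts = malloc(rsize * sizeof(int));
    displs     = malloc(rsize * sizeof(int));
    for (i = 0; i < rsize; ++i) {              /* one chunk per remote rank */
        sendcounts[i] = 1;
        displs[i] = i;
    }

    /* The root passes MPI_ROOT; its receive arguments are not significant. */
    MPI_Scatterv(sendbuf, sendcounts, displs, MPI_INT,
                 NULL, 0, MPI_INT,
                 MPI_ROOT, intercomm);

    free(sendcounts);
    free(displs);
}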