1
1

The sum of the local recvcounts and the sum of the remote recvcounts must be
identical (an MPI standard requirement), so the size of the recvcounts array
is bounded by the number of participants in the local group.

This commit was SVN r26496.
This commit is contained in:
George Bosilca 2012-05-25 04:27:19 +00:00
parent 5391e98d56
commit 3704bfccbe

View file

@ -46,11 +46,11 @@ int MPI_Reduce_scatter(void *sendbuf, void *recvbuf, int *recvcounts,
MEMCHECKER(
int rank;
size = ompi_comm_remote_size(comm);
size = ompi_comm_size(comm);
rank = ompi_comm_rank(comm);
for (count = i = 0; i < size; ++i) {
if (0 == recvcounts[i]) {
++count;
count += recvcounts[i];
}
}
@ -95,12 +95,10 @@ int MPI_Reduce_scatter(void *sendbuf, void *recvbuf, int *recvcounts,
}
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
/* We always define the remote group to be the same as the
local group in the case of an intracommunicator, so it's
safe to get the size of the remote group here for both
intra- and intercommunicators */
size = ompi_comm_remote_size(comm);
/* Based on the standard each group has to provide the same total
number of elements, so the size of the recvcounts array depends
on the number of participants in the local group. */
size = ompi_comm_size(comm);
for (i = 0; i < size; ++i) {
OMPI_CHECK_DATATYPE_FOR_SEND(err, datatype, recvcounts[i]);
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
@ -111,7 +109,7 @@ int MPI_Reduce_scatter(void *sendbuf, void *recvbuf, int *recvcounts,
element. But at least the Pallas benchmarks call MPI_REDUCE
with a count of 0. So be sure to handle it. Grrr... */
size = ompi_comm_remote_size(comm);
size = ompi_comm_size(comm);
for (count = i = 0; i < size; ++i) {
if (0 == recvcounts[i]) {
++count;