Fixes trac:430.  Fix a few places where optimization checking conflicted
with the use of MPI_IN_PLACE, and make some optimization checks more
correct.  Thanks to Lisandro Dalcin for reporting the problems.

This commit was SVN r11904.

The following Trac tickets were found above:
  Ticket 430 --> https://svn.open-mpi.org/trac/ompi/ticket/430
Jeff Squyres, 2006-09-29 22:49:04 +00:00
parent 35376e7afc
commit 17539dc154
12 changed files with 46 additions and 46 deletions
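For context, here is a minimal, hypothetical sketch (not part of the commit) of the pattern that tripped the old checks: when a caller passes MPI_IN_PLACE to MPI_Allgather, the send arguments are ignored, so an early return keyed on sendcount == 0 silently skips a collective that still has data to exchange. The fixed check in the allgather diff below keys on recvcount instead.

    /* Hypothetical illustration (not from this commit): with MPI_IN_PLACE,
       MPI_Allgather ignores sendbuf/sendcount/sendtype entirely, so an
       early return on "sendcount == 0" is wrong; only recvcount says
       whether there is anything to exchange. */
    #include <mpi.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(int argc, char **argv)
    {
        int rank, size;
        MPI_Init(&argc, &argv);
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        MPI_Comm_size(MPI_COMM_WORLD, &size);

        int *buf = malloc(size * sizeof(int));
        buf[rank] = rank * 10;   /* each rank's contribution is already "in place" */

        /* Send arguments are placeholders (ignored); recvcount = 1 per peer. */
        MPI_Allgather(MPI_IN_PLACE, 0, MPI_DATATYPE_NULL,
                      buf, 1, MPI_INT, MPI_COMM_WORLD);

        if (0 == rank) {
            printf("buf[%d] = %d\n", size - 1, buf[size - 1]);
        }
        free(buf);
        MPI_Finalize();
        return 0;
    }

With the old check, sendcount == 0 here would have returned MPI_SUCCESS immediately on every rank and buf would never be filled.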

NEWS

@@ -68,8 +68,9 @@ version 1.0.
 - Fix for large-sized Fortran LOGICAL datatypes.
 - Fix various error checking in MPI_INFO_GET_NTHKEY and
-  MPI_GROUP_TRANSLATE_RANKS.  Thanks to Lisandro Dalcin for reporting
-  the problem.
+  MPI_GROUP_TRANSLATE_RANKS, and some collective operations
+  (particularly with regards to MPI_IN_PLACE).  Thanks to Lisandro
+  Dalcin for reporting the problems.
 - Fix receiving messages to buffers allocated by MPI_ALLOC_MEM.
 - Fix the "tuned" collective componenete where some cases where
   MPI_BCAST could hang.


@@ -60,12 +60,14 @@ int MPI_Allgather(void *sendbuf, int sendcount, MPI_Datatype sendtype,
         OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
     }

-    /* Can we optimize?  Everyone had to give the same send signature,
-       which means that everyone must have given a sendcount > 0 if
-       there's anything to send. */
+    /* Do we need to do anything?  Everyone had to give the same send
+       signature, which means that everyone must have given a
+       sendcount > 0 if there's anything to send.  If we're doing
+       IN_PLACE, however, check recvcount, not sendcount. */

-    if (sendcount == 0) {
-        return MPI_SUCCESS;
+    if ((MPI_IN_PLACE != sendbuf && 0 == sendcount) ||
+        (0 == recvcount)) {
+        return MPI_SUCCESS;
     }

     /* Invoke the coll component to perform the back-end operation */


@@ -61,10 +61,9 @@ int MPI_Alltoall(void *sendbuf, int sendcount, MPI_Datatype sendtype,
         OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
     }

-    /* If the sendcount is 0, since everyone gave the same value, then
-       we don't need to do anything */
+    /* Do we need to do anything? */

-    if (0 == sendcount) {
+    if (0 == sendcount && 0 == recvcount) {
         return MPI_SUCCESS;
     }



@@ -71,17 +71,12 @@ int MPI_Bcast(void *buffer, int count, MPI_Datatype datatype,
         }
     }

-    /* If there's only one node, we're done */
-
-    if (OMPI_COMM_IS_INTRA(comm) && ompi_comm_size(comm) <= 1) {
-        return MPI_SUCCESS;
-    }
-
-    /* Can we optimize? */
-
-    if (count == 0) {
-        return MPI_SUCCESS;
-    }
+    /* If there's only one node, or if the count is 0, we're done */
+
+    if ((OMPI_COMM_IS_INTRA(comm) && ompi_comm_size(comm) <= 1) ||
+        0 == count) {
+        return MPI_SUCCESS;
+    }

     /* Invoke the coll component to perform the back-end operation */



@@ -62,6 +62,14 @@ int MPI_Exscan(void *sendbuf, void *recvbuf, int count,
         OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
     }

+    /* Do we need to do anything?  (MPI says that reductions have to
+       have a count of at least 1, but at least IMB calls reduce with
+       a count of 0 -- blah!) */
+
+    if (0 == count) {
+        return MPI_SUCCESS;
+    }
+
     /* Invoke the coll component to perform the back-end operation */

     OBJ_RETAIN(op);


@@ -112,12 +112,14 @@ int MPI_Gather(void *sendbuf, int sendcount, MPI_Datatype sendtype,
         }
     }

-    /* Can we optimize?  Everyone had to give the same send signature,
-       which means that everyone must have given a sendcount > 0 if
-       there's anything to send. */
+    /* Do we need to do anything? */

-    if (sendcount == 0) {
-        return MPI_SUCCESS;
+    if ((0 == sendcount &&
+         (ompi_comm_rank(comm) != root ||
+          (ompi_comm_rank(comm) == root && MPI_IN_PLACE != sendbuf))) ||
+        (ompi_comm_rank(comm) == root && MPI_IN_PLACE == sendbuf &&
+         0 == recvbuf)) {
+        return MPI_SUCCESS;
     }

     /* Invoke the coll component to perform the back-end operation */
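As a hedged aside (this example is not part of the commit), the rank-aware condition above is needed because only the root may pass MPI_IN_PLACE to MPI_Gather, and when it does its send arguments are ignored; the root therefore has to be judged by its receive side, while non-root ranks can still be judged by sendcount.

    /* Hypothetical sketch: MPI_IN_PLACE gather at the root.  The root's
       sendcount is meaningless here, so an early return cannot key on it. */
    #include <mpi.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(int argc, char **argv)
    {
        int rank, size, root = 0;
        MPI_Init(&argc, &argv);
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        MPI_Comm_size(MPI_COMM_WORLD, &size);

        if (root == rank) {
            int *all = malloc(size * sizeof(int));
            all[root] = 42;   /* root's own value is already in the receive buffer */
            MPI_Gather(MPI_IN_PLACE, 0, MPI_DATATYPE_NULL,
                       all, 1, MPI_INT, root, MPI_COMM_WORLD);
            printf("root gathered %d values\n", size);
            free(all);
        } else {
            int mine = rank;
            /* Non-roots send normally; their recv arguments are ignored. */
            MPI_Gather(&mine, 1, MPI_INT,
                       NULL, 0, MPI_DATATYPE_NULL, root, MPI_COMM_WORLD);
        }

        MPI_Finalize();
        return 0;
    }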


@@ -128,12 +128,6 @@ int MPI_Gatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
         }
     }

-    /* If we have nothing to do, just return */
-
-    if (0 == sendcount && ompi_comm_rank(comm) != root) {
-        return MPI_SUCCESS;
-    }
-
     /* Invoke the coll component to perform the back-end operation */

     err = comm->c_coll.coll_gatherv(sendbuf, sendcount, sendtype, recvbuf,


@@ -81,10 +81,10 @@ int MPI_Reduce(void *sendbuf, void *recvbuf, int count,
         }
     }

-    /* MPI-1, p114, says that each process must supply at least
-       one element.  But at least the Pallas benchmarks call
-       MPI_REDUCE with a count of 0.  So be sure to handle it. */
+    /* Do we need to do anything?  (MPI says that reductions have to
+       have a count of at least 1, but at least IMB calls reduce with
+       a count of 0 -- blah!) */

     if (0 == count) {
         return MPI_SUCCESS;
     }


@@ -88,7 +88,7 @@ int MPI_Reduce_scatter(void *sendbuf, void *recvbuf, int *recvcounts,
     if (size == count) {
         return MPI_SUCCESS;
     }

     /* Invoke the coll component to perform the back-end operation */

     OBJ_RETAIN(op);


@@ -70,7 +70,9 @@ int MPI_Scan(void *sendbuf, void *recvbuf, int count,
         OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
     }

-    /* If everyone supplied count == 0, we can just return */
+    /* Do we need to do anything?  (MPI says that reductions have to
+       have a count of at least 1, but at least IMB calls reduce with
+       a count of 0 -- blah!) */

     if (0 == count) {
         return MPI_SUCCESS;


@@ -107,10 +107,13 @@ int MPI_Scatter(void *sendbuf, int sendcount, MPI_Datatype sendtype,
         }
     }

-    /* If we have nothing to receive, return success (everyone must
-       have given the same recvcount) */
+    /* Do we need to do anything? */

-    if (0 == recvcount) {
+    if ((0 == recvcount &&
+         (ompi_comm_rank(comm) != root ||
+          (ompi_comm_rank(comm) == root && MPI_IN_PLACE != recvbuf))) ||
+        (ompi_comm_rank(comm) == root && MPI_IN_PLACE == recvbuf &&
+         0 == sendcount)) {
         return MPI_SUCCESS;
     }



@@ -136,12 +136,6 @@ int MPI_Scatterv(void *sendbuf, int *sendcounts, int *displs,
         }
     }

-    /* If we have nothing to do, just return */
-
-    if (0 == recvcount && ompi_comm_rank(comm) != root) {
-        return MPI_SUCCESS;
-    }
-
     /* Invoke the coll component to perform the back-end operation */

     err = comm->c_coll.coll_scatterv(sendbuf, sendcounts, displs, sendtype,