Remove shortcut optimizations from non-blocking collectives. We need to call into the collectives module even in the shortcutable cases to get the requests created.

This commit was SVN r26784.
This commit is contained in:
Brian Barrett 2012-07-11 16:03:55 +00:00
parent 7a4f6a6a1a
commit eb3c6546c2
11 changed files with 1 addition and 132 deletions
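Why the shortcuts are wrong for the non-blocking variants: the caller always receives an MPI_Request and will eventually wait on or test it, so returning MPI_SUCCESS early without calling into the coll module leaves that request handle uninitialized. A minimal caller-side sketch (an assumed standalone test program, not part of this commit) of the case the removed shortcuts used to skip:

#include <mpi.h>

int main(int argc, char **argv)
{
    int in = 0, out = 0;
    MPI_Request req;

    MPI_Init(&argc, &argv);

    /* count == 0: the old shortcut returned MPI_SUCCESS here without
       creating a request, so the MPI_Wait below would operate on an
       undefined handle.  After this change the coll module is always
       invoked and a (possibly trivial) request is created. */
    MPI_Iallreduce(&in, &out, 0, MPI_INT, MPI_SUM, MPI_COMM_WORLD, &req);
    MPI_Wait(&req, MPI_STATUS_IGNORE);

    MPI_Finalize();
    return 0;
}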

View file

@@ -88,28 +88,6 @@ int MPI_Iallgather(void *sendbuf, int sendcount, MPI_Datatype sendtype,
         OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
     }
-    /* Do we need to do anything?  Everyone had to give the same send
-       signature, which means that everyone must have given a
-       sendcount > 0 if there's anything to send for the intra-communicator
-       case.  If we're doing IN_PLACE, however, check recvcount,
-       not sendcount. */
-    if ( OMPI_COMM_IS_INTRA(comm) ) {
-        if ((MPI_IN_PLACE != sendbuf && 0 == sendcount) ||
-            (0 == recvcount)) {
-            return MPI_SUCCESS;
-        }
-    }
-    else if ( OMPI_COMM_IS_INTER(comm) ) {
-        /* for inter-communicators, the communication pattern
-           need not be symmetric.  Specifically, one group is
-           allowed to have sendcount=0, while the other has
-           a valid sendcount.  Thus, the only way not to do
-           anything is if both sendcount and recvcount are zero. */
-        if ( 0 == sendcount && 0 == recvcount ) {
-            return MPI_SUCCESS;
-        }
-    }
     OPAL_CR_ENTER_LIBRARY();
     /* Invoke the coll component to perform the back-end operation */

View file

@@ -111,27 +111,6 @@ int MPI_Iallgatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
         }
     }
-    /* Do we need to do anything?  Everyone had to give the same
-       signature, which means that everyone must have given a
-       sum(recvcounts) > 0 if there's anything to do. */
-    if ( OMPI_COMM_IS_INTRA(comm) ) {
-        for (i = 0; i < ompi_comm_size(comm); ++i) {
-            if (0 != recvcounts[i]) {
-                break;
-            }
-        }
-        if (i >= ompi_comm_size(comm)) {
-            return MPI_SUCCESS;
-        }
-    }
-    /* There is no rule that can be applied for inter-communicators, since
-       recvcount(s)=0 only indicates that the processes in the other group
-       do not send anything, sendcount=0 only indicates that I do not send
-       anything.  However, other processes in my group might very well send
-       something. */
     OPAL_CR_ENTER_LIBRARY();
     /* Invoke the coll component to perform the back-end operation */

View file

@@ -89,14 +89,6 @@ int MPI_Iallreduce(void *sendbuf, void *recvbuf, int count,
         OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
     }
-    /* MPI-1, p114, says that each process must supply at least
-       one element.  But at least the Pallas benchmarks call
-       MPI_REDUCE with a count of 0.  So be sure to handle it. */
-    if (0 == count) {
-        return MPI_SUCCESS;
-    }
     OPAL_CR_ENTER_LIBRARY();
     /* Invoke the coll component to perform the back-end operation */

View file

@@ -75,12 +75,6 @@ int MPI_Ialltoall(void *sendbuf, int sendcount, MPI_Datatype sendtype,
         OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
     }
-    /* Do we need to do anything? */
-    if (0 == sendcount && 0 == recvcount) {
-        return MPI_SUCCESS;
-    }
     OPAL_CR_ENTER_LIBRARY();
     /* Invoke the coll component to perform the back-end operation */

View file

@@ -56,21 +56,7 @@ int MPI_Ibarrier(MPI_Comm comm, MPI_Request *request)
     OPAL_CR_ENTER_LIBRARY();
-    /* Intracommunicators: Only invoke the back-end coll module barrier
-       function if there's more than one process in the communicator */
-    if (OMPI_COMM_IS_INTRA(comm)) {
-        if (ompi_comm_size(comm) > 1) {
-            err = comm->c_coll.coll_ibarrier(comm, request, comm->c_coll.coll_ibarrier_module);
-        }
-    }
-    /* Intercommunicators -- always invoke, because, by definition,
-       there are always at least 2 processes in an intercommunicator. */
-    else {
-        err = comm->c_coll.coll_ibarrier(comm, request, comm->c_coll.coll_ibarrier_module);
-    }
+    err = comm->c_coll.coll_ibarrier(comm, request, comm->c_coll.coll_ibarrier_module);
     /* All done */

View file

@@ -71,14 +71,6 @@ int MPI_Iexscan(void *sendbuf, void *recvbuf, int count,
         OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
     }
-    /* Do we need to do anything?  (MPI says that reductions have to
-       have a count of at least 1, but at least IMB calls reduce with
-       a count of 0 -- blah!) */
-    if (0 == count) {
-        return MPI_SUCCESS;
-    }
     OPAL_CR_ENTER_LIBRARY();
     /* Invoke the coll component to perform the back-end operation */

View file

@@ -160,17 +160,6 @@ int MPI_Igather(void *sendbuf, int sendcount, MPI_Datatype sendtype,
         }
     }
-    /* Do we need to do anything? */
-    if ((0 == sendcount && MPI_ROOT != root &&
-         (ompi_comm_rank(comm) != root ||
-          (ompi_comm_rank(comm) == root && MPI_IN_PLACE != sendbuf))) ||
-        (ompi_comm_rank(comm) == root && MPI_IN_PLACE == sendbuf &&
-         0 == recvcount) ||
-        (0 == recvcount && (MPI_ROOT == root || MPI_PROC_NULL == root))) {
-        return MPI_SUCCESS;
-    }
     OPAL_CR_ENTER_LIBRARY();
     /* Invoke the coll component to perform the back-end operation */

View file

@@ -116,14 +116,6 @@ int MPI_Ireduce(void *sendbuf, void *recvbuf, int count,
         }
     }
-    /* Do we need to do anything?  (MPI says that reductions have to
-       have a count of at least 1, but at least IMB calls reduce with
-       a count of 0 -- blah!) */
-    if (0 == count) {
-        return MPI_SUCCESS;
-    }
     OPAL_CR_ENTER_LIBRARY();
     /* Invoke the coll component to perform the back-end operation */

View file

@@ -105,20 +105,6 @@ int MPI_Ireduce_scatter(void *sendbuf, void *recvbuf, int *recvcounts,
         }
     }
-    /* MPI-1, p114, says that each process must supply at least one
-       element.  But at least the Pallas benchmarks call MPI_REDUCE
-       with a count of 0.  So be sure to handle it.  Grrr... */
-    size = ompi_comm_size(comm);
-    for (count = i = 0; i < size; ++i) {
-        if (0 == recvcounts[i]) {
-            ++count;
-        }
-    }
-    if (size == count) {
-        return MPI_SUCCESS;
-    }
     OPAL_CR_ENTER_LIBRARY();
     /* Invoke the coll component to perform the back-end operation */

View file

@@ -85,14 +85,6 @@ int MPI_Iscan(void *sendbuf, void *recvbuf, int count,
         OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
     }
-    /* Do we need to do anything?  (MPI says that reductions have to
-       have a count of at least 1, but at least IMB calls reduce with
-       a count of 0 -- blah!) */
-    if (0 == count) {
-        return MPI_SUCCESS;
-    }
     OPAL_CR_ENTER_LIBRARY();
     /* Call the coll component to actually perform the allgather */

View file

@@ -143,17 +143,6 @@ int MPI_Iscatter(void *sendbuf, int sendcount, MPI_Datatype sendtype,
         }
     }
-    /* Do we need to do anything? */
-    if ((0 == recvcount && MPI_ROOT != root &&
-         (ompi_comm_rank(comm) != root ||
-          (ompi_comm_rank(comm) == root && MPI_IN_PLACE != recvbuf))) ||
-        (ompi_comm_rank(comm) == root && MPI_IN_PLACE == recvbuf &&
-         0 == sendcount) ||
-        (0 == sendcount && (MPI_ROOT == root || MPI_PROC_NULL == root))) {
-        return MPI_SUCCESS;
-    }
     OPAL_CR_ENTER_LIBRARY();
     /* Invoke the coll component to perform the back-end operation */