1
1

Fix more typos where the allgather module was mistakenly used for allreduce operations, causing a crash when CUDA collectives are enabled.

Signed-off-by: Sylvain Jeaugey <sjeaugey@nvidia.com>
Signed-off-by: Akshay Venkatesh <akvenkatesh@nvidia.com>
This commit is contained in:
Sylvain Jeaugey 2017-02-24 16:35:29 -08:00
parent d7dd4d769e
commit f827b6b8dd

View file

@ -823,7 +823,7 @@ int ompi_comm_split_type (ompi_communicator_t *comm, int split_type, int key,
ok = (MPI_UNDEFINED == split_type) || global_split_type == split_type;
rc = comm->c_coll.coll_allreduce (MPI_IN_PLACE, &ok, 1, MPI_INT, MPI_MIN, comm,
comm->c_coll.coll_allgather_module);
comm->c_coll.coll_allreduce_module);
if (OPAL_UNLIKELY(OMPI_SUCCESS != rc)) {
return rc;
}
@ -831,7 +831,7 @@ int ompi_comm_split_type (ompi_communicator_t *comm, int split_type, int key,
if (inter) {
/* need an extra allreduce to ensure that all ranks have the same result */
rc = comm->c_coll.coll_allreduce (MPI_IN_PLACE, &ok, 1, MPI_INT, MPI_MIN, comm,
comm->c_coll.coll_allgather_module);
comm->c_coll.coll_allreduce_module);
if (OPAL_UNLIKELY(OMPI_SUCCESS != rc)) {
return rc;
}