Fix MPI_Alltoallv in coll/tuned
This changeset:
- always call the low-level implementation for:
  * MPI_Alltoallv
  * MPI_Neighbor_alltoallv
  * MPI_Alltoallw
  * MPI_Neighbor_alltoallw
- fix mca_coll_tuned_alltoallv_intra_basic_inplace so zero-size types are correctly handled

cmr=v1.8.2:reviewer=bosilca:ticket=4715

This commit was SVN r32013.

The following Trac tickets were found above:
  Ticket 4715 --> https://svn.open-mpi.org/trac/ompi/ticket/4715
Parent: 2f96f16416
Commit: e9ed9def02
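As an illustration only (not part of this commit), the sketch below is a hypothetical reproducer for the case the change targets: an in-place MPI_Alltoallv whose receive datatype has zero size. With the removed MPI-layer shortcut, such a call returned MPI_SUCCESS before reaching any coll component; after this change it reaches coll/tuned, whose in-place basic path now returns early when the receive datatype size is zero. All names below (zero_type, counts, displs, recvbuf) are illustrative.

    /* Hypothetical reproducer sketch, assuming a standard MPI installation. */
    #include <mpi.h>
    #include <stdlib.h>

    int main(int argc, char **argv)
    {
        int i, size;
        MPI_Init(&argc, &argv);
        MPI_Comm_size(MPI_COMM_WORLD, &size);

        /* A committed datatype whose size is 0 bytes. */
        MPI_Datatype zero_type;
        MPI_Type_contiguous(0, MPI_INT, &zero_type);
        MPI_Type_commit(&zero_type);

        int *counts = malloc(size * sizeof(int));
        int *displs = calloc(size, sizeof(int));
        for (i = 0; i < size; ++i) {
            counts[i] = 1;          /* non-zero counts, but zero-size type */
        }
        char recvbuf[1];            /* never dereferenced: 0 bytes per peer */

        /* With MPI_IN_PLACE the send arguments are ignored; recvcounts and
         * recvtype describe both directions of the exchange. */
        MPI_Alltoallv(MPI_IN_PLACE, NULL, NULL, MPI_DATATYPE_NULL,
                      recvbuf, counts, displs, zero_type, MPI_COMM_WORLD);

        MPI_Type_free(&zero_type);
        free(counts);
        free(displs);
        MPI_Finalize();
        return 0;
    }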
@@ -56,16 +56,17 @@ mca_coll_tuned_alltoallv_intra_basic_inplace(void *rbuf, const int *rcounts, con
     int i, j, size, rank, err=MPI_SUCCESS;
     MPI_Request *preq;
     char *tmp_buffer;
-    size_t max_size;
+    size_t max_size, rdtype_size;
     ptrdiff_t ext;
 
     /* Initialize. */
 
     size = ompi_comm_size(comm);
     rank = ompi_comm_rank(comm);
+    ompi_datatype_type_size(rdtype, &rdtype_size);
 
     /* If only one process, we're done. */
-    if (1 == size) {
+    if (1 == size || 0 == rdtype_size) {
         return MPI_SUCCESS;
     }
 
@@ -49,8 +49,6 @@ int MPI_Alltoallv(const void *sendbuf, const int sendcounts[],
                   MPI_Datatype recvtype, MPI_Comm comm)
 {
     int i, size, err;
-    size_t sendtype_size, recvtype_size;
-    bool zerosend=true, zerorecv=true;
 
     MEMCHECKER(
         ptrdiff_t recv_ext;
@@ -112,6 +110,7 @@ int MPI_Alltoallv(const void *sendbuf, const int sendcounts[],
         }
 
         if (MPI_IN_PLACE != sendbuf && !OMPI_COMM_IS_INTER(comm)) {
+            size_t sendtype_size, recvtype_size;
            int me = ompi_comm_rank(comm);
            ompi_datatype_type_size(sendtype, &sendtype_size);
            ompi_datatype_type_size(recvtype, &recvtype_size);
@@ -121,33 +120,6 @@ int MPI_Alltoallv(const void *sendbuf, const int sendcounts[],
         }
     }
 
-    /* Do we need to do anything? */
-
-    ompi_datatype_type_size(sendtype, &sendtype_size);
-    ompi_datatype_type_size(recvtype, &recvtype_size);
-    size = OMPI_COMM_IS_INTER(comm)?ompi_comm_remote_size(comm):ompi_comm_size(comm);
-    if (0 != recvtype_size) {
-        for (i = 0; i < size; ++i) {
-            if (0 != recvcounts[i]) {
-                zerorecv = false;
-                break;
-            }
-        }
-    }
-    if (MPI_IN_PLACE == sendbuf) {
-        zerosend = zerorecv;
-    } else if (0 != sendtype_size) {
-        for (i = 0; i < size; ++i) {
-            if (0 != sendcounts[i]) {
-                zerosend = false;
-                break;
-            }
-        }
-    }
-    if (zerosend && zerorecv) {
-        return MPI_SUCCESS;
-    }
-
     OPAL_CR_ENTER_LIBRARY();
 
     /* Invoke the coll component to perform the back-end operation */
@@ -49,8 +49,6 @@ int MPI_Alltoallw(const void *sendbuf, const int sendcounts[],
                   const MPI_Datatype recvtypes[], MPI_Comm comm)
 {
     int i, size, err;
-    size_t sendtype_size, recvtype_size;
-    bool zerosend=true, zerorecv=true;
 
     MEMCHECKER(
         ptrdiff_t recv_ext;
@@ -107,6 +105,7 @@ int MPI_Alltoallw(const void *sendbuf, const int sendcounts[],
         }
 
         if (MPI_IN_PLACE != sendbuf && !OMPI_COMM_IS_INTER(comm)) {
+            size_t sendtype_size, recvtype_size;
            int me = ompi_comm_rank(comm);
            ompi_datatype_type_size(sendtypes[me], &sendtype_size);
            ompi_datatype_type_size(recvtypes[me], &recvtype_size);
@@ -116,31 +115,6 @@ int MPI_Alltoallw(const void *sendbuf, const int sendcounts[],
         }
     }
 
-    /* Do we need to do anything? */
-
-    size = OMPI_COMM_IS_INTER(comm)?ompi_comm_remote_size(comm):ompi_comm_size(comm);
-    for (i = 0; i < size; ++i) {
-        ompi_datatype_type_size(recvtypes[i], &recvtype_size);
-        if (0 != recvtype_size && 0 != recvcounts[i]) {
-            zerorecv = false;
-            break;
-        }
-    }
-    if (MPI_IN_PLACE == sendbuf) {
-        zerosend = zerorecv;
-    } else {
-        for (i = 0; i < size; ++i) {
-            ompi_datatype_type_size(sendtypes[i], &sendtype_size);
-            if (0 != sendtype_size && 0 != sendcounts[i]) {
-                zerosend = false;
-                break;
-            }
-        }
-    }
-    if (zerosend && zerorecv) {
-        return MPI_SUCCESS;
-    }
-
     OPAL_CR_ENTER_LIBRARY();
 
     /* Invoke the coll component to perform the back-end operation */
@@ -51,8 +51,6 @@ int MPI_Neighbor_alltoallv(const void *sendbuf, const int sendcounts[], const in
 {
     int i, err;
     int indegree, outdegree, weighted;
-    size_t sendtype_size, recvtype_size;
-    bool zerosend=true, zerorecv=true;
 
     MEMCHECKER(
         ptrdiff_t recv_ext;
@@ -123,33 +121,6 @@ int MPI_Neighbor_alltoallv(const void *sendbuf, const int sendcounts[], const in
         }
     }
 
-    /* Do we need to do anything? */
-
-    ompi_datatype_type_size(sendtype, &sendtype_size);
-    ompi_datatype_type_size(recvtype, &recvtype_size);
-    ompi_comm_neighbors_count(comm, &indegree, &outdegree, &weighted);
-    if (0 != recvtype_size) {
-        for (i = 0; i < indegree; ++i) {
-            if (0 != recvcounts[i]) {
-                zerorecv = false;
-                break;
-            }
-        }
-    }
-    if (MPI_IN_PLACE == sendbuf) {
-        zerosend = zerorecv;
-    } else if (0 != sendtype_size) {
-        for (i = 0; i < outdegree; ++i) {
-            if (0 != sendcounts[i]) {
-                zerosend = false;
-                break;
-            }
-        }
-    }
-    if (zerosend && zerorecv) {
-        return MPI_SUCCESS;
-    }
-
     OPAL_CR_ENTER_LIBRARY();
 
     /* Invoke the coll component to perform the back-end operation */
@@ -51,8 +51,6 @@ int MPI_Neighbor_alltoallw(const void *sendbuf, const int sendcounts[], const MP
 {
     int i, err;
     int indegree, outdegree, weighted;
-    size_t sendtype_size, recvtype_size;
-    bool zerosend=true, zerorecv=true;
 
     MEMCHECKER(
         ptrdiff_t recv_ext;
@@ -119,32 +117,6 @@ int MPI_Neighbor_alltoallw(const void *sendbuf, const int sendcounts[], const MP
         }
     }
 
-    /* Do we need to do anything? */
-
-    err = ompi_comm_neighbors_count(comm, &indegree, &outdegree, &weighted);
-    OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
-    for (i = 0; i < indegree; ++i) {
-        ompi_datatype_type_size(recvtypes[i], &recvtype_size);
-        if (0 != recvtype_size && 0 != recvcounts[i]) {
-            zerorecv = false;
-            break;
-        }
-    }
-    if (MPI_IN_PLACE == sendbuf) {
-        zerosend = zerorecv;
-    } else {
-        for (i = 0; i < outdegree; ++i) {
-            ompi_datatype_type_size(sendtypes[i], &sendtype_size);
-            if (0 != sendtype_size && 0 != sendcounts[i]) {
-                zerosend = false;
-                break;
-            }
-        }
-    }
-    if (zerosend && zerorecv) {
-        return MPI_SUCCESS;
-    }
-
     OPAL_CR_ENTER_LIBRARY();
 
     /* Invoke the coll component to perform the back-end operation */