
Upon further investigation, the fix in r7537 was an anomaly: zeroing out
the bits merely exposed the low bits being set. We were casting from a
size_t to a void*, which does not work on big-endian machines.

This fix makes MPI-2 dynamics work on PPC64 (tested on Linux).
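
For illustration (not part of the commit), a minimal standalone C sketch of
the failure mode: reading sizeof(int) bytes at the address of a size_t yields
the value's low-order bytes on a little-endian machine but its high-order
bytes on a big-endian one, which is effectively what the old bcast of
&rnamebuflen as MPI_INT did.

#include <stdio.h>
#include <string.h>

int main(void)
{
    size_t len = 42;   /* 8 bytes on an LP64 target such as PPC64 Linux */
    int as_int = 0;

    /* Mimics the old code's effect: the bcast read sizeof(int) bytes
       starting at the address of a size_t. */
    memcpy(&as_int, &len, sizeof(int));

    /* Little endian prints 42 (low-order bytes come first);
       big endian prints 0 (high-order bytes come first). */
    printf("len = %zu, reinterpreted as int = %d\n", len, as_int);
    return 0;
}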

This commit was SVN r7538.

The following SVN revision numbers were found above:
  r7537 --> open-mpi/ompi@fd45714c03
This commit is contained in:
Josh Hursey 2005-09-28 23:50:42 +00:00
parent fd45714c03
commit e825b4522f

@@ -30,6 +30,7 @@
 #include "errhandler/errhandler.h"
 #include "proc/proc.h"
 #include "info/info.h"
+#include "opal/util/convert.h"
 #include "opal/threads/mutex.h"
 #include "util/proc_info.h"
 #include "opal/util/bit_ops.h"
@@ -53,6 +54,7 @@ int ompi_comm_connect_accept ( ompi_communicator_t *comm, int root,
     int size, rsize, rank, rc;
     size_t num_vals;
     size_t rnamebuflen = 0;
+    int rnamebuflen_int = 0;
     void *rnamebuf=NULL;
 
     ompi_communicator_t *newcomp=MPI_COMM_NULL;
@@ -64,6 +66,10 @@ int ompi_comm_connect_accept ( ompi_communicator_t *comm, int root,
     size = ompi_comm_size ( comm );
     rank = ompi_comm_rank ( comm );
 
+    /* tell the progress engine to tick the event library more
+       often, to make sure that the OOB messages get sent */
+    opal_progress_event_increment();
+
     if ( rank == root ) {
         /* The process receiving first does not have yet the contact
            information of the remote process. Therefore, we have to
@@ -83,10 +89,6 @@ int ompi_comm_connect_accept ( ompi_communicator_t *comm, int root,
             return OMPI_ERROR;
         }
 
-        /* tell the progress engine to tick the event library more
-           often, to make sure that the OOB messages get sent */
-        opal_progress_event_increment();
-
         if (ORTE_SUCCESS != (rc = orte_dps.pack(nbuf, &size, 1, ORTE_INT))) {
             goto exit;
         }
@@ -112,11 +114,18 @@ int ompi_comm_connect_accept ( ompi_communicator_t *comm, int root,
         }
     }
 
+    /* First convert the size_t to an int so we can cast in the bcast to a void *
+     * if we don't then we will get badness when using big vs little endian */
+    if (OMPI_SUCCESS != (rc = opal_size2int(rnamebuflen, &rnamebuflen_int, true))) {
+        goto exit;
+    }
+
     /* bcast the buffer-length to all processes in the local comm */
-    rc = comm->c_coll.coll_bcast (&rnamebuflen, 1, MPI_INT, root, comm );
+    rc = comm->c_coll.coll_bcast (&rnamebuflen_int, 1, MPI_INT, root, comm );
     if ( OMPI_SUCCESS != rc ) {
         goto exit;
     }
+    rnamebuflen = (size_t)rnamebuflen_int;
 
     if ( rank != root ) {
         /* non root processes need to allocate the buffer manually */
@@ -132,7 +141,7 @@ int ompi_comm_connect_accept ( ompi_communicator_t *comm, int root,
        adds processes, which were not known yet to our
        process pool.
     */
-    rc = comm->c_coll.coll_bcast (rnamebuf, rnamebuflen, MPI_BYTE, root, comm );
+    rc = comm->c_coll.coll_bcast (rnamebuf, rnamebuflen_int, MPI_BYTE, root, comm );
     if ( OMPI_SUCCESS != rc ) {
         goto exit;
     }
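
For context, a hypothetical standalone analogue of the checked size_t-to-int
conversion used above (the real helper, opal_size2int, presumably comes from
the newly included opal/util/convert.h; the names and return codes below are
illustrative, not the actual OPAL ones):

/* Hypothetical analogue of a checked size_t -> int conversion; MY_SUCCESS
   and MY_ERR_OUT_OF_RANGE stand in for the real OMPI return codes. */
#include <limits.h>
#include <stdbool.h>
#include <stddef.h>

#define MY_SUCCESS           0
#define MY_ERR_OUT_OF_RANGE -1

static int my_size2int(size_t in, int *out, bool want_check)
{
    /* Refuse values that cannot be represented in an int before
       truncating, instead of silently handing garbage to the bcast. */
    if (want_check && in > (size_t) INT_MAX) {
        *out = 0;
        return MY_ERR_OUT_OF_RANGE;
    }
    *out = (int) in;
    return MY_SUCCESS;
}

Broadcasting the int and casting it back to a size_t on every rank, as the
hunk above does, keeps the broadcast payload at a fixed sizeof(int) bytes
regardless of the word size or byte order of the participating hosts.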