openmpi/ompi/mca/coll/inter/coll_inter_bcast.c
Ralph Castain 9613b3176c Effectively revert the orte_output system and return to direct use of opal_output at all levels. Retain the orte_show_help subsystem to allow aggregation of show_help messages at the HNP.
After much work by Jeff and myself, and quite a lot of discussion, it has become clear that we simply cannot resolve the infinite loops caused by RML-involved subsystems calling orte_output. The original rationale for the change to orte_output has also been reduced by shifting the output of XML-formatted vs human readable messages to an alternative approach.

I have globally replaced the orte_output/ORTE_OUTPUT calls in the code base, as well as the corresponding .h file name. I have test compiled and run this on the various environments within my reach, so hopefully this will prove minimally disruptive.

This commit was SVN r18619.
2008-06-09 14:53:58 +00:00

83 lines
2.5 KiB
C

/*
 * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation. All rights reserved.
 * Copyright (c) 2004-2005 The University of Tennessee and The University
 *                         of Tennessee Research Foundation. All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart. All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * Copyright (c) 2006-2007 University of Houston. All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */
#include "ompi_config.h"
#include "coll_inter.h"
#include "mpi.h"
#include "ompi/constants.h"
#include "orte/util/show_help.h"
#include "ompi/communicator/communicator.h"
#include "ompi/mca/coll/coll.h"
#include "ompi/mca/coll/base/coll_tags.h"
#include "ompi/mca/pml/pml.h"
/*
* bcast_inter
*
* Function: - broadcast using the local_comm
* Accepts: - same arguments as MPI_Bcast()
* Returns: - MPI_SUCCESS or error code
*/
int
mca_coll_inter_bcast_inter(void *buff, int count,
                           struct ompi_datatype_t *datatype, int root,
                           struct ompi_communicator_t *comm,
                           struct mca_coll_base_module_1_1_0_t *module)
{
    int rsize;
    int rank;
    int err;

    rsize = ompi_comm_remote_size(comm);
    rank  = ompi_comm_rank(comm);

    if (MPI_PROC_NULL == root) {
        /* do nothing */
        err = OMPI_SUCCESS;
    } else if (MPI_ROOT != root) {
        /* Non-root group: the first process receives the data, then
         * broadcasts it to the others over the local communicator */
        if (0 == rank) {
            err = MCA_PML_CALL(recv(buff, count, datatype, root,
                                    MCA_COLL_BASE_TAG_BCAST, comm,
                                    MPI_STATUS_IGNORE));
            if (OMPI_SUCCESS != err) {
                return err;
            }
        }
        err = comm->c_local_comm->c_coll.coll_bcast(buff, count, datatype, 0,
                                                    comm->c_local_comm,
                                                    comm->c_local_comm->c_coll.coll_bcast_module);
    } else {
        /* Root section: send to the first process of the remote group */
        err = MCA_PML_CALL(send(buff, count, datatype, 0,
                                MCA_COLL_BASE_TAG_BCAST,
                                MCA_PML_BASE_SEND_STANDARD,
                                comm));
        if (OMPI_SUCCESS != err) {
            return err;
        }
    }

    /* All done */
    return err;
}
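
For context, a minimal, self-contained user program (not part of the Open MPI tree; names and the tag value are illustrative) showing how this path is typically exercised: MPI_Bcast over an intercommunicator, with the root group's leader passing MPI_ROOT, its peers passing MPI_PROC_NULL, and every process of the remote group passing the root's rank within the root group. The component above then relays the buffer to rank 0 of the remote group and re-broadcasts it over that group's local communicator. This is a sketch under those assumptions, not a test from the repository.

/* Hypothetical example: intercommunicator broadcast from user code. */
#include <stdio.h>
#include <mpi.h>

int main(int argc, char **argv)
{
    MPI_Comm local, intercomm;
    int rank, is_upper, data = 0;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /* Split COMM_WORLD into two groups and connect them with an
     * intercommunicator.  World rank 0 leads the lower group, world
     * rank 1 leads the upper group (tag 42 is arbitrary). */
    is_upper = rank % 2;
    MPI_Comm_split(MPI_COMM_WORLD, is_upper, rank, &local);
    MPI_Intercomm_create(local, 0, MPI_COMM_WORLD, is_upper ? 0 : 1,
                         42, &intercomm);

    if (!is_upper) {
        int lrank;
        MPI_Comm_rank(local, &lrank);
        data = 17;
        /* Lower group holds the data: its local rank 0 is the root,
         * the rest of the group participates with MPI_PROC_NULL. */
        MPI_Bcast(&data, 1, MPI_INT,
                  (0 == lrank) ? MPI_ROOT : MPI_PROC_NULL, intercomm);
    } else {
        /* Remote group: receive from rank 0 of the root group. */
        MPI_Bcast(&data, 1, MPI_INT, 0, intercomm);
        printf("world rank %d received %d\n", rank, data);
    }

    MPI_Comm_free(&intercomm);
    MPI_Comm_free(&local);
    MPI_Finalize();
    return 0;
}

Running it with any process count of two or more (e.g. mpirun -np 4 ./a.out) keeps both groups non-empty, so every upper-group process prints the value sent by the lower group's leader.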