99144db970
Some MPI C interface files saw spacing changes to conform to Open MPI's coding standards. Changed the MPI C interface files to use {{{OPAL_CR_ENTER_LIBRARY()}}} and {{{OPAL_CR_EXIT_LIBRARY()}}} instead of just {{{OPAL_CR_TEST_CHECKPOINT_READY()}}}; this gives the checkpoint/restart system more flexibility in how it behaves.

Fixed the configure check for {{{--enable-ft-thread}}} so that it has a known dependency on {{{--enable-mpi-thread}}} (and/or {{{--enable-progress-thread}}}).

Added a line for Checkpoint/Restart support to {{{ompi_info}}}.

Added options to choose at runtime whether or not to use the checkpoint polling thread. By default, if the user asked for it to be compiled in, then it is used; but some users will want the ability to toggle its use at runtime.

There are still some places for improvement, but the feature works correctly. As always with Checkpoint/Restart, it is compiled out unless explicitly requested at configure time. Further, even if it was configured in, it is not used unless explicitly requested by the user at runtime.

This commit was SVN r17516.
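For reference, the new bracketing pattern looks roughly like the sketch below. This is a minimal illustration, not a verbatim diff: {{{MPI_Example}}} is a hypothetical binding, and the elided parts stand in for each real binding's own argument checking and body (such as the file shown after this message).

{{{
/* Hypothetical MPI C binding showing the checkpoint/restart bracketing
 * pattern this commit applies across the real bindings. */
int MPI_Example(MPI_Comm comm)
{
    if ( MPI_PARAM_CHECK ) {
        /* ... validate arguments before entering the library ... */
    }

    OPAL_CR_ENTER_LIBRARY();    /* was: OPAL_CR_TEST_CHECKPOINT_READY(); */

    /* ... do the actual work of the binding ... */

    OPAL_CR_EXIT_LIBRARY();     /* every return path must exit the library */
    return MPI_SUCCESS;
}
}}}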
/*
 * Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation.  All rights reserved.
 * Copyright (c) 2004-2005 The University of Tennessee and The University
 *                         of Tennessee Research Foundation.  All rights
 *                         reserved.
 * Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart.  All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * Copyright (c) 2006-2007 University of Houston.  All rights reserved.
 * Copyright (c) 2006-2007 Cisco Systems, Inc.  All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */
#include "ompi_config.h"

#include "ompi/mpi/c/bindings.h"
#include "ompi/mca/pml/pml.h"
#include "ompi/communicator/communicator.h"
#include "ompi/request/request.h"
#include "ompi/memchecker.h"

#if OMPI_HAVE_WEAK_SYMBOLS && OMPI_PROFILING_DEFINES
#pragma weak MPI_Intercomm_create = PMPI_Intercomm_create
#endif

#if OMPI_PROFILING_DEFINES
#include "ompi/mpi/c/profile/defines.h"
#endif

static const char FUNC_NAME[] = "MPI_Intercomm_create";

int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader,
                         MPI_Comm bridge_comm, int remote_leader,
                         int tag, MPI_Comm *newintercomm)
{
    int local_size=0, local_rank=0;
    int lleader=0, rleader=0;
    ompi_communicator_t *newcomp=NULL;
    struct ompi_proc_t **rprocs=NULL;
    int rc=0, rsize=0;
    ompi_proc_t **proc_list=NULL;
    int j;
    ompi_group_t *new_group_pointer;

    MEMCHECKER(
        memchecker_comm(local_comm);
        memchecker_comm(bridge_comm);
    );

    if ( MPI_PARAM_CHECK ) {
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);

        if ( ompi_comm_invalid ( local_comm ) ||
             ( local_comm->c_flags & OMPI_COMM_INTER ) )
            return OMPI_ERRHANDLER_INVOKE ( MPI_COMM_WORLD, MPI_ERR_COMM,
                                            FUNC_NAME);

        if ( NULL == newintercomm )
            return OMPI_ERRHANDLER_INVOKE ( local_comm, MPI_ERR_ARG,
                                            FUNC_NAME);

        /* if ( tag < 0 || tag > MPI_TAG_UB )
               return OMPI_ERRHANDLER_INVOKE ( local_comm, MPI_ERR_ARG,
                                               FUNC_NAME);
        */
    }

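    /* Tell the checkpoint/restart system that this thread is now inside
       the MPI library; every return path from here on must pair this
       with OPAL_CR_EXIT_LIBRARY(). */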
    OPAL_CR_ENTER_LIBRARY();

    local_size = ompi_comm_size ( local_comm );
    local_rank = ompi_comm_rank ( local_comm );
    lleader = local_leader;
    rleader = remote_leader;

    if ( MPI_PARAM_CHECK ) {
        if ( (0 > local_leader) || (local_leader >= local_size) ) {
            OPAL_CR_EXIT_LIBRARY();
            return OMPI_ERRHANDLER_INVOKE ( local_comm, MPI_ERR_ARG,
                                            FUNC_NAME);
        }

        /* remember that the remote_leader and bridge_comm arguments
           just have to be valid at the local_leader */
        if ( local_rank == local_leader ) {
            if ( ompi_comm_invalid ( bridge_comm ) ||
                 (bridge_comm->c_flags & OMPI_COMM_INTER) ) {
                OPAL_CR_EXIT_LIBRARY();
                return OMPI_ERRHANDLER_INVOKE ( local_comm, MPI_ERR_COMM,
                                                FUNC_NAME);
            }
            if ( (remote_leader < 0) || (remote_leader >= ompi_comm_size(bridge_comm))) {
                OPAL_CR_EXIT_LIBRARY();
                return OMPI_ERRHANDLER_INVOKE ( local_comm, MPI_ERR_ARG,
                                                FUNC_NAME);
            }
        } /* if ( local_rank == local_leader ) */
    }

    if ( local_rank == local_leader ) {
        MPI_Request req;

        /* the local leaders exchange the sizes of their groups */
        rc = MCA_PML_CALL(irecv(&rsize, 1, MPI_INT, rleader, tag, bridge_comm,
                                &req));
        if ( rc != MPI_SUCCESS ) {
            goto err_exit;
        }
        rc = MCA_PML_CALL(send (&local_size, 1, MPI_INT, rleader, tag,
                                MCA_PML_BASE_SEND_STANDARD, bridge_comm));
        if ( rc != MPI_SUCCESS ) {
            goto err_exit;
        }
        rc = ompi_request_wait_all ( 1, &req, MPI_STATUS_IGNORE);
        if ( rc != MPI_SUCCESS ) {
            goto err_exit;
        }
    }

    /* bcast size and list of remote processes to all processes in local_comm */
    rc = local_comm->c_coll.coll_bcast ( &rsize, 1, MPI_INT, lleader,
                                         local_comm,
                                         local_comm->c_coll.coll_bcast_module);
    if ( rc != MPI_SUCCESS ) {
        goto err_exit;
    }

    rprocs = ompi_comm_get_rprocs ( local_comm, bridge_comm, lleader,
                                    remote_leader, tag, rsize );
    if ( NULL == rprocs ) {
        goto err_exit;
    }

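    /* MPI requires the local and remote groups of an inter-communicator
       to be disjoint, so (optionally) check that they do not overlap */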
    if ( MPI_PARAM_CHECK ) {
        if(OMPI_GROUP_IS_DENSE(local_comm->c_local_group)) {
            rc = ompi_comm_overlapping_groups(local_comm->c_local_group->grp_proc_count,
                                              local_comm->c_local_group->grp_proc_pointers,
                                              rsize,
                                              rprocs);
        }
        else {
            proc_list = (ompi_proc_t **) calloc (local_comm->c_local_group->grp_proc_count,
                                                 sizeof (ompi_proc_t *));
            for(j=0 ; j<local_comm->c_local_group->grp_proc_count ; j++) {
                proc_list[j] = ompi_group_peer_lookup(local_comm->c_local_group,j);
            }
            rc = ompi_comm_overlapping_groups(local_comm->c_local_group->grp_proc_count,
                                              proc_list,
                                              rsize,
                                              rprocs);
        }
        if ( OMPI_SUCCESS != rc ) {
            goto err_exit;
        }
    }

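    /* build the remote group out of the process list just received */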
    new_group_pointer=ompi_group_allocate(rsize);
    if( NULL == new_group_pointer ) {
        OPAL_CR_EXIT_LIBRARY();
        return MPI_ERR_GROUP;
    }

    /* put group elements in the list */
    for (j = 0; j < rsize; j++) {
        new_group_pointer->grp_proc_pointers[j] = rprocs[j];
    }

    ompi_group_increment_proc_count(new_group_pointer);

    rc = ompi_comm_set ( &newcomp,                                  /* new comm */
                         local_comm,                                /* old comm */
                         local_comm->c_local_group->grp_proc_count, /* local_size */
                         NULL,                                      /* local_procs */
                         rsize,                                     /* remote_size */
                         NULL,                                      /* remote_procs */
                         NULL,                                      /* attrs */
                         local_comm->error_handler,                 /* error handler */
                         NULL,                                      /* topo module */
                         local_comm->c_local_group,                 /* local group */
                         new_group_pointer                          /* remote group */
                         );

    if ( NULL == newcomp ) {
        rc = MPI_ERR_INTERN;
        goto err_exit;
    }
    if ( MPI_SUCCESS != rc ) {
        goto err_exit;
    }

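    /* the new communicator now holds its own reference to the remote
       group, so release the reference taken at allocation time */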
    ompi_group_decrement_proc_count (new_group_pointer);
    OBJ_RELEASE(new_group_pointer);
    new_group_pointer = MPI_GROUP_NULL;

    /* Determine context id. It is identical to f_2_c_handle */
    rc = ompi_comm_nextcid ( newcomp,                     /* new comm */
                             local_comm,                  /* old comm */
                             bridge_comm,                 /* bridge comm */
                             &lleader,                    /* local leader */
                             &rleader,                    /* remote_leader */
                             OMPI_COMM_CID_INTRA_BRIDGE,  /* mode */
                             -1 );                        /* send_first */

    if ( MPI_SUCCESS != rc ) {
        goto err_exit;
    }

    /* activate comm and init coll-module */
    rc = ompi_comm_activate ( newcomp,                     /* new comm */
                              local_comm,                  /* old comm */
                              bridge_comm,                 /* bridge comm */
                              &lleader,                    /* local leader */
                              &rleader,                    /* remote_leader */
                              OMPI_COMM_CID_INTRA_BRIDGE,  /* mode */
                              -1,                          /* send_first */
                              0);                          /* sync_flag */

    if ( MPI_SUCCESS != rc ) {
        goto err_exit;
    }

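    /* both the success and the error paths fall through to the common
       cleanup below; on success rc is still MPI_SUCCESS */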
 err_exit:
    OPAL_CR_EXIT_LIBRARY();

    if ( NULL != rprocs ) {
        free ( rprocs );
    }
    if ( NULL != proc_list ) {
        free ( proc_list );
    }
    if ( OMPI_SUCCESS != rc ) {
        *newintercomm = MPI_COMM_NULL;
        return OMPI_ERRHANDLER_INVOKE(local_comm, MPI_ERR_INTERN,
                                      FUNC_NAME);
    }

    *newintercomm = newcomp;
    return MPI_SUCCESS;
}