openmpi/orte/mca/ess/slurm/ess_slurm_module.c
Ralph Castain 12cd07c9a9 Start reducing our dependency on the event library by removing at least one instance where we use it to redirect the program counter. Rolf reported occasional hangs of mpirun in very specific circumstances after all daemons were done. A review of MTT results indicates this may have been happening more generally in a small fraction of cases.
The problem was tracked to use of the grpcomm.onesided_barrier to control daemon/mpirun termination. This relied on messaging -and- required that the program counter jump from the errmgr back to grpcomm. On rare occasions, this jump did not occur, causing mpirun to hang.

This patch looks more invasive than it is - most of the affected files simply had one or two lines removed. The essence of the change is:

* pulled the job_complete and quit routines out of orterun and orted_main and put them in a common place

* modified the errmgr to directly call the new routines when termination is detected

* removed the grpcomm.onesided_barrier and its associated RML tag

* added a new "num_routes" API to the routed framework that reports the number of dependent routes. When route_lost is called, the daemon's list of "children" is checked and adjusted if that route went to a "leaf" in the routing tree

* used connection termination between daemons to track rollup of the daemon tree. Daemons and the HNP now terminate once num_routes returns zero (a sketch of this rollup follows below)
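
As a rough illustration of the rollup, consider a daemon's handling of a closed peer connection. This is a hedged sketch, not the patch itself: the routed calls (orte_routed.route_lost, orte_routed.num_routes) are the APIs named above, but the callback name and the orte_quit() name for the new common quit routine are assumptions for illustration.

    /* sketch: daemon-side reaction to a peer connection closing */
    static void peer_connection_closed(orte_process_name_t *peer)
    {
        /* tell the routed framework the route is gone - if the peer
         * was a "leaf" in the routing tree, the daemon's list of
         * "children" is adjusted accordingly */
        orte_routed.route_lost(peer);

        /* once no dependent routes remain, this daemon can exit;
         * its own connection closing then rolls the tree up toward
         * the HNP, which terminates the same way */
        if (0 == orte_routed.num_routes()) {
            orte_quit();   /* assumed name for the common quit routine */
        }
    }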

Also picked up in this commit is the addition of a new bool flag to the app_context struct and the widening of the job_control field from 8 to 16 bits. Both changes are trivial.
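
In header terms the two changes amount to something like the following; the flag's name is hypothetical, since the commit message doesn't give it, and the typedef name is assumed from ORTE's naming conventions:

    /* job_control values now need a 16-bit holder (was uint8_t) */
    typedef uint16_t orte_job_controls_t;

    typedef struct {
        /* ...existing app_context fields... */
        bool some_new_flag;   /* hypothetical name for the new bool flag */
    } orte_app_context_t;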

This commit was SVN r23429.
2010-07-17 21:03:27 +00:00

435 lines
13 KiB
C

/*
 * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation.  All rights reserved.
 * Copyright (c) 2004-2009 The University of Tennessee and The University
 *                         of Tennessee Research Foundation.  All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart.  All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * Copyright (c) 2008      Cisco Systems, Inc.  All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 *
 */
#include "orte_config.h"
#include "orte/constants.h"
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif /* HAVE_UNISTD_H */
#ifdef HAVE_STRING_H
#include <string.h>
#endif /* HAVE_STRING_H */
#include <ctype.h>
#include "opal/util/opal_environ.h"
#include "opal/util/output.h"
#include "opal/util/opal_sos.h"
#include "opal/mca/base/mca_base_param.h"
#include "opal/util/argv.h"
#include "opal/class/opal_pointer_array.h"
#include "opal/dss/dss.h"
#include "opal/mca/paffinity/paffinity.h"
#include "orte/util/proc_info.h"
#include "orte/util/show_help.h"
#include "orte/mca/errmgr/errmgr.h"
#include "orte/util/name_fns.h"
#include "orte/runtime/orte_globals.h"
#include "orte/util/nidmap.h"
#include "orte/mca/ess/ess.h"
#include "orte/mca/ess/base/base.h"
#include "orte/mca/ess/slurm/ess_slurm.h"
static char *get_slurm_nodename(int nodeid);
static int slurm_set_name(void);
static int rte_init(void);
static int rte_finalize(void);
static uint8_t proc_get_locality(orte_process_name_t *proc);
static orte_vpid_t proc_get_daemon(orte_process_name_t *proc);
static char* proc_get_hostname(orte_process_name_t *proc);
static orte_local_rank_t proc_get_local_rank(orte_process_name_t *proc);
static orte_node_rank_t proc_get_node_rank(orte_process_name_t *proc);
static int update_pidmap(opal_byte_object_t *bo);
static int update_nidmap(opal_byte_object_t *bo);
orte_ess_base_module_t orte_ess_slurm_module = {
    rte_init,
    rte_finalize,
    orte_ess_base_app_abort,
    proc_get_locality,
    proc_get_daemon,
    proc_get_hostname,
    proc_get_local_rank,
    proc_get_node_rank,
    update_pidmap,
    update_nidmap,
    orte_ess_base_query_sys_info,
    NULL /* ft_event */
};

/*
 * Local variables
 */
static orte_node_rank_t my_node_rank = ORTE_NODE_RANK_INVALID;
static int rte_init(void)
{
    int ret;
    char *error = NULL;
    char **hosts = NULL;
    char *slurm_nodelist;

    /* run the prolog */
    if (ORTE_SUCCESS != (ret = orte_ess_base_std_prolog())) {
        error = "orte_ess_base_std_prolog";
        goto error;
    }

    /* Start by getting a unique name - bail out if that fails,
     * since nothing else can work without a name
     */
    if (ORTE_SUCCESS != (ret = slurm_set_name())) {
        error = "slurm_set_name";
        goto error;
    }

    /* if I am a daemon, complete my setup using the
     * default procedure
     */
    if (ORTE_PROC_IS_DAEMON) {
        /* get the list of nodes used for this job */
        mca_base_param_reg_string_name("orte", "nodelist", "List of nodes in job",
                                       true, false, NULL, &slurm_nodelist);
        if (NULL != slurm_nodelist) {
            /* split the node list into an argv array */
            hosts = opal_argv_split(slurm_nodelist, ',');
            free(slurm_nodelist);
        }
        if (ORTE_SUCCESS != (ret = orte_ess_base_orted_setup(hosts))) {
            ORTE_ERROR_LOG(ret);
            error = "orte_ess_base_orted_setup";
            goto error;
        }
        opal_argv_free(hosts);
        return ORTE_SUCCESS;
    }

    if (ORTE_PROC_IS_TOOL) {
        /* otherwise, if I am a tool proc, use that procedure */
        if (ORTE_SUCCESS != (ret = orte_ess_base_tool_setup())) {
            ORTE_ERROR_LOG(ret);
            error = "orte_ess_base_tool_setup";
            goto error;
        }
        /* as a tool, I don't need a nidmap - so just return now */
        return ORTE_SUCCESS;
    }

    /* otherwise, I must be an application process - use
     * the default procedure to finish my setup
     */
    if (ORTE_SUCCESS != (ret = orte_ess_base_app_setup())) {
        ORTE_ERROR_LOG(ret);
        error = "orte_ess_base_app_setup";
        goto error;
    }

    /* setup the nidmap arrays */
    if (ORTE_SUCCESS != (ret = orte_util_nidmap_init(orte_process_info.sync_buf))) {
        ORTE_ERROR_LOG(ret);
        error = "orte_util_nidmap_init";
        goto error;
    }

    return ORTE_SUCCESS;

error:
    orte_show_help("help-orte-runtime.txt",
                   "orte_init:startup:internal-failure",
                   true, error, ORTE_ERROR_NAME(ret), ret);
    return ret;
}
static int rte_finalize(void)
{
    int ret;

    /* if I am a daemon, finalize using the default procedure */
    if (ORTE_PROC_IS_DAEMON) {
        if (ORTE_SUCCESS != (ret = orte_ess_base_orted_finalize())) {
            ORTE_ERROR_LOG(ret);
        }
    } else if (ORTE_PROC_IS_TOOL) {
        /* otherwise, if I am a tool proc, use that procedure */
        if (ORTE_SUCCESS != (ret = orte_ess_base_tool_finalize())) {
            ORTE_ERROR_LOG(ret);
        }
        /* as a tool, I didn't create a nidmap - so just return now */
        return ret;
    } else {
        /* otherwise, I must be an application process -
         * use the default procedure to finish
         */
        if (ORTE_SUCCESS != (ret = orte_ess_base_app_finalize())) {
            ORTE_ERROR_LOG(ret);
        }
    }

    /* deconstruct my nidmap and jobmap arrays */
    orte_util_nidmap_finalize();

    return ret;
}
static uint8_t proc_get_locality(orte_process_name_t *proc)
{
    orte_nid_t *nid;

    if (NULL == (nid = orte_util_lookup_nid(proc))) {
        ORTE_ERROR_LOG(ORTE_ERR_NOT_FOUND);
        return OPAL_PROC_NON_LOCAL;
    }

    if (nid->daemon == ORTE_PROC_MY_DAEMON->vpid) {
        OPAL_OUTPUT_VERBOSE((2, orte_ess_base_output,
                             "%s ess:slurm: proc %s is LOCAL",
                             ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                             ORTE_NAME_PRINT(proc)));
        return (OPAL_PROC_ON_NODE | OPAL_PROC_ON_CU | OPAL_PROC_ON_CLUSTER);
    }

    OPAL_OUTPUT_VERBOSE((2, orte_ess_base_output,
                         "%s ess:slurm: proc %s is REMOTE",
                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                         ORTE_NAME_PRINT(proc)));

    return OPAL_PROC_NON_LOCAL;
}
static orte_vpid_t proc_get_daemon(orte_process_name_t *proc)
{
    orte_nid_t *nid;

    if (ORTE_JOBID_IS_DAEMON(proc->jobid)) {
        return proc->vpid;
    }

    if (NULL == (nid = orte_util_lookup_nid(proc))) {
        return ORTE_VPID_INVALID;
    }

    OPAL_OUTPUT_VERBOSE((2, orte_ess_base_output,
                         "%s ess:slurm: proc %s is hosted by daemon %s",
                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                         ORTE_NAME_PRINT(proc),
                         ORTE_VPID_PRINT(nid->daemon)));

    return nid->daemon;
}
static char* proc_get_hostname(orte_process_name_t *proc)
{
    orte_nid_t *nid;

    if (NULL == (nid = orte_util_lookup_nid(proc))) {
        ORTE_ERROR_LOG(ORTE_ERR_NOT_FOUND);
        return NULL;
    }

    OPAL_OUTPUT_VERBOSE((2, orte_ess_base_output,
                         "%s ess:slurm: proc %s is on host %s",
                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                         ORTE_NAME_PRINT(proc),
                         nid->name));

    return nid->name;
}
static orte_local_rank_t proc_get_local_rank(orte_process_name_t *proc)
{
    orte_pmap_t *pmap;

    if (NULL == (pmap = orte_util_lookup_pmap(proc))) {
        ORTE_ERROR_LOG(ORTE_ERR_NOT_FOUND);
        return ORTE_LOCAL_RANK_INVALID;
    }

    OPAL_OUTPUT_VERBOSE((2, orte_ess_base_output,
                         "%s ess:slurm: proc %s has local rank %d",
                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                         ORTE_NAME_PRINT(proc),
                         (int)pmap->local_rank));

    return pmap->local_rank;
}
static orte_node_rank_t proc_get_node_rank(orte_process_name_t *proc)
{
    orte_pmap_t *pmap;

    /* is this me? */
    if (proc->jobid == ORTE_PROC_MY_NAME->jobid &&
        proc->vpid == ORTE_PROC_MY_NAME->vpid) {
        /* yes it is - reply with my rank. This is necessary
         * because the pidmap will not have arrived when I
         * am starting up, and if we use static ports, then
         * I need to know my node rank during init
         */
        return my_node_rank;
    }

    if (NULL == (pmap = orte_util_lookup_pmap(proc))) {
        return ORTE_NODE_RANK_INVALID;
    }

    OPAL_OUTPUT_VERBOSE((2, orte_ess_base_output,
                         "%s ess:slurm: proc %s has node rank %d",
                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                         ORTE_NAME_PRINT(proc),
                         (int)pmap->node_rank));

    return pmap->node_rank;
}
static int update_pidmap(opal_byte_object_t *bo)
{
    int ret;

    OPAL_OUTPUT_VERBOSE((2, orte_ess_base_output,
                         "%s ess:slurm: updating pidmap",
                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME)));

    /* build the pmap */
    if (ORTE_SUCCESS != (ret = orte_util_decode_pidmap(bo))) {
        ORTE_ERROR_LOG(ret);
    }

    return ret;
}
static int update_nidmap(opal_byte_object_t *bo)
{
    int rc;

    /* decode the nidmap - the util will know what to do */
    if (ORTE_SUCCESS != (rc = orte_util_decode_nodemap(bo))) {
        ORTE_ERROR_LOG(rc);
    }

    return rc;
}
static int slurm_set_name(void)
{
    int slurm_nodeid;
    int rc;
    orte_jobid_t jobid;
    orte_vpid_t vpid;
    char *tmp;
    char *envar;

    OPAL_OUTPUT_VERBOSE((1, orte_ess_base_output,
                         "ess:slurm setting name"));

    mca_base_param_reg_string_name("orte", "ess_jobid", "Process jobid",
                                   true, false, NULL, &tmp);
    if (NULL == tmp) {
        ORTE_ERROR_LOG(ORTE_ERR_NOT_FOUND);
        return ORTE_ERR_NOT_FOUND;
    }
    if (ORTE_SUCCESS != (rc = orte_util_convert_string_to_jobid(&jobid, tmp))) {
        ORTE_ERROR_LOG(rc);
        free(tmp);
        return rc;
    }
    free(tmp);

    mca_base_param_reg_string_name("orte", "ess_vpid", "Process vpid",
                                   true, false, NULL, &tmp);
    if (NULL == tmp) {
        ORTE_ERROR_LOG(ORTE_ERR_NOT_FOUND);
        return ORTE_ERR_NOT_FOUND;
    }
    if (ORTE_SUCCESS != (rc = orte_util_convert_string_to_vpid(&vpid, tmp))) {
        ORTE_ERROR_LOG(rc);
        free(tmp);
        return rc;
    }
    free(tmp);

    ORTE_PROC_MY_NAME->jobid = jobid;

    /* fix up the vpid and make it the "real" vpid: the base vpid we
     * were given is offset by our position in the SLURM allocation.
     * Guard against SLURM_NODEID being unset so we don't hand a NULL
     * pointer to atoi
     */
    if (NULL == (envar = getenv("SLURM_NODEID"))) {
        ORTE_ERROR_LOG(ORTE_ERR_NOT_FOUND);
        return ORTE_ERR_NOT_FOUND;
    }
    slurm_nodeid = atoi(envar);
    ORTE_PROC_MY_NAME->vpid = vpid + slurm_nodeid;

    OPAL_OUTPUT_VERBOSE((1, orte_ess_base_output,
                         "ess:slurm set name to %s", ORTE_NAME_PRINT(ORTE_PROC_MY_NAME)));

    /* get my node rank in case we are using static ports - this won't
     * be present for daemons, so don't error out if we don't have it
     */
    mca_base_param_reg_string_name("orte", "ess_node_rank", "Process node rank",
                                   true, false, NULL, &tmp);
    if (NULL != tmp) {
        my_node_rank = strtol(tmp, NULL, 10);
        free(tmp);
    }

    /* fix up the system info nodename to match exactly what slurm returned */
    if (NULL != orte_process_info.nodename) {
        free(orte_process_info.nodename);
    }
    orte_process_info.nodename = get_slurm_nodename(slurm_nodeid);

    OPAL_OUTPUT_VERBOSE((1, orte_ess_base_output,
                         "ess:slurm set nodename to %s",
                         orte_process_info.nodename));

    /* get the non-name common environmental variables */
    if (ORTE_SUCCESS != (rc = orte_ess_env_get())) {
        ORTE_ERROR_LOG(rc);
        return rc;
    }

    return ORTE_SUCCESS;
}
static char *
get_slurm_nodename(int nodeid)
{
    char **names = NULL;
    char *slurm_nodelist;
    char *ret;

    mca_base_param_reg_string_name("orte", "nodelist", "List of nodes in job",
                                   true, false, NULL, &slurm_nodelist);
    if (NULL == slurm_nodelist) {
        return NULL;
    }

    /* split the node list into an argv array */
    names = opal_argv_split(slurm_nodelist, ',');
    free(slurm_nodelist);
    if (NULL == names) { /* got an error */
        return NULL;
    }

    /* check to see if there are enough entries - nodeid is a
     * zero-based index, so it must be strictly less than the count
     */
    if (nodeid >= opal_argv_count(names)) {
        opal_argv_free(names);
        return NULL;
    }

    ret = strdup(names[nodeid]);
    opal_argv_free(names);

    /* All done */
    return ret;
}
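
For context on how this module is reached: nothing calls these static functions directly. They are invoked through the framework-level orte_ess interface once the component has been selected, which for this module happens when the component's query logic detects a SLURM environment. A minimal caller-side sketch, with "peer" as a hypothetical process name filled in for illustration:

    /* query where a peer lives via the selected ess module */
    orte_process_name_t peer;
    peer.jobid = ORTE_PROC_MY_NAME->jobid;
    peer.vpid = 0;

    char *host = orte_ess.proc_get_hostname(&peer);      /* NULL if unknown */
    orte_vpid_t daemon = orte_ess.proc_get_daemon(&peer); /* hosting daemon's vpid */
    uint8_t locality = orte_ess.proc_get_locality(&peer); /* OPAL_PROC_* flags */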