/*
 * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation.  All rights reserved.
 * Copyright (c) 2004-2005 The University of Tennessee and The University
 *                         of Tennessee Research Foundation.  All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart.  All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 *
 */

#include "orte_config.h"

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif /* HAVE_UNISTD_H */
#ifdef HAVE_STRING_H
#include <string.h>
#endif /* HAVE_STRING_H */
#include <ctype.h>
#include <stdlib.h>

#include "opal/mca/base/mca_base_param.h"
#include "opal/util/argv.h"
#include "opal/util/opal_environ.h"

#include "orte/orte_constants.h"
#include "orte/mca/errmgr/errmgr.h"
#include "orte/mca/ns/ns.h"
#include "orte/mca/ns/base/base.h"
#include "orte/mca/sds/sds.h"
#include "orte/mca/sds/base/base.h"
#include "orte/mca/sds/slurm/sds_slurm.h"
#include "orte/util/proc_info.h"
#include "orte/util/sys_info.h"

/* Module interface published by the SLURM SDS component: the generic
   contact-universe routine from the base, plus SLURM-specific name
   setup and (no-op) teardown. */
orte_sds_base_module_t orte_sds_slurm_module = {
    orte_sds_base_basic_contact_universe,   /* contact universe */
    orte_sds_slurm_set_name,                /* set process name */
    orte_sds_slurm_finalize,                /* finalize */
};
/* Local helper: map a SLURM node id to its nodename string
   (returns a malloc'd copy; caller frees). */
static char *get_slurm_nodename(int nodeid);
int
|
|
|
|
orte_sds_slurm_set_name(void)
|
|
|
|
{
|
|
|
|
int rc;
|
|
|
|
int id;
|
|
|
|
char* name_string = NULL;
|
|
|
|
int slurm_nodeid;
|
|
|
|
|
2007-07-20 02:34:29 +00:00
|
|
|
/* start by getting our jobid, and vpid (which is the
|
2005-08-25 22:29:23 +00:00
|
|
|
starting vpid for the list of daemons) */
|
|
|
|
id = mca_base_param_register_string("ns", "nds", "name", NULL, NULL);
|
|
|
|
mca_base_param_lookup_string(id, &name_string);
|
|
|
|
|
|
|
|
if(name_string != NULL) {
|
|
|
|
if (ORTE_SUCCESS !=
|
Bring over the update to terminate orteds that are generated by a dynamic spawn such as comm_spawn. This introduces the concept of a job "family" - i.e., jobs that have a parent/child relationship. Comm_spawn'ed jobs have a parent (the one that spawned them). We track that relationship throughout the lineage - i.e., if a comm_spawned job in turn calls comm_spawn, then it has a parent (the one that spawned it) and a "root" job (the original job that started things).
Accordingly, there are new APIs to the name service to support the ability to get a job's parent, root, immediate children, and all its descendants. In addition, the terminate_job, terminate_orted, and signal_job APIs for the PLS have been modified to accept attributes that define the extent of their actions. For example, doing a "terminate_job" with an attribute of ORTE_NS_INCLUDE_DESCENDANTS will terminate the given jobid AND all jobs that descended from it.
I have tested this capability on a MacBook under rsh, Odin under SLURM, and LANL's Flash (bproc). It worked successfully on non-MPI jobs (both simple and including a spawn), and MPI jobs (again, both simple and with a spawn).
This commit was SVN r12597.
2006-11-14 19:34:59 +00:00
|
|
|
(rc = orte_ns.convert_string_to_process_name(&(orte_process_info.my_name),
|
2005-08-25 22:29:23 +00:00
|
|
|
name_string))) {
|
|
|
|
ORTE_ERROR_LOG(rc);
|
|
|
|
free(name_string);
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
free(name_string);
|
|
|
|
|
|
|
|
} else {
|
|
|
|
orte_jobid_t jobid;
|
|
|
|
orte_vpid_t vpid;
|
|
|
|
char* jobid_string;
|
|
|
|
char* vpid_string;
|
|
|
|
|
|
|
|
id = mca_base_param_register_string("ns", "nds", "jobid", NULL, NULL);
|
|
|
|
mca_base_param_lookup_string(id, &jobid_string);
|
|
|
|
if (NULL == jobid_string) {
|
|
|
|
ORTE_ERROR_LOG(ORTE_ERR_NOT_FOUND);
|
|
|
|
return ORTE_ERR_NOT_FOUND;
|
|
|
|
}
|
|
|
|
if (ORTE_SUCCESS != (rc = orte_ns.convert_string_to_jobid(&jobid, jobid_string))) {
|
|
|
|
ORTE_ERROR_LOG(rc);
|
|
|
|
return(rc);
|
|
|
|
}
|
|
|
|
|
|
|
|
id = mca_base_param_register_string("ns", "nds", "vpid", NULL, NULL);
|
|
|
|
mca_base_param_lookup_string(id, &vpid_string);
|
|
|
|
if (NULL == vpid_string) {
|
|
|
|
ORTE_ERROR_LOG(ORTE_ERR_NOT_FOUND);
|
|
|
|
return ORTE_ERR_NOT_FOUND;
|
|
|
|
}
|
|
|
|
if (ORTE_SUCCESS != (rc = orte_ns.convert_string_to_vpid(&vpid, vpid_string))) {
|
|
|
|
ORTE_ERROR_LOG(rc);
|
|
|
|
return(rc);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (ORTE_SUCCESS != (rc = orte_ns.create_process_name(&(orte_process_info.my_name),
|
|
|
|
jobid,
|
|
|
|
vpid))) {
|
|
|
|
ORTE_ERROR_LOG(rc);
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* fix up the base name and make it the "real" name */
|
|
|
|
slurm_nodeid = atoi(getenv("SLURM_NODEID"));
|
|
|
|
orte_process_info.my_name->vpid += slurm_nodeid;
|
|
|
|
|
|
|
|
/* fix up the system info nodename to match exactly what slurm returned */
|
|
|
|
if (NULL != orte_system_info.nodename) {
|
|
|
|
free(orte_system_info.nodename);
|
|
|
|
}
|
|
|
|
orte_system_info.nodename = get_slurm_nodename(slurm_nodeid);
|
|
|
|
|
2007-05-21 14:57:58 +00:00
|
|
|
/* get the non-name common environmental variables */
|
|
|
|
if (ORTE_SUCCESS != (rc = orte_sds_env_get())) {
|
|
|
|
ORTE_ERROR_LOG(rc);
|
|
|
|
return rc;
|
2005-08-25 22:29:23 +00:00
|
|
|
}
|
Compute and pass the local_rank and local number of procs (in that proc's job) on the node.
To be precise, given this hypothetical launching pattern:
host1: vpids 0, 2, 4, 6
host2: vpids 1, 3, 5, 7
The local_rank for these procs would be:
host1: vpids 0->local_rank 0, v2->lr1, v4->lr2, v6->lr3
host2: vpids 1->local_rank 0, v3->lr1, v5->lr2, v7->lr3
and the number of local procs on each node would be four. If vpid=0 then does a comm_spawn of one process on host1, the values of the parent job would remain unchanged. The local_rank of the child process would be 0 and its num_local_procs would be 1 since it is in a separate jobid.
I have verified this functionality for the rsh case - need to verify that slurm and other cases also get the right values. Some consolidation of common code is probably going to occur in the SDS components to make this simpler and more maintainable in the future.
This commit was SVN r14706.
2007-05-21 14:30:10 +00:00
|
|
|
|
2005-08-25 22:29:23 +00:00
|
|
|
return ORTE_SUCCESS;
|
|
|
|
}
int
|
|
|
|
orte_sds_slurm_finalize(void)
|
|
|
|
{
|
|
|
|
return ORTE_SUCCESS;
|
|
|
|
}
/*
 * Look up this node's name in the comma-separated node list passed by
 * the SLURM launcher through the OMPI_MCA_orte_slurm_nodelist
 * environment variable.
 *
 * Returns a malloc'd copy of the name (caller frees), or NULL if the
 * list is missing, cannot be split, or does not contain nodeid.
 */
static char *
get_slurm_nodename(int nodeid)
{
    char **names = NULL;
    char *slurm_nodelist;
    char *ret = NULL;

    slurm_nodelist = getenv("OMPI_MCA_orte_slurm_nodelist");

    if (NULL == slurm_nodelist) {
        return NULL;
    }

    /* split the node list into an argv array */
    names = opal_argv_split(slurm_nodelist, ',');
    if (NULL == names) { /* got an error */
        return NULL;
    }

    /* check to see if there are enough entries.  Valid indices are
       0 .. count-1: the original "nodeid > count" test let
       nodeid == count fall through and strdup the NULL terminator of
       the argv array.  Also reject negative ids from a bad atoi() */
    if (0 <= nodeid && nodeid < opal_argv_count(names)) {
        ret = strdup(names[nodeid]);
    }

    /* single exit frees the argv array - the original leaked it on
       the out-of-range path */
    opal_argv_free(names);

    /* All done */
    return ret;
}