/*
 * Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation.  All rights reserved.
 * Copyright (c) 2004-2007 The University of Tennessee and The University
 *                         of Tennessee Research Foundation.  All rights
 *                         reserved.
 * Copyright (c) 2004-2006 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart.  All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * Copyright (c) 2006-2007 Cisco Systems, Inc.  All rights reserved.
 * Copyright (c) 2007      Los Alamos National Security, LLC.  All rights
 *                         reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 *
 * These symbols are in a file by themselves to provide nice linker
 * semantics.  Since linkers generally pull in symbols by object
 * files, keeping these symbols as the only symbols in this file
 * prevents utility programs such as "ompi_info" from having to import
 * entire components just to query their version and parameters.
 */

#include "orte_config.h"
#include "orte/constants.h"

#include <stdlib.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <errno.h>
#include <string.h>
#ifdef HAVE_STRINGS_H
#include <strings.h>
#endif
#ifdef HAVE_SYS_SELECT_H
#include <sys/select.h>
#endif
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#ifdef HAVE_SYS_STAT_H
#include <sys/stat.h>
#endif
#ifdef HAVE_SYS_WAIT_H
#include <sys/wait.h>
#endif
#include <fcntl.h>
#include <signal.h>
#ifdef HAVE_PWD_H
#include <pwd.h>
#endif

#include "opal/mca/installdirs/installdirs.h"
#include "opal/mca/base/mca_base_param.h"
#include "opal/util/if.h"
#include "opal/util/os_path.h"
#include "opal/util/path.h"
#include "opal/event/event.h"
#include "opal/util/show_help.h"
#include "opal/util/argv.h"
#include "opal/util/opal_environ.h"
#include "opal/util/output.h"
#include "opal/util/trace.h"
#include "opal/util/basename.h"
#include "opal/util/bit_ops.h"

#include "orte/util/session_dir.h"

#include "orte/runtime/orte_wait.h"
#include "orte/runtime/orte_wakeup.h"
#include "orte/runtime/orte_globals.h"
#include "orte/util/name_fns.h"
#include "orte/util/nidmap.h"

#include "orte/mca/rml/rml.h"
#include "orte/mca/errmgr/errmgr.h"
#include "orte/mca/ras/ras_types.h"
#include "orte/mca/rmaps/rmaps.h"
#include "orte/mca/routed/routed.h"
#include "orte/mca/grpcomm/grpcomm.h"
#include "orte/mca/odls/odls.h"
#include "orte/mca/rml/base/rml_contact.h"

#include "orte/mca/plm/plm.h"
#include "orte/mca/plm/base/base.h"
#include "orte/mca/plm/base/plm_private.h"
#include "orte/mca/plm/rsh/plm_rsh.h"

#if OMPI_HAVE_POSIX_THREADS && OMPI_THREADS_HAVE_DIFFERENT_PIDS && OMPI_ENABLE_PROGRESS_THREADS
static int orte_plm_rsh_launch_threaded(orte_job_t *jdata);
#endif

static int remote_spawn(opal_buffer_t *launch);

orte_plm_base_module_t orte_plm_rsh_module = {
    orte_plm_rsh_init,
    orte_plm_base_set_hnp_name,
#if OMPI_HAVE_POSIX_THREADS && OMPI_THREADS_HAVE_DIFFERENT_PIDS && OMPI_ENABLE_PROGRESS_THREADS
    orte_plm_rsh_launch_threaded,
#else
    orte_plm_rsh_launch,
#endif
    remote_spawn,
    orte_plm_rsh_terminate_job,
    orte_plm_rsh_terminate_orteds,
    orte_plm_rsh_signal_job,
    orte_plm_rsh_finalize
};

typedef enum {
    ORTE_PLM_RSH_SHELL_BASH = 0,
    ORTE_PLM_RSH_SHELL_ZSH,
    ORTE_PLM_RSH_SHELL_TCSH,
    ORTE_PLM_RSH_SHELL_CSH,
    ORTE_PLM_RSH_SHELL_KSH,
    ORTE_PLM_RSH_SHELL_SH,
    ORTE_PLM_RSH_SHELL_UNKNOWN
} orte_plm_rsh_shell_t;

/* These strings *must* follow the same order as the enum
   ORTE_PLM_RSH_SHELL_* */
static const char * orte_plm_rsh_shell_name[] = {
    "bash",
    "zsh",
    "tcsh", /* tcsh must come before csh, otherwise a substring search finds csh first */
    "csh",
    "ksh",
    "sh",
    "unknown"
};
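
/* The table above is indexed by orte_plm_rsh_shell_t: find_shell() matches the
 * basename of the user's login shell (from getpwuid() or $SHELL) against it,
 * and orte_plm_rsh_probe() matches the basename of the remote "echo $SHELL"
 * output the same way.  Anything that does not match falls back to
 * ORTE_PLM_RSH_SHELL_UNKNOWN and is handled as a Bourne-style shell. */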

/*
 * Local functions
 */
static void set_handler_default(int sig);
static orte_plm_rsh_shell_t find_shell(char *shell);
static int find_children(int rank, int parent, int me, int num_procs);

/* local global storage of timing variables */
static struct timeval joblaunchstart, joblaunchstop;

/* local global storage */
static orte_jobid_t active_job = ORTE_JOBID_INVALID;

/**
 * Init the module
 */
int orte_plm_rsh_init(void)
{
    int rc;

    if (ORTE_SUCCESS != (rc = orte_plm_base_comm_start())) {
        ORTE_ERROR_LOG(rc);
    }
    return rc;
}
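
/* The probe below determines the remote user's login shell without any help
 * from the remote side: it forks, points the child's stdout at a pipe, and
 * has the child exec the configured rsh/ssh agent with "echo $SHELL" as the
 * remote command.  The parent reads the pipe, strips the path and trailing
 * newline from the result, and looks the basename up in
 * orte_plm_rsh_shell_name[]. */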

/**
 * Check the Shell variable on the specified node
 */
static int orte_plm_rsh_probe(char *nodename,
                              orte_plm_rsh_shell_t *shell)
{
    char **argv;
    int argc, rc = ORTE_SUCCESS, i;
    int fd[2];
    pid_t pid;
    char outbuf[4096];

    OPAL_OUTPUT_VERBOSE((1, orte_plm_globals.output,
                         "%s plm:rsh: going to check SHELL variable on node %s",
                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                         nodename));

    *shell = ORTE_PLM_RSH_SHELL_UNKNOWN;
    if (pipe(fd)) {
        OPAL_OUTPUT_VERBOSE((1, orte_plm_globals.output,
                             "%s plm:rsh: pipe failed with errno=%d",
                             ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                             errno));
        return ORTE_ERR_IN_ERRNO;
    }
    if ((pid = fork()) < 0) {
        OPAL_OUTPUT_VERBOSE((1, orte_plm_globals.output,
                             "%s plm:rsh: fork failed with errno=%d",
                             ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                             errno));
        return ORTE_ERR_IN_ERRNO;
    }
    else if (pid == 0) {          /* child */
        if (dup2(fd[1], 1) < 0) {
            OPAL_OUTPUT_VERBOSE((1, orte_plm_globals.output,
                                 "%s plm:rsh: dup2 failed with errno=%d",
                                 ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                                 errno));
            exit(1);
        }
        /* Build argv array */
        argv = opal_argv_copy(mca_plm_rsh_component.agent_argv);
        argc = mca_plm_rsh_component.agent_argc;
        opal_argv_append(&argc, &argv, nodename);
        opal_argv_append(&argc, &argv, "echo $SHELL");

        execvp(argv[0], argv);
        exit(errno);
    }
    if (close(fd[1])) {
        OPAL_OUTPUT_VERBOSE((1, orte_plm_globals.output,
                             "%s plm:rsh: close failed with errno=%d",
                             ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                             errno));
        return ORTE_ERR_IN_ERRNO;
    }

    {
        ssize_t ret = 1;
        char* ptr = outbuf;
        size_t outbufsize = sizeof(outbuf);

        do {
            ret = read(fd[0], ptr, outbufsize-1);
            if (ret < 0) {
                if (errno == EINTR)
                    continue;
                OPAL_OUTPUT_VERBOSE((1, orte_plm_globals.output,
                                     "%s plm:rsh: Unable to detect the remote shell (error %s)",
                                     ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                                     strerror(errno)));
                rc = ORTE_ERR_IN_ERRNO;
                break;
            }
            if( outbufsize > 1 ) {
                outbufsize -= ret;
                ptr += ret;
            }
        } while( 0 != ret );
        *ptr = '\0';
    }
    close(fd[0]);

    if( outbuf[0] != '\0' ) {
        char *sh_name = rindex(outbuf, '/');
        if( NULL != sh_name ) {
            sh_name++; /* skip '/' */
            /* We cannot use "echo -n $SHELL" because -n is not portable. Therefore
             * we have to remove the "\n" */
            if ( sh_name[strlen(sh_name)-1] == '\n' ) {
                sh_name[strlen(sh_name)-1] = '\0';
            }
            /* Search the table of known shell names */
            for (i = 0; i < (int)(sizeof (orte_plm_rsh_shell_name)/
                                  sizeof(orte_plm_rsh_shell_name[0])); i++) {
                if ( 0 == strcmp(sh_name, orte_plm_rsh_shell_name[i]) ) {
                    *shell = i;
                    break;
                }
            }
        }
    }

    OPAL_OUTPUT_VERBOSE((1, orte_plm_globals.output,
                         "%s plm:rsh: node %s has SHELL: %s",
                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                         nodename,
                         (ORTE_PLM_RSH_SHELL_UNKNOWN == *shell) ? "UNHANDLED" : orte_plm_rsh_shell_name[*shell]));

    return rc;
}

/**
 * Callback on daemon exit.
 */
static void orte_plm_rsh_wait_daemon(pid_t pid, int status, void* cbdata)
{
    unsigned long deltat;
    orte_std_cntr_t cnt = 1;
    uint8_t flag;

    if (!WIFEXITED(status) || 0 != WEXITSTATUS(status)) { /* if abnormal exit */
        /* if we are not the HNP, send a message to the HNP alerting it
         * to the failure
         */
        if (!orte_process_info.hnp) {
            opal_buffer_t buf;
            orte_vpid_t *vpid = (orte_vpid_t*)cbdata;
            OPAL_OUTPUT_VERBOSE((1, orte_plm_globals.output,
                                 "%s daemon %d failed with status %d",
                                 ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                                 (int)*vpid, WEXITSTATUS(status)));
            OBJ_CONSTRUCT(&buf, opal_buffer_t);
            opal_dss.pack(&buf, &cnt, 1, ORTE_STD_CNTR);
            flag = 1;
            opal_dss.pack(&buf, &flag, 1, OPAL_UINT8);
            opal_dss.pack(&buf, vpid, 1, ORTE_VPID);
            orte_rml.send_buffer(ORTE_PROC_MY_HNP, &buf, ORTE_RML_TAG_REPORT_REMOTE_LAUNCH, 0);
            OBJ_DESTRUCT(&buf);
        } else {
            orte_proc_t *daemon = (orte_proc_t*)cbdata;
            OPAL_OUTPUT_VERBOSE((1, orte_plm_globals.output,
                                 "%s daemon %d failed with status %d",
                                 ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                                 (int)daemon->name.vpid, WEXITSTATUS(status)));
            /* note that this daemon failed */
            daemon->state = ORTE_PROC_STATE_FAILED_TO_START;
            /* report that the daemon has failed so we can exit */
            orte_plm_base_launch_failed(active_job, true, pid, status, ORTE_JOB_STATE_FAILED_TO_START);
        }
    }

    /* release any waiting threads */
    OPAL_THREAD_LOCK(&mca_plm_rsh_component.lock);

    if (mca_plm_rsh_component.num_children-- >=
        mca_plm_rsh_component.num_concurrent ||
        mca_plm_rsh_component.num_children == 0) {
        opal_condition_signal(&mca_plm_rsh_component.cond);
    }

    if (orte_timing && mca_plm_rsh_component.num_children == 0) {
        if (0 != gettimeofday(&joblaunchstop, NULL)) {
            opal_output(0, "plm_rsh: could not obtain job launch stop time");
        } else {
            deltat = (joblaunchstop.tv_sec - joblaunchstart.tv_sec)*1000000 +
                     (joblaunchstop.tv_usec - joblaunchstart.tv_usec);
            opal_output(0, "plm_rsh: total time to launch job is %lu usec", deltat);
        }
    }

    OPAL_THREAD_UNLOCK(&mca_plm_rsh_component.lock);
}
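
/* setup_launch() builds the argv "template" that every ssh invocation starts
 * from: the rsh/ssh agent argv, a "<template>" placeholder that is later
 * overwritten with the target node name (node_name_index1), the orted command
 * (local_exec_index), "--daemonize" when neither tree spawn nor the debug
 * flags are set, and the standard orted arguments appended by
 * orte_plm_base_orted_append_basic_args().  It also reports whether the local
 * and remote shells are sh- or csh-family so the caller knows how to prepend
 * PATH and LD_LIBRARY_PATH settings. */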

static int setup_launch(int *argcptr, char ***argvptr,
                        char *nodename,
                        int *node_name_index1, int *node_name_index2,
                        int *local_exec_index,
                        int *proc_vpid_index, char **lib_base, char **bin_base,
                        bool *remote_sh, bool *remote_csh)
{
    struct passwd *p;
    int argc;
    char **argv;
    char *param;
    orte_plm_rsh_shell_t shell;
    bool local_sh = false, local_csh = false;
    int rc;

    /* What is our local shell? */
    p = getpwuid(getuid());
    if( NULL == p ) {
        /* This user is unknown to the system. Therefore, there is no reason
         * to spawn anything on their behalf. Give up with a HUGE error message.
         */
        opal_show_help( "help-plm-rsh.txt", "unknown-user", true, (int)getuid() );
        return ORTE_ERR_FATAL;
    } else {
        param = p->pw_shell;
        shell = find_shell(p->pw_shell);
    }
    /* If we didn't find it in getpwuid(), try looking at the $SHELL
       environment variable (see https://svn.open-mpi.org/trac/ompi/ticket/1060)
    */
    if (ORTE_PLM_RSH_SHELL_UNKNOWN == shell &&
        NULL != (param = getenv("SHELL"))) {
        shell = find_shell(param);
    }

    switch (shell) {
    case ORTE_PLM_RSH_SHELL_SH:  /* fall through */
    case ORTE_PLM_RSH_SHELL_KSH: /* fall through */
    case ORTE_PLM_RSH_SHELL_ZSH: /* fall through */
    case ORTE_PLM_RSH_SHELL_BASH: local_sh = true; break;
    case ORTE_PLM_RSH_SHELL_TCSH: /* fall through */
    case ORTE_PLM_RSH_SHELL_CSH:  local_csh = true; break;
    default:
        opal_output(0, "WARNING: local probe returned unhandled shell: %s; assuming bash\n",
                    (NULL != param) ? param : "unknown");
        *remote_sh = true;
        break;
    }

    OPAL_OUTPUT_VERBOSE((1, orte_plm_globals.output,
                         "%s plm:rsh: local csh: %d, local sh: %d",
                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                         local_csh, local_sh));

    /* What is our remote shell? */
    if (mca_plm_rsh_component.assume_same_shell) {
        *remote_sh = local_sh;
        *remote_csh = local_csh;
        OPAL_OUTPUT_VERBOSE((1, orte_plm_globals.output,
                             "%s plm:rsh: assuming same remote shell as local shell",
                             ORTE_NAME_PRINT(ORTE_PROC_MY_NAME)));
    } else {
        orte_plm_rsh_shell_t shell;
        rc = orte_plm_rsh_probe(nodename, &shell);

        if (ORTE_SUCCESS != rc) {
            ORTE_ERROR_LOG(rc);
            return rc;
        }

        switch (shell) {
        case ORTE_PLM_RSH_SHELL_SH:  /* fall through */
        case ORTE_PLM_RSH_SHELL_KSH: /* fall through */
        case ORTE_PLM_RSH_SHELL_ZSH: /* fall through */
        case ORTE_PLM_RSH_SHELL_BASH: *remote_sh = true; break;
        case ORTE_PLM_RSH_SHELL_TCSH: /* fall through */
        case ORTE_PLM_RSH_SHELL_CSH:  *remote_csh = true; break;
        default:
            opal_output(0, "WARNING: rsh probe returned unhandled shell; assuming bash\n");
            *remote_sh = true;
        }
    }

    OPAL_OUTPUT_VERBOSE((1, orte_plm_globals.output,
                         "%s plm:rsh: remote csh: %d, remote sh: %d",
                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                         *remote_csh, *remote_sh));

    /*
     * Build argv array
     */
    argv = opal_argv_copy(mca_plm_rsh_component.agent_argv);
    argc = mca_plm_rsh_component.agent_argc;
    *node_name_index1 = argc;
    opal_argv_append(&argc, &argv, "<template>");

    /* add the daemon command (as specified by user) */
    *local_exec_index = argc;
    opal_argv_append(&argc, &argv, mca_plm_rsh_component.orted);

    /* if we are not tree launching or debugging, tell the daemon
     * to daemonize so we can launch the next group
     */
    if (!mca_plm_rsh_component.tree_spawn &&
        !orte_debug_flag &&
        !orte_debug_daemons_flag &&
        !orte_debug_daemons_file_flag) {
        opal_argv_append(&argc, &argv, "--daemonize");
    }

    /*
     * Add the basic arguments to the orted command line, including
     * all debug options
     */
    orte_plm_base_orted_append_basic_args(&argc, &argv,
                                          "env",
                                          proc_vpid_index,
                                          node_name_index2);

    if (0 < opal_output_get_verbosity(orte_plm_globals.output)) {
        param = opal_argv_join(argv, ' ');
        OPAL_OUTPUT_VERBOSE((1, orte_plm_globals.output,
                             "%s plm:rsh: final template argv:\n\t%s",
                             ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                             (NULL == param) ? "NULL" : param));
        if (NULL != param) free(param);
    }

    /* Figure out the basenames for the libdir and bindir.  This
       requires some explanation:

       - Use opal_install_dirs.libdir and opal_install_dirs.bindir.

       - After a discussion on the devel-core mailing list, the
         developers decided that we should use the local directory
         basenames as the basis for the prefix on the remote node.
         This does not handle a few notable cases (e.g., if the
         libdir/bindir is not simply a subdir under the prefix, if the
         libdir/bindir basename is not the same on the remote node as
         it is here on the local node, etc.), but we decided that
         --prefix was meant to handle "the common case".  If you need
         something more complex than this, a) edit your shell startup
         files to set PATH/LD_LIBRARY_PATH properly on the remote
         node, or b) use some new/to-be-defined options that
         explicitly allow setting the bindir/libdir on the remote
         node.  We decided to implement these options (e.g.,
         --remote-bindir and --remote-libdir) to orterun when it
         actually becomes a problem for someone (vs. a hypothetical
         situation).

       Hence, for now, we simply take the basename of this install's
       libdir and bindir and use it to append this install's prefix
       and use that on the remote node.
    */

    *lib_base = opal_basename(opal_install_dirs.libdir);
    *bin_base = opal_basename(opal_install_dirs.bindir);

    /* all done */
    *argcptr = argc;
    *argvptr = argv;
    return ORTE_SUCCESS;
}
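
/* ssh_child() runs in the forked child.  It copies the environment, pins the
 * remote PLM selection to "ssh", and, when a --prefix is in effect, rewrites
 * the orted token in the argv so the remote command first extends PATH and
 * LD_LIBRARY_PATH (sh-family or csh-family syntax, depending on the probed
 * remote shell) before invoking <prefix>/<bin>/orted.  It then fills in the
 * daemon vpid, resets signal handling, and execs the rsh/ssh agent; it never
 * returns except by exiting.  For illustration only (hypothetical host and
 * prefix), the sh-family form ends up roughly like:
 *
 *   ssh node37 PATH=/opt/openmpi/bin:$PATH ; export PATH ;
 *       LD_LIBRARY_PATH=/opt/openmpi/lib:$LD_LIBRARY_PATH ; export LD_LIBRARY_PATH ;
 *       /opt/openmpi/bin/orted --daemonize <orted args>
 */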

/* actually ssh the child */
static void ssh_child(int argc, char **argv,
                      orte_vpid_t vpid, int proc_vpid_index,
                      int local_exec_index, char *prefix_dir,
                      char *bin_base, char *lib_base,
                      bool remote_sh, bool remote_csh)
{
    char** env;
    char* var;
    long fd, fdmax = sysconf(_SC_OPEN_MAX);
    int rc;
    char *exec_path;
    char **exec_argv;
    int fdin;
    sigset_t sigs;

    /* setup environment */
    env = opal_argv_copy(environ);

    /* ensure that only the ssh plm is selected on the remote daemon */
    var = mca_base_param_environ_variable("plm", NULL, NULL);
    opal_setenv(var, "ssh", true, &env);
    free(var);

    /* We don't need to sense an oversubscribed condition and set the sched_yield
     * for the node as we are only launching the daemons at this time. The daemons
     * are now smart enough to set the oversubscribed condition themselves when
     * they launch the local procs.
     */

    /* We cannot launch locally as this would cause multiple daemons to
     * exist on a node (HNP counts as a daemon). This is taken care of
     * by the earlier check for daemon_preexists, so we only have to worry
     * about remote launches here
     */
    exec_argv = argv;
    exec_path = strdup(mca_plm_rsh_component.agent_path);

    if (NULL != prefix_dir) {
        char *opal_prefix = getenv("OPAL_PREFIX");
        if (remote_sh) {
            asprintf (&argv[local_exec_index],
                      "%s%s%s PATH=%s/%s:$PATH ; export PATH ; "
                      "LD_LIBRARY_PATH=%s/%s:$LD_LIBRARY_PATH ; export LD_LIBRARY_PATH ; "
                      "%s/%s/%s",
                      (opal_prefix != NULL ? "OPAL_PREFIX=" : ""),
                      (opal_prefix != NULL ? opal_prefix : ""),
                      (opal_prefix != NULL ? " ;" : ""),
                      prefix_dir, bin_base,
                      prefix_dir, lib_base,
                      prefix_dir, bin_base,
                      mca_plm_rsh_component.orted);
        } else if (remote_csh) {
            /* [t]csh is a bit more challenging -- we
               have to check whether LD_LIBRARY_PATH
               is already set before we try to set it.
               Must be very careful about obeying
               [t]csh's order of evaluation and not
               using a variable before it is defined.
               See this thread for more details:
               http://www.open-mpi.org/community/lists/users/2006/01/0517.php. */
            asprintf (&argv[local_exec_index],
                      "%s%s%s set path = ( %s/%s $path ) ; "
                      "if ( $?LD_LIBRARY_PATH == 1 ) "
                      "set OMPI_have_llp ; "
                      "if ( $?LD_LIBRARY_PATH == 0 ) "
                      "setenv LD_LIBRARY_PATH %s/%s ; "
                      "if ( $?OMPI_have_llp == 1 ) "
                      "setenv LD_LIBRARY_PATH %s/%s:$LD_LIBRARY_PATH ; "
                      "%s/%s/%s",
                      (opal_prefix != NULL ? "setenv OPAL_PREFIX " : ""),
                      (opal_prefix != NULL ? opal_prefix : ""),
                      (opal_prefix != NULL ? " ;" : ""),
                      prefix_dir, bin_base,
                      prefix_dir, lib_base,
                      prefix_dir, lib_base,
                      prefix_dir, bin_base,
                      mca_plm_rsh_component.orted);
        }
    }

    /* pass the vpid */
    rc = orte_util_convert_vpid_to_string(&var, vpid);
    if (ORTE_SUCCESS != rc) {
        opal_output(0, "orte_plm_rsh: unable to get daemon vpid as string");
        exit(-1);
    }
    free(argv[proc_vpid_index]);
    argv[proc_vpid_index] = strdup(var);
    free(var);

    /* setup stdin if verbosity is not set */
    if (0 > opal_output_get_verbosity(orte_plm_globals.output)) {
        fdin = open("/dev/null", O_RDWR);
        dup2(fdin, 0);
        close(fdin);
    }

    /* close all file descriptors w/ exception of stdin/stdout/stderr */
    for(fd=3; fd<fdmax; fd++)
        close(fd);

    /* Set signal handlers back to the default.  Do this close
       to the execve() because the event library may (and likely
       will) reset them.  If we don't do this, the event
       library may have left some set that, at least on some
       OS's, don't get reset via fork() or exec().  Hence, the
       orted could be unkillable (for example). */

    set_handler_default(SIGTERM);
    set_handler_default(SIGINT);
    set_handler_default(SIGHUP);
    set_handler_default(SIGPIPE);
    set_handler_default(SIGCHLD);

    /* Unblock all signals, for many of the same reasons that
       we set the default handlers, above.  This is noticeable
       on Linux where the event library blocks SIGTERM, but we
       don't want that blocked by the orted (or, more
       specifically, we don't want it to be blocked by the
       orted and then inherited by the ORTE processes that it
       forks, making them unkillable by SIGTERM). */
    sigprocmask(0, 0, &sigs);
    sigprocmask(SIG_UNBLOCK, &sigs, 0);

    /* exec the daemon */
    var = opal_argv_join(argv, ' ');
    OPAL_OUTPUT_VERBOSE((1, orte_plm_globals.output,
                         "%s plm:rsh: executing: (%s) [%s]",
                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                         exec_path, (NULL == var) ? "NULL" : var));
    if (NULL != var) free(var);

    execve(exec_path, exec_argv, env);
    opal_output(0, "plm:rsh: execve of %s failed with errno=%s(%d)\n",
                exec_path, strerror(errno), errno);
    exit(-1);
}
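
/* collected_uris is the buffer in which an intermediate daemon gathers its
 * children's contact information for reporting back to the HNP once its wave
 * of launches completes.  construct_daemonmap() is the receive side of the
 * nodemap packed by the HNP into the tree-spawn command (see
 * orte_plm_rsh_launch below): it pulls the byte object out of the buffer and
 * hands it to orte_util_decode_nodemap(), which populates orte_daemonmap so a
 * daemon can translate the vpids of its children into node names. */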

static opal_buffer_t collected_uris;

static int construct_daemonmap(opal_buffer_t *data)
{
    opal_byte_object_t *bo;
    orte_std_cntr_t cnt;
    int rc;

    /* extract the byte object holding the daemonmap */
    cnt = 1;
    if (ORTE_SUCCESS != (rc = opal_dss.unpack(data, &bo, &cnt, OPAL_BYTE_OBJECT))) {
        ORTE_ERROR_LOG(rc);
        return rc;
    }
    /* unpack the nodemap - this will free the bytes in bo */
    if (ORTE_SUCCESS != (rc = orte_util_decode_nodemap(bo, &orte_daemonmap))) {
        ORTE_ERROR_LOG(rc);
        return rc;
    }

    return ORTE_SUCCESS;
}
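
/* Tree spawn, as implemented in this module: instead of the HNP ssh-ing to
 * every node itself, it sends each already-launched daemon an
 * ORTE_DAEMON_TREE_SPAWN command whose payload carries the --prefix string
 * and the encoded nodemap.  remote_spawn() runs on a daemon when that command
 * arrives; it decodes the map, works out its own children in the routing tree
 * with find_children(), ssh-launches only those children, and reports any
 * launch failure back to the HNP on ORTE_RML_TAG_REPORT_REMOTE_LAUNCH. */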

/*
 * launch a set of daemons from a remote daemon
 */
static int remote_spawn(opal_buffer_t *launch)
{
    opal_list_item_t *item;
    orte_vpid_t vpid;
    orte_nid_t **nodes;
    int node_name_index1;
    int node_name_index2;
    int proc_vpid_index;
    int local_exec_index;
    char **argv = NULL;
    char *prefix;
    int argc;
    int rc;
    bool remote_sh = false, remote_csh = false;
    char *lib_base = NULL, *bin_base = NULL;
    bool failed_launch = true;
    pid_t pid;
    orte_std_cntr_t n;

    OPAL_OUTPUT_VERBOSE((1, orte_plm_globals.output,
                         "%s plm:rsh: remote spawn called",
                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME)));

    /* extract the prefix from the launch buffer */
    n = 1;
    if (ORTE_SUCCESS != (rc = opal_dss.unpack(launch, &prefix, &n, OPAL_STRING))) {
        ORTE_ERROR_LOG(rc);
        goto cleanup;
    }

    /* construct the daemonmap, if required - the decode function
     * will know what to do
     */
    if (ORTE_SUCCESS != (rc = construct_daemonmap(launch))) {
        ORTE_ERROR_LOG(rc);
        goto cleanup;
    }
    nodes = (orte_nid_t**)orte_daemonmap.addr;
    vpid = ORTE_PROC_MY_NAME->vpid;

    /* clear out any previous child info */
    while (NULL != (item = opal_list_remove_first(&mca_plm_rsh_component.children))) {
        OBJ_RELEASE(item);
    }
    /* reconstruct the child list */
    find_children(0, 0, ORTE_PROC_MY_NAME->vpid, orte_process_info.num_procs);

    /* if I have no children, just return */
    if (opal_list_is_empty(&mca_plm_rsh_component.children)) {
        OPAL_OUTPUT_VERBOSE((1, orte_plm_globals.output,
                             "%s plm:rsh: remote spawn - have no children!",
                             ORTE_NAME_PRINT(ORTE_PROC_MY_NAME)));
        failed_launch = false;
        rc = ORTE_SUCCESS;
        goto cleanup;
    }

    /* setup the launch */
    if (ORTE_SUCCESS != (rc = setup_launch(&argc, &argv, orte_process_info.nodename, &node_name_index1, &node_name_index2,
                                           &local_exec_index, &proc_vpid_index, &lib_base, &bin_base,
                                           &remote_sh, &remote_csh))) {
        ORTE_ERROR_LOG(rc);
        goto cleanup;
    }

    /* setup the collection buffer so I can report all the URI's back
     * to the HNP when the launch completes
     */
    OBJ_CONSTRUCT(&collected_uris, opal_buffer_t);

    for (item = opal_list_get_first(&mca_plm_rsh_component.children);
         item != opal_list_get_end(&mca_plm_rsh_component.children);
         item = opal_list_get_next(item)) {
        orte_namelist_t *child = (orte_namelist_t*)item;
        vpid = child->name.vpid;

        if (NULL == nodes[vpid]) {
            opal_output(0, "%s NULL in daemonmap at position %d",
                        ORTE_NAME_PRINT(ORTE_PROC_MY_NAME), (int)vpid);
            rc = ORTE_ERR_NOT_FOUND;
            goto cleanup;
        }

        free(argv[node_name_index1]);
        argv[node_name_index1] = strdup(nodes[vpid]->name);

        free(argv[node_name_index2]);
        argv[node_name_index2] = strdup(nodes[vpid]->name);

        /* fork a child to exec the rsh/ssh session */
        pid = fork();
        if (pid < 0) {
            ORTE_ERROR_LOG(ORTE_ERR_SYS_LIMITS_CHILDREN);
            rc = ORTE_ERR_SYS_LIMITS_CHILDREN;
            goto cleanup;
        }

        /* child */
        if (pid == 0) {
            OPAL_OUTPUT_VERBOSE((1, orte_plm_globals.output,
                                 "%s plm:rsh: launching on node %s",
                                 ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                                 nodes[vpid]->name));

            /* do the ssh launch - this will exit if it fails */
            ssh_child(argc, argv, vpid,
                      proc_vpid_index, local_exec_index, prefix, bin_base,
                      lib_base, remote_sh, remote_csh);

        } else { /* parent */
            OPAL_THREAD_LOCK(&mca_plm_rsh_component.lock);
            /* This situation can lead to a deadlock if '--debug-daemons' is set.
             * However, the deadlock condition is tested at the beginning of this
             * function, so we're quite confident it should not happen here.
             */
            if (mca_plm_rsh_component.num_children++ >=
                mca_plm_rsh_component.num_concurrent) {
                opal_condition_wait(&mca_plm_rsh_component.cond, &mca_plm_rsh_component.lock);
            }
            OPAL_THREAD_UNLOCK(&mca_plm_rsh_component.lock);

            /* setup callback on sigchild - wait until setup above is complete
             * as the callback can occur in the call to orte_wait_cb
             */
            orte_wait_cb(pid, orte_plm_rsh_wait_daemon, (void*)&vpid);
        }
    }

    failed_launch = false;

cleanup:
    if (NULL != lib_base) {
        free(lib_base);
    }
    if (NULL != bin_base) {
        free(bin_base);
    }

    if (NULL != argv) {
        opal_argv_free(argv);
    }

    /* check for failed launch */
    if (failed_launch) {
        /* report cannot launch this daemon to HNP */
        opal_buffer_t buf;
        orte_std_cntr_t cnt = 1;
        uint8_t flag = 1;
        OBJ_CONSTRUCT(&buf, opal_buffer_t);
        opal_dss.pack(&buf, &cnt, 1, ORTE_STD_CNTR);
        opal_dss.pack(&buf, &flag, 1, OPAL_UINT8);
        opal_dss.pack(&buf, &vpid, 1, ORTE_VPID);
        orte_rml.send_buffer(ORTE_PROC_MY_HNP, &buf, ORTE_RML_TAG_REPORT_REMOTE_LAUNCH, 0);
        OBJ_DESTRUCT(&buf);
    }

    return rc;
}
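
/* Overall flow of orte_plm_rsh_launch(): create a jobid and set up the job,
 * fetch the daemon map from the RMAPS framework, build the argv template once
 * with setup_launch(), optionally prepare the tree-spawn command buffer, then
 * walk the node list forking one rsh/ssh session per new daemon (throttled to
 * num_concurrent outstanding sessions).  Once every new daemon has called
 * back, the applications themselves are launched via
 * orte_plm_base_launch_apps(). */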

/**
 * Launch a daemon (bootproxy) on each node. The daemon will be responsible
 * for launching the application.
 */

/* When working in this function, ALWAYS jump to "cleanup" if
 * you encounter an error so that orterun will be woken up and
 * the job can cleanly terminate
 */
int orte_plm_rsh_launch(orte_job_t *jdata)
{
    orte_job_map_t *map;
    int node_name_index1;
    int node_name_index2;
    int proc_vpid_index;
    int local_exec_index;
    char **argv = NULL;
    char *prefix_dir;
    int argc;
    int rc;
    bool remote_sh = false, remote_csh = false;
    char *lib_base = NULL, *bin_base = NULL;
    bool failed_launch = true;
    orte_app_context_t **apps;
    orte_node_t **nodes;
    orte_std_cntr_t nnode;

    if (orte_timing) {
        if (0 != gettimeofday(&joblaunchstart, NULL)) {
            opal_output(0, "plm_rsh: could not obtain start time");
            joblaunchstart.tv_sec = 0;
            joblaunchstart.tv_usec = 0;
        }
    }

    /* create a jobid for this job */
    if (ORTE_SUCCESS != (rc = orte_plm_base_create_jobid(&jdata->jobid))) {
        ORTE_ERROR_LOG(rc);
        goto cleanup;
    }

    OPAL_OUTPUT_VERBOSE((1, orte_plm_globals.output,
                         "%s plm:rsh: setting up job %s",
                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                         ORTE_JOBID_PRINT(jdata->jobid)));

    /* setup the job */
    if (ORTE_SUCCESS != (rc = orte_plm_base_setup_job(jdata))) {
        ORTE_ERROR_LOG(rc);
        goto cleanup;
    }

    /* set the active jobid */
    active_job = jdata->jobid;

    /* Get the map for this job */
    if (NULL == (map = orte_rmaps.get_job_map(active_job))) {
        ORTE_ERROR_LOG(ORTE_ERR_NOT_FOUND);
        rc = ORTE_ERR_NOT_FOUND;
        goto cleanup;
    }
    apps = (orte_app_context_t**)jdata->apps->addr;
    nodes = (orte_node_t**)map->nodes->addr;

    if (0 == map->num_new_daemons) {
        /* have all the daemons we need - launch app */
        OPAL_OUTPUT_VERBOSE((1, orte_plm_globals.output,
                             "%s plm:rsh: no new daemons to launch",
                             ORTE_NAME_PRINT(ORTE_PROC_MY_NAME)));
        goto launch_apps;
    }

    if (0 < opal_output_get_verbosity(orte_plm_globals.output) &&
        mca_plm_rsh_component.num_concurrent < map->num_new_daemons) {
        /**
         * If we are in '--debug-daemons' we keep the ssh connection
         * alive for the span of the run. If we use this option
         * AND we launch on more than "num_concurrent" machines
         * then we will deadlock. No connections are terminated
         * until the job is complete, no job is started
         * since all the orteds are waiting for all the others
         * to come online, and the others are not launched because
         * we are waiting on those that have started to terminate
         * their ssh tunnels. :(
         * As we cannot run in this situation, pretty print the error
         * and return an error code.
         */
        opal_show_help("help-plm-rsh.txt", "deadlock-params",
                       true, mca_plm_rsh_component.num_concurrent, map->num_new_daemons);
        rc = ORTE_ERR_FATAL;
        goto cleanup;
    }

    /*
     * After a discussion between Ralph & Jeff, we concluded that we
     * really are handling the prefix dir option incorrectly. It currently
     * is associated with an app_context, yet it really refers to the
     * location where OpenRTE/Open MPI is installed on a NODE. Fixing
     * this right now would involve significant change to orterun as well
     * as elsewhere, so we will intentionally leave this incorrect at this
     * point. The error, however, is identical to that seen in all prior
     * releases of OpenRTE/Open MPI, so our behavior is no worse than before.
     *
     * A note to fix this, along with ideas on how to do so, has been filed
     * on the project's Trac system under "feature enhancement".
     *
     * For now, default to the prefix_dir provided in the first app_context.
     * Since there always MUST be at least one app_context, we are safe in
     * doing this.
     */
    prefix_dir = apps[0]->prefix_dir;

    /* setup the launch */
    if (ORTE_SUCCESS != (rc = setup_launch(&argc, &argv, nodes[0]->name, &node_name_index1, &node_name_index2,
                                           &local_exec_index, &proc_vpid_index, &lib_base, &bin_base,
                                           &remote_sh, &remote_csh))) {
        ORTE_ERROR_LOG(rc);
        goto cleanup;
    }

    /* if we are tree launching, find our children, get the launch cmd,
     * and setup the recv to hear of any remote failures
     */
    if (mca_plm_rsh_component.tree_spawn) {
        orte_daemon_cmd_flag_t command = ORTE_DAEMON_TREE_SPAWN;
        opal_byte_object_t bo, *boptr;
        orte_job_t *jdatorted;

        orte_tree_launch_cmd = OBJ_NEW(opal_buffer_t);
        /* insert the tree_spawn cmd */
        if (ORTE_SUCCESS != (rc = opal_dss.pack(orte_tree_launch_cmd, &command, 1, ORTE_DAEMON_CMD))) {
            ORTE_ERROR_LOG(rc);
            OBJ_RELEASE(orte_tree_launch_cmd);
            goto cleanup;
        }
        /* pack the prefix since this will be needed by the next wave */
        if (ORTE_SUCCESS != (rc = opal_dss.pack(orte_tree_launch_cmd, &prefix_dir, 1, OPAL_STRING))) {
            ORTE_ERROR_LOG(rc);
            OBJ_RELEASE(orte_tree_launch_cmd);
            goto cleanup;
        }
        /* construct a nodemap */
        if (ORTE_SUCCESS != (rc = orte_util_encode_nodemap(&bo))) {
            ORTE_ERROR_LOG(rc);
            OBJ_RELEASE(orte_tree_launch_cmd);
            goto cleanup;
        }
        /* store it */
        boptr = &bo;
        if (ORTE_SUCCESS != (rc = opal_dss.pack(orte_tree_launch_cmd, &boptr, 1, OPAL_BYTE_OBJECT))) {
            ORTE_ERROR_LOG(rc);
            OBJ_RELEASE(orte_tree_launch_cmd);
            free(bo.bytes);
            goto cleanup;
        }
        /* release the data since it has now been copied into our buffer */
        free(bo.bytes);
        /* get the orted job data object */
        if (NULL == (jdatorted = orte_get_job_data_object(ORTE_PROC_MY_NAME->jobid))) {
            ORTE_ERROR_LOG(ORTE_ERR_NOT_FOUND);
            return ORTE_ERR_NOT_FOUND;
        }
        find_children(0, 0, 0, jdatorted->num_procs);
    }
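
    /* Each pass through the loop below substitutes the target node into the
     * argv template (optionally as user@node), forks, and has the child exec
     * the rsh/ssh agent through ssh_child().  The parent marks the daemon as
     * launched, registers orte_plm_rsh_wait_daemon() as the waitpid callback,
     * and blocks on the component condition variable whenever num_concurrent
     * sessions are already outstanding. */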

    /*
     * Iterate through each of the nodes
     */
    nnode = 0;
    while (nnode < map->num_nodes) {
        pid_t pid;
        opal_list_item_t *item;

        /* if we are tree launching, only launch our own children */
        if (mca_plm_rsh_component.tree_spawn) {
            for (item = opal_list_get_first(&mca_plm_rsh_component.children);
                 item != opal_list_get_end(&mca_plm_rsh_component.children);
                 item = opal_list_get_next(item)) {
                orte_namelist_t *child = (orte_namelist_t*)item;
                if (child->name.vpid == nodes[nnode]->daemon->name.vpid) {
                    goto launch;
                }
            }
            /* didn't find it - ignore this node */
            goto next_node;
        }

    launch:
        /* if this daemon already exists, don't launch it! */
        if (nodes[nnode]->daemon_launched) {
            OPAL_OUTPUT_VERBOSE((1, orte_plm_globals.output,
                                 "%s plm:rsh:launch daemon already exists on node %s",
                                 ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                                 nodes[nnode]->name));
            goto next_node;
        }

        /* if the node's daemon has not been defined, then we
         * have an error!
         */
        if (NULL == nodes[nnode]->daemon) {
            ORTE_ERROR_LOG(ORTE_ERR_FATAL);
            OPAL_OUTPUT_VERBOSE((1, orte_plm_globals.output,
                                 "%s plm:rsh:launch daemon failed to be defined on node %s",
                                 ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                                 nodes[nnode]->name));
            rc = ORTE_ERR_FATAL;
            goto cleanup;
        }

        /* setup node name */
        free(argv[node_name_index1]);
        if (NULL != nodes[nnode]->username &&
            0 != strlen (nodes[nnode]->username)) {
            asprintf (&argv[node_name_index1], "%s@%s",
                      nodes[nnode]->username, nodes[nnode]->name);
        } else {
            argv[node_name_index1] = strdup(nodes[nnode]->name);
        }

        free(argv[node_name_index2]);
        argv[node_name_index2] = strdup(nodes[nnode]->name);

        OPAL_OUTPUT_VERBOSE((1, orte_plm_globals.output,
                             "%s plm:rsh: launching on node %s",
                             ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                             nodes[nnode]->name));

        /* fork a child to exec the rsh/ssh session */
        pid = fork();
        if (pid < 0) {
            ORTE_ERROR_LOG(ORTE_ERR_SYS_LIMITS_CHILDREN);
            rc = ORTE_ERR_SYS_LIMITS_CHILDREN;
            goto cleanup;
        }

        /* child */
        if (pid == 0) {
            /* do the ssh launch - this will exit if it fails */
            ssh_child(argc, argv, nodes[nnode]->daemon->name.vpid,
                      proc_vpid_index, local_exec_index, prefix_dir, bin_base,
                      lib_base, remote_sh, remote_csh);

        } else { /* parent */
            /* indicate this daemon has been launched */
            nodes[nnode]->daemon->state = ORTE_PROC_STATE_LAUNCHED;
            /* record the pid */
            nodes[nnode]->daemon->pid = pid;

            OPAL_OUTPUT_VERBOSE((1, orte_plm_globals.output,
                                 "%s plm:rsh: recording launch of daemon %s",
                                 ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                                 ORTE_NAME_PRINT(&nodes[nnode]->daemon->name)));

            OPAL_THREAD_LOCK(&mca_plm_rsh_component.lock);
            /* This situation can lead to a deadlock if '--debug-daemons' is set.
             * However, the deadlock condition is tested at the beginning of this
             * function, so we're quite confident it should not happen here.
             */
            if (mca_plm_rsh_component.num_children++ >=
                mca_plm_rsh_component.num_concurrent) {
                opal_condition_wait(&mca_plm_rsh_component.cond, &mca_plm_rsh_component.lock);
            }
            OPAL_THREAD_UNLOCK(&mca_plm_rsh_component.lock);

            /* setup callback on sigchild - wait until setup above is complete
             * as the callback can occur in the call to orte_wait_cb
             */
            orte_wait_cb(pid, orte_plm_rsh_wait_daemon, (void*)nodes[nnode]->daemon);

            /* if required - add delay to avoid problems w/ X11 authentication */
            if (0 < opal_output_get_verbosity(orte_plm_globals.output)
                && mca_plm_rsh_component.delay) {
                sleep(mca_plm_rsh_component.delay);
            }
        }
    next_node:
        nnode++;
    }

    /* wait for daemons to callback */
    if (ORTE_SUCCESS != (rc = orte_plm_base_daemon_callback(map->num_new_daemons))) {
        OPAL_OUTPUT_VERBOSE((1, orte_plm_globals.output,
                             "%s plm:rsh: daemon launch failed for job %s on error %s",
                             ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                             ORTE_JOBID_PRINT(active_job), ORTE_ERROR_NAME(rc)));
        goto cleanup;
    }

launch_apps:
    if (ORTE_SUCCESS != (rc = orte_plm_base_launch_apps(active_job))) {
        OPAL_OUTPUT_VERBOSE((1, orte_plm_globals.output,
                             "%s plm:rsh: launch of apps failed for job %s on error %s",
                             ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                             ORTE_JOBID_PRINT(active_job), ORTE_ERROR_NAME(rc)));
        goto cleanup;
    }

    /* get here if launch went okay */
    failed_launch = false;

cleanup:
    if (NULL != lib_base) {
        free(lib_base);
    }
    if (NULL != bin_base) {
        free(bin_base);
    }

    if (NULL != argv) {
        opal_argv_free(argv);
    }

    /* check for failed launch - if so, force terminate */
    if (failed_launch) {
        orte_plm_base_launch_failed(jdata->jobid, false, -1, ORTE_ERROR_DEFAULT_EXIT_CODE, ORTE_JOB_STATE_FAILED_TO_START);
    }

    return rc;
}
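
/* find_children() computes this daemon's children in a binomial tree over the
 * daemon vpids, rooted at vpid 0: a rank's children are rank | (1 << i) for
 * each bit position i above the rank's own highest set bit, clipped to
 * num_procs.  The recursion descends until it reaches "me", at which point
 * the children are appended to the component's child list and the parent rank
 * is returned.  For example, with num_procs = 8 the children of rank 0 are
 * 1, 2 and 4; rank 1 gets 3 and 5; rank 2 gets 6; rank 3 gets 7; ranks 4-7
 * are leaves. */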

static int find_children(int rank, int parent, int me, int num_procs)
{
    int i, bitmap, peer, hibit, mask, found;
    orte_namelist_t *child;

    /* is this me? */
    if (me == rank) {
        bitmap = opal_cube_dim(num_procs);

        hibit = opal_hibit(rank, bitmap);
        --bitmap;

        for (i = hibit + 1, mask = 1 << i; i <= bitmap; ++i, mask <<= 1) {
            peer = rank | mask;
            if (peer < num_procs) {
                child = OBJ_NEW(orte_namelist_t);
                child->name.jobid = ORTE_PROC_MY_NAME->jobid;
                child->name.vpid = peer;
                OPAL_OUTPUT_VERBOSE((3, orte_plm_globals.output,
                                     "%s plm:rsh find-children found child %s",
                                     ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                                     ORTE_NAME_PRINT(&child->name)));

                opal_list_append(&mca_plm_rsh_component.children, &child->item);
            }
        }
        return parent;
    }

    /* find the children of this rank */
    bitmap = opal_cube_dim(num_procs);

    hibit = opal_hibit(rank, bitmap);
    --bitmap;

    for (i = hibit + 1, mask = 1 << i; i <= bitmap; ++i, mask <<= 1) {
        peer = rank | mask;
        if (peer < num_procs) {
            /* execute compute on this child */
            if (0 <= (found = find_children(peer, rank, me, num_procs))) {
                return found;
            }
        }
    }
    return -1;
}

/**
 * Terminate all processes for a given job
 */
int orte_plm_rsh_terminate_job(orte_jobid_t jobid)
{
    int rc;

    /* order them to kill their local procs for this job */
    if (ORTE_SUCCESS != (rc = orte_plm_base_orted_kill_local_procs(jobid))) {
        ORTE_ERROR_LOG(rc);
    }

    return rc;
}

/**
 * Terminate the orteds
 */
int orte_plm_rsh_terminate_orteds(void)
{
    int rc;

    /* now tell them to die! */
    if (ORTE_SUCCESS != (rc = orte_plm_base_orted_exit())) {
        ORTE_ERROR_LOG(rc);
    }

    return rc;
}

int orte_plm_rsh_signal_job(orte_jobid_t jobid, int32_t signal)
{
    int rc;

    /* order them to pass this signal to their local procs */
    if (ORTE_SUCCESS != (rc = orte_plm_base_orted_signal_local_procs(jobid, signal))) {
        ORTE_ERROR_LOG(rc);
    }

    return rc;
}

int orte_plm_rsh_finalize(void)
{
    int rc;

    /* cleanup any pending recvs */
    if (ORTE_SUCCESS != (rc = orte_plm_base_comm_stop())) {
        ORTE_ERROR_LOG(rc);
    }
    return rc;
}
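
/* The section below only exists when ORTE is built with progress threads and
 * POSIX threads that report different PIDs.  In that configuration the launch
 * must run from the event/progress thread, so orte_plm_rsh_launch_threaded()
 * marshals its argument into an orte_plm_rsh_stack_t, schedules
 * orte_plm_rsh_launch_cb() through a zero-delay event timer, and sleeps on
 * the stack's condition variable until the callback signals completion. */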

/**
 * Handle threading issues.
 */

#if OMPI_HAVE_POSIX_THREADS && OMPI_THREADS_HAVE_DIFFERENT_PIDS && OMPI_ENABLE_PROGRESS_THREADS

struct orte_plm_rsh_stack_t {
    opal_condition_t cond;
    opal_mutex_t mutex;
    bool complete;
    orte_job_t *jdata;
    int rc;
};
typedef struct orte_plm_rsh_stack_t orte_plm_rsh_stack_t;

static void orte_plm_rsh_stack_construct(orte_plm_rsh_stack_t* stack)
{
    OBJ_CONSTRUCT(&stack->mutex, opal_mutex_t);
    OBJ_CONSTRUCT(&stack->cond, opal_condition_t);
    stack->rc = 0;
    stack->complete = false;
}

static void orte_plm_rsh_stack_destruct(orte_plm_rsh_stack_t* stack)
{
    OBJ_DESTRUCT(&stack->mutex);
    OBJ_DESTRUCT(&stack->cond);
}

static OBJ_CLASS_INSTANCE(
    orte_plm_rsh_stack_t,
    opal_object_t,
    orte_plm_rsh_stack_construct,
    orte_plm_rsh_stack_destruct);

static void orte_plm_rsh_launch_cb(int fd, short event, void* args)
{
    orte_plm_rsh_stack_t *stack = (orte_plm_rsh_stack_t*)args;
    OPAL_THREAD_LOCK(&stack->mutex);
    stack->rc = orte_plm_rsh_launch(stack->jdata);
    stack->complete = true;
    opal_condition_signal(&stack->cond);
    OPAL_THREAD_UNLOCK(&stack->mutex);
}

static int orte_plm_rsh_launch_threaded(orte_job_t *jdata)
{
    struct timeval tv = { 0, 0 };
    struct opal_event event;
    struct orte_plm_rsh_stack_t stack;

    OBJ_CONSTRUCT(&stack, orte_plm_rsh_stack_t);

    stack.jdata = jdata;
    if( opal_event_progress_thread() ) {
        stack.rc = orte_plm_rsh_launch( jdata );
    } else {
        opal_evtimer_set(&event, orte_plm_rsh_launch_cb, &stack);
        opal_evtimer_add(&event, &tv);

        OPAL_THREAD_LOCK(&stack.mutex);
        while (stack.complete == false) {
            opal_condition_wait(&stack.cond, &stack.mutex);
        }
        OPAL_THREAD_UNLOCK(&stack.mutex);
    }
    OBJ_DESTRUCT(&stack);
    return stack.rc;
}

#endif
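
/* Two small helpers: set_handler_default() restores SIG_DFL for a signal
 * (called from ssh_child() just before exec), and find_shell() maps a login
 * shell path (from the passwd entry or the $SHELL environment variable) onto
 * orte_plm_rsh_shell_t by stripping everything up to the last '/' and
 * comparing the basename against orte_plm_rsh_shell_name[]. */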

static void set_handler_default(int sig)
{
    struct sigaction act;

    act.sa_handler = SIG_DFL;
    act.sa_flags = 0;
    sigemptyset(&act.sa_mask);

    sigaction(sig, &act, (struct sigaction *)0);
}

static orte_plm_rsh_shell_t find_shell(char *shell)
{
    int i = 0;
    char *sh_name = NULL;

    sh_name = rindex(shell, '/');
    if (NULL == sh_name) {
        /* no '/' in the path - we cannot classify this shell */
        return ORTE_PLM_RSH_SHELL_UNKNOWN;
    }
    /* skip the '/' */
    ++sh_name;
    for (i = 0; i < (int)(sizeof (orte_plm_rsh_shell_name) /
                          sizeof(orte_plm_rsh_shell_name[0])); ++i) {
        if (0 == strcmp(sh_name, orte_plm_rsh_shell_name[i])) {
            return i;
        }
    }

    /* We didn't find it */
    return ORTE_PLM_RSH_SHELL_UNKNOWN;
}