/*
 * Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation.  All rights reserved.
 * Copyright (c) 2004-2007 The University of Tennessee and The University
 *                         of Tennessee Research Foundation.  All rights
 *                         reserved.
 * Copyright (c) 2004-2006 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart.  All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * Copyright (c) 2006-2007 Cisco Systems, Inc.  All rights reserved.
 * Copyright (c) 2007      Los Alamos National Security, LLC.  All rights
 *                         reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 *
 * These symbols are in a file by themselves to provide nice linker
 * semantics.  Since linkers generally pull in symbols by object
 * files, keeping these symbols as the only symbols in this file
 * prevents utility programs such as "ompi_info" from having to import
 * entire components just to query their version and parameters.
 */

#include "orte_config.h"
#include "orte/orte_constants.h"

#include <stdlib.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <errno.h>
#include <string.h>
#ifdef HAVE_STRINGS_H
#include <strings.h>
#endif
#ifdef HAVE_SYS_SELECT_H
#include <sys/select.h>
#endif
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#ifdef HAVE_SYS_STAT_H
#include <sys/stat.h>
#endif
#ifdef HAVE_SYS_WAIT_H
#include <sys/wait.h>
#endif
#include <fcntl.h>
#include <signal.h>
#ifdef HAVE_PWD_H
#include <pwd.h>
#endif

#include "opal/mca/installdirs/installdirs.h"
#include "opal/mca/base/mca_base_param.h"
#include "opal/util/if.h"
#include "opal/util/os_path.h"
#include "opal/util/path.h"
#include "opal/event/event.h"
#include "opal/util/show_help.h"
#include "opal/util/argv.h"
#include "opal/util/opal_environ.h"
#include "opal/util/output.h"
#include "opal/util/trace.h"
#include "opal/util/basename.h"

#include "orte/util/sys_info.h"
#include "orte/util/univ_info.h"
#include "orte/util/session_dir.h"
#include "orte/runtime/orte_wait.h"
#include "orte/runtime/orte_wakeup.h"
#include "orte/runtime/params.h"
#include "orte/dss/dss.h"
#include "orte/mca/ns/ns.h"
#include "orte/mca/rml/rml.h"
#include "orte/mca/gpr/gpr.h"
#include "orte/mca/errmgr/errmgr.h"
#include "orte/mca/ras/ras_types.h"
#include "orte/mca/rmaps/rmaps.h"
#include "orte/mca/smr/smr.h"
#include "orte/mca/pls/pls.h"
#include "orte/mca/pls/base/base.h"
#include "orte/mca/pls/base/pls_private.h"
#include "orte/mca/pls/rsh/pls_rsh.h"

#if OMPI_HAVE_POSIX_THREADS && OMPI_THREADS_HAVE_DIFFERENT_PIDS && OMPI_ENABLE_PROGRESS_THREADS
static int orte_pls_rsh_launch_threaded(orte_jobid_t jobid);
#endif

orte_pls_base_module_t orte_pls_rsh_module = {
#if OMPI_HAVE_POSIX_THREADS && OMPI_THREADS_HAVE_DIFFERENT_PIDS && OMPI_ENABLE_PROGRESS_THREADS
    orte_pls_rsh_launch_threaded,
#else
    orte_pls_rsh_launch,
#endif
    orte_pls_rsh_terminate_job,
    orte_pls_rsh_terminate_orteds,
    orte_pls_rsh_terminate_proc,
    orte_pls_rsh_signal_job,
    orte_pls_rsh_signal_proc,
    orte_pls_rsh_finalize
};

typedef enum {
    ORTE_PLS_RSH_SHELL_BASH = 0,
    ORTE_PLS_RSH_SHELL_ZSH,
    ORTE_PLS_RSH_SHELL_TCSH,
    ORTE_PLS_RSH_SHELL_CSH,
    ORTE_PLS_RSH_SHELL_KSH,
    ORTE_PLS_RSH_SHELL_SH,
    ORTE_PLS_RSH_SHELL_UNKNOWN
} orte_pls_rsh_shell_t;

/* These strings *must* follow the same order as the enum ORTE_PLS_RSH_SHELL_* */
static const char *orte_pls_rsh_shell_name[] = {
    "bash",
    "zsh",
    "tcsh",    /* tcsh must come before csh; otherwise a substring search finds csh first */
    "csh",
    "ksh",
    "sh",
    "unknown"
};

/*
 * Local functions
 */
static void set_handler_default(int sig);
static orte_pls_rsh_shell_t find_shell(char *shell);
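
/*
 * Illustrative note (not part of the original logic): the table above is
 * indexed directly by orte_pls_rsh_shell_t, so a detected shell can be turned
 * back into a printable name, e.g.
 *
 *     orte_pls_rsh_shell_t sh = find_shell("/bin/tcsh");
 *     opal_output(0, "detected shell: %s", orte_pls_rsh_shell_name[sh]);
 *
 * This mirrors how orte_pls_rsh_launch() below feeds p->pw_shell and $SHELL
 * to find_shell(), and how orte_pls_rsh_probe() reports its result in debug
 * output.
 */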

/* local global storage of timing variables */
static struct timeval joblaunchstart, joblaunchstop;

/* global storage of active jobid being launched */
static orte_jobid_t active_job = ORTE_JOBID_INVALID;

/**
 * Check the Shell variable on the specified node
 */
static int orte_pls_rsh_probe(orte_mapped_node_t *node, orte_pls_rsh_shell_t *shell)
{
    char **argv;
    int argc, rc = ORTE_SUCCESS, i;
    int fd[2];
    pid_t pid;
    char outbuf[4096];

    if (mca_pls_rsh_component.debug) {
        opal_output(0, "pls:rsh: going to check SHELL variable on node %s\n",
                    node->nodename);
    }
    *shell = ORTE_PLS_RSH_SHELL_UNKNOWN;
    if (pipe(fd)) {
        opal_output(0, "pls:rsh: pipe failed with errno=%d\n", errno);
        return ORTE_ERR_IN_ERRNO;
    }
    if ((pid = fork()) < 0) {
        opal_output(0, "pls:rsh: fork failed with errno=%d\n", errno);
        return ORTE_ERR_IN_ERRNO;
    } else if (pid == 0) {    /* child */
        if (dup2(fd[1], 1) < 0) {
            opal_output(0, "pls:rsh: dup2 failed with errno=%d\n", errno);
            exit(1);
        }
        /* Build argv array */
        argv = opal_argv_copy(mca_pls_rsh_component.agent_argv);
        argc = mca_pls_rsh_component.agent_argc;
        opal_argv_append(&argc, &argv, node->nodename);
        opal_argv_append(&argc, &argv, "echo $SHELL");

        execvp(argv[0], argv);
        exit(errno);
    }
    if (close(fd[1])) {
        opal_output(0, "pls:rsh: close failed with errno=%d\n", errno);
        return ORTE_ERR_IN_ERRNO;
    }

    {
        ssize_t ret = 1;
        char *ptr = outbuf;
        size_t outbufsize = sizeof(outbuf);

        do {
            ret = read(fd[0], ptr, outbufsize - 1);
            if (ret < 0) {
                if (errno == EINTR) {
                    continue;
                }
                opal_output(0, "Unable to detect the remote shell (error %s)\n",
                            strerror(errno));
                rc = ORTE_ERR_IN_ERRNO;
                break;
            }
            if (outbufsize > 1) {
                outbufsize -= ret;
                ptr += ret;
            }
        } while (0 != ret);
        *ptr = '\0';
    }
    close(fd[0]);

    if (outbuf[0] != '\0') {
        char *sh_name = rindex(outbuf, '/');
        if (NULL != sh_name) {
            sh_name++;    /* skip '/' */
            /* We cannot use "echo -n $SHELL" because -n is not portable.
             * Therefore we have to remove the "\n" */
            if (sh_name[strlen(sh_name) - 1] == '\n') {
                sh_name[strlen(sh_name) - 1] = '\0';
            }
            /* Search for a match among the known shell names */
            for (i = 0; i < (int)(sizeof(orte_pls_rsh_shell_name) /
                                  sizeof(orte_pls_rsh_shell_name[0])); i++) {
                if (0 == strcmp(sh_name, orte_pls_rsh_shell_name[i])) {
                    *shell = i;
                    break;
                }
            }
        }
    }
    if (mca_pls_rsh_component.debug) {
        if (ORTE_PLS_RSH_SHELL_UNKNOWN == *shell) {
            opal_output(0, "pls:rsh: node:%s has unhandled SHELL\n",
                        node->nodename);
        } else {
            opal_output(0, "pls:rsh: node:%s has SHELL: %s\n",
                        node->nodename, orte_pls_rsh_shell_name[*shell]);
        }
    }
    return rc;
}

/**
 * Fill the exec_path variable with the directory to the orted
 */
static int orte_pls_rsh_fill_exec_path(char **exec_path)
{
    struct stat buf;

    asprintf(exec_path, "%s/orted", opal_install_dirs.bindir);
    if (0 != stat(*exec_path, &buf)) {
        char *path = getenv("PATH");
        if (NULL == path) {
            path = "PATH is empty!";
        }
        opal_show_help("help-pls-rsh.txt", "no-local-orted",
                       true, path, opal_install_dirs.bindir);
        return ORTE_ERR_NOT_FOUND;
    }
    return ORTE_SUCCESS;
}

/**
 * Callback on daemon exit.
 */
static void orte_pls_rsh_wait_daemon(pid_t pid, int status, void *cbdata)
{
    int rc;
    unsigned long deltat;
    orte_buffer_t ack;
    int src[3] = {-1, -1};
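
    /* Treat anything other than a clean exit(0) of the local ssh/rsh child as
     * a failed daemon launch: either it was killed by a signal or it exited
     * with a non-zero status. */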
    if (!WIFEXITED(status) || 0 != WEXITSTATUS(status)) {
        /* tell the user something went wrong */
        opal_output(0, "ERROR: A daemon failed to start as expected.");
        opal_output(0, "ERROR: There may be more information available from");
        opal_output(0, "ERROR: the remote shell (see above).");

        if (WIFEXITED(status)) {
            opal_output(0, "ERROR: The daemon exited unexpectedly with status %d.",
                        WEXITSTATUS(status));
        } else if (WIFSIGNALED(status)) {
#ifdef WCOREDUMP
            if (WCOREDUMP(status)) {
                opal_output(0, "The daemon received a signal %d (with core).",
                            WTERMSIG(status));
            } else {
                opal_output(0, "The daemon received a signal %d.", WTERMSIG(status));
            }
#else
            opal_output(0, "The daemon received a signal %d.", WTERMSIG(status));
#endif /* WCOREDUMP */
        } else {
            opal_output(0, "No extra status information is available: %d.", status);
        }

        /* need to fake a message to the daemon callback system so it can break out
         * of its receive loop */
        src[2] = pid;
        if (WIFSIGNALED(status)) {
            src[1] = WTERMSIG(status);
        }
        OBJ_CONSTRUCT(&ack, orte_buffer_t);
        if (ORTE_SUCCESS != (rc = orte_dss.pack(&ack, &src, 3, ORTE_INT))) {
            ORTE_ERROR_LOG(rc);
        }
        rc = orte_rml.send_buffer(ORTE_PROC_MY_NAME, &ack, ORTE_RML_TAG_ORTED_CALLBACK, 0);
        if (0 > rc) {
            ORTE_ERROR_LOG(rc);
        }
        OBJ_DESTRUCT(&ack);

        /* The usual reasons for ssh to exit abnormally all are a pretty good
         * indication that the child processes aren't going to start up properly.
         * Set the job state to indicate we failed to launch so orterun's exit
         * status will be non-zero and forcibly terminate the job so orterun
         * can exit. */
        if (ORTE_SUCCESS != (rc = orte_smr.set_job_state(active_job,
                                                         ORTE_JOB_STATE_FAILED_TO_START))) {
            ORTE_ERROR_LOG(rc);
        }
        if (ORTE_SUCCESS != (rc = orte_wakeup(active_job))) {
            ORTE_ERROR_LOG(rc);
        }
    }  /* if abnormal exit */

    /* release any waiting threads */
    OPAL_THREAD_LOCK(&mca_pls_rsh_component.lock);

    if (mca_pls_rsh_component.num_children-- >=
        mca_pls_rsh_component.num_concurrent ||
        mca_pls_rsh_component.num_children == 0) {
        opal_condition_signal(&mca_pls_rsh_component.cond);
    }

    if (mca_pls_rsh_component.timing && mca_pls_rsh_component.num_children == 0) {
        if (0 != gettimeofday(&joblaunchstop, NULL)) {
            opal_output(0, "pls_rsh: could not obtain job launch stop time");
        } else {
            deltat = (joblaunchstop.tv_sec - joblaunchstart.tv_sec) * 1000000 +
                     (joblaunchstop.tv_usec - joblaunchstart.tv_usec);
            opal_output(0, "pls_rsh: total time to launch job is %lu usec", deltat);
        }
    }

    OPAL_THREAD_UNLOCK(&mca_pls_rsh_component.lock);
}
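
/*
 * Note on the fake ack sent above (descriptive only): the three packed ints
 * end up as { -1, terminating signal or -1, pid of the ssh/rsh child }, which
 * is enough for the orted callback receive on ORTE_RML_TAG_ORTED_CALLBACK to
 * unblock even though the daemon itself never phoned home.
 */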

/**
 * Launch a daemon (bootproxy) on each node. The daemon will be responsible
 * for launching the application.
 */

/* When working in this function, ALWAYS jump to "cleanup" if
 * you encounter an error so that orterun will be woken up and
 * the job can cleanly terminate
 */
int orte_pls_rsh_launch(orte_jobid_t jobid)
{
    orte_job_map_t *map = NULL;
    opal_list_item_t *n_item;
    orte_mapped_node_t *rmaps_node;
    int node_name_index1;
    int node_name_index2;
    int proc_name_index;
    int local_exec_index, local_exec_index_end;
    char *jobid_string = NULL;
    char *param;
    char **argv = NULL;
    char *prefix_dir;
    int argc;
    int rc;
    sigset_t sigs;
    struct passwd *p;
    bool remote_sh = false, remote_csh = false;
    bool local_sh = false, local_csh = false;
    char *lib_base = NULL, *bin_base = NULL;
    bool failed_launch = true;
    orte_pls_rsh_shell_t shell;

    if (mca_pls_rsh_component.debug) {
        opal_output(0, "pls:rsh: launching job %ld", (long)jobid);
    }

    if (mca_pls_rsh_component.timing) {
        if (0 != gettimeofday(&joblaunchstart, NULL)) {
            opal_output(0, "pls_rsh: could not obtain start time");
            joblaunchstart.tv_sec = 0;
            joblaunchstart.tv_usec = 0;
        }
    }

    /* set the active jobid */
    active_job = jobid;

    /* Get the map for this job.
     * We need the entire mapping for a couple of reasons:
     *  - need the prefix to start with.
     *  - need to know the nodes we are launching on
     * All other mapping responsibilities fall to orted in the fork PLS
     */
    rc = orte_rmaps.get_job_map(&map, jobid);
    if (ORTE_SUCCESS != rc) {
        ORTE_ERROR_LOG(rc);
        goto cleanup;
    }

    if (0 == map->num_new_daemons) {
        /* have all the daemons we need - launch app */
        if (mca_pls_rsh_component.debug) {
            opal_output(0, "pls:rsh: no new daemons to launch");
        }
        goto launch_apps;
    }

    if (mca_pls_rsh_component.debug_daemons &&
        mca_pls_rsh_component.num_concurrent < map->num_new_daemons) {
        /**
         * If we are in '--debug-daemons' we keep the ssh connection
         * alive for the span of the run. If we use this option
         * AND we launch on more than "num_concurrent" machines
         * then we will deadlock. No connections are terminated
         * until the job is complete, no job is started
         * since all the orteds are waiting for all the others
         * to come online, and the others are not launched because
         * we are waiting on those that have started to terminate
         * their ssh tunnels. :(
         * As we cannot run in this situation, pretty-print the error
         * and return an error code.
         */
        opal_show_help("help-pls-rsh.txt", "deadlock-params",
                       true, mca_pls_rsh_component.num_concurrent,
                       map->num_new_daemons);
        rc = ORTE_ERR_FATAL;
        goto cleanup;
    }
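
    /* Workaround sketch (assuming the component exposes num_concurrent as the
     * MCA parameter "pls_rsh_num_concurrent"): raise the limit to at least the
     * number of daemons being launched, e.g.
     *
     *     mpirun --debug-daemons -mca pls_rsh_num_concurrent 256 ...
     *
     * so every ssh connection can be kept alive for the span of the run. */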

    /*
     * After a discussion between Ralph & Jeff, we concluded that we
     * really are handling the prefix dir option incorrectly. It currently
     * is associated with an app_context, yet it really refers to the
     * location where OpenRTE/Open MPI is installed on a NODE. Fixing
     * this right now would involve significant change to orterun as well
     * as elsewhere, so we will intentionally leave this incorrect at this
     * point. The error, however, is identical to that seen in all prior
     * releases of OpenRTE/Open MPI, so our behavior is no worse than before.
     *
     * A note to fix this, along with ideas on how to do so, has been filed
     * on the project's Trac system under "feature enhancement".
     *
     * For now, default to the prefix_dir provided in the first app_context.
     * Since there always MUST be at least one app_context, we are safe in
     * doing this.
     */
    prefix_dir = map->apps[0]->prefix_dir;

    /* What is our local shell? */
    p = getpwuid(getuid());
    if (NULL == p) {
        /* This user is unknown to the system, so there is no reason to
         * spawn anything on their behalf. Give up with a clear error
         * message.
         */
        opal_show_help("help-pls-rsh.txt", "unknown-user", true, (int)getuid());
        rc = ORTE_ERR_FATAL;
        goto cleanup;
    } else {
        param = p->pw_shell;
        shell = find_shell(p->pw_shell);
    }
    /* If we didn't find it in getpwuid(), try looking at the $SHELL
     * environment variable (see https://svn.open-mpi.org/trac/ompi/ticket/1060)
     */
    if (ORTE_PLS_RSH_SHELL_UNKNOWN == shell &&
        NULL != (param = getenv("SHELL"))) {
        shell = find_shell(param);
    }

    switch (shell) {
    case ORTE_PLS_RSH_SHELL_SH:   /* fall through */
    case ORTE_PLS_RSH_SHELL_KSH:  /* fall through */
    case ORTE_PLS_RSH_SHELL_ZSH:  /* fall through */
    case ORTE_PLS_RSH_SHELL_BASH: local_sh = true; break;
    case ORTE_PLS_RSH_SHELL_TCSH: /* fall through */
    case ORTE_PLS_RSH_SHELL_CSH:  local_csh = true; break;
    default:
        opal_output(0, "WARNING: local probe returned unhandled shell:%s assuming bash\n",
                    (NULL != param) ? param : "unknown");
        local_sh = true;
        break;
    }

    if (mca_pls_rsh_component.debug) {
        opal_output(0, "pls:rsh: local csh: %d, local sh: %d\n",
                    local_csh, local_sh);
    }

    /* What is our remote shell? */
    if (mca_pls_rsh_component.assume_same_shell) {
        remote_sh = local_sh;
        remote_csh = local_csh;
        if (mca_pls_rsh_component.debug) {
            opal_output(0, "pls:rsh: assuming same remote shell as local shell");
        }
    } else {
        orte_pls_rsh_shell_t shell;
        rmaps_node = (orte_mapped_node_t*)opal_list_get_first(&map->nodes);
        rc = orte_pls_rsh_probe(rmaps_node, &shell);

        if (ORTE_SUCCESS != rc) {
            ORTE_ERROR_LOG(rc);
            goto cleanup;
        }

        switch (shell) {
        case ORTE_PLS_RSH_SHELL_SH:   /* fall through */
        case ORTE_PLS_RSH_SHELL_KSH:  /* fall through */
        case ORTE_PLS_RSH_SHELL_ZSH:  /* fall through */
        case ORTE_PLS_RSH_SHELL_BASH: remote_sh = true; break;
        case ORTE_PLS_RSH_SHELL_TCSH: /* fall through */
        case ORTE_PLS_RSH_SHELL_CSH:  remote_csh = true; break;
        default:
            opal_output(0, "WARNING: rsh probe returned unhandled shell; assuming bash\n");
            remote_sh = true;
        }
    }
    if (mca_pls_rsh_component.debug) {
        opal_output(0, "pls:rsh: remote csh: %d, remote sh: %d\n",
                    remote_csh, remote_sh);
    }

    /*
     * Build argv array
     */
    argv = opal_argv_copy(mca_pls_rsh_component.agent_argv);
    argc = mca_pls_rsh_component.agent_argc;
    node_name_index1 = argc;
    opal_argv_append(&argc, &argv, "