/*
 * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation.  All rights reserved.
 * Copyright (c) 2004-2006 The University of Tennessee and The University
 *                         of Tennessee Research Foundation.  All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart.  All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * Copyright (c) 2006      Cisco Systems, Inc.  All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 *
 * These symbols are in a file by themselves to provide nice linker
 * semantics.  Since linkers generally pull in symbols by object
 * files, keeping these symbols as the only symbols in this file
 * prevents utility programs such as "ompi_info" from having to import
 * entire components just to query their version and parameters.
 */
#include "orte_config.h"
|
2006-09-15 01:29:51 +04:00
|
|
|
#include "orte/orte_constants.h"
|
2005-09-29 16:35:43 +04:00
|
|
|
|
2005-03-14 23:57:21 +03:00
|
|
|
#include <stdlib.h>
|
2005-06-22 02:48:57 +04:00
|
|
|
#ifdef HAVE_UNISTD_H
|
2005-03-14 23:57:21 +03:00
|
|
|
#include <unistd.h>
|
2005-06-22 02:48:57 +04:00
|
|
|
#endif
|
2005-03-14 23:57:21 +03:00
|
|
|
#include <errno.h>
|
2005-03-29 17:50:15 +04:00
|
|
|
#include <string.h>
|
2005-09-06 20:10:05 +04:00
|
|
|
#ifdef HAVE_SYS_SELECT_H
|
|
|
|
#include <sys/select.h>
|
|
|
|
#endif
|
|
|
|
#ifdef HAVE_SYS_TIME_H
|
|
|
|
#include <sys/time.h>
|
|
|
|
#endif
|
|
|
|
#ifdef HAVE_SYS_TYPES_H
|
2005-03-14 23:57:21 +03:00
|
|
|
#include <sys/types.h>
|
2005-09-06 20:10:05 +04:00
|
|
|
#endif
|
|
|
|
#ifdef HAVE_UNISTD_H
|
|
|
|
#include <unistd.h>
|
|
|
|
#endif
|
|
|
|
#ifdef HAVE_SYS_STAT_H
|
2005-03-14 23:57:21 +03:00
|
|
|
#include <sys/stat.h>
|
2005-09-06 20:10:05 +04:00
|
|
|
#endif
|
2005-06-22 02:48:57 +04:00
|
|
|
#ifdef HAVE_SYS_WAIT_H
|
2005-03-14 23:57:21 +03:00
|
|
|
#include <sys/wait.h>
|
2005-06-22 02:48:57 +04:00
|
|
|
#endif
|
2005-03-14 23:57:21 +03:00
|
|
|
#include <fcntl.h>
|
2005-05-19 17:33:28 +04:00
|
|
|
#include <signal.h>
|
2005-08-04 19:09:02 +04:00
|
|
|
#ifdef HAVE_PWD_H
|
|
|
|
#include <pwd.h>
|
|
|
|
#endif
|
2005-03-14 23:57:21 +03:00
|
|
|
|
2006-03-12 07:35:01 +03:00
|
|
|
#include "opal/install_dirs.h"
|
2005-09-29 16:35:43 +04:00
|
|
|
#include "opal/mca/base/mca_base_param.h"
|
|
|
|
#include "opal/util/if.h"
|
2006-08-23 07:32:36 +04:00
|
|
|
#include "opal/util/os_path.h"
|
2005-09-29 16:35:43 +04:00
|
|
|
#include "opal/util/path.h"
|
|
|
|
#include "opal/event/event.h"
|
|
|
|
#include "opal/util/show_help.h"
|
|
|
|
#include "opal/util/argv.h"
|
|
|
|
#include "opal/util/opal_environ.h"
|
|
|
|
#include "opal/util/output.h"
|
2006-10-02 22:29:15 +04:00
|
|
|
#include "opal/util/trace.h"
|
2006-05-16 18:14:12 +04:00
|
|
|
#include "opal/util/basename.h"
|
2006-09-15 01:29:51 +04:00
|
|
|
|
|
|
|
#include "orte/util/sys_info.h"
|
2005-07-28 17:00:32 +04:00
|
|
|
#include "orte/util/univ_info.h"
|
|
|
|
#include "orte/util/session_dir.h"
|
2006-09-15 01:29:51 +04:00
|
|
|
|
2005-07-28 17:00:32 +04:00
|
|
|
#include "orte/runtime/orte_wait.h"
|
2006-10-03 21:40:00 +04:00
|
|
|
#include "orte/dss/dss.h"
|
2006-09-15 01:29:51 +04:00
|
|
|
|
2005-07-28 17:00:32 +04:00
|
|
|
#include "orte/mca/ns/ns.h"
|
|
|
|
#include "orte/mca/rml/rml.h"
|
|
|
|
#include "orte/mca/gpr/gpr.h"
|
|
|
|
#include "orte/mca/errmgr/errmgr.h"
|
2006-09-15 01:29:51 +04:00
|
|
|
#include "orte/mca/ras/ras_types.h"
|
|
|
|
#include "orte/mca/rmaps/base/rmaps_private.h"
|
2006-08-16 20:35:09 +04:00
|
|
|
#include "orte/mca/smr/smr.h"
|
2006-09-15 01:29:51 +04:00
|
|
|
|
|
|
|
#include "orte/mca/pls/pls.h"
|
|
|
|
#include "orte/mca/pls/base/pls_private.h"
|
2005-07-28 17:00:32 +04:00
|
|
|
#include "orte/mca/pls/rsh/pls_rsh.h"
|
2005-03-14 23:57:21 +03:00
|
|
|
|
2006-08-23 07:32:36 +04:00
|
|
|
#if !defined(__WINDOWS__)
|
2005-05-13 01:44:23 +04:00
|
|
|
extern char **environ;
|
2006-08-23 07:32:36 +04:00
|
|
|
#endif /* !defined(__WINDOWS__) */

#if OMPI_HAVE_POSIX_THREADS && OMPI_THREADS_HAVE_DIFFERENT_PIDS && OMPI_ENABLE_PROGRESS_THREADS
static int orte_pls_rsh_launch_threaded(orte_jobid_t jobid);
#endif


orte_pls_base_module_t orte_pls_rsh_module = {
#if OMPI_HAVE_POSIX_THREADS && OMPI_THREADS_HAVE_DIFFERENT_PIDS && OMPI_ENABLE_PROGRESS_THREADS
    orte_pls_rsh_launch_threaded,
#else
    orte_pls_rsh_launch,
#endif
    orte_pls_rsh_terminate_job,
    orte_pls_rsh_terminate_orteds,
    orte_pls_rsh_terminate_proc,
    orte_pls_rsh_signal_job,
    orte_pls_rsh_signal_proc,
    orte_pls_rsh_finalize
};

/* struct used to have enough information to clean up the state of the
   universe if a daemon aborts */
struct rsh_daemon_info_t {
    opal_object_t super;
    orte_ras_node_t* node;
    orte_jobid_t jobid;
};
typedef struct rsh_daemon_info_t rsh_daemon_info_t;
static OBJ_CLASS_INSTANCE(rsh_daemon_info_t,
                          opal_object_t,
                          NULL, NULL);
static void set_handler_default(int sig);

enum {
    ORTE_PLS_RSH_SHELL_BASH = 0,
    ORTE_PLS_RSH_SHELL_TCSH,
    ORTE_PLS_RSH_SHELL_CSH,
    ORTE_PLS_RSH_SHELL_KSH,
    ORTE_PLS_RSH_SHELL_UNKNOWN
};

typedef int orte_pls_rsh_shell;

static const char * orte_pls_rsh_shell_name[] = {
    "bash",
    "tcsh",   /* tcsh must come before csh, otherwise strstr matches the "csh" substring first */
    "csh",
    "ksh",
    "unknown"
};

/**
 * Check the SHELL variable on the specified node
 */

static int orte_pls_rsh_probe(orte_ras_node_t * node, orte_pls_rsh_shell * shell)
{
    char ** argv;
    int argc, rc, nfds, i;
    int fd[2];
    pid_t pid;
    fd_set readset;
    fd_set errset;
    char outbuf[4096];

    if (mca_pls_rsh_component.debug) {
        opal_output(0, "pls:rsh: going to check SHELL variable on node %s\n",
                    node->node_name);
    }
    *shell = ORTE_PLS_RSH_SHELL_UNKNOWN;
    /*
     * Build argv array
     */
    argv = opal_argv_copy(mca_pls_rsh_component.agent_argv);
    argc = mca_pls_rsh_component.agent_argc;
    opal_argv_append(&argc, &argv, node->node_name);
    opal_argv_append(&argc, &argv, "echo $SHELL");
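    /* The command is now "<agent> [agent args] <node> echo $SHELL" -- for
     * example "ssh somehost echo $SHELL" (the hostname here is only an
     * illustration).  The remote login shell expands $SHELL and echoes it
     * back, and we capture that single line of output through the pipe
     * created below. */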
    if (pipe(fd)) {
        opal_output(0, "pls:rsh: pipe failed with errno=%d\n", errno);
        return ORTE_ERR_IN_ERRNO;
    }
    if ((pid = fork()) < 0) {
        opal_output(0, "pls:rsh: fork failed with errno=%d\n", errno);
        return ORTE_ERR_IN_ERRNO;
    }
    else if (pid == 0) {          /* child */
        if (dup2(fd[1], 1) < 0) {
            opal_output(0, "pls:rsh: dup2 failed with errno=%d\n", errno);
            return ORTE_ERR_IN_ERRNO;
        }
        execvp(argv[0], argv);
        exit(errno);
    }
    if (close(fd[1])) {
        opal_output(0, "pls:rsh: close failed with errno=%d\n", errno);
        return ORTE_ERR_IN_ERRNO;
    }

    /* Monitor stdout */
    FD_ZERO(&readset);
    nfds = fd[0]+1;

    memset (outbuf, 0, sizeof (outbuf));
    rc = ORTE_SUCCESS;
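    /* Wait (via select) until the child's stdout becomes readable, then
     * drain it into outbuf until read() returns 0 (EOF).  EINTR is simply
     * retried; any other error aborts the probe. */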
    while (ORTE_SUCCESS == rc) {
        int err;
        FD_SET (fd[0], &readset);
        errset = readset;
        err = select(nfds, &readset, NULL, &errset, NULL);
        if (err == -1) {
            if (errno == EINTR)
                continue;
            else {
                rc = ORTE_ERR_IN_ERRNO;
                break;
            }
        }
        if (FD_ISSET(fd[0], &errset) != 0)
            rc = ORTE_ERR_FATAL;
        /* In case we have something valid to read on the pipe */
        if (FD_ISSET(fd[0], &readset) != 0) {
            ssize_t ret = 1;
            char temp[4096];
            char * ptr = outbuf;
            ssize_t outbufsize = sizeof(outbuf);

            memset (temp, 0, sizeof(temp));

            while (ret != 0) {
                ret = read (fd[0], temp, 256);
                if (ret < 0) {
                    if (errno == EINTR)
                        continue;
                    else {
                        rc = ORTE_ERR_IN_ERRNO;
                        break;
                    }
                }
                else {
                    if (outbufsize > 0) {
                        memcpy (ptr, temp, (ret > outbufsize) ? outbufsize : ret);
                        outbufsize -= ret;
                        ptr += ret;
                        if (outbufsize > 0)
                            *ptr = '\0';
                    }
                }
            }
            /* After reading the complete string (i.e. read returns 0), we just break */
            break;
        }
    }

    /* Search for the substring of known shell-names */
    for (i = 0; i < (int)(sizeof (orte_pls_rsh_shell_name)/
                          sizeof(orte_pls_rsh_shell_name[0])); i++) {
        if (NULL != strstr (outbuf, orte_pls_rsh_shell_name[i])) {
            *shell = i;
            break;
        }
    }
    if (mca_pls_rsh_component.debug) {
        opal_output(0, "pls:rsh: node:%s has SHELL:%s\n",
                    node->node_name, orte_pls_rsh_shell_name[*shell]);
    }
    return rc;
}

/**
 * Fill the exec_path variable with the directory to the orted
 */

static int orte_pls_rsh_fill_exec_path ( char ** exec_path)
{
    struct stat buf;

    asprintf(exec_path, "%s/orted", OPAL_BINDIR);
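    /* Verify that an orted binary actually exists at the compiled-in
     * location; if it does not, report a helpful error that includes the
     * user's PATH and bail out. */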
    if (0 != stat(*exec_path, &buf)) {
        char *path = getenv("PATH");
        if (NULL == path) {
            path = "PATH is empty!";
        }
        opal_show_help("help-pls-rsh.txt", "no-local-orted",
                       true, path, OPAL_BINDIR);
        return ORTE_ERR_NOT_FOUND;
    }
    return ORTE_SUCCESS;
}

/**
 * Callback on daemon exit.
 */

static void orte_pls_rsh_wait_daemon(pid_t pid, int status, void* cbdata)
{
    rsh_daemon_info_t *info = (rsh_daemon_info_t*) cbdata;
    opal_list_t map;
    opal_list_item_t* item;
    int rc;

    /* if ssh exited abnormally, set the child processes to aborted
       and print something useful to the user.  The usual reasons for
       ssh to exit abnormally are all a pretty good indication that
       the child processes aren't going to start up properly.

       This should somehow be pushed up to the calling level, but we
       don't really have a way to do that just yet.
     */
#ifdef __WINDOWS__
    printf("This is not implemented yet for windows\n");
    ORTE_ERROR_LOG(ORTE_ERROR);
    return;
#else
    if (!WIFEXITED(status) || 0 != WEXITSTATUS(status)) {
        /* get the mapping for our node so we can cancel the right things */
        OBJ_CONSTRUCT(&map, opal_list_t);
        rc = orte_rmaps_base_get_node_map(orte_process_info.my_name->cellid,
                                          info->jobid,
                                          info->node->node_name,
                                          &map);
        if (ORTE_SUCCESS != rc) {
            ORTE_ERROR_LOG(rc);
            goto cleanup;
        }
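        /* Note: the map pointer declared in the loop below shadows the
         * opal_list_t map constructed above -- each list item is one
         * orte_rmaps_base_map_t for this node. */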

        /* set state of all processes associated with the daemon as
           terminated */
        for(item =  opal_list_get_first(&map);
            item != opal_list_get_end(&map);
            item =  opal_list_get_next(item)) {
            orte_rmaps_base_map_t* map = (orte_rmaps_base_map_t*) item;
            orte_std_cntr_t i;

            for (i = 0 ; i < map->num_procs ; ++i) {
                /* Clean up the session directory as if we were the
                   process itself.  This covers the case where the
                   process died abnormally and didn't cleanup its own
                   session directory. */

                orte_session_dir_finalize(&(map->procs[i])->proc_name);

                rc = orte_smr.set_proc_state(&(map->procs[i]->proc_name),
                                             ORTE_PROC_STATE_ABORTED, status);
            }
            if (ORTE_SUCCESS != rc) {
                ORTE_ERROR_LOG(rc);
            }
        }
        OBJ_DESTRUCT(&map);

    cleanup:
        /* tell the user something went wrong */
        opal_output(0, "ERROR: A daemon on node %s failed to start as expected.",
                    info->node->node_name);
        opal_output(0, "ERROR: There may be more information available from");
        opal_output(0, "ERROR: the remote shell (see above).");

        if (WIFEXITED(status)) {
            opal_output(0, "ERROR: The daemon exited unexpectedly with status %d.",
                        WEXITSTATUS(status));
        } else if (WIFSIGNALED(status)) {
#ifdef WCOREDUMP
            if (WCOREDUMP(status)) {
                opal_output(0, "The daemon received a signal %d (with core).",
                            WTERMSIG(status));
            } else {
                opal_output(0, "The daemon received a signal %d.", WTERMSIG(status));
            }
#else
            opal_output(0, "The daemon received a signal %d.", WTERMSIG(status));
#endif /* WCOREDUMP */
        } else {
            opal_output(0, "No extra status information is available: %d.", status);
        }
    }
#endif /* __WINDOWS__ */

    /* release any waiting threads */
    OPAL_THREAD_LOCK(&mca_pls_rsh_component.lock);
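    /* Launches are throttled: the launch loop blocks on the condition
     * variable once num_children reaches num_concurrent, so every exiting
     * daemon must decrement the count and wake any waiter. */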
    if (mca_pls_rsh_component.num_children-- >=
        mca_pls_rsh_component.num_concurrent ||
        mca_pls_rsh_component.num_children == 0) {
        opal_condition_signal(&mca_pls_rsh_component.cond);
    }
    OPAL_THREAD_UNLOCK(&mca_pls_rsh_component.lock);

    /* cleanup */
    OBJ_RELEASE(info->node);
    OBJ_RELEASE(info);
}

/**
 * Launch a daemon (bootproxy) on each node.  The daemon will be responsible
 * for launching the application.
 */

int orte_pls_rsh_launch(orte_jobid_t jobid)
{
    opal_list_t mapping;
    opal_list_item_t* m_item, *n_item;
    orte_std_cntr_t num_nodes;
    orte_vpid_t vpid;
    int node_name_index1;
    int node_name_index2;
    int proc_name_index;
    int local_exec_index, local_exec_index_end;
    int call_yield_index;
    char *jobid_string;
    char *uri, *param;
    char **argv, **tmp;
    int argc;
    int rc;
    sigset_t sigs;
    struct passwd *p;
    bool remote_bash = false, remote_csh = false;
    bool local_bash = false, local_csh = false;
    char *lib_base = NULL, *bin_base = NULL;
    opal_list_t daemons;
    orte_pls_daemon_info_t *dmn;

    /* setup a list that will contain the info for all the daemons
     * so we can store it on the registry when done
     */
    OBJ_CONSTRUCT(&daemons, opal_list_t);

    /* Query the list of nodes allocated and mapped to this job.
     * We need the entire mapping for a couple of reasons:
     *  - need the prefix to start with.
     *  - need to know if we are launching on a subset of the allocated nodes
     * All other mapping responsibilities fall to orted in the fork PLS
     */
    OBJ_CONSTRUCT(&mapping, opal_list_t);
    rc = orte_rmaps_base_get_map(jobid, &mapping);
    if (ORTE_SUCCESS != rc) {
        goto cleanup;
    }

    num_nodes = 0;
    for(m_item = opal_list_get_first(&mapping);
        m_item != opal_list_get_end(&mapping);
        m_item = opal_list_get_next(m_item)) {
        orte_rmaps_base_map_t* map = (orte_rmaps_base_map_t*)m_item;
        num_nodes += opal_list_get_size(&map->nodes);
    }

    /*
     * Allocate a range of vpids for the daemons.
     */
    if (num_nodes == 0) {
        return ORTE_ERR_BAD_PARAM;
    }
    rc = orte_ns.reserve_range(0, num_nodes, &vpid);
    if (ORTE_SUCCESS != rc) {
        goto cleanup;
    }

    /* setup the orted triggers for passing their launch info */
    if (ORTE_SUCCESS != (rc = orte_smr.init_orted_stage_gates(jobid, num_nodes, NULL, NULL))) {
        ORTE_ERROR_LOG(rc);
        goto cleanup;
    }
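    /* The stage gates are registry counters that fire once all num_nodes
     * daemons have reported in; the NULL arguments mean no callback
     * function or data are registered here. */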

    /* need integer value for command line parameter */
    if (ORTE_SUCCESS != (rc = orte_ns.convert_jobid_to_string(&jobid_string, jobid))) {
        ORTE_ERROR_LOG(rc);
        goto cleanup;
    }

    /* What is our local shell? */
    p = getpwuid(getuid());
    if (NULL != p) {
        local_csh = OPAL_INT_TO_BOOL(NULL != strstr(p->pw_shell, "csh"));
        if (NULL != strstr(p->pw_shell, "bash") ||
            NULL != strstr(p->pw_shell, "zsh")) {
            local_bash = true;
        } else {
            local_bash = false;
        }
        if (mca_pls_rsh_component.debug) {
            opal_output(0, "pls:rsh: local csh: %d, local bash: %d\n",
                        local_csh, local_bash);
        }
    }

    /* What is our remote shell? */
    if (mca_pls_rsh_component.assume_same_shell) {
        remote_bash = local_bash;
        remote_csh = local_csh;
        if (mca_pls_rsh_component.debug) {
            opal_output(0, "pls:rsh: assuming same remote shell as local shell");
        }
    } else {
        orte_pls_rsh_shell shell;
        orte_rmaps_base_map_t* map = (orte_rmaps_base_map_t*)opal_list_get_first(&mapping);
        orte_rmaps_base_node_t* rmaps_node =
            (orte_rmaps_base_node_t*)opal_list_get_first(&map->nodes);
        orte_ras_node_t* node = rmaps_node->node;

        rc = orte_pls_rsh_probe(node, &shell);

        if (ORTE_SUCCESS != rc) {
            ORTE_ERROR_LOG(rc);
            return rc;
        }

        switch (shell) {
        case ORTE_PLS_RSH_SHELL_KSH:  /* fall through */
        case ORTE_PLS_RSH_SHELL_BASH: remote_bash = true; break;
        case ORTE_PLS_RSH_SHELL_TCSH: /* fall through */
        case ORTE_PLS_RSH_SHELL_CSH:  remote_csh = true; break;
        default:
            opal_output(0, "WARNING: rsh probe returned unhandled shell:%s assuming bash\n",
                        orte_pls_rsh_shell_name[shell]);
            remote_bash = true;
        }
    }
    if (mca_pls_rsh_component.debug) {
        opal_output(0, "pls:rsh: remote csh: %d, remote bash: %d\n",
                    remote_csh, remote_bash);
    }

    /*
     * Build argv array
     */
    argv = opal_argv_copy(mca_pls_rsh_component.agent_argv);
    argc = mca_pls_rsh_component.agent_argc;
    node_name_index1 = argc;
    opal_argv_append(&argc, &argv, "<template>");

    /* Do we need to source .profile on the remote side? */

    if (!(remote_csh || remote_bash)) {
        int i;
        tmp = opal_argv_split("( ! [ -e ./.profile ] || . ./.profile;", ' ');
        if (NULL == tmp) {
            return ORTE_ERR_OUT_OF_RESOURCE;
        }
        for (i = 0; NULL != tmp[i]; ++i) {
            opal_argv_append(&argc, &argv, tmp[i]);
        }
        opal_argv_free(tmp);
    }
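    /* For plain Bourne-style shells the daemon command line is wrapped in
     * "( ! [ -e ./.profile ] || . ./.profile; ... )" so that the remote
     * login environment gets sourced first; the matching ")" is appended
     * after all of the daemon arguments, below. */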

    /* add the daemon command (as specified by user) */
    local_exec_index = argc;
    opal_argv_append(&argc, &argv, mca_pls_rsh_component.orted);

    /* check for debug flags */
    orte_pls_base_mca_argv(&argc, &argv);

    opal_argv_append(&argc, &argv, "--bootproxy");
    opal_argv_append(&argc, &argv, jobid_string);
    opal_argv_append(&argc, &argv, "--name");
    proc_name_index = argc;
    opal_argv_append(&argc, &argv, "<template>");

    /* tell the daemon how many procs are in the daemon's job */
    opal_argv_append(&argc, &argv, "--num_procs");
    asprintf(&param, "%lu", (unsigned long)(vpid + num_nodes));
    opal_argv_append(&argc, &argv, param);
    free(param);
    /* tell the daemon the starting vpid of the daemon's job */
    opal_argv_append(&argc, &argv, "--vpid_start");
    opal_argv_append(&argc, &argv, "0");

    opal_argv_append(&argc, &argv, "--nodename");
    node_name_index2 = argc;
    opal_argv_append(&argc, &argv, "<template>");

    /* pass along the universe name and location info */
    opal_argv_append(&argc, &argv, "--universe");
    asprintf(&param, "%s@%s:%s", orte_universe_info.uid,
             orte_universe_info.host, orte_universe_info.name);
    opal_argv_append(&argc, &argv, param);
    free(param);

    /* setup ns contact info */
    opal_argv_append(&argc, &argv, "--nsreplica");
    if (NULL != orte_process_info.ns_replica_uri) {
        uri = strdup(orte_process_info.ns_replica_uri);
    } else {
        uri = orte_rml.get_uri();
    }
    asprintf(&param, "\"%s\"", uri);
    opal_argv_append(&argc, &argv, param);
    free(uri);
    free(param);

    /* setup gpr contact info */
    opal_argv_append(&argc, &argv, "--gprreplica");
    if (NULL != orte_process_info.gpr_replica_uri) {
        uri = strdup(orte_process_info.gpr_replica_uri);
    } else {
        uri = orte_rml.get_uri();
    }
    asprintf(&param, "\"%s\"", uri);
    opal_argv_append(&argc, &argv, param);
    free(uri);
    free(param);

    opal_argv_append(&argc, &argv, "--mpi-call-yield");
    call_yield_index = argc;
    opal_argv_append(&argc, &argv, "0");
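    /* "0" is only a placeholder: the launch loop below rewrites
     * argv[call_yield_index] to "1" on nodes that are oversubscribed */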

    local_exec_index_end = argc;
    if (!(remote_csh || remote_bash)) {
        opal_argv_append(&argc, &argv, ")");
    }
    if (mca_pls_rsh_component.debug) {
        param = opal_argv_join(argv, ' ');
        if (NULL != param) {
            opal_output(0, "pls:rsh: final template argv:");
            opal_output(0, "pls:rsh: %s", param);
            free(param);
        }
    }

    /* Figure out the basenames for the libdir and bindir.  This
       requires some explanation:

       - Use OPAL_LIBDIR and OPAL_BINDIR instead of -D'ing some macros
         in this directory's Makefile.am because it makes all the
         dependencies work out correctly.  These are defined in
         opal/install_dirs.h.

       - After a discussion on the devel-core mailing list, the
         developers decided that we should use the local directory
         basenames as the basis for the prefix on the remote node.
         This does not handle a few notable cases (e.g., if the
         libdir/bindir is not simply a subdir under the prefix, if the
         libdir/bindir basename is not the same on the remote node as
         it is here on the local node, etc.), but we decided that
         --prefix was meant to handle "the common case".  If you need
         something more complex than this, a) edit your shell startup
         files to set PATH/LD_LIBRARY_PATH properly on the remote
         node, or b) use some new/to-be-defined options that
         explicitly allow setting the bindir/libdir on the remote
         node.  We decided to implement these options (e.g.,
         --remote-bindir and --remote-libdir) to orterun when it
         actually becomes a problem for someone (vs. a hypothetical
         situation).

       Hence, for now, we simply take the basename of this install's
       libdir and bindir and use it to append this install's prefix
       and use that on the remote node.
    */

    lib_base = opal_basename(OPAL_LIBDIR);
    bin_base = opal_basename(OPAL_BINDIR);
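    /* Illustrative example (paths are hypothetical): if OPAL_BINDIR is
     * /opt/ompi/bin and OPAL_LIBDIR is /opt/ompi/lib, then bin_base is
     * "bin" and lib_base is "lib"; with --prefix /usr/local/ompi the
     * remote orted is then expected at /usr/local/ompi/bin/orted. */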

    /*
     * Iterate through each of the contexts
     */
    for(m_item = opal_list_get_first(&mapping);
        m_item != opal_list_get_end(&mapping);
        m_item = opal_list_get_next(m_item)) {
        orte_rmaps_base_map_t* map = (orte_rmaps_base_map_t*)m_item;
        char * prefix_dir = map->app->prefix_dir;

        /*
         * For each of the contexts - iterate through the nodes.
         */
        for(n_item = opal_list_get_first(&map->nodes);
            n_item != opal_list_get_end(&map->nodes);
            n_item = opal_list_get_next(n_item)) {
            orte_rmaps_base_node_t* rmaps_node = (orte_rmaps_base_node_t*)n_item;
            orte_ras_node_t* ras_node = rmaps_node->node;
            orte_process_name_t* name;
            pid_t pid;
            char *exec_path;
            char **exec_argv;

            /* already launched on this node */
            if(ras_node->node_launched++ != 0)
                continue;

            /* new daemon - setup to record its info */
            dmn = OBJ_NEW(orte_pls_daemon_info_t);
            opal_list_append(&daemons, &dmn->super);

            /* setup node name */
            free(argv[node_name_index1]);
            if (NULL != ras_node->node_username &&
                0 != strlen (ras_node->node_username)) {
                asprintf (&argv[node_name_index1], "%s@%s",
                          ras_node->node_username, ras_node->node_name);
            } else {
                argv[node_name_index1] = strdup(ras_node->node_name);
            }

            free(argv[node_name_index2]);
            argv[node_name_index2] = strdup(ras_node->node_name);

            /* save it in the daemon info */
            dmn->nodename = strdup(ras_node->node_name);

            /* initialize daemons process name */
            rc = orte_ns.create_process_name(&name, ras_node->node_cellid, 0, vpid);
            if (ORTE_SUCCESS != rc) {
                ORTE_ERROR_LOG(rc);
                goto cleanup;
            }

            /* save it in the daemon info */
            dmn->cell = ras_node->node_cellid;
            if (ORTE_SUCCESS != (rc = orte_dss.copy((void**)&(dmn->name), name, ORTE_NAME))) {
                ORTE_ERROR_LOG(rc);
                goto cleanup;
            }

            /* fork a child to exec the rsh/ssh session */

            /* set the process state to "launched" */
            if (ORTE_SUCCESS != (rc = orte_smr.set_proc_state(name, ORTE_PROC_STATE_LAUNCHED, 0))) {
                ORTE_ERROR_LOG(rc);
                goto cleanup;
            }

#ifdef __WINDOWS__
            printf("Unimplemented feature for windows\n");
            return ORTE_ERR_NOT_IMPLEMENTED;
#if 0
            {
                /* Do fork the windows way: see opal_few() for example */
                HANDLE new_process;
                STARTUPINFO si;
                PROCESS_INFORMATION pi;
                DWORD process_id;

                ZeroMemory (&si, sizeof(si));
                ZeroMemory (&pi, sizeof(pi));

                GetStartupInfo (&si);
                if (!CreateProcess (NULL,
                                    "new process",
                                    NULL,
                                    NULL,
                                    TRUE,
                                    0,
                                    NULL,
                                    NULL,
                                    &si,
                                    &pi)){
                    /* actual error can be got by simply calling GetLastError() */
                    return OMPI_ERROR;
                }
                /* get child pid */
                process_id = GetProcessId(&pi);
                pid = (int) process_id;
            }
#endif
#else
            pid = fork();
#endif
            if (pid < 0) {
                rc = ORTE_ERR_OUT_OF_RESOURCE;
                goto cleanup;
            }

            /* child */
            if (pid == 0) {
                char* name_string;
                char** env;
                char* var;
                long fd, fdmax = sysconf(_SC_OPEN_MAX);

                if (mca_pls_rsh_component.debug) {
                    opal_output(0, "pls:rsh: launching on node %s\n",
                                ras_node->node_name);
                }

                /* set the progress engine schedule for this node.
                 * if node_slots is set to zero, then we default to
                 * NOT being oversubscribed
                 */
                if (ras_node->node_slots > 0 &&
                    (orte_std_cntr_t)opal_list_get_size(&rmaps_node->node_procs) > ras_node->node_slots) {
                    if (mca_pls_rsh_component.debug) {
                        opal_output(0, "pls:rsh: oversubscribed -- setting mpi_yield_when_idle to 1 (%d %d)",
                                    ras_node->node_slots, opal_list_get_size(&rmaps_node->node_procs));
                    }
                    free(argv[call_yield_index]);
                    argv[call_yield_index] = strdup("1");
                } else {
                    if (mca_pls_rsh_component.debug) {
                        opal_output(0, "pls:rsh: not oversubscribed -- setting mpi_yield_when_idle to 0");
                    }
                    free(argv[call_yield_index]);
                    argv[call_yield_index] = strdup("0");
                }

                /* Is this a local launch?
                 *
                 * Not all node names may be resolvable (if we found
                 * localhost in the hostfile, for example).  So first
                 * check the trivial case of node_name being the same as
                 * the current nodename, which must be local.  If that
                 * doesn't match, check using opal_ifislocal().
                 */
                if (0 == strcmp(ras_node->node_name, orte_system_info.nodename) ||
                    opal_ifislocal(ras_node->node_name)) {
                    if (mca_pls_rsh_component.debug) {
                        opal_output(0, "pls:rsh: %s is a LOCAL node\n",
                                    ras_node->node_name);
                    }
                    exec_argv = &argv[local_exec_index];
                    exec_path = opal_path_findv(exec_argv[0], 0, environ, NULL);

                    if (NULL == exec_path && NULL == prefix_dir) {
                        rc = orte_pls_rsh_fill_exec_path (&exec_path);
                        if (ORTE_SUCCESS != rc) {
                            return rc;
                        }
                    } else {
                        if (NULL != prefix_dir) {
                            exec_path = opal_os_path( false, prefix_dir, bin_base, "orted", NULL );
                        }
                        /* If we still have not filled in the exec path, do so now */
                        if (NULL == exec_path) {
                            rc = orte_pls_rsh_fill_exec_path (&exec_path);
                            if (ORTE_SUCCESS != rc) {
                                return rc;
                            }
                        }
                    }

                    /* If we have a prefix, then modify the PATH and
                       LD_LIBRARY_PATH environment variables.  We're
                       already in the child process, so it's ok to modify
                       environ. */
                    if (NULL != prefix_dir) {
                        char *oldenv, *newenv;

                        /* Reset PATH */
                        newenv = opal_os_path( false, prefix_dir, bin_base, NULL );
                        oldenv = getenv("PATH");
                        if (NULL != oldenv) {
                            char *temp;
                            asprintf(&temp, "%s:%s", newenv, oldenv );
                            free( newenv );
                            newenv = temp;
                        }
                        opal_setenv("PATH", newenv, true, &environ);
                        if (mca_pls_rsh_component.debug) {
                            opal_output(0, "pls:rsh: reset PATH: %s", newenv);
                        }
                        free(newenv);

                        /* Reset LD_LIBRARY_PATH */
                        newenv = opal_os_path( false, prefix_dir, lib_base, NULL );
                        oldenv = getenv("LD_LIBRARY_PATH");
                        if (NULL != oldenv) {
                            char* temp;
                            asprintf(&temp, "%s:%s", newenv, oldenv);
                            free(newenv);
                            newenv = temp;
                        }
                        opal_setenv("LD_LIBRARY_PATH", newenv, true, &environ);
                        if (mca_pls_rsh_component.debug) {
                            opal_output(0, "pls:rsh: reset LD_LIBRARY_PATH: %s",
                                        newenv);
                        }
                        free(newenv);
                    }

                    /* Since this is a local execution, we need to
                       potentially whack the final ")" in the argv (if
                       sh/csh conditionals, from above).  Note that we're
                       modifying the argv[] in the child process, so
                       there's no need to save this and restore it
                       afterward -- the parent's argv[] is unmodified. */
                    if (NULL != argv[local_exec_index_end]) {
                        free(argv[local_exec_index_end]);
                        argv[local_exec_index_end] = NULL;
                    }

                    /* Finally, chdir($HOME) because we're making the
                       assumption that this is what will happen on
                       remote nodes (via rsh/ssh).  This allows a user
                       to specify a path that is relative to $HOME for
                       both the cwd and argv[0] and it will work on
                       all nodes -- including the local host.
                       Otherwise, it would work on remote nodes and
                       not the local node.  If the user does not start
                       in $HOME on the remote nodes... well... let's
                       hope they start in $HOME.  :-) */
                    var = getenv("HOME");
                    if (NULL != var) {
                        if (mca_pls_rsh_component.debug) {
                            opal_output(0, "pls:rsh: changing to directory %s",
                                        var);
                        }
                        /* Ignore errors -- what are we going to do?
                           (and we ignore errors on the remote nodes
                           in the fork pls, so this is consistent) */
                        chdir(var);
                    }
                } else {
                    if (mca_pls_rsh_component.debug) {
                        opal_output(0, "pls:rsh: %s is a REMOTE node\n",
                                    ras_node->node_name);
                    }
                    exec_argv = argv;
                    exec_path = strdup(mca_pls_rsh_component.agent_path);

                    if (NULL != prefix_dir) {
                        if (remote_bash) {
                            asprintf (&argv[local_exec_index],
                                      "PATH=%s/%s:$PATH ; export PATH ; "
                                      "LD_LIBRARY_PATH=%s/%s:$LD_LIBRARY_PATH ; export LD_LIBRARY_PATH ; "
                                      "%s/%s/%s",
                                      prefix_dir, bin_base,
                                      prefix_dir, lib_base,
                                      prefix_dir, bin_base,
                                      mca_pls_rsh_component.orted);
                        }
                        if (remote_csh) {
                            /* [t]csh is a bit more challenging -- we
                               have to check whether LD_LIBRARY_PATH
                               is already set before we try to set it.
                               Must be very careful about obeying
                               [t]csh's order of evaluation and not
                               using a variable before it is defined.
                               See this thread for more details:
                               http://www.open-mpi.org/community/lists/users/2006/01/0517.php. */
                            asprintf (&argv[local_exec_index],
                                      "set path = ( %s/%s $path ) ; "
                                      "if ( $?LD_LIBRARY_PATH == 1 ) "
                                      "set OMPI_have_llp ; "
                                      "if ( $?LD_LIBRARY_PATH == 0 ) "
                                      "setenv LD_LIBRARY_PATH %s/%s ; "
                                      "if ( $?OMPI_have_llp == 1 ) "
                                      "setenv LD_LIBRARY_PATH %s/%s:$LD_LIBRARY_PATH ; "
                                      "%s/%s/%s",
                                      prefix_dir, bin_base,
                                      prefix_dir, lib_base,
                                      prefix_dir, lib_base,
                                      prefix_dir, bin_base,
                                      mca_pls_rsh_component.orted);
                        }
                    }
                }

                /* setup process name */
                rc = orte_ns.get_proc_name_string(&name_string, name);
                if (ORTE_SUCCESS != rc) {
                    opal_output(0, "orte_pls_rsh: unable to create process name");
                    exit(-1);
                }
                free(argv[proc_name_index]);
                argv[proc_name_index] = strdup(name_string);

                if (!mca_pls_rsh_component.debug) {
                    /* setup stdin */
                    int fd = open("/dev/null", O_RDWR);
                    dup2(fd, 0);
                    close(fd);
                }

                /* close all file descriptors w/ exception of stdin/stdout/stderr */
                for(fd=3; fd<fdmax; fd++)
                    close(fd);
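                /* Closing the inherited descriptors keeps the rsh/ssh
                 * child from holding open the parent's pipes and sockets,
                 * which could otherwise keep connections alive after the
                 * parent expects them closed. */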

                /* Set signal handlers back to the default.  Do this close
                   to the execve() because the event library may (and likely
                   will) reset them.  If we don't do this, the event
                   library may have left some set that, at least on some
                   OS's, don't get reset via fork() or exec().  Hence, the
                   orted could be unkillable (for example). */

                set_handler_default(SIGTERM);
                set_handler_default(SIGINT);
#ifndef __WINDOWS__
                set_handler_default(SIGHUP);
                set_handler_default(SIGPIPE);
#endif
                set_handler_default(SIGCHLD);

                /* Unblock all signals, for many of the same reasons that
                   we set the default handlers, above.  This is noticeable
                   on Linux where the event library blocks SIGTERM, but we
                   don't want that blocked by the orted (or, more
                   specifically, we don't want it to be blocked by the
                   orted and then inherited by the ORTE processes that it
                   forks, making them unkillable by SIGTERM). */
#ifndef __WINDOWS__
                sigprocmask(0, 0, &sigs);
                sigprocmask(SIG_UNBLOCK, &sigs, 0);
#endif

                /* setup environment */
                env = opal_argv_copy(environ);
                var = mca_base_param_environ_variable("seed",NULL,NULL);
                opal_setenv(var, "0", true, &env);

                /* exec the daemon */
                if (mca_pls_rsh_component.debug) {
                    param = opal_argv_join(exec_argv, ' ');
                    if (NULL != param) {
                        opal_output(0, "pls:rsh: executing: %s", param);
                        free(param);
                    }
                }
                execve(exec_path, exec_argv, env);
                opal_output(0, "pls:rsh: execve failed with errno=%d\n", errno);
                exit(-1);

            } else {    /* father */
                rsh_daemon_info_t *daemon_info;

                OPAL_THREAD_LOCK(&mca_pls_rsh_component.lock);
                /* JJH Bug:
                 * If we are in '--debug-daemons' we keep the ssh connection
                 * alive for the span of the run. If we use this option
                 * AND we launch on more than "num_concurrent" machines
                 * then we will deadlock. No connections are terminated
                 * until the job is complete, no job is started
                 * since all the orteds are waiting for all the others
                 * to come online, and the others are not launched because
                 * we are waiting on those that have started to terminate
                 * their ssh tunnels. :(
                 */
                if (mca_pls_rsh_component.num_children++ >=
                    mca_pls_rsh_component.num_concurrent) {
                    opal_condition_wait(&mca_pls_rsh_component.cond, &mca_pls_rsh_component.lock);
                }
                OPAL_THREAD_UNLOCK(&mca_pls_rsh_component.lock);

                /* setup callback on sigchild - wait until setup above is complete
                 * as the callback can occur in the call to orte_wait_cb
                 */
                daemon_info = OBJ_NEW(rsh_daemon_info_t);
                OBJ_RETAIN(ras_node);
                daemon_info->node = ras_node;
                daemon_info->jobid = jobid;
                orte_wait_cb(pid, orte_pls_rsh_wait_daemon, daemon_info);

                /* if required - add delay to avoid problems w/ X11 authentication */
                if (mca_pls_rsh_component.debug && mca_pls_rsh_component.delay) {
                    sleep(mca_pls_rsh_component.delay);
                }
                vpid++;
            }
            free(name);
        }
    }

    /* all done, so store the daemon info on the registry */
    if (ORTE_SUCCESS != (rc = orte_pls_base_store_active_daemons(&daemons, jobid))) {
        ORTE_ERROR_LOG(rc);
    }

cleanup:
    while (NULL != (m_item = opal_list_remove_first(&mapping))) {
        OBJ_RELEASE(m_item);
    }
    OBJ_DESTRUCT(&mapping);

    while (NULL != (m_item = opal_list_remove_first(&daemons))) {
        OBJ_RELEASE(m_item);
    }
    OBJ_DESTRUCT(&daemons);

    if (NULL != lib_base) {
        free(lib_base);
    }
    if (NULL != bin_base) {
        free(bin_base);
    }

    free(jobid_string);  /* done with this variable */
    opal_argv_free(argv);

    return rc;
}


/**
 * Terminate all processes for a given job
 */
int orte_pls_rsh_terminate_job(orte_jobid_t jobid)
{
    int rc;
    opal_list_t daemons;
    opal_list_item_t *item;

    OPAL_TRACE(1);

    /* construct the list of active daemons on this job */
    OBJ_CONSTRUCT(&daemons, opal_list_t);
    if (ORTE_SUCCESS != (rc = orte_pls_base_get_active_daemons(&daemons, jobid))) {
        ORTE_ERROR_LOG(rc);
        goto CLEANUP;
    }

    /* order them to kill their local procs for this job */
    if (ORTE_SUCCESS != (rc = orte_pls_base_orted_kill_local_procs(&daemons, jobid))) {
        ORTE_ERROR_LOG(rc);
        goto CLEANUP;
    }

CLEANUP:
    while (NULL != (item = opal_list_remove_first(&daemons))) {
        OBJ_RELEASE(item);
    }
    OBJ_DESTRUCT(&daemons);
    return rc;
}

/**
 * Terminate the orteds for a given job
 */
int orte_pls_rsh_terminate_orteds(orte_jobid_t jobid)
{
    int rc;
    opal_list_t daemons;
    opal_list_item_t *item;

    OPAL_TRACE(1);

    /* construct the list of active daemons on this job */
    OBJ_CONSTRUCT(&daemons, opal_list_t);
    if (ORTE_SUCCESS != (rc = orte_pls_base_get_active_daemons(&daemons, jobid))) {
        ORTE_ERROR_LOG(rc);
        goto CLEANUP;
    }

    /* now tell them to die! */
    if (ORTE_SUCCESS != (rc = orte_pls_base_orted_exit(&daemons))) {
        ORTE_ERROR_LOG(rc);
    }

CLEANUP:
    while (NULL != (item = opal_list_remove_first(&daemons))) {
        OBJ_RELEASE(item);
    }
    OBJ_DESTRUCT(&daemons);
    return rc;
}

/*
 * Terminate a specific process
 */
int orte_pls_rsh_terminate_proc(const orte_process_name_t* proc)
{
    OPAL_TRACE(1);

    return ORTE_ERR_NOT_IMPLEMENTED;
}

int orte_pls_rsh_signal_job(orte_jobid_t jobid, int32_t signal)
{
    int rc;
    opal_list_t daemons;
    opal_list_item_t *item;

    OPAL_TRACE(1);

    /* construct the list of active daemons on this job */
    OBJ_CONSTRUCT(&daemons, opal_list_t);
    if (ORTE_SUCCESS != (rc = orte_pls_base_get_active_daemons(&daemons, jobid))) {
        ORTE_ERROR_LOG(rc);
        OBJ_DESTRUCT(&daemons);
        return rc;
    }

    /* order them to pass this signal to their local procs */
    if (ORTE_SUCCESS != (rc = orte_pls_base_orted_signal_local_procs(&daemons, signal))) {
        ORTE_ERROR_LOG(rc);
    }

    while (NULL != (item = opal_list_remove_first(&daemons))) {
        OBJ_RELEASE(item);
    }
    OBJ_DESTRUCT(&daemons);
    return rc;
}

int orte_pls_rsh_signal_proc(const orte_process_name_t* proc, int32_t signal)
{
    OPAL_TRACE(1);

    return ORTE_ERR_NOT_IMPLEMENTED;
}

int orte_pls_rsh_finalize(void)
{
    int rc;

    /* cleanup any pending recvs */
    if (ORTE_SUCCESS != (rc = orte_pls_base_comm_stop())) {
        ORTE_ERROR_LOG(rc);
    }
    return rc;
}


/**
 * Handle threading issues.
 */
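/* When ORTE runs with a separate progress thread, orte_pls_rsh_launch must
 * execute in that thread (it drives the event loop).  The trampoline below
 * therefore schedules the launch as a zero-timeout event and blocks the
 * caller on a condition variable until the launch completes; if the caller
 * already is the progress thread, it just calls the launch directly. */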

#if OMPI_HAVE_POSIX_THREADS && OMPI_THREADS_HAVE_DIFFERENT_PIDS && OMPI_ENABLE_PROGRESS_THREADS

struct orte_pls_rsh_stack_t {
    opal_condition_t cond;
    opal_mutex_t mutex;
    bool complete;
    orte_jobid_t jobid;
    int rc;
};
typedef struct orte_pls_rsh_stack_t orte_pls_rsh_stack_t;

static void orte_pls_rsh_stack_construct(orte_pls_rsh_stack_t* stack)
{
    OBJ_CONSTRUCT(&stack->mutex, opal_mutex_t);
    OBJ_CONSTRUCT(&stack->cond, opal_condition_t);
    stack->rc = 0;
    stack->complete = false;
}

static void orte_pls_rsh_stack_destruct(orte_pls_rsh_stack_t* stack)
{
    OBJ_DESTRUCT(&stack->mutex);
    OBJ_DESTRUCT(&stack->cond);
}

static OBJ_CLASS_INSTANCE(
    orte_pls_rsh_stack_t,
    opal_object_t,
    orte_pls_rsh_stack_construct,
    orte_pls_rsh_stack_destruct);

static void orte_pls_rsh_launch_cb(int fd, short event, void* args)
{
    orte_pls_rsh_stack_t *stack = (orte_pls_rsh_stack_t*)args;
    OPAL_THREAD_LOCK(&stack->mutex);
    stack->rc = orte_pls_rsh_launch(stack->jobid);
    stack->complete = true;
    opal_condition_signal(&stack->cond);
    OPAL_THREAD_UNLOCK(&stack->mutex);
}

static int orte_pls_rsh_launch_threaded(orte_jobid_t jobid)
{
    struct timeval tv = { 0, 0 };
    struct opal_event event;
    struct orte_pls_rsh_stack_t stack;

    OBJ_CONSTRUCT(&stack, orte_pls_rsh_stack_t);

    stack.jobid = jobid;
    if( opal_event_progress_thread() ) {
        stack.rc = orte_pls_rsh_launch( jobid );
    } else {
        opal_evtimer_set(&event, orte_pls_rsh_launch_cb, &stack);
        opal_evtimer_add(&event, &tv);

        OPAL_THREAD_LOCK(&stack.mutex);
        while (stack.complete == false) {
            opal_condition_wait(&stack.cond, &stack.mutex);
        }
        OPAL_THREAD_UNLOCK(&stack.mutex);
    }
    OBJ_DESTRUCT(&stack);
    return stack.rc;
}

#endif


static void set_handler_default(int sig)
{
#ifndef __WINDOWS__
    struct sigaction act;

    act.sa_handler = SIG_DFL;
    act.sa_flags = 0;
    sigemptyset(&act.sa_mask);

    sigaction(sig, &act, (struct sigaction *)0);
#endif
}