/*
 * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation.  All rights reserved.
 * Copyright (c) 2004-2005 The University of Tennessee and The University
 *                         of Tennessee Research Foundation.  All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart.  All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * Copyright (c) 2006      Cisco Systems, Inc.  All rights reserved.
 * Copyright (c) 2006      Sun Microsystems, Inc.  All rights reserved.
 *                         Use is subject to license terms.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 *
 * These symbols are in a file by themselves to provide nice linker
 * semantics.  Since linkers generally pull in symbols by object
 * files, keeping these symbols as the only symbols in this file
 * prevents utility programs such as "ompi_info" from having to import
 * entire components just to query their version and parameters.
 */

/**
 * @file:
 * Part of the gridengine launcher.
 * See pls_gridengine.h for an overview of how it works.
 */

#include "orte_config.h"
#include "orte/orte_constants.h"

#include <stdlib.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <errno.h>
#include <string.h>
#ifdef HAVE_SYS_SELECT_H
#include <sys/select.h>
#endif
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#ifdef HAVE_SYS_STAT_H
#include <sys/stat.h>
#endif
#ifdef HAVE_SYS_WAIT_H
#include <sys/wait.h>
#endif
#include <fcntl.h>
#include <signal.h>
#ifdef HAVE_PWD_H
#include <pwd.h>
#endif

#include "opal/mca/installdirs/installdirs.h"
#include "opal/mca/base/mca_base_param.h"
#include "opal/util/if.h"
#include "opal/util/os_path.h"
#include "opal/util/path.h"
#include "opal/event/event.h"
#include "opal/util/show_help.h"
#include "opal/util/argv.h"
#include "opal/util/opal_environ.h"
#include "opal/util/output.h"
#include "opal/util/basename.h"

#include "orte/util/univ_info.h"
#include "orte/util/session_dir.h"
#include "orte/util/sys_info.h"
#include "orte/runtime/orte_wait.h"
#include "orte/mca/ns/ns.h"
#include "orte/mca/rml/rml.h"
#include "orte/mca/gpr/gpr.h"
#include "orte/mca/errmgr/errmgr.h"
#include "orte/mca/ras/ras_types.h"
#include "orte/mca/rmaps/rmaps.h"
#include "orte/mca/smr/smr.h"

#include "orte/mca/pls/pls.h"
#include "orte/mca/pls/base/base.h"
#include "orte/mca/pls/base/pls_private.h"
#include "orte/mca/pls/gridengine/pls_gridengine.h"

#if !defined(__WINDOWS__)
extern char **environ;
#endif  /* !defined(__WINDOWS__) */
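
/* Entry points for the gridengine PLS: the struct below maps the generic
 * PLS API (launch/terminate jobs, manage orteds, signal jobs and procs,
 * cancel an in-flight operation, finalize) onto the gridengine-specific
 * implementations in this file. */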

orte_pls_base_module_t orte_pls_gridengine_module = {
    orte_pls_gridengine_launch_job,
    orte_pls_gridengine_terminate_job,
    orte_pls_gridengine_terminate_orteds,
    orte_pls_gridengine_terminate_proc,
    orte_pls_gridengine_signal_job,
    orte_pls_gridengine_signal_proc,
    orte_pls_gridengine_cancel_operation,
    orte_pls_gridengine_finalize
};

static void set_handler_default(int sig);
#if 0
static int update_slot_keyval(orte_ras_node_t* node, int* slot_cnt);
#endif

/**
 * Fill the orted_path variable with the path to the orted executable
 */
static int orte_pls_gridengine_fill_orted_path(char** orted_path)
{
    struct stat buf;

    asprintf(orted_path, "%s/orted", opal_install_dirs.bindir);
    if (0 != stat(*orted_path, &buf)) {
        char *path = getenv("PATH");
        if (NULL == path) {
            path = "PATH is empty!";
        }
        opal_show_help("help-pls-gridengine.txt", "no-local-orted",
                       true, path, opal_install_dirs.bindir);
        return ORTE_ERR_NOT_FOUND;
    }

    return ORTE_SUCCESS;
}
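
/* For example, with a (hypothetical) bindir of /opt/openmpi/bin the path
 * above becomes /opt/openmpi/bin/orted; if stat() cannot find the file
 * there, the user gets the "no-local-orted" help message instead. */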

/**
 * Callback on daemon exit.
 */
static void orte_pls_gridengine_wait_daemon(pid_t pid, int status, void* cbdata)
{
    orte_pls_daemon_info_t *info = (orte_pls_daemon_info_t*) cbdata;
    int rc;

    /* if qrsh exited abnormally, set the daemon's state to aborted
       and print something useful to the user.  The usual reasons for
       qrsh to exit abnormally are all a pretty good indication that
       the child processes aren't going to start up properly, so this
       will signal the system to kill the job.

       This should somehow be pushed up to the calling level, but we
       don't really have a way to do that just yet.
    */
    if (!WIFEXITED(status) || 0 != WEXITSTATUS(status)) {
        /* tell the user something went wrong.  We need to do this BEFORE we
         * set the state to ABORTED as that action will cause a trigger to
         * fire that will kill the job before any output would get printed!
         */
        opal_output(0, "ERROR: A daemon on node %s failed to start as expected.",
                    info->nodename);
        opal_output(0, "ERROR: There may be more information available from");
        opal_output(0, "ERROR: the 'qstat -t' command on the Grid Engine tasks.");
        opal_output(0, "ERROR: If the problem persists, please restart the");
        opal_output(0, "ERROR: Grid Engine PE job.");
        if (WIFEXITED(status)) {
            opal_output(0, "ERROR: The daemon exited unexpectedly with status %d.",
                        WEXITSTATUS(status));
        } else if (WIFSIGNALED(status)) {
#ifdef WCOREDUMP
            if (WCOREDUMP(status)) {
                opal_output(0, "The daemon received a signal %d (with core).",
                            WTERMSIG(status));
            } else {
                opal_output(0, "The daemon received a signal %d.", WTERMSIG(status));
            }
#else
            opal_output(0, "The daemon received a signal %d.", WTERMSIG(status));
#endif /* WCOREDUMP */
        } else {
            opal_output(0, "No extra status information is available: %d.", status);
        }

        /* now set the state to aborted */
        rc = orte_smr.set_proc_state(info->name, ORTE_PROC_STATE_ABORTED, status);
        if (ORTE_SUCCESS != rc) {
            ORTE_ERROR_LOG(rc);
        }
    }

    /* cleanup */
    OBJ_RELEASE(info);
}

/**
 * Launch a daemon (bootproxy) on each node.  The daemon will be responsible
 * for launching the application.
 */
int orte_pls_gridengine_launch_job(orte_jobid_t jobid)
{
    orte_job_map_t *map;
    opal_list_item_t *n_item;
    orte_std_cntr_t num_nodes;
    orte_vpid_t vpid;
    int node_name_index1;
    int node_name_index2;
    int proc_name_index;
    int orted_index;
    char *jobid_string = NULL;  /* NULL so the cleanup path can free safely */
    char *prefix_dir;
    char *param;
    char **argv = NULL;
    char **env = NULL;
    int argc;
    int rc;
    sigset_t sigs;
    char *lib_base = NULL, *bin_base = NULL;
    char *sge_root, *sge_arch;
    opal_list_t daemons;
    orte_pls_daemon_info_t *dmn;

    /* setup a list that will contain the info for all the daemons
     * so we can store it on the registry when done
     */
    OBJ_CONSTRUCT(&daemons, opal_list_t);

    /* Get the map for this job.
     * We need the entire mapping for a couple of reasons:
     *  - need the prefix to start with.
     *  - need to know if we are launching on a subset of the allocated nodes
     * All other mapping responsibilities fall to orted in the fork PLS
     */
    rc = orte_rmaps.get_job_map(&map, jobid);
    if (ORTE_SUCCESS != rc) {
        ORTE_ERROR_LOG(rc);
        OBJ_DESTRUCT(&daemons);
        return rc;
    }

    /* if the user requested that we re-use daemons,
     * launch the procs on any existing, re-usable daemons
     */
    if (orte_pls_base.reuse_daemons) {
        if (ORTE_SUCCESS != (rc = orte_pls_base_launch_on_existing_daemons(map))) {
            ORTE_ERROR_LOG(rc);
            OBJ_RELEASE(map);
            OBJ_DESTRUCT(&daemons);
            return rc;
        }
    }

    num_nodes = (orte_std_cntr_t)opal_list_get_size(&map->nodes);
    if (num_nodes == 0) {
        /* job must have been launched on existing daemons - just return */
        OBJ_RELEASE(map);
        OBJ_DESTRUCT(&daemons);
        return ORTE_SUCCESS;
    }

    /*
     * Allocate a range of vpids for the daemons.
     */
    rc = orte_ns.reserve_range(0, num_nodes, &vpid);
    if (ORTE_SUCCESS != rc) {
        ORTE_ERROR_LOG(rc);
        goto cleanup;
    }
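
    /* The daemons are assigned consecutive vpids from this range, one per
     * node: the launch loop below names each daemon with the current vpid
     * and increments it after a successful fork. */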

    /* setup the orted triggers for passing their launch info */
    if (ORTE_SUCCESS != (rc = orte_smr.init_orted_stage_gates(jobid, num_nodes, NULL, NULL))) {
        ORTE_ERROR_LOG(rc);
        goto cleanup;
    }

    /* need a string version of the jobid for the orted command line */
    if (ORTE_SUCCESS != (rc = orte_ns.convert_jobid_to_string(&jobid_string, jobid))) {
        ORTE_ERROR_LOG(rc);
        goto cleanup;
    }

    /*
     * Build argv array
     */
    argv = opal_argv_split("qrsh", ' ');
    argc = opal_argv_count(argv);
    /* gridengine specific flags */
    opal_argv_append(&argc, &argv, "-inherit");  /* run tasks within curr job */
    opal_argv_append(&argc, &argv, "-noshell");  /* execute w/o wrapping shell */
    opal_argv_append(&argc, &argv, "-nostdin");  /* suppress input stream stdin */
    opal_argv_append(&argc, &argv, "-V");        /* task inherits the job's environment */
    if (mca_pls_gridengine_component.verbose) {
        opal_argv_append(&argc, &argv, "-verbose");
    }

    node_name_index1 = argc;
    opal_argv_append(&argc, &argv, "<template>");

    /* add the orted daemon in command and
     * force orted into the same ptree as sge_shepherd with no daemonize */
    orted_index = argc;
    opal_argv_append(&argc, &argv, mca_pls_gridengine_component.orted);
    opal_argv_append(&argc, &argv, "--no-daemonize");

    /* Add basic orted command line options */
    orte_pls_base_orted_append_basic_args(&argc, &argv,
                                          &proc_name_index,
                                          &node_name_index2,
                                          jobid_string,
                                          (vpid + num_nodes)
                                          );
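
    /* At this point the template argv looks roughly like:
     *   qrsh -inherit -noshell -nostdin -V <template> orted --no-daemonize <basic orted args>
     * The <template> node name, the orted path, and the daemon's process
     * name are filled in per node inside the launch loop below. */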

    /* setup environment.  The environment is common to all the daemons
     * so we only need to do this once
     */
    env = opal_argv_copy(environ);
    param = mca_base_param_environ_variable("seed", NULL, NULL);
    opal_setenv(param, "0", true, &env);
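    /* i.e., this sets the MCA "seed" parameter to 0 in the daemons'
     * environment (the variable name is generated by
     * mca_base_param_environ_variable, typically OMPI_MCA_seed) so the
     * remote orteds know they are not the seed daemon. */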

    /* clean out any MCA component selection directives that
     * won't work on remote nodes
     */
    orte_pls_base_purge_mca_params(&env);

    if (mca_pls_gridengine_component.debug) {
        param = opal_argv_join(argv, ' ');
        if (NULL != param) {
            opal_output(0, "pls:gridengine: final template argv:");
            opal_output(0, "pls:gridengine: %s", param);
            free(param);
        }
    }

    /* Figure out the basenames for the libdir and bindir.  There is a
       lengthy comment about this in pls_rsh_module.c explaining all
       the rationale for how / why we're doing this.
    */
    lib_base = opal_basename(opal_install_dirs.libdir);
    bin_base = opal_basename(opal_install_dirs.bindir);

    /* See the note about prefix_dir in the orte/mca/pls/slurm/pls_slurm.c
     * module.  For here, just note that we must have at least one
     * app_context, and we take the prefix_dir from that first one.
     */
    prefix_dir = map->apps[0]->prefix_dir;

    /* If we have a prefix, then modify the PATH and
       LD_LIBRARY_PATH environment variables.
    */
    if (NULL != prefix_dir) {
        char *oldenv, *newenv;

        /* Reset PATH */
        newenv = opal_os_path(false, prefix_dir, bin_base, NULL);
        oldenv = getenv("PATH");
        if (NULL != oldenv) {
            char *temp;
            asprintf(&temp, "%s:%s", newenv, oldenv);
            free(newenv);
            newenv = temp;
        }
        opal_setenv("PATH", newenv, true, &env);
        if (mca_pls_gridengine_component.debug) {
            opal_output(0, "pls:gridengine: reset PATH: %s", newenv);
        }
        free(newenv);

        /* Reset LD_LIBRARY_PATH */
        newenv = opal_os_path(false, prefix_dir, lib_base, NULL);
        oldenv = getenv("LD_LIBRARY_PATH");
        if (NULL != oldenv) {
            char* temp;
            asprintf(&temp, "%s:%s", newenv, oldenv);
            free(newenv);
            newenv = temp;
        }
        opal_setenv("LD_LIBRARY_PATH", newenv, true, &env);
        if (mca_pls_gridengine_component.debug) {
            opal_output(0, "pls:gridengine: reset LD_LIBRARY_PATH: %s",
                        newenv);
        }
        free(newenv);
    }
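
    /* For example, with a (hypothetical) --prefix of /opt/openmpi and the
     * usual bin/lib basenames, the daemons would see
     * PATH=/opt/openmpi/bin:$PATH and
     * LD_LIBRARY_PATH=/opt/openmpi/lib:$LD_LIBRARY_PATH. */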

    /*
     * Iterate through the nodes.
     */
    for (n_item =  opal_list_get_first(&map->nodes);
         n_item != opal_list_get_end(&map->nodes);
         n_item =  opal_list_get_next(n_item)) {
        orte_mapped_node_t* rmaps_node = (orte_mapped_node_t*)n_item;
        orte_process_name_t* name;
        pid_t pid;
        char *exec_path, *orted_path;
        char **exec_argv;
#if 0
        int remain_slot_cnt;

        /* RHC - I don't believe this code is really necessary any longer.
         * The mapper correctly accounts for slots that have already been
         * used.  Even if another job starts to run between the time the
         * mapper maps this job and we get to this point, the new job
         * will have gone through the mapper and will not overuse the node.
         * As this code consumes considerable time, I have sliced it out
         * of the code for now.
         *
         * query the registry for the remaining gridengine slot count on
         * this node, and update the registry for the count for the
         * current process launch */
        if (ORTE_SUCCESS != (rc =
            update_slot_keyval(ras_node, &remain_slot_cnt))) {
            ORTE_ERROR_LOG(rc);
            return rc;
        }

        /* check for the unlikely scenario: the gridengine ras already
         * checks for it, but we still provide a check here. */
        if (remain_slot_cnt < 0) {
            opal_show_help("help-pls-gridengine.txt", "insufficient-pe-slot",
                           true, ras_node->node_name, true);
            exit(-1); /* exit instead of return ORTE_ERR_OUT_OF_RESOURCE */
        }
#endif

        /* setup node name */
        free(argv[node_name_index1]);
        if (NULL != rmaps_node->username &&
            0 != strlen(rmaps_node->username)) {
            asprintf(&argv[node_name_index1], "%s@%s",
                     rmaps_node->username, rmaps_node->nodename);
        } else {
            argv[node_name_index1] = strdup(rmaps_node->nodename);
        }

        free(argv[node_name_index2]);
        argv[node_name_index2] = strdup(rmaps_node->nodename);

        /* initialize the daemon's process name */
        rc = orte_ns.create_process_name(&name, rmaps_node->cell, 0, vpid);
        if (ORTE_SUCCESS != rc) {
            ORTE_ERROR_LOG(rc);
            goto cleanup;
        }

        /* new daemon - setup to record its info */
        dmn = OBJ_NEW(orte_pls_daemon_info_t);
        dmn->active_job = jobid;
        dmn->cell = rmaps_node->cell;
        dmn->nodename = strdup(rmaps_node->nodename);
        if (ORTE_SUCCESS != (rc = orte_dss.copy((void**)&(dmn->name), name, ORTE_NAME))) {
            ORTE_ERROR_LOG(rc);
            goto cleanup;
        }
        opal_list_append(&daemons, &dmn->super);

#ifdef __WINDOWS__
        printf("Unimplemented feature for windows\n");
        return ORTE_ERR_NOT_IMPLEMENTED;
#else
        /* fork a child to do qrsh */
        pid = fork();
#endif
        if (pid < 0) {
            rc = ORTE_ERR_OUT_OF_RESOURCE;
            goto cleanup;
        }

        /* child */
        if (pid == 0) {
            char* name_string;
            char* var;
            long fd, fdmax = sysconf(_SC_OPEN_MAX);

            if (mca_pls_gridengine_component.debug) {
                opal_output(0, "pls:gridengine: launching on node %s",
                            rmaps_node->nodename);
            }

            /* setting exec_argv and exec_path for qrsh */
            exec_argv = &argv[0];

            sge_root = getenv("SGE_ROOT");
            sge_arch = getenv("ARC");
            asprintf(&exec_path, "%s/bin/%s/qrsh", sge_root, sge_arch);
            /* resolve the constructed path; keep the original string so
             * the error message can show what we looked for */
            var = opal_path_findv(exec_path, X_OK, environ, NULL);
            if (NULL == var) {
                opal_show_help("help-pls-gridengine.txt", "bad-qrsh-path",
                               true, exec_path, sge_root, sge_arch);
                return ORTE_ERR_NOT_FOUND;
            }
            free(exec_path);
            exec_path = var;
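
            /* e.g., with SGE_ROOT=/opt/sge and ARC=lx24-amd64 (hypothetical
             * values set by Grid Engine) this looks for
             * /opt/sge/bin/lx24-amd64/qrsh. */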

            if (mca_pls_gridengine_component.debug) {
                opal_output(0, "pls:gridengine: exec_argv[0]=%s, exec_path=%s",
                            exec_argv[0], exec_path);
            }

            /* setting orted_path for orted */
            orted_path = opal_path_findv(exec_argv[orted_index], 0, environ, NULL);

            if (NULL == orted_path && NULL == prefix_dir) {
                rc = orte_pls_gridengine_fill_orted_path(&orted_path);
                if (ORTE_SUCCESS != rc) {
                    return rc;
                }
            } else {
                if (NULL != prefix_dir) {
                    orted_path = opal_os_path(false, prefix_dir, bin_base, "orted", NULL);
                }
                /* If we still have not filled in orted_path, do so now */
                if (NULL == orted_path) {
                    rc = orte_pls_gridengine_fill_orted_path(&orted_path);
                    if (ORTE_SUCCESS != rc) {
                        return rc;
                    }
                }
            }
            /* note: use "%s" rather than passing orted_path as the format
             * string, so a '%' in the path cannot be misinterpreted */
            asprintf(&argv[orted_index], "%s", orted_path);
            if (mca_pls_gridengine_component.debug) {
                opal_output(0, "pls:gridengine: orted_path=%s", orted_path);
            }

            var = getenv("HOME");
            if (NULL != var) {
                if (mca_pls_gridengine_component.debug) {
                    opal_output(0, "pls:gridengine: changing to directory %s",
                                var);
                }
                /* Ignore errors -- what are we going to do?
                   (and we ignore errors on the remote nodes
                   in the fork pls, so this is consistent) */
                chdir(var);
            }

            /* setup process name */
            rc = orte_ns.get_proc_name_string(&name_string, name);
            if (ORTE_SUCCESS != rc) {
                opal_output(0, "pls:gridengine: unable to create process name");
                exit(-1);
            }
            free(argv[proc_name_index]);
            argv[proc_name_index] = strdup(name_string);

            if (!mca_pls_gridengine_component.debug) {
                /* setup stdin */
                int fd = open("/dev/null", O_RDWR, 0);
                dup2(fd, 0);
                close(fd);
            }

            /* close all file descriptors w/ exception of stdin/stdout/stderr */
            for (fd = 3; fd < fdmax; fd++)
                close(fd);

            /* Set signal handlers back to the default.  Do this close
               to the execve() because the event library may (and likely
               will) reset them.  If we don't do this, the event
               library may have left some set that, at least on some
               OS's, don't get reset via fork() or exec().  Hence, the
               orted could be unkillable (for example). */

            set_handler_default(SIGTERM);
            set_handler_default(SIGINT);
#ifndef __WINDOWS__
            set_handler_default(SIGHUP);
            set_handler_default(SIGPIPE);
#endif
            set_handler_default(SIGCHLD);

            /* Unblock all signals, for many of the same reasons that
               we set the default handlers, above.  This is noticeable
               on Linux where the event library blocks SIGTERM, but we
               don't want that blocked by the orted (or, more
               specifically, we don't want it to be blocked by the
               orted and then inherited by the ORTE processes that it
               forks, making them unkillable by SIGTERM). */
#ifndef __WINDOWS__
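            /* First fetch the currently blocked set (with a NULL 'set',
             * sigprocmask only reports the existing mask), then unblock
             * everything in it. */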
            sigprocmask(0, 0, &sigs);
            sigprocmask(SIG_UNBLOCK, &sigs, 0);
#endif

            /* exec the daemon */
            if (mca_pls_gridengine_component.debug) {
                param = opal_argv_join(exec_argv, ' ');
                if (NULL != param) {
                    opal_output(0, "pls:gridengine: executing: %s", param);
                    free(param);
                }
            }
            execve(exec_path, exec_argv, env);
            opal_output(0, "pls:gridengine: execve failed with errno=%d\n", errno);
            exit(-1);
        } else { /* parent */
            if (mca_pls_gridengine_component.debug) {
                opal_output(0, "pls:gridengine: parent");
            }

            /* setup callback on sigchild - wait until setup above is complete
             * as the callback can occur in the call to orte_wait_cb
             */
            orte_wait_cb(pid, orte_pls_gridengine_wait_daemon, dmn);

            vpid++;
        }
        free(name);
    }

    /* all done, so store the daemon info on the registry */
    if (ORTE_SUCCESS != (rc = orte_pls_base_store_active_daemons(&daemons))) {
        ORTE_ERROR_LOG(rc);
    }

cleanup:
    OBJ_RELEASE(map);

    if (NULL != lib_base) {
        free(lib_base);
    }
    if (NULL != bin_base) {
        free(bin_base);
    }

    free(jobid_string);  /* done with this variable */
    opal_argv_free(argv);
    opal_argv_free(env);

    return rc;
}

#if 0
/**
 * Query the registry for the gridengine slot count, and update it
 */
static int update_slot_keyval(orte_ras_node_t* ras_node, int* slot_cnt)
{
    int rc, *iptr, ivalue;
    orte_std_cntr_t num_tokens, i, get_cnt;
    orte_gpr_value_t** get_values;
    char **tokens;
    char *get_keys[] = {"orte-gridengine-slot-cnt", NULL};
    orte_gpr_keyval_t *condition;

    /* get token */
    if (ORTE_SUCCESS != (rc = orte_schema.get_node_tokens(&tokens,
        &num_tokens, ras_node->node_cellid, ras_node->node_name))) {
        ORTE_ERROR_LOG(rc);
        return rc;
    }

    /* setup condition/filter for query - return only processes that
     * are assigned to the specified node name
     */
    if (ORTE_SUCCESS != (rc = orte_gpr.create_keyval(&condition, ORTE_NODE_NAME_KEY, ORTE_STRING, (void*)ras_node->node_name))) {
        ORTE_ERROR_LOG(rc);
        return rc;
    }
    rc = orte_gpr.get_conditional(
        ORTE_GPR_KEYS_OR|ORTE_GPR_TOKENS_OR,
        ORTE_NODE_SEGMENT,
        tokens,
        get_keys,
        1,
        &condition,
        &get_cnt,
        &get_values);
    if (ORTE_SUCCESS != rc) {
        ORTE_ERROR_LOG(rc);
        return rc;
    }

    /* parse the response */
    for (i = 0; i < get_cnt; i++) {
        orte_gpr_value_t* value = get_values[i];
        orte_std_cntr_t k;

        /* looking in each GPR container for the keyval */
        for (k = 0; k < value->cnt; k++) {
            orte_gpr_keyval_t* keyval = value->keyvals[k];
            orte_data_value_t *put_value;

            if (strcmp(keyval->key, "orte-gridengine-slot-cnt") == 0) {
                if (ORTE_SUCCESS != (rc = orte_dss.get(
                    (void**)&iptr, keyval->value, ORTE_INT))) {
                    ORTE_ERROR_LOG(rc);
                    continue;
                }
                *slot_cnt = *iptr;
                free(iptr);
                if (mca_pls_gridengine_component.debug) {
                    opal_output(0, "pls:gridengine: %s: registry shows PE slots=%d",
                                ras_node->node_name, *slot_cnt);
                }

                (*slot_cnt)--; /* account for the current launch */

                if (mca_pls_gridengine_component.debug) {
                    opal_output(0, "pls:gridengine: %s: decrementing, PE slots=%d",
                                ras_node->node_name, *slot_cnt);
                }

                put_value = OBJ_NEW(orte_data_value_t);
                if (NULL == put_value) {
                    ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
                    return ORTE_ERR_OUT_OF_RESOURCE;
                }
                ivalue = *slot_cnt;
                put_value->type = ORTE_INT;
                put_value->data = &ivalue;

                /* put the keyvalue in the segment */
                if (ORTE_SUCCESS != (rc = orte_gpr.put_1(
                    ORTE_GPR_OVERWRITE|ORTE_GPR_TOKENS_XAND,
                    ORTE_NODE_SEGMENT,
                    tokens,
                    "orte-gridengine-slot-cnt",
                    put_value
                    ))) {
                    ORTE_ERROR_LOG(rc);
                }
                continue;
            }
        }
    }

    /* release all returned values, starting from index 0 */
    for (i = 0; i < get_cnt; i++)
        OBJ_RELEASE(get_values[i]);
    if (NULL != get_values) free(get_values);
    opal_argv_free(tokens);

    return rc;
}
#endif

/**
 * Terminate all processes for a given job
 */
int orte_pls_gridengine_terminate_job(orte_jobid_t jobid, struct timeval *timeout, opal_list_t *attrs)
{
    int rc;

    /* order them to kill their local procs for this job */
    if (ORTE_SUCCESS != (rc = orte_pls_base_orted_kill_local_procs(jobid, timeout, attrs))) {
        ORTE_ERROR_LOG(rc);
    }

    return rc;
}

int orte_pls_gridengine_terminate_proc(const orte_process_name_t* proc)
{
    return ORTE_ERR_NOT_IMPLEMENTED;
}

/**
 * Terminate the orteds for a given job
 */
int orte_pls_gridengine_terminate_orteds(orte_jobid_t jobid, struct timeval *timeout, opal_list_t *attrs)
{
    int rc;

    /* now tell them to die! */
    if (ORTE_SUCCESS != (rc = orte_pls_base_orted_exit(timeout, attrs))) {
        ORTE_ERROR_LOG(rc);
    }

    return rc;
}

/**
 * Signal all processes associated with this job.  The attrs list defines
 * the extent of the operation, e.g. whether jobs spawned from this one
 * via comm_spawn are included as well.
 */
int orte_pls_gridengine_signal_job(orte_jobid_t jobid, int32_t signal, opal_list_t *attrs)
{
    int rc;

    /* order them to pass this signal to their local procs */
    if (ORTE_SUCCESS != (rc = orte_pls_base_orted_signal_local_procs(jobid, signal, attrs))) {
        ORTE_ERROR_LOG(rc);
    }

    return rc;
}

/**
 * Signal a specific process.
 */
int orte_pls_gridengine_signal_proc(const orte_process_name_t* proc, int32_t signal)
{
    return ORTE_ERR_NOT_IMPLEMENTED;
}

/**
 * Cancel an operation involving comm to an orted
 */
int orte_pls_gridengine_cancel_operation(void)
{
    int rc;

    if (ORTE_SUCCESS != (rc = orte_pls_base_orted_cancel_operation())) {
        ORTE_ERROR_LOG(rc);
    }

    return rc;
}

/**
 * Finalize
 */
int orte_pls_gridengine_finalize(void)
{
    int rc;

    /* cleanup any pending recvs */
    if (ORTE_SUCCESS != (rc = orte_pls_base_comm_stop())) {
        ORTE_ERROR_LOG(rc);
    }
    return ORTE_SUCCESS;
}

/**
 * Set a signal handler back to its default
 */
static void set_handler_default(int sig)
{
#ifndef __WINDOWS__
    struct sigaction act;
    act.sa_handler = SIG_DFL;
    act.sa_flags = 0;
    sigemptyset(&act.sa_mask);

    sigaction(sig, &act, (struct sigaction *)0);
#endif
}