openmpi/orte/mca/plm/rsh/plm_rsh_component.c


/*
 * Copyright (c) 2004-2008 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation. All rights reserved.
 * Copyright (c) 2004-2006 The University of Tennessee and The University
 *                         of Tennessee Research Foundation. All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart. All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * Copyright (c) 2007-2011 Los Alamos National Security, LLC. All rights
 *                         reserved.
 * Copyright (c) 2008-2009 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2010      Oracle and/or its affiliates. All rights
 *                         reserved.
 * Copyright (c) 2009      Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2011      IBM Corporation. All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 *
 * These symbols are in a file by themselves to provide nice linker
 * semantics. Since linkers generally pull in symbols by object
 * files, keeping these symbols as the only symbols in this file
 * prevents utility programs such as "ompi_info" from having to import
 * entire components just to query their version and parameters.
 */
#include "orte_config.h"
#include "orte/constants.h"
#include <stdlib.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <ctype.h>
#include "opal/util/opal_environ.h"
#include "opal/util/output.h"
#include "opal/util/argv.h"
#include "opal/util/path.h"
#include "opal/mca/base/mca_base_param.h"
#include "orte/util/name_fns.h"
#include "orte/runtime/orte_globals.h"
#include "orte/util/show_help.h"
#include "orte/mca/plm/plm.h"
#include "orte/mca/plm/base/plm_private.h"
#include "orte/mca/plm/rsh/plm_rsh.h"
/*
 * Public string showing the plm ompi_rsh component version number
 */
const char *mca_plm_rsh_component_version_string =
    "Open MPI rsh plm MCA component version " ORTE_VERSION;

static int rsh_component_open(void);
static int rsh_component_query(mca_base_module_t **module, int *priority);
static int rsh_component_close(void);
static int rsh_launch_agent_lookup(const char *agent_list, char *path);

/*
 * Instantiate the public struct with all of our public information
 * and pointers to our public functions in it
 */
orte_plm_rsh_component_t mca_plm_rsh_component = {
    {
        /* First, the mca_component_t struct containing meta information
           about the component itself */
        {
            ORTE_PLM_BASE_VERSION_2_0_0,

            /* Component name and version */
            "rsh",
            ORTE_MAJOR_VERSION,
            ORTE_MINOR_VERSION,
            ORTE_RELEASE_VERSION,

            /* Component open and close functions */
            rsh_component_open,
            rsh_component_close,
            rsh_component_query
        },
        {
            /* The component is checkpoint ready */
            MCA_BASE_METADATA_PARAM_CHECKPOINT
        }
    }
};
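
/*
 * Component open: initialize the component globals and register this
 * component's MCA parameters (concurrency limit, launch agent,
 * qrsh/llspawn behavior, tree spawn, etc.).
 */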
static int rsh_component_open(void)
{
    int tmp, value;
    mca_base_component_t *c = &mca_plm_rsh_component.super.base_version;
    char *ctmp, **cargv;

    /* initialize globals */
    mca_plm_rsh_component.using_qrsh = false;
    mca_plm_rsh_component.using_llspawn = false;

    /* lookup parameters */
    mca_base_param_reg_int(c, "num_concurrent",
                           "How many plm_rsh_agent instances to invoke concurrently (must be > 0)",
                           false, false, 128, &tmp);
    if (tmp <= 0) {
        orte_show_help("help-plm-rsh.txt", "concurrency-less-than-zero",
                       true, tmp);
        tmp = 1;
    }
    mca_plm_rsh_component.num_concurrent = tmp;

    mca_base_param_reg_int(c, "force_rsh",
                           "Force the launcher to always use rsh",
                           false, false, false, &tmp);
    mca_plm_rsh_component.force_rsh = OPAL_INT_TO_BOOL(tmp);
    mca_base_param_reg_int(c, "disable_qrsh",
                           "Disable the use of qrsh when under the Grid Engine parallel environment",
                           false, false, false, &tmp);
    mca_plm_rsh_component.disable_qrsh = OPAL_INT_TO_BOOL(tmp);
    mca_base_param_reg_int(c, "daemonize_qrsh",
                           "Daemonize the orted under the Grid Engine parallel environment",
                           false, false, false, &tmp);
    mca_plm_rsh_component.daemonize_qrsh = OPAL_INT_TO_BOOL(tmp);
    mca_base_param_reg_int(c, "disable_llspawn",
                           "Disable the use of llspawn when under the LoadLeveler environment",
                           false, false, false, &tmp);
    mca_plm_rsh_component.disable_llspawn = OPAL_INT_TO_BOOL(tmp);
    mca_base_param_reg_int(c, "daemonize_llspawn",
                           "Daemonize the orted when under the LoadLeveler environment",
                           false, false, false, &tmp);
    mca_plm_rsh_component.daemonize_llspawn = OPAL_INT_TO_BOOL(tmp);

    mca_base_param_reg_int(c, "priority",
                           "Priority of the rsh plm component",
                           false, false, 10,
                           &mca_plm_rsh_component.priority);

    mca_base_param_reg_string(c, "delay",
                              "Delay between invocations of the remote agent (sec[:usec])",
                              false, false, NULL,
                              &ctmp);
    if (NULL != ctmp) {
        /* parse "sec[:usec]"; the optional usec field is converted to
           nanoseconds for tv_nsec */
        cargv = opal_argv_split(ctmp, ':');
        mca_plm_rsh_component.delay.tv_sec = strtol(cargv[0], NULL, 10);
        if (1 < opal_argv_count(cargv)) {
            mca_plm_rsh_component.delay.tv_nsec = 1000 * strtol(cargv[1], NULL, 10);
        }
        opal_argv_free(cargv);
        free(ctmp);
    }

    mca_base_param_reg_int(c, "no_tree_spawn",
                           "If set to 1, do not launch via a tree-based topology",
                           false, false, 0, &tmp);
    if (0 == tmp) {
        mca_plm_rsh_component.tree_spawn = true;
    } else {
        mca_plm_rsh_component.tree_spawn = false;
    }

    /* local rsh/ssh launch agent */
    tmp = mca_base_param_reg_string(c, "agent",
                                    "The command used to launch executables on remote nodes (typically either \"ssh\" or \"rsh\")",
                                    false, false, "ssh : rsh", NULL);
    mca_base_param_reg_syn_name(tmp, "pls", "rsh_agent", true);
    mca_base_param_reg_syn_name(tmp, "orte", "rsh_agent", true);
    mca_base_param_lookup_string(tmp, &mca_plm_rsh_component.agent);

    tmp = mca_base_param_reg_int_name("orte", "assume_same_shell",
                                      "If set to 1, assume that the shell on the remote node is the same as the shell on the local node. Otherwise, probe for what the remote shell is [default: 1]",
                                      false, false, 1, NULL);
    mca_base_param_reg_syn_name(tmp, "plm", "rsh_assume_same_shell", true);
    mca_base_param_lookup_int(tmp, &value);
    mca_plm_rsh_component.assume_same_shell = OPAL_INT_TO_BOOL(value);

    mca_base_param_reg_int(c, "pass_environ_mca_params",
                           "If set to 0, do not include mca params from the environment on the orted cmd line",
                           false, false, 1, &tmp);
    mca_plm_rsh_component.pass_environ_mca_params = OPAL_INT_TO_BOOL(tmp);

    return ORTE_SUCCESS;
}
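
/*
 * Component query: decide whether this component can be used.  Prefer
 * qrsh when a Grid Engine environment is detected and llspawn when
 * running under LoadLeveler; otherwise fall back to the configured
 * rsh/ssh agent.  On success, report our module and priority.
 */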
static int rsh_component_query(mca_base_module_t **module, int *priority)
{
    char *tmp;

    /* Check if we are under the Grid Engine parallel environment by looking
     * at several environment variables. If so, setup the path and argv[0]. */
    if (!mca_plm_rsh_component.disable_qrsh &&
        NULL != getenv("SGE_ROOT") && NULL != getenv("ARC") &&
        NULL != getenv("PE_HOSTFILE") && NULL != getenv("JOB_ID")) {
        /* setup the search path for qrsh */
        asprintf(&tmp, "%s/bin/%s", getenv("SGE_ROOT"), getenv("ARC"));
        /* see if the agent is available */
        if (ORTE_SUCCESS != rsh_launch_agent_lookup("qrsh", tmp)) {
            /* can't be SGE */
            opal_output_verbose(1, orte_plm_globals.output,
                                "%s plm:rsh: unable to be used: SGE indicated but cannot find path "
                                "or execution permissions not set for launching agent qrsh",
                                ORTE_NAME_PRINT(ORTE_PROC_MY_NAME));
            free(tmp);
            *module = NULL;
            return ORTE_ERROR;
        }
        free(tmp);
        mca_plm_rsh_component.using_qrsh = true;
        /* no tree spawn allowed under qrsh */
        mca_plm_rsh_component.tree_spawn = false;
        goto success;
    } else if (!mca_plm_rsh_component.disable_llspawn &&
               NULL != getenv("LOADL_STEP_ID")) {
        /* We are running as a LoadLeveler job.
           Search for llspawn in the user's PATH */
        if (ORTE_SUCCESS != rsh_launch_agent_lookup("llspawn", NULL)) {
            opal_output_verbose(1, orte_plm_globals.output,
                                "%s plm:rsh: unable to be used: LoadLeveler "
                                "indicated but cannot find path or execution "
                                "permissions not set for launching agent llspawn",
                                ORTE_NAME_PRINT(ORTE_PROC_MY_NAME));
            *module = NULL;
            return ORTE_ERROR;
        }
        mca_plm_rsh_component.using_llspawn = true;
        goto success;
    }

    /* if this isn't a Grid Engine or LoadLeveler environment,
       see if the MCA-specified agent (default: ssh:rsh) is available */
    if (ORTE_SUCCESS != rsh_launch_agent_lookup(NULL, NULL)) {
        /* this isn't an error - we just cannot be selected */
        OPAL_OUTPUT_VERBOSE((1, orte_plm_globals.output,
                             "%s plm:rsh: unable to be used: cannot find path "
                             "for launching agent \"%s\"\n",
                             ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                             mca_plm_rsh_component.agent));
        *module = NULL;
        return ORTE_ERROR;
    }

success:
    /* we are good - make ourselves available */
    *priority = mca_plm_rsh_component.priority;
    *module = (mca_base_module_t *) &orte_plm_rsh_module;
    return ORTE_SUCCESS;
}
static int rsh_component_close(void)
{
    return ORTE_SUCCESS;
}

/*
 * Take a colon-delimited list of agents and locate the first one that
 * we are able to find in the PATH. Split that one into argv and
 * return it. If nothing found, then return NULL.
 */
char **orte_plm_rsh_search(const char* agent_list, const char *path)
{
    int i, j;
    char *line, **lines;
    char **tokens, *tmp;
    char cwd[OPAL_PATH_MAX];

    if (NULL == path) {
        getcwd(cwd, OPAL_PATH_MAX);
    } else {
        strncpy(cwd, path, OPAL_PATH_MAX - 1);
        cwd[OPAL_PATH_MAX - 1] = '\0';  /* ensure termination */
    }
    if (NULL == agent_list) {
        lines = opal_argv_split(mca_plm_rsh_component.agent, ':');
    } else {
        lines = opal_argv_split(agent_list, ':');
    }
    for (i = 0; NULL != lines[i]; ++i) {
        line = lines[i];

        /* Trim whitespace at the beginning and end of the line */
        for (j = 0; '\0' != line[j] && isspace(line[j]); ++line) {
            continue;
        }
        for (j = (int) strlen(line) - 1; j > 0 && isspace(line[j]); --j) {
            line[j] = '\0';
        }
        if (strlen(line) <= 0) {
            continue;
        }

        /* Split it */
        tokens = opal_argv_split(line, ' ');

        /* Look for the first token in the PATH */
        tmp = opal_path_findv(tokens[0], X_OK, environ, cwd);
        if (NULL != tmp) {
            free(tokens[0]);
            tokens[0] = tmp;
            opal_argv_free(lines);
            return tokens;
        }

        /* Didn't find it */
        opal_argv_free(tokens);
    }

    /* Doh -- didn't find anything */
    opal_argv_free(lines);
    return NULL;
}
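
/*
 * Verify that at least one agent from the given list (or, if NULL, the
 * MCA-configured default agent list) can be found and executed on the
 * given path; the argv built by the search is discarded.
 */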
static int rsh_launch_agent_lookup(const char *agent_list, char *path)
{
    char **tmp;

    OPAL_OUTPUT_VERBOSE((5, orte_plm_globals.output,
                         "%s plm:rsh_lookup on agent %s path %s",
                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                         (NULL == agent_list) ? mca_plm_rsh_component.agent : agent_list,
                         (NULL == path) ? "NULL" : path));
    if (NULL == (tmp = orte_plm_rsh_search(agent_list, path))) {
        return ORTE_ERR_NOT_FOUND;
    }

    /* if we got here, then one of the given agents could be found */
    opal_argv_free(tmp);
    return ORTE_SUCCESS;
}