/*
 * Copyright (c) 2004-2010 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation. All rights reserved.
 * Copyright (c) 2004-2009 The University of Tennessee and The University
 *                         of Tennessee Research Foundation. All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart. All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 *
 */
#include "orte_config.h"
#include "orte/constants.h"

#include <sys/types.h>
#include <stdio.h>
#ifdef HAVE_FCNTL_H
#include <fcntl.h>
#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <stdlib.h>

#include "opal/event/event.h"
#include "opal/runtime/opal.h"
#include "opal/mca/paffinity/paffinity.h"

#include "orte/util/show_help.h"
#include "opal/mca/mca.h"
#include "opal/mca/base/base.h"
#include "opal/mca/base/mca_base_param.h"
#include "opal/util/output.h"
#include "opal/util/opal_sos.h"
#include "opal/util/malloc.h"
#include "opal/util/argv.h"

#include "orte/mca/rml/base/base.h"
#include "orte/mca/rml/rml_types.h"
#include "orte/mca/routed/base/base.h"
#include "orte/mca/routed/routed.h"
#include "orte/mca/errmgr/base/base.h"
#include "orte/mca/grpcomm/base/base.h"
#include "orte/mca/iof/base/base.h"
#include "orte/mca/ess/base/base.h"
#include "orte/mca/ess/ess.h"
#include "orte/mca/ras/base/base.h"
#include "orte/mca/plm/base/base.h"

#include "orte/mca/rmaps/base/base.h"
|
2010-03-13 02:57:50 +03:00
|
|
|
#if OPAL_ENABLE_FT_CR == 1
|
2008-02-28 04:57:57 +03:00
|
|
|
#include "orte/mca/snapc/base/base.h"
|
|
|
|
#endif
|
|
|
|
#include "orte/mca/filem/base/base.h"
|
|
|
|
#include "orte/util/proc_info.h"
|
|
|
|
#include "orte/util/session_dir.h"
|
|
|
|
#include "orte/util/name_fns.h"
|
2008-11-01 00:10:00 +03:00
|
|
|
#include "orte/util/nidmap.h"
|
2008-02-28 04:57:57 +03:00
|
|
|
|
|
|
|
#include "orte/runtime/runtime.h"
|
|
|
|
#include "orte/runtime/orte_wait.h"
|
|
|
|
#include "orte/runtime/orte_globals.h"
|
|
|
|
|
|
|
|
#include "orte/runtime/orte_cr.h"
|
|
|
|
#include "orte/mca/ess/ess.h"
|
|
|
|
#include "orte/mca/ess/base/base.h"
|
|
|
|
#include "orte/mca/ess/env/ess_env.h"
|
|
|
|
|
|
|
|
static int env_set_name(void);

static int rte_init(void);
static int rte_finalize(void);
static uint8_t proc_get_locality(orte_process_name_t *proc);
static orte_vpid_t proc_get_daemon(orte_process_name_t *proc);
static char* proc_get_hostname(orte_process_name_t *proc);
static orte_local_rank_t proc_get_local_rank(orte_process_name_t *proc);
static orte_node_rank_t proc_get_node_rank(orte_process_name_t *proc);
static int update_pidmap(opal_byte_object_t *bo);
static int update_nidmap(opal_byte_object_t *bo);

#if OPAL_ENABLE_FT_CR == 1
static int rte_ft_event(int state);
#endif

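/*
 * Module function table exposed by the "env" ESS component.  The ORTE
 * runtime calls through these pointers for init/finalize and for the
 * nidmap-based locality and rank lookups implemented below.
 */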
orte_ess_base_module_t orte_ess_env_module = {
    rte_init,
    rte_finalize,
    orte_ess_base_app_abort,
    proc_get_locality,
    proc_get_daemon,
    proc_get_hostname,
    proc_get_local_rank,
    proc_get_node_rank,
    update_pidmap,
    update_nidmap,
    orte_ess_base_query_sys_info,
#if OPAL_ENABLE_FT_CR == 1
    rte_ft_event
#else
    NULL
#endif
};

/*
 * Local variables
 */
static orte_node_rank_t my_node_rank=ORTE_NODE_RANK_INVALID;

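/*
 * rte_init: complete the runtime setup for this process.  Daemons and
 * tools use their respective base setup routines and return early;
 * application procs run the base app setup and then build their nidmap
 * from the launch-provided sync buffer.
 */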
static int rte_init(void)
{
    int ret;
    char *error = NULL;
    char **hosts = NULL;
    char *nodelist;

    /* run the prolog */
    if (ORTE_SUCCESS != (ret = orte_ess_base_std_prolog())) {
        error = "orte_ess_base_std_prolog";
        goto error;
    }

    /* Start by getting a unique name from the enviro */
    env_set_name();

    /* if I am a daemon, complete my setup using the
     * default procedure
     */
    if (ORTE_PROC_IS_DAEMON) {
        /* get the list of nodes used for this job */
        nodelist = getenv("OMPI_MCA_orte_nodelist");

        if (NULL != nodelist) {
            /* split the node list into an argv array */
            hosts = opal_argv_split(nodelist, ',');
        }
        if (ORTE_SUCCESS != (ret = orte_ess_base_orted_setup(hosts))) {
            ORTE_ERROR_LOG(ret);
            error = "orte_ess_base_orted_setup";
            goto error;
        }
        opal_argv_free(hosts);
        return ORTE_SUCCESS;
    }

    if (ORTE_PROC_IS_TOOL) {
        /* otherwise, if I am a tool proc, use that procedure */
        if (ORTE_SUCCESS != (ret = orte_ess_base_tool_setup())) {
            ORTE_ERROR_LOG(ret);
            error = "orte_ess_base_tool_setup";
            goto error;
        }
        /* as a tool, I don't need a nidmap - so just return now */
        return ORTE_SUCCESS;
    }

    /* otherwise, I must be an application process - use
     * the default procedure to finish my setup
     */
    if (ORTE_SUCCESS != (ret = orte_ess_base_app_setup())) {
        ORTE_ERROR_LOG(ret);
        error = "orte_ess_base_app_setup";
        goto error;
    }

    /* if one was provided, build my nidmap */
    if (ORTE_SUCCESS != (ret = orte_util_nidmap_init(orte_process_info.sync_buf))) {
        ORTE_ERROR_LOG(ret);
        error = "orte_util_nidmap_init";
        goto error;
    }

    return ORTE_SUCCESS;

error:
|
This commit represents a bunch of work on a Mercurial side branch. As
such, the commit message back to the master SVN repository is fairly
long.
= ORTE Job-Level Output Messages =
Add two new interfaces that should be used for all new code throughout
the ORTE and OMPI layers (we already make the search-and-replace on
the existing ORTE / OMPI layers):
* orte_output(): (and corresponding friends ORTE_OUTPUT,
orte_output_verbose, etc.) This function sends the output directly
to the HNP for processing as part of a job-specific output
channel. It supports all the same outputs as opal_output()
(syslog, file, stdout, stderr), but for stdout/stderr, the output
is sent to the HNP for processing and output. More on this below.
* orte_show_help(): This function is a drop-in-replacement for
opal_show_help(), with two differences in functionality:
1. the rendered text help message output is sent to the HNP for
display (rather than outputting directly into the process' stderr
stream)
1. the HNP detects duplicate help messages and does not display them
(so that you don't see the same error message N times, once from
each of your N MPI processes); instead, it counts "new" instances
of the help message and displays a message every ~5 seconds when
there are new ones ("I got X new copies of the help message...")
opal_show_help and opal_output still exist, but they only output in
the current process. The intent for the new orte_* functions is that
they can apply job-level intelligence to the output. As such, we
recommend that all new ORTE and OMPI code use the new orte_*
functions, not thei opal_* functions.
=== New code ===
For ORTE and OMPI programmers, here's what you need to do differently
in new code:
* Do not include opal/util/show_help.h or opal/util/output.h.
Instead, include orte/util/output.h (this one header file has
declarations for both the orte_output() series of functions and
orte_show_help()).
* Effectively s/opal_output/orte_output/gi throughout your code.
Note that orte_output_open() takes a slightly different argument
list (as a way to pass data to the filtering stream -- see below),
so you if explicitly call opal_output_open(), you'll need to
slightly adapt to the new signature of orte_output_open().
* Literally s/opal_show_help/orte_show_help/. The function signature
is identical.
=== Notes ===
* orte_output'ing to stream 0 will do similar to what
opal_output'ing did, so leaving a hard-coded "0" as the first
argument is safe.
* For systems that do not use ORTE's RML or the HNP, the effect of
orte_output_* and orte_show_help will be identical to their opal
counterparts (the additional information passed to
orte_output_open() will be lost!). Indeed, the orte_* functions
simply become trivial wrappers to their opal_* counterparts. Note
that we have not tested this; the code is simple but it is quite
possible that we mucked something up.
= Filter Framework =
Messages sent view the new orte_* functions described above and
messages output via the IOF on the HNP will now optionally be passed
through a new "filter" framework before being output to
stdout/stderr. The "filter" OPAL MCA framework is intended to allow
preprocessing to messages before they are sent to their final
destinations. The first component that was written in the filter
framework was to create an XML stream, segregating all the messages
into different XML tags, etc. This will allow 3rd party tools to read
the stdout/stderr from the HNP and be able to know exactly what each
text message is (e.g., a help message, another OMPI infrastructure
message, stdout from the user process, stderr from the user process,
etc.).
Filtering is not active by default. Filter components must be
specifically requested, such as:
{{{
$ mpirun --mca filter xml ...
}}}
There can only be one filter component active.
= New MCA Parameters =
The new functionality described above introduces two new MCA
parameters:
* '''orte_base_help_aggregate''': Defaults to 1 (true), meaning that
help messages will be aggregated, as described above. If set to 0,
all help messages will be displayed, even if they are duplicates
(i.e., the original behavior).
* '''orte_base_show_output_recursions''': An MCA parameter to help
debug one of the known issues, described below. It is likely that
this MCA parameter will disappear before v1.3 final.
= Known Issues =
* The XML filter component is not complete. The current output from
this component is preliminary and not real XML. A bit more work
needs to be done to configure.m4 search for an appropriate XML
library/link it in/use it at run time.
* There are possible recursion loops in the orte_output() and
orte_show_help() functions -- e.g., if RML send calls orte_output()
or orte_show_help(). We have some ideas how to fix these, but
figured that it was ok to commit before feature freeze with known
issues. The code currently contains sub-optimal workarounds so
that this will not be a problem, but it would be good to actually
solve the problem rather than have hackish workarounds before v1.3 final.
This commit was SVN r18434.
2008-05-14 00:00:55 +04:00
|
|
|
orte_show_help("help-orte-runtime.txt",
|
2008-02-28 04:57:57 +03:00
|
|
|
"orte_init:startup:internal-failure",
|
|
|
|
true, error, ORTE_ERROR_NAME(ret), ret);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
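/*
 * rte_finalize: tear down the per-process-type state set up in rte_init,
 * then release the nidmap/jobmap storage (tools skip that step since
 * they never created a nidmap).
 */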
static int rte_finalize(void)
{
    int ret;

    /* if I am a daemon, finalize using the default procedure */
    if (ORTE_PROC_IS_DAEMON) {
        if (ORTE_SUCCESS != (ret = orte_ess_base_orted_finalize())) {
            ORTE_ERROR_LOG(ret);
        }
    } else if (ORTE_PROC_IS_TOOL) {
        /* otherwise, if I am a tool proc, use that procedure */
        if (ORTE_SUCCESS != (ret = orte_ess_base_tool_finalize())) {
            ORTE_ERROR_LOG(ret);
        }
        /* as a tool, I didn't create a nidmap - so just return now */
        return ret;
    } else {
        /* otherwise, I must be an application process
         * use the default procedure to finish
         */
        if (ORTE_SUCCESS != (ret = orte_ess_base_app_finalize())) {
            ORTE_ERROR_LOG(ret);
        }
    }

    /* deconstruct the nidmap and jobmap arrays */
    orte_util_nidmap_finalize();

    return ret;
}

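/*
 * proc_get_locality: report the locality of the given proc relative to
 * this one as a bitmask of OPAL_PROC_ON_* flags.  This component only
 * knows which daemon hosts each proc, so it can distinguish "same node"
 * from "remote" but nothing finer (e.g., same socket).
 */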
static uint8_t proc_get_locality(orte_process_name_t *proc)
{
    orte_nid_t *nid;

    if (NULL == (nid = orte_util_lookup_nid(proc))) {
        ORTE_ERROR_LOG(ORTE_ERR_NOT_FOUND);
        return OPAL_PROC_NON_LOCAL;
    }

    if (nid->daemon == ORTE_PROC_MY_DAEMON->vpid) {
        OPAL_OUTPUT_VERBOSE((2, orte_ess_base_output,
                             "%s ess:env: proc %s on LOCAL NODE",
                             ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                             ORTE_NAME_PRINT(proc)));
        return (OPAL_PROC_ON_NODE | OPAL_PROC_ON_CU | OPAL_PROC_ON_CLUSTER);
    }

    OPAL_OUTPUT_VERBOSE((2, orte_ess_base_output,
                         "%s ess:env: proc %s is REMOTE",
                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                         ORTE_NAME_PRINT(proc)));

    return OPAL_PROC_NON_LOCAL;
}

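/*
 * proc_get_daemon: return the vpid of the daemon hosting the given proc.
 * Daemons host themselves; unknown procs yield ORTE_VPID_INVALID.
 */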
static orte_vpid_t proc_get_daemon(orte_process_name_t *proc)
{
    orte_nid_t *nid;

    if (ORTE_JOBID_IS_DAEMON(proc->jobid)) {
        return proc->vpid;
    }

    if (NULL == (nid = orte_util_lookup_nid(proc))) {
        return ORTE_VPID_INVALID;
    }

    OPAL_OUTPUT_VERBOSE((2, orte_ess_base_output,
                         "%s ess:env: proc %s is hosted by daemon %s",
                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                         ORTE_NAME_PRINT(proc),
                         ORTE_VPID_PRINT(nid->daemon)));

    return nid->daemon;
}

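/*
 * proc_get_hostname: look up the node name for the given proc via the
 * nidmap; returns NULL if the proc is not known.
 */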
static char* proc_get_hostname(orte_process_name_t *proc)
{
    orte_nid_t *nid;

    if (NULL == (nid = orte_util_lookup_nid(proc))) {
        ORTE_ERROR_LOG(ORTE_ERR_NOT_FOUND);
        return NULL;
    }

    OPAL_OUTPUT_VERBOSE((2, orte_ess_base_output,
                         "%s ess:env: proc %s is on host %s",
                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                         ORTE_NAME_PRINT(proc),
                         nid->name));

    return nid->name;
}

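/*
 * proc_get_local_rank: return the proc's rank among the procs of its job
 * located on the same node, as recorded in the pidmap.
 */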
static orte_local_rank_t proc_get_local_rank(orte_process_name_t *proc)
{
    orte_pmap_t *pmap;

    if (NULL == (pmap = orte_util_lookup_pmap(proc))) {
        ORTE_ERROR_LOG(ORTE_ERR_NOT_FOUND);
        return ORTE_LOCAL_RANK_INVALID;
    }

    OPAL_OUTPUT_VERBOSE((2, orte_ess_base_output,
                         "%s ess:env: proc %s has local rank %d",
                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                         ORTE_NAME_PRINT(proc),
                         (int)pmap->local_rank));
|
2008-04-30 23:49:53 +04:00
|
|
|
|
2008-07-03 21:53:37 +04:00
|
|
|
return pmap->local_rank;
|
2008-04-30 23:49:53 +04:00
|
|
|
}
|
|
|
|
|
2008-09-25 17:39:08 +04:00
|
|
|
static orte_node_rank_t proc_get_node_rank(orte_process_name_t *proc)
|
2008-04-30 23:49:53 +04:00
|
|
|
{
|
2008-07-03 21:53:37 +04:00
|
|
|
orte_pmap_t *pmap;
|
|
|
|
|
2009-05-16 08:15:55 +04:00
|
|
|
/* is this me? */
|
|
|
|
if (proc->jobid == ORTE_PROC_MY_NAME->jobid &&
|
|
|
|
proc->vpid == ORTE_PROC_MY_NAME->vpid) {
|
|
|
|
/* yes it is - return my stored rank. This is necessary
|
|
|
|
* because the pidmap will not have arrived when I
|
|
|
|
* am starting up, and if we use static ports, then
|
|
|
|
* I need to know my node rank during init
|
|
|
|
*/
|
|
|
|
return my_node_rank;
|
|
|
|
}
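The self-check above exists because the pidmap may not have been delivered yet while the process is still initializing (e.g. when static ports are in use), so queries about the calling process must be answered from a locally cached value. A compact standalone restatement of that pattern, with placeholder types and a stubbed lookup (none of this is ORTE code):

#include <stddef.h>
#include <stdint.h>

#define TOY_RANK_INVALID UINT16_MAX

/* Stand-in for the shared map lookup; returns NULL to mimic the map not
 * having arrived yet during early init. */
static const uint16_t *toy_lookup_rank(uint32_t vpid)
{
    (void)vpid;
    return NULL;
}

static uint16_t toy_get_rank(uint32_t my_vpid, uint16_t my_cached_rank,
                             uint32_t queried_vpid)
{
    if (queried_vpid == my_vpid) {
        return my_cached_rank;                /* answer for self without the map */
    }
    const uint16_t *entry = toy_lookup_rank(queried_vpid);
    return (NULL == entry) ? TOY_RANK_INVALID : *entry;
}

int main(void)
{
    /* a query about myself succeeds even though the map is unavailable */
    return (0 == toy_get_rank(3, 0, 3)) ? 0 : 1;
}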
|
|
|
|
|
2009-01-07 17:58:38 +03:00
|
|
|
if (NULL == (pmap = orte_util_lookup_pmap(proc))) {
|
2009-01-21 03:19:37 +03:00
|
|
|
return ORTE_NODE_RANK_INVALID;
|
2008-07-03 21:53:37 +04:00
|
|
|
}
|
2008-04-30 23:49:53 +04:00
|
|
|
|
2008-06-09 18:53:58 +04:00
|
|
|
OPAL_OUTPUT_VERBOSE((2, orte_ess_base_output,
|
2008-04-30 23:49:53 +04:00
|
|
|
"%s ess:env: proc %s has node rank %d",
|
2009-03-06 00:50:47 +03:00
|
|
|
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
|
|
|
|
ORTE_NAME_PRINT(proc),
|
2008-07-03 21:53:37 +04:00
|
|
|
(int)pmap->node_rank));
|
2008-04-30 23:49:53 +04:00
|
|
|
|
2008-07-03 21:53:37 +04:00
|
|
|
return pmap->node_rank;
|
2008-04-30 23:49:53 +04:00
|
|
|
}
|
|
|
|
|
2008-11-18 18:35:50 +03:00
|
|
|
static int update_pidmap(opal_byte_object_t *bo)
|
2008-11-01 00:10:00 +03:00
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
2008-11-18 18:35:50 +03:00
|
|
|
OPAL_OUTPUT_VERBOSE((2, orte_ess_base_output,
|
|
|
|
"%s ess:env: updating pidmap",
|
2009-03-06 00:50:47 +03:00
|
|
|
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME)));
|
2008-11-01 00:10:00 +03:00
|
|
|
|
|
|
|
/* build the pmap */
|
2009-01-07 17:58:38 +03:00
|
|
|
if (ORTE_SUCCESS != (ret = orte_util_decode_pidmap(bo))) {
|
2008-11-01 00:10:00 +03:00
|
|
|
ORTE_ERROR_LOG(ret);
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int update_nidmap(opal_byte_object_t *bo)
|
|
|
|
{
|
|
|
|
int rc;
|
|
|
|
/* decode the nidmap - the util will know what to do */
|
2009-01-07 17:58:38 +03:00
|
|
|
if (ORTE_SUCCESS != (rc = orte_util_decode_nodemap(bo))) {
|
2008-11-01 00:10:00 +03:00
|
|
|
ORTE_ERROR_LOG(rc);
|
|
|
|
}
|
|
|
|
return rc;
|
|
|
|
}
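Both update functions above hand an opal_byte_object_t (essentially a size plus a pointer to packed bytes) to a utility that decodes it. The toy sketch below illustrates just that shape of interface; the struct layout and payload format are illustrative assumptions, not ORTE's actual wire format:

#include <stdint.h>
#include <string.h>

typedef struct {
    int32_t  size;    /* number of valid bytes */
    uint8_t *bytes;   /* packed payload */
} toy_byte_object_t;

/* Decode a single uint32_t from the front of the object; returns 0 on
 * success, -1 if the object is missing or too small. */
static int toy_decode_u32(const toy_byte_object_t *bo, uint32_t *out)
{
    if (NULL == bo || NULL == bo->bytes ||
        bo->size < (int32_t)sizeof(uint32_t)) {
        return -1;
    }
    memcpy(out, bo->bytes, sizeof(uint32_t));
    return 0;
}

int main(void)
{
    uint8_t raw[4] = { 1, 0, 0, 0 };   /* little-endian 1, illustration only */
    toy_byte_object_t bo = { (int32_t)sizeof(raw), raw };
    uint32_t value = 0;
    return toy_decode_u32(&bo, &value);
}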
|
|
|
|
|
2008-02-28 04:57:57 +03:00
|
|
|
static int env_set_name(void)
|
|
|
|
{
|
2009-05-16 08:15:55 +04:00
|
|
|
char *tmp;
|
|
|
|
int rc;
|
2008-02-28 04:57:57 +03:00
|
|
|
orte_jobid_t jobid;
|
|
|
|
orte_vpid_t vpid;
|
|
|
|
|
2009-05-16 08:15:55 +04:00
|
|
|
mca_base_param_reg_string_name("orte", "ess_jobid", "Process jobid",
|
|
|
|
true, false, NULL, &tmp);
|
|
|
|
if (NULL == tmp) {
|
2008-02-28 04:57:57 +03:00
|
|
|
ORTE_ERROR_LOG(ORTE_ERR_NOT_FOUND);
|
|
|
|
return ORTE_ERR_NOT_FOUND;
|
|
|
|
}
|
2009-05-16 08:15:55 +04:00
|
|
|
if (ORTE_SUCCESS != (rc = orte_util_convert_string_to_jobid(&jobid, tmp))) {
|
2008-02-28 04:57:57 +03:00
|
|
|
ORTE_ERROR_LOG(rc);
|
|
|
|
return(rc);
|
|
|
|
}
|
2009-05-16 08:15:55 +04:00
|
|
|
free(tmp);
|
2008-02-28 04:57:57 +03:00
|
|
|
|
2009-05-16 08:15:55 +04:00
|
|
|
mca_base_param_reg_string_name("orte", "ess_vpid", "Process vpid",
|
|
|
|
true, false, NULL, &tmp);
|
|
|
|
if (NULL == tmp) {
|
2008-02-28 04:57:57 +03:00
|
|
|
ORTE_ERROR_LOG(ORTE_ERR_NOT_FOUND);
|
|
|
|
return ORTE_ERR_NOT_FOUND;
|
|
|
|
}
|
2009-05-16 08:15:55 +04:00
|
|
|
if (ORTE_SUCCESS != (rc = orte_util_convert_string_to_vpid(&vpid, tmp))) {
|
2008-02-28 04:57:57 +03:00
|
|
|
ORTE_ERROR_LOG(rc);
|
|
|
|
return(rc);
|
|
|
|
}
|
2009-05-16 08:15:55 +04:00
|
|
|
free(tmp);
|
2008-02-28 04:57:57 +03:00
|
|
|
|
|
|
|
ORTE_PROC_MY_NAME->jobid = jobid;
|
|
|
|
ORTE_PROC_MY_NAME->vpid = vpid;
|
|
|
|
|
2008-06-09 18:53:58 +04:00
|
|
|
OPAL_OUTPUT_VERBOSE((1, orte_ess_base_output,
|
2009-03-06 00:50:47 +03:00
|
|
|
"ess:env set name to %s", ORTE_NAME_PRINT(ORTE_PROC_MY_NAME)));
|
2008-02-28 04:57:57 +03:00
|
|
|
|
2009-05-16 08:15:55 +04:00
|
|
|
/* get my node rank in case we are using static ports - this won't
|
|
|
|
* be present for daemons, so don't error out if we don't have it
|
|
|
|
*/
|
|
|
|
mca_base_param_reg_string_name("orte", "ess_node_rank", "Process node rank",
|
|
|
|
true, false, NULL, &tmp);
|
|
|
|
if (NULL != tmp) {
|
|
|
|
my_node_rank = strtol(tmp, NULL, 10);
|
|
|
|
}
|
|
|
|
|
2008-02-28 04:57:57 +03:00
|
|
|
/* get the non-name common environmental variables */
|
|
|
|
if (ORTE_SUCCESS != (rc = orte_ess_env_get())) {
|
|
|
|
ORTE_ERROR_LOG(rc);
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ORTE_SUCCESS;
|
|
|
|
}
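env_set_name() above pulls the jobid and vpid strings out of MCA parameters and converts them with orte_util_convert_string_to_jobid/vpid. The standalone sketch below mirrors that flow in simplified form; the environment variable names follow Open MPI's usual OMPI_MCA_<param> export convention but are stated here as an assumption, and plain strtoul() stands in for the real conversion routines:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    /* assumed names, based on the OMPI_MCA_<param> convention */
    const char *jobid_str = getenv("OMPI_MCA_orte_ess_jobid");
    const char *vpid_str  = getenv("OMPI_MCA_orte_ess_vpid");

    if (NULL == jobid_str || NULL == vpid_str) {
        fprintf(stderr, "name info not found in environment\n");
        return 1;
    }

    /* simplified conversion; the real code uses orte_util_convert_string_to_* */
    unsigned long jobid = strtoul(jobid_str, NULL, 10);
    unsigned long vpid  = strtoul(vpid_str, NULL, 10);

    printf("set name to [%lu,%lu]\n", jobid, vpid);
    return 0;
}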
|
|
|
|
|
2010-03-13 02:57:50 +03:00
|
|
|
#if OPAL_ENABLE_FT_CR == 1
|
2008-03-05 07:57:23 +03:00
|
|
|
static int rte_ft_event(int state)
|
|
|
|
{
|
|
|
|
int ret, exit_status = ORTE_SUCCESS;
|
2009-05-04 15:07:40 +04:00
|
|
|
orte_proc_type_t svtype;
|
2008-03-05 07:57:23 +03:00
|
|
|
|
|
|
|
/******** Checkpoint Prep ********/
|
|
|
|
if(OPAL_CRS_CHECKPOINT == state) {
|
2008-04-23 04:17:12 +04:00
|
|
|
/*
|
|
|
|
* Notify SnapC
|
|
|
|
*/
|
|
|
|
if( ORTE_SUCCESS != (ret = orte_snapc.ft_event(OPAL_CRS_CHECKPOINT))) {
|
2009-09-23 21:05:49 +04:00
|
|
|
ORTE_ERROR_LOG(ret);
|
2008-04-23 04:17:12 +04:00
|
|
|
exit_status = ret;
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2008-03-05 07:57:23 +03:00
|
|
|
/*
|
2008-04-23 04:17:12 +04:00
|
|
|
* Notify Routed
|
|
|
|
*/
|
|
|
|
if( ORTE_SUCCESS != (ret = orte_routed.ft_event(OPAL_CRS_CHECKPOINT))) {
|
2009-09-23 21:05:49 +04:00
|
|
|
ORTE_ERROR_LOG(ret);
|
2008-04-23 04:17:12 +04:00
|
|
|
exit_status = ret;
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Notify RML -> OOB
|
2008-03-05 07:57:23 +03:00
|
|
|
*/
|
|
|
|
if( ORTE_SUCCESS != (ret = orte_rml.ft_event(OPAL_CRS_CHECKPOINT))) {
|
2009-09-23 21:05:49 +04:00
|
|
|
ORTE_ERROR_LOG(ret);
|
2008-03-05 07:57:23 +03:00
|
|
|
exit_status = ret;
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
/******** Continue Recovery ********/
|
|
|
|
else if (OPAL_CRS_CONTINUE == state ) {
|
2010-03-24 00:28:02 +03:00
|
|
|
OPAL_OUTPUT_VERBOSE((1, orte_ess_base_output,
|
|
|
|
"ess:env ft_event(%2d) - %s is Continuing",
|
|
|
|
state, ORTE_NAME_PRINT(ORTE_PROC_MY_NAME)));
|
|
|
|
|
2008-03-05 07:57:23 +03:00
|
|
|
/*
|
2008-04-23 04:17:12 +04:00
|
|
|
* Notify RML -> OOB
|
2008-03-05 07:57:23 +03:00
|
|
|
*/
|
|
|
|
if( ORTE_SUCCESS != (ret = orte_rml.ft_event(OPAL_CRS_CONTINUE))) {
|
2009-09-23 21:05:49 +04:00
|
|
|
ORTE_ERROR_LOG(ret);
|
2008-03-05 07:57:23 +03:00
|
|
|
exit_status = ret;
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2008-04-23 04:17:12 +04:00
|
|
|
/*
|
|
|
|
* Notify Routed
|
|
|
|
*/
|
|
|
|
if( ORTE_SUCCESS != (ret = orte_routed.ft_event(OPAL_CRS_CONTINUE))) {
|
2009-09-23 21:05:49 +04:00
|
|
|
ORTE_ERROR_LOG(ret);
|
2008-04-23 04:17:12 +04:00
|
|
|
exit_status = ret;
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Notify SnapC
|
|
|
|
*/
|
|
|
|
if( ORTE_SUCCESS != (ret = orte_snapc.ft_event(OPAL_CRS_CONTINUE))) {
|
2009-09-23 21:05:49 +04:00
|
|
|
ORTE_ERROR_LOG(ret);
|
2008-04-23 04:17:12 +04:00
|
|
|
exit_status = ret;
|
|
|
|
goto cleanup;
|
|
|
|
}
|
A number of C/R enhancements per RFC below:
http://www.open-mpi.org/community/lists/devel/2010/07/8240.php
Documentation:
http://osl.iu.edu/research/ft/
Major Changes:
--------------
* Added C/R-enabled Debugging support.
Enabled with the --enable-crdebug flag. See the following website for more information:
http://osl.iu.edu/research/ft/crdebug/
* Added Stable Storage (SStore) framework for checkpoint storage
* 'central' component does a direct to central storage save
* 'stage' component stages checkpoints to central storage while the application continues execution.
* 'stage' supports offline compression of checkpoints before moving (sstore_stage_compress)
* 'stage' supports local caching of checkpoints to improve automatic recovery (sstore_stage_caching)
* Added Compression (compress) framework to support checkpoint compression
* Add two new ErrMgr recovery policies
* {{{crmig}}} C/R Process Migration
* {{{autor}}} C/R Automatic Recovery
* Added the {{{ompi-migrate}}} command line tool to support the {{{crmig}}} ErrMgr component
* Added CR MPI Ext functions (enable them with {{{--enable-mpi-ext=cr}}} configure option)
* {{{OMPI_CR_Checkpoint}}} (Fixes trac:2342)
* {{{OMPI_CR_Restart}}}
* {{{OMPI_CR_Migrate}}} (may need some more work for mapping rules)
* {{{OMPI_CR_INC_register_callback}}} (Fixes trac:2192)
* {{{OMPI_CR_Quiesce_start}}}
* {{{OMPI_CR_Quiesce_checkpoint}}}
* {{{OMPI_CR_Quiesce_end}}}
* {{{OMPI_CR_self_register_checkpoint_callback}}}
* {{{OMPI_CR_self_register_restart_callback}}}
* {{{OMPI_CR_self_register_continue_callback}}}
* The ErrMgr predicted_fault() interface has been changed to take an opal_list_t of ErrMgr defined types. This will allow us to better support a wider range of fault prediction services in the future.
* Add a progress meter to:
* FileM rsh (filem_rsh_process_meter)
* SnapC full (snapc_full_progress_meter)
* SStore stage (sstore_stage_progress_meter)
* Added 2 new command line options to ompi-restart
* --showme : Display the full command line that would have been exec'ed.
* --mpirun_opts : Command line options to pass directly to mpirun. (Fixes trac:2413)
* Deprecated some MCA params:
* crs_base_snapshot_dir deprecated, use sstore_stage_local_snapshot_dir
* snapc_base_global_snapshot_dir deprecated, use sstore_base_global_snapshot_dir
* snapc_base_global_shared deprecated, use sstore_stage_global_is_shared
* snapc_base_store_in_place deprecated, replaced with different components of SStore
* snapc_base_global_snapshot_ref deprecated, use sstore_base_global_snapshot_ref
* snapc_base_establish_global_snapshot_dir deprecated, never well supported
* snapc_full_skip_filem deprecated, use sstore_stage_skip_filem
Minor Changes:
--------------
* Fixes trac:1924 : {{{ompi-restart}}} now recognizes path prefixed checkpoint handles and does the right thing.
* Fixes trac:2097 : {{{ompi-info}}} should now report all available CRS components
* Fixes trac:2161 : Manual checkpoint movement. A user can 'mv' a checkpoint directory from the original location to another and still restart from it.
* Fixes trac:2208 : Honor various TMPDIR variables instead of forcing {{{/tmp}}}
* Move {{{ompi_cr_continue_like_restart}}} to {{{orte_cr_continue_like_restart}}} to be more flexible in where this should be set.
* opal_crs_base_metadata_write* functions have been moved to SStore to support a wider range of metadata handling functionality.
* Cleanup the CRS framework and components to work with the SStore framework.
* Cleanup the SnapC framework and components to work with the SStore framework (cleans up these code paths considerably).
* Add 'quiesce' hook to CRCP for a future enhancement.
* We now require a BLCR version that supports {{{cr_request_file()}}} or {{{cr_request_checkpoint()}}} in order to make the code more maintainable. Note that {{{cr_request_file}}} has been deprecated since 0.7.0, so we prefer to use {{{cr_request_checkpoint()}}}.
* Add optional application level INC callbacks (registered through the CR MPI Ext interface).
* Increase the {{{opal_cr_thread_sleep_wait}}} parameter to 1000 microseconds to make the C/R thread less aggressive.
* {{{opal-restart}}} now looks for cache directories before falling back on stable storage when asked.
* {{{opal-restart}}} also supports local decompression before restarting
* {{{orte-checkpoint}}} now uses the SStore framework to work with the metadata
* {{{orte-restart}}} now uses the SStore framework to work with the metadata
* Remove the {{{orte-restart}}} preload option. This was removed since the user only needs to select the 'stage' component in order to support this functionality.
* Since the '-am' parameter is saved in the metadata, {{{ompi-restart}}} no longer hard codes {{{-am ft-enable-cr}}}.
* Fix {{{hnp}}} ErrMgr so that if a previous component in the stack has 'fixed' the problem, then it should be skipped.
* Make sure to decrement 'num_local_procs' in the orted when one goes away.
* odls now checks the SStore framework to see if it needs to load any checkpoint files before launching (to support 'stage'). This separates the SStore logic from the --preload-[binary|files] options.
* Add unique IDs to the named pipes established between the orted and the app in SnapC. This is to better support migration and automatic recovery activities.
* Improve the checks for 'already checkpointing' error path.
* Add a recovery output timer to show how long it takes to restart a job
* Do a better job of cleaning up the old session directory on restart.
* Add a local module to the autor and crmig ErrMgr components. These small modules prevent the 'orted' component from attempting a local recovery (Which does not work for MPI apps at the moment)
* Add a fix for bounding the checkpointable region between MPI_Init and MPI_Finalize.
This commit was SVN r23587.
The following Trac tickets were found above:
Ticket 1924 --> https://svn.open-mpi.org/trac/ompi/ticket/1924
Ticket 2097 --> https://svn.open-mpi.org/trac/ompi/ticket/2097
Ticket 2161 --> https://svn.open-mpi.org/trac/ompi/ticket/2161
Ticket 2192 --> https://svn.open-mpi.org/trac/ompi/ticket/2192
Ticket 2208 --> https://svn.open-mpi.org/trac/ompi/ticket/2208
Ticket 2342 --> https://svn.open-mpi.org/trac/ompi/ticket/2342
Ticket 2413 --> https://svn.open-mpi.org/trac/ompi/ticket/2413
2010-08-11 00:51:11 +04:00
|
|
|
|
|
|
|
if( orte_cr_continue_like_restart ) {
|
|
|
|
/*
|
|
|
|
* Barrier to make sure all processes have been successfully restarted before
|
|
|
|
* we try to remove restart-only files.
|
|
|
|
*/
|
|
|
|
if (ORTE_SUCCESS != (ret = orte_grpcomm.barrier())) {
|
|
|
|
opal_output(0, "ess:env: ft_event(%2d): Failed in orte_grpcomm.barrier (%d)",
|
|
|
|
state, ret);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
if( orte_cr_flush_restart_files ) {
|
|
|
|
OPAL_OUTPUT_VERBOSE((1, orte_ess_base_output,
|
|
|
|
"ess:env ft_event(%2d): %s "
|
|
|
|
"Cleanup restart files...",
|
|
|
|
state, ORTE_NAME_PRINT(ORTE_PROC_MY_NAME)));
|
|
|
|
opal_crs_base_cleanup_flush();
|
|
|
|
}
|
|
|
|
}
|
2008-03-05 07:57:23 +03:00
|
|
|
}
|
|
|
|
/******** Restart Recovery ********/
|
|
|
|
else if (OPAL_CRS_RESTART == state ) {
|
2010-03-24 00:28:02 +03:00
|
|
|
OPAL_OUTPUT_VERBOSE((1, orte_ess_base_output,
|
|
|
|
"ess:env ft_event(%2d) - %s is Restarting",
|
|
|
|
state, ORTE_NAME_PRINT(ORTE_PROC_MY_NAME)));
|
|
|
|
|
2008-03-05 07:57:23 +03:00
|
|
|
/*
|
2008-04-23 04:17:12 +04:00
|
|
|
* This should follow the ess init() function
|
|
|
|
*/
|
|
|
|
|
2008-05-01 21:48:13 +04:00
|
|
|
/*
|
2008-07-07 18:55:29 +04:00
|
|
|
* Clear nidmap and jmap
|
2008-05-01 21:48:13 +04:00
|
|
|
*/
|
2009-01-07 17:58:38 +03:00
|
|
|
orte_util_nidmap_finalize();
|
2008-05-01 21:48:13 +04:00
|
|
|
|
2008-04-23 04:17:12 +04:00
|
|
|
/*
|
|
|
|
* - Reset Contact information
|
|
|
|
*/
|
|
|
|
if( ORTE_SUCCESS != (ret = env_set_name() ) ) {
|
|
|
|
exit_status = ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Notify RML -> OOB
|
2008-03-05 07:57:23 +03:00
|
|
|
*/
|
|
|
|
if( ORTE_SUCCESS != (ret = orte_rml.ft_event(OPAL_CRS_RESTART))) {
|
2009-09-23 21:05:49 +04:00
|
|
|
ORTE_ERROR_LOG(ret);
|
2008-03-05 07:57:23 +03:00
|
|
|
exit_status = ret;
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2008-04-23 04:17:12 +04:00
|
|
|
* Restart the routed framework
|
|
|
|
* JJH: Lie to the finalize function so it does not try to contact the daemon.
|
2008-03-05 07:57:23 +03:00
|
|
|
*/
|
2009-05-04 15:07:40 +04:00
|
|
|
svtype = orte_process_info.proc_type;
|
|
|
|
orte_process_info.proc_type = ORTE_PROC_TOOL;
|
2008-04-23 04:17:12 +04:00
|
|
|
if (ORTE_SUCCESS != (ret = orte_routed.finalize()) ) {
|
2009-09-23 21:05:49 +04:00
|
|
|
ORTE_ERROR_LOG(ret);
|
2008-03-05 07:57:23 +03:00
|
|
|
exit_status = ret;
|
2008-04-23 04:17:12 +04:00
|
|
|
goto cleanup;
|
|
|
|
}
|
2009-05-04 15:07:40 +04:00
|
|
|
orte_process_info.proc_type = svtype;
|
2008-04-23 04:17:12 +04:00
|
|
|
if (ORTE_SUCCESS != (ret = orte_routed.initialize()) ) {
|
2009-09-23 21:05:49 +04:00
|
|
|
ORTE_ERROR_LOG(ret);
|
2008-04-23 04:17:12 +04:00
|
|
|
exit_status = ret;
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Group Comm - Clean out stale data
|
|
|
|
*/
|
|
|
|
orte_grpcomm.finalize();
|
|
|
|
if (ORTE_SUCCESS != (ret = orte_grpcomm.init())) {
|
2009-09-23 21:05:49 +04:00
|
|
|
ORTE_ERROR_LOG(ret);
|
2008-04-23 04:17:12 +04:00
|
|
|
exit_status = ret;
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
if (ORTE_SUCCESS != (ret = orte_grpcomm.purge_proc_attrs())) {
|
2009-09-23 21:05:49 +04:00
|
|
|
ORTE_ERROR_LOG(ret);
|
2008-04-23 04:17:12 +04:00
|
|
|
exit_status = ret;
|
|
|
|
goto cleanup;
|
2008-03-05 07:57:23 +03:00
|
|
|
}
|
|
|
|
|
2008-04-23 04:17:12 +04:00
|
|
|
/*
|
|
|
|
* Restart the PLM - Does nothing at the moment, but included for completeness
|
|
|
|
*/
|
|
|
|
if (ORTE_SUCCESS != (ret = orte_plm.finalize())) {
|
|
|
|
ORTE_ERROR_LOG(ret);
|
|
|
|
exit_status = ret;
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (ORTE_SUCCESS != (ret = orte_plm.init())) {
|
|
|
|
ORTE_ERROR_LOG(ret);
|
|
|
|
exit_status = ret;
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* RML - Enable communications
|
|
|
|
*/
|
|
|
|
if (ORTE_SUCCESS != (ret = orte_rml.enable_comm())) {
|
2009-09-23 21:05:49 +04:00
|
|
|
ORTE_ERROR_LOG(ret);
|
2008-04-23 04:17:12 +04:00
|
|
|
exit_status = ret;
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2008-03-05 07:57:23 +03:00
|
|
|
/*
|
2008-04-23 04:17:12 +04:00
|
|
|
* Notify Routed
|
2008-03-05 07:57:23 +03:00
|
|
|
*/
|
2008-04-23 04:17:12 +04:00
|
|
|
if( ORTE_SUCCESS != (ret = orte_routed.ft_event(OPAL_CRS_RESTART))) {
|
2009-09-23 21:05:49 +04:00
|
|
|
ORTE_ERROR_LOG(ret);
|
2008-03-05 07:57:23 +03:00
|
|
|
exit_status = ret;
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2010-03-24 00:28:02 +03:00
|
|
|
/* if one was provided, build my nidmap */
|
|
|
|
if (ORTE_SUCCESS != (ret = orte_util_nidmap_init(orte_process_info.sync_buf))) {
|
2009-09-23 21:05:49 +04:00
|
|
|
ORTE_ERROR_LOG(ret);
|
2008-03-05 07:57:23 +03:00
|
|
|
exit_status = ret;
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2010-08-11 00:51:11 +04:00
|
|
|
/*
|
|
|
|
* Barrier to make sure all processes have been successfully restarted before
|
|
|
|
* we try to remove restart-only files.
|
|
|
|
*/
|
|
|
|
if (ORTE_SUCCESS != (ret = orte_grpcomm.barrier())) {
|
|
|
|
opal_output(0, "ess:env ft_event(%2d): Failed in orte_grpcomm.barrier (%d)",
|
|
|
|
state, ret);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
if( orte_cr_flush_restart_files ) {
|
|
|
|
OPAL_OUTPUT_VERBOSE((1, orte_ess_base_output,
|
|
|
|
"ess:env ft_event(%2d): %s "
|
|
|
|
"Cleanup restart files...",
|
|
|
|
state, ORTE_NAME_PRINT(ORTE_PROC_MY_NAME)));
|
|
|
|
|
|
|
|
opal_crs_base_cleanup_flush();
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Session directory re-init
|
|
|
|
*/
|
|
|
|
if (orte_create_session_dirs) {
|
|
|
|
if (ORTE_SUCCESS != (ret = orte_session_dir(true,
|
|
|
|
orte_process_info.tmpdir_base,
|
|
|
|
orte_process_info.nodename,
|
|
|
|
NULL, /* Batch ID -- Not used */
|
|
|
|
ORTE_PROC_MY_NAME))) {
|
|
|
|
exit_status = ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
opal_output_set_output_file_info(orte_process_info.proc_session_dir,
|
|
|
|
"output-", NULL, NULL);
|
|
|
|
}
|
|
|
|
|
2010-03-24 00:28:02 +03:00
|
|
|
/*
|
|
|
|
* Notify SnapC
|
|
|
|
*/
|
|
|
|
if( ORTE_SUCCESS != (ret = orte_snapc.ft_event(OPAL_CRS_RESTART))) {
|
2008-05-01 21:48:13 +04:00
|
|
|
ORTE_ERROR_LOG(ret);
|
|
|
|
exit_status = ret;
|
|
|
|
goto cleanup;
|
|
|
|
}
|
2008-03-05 07:57:23 +03:00
|
|
|
}
|
|
|
|
else if (OPAL_CRS_TERM == state ) {
|
|
|
|
/* Nothing */
|
|
|
|
}
|
|
|
|
else {
|
|
|
|
/* Error state = Nothing */
|
|
|
|
}
|
|
|
|
|
|
|
|
cleanup:
|
|
|
|
|
|
|
|
return exit_status;
|
|
|
|
}
|
2008-03-05 08:57:13 +03:00
|
|
|
#endif
|
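The ft_event() handler above follows a consistent dispatch pattern: on a checkpoint request the subsystems are notified from the top of the stack down (SnapC, then Routed, then RML/OOB), while on continue or restart they are notified in roughly the reverse order, with any failure logged and propagated. A minimal standalone sketch of that pattern, using placeholder callbacks rather than ORTE APIs:

#include <stdio.h>

enum ft_state { FT_CHECKPOINT, FT_CONTINUE, FT_RESTART };

typedef int (*ft_cb)(enum ft_state);

/* Placeholder subsystem hooks; the real module calls orte_snapc.ft_event,
 * orte_routed.ft_event and orte_rml.ft_event. */
static int snapc_cb(enum ft_state s)  { (void)s; return 0; }
static int routed_cb(enum ft_state s) { (void)s; return 0; }
static int rml_cb(enum ft_state s)    { (void)s; return 0; }

static int toy_ft_event(enum ft_state state)
{
    ft_cb down[] = { snapc_cb, routed_cb, rml_cb };  /* checkpoint order */
    ft_cb up[]   = { rml_cb, routed_cb, snapc_cb };  /* recovery order   */
    ft_cb *order = (FT_CHECKPOINT == state) ? down : up;

    for (int i = 0; i < 3; ++i) {
        int rc = order[i](state);
        if (0 != rc) {
            fprintf(stderr, "ft_event(%d): subsystem %d failed (%d)\n",
                    (int)state, i, rc);
            return rc;
        }
    }
    return 0;
}

int main(void)
{
    return toy_ft_event(FT_CHECKPOINT);
}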