/*
 * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation.  All rights reserved.
 * Copyright (c) 2004-2011 The University of Tennessee and The University
 *                         of Tennessee Research Foundation.  All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart.  All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * Copyright (c) 2007-2011 Cisco Systems, Inc.  All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 *
 */

#include "orte_config.h"
#include "orte/constants.h"

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif  /* HAVE_UNISTD_H */
#ifdef HAVE_STRING_H
#include <string.h>
#endif  /* HAVE_STRING_H */
#include <ctype.h>

#include <lsf/lsbatch.h>

#include "opal/util/opal_environ.h"
#include "opal/mca/base/mca_base_param.h"

#include "orte/util/show_help.h"
#include "orte/util/name_fns.h"
#include "orte/util/proc_info.h"
#include "orte/runtime/orte_globals.h"
#include "orte/mca/errmgr/errmgr.h"
#include "orte/util/nidmap.h"
#include "orte/util/regex.h"

#include "orte/mca/ess/ess.h"
#include "orte/mca/ess/base/base.h"
#include "orte/mca/ess/lsf/ess_lsf.h"
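
/*
 * This module provides the LSF flavor of the ORTE environment-specific
 * services (ESS).  Most entry points in the module table below are the
 * shared base implementations; the LSF-specific behavior lives in
 * rte_init, rte_finalize, and lsf_set_name.
 */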

static int lsf_set_name(void);

static int rte_init(void);
static int rte_finalize(void);

orte_ess_base_module_t orte_ess_lsf_module = {
    rte_init,
    rte_finalize,
    orte_ess_base_app_abort,
    orte_ess_base_proc_get_locality,
    orte_ess_base_proc_get_daemon,
    orte_ess_base_proc_get_hostname,
    orte_ess_base_proc_get_local_rank,
    orte_ess_base_proc_get_node_rank,
    orte_ess_base_update_pidmap,
    orte_ess_base_update_nidmap,
    NULL /* ft_event */
};

/*
 * Local variables
 */
static orte_node_rank_t my_node_rank = ORTE_NODE_RANK_INVALID;

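/*
 * rte_init selects one of three setup paths depending on what kind of
 * process this is: a daemon completes the standard orted setup
 * (optionally expanding the node regex passed down by mpirun), a tool
 * uses the tool setup and skips the nidmap, and an application process
 * uses the standard app setup and then builds its nidmap arrays.
 */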
static int rte_init(void)
{
    int ret;
    char *error = NULL;
    char **hosts = NULL;

    /* run the prolog */
    if (ORTE_SUCCESS != (ret = orte_ess_base_std_prolog())) {
        error = "orte_ess_base_std_prolog";
        goto error;
    }

    /* Start by getting a unique name */
    lsf_set_name();

    /* if I am a daemon, complete my setup using the
     * default procedure
     */
    if (ORTE_PROC_IS_DAEMON) {
        if (NULL != orte_node_regex) {
            /* extract the nodes */
            if (ORTE_SUCCESS != (ret = orte_regex_extract_node_names(orte_node_regex, &hosts))) {
                error = "orte_regex_extract_node_names";
                goto error;
            }
        }
        if (ORTE_SUCCESS != (ret = orte_ess_base_orted_setup(hosts))) {
            ORTE_ERROR_LOG(ret);
            error = "orte_ess_base_orted_setup";
            goto error;
        }
        opal_argv_free(hosts);
        return ORTE_SUCCESS;
    }

    if (ORTE_PROC_IS_TOOL) {
        /* otherwise, if I am a tool proc, use that procedure */
        if (ORTE_SUCCESS != (ret = orte_ess_base_tool_setup())) {
            ORTE_ERROR_LOG(ret);
            error = "orte_ess_base_tool_setup";
            goto error;
        }
        /* as a tool, I don't need a nidmap - so just return now */
        return ORTE_SUCCESS;
    }

    /* otherwise, I must be an application process - use
     * the default procedure to finish my setup
     */
    if (ORTE_SUCCESS != (ret = orte_ess_base_app_setup())) {
        ORTE_ERROR_LOG(ret);
        error = "orte_ess_base_app_setup";
        goto error;
    }

    /* setup the nidmap arrays */
    if (ORTE_SUCCESS != (ret = orte_util_nidmap_init(orte_process_info.sync_buf))) {
        ORTE_ERROR_LOG(ret);
        error = "orte_util_nidmap_init";
        goto error;
    }

    return ORTE_SUCCESS;

error:
    if (ORTE_ERR_SILENT != ret && !orte_report_silent_errors) {
        orte_show_help("help-orte-runtime.txt",
                       "orte_init:startup:internal-failure",
                       true, error, ORTE_ERROR_NAME(ret), ret);
    }

    return ret;
}

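/*
 * rte_finalize tears things down along the same split as rte_init:
 * daemons, tools, and application processes each call their matching
 * base finalize routine, and non-tool processes then release their
 * nidmap/jobmap arrays.
 */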
static int rte_finalize(void)
{
    int ret;

    /* if I am a daemon, finalize using the default procedure */
    if (ORTE_PROC_IS_DAEMON) {
        if (ORTE_SUCCESS != (ret = orte_ess_base_orted_finalize())) {
            ORTE_ERROR_LOG(ret);
        }
    } else if (ORTE_PROC_IS_TOOL) {
        /* otherwise, if I am a tool proc, use that procedure */
        if (ORTE_SUCCESS != (ret = orte_ess_base_tool_finalize())) {
            ORTE_ERROR_LOG(ret);
        }
        /* as a tool, I didn't create a nidmap - so just return now */
        return ret;
    } else {
        /* otherwise, I must be an application process -
         * use the default procedure to finish
         */
        if (ORTE_SUCCESS != (ret = orte_ess_base_app_finalize())) {
            ORTE_ERROR_LOG(ret);
        }
    }

    /* deconstruct my nidmap and jobmap arrays */
    orte_util_nidmap_finalize();

    return ret;
}

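/*
 * lsf_set_name assembles this process' ORTE name: the jobid comes from
 * the orte_ess_jobid MCA parameter, the base vpid from orte_ess_vpid,
 * and that base vpid is then offset by the LSF-assigned task id found
 * in the LSF_PM_TASKID environment variable.
 */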
static int lsf_set_name(void)
{
    int rc;
    int lsf_nodeid;
    orte_jobid_t jobid;
    orte_vpid_t vpid;
    char *tmp;

    /* get the jobid handed to us via the MCA parameter system */
    mca_base_param_reg_string_name("orte", "ess_jobid", "Process jobid",
                                   true, false, NULL, &tmp);
    if (NULL == tmp) {
        ORTE_ERROR_LOG(ORTE_ERR_NOT_FOUND);
        return ORTE_ERR_NOT_FOUND;
    }
    if (ORTE_SUCCESS != (rc = orte_util_convert_string_to_jobid(&jobid, tmp))) {
        ORTE_ERROR_LOG(rc);
        return rc;
    }
    free(tmp);
    ORTE_PROC_MY_NAME->jobid = jobid;

    /* get the base vpid, then offset it by our LSF node id */
    mca_base_param_reg_string_name("orte", "ess_vpid", "Process vpid",
                                   true, false, NULL, &tmp);
    if (NULL == tmp) {
        ORTE_ERROR_LOG(ORTE_ERR_NOT_FOUND);
        return ORTE_ERR_NOT_FOUND;
    }
    if (ORTE_SUCCESS != (rc = orte_util_convert_string_to_vpid(&vpid, tmp))) {
        ORTE_ERROR_LOG(rc);
        return rc;
    }
    free(tmp);

    /* guard against a missing LSF_PM_TASKID so we don't hand NULL to atoi */
    if (NULL == (tmp = getenv("LSF_PM_TASKID"))) {
        ORTE_ERROR_LOG(ORTE_ERR_NOT_FOUND);
        return ORTE_ERR_NOT_FOUND;
    }
    lsf_nodeid = atoi(tmp);
    ORTE_PROC_MY_NAME->vpid = vpid + lsf_nodeid;

    /* get the non-name common environmental variables */
    if (ORTE_SUCCESS != (rc = orte_ess_env_get())) {
        ORTE_ERROR_LOG(rc);
        return rc;
    }

    return ORTE_SUCCESS;
}