/*
 * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation.  All rights reserved.
 * Copyright (c) 2004-2005 The University of Tennessee and The University
 *                         of Tennessee Research Foundation.  All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart.  All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 *
 */

#include "orte_config.h"
#include "orte/constants.h"

#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <signal.h>
#include <errno.h>

#include "opal/util/argv.h"
#include "opal/util/path.h"
#include "opal/mca/base/mca_base_param.h"
#include "opal/mca/installdirs/installdirs.h"
#include "opal/class/opal_pointer_array.h"
#include "opal/class/opal_value_array.h"

#include "orte/util/show_help.h"
#include "orte/util/proc_info.h"
#include "orte/mca/errmgr/errmgr.h"
#include "orte/mca/iof/iof.h"
#include "orte/mca/rml/rml.h"
#include "orte/mca/rml/base/rml_contact.h"
#include "orte/mca/routed/routed.h"
#include "orte/util/name_fns.h"
#include "orte/runtime/orte_globals.h"
#include "orte/util/nidmap.h"

#include "orte/mca/ess/ess.h"
#include "orte/mca/ess/base/base.h"
#include "orte/mca/ess/singleton/ess_singleton.h"

static int fork_hnp(void);

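/* return the given signal to its default disposition - a no-op on
 * Windows, which has no sigaction() */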
static void set_handler_default(int sig)
{
#if !defined(__WINDOWS__)
    struct sigaction act;

    act.sa_handler = SIG_DFL;
    act.sa_flags = 0;
    sigemptyset(&act.sa_mask);

    sigaction(sig, &act, (struct sigaction *)0);
#endif /* !defined(__WINDOWS__) */
}

static int rte_init(char flags);
static int rte_finalize(void);
static bool proc_is_local(orte_process_name_t *proc);
static orte_vpid_t proc_get_daemon(orte_process_name_t *proc);
static char* proc_get_hostname(orte_process_name_t *proc);
static uint32_t proc_get_arch(orte_process_name_t *proc);
static orte_local_rank_t proc_get_local_rank(orte_process_name_t *proc);
static orte_node_rank_t proc_get_node_rank(orte_process_name_t *proc);
static int update_arch(orte_process_name_t *proc, uint32_t arch);
static int update_pidmap(opal_byte_object_t *bo);
static int update_nidmap(opal_byte_object_t *bo);

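/* the ESS API function table for the singleton component - orte_init
 * dispatches through these entry points once this module is selected */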
orte_ess_base_module_t orte_ess_singleton_module = {
    rte_init,
    rte_finalize,
    orte_ess_base_app_abort,
    proc_is_local,
    proc_get_daemon,
    proc_get_hostname,
    proc_get_arch,
    proc_get_local_rank,
    proc_get_node_rank,
    update_arch,
    update_pidmap,
    update_nidmap,
    NULL /* ft_event */
};

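/* initialize the RTE for a singleton: fork/exec an HNP daemon to act on
 * our behalf, then complete the standard application-proc setup as if we
 * had been launched by that HNP */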
static int rte_init(char flags)
{
    int rc;
    orte_nid_t *node;
    orte_jmap_t *jmap;
    orte_pmap_t pmap;

    /* run the prolog */
    if (ORTE_SUCCESS != (rc = orte_ess_base_std_prolog())) {
        ORTE_ERROR_LOG(rc);
        return rc;
    }

    /*
     * If we are the selected module, then we must be a singleton
     * as it means that no other method for discovering a name
     * could be found. In this case, we need to start a daemon that
     * can support our operation. We must do this for two reasons:
     *
     * (1) if we try to play the role of the HNP, then any child processes
     * we might start via comm_spawn will rely on us for all ORTE-level
     * support. However, we can only progress those requests when the
     * application calls into the OMPI/ORTE library! Thus, if this
     * singleton just does computation, the other processes will "hang"
     * in any calls into the ORTE layer that communicate with the HNP -
     * and most calls on application procs *do*.
     *
     * (2) daemons are used to communicate messages for administrative
     * purposes in a broadcast-like manner. Thus, daemons are expected
     * to be able to interpret specific commands. Our application process
     * doesn't have any idea how to handle those commands, thus causing
     * the entire ORTE administrative system to break down.
     *
     * For those reasons, we choose to fork/exec a daemon at this time
     * and then reconnect ourselves to it. We could just "fork" and declare
     * the child to be a daemon, but that would require we place *all* of the
     * daemon command processing code in the ORTE library, do some strange
     * mojo in a few places, etc. This doesn't seem worth it, so we'll just
     * do the old fork/exec here
     *
     * Note that Windows-based systems have to do their own special trick as
     * they don't support fork/exec. So we have to use a giant "if" here to
     * protect the Windows world. To make the results more readable, we put
     * the whole mess in a separate function below
     */
    if (ORTE_SUCCESS != (rc = fork_hnp())) {
        /* if this didn't work, then we cannot support operation any further.
         * Abort the system and tell orte_init to exit
         */
        ORTE_ERROR_LOG(rc);
        return rc;
    }

    orte_process_info.num_procs = 1;

    /* NOTE: do not wireup our io - let the fork'd orted serve
     * as our io handler. This prevents issues with the event
     * library wrt pty's and stdin
     */

    /* setup the nidmap and jobmap arrays */
    if (ORTE_SUCCESS != (rc = orte_util_nidmap_init(NULL))) {
        ORTE_ERROR_LOG(rc);
        return rc;
    }

    /* add a jmap entry for myself */
    jmap = OBJ_NEW(orte_jmap_t);
    jmap->job = ORTE_PROC_MY_NAME->jobid;
    opal_pointer_array_add(&orte_jobmap, jmap);
    pmap.local_rank = 0;
    pmap.node_rank = 0;
    pmap.node = 0;
    opal_value_array_set_item(&jmap->pmap, 0, &pmap);
    jmap->num_procs = 1;

    /* create a nidmap entry for this node */
    node = OBJ_NEW(orte_nid_t);
    node->name = strdup(orte_process_info.nodename);
    node->daemon = 0;  /* the HNP co-occupies our node */
    node->arch = orte_process_info.arch;
    opal_pointer_array_set_item(&orte_nidmap, 0, node);

    /* use the std app init to complete the procedure */
    if (ORTE_SUCCESS != (rc = orte_ess_base_app_setup())) {
        ORTE_ERROR_LOG(rc);
        return rc;
    }

    return ORTE_SUCCESS;
}

static int rte_finalize(void)
{
    int ret;

    /* deconstruct my nidmap and jobmap arrays */
    orte_util_nidmap_finalize();

    /* use the default procedure to finish */
    if (ORTE_SUCCESS != (ret = orte_ess_base_app_finalize())) {
        ORTE_ERROR_LOG(ret);
    }

    return ret;
}

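/* Fork/exec an orted that will serve as our HNP. The orted is told to
 * report its contact URI back on a pipe we hold ("--report-uri"), and is
 * given a second pipe ("--singleton-died-pipe") to watch so it can tell
 * when we exit - being our child, it cannot simply waitpid() on us */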
#define ORTE_URI_MSG_LGTH   256

static int fork_hnp(void)
{
#if !defined(__WINDOWS__)
    int p[2], death_pipe[2];
    char *cmd;
    char **argv = NULL;
    int argc;
    char *param;
    sigset_t sigs;
    int buffer_length, num_chars_read, chunk;
    char *orted_uri;
    int rc;

    /* A pipe is used to communicate between the parent and child to
       indicate whether the exec ultimately succeeded or failed. The
       child sets the pipe to be close-on-exec; the child only ever
       writes anything to the pipe if there is an error (e.g.,
       executable not found, exec() fails, etc.). The parent does a
       blocking read on the pipe; if the pipe closed with no data,
       then the exec() succeeded. If the parent reads something from
       the pipe, then the child was letting us know that it failed.
    */
    if (pipe(p) < 0) {
        ORTE_ERROR_LOG(ORTE_ERR_SYS_LIMITS_PIPES);
        return ORTE_ERR_SYS_LIMITS_PIPES;
    }

    /* we also have to give the HNP a pipe it can watch to know when
     * we terminated. Since the HNP is going to be a child of us, it
     * can't just use waitpid to see when we leave - so it will watch
     * the pipe instead
     */
    if (pipe(death_pipe) < 0) {
        ORTE_ERROR_LOG(ORTE_ERR_SYS_LIMITS_PIPES);
        return ORTE_ERR_SYS_LIMITS_PIPES;
    }

    /* find the orted binary using the install_dirs support - this also
     * checks to ensure that we can see this executable and it *is* executable by us
     */
    cmd = opal_path_access("orted", opal_install_dirs.bindir, X_OK);
    if (NULL == cmd) {
        /* guess we couldn't do it - best to abort */
        ORTE_ERROR_LOG(ORTE_ERR_FILE_NOT_EXECUTABLE);
        close(p[0]);
        close(p[1]);
        return ORTE_ERR_FILE_NOT_EXECUTABLE;
    }

    /* okay, setup an appropriate argv */
    opal_argv_append(&argc, &argv, "orted");

    /* tell the daemon it is to be the HNP */
    opal_argv_append(&argc, &argv, "--hnp");

    /* tell the daemon to get out of our process group */
    opal_argv_append(&argc, &argv, "--set-sid");

    /* tell the daemon to report back its uri so we can connect to it */
    opal_argv_append(&argc, &argv, "--report-uri");
    asprintf(&param, "%d", p[1]);
    opal_argv_append(&argc, &argv, param);
    free(param);

    /* give the daemon a pipe it can watch to tell when we have died */
    opal_argv_append(&argc, &argv, "--singleton-died-pipe");
    asprintf(&param, "%d", death_pipe[0]);
    opal_argv_append(&argc, &argv, param);
    free(param);

    /* add any debug flags */
    if (orte_debug_flag) {
        opal_argv_append(&argc, &argv, "--debug");
    }

    if (orte_debug_daemons_flag) {
        opal_argv_append(&argc, &argv, "--debug-daemons");
    }

    if (orte_debug_daemons_file_flag) {
        if (!orte_debug_daemons_flag) {
            opal_argv_append(&argc, &argv, "--debug-daemons");
        }
        opal_argv_append(&argc, &argv, "--debug-daemons-file");
    }

    /* Fork off the child */
    orte_process_info.hnp_pid = fork();
    if (orte_process_info.hnp_pid < 0) {
        ORTE_ERROR_LOG(ORTE_ERR_SYS_LIMITS_CHILDREN);
        close(p[0]);
        close(p[1]);
        close(death_pipe[0]);
        close(death_pipe[1]);
        free(cmd);
        return ORTE_ERR_SYS_LIMITS_CHILDREN;
    }

    if (orte_process_info.hnp_pid == 0) {
        close(p[0]);
        close(death_pipe[1]);
        /* I am the child - exec me */

        /* Set signal handlers back to the default. Do this close
           to the execve() because the event library may (and likely
           will) reset them. If we don't do this, the event
           library may have left some set that, at least on some
           OS's, don't get reset via fork() or exec(). Hence, the
           orted could be unkillable (for example). */
        set_handler_default(SIGTERM);
        set_handler_default(SIGINT);
        set_handler_default(SIGHUP);
        set_handler_default(SIGPIPE);
        set_handler_default(SIGCHLD);

        /* Unblock all signals, for many of the same reasons that
           we set the default handlers, above. This is noticeable
           on Linux where the event library blocks SIGTERM, but we
           don't want that blocked by the orted (or, more
           specifically, we don't want it to be blocked by the
           orted and then inherited by the ORTE processes that it
           forks, making them unkillable by SIGTERM). */
        sigprocmask(0, 0, &sigs);
        sigprocmask(SIG_UNBLOCK, &sigs, 0);

        execv(cmd, argv);

        /* if I get here, the execv failed! */
        orte_show_help("help-ess-base.txt", "ess-base:execv-error",
                       true, cmd, strerror(errno));
        exit(1);

    } else {
        /* I am the parent - wait to hear something back and
         * report results
         */
        close(p[1]);  /* parent closes the write - orted will write its contact info to it */
        close(death_pipe[0]);  /* parent closes the death_pipe's read */

        /* setup the buffer to read the name + uri */
        buffer_length = ORTE_URI_MSG_LGTH;
        chunk = ORTE_URI_MSG_LGTH-1;
        num_chars_read = 0;
        orted_uri = (char*)malloc(buffer_length);

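        /* read the reply in chunks of ORTE_URI_MSG_LGTH-1 bytes, growing
         * the buffer as required - the loop ends when a read returns less
         * than a full chunk */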
        while (chunk == (rc = read(p[0], &orted_uri[num_chars_read], chunk))) {
            /* we read an entire buffer - better get more */
            num_chars_read += chunk;
            buffer_length += ORTE_URI_MSG_LGTH;
            orted_uri = realloc((void*)orted_uri, buffer_length);
        }
        num_chars_read += rc;

        if (num_chars_read <= 0) {
            /* we didn't get anything back - this is bad */
            ORTE_ERROR_LOG(ORTE_ERR_HNP_COULD_NOT_START);
            free(orted_uri);
            return ORTE_ERR_HNP_COULD_NOT_START;
        }

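        /* the reply holds the daemon's contact uri with our assigned
         * process name appended in brackets - i.e., something of the
         * form "<uri>[<name>]" (shape inferred from the checks below) -
         * so peel the bracketed name off the tail first */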
        /* parse the name from the returned info */
        if (']' != orted_uri[strlen(orted_uri)-1]) {
            ORTE_ERROR_LOG(ORTE_ERR_COMM_FAILURE);
            free(orted_uri);
            return ORTE_ERR_COMM_FAILURE;
        }
        orted_uri[strlen(orted_uri)-1] = '\0';
        if (NULL == (param = strrchr(orted_uri, '['))) {
            ORTE_ERROR_LOG(ORTE_ERR_COMM_FAILURE);
            free(orted_uri);
            return ORTE_ERR_COMM_FAILURE;
        }
        *param = '\0';  /* terminate the string */
        param++;
        if (ORTE_SUCCESS != (rc = orte_util_convert_string_to_process_name(ORTE_PROC_MY_NAME, param))) {
            ORTE_ERROR_LOG(rc);
            free(orted_uri);
            return rc;
        }

        /* save the daemon uri - we will process it later */
        orte_process_info.my_daemon_uri = strdup(orted_uri);

        /* likewise, since this is also the HNP, set that uri too */
        orte_process_info.my_hnp_uri = strdup(orted_uri);

        /* indicate we are a singleton so orte_init knows what to do */
        orte_process_info.singleton = true;

        /* all done - report success */
        free(orted_uri);
        return ORTE_SUCCESS;
    }
#else
    /* someone will have to devise a Windows equivalent */
#endif

    return ORTE_SUCCESS;
}

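/* a proc is "local" if the nidmap shows it being hosted by the same
 * daemon that hosts us */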
static bool proc_is_local(orte_process_name_t *proc)
{
    orte_nid_t *nid;

    if (NULL == (nid = orte_util_lookup_nid(proc))) {
        ORTE_ERROR_LOG(ORTE_ERR_NOT_FOUND);
        return false;
    }

    if (nid->daemon == ORTE_PROC_MY_DAEMON->vpid) {
        OPAL_OUTPUT_VERBOSE((2, orte_ess_base_output,
                             "%s ess:singleton: proc %s is LOCAL",
                             ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                             ORTE_NAME_PRINT(proc)));
        return true;
    }

    OPAL_OUTPUT_VERBOSE((2, orte_ess_base_output,
                         "%s ess:singleton: proc %s is REMOTE",
                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                         ORTE_NAME_PRINT(proc)));

    return false;
}

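/* return the vpid of the daemon hosting the given proc, or
 * ORTE_VPID_INVALID if the proc is not in the nidmap */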
static orte_vpid_t proc_get_daemon(orte_process_name_t *proc)
{
    orte_nid_t *nid;

    if (NULL == (nid = orte_util_lookup_nid(proc))) {
        return ORTE_VPID_INVALID;
    }

    OPAL_OUTPUT_VERBOSE((2, orte_ess_base_output,
                         "%s ess:singleton: proc %s is hosted by daemon %s",
                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                         ORTE_NAME_PRINT(proc),
                         ORTE_VPID_PRINT(nid->daemon)));

    return nid->daemon;
}

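/* return the hostname recorded in the nidmap for the given proc - note
 * that the string is owned by the nidmap entry, so callers presumably
 * must not free it */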
static char* proc_get_hostname(orte_process_name_t *proc)
{
    orte_nid_t *nid;

    if (NULL == (nid = orte_util_lookup_nid(proc))) {
        ORTE_ERROR_LOG(ORTE_ERR_NOT_FOUND);
        return NULL;
    }

    OPAL_OUTPUT_VERBOSE((2, orte_ess_base_output,
                         "%s ess:singleton: proc %s is on host %s",
                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                         ORTE_NAME_PRINT(proc),
                         nid->name));

    return nid->name;
}

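/* return the architecture recorded in the nidmap for the given proc's
 * node */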
static uint32_t proc_get_arch(orte_process_name_t *proc)
{
    orte_nid_t *nid;

    if (NULL == (nid = orte_util_lookup_nid(proc))) {
Repair the MPI-2 dynamic operations. This includes:
1. repair of the linear and direct routed modules
2. repair of the ompi/pubsub/orte module to correctly init routes to the ompi-server, and correctly handle failure to correctly parse the provided ompi-server URI
3. modification of orterun to accept both "file" and "FILE" for designating where the ompi-server URI is to be found - purely a convenience feature
4. resolution of a message ordering problem during the connect/accept handshake that allowed the "send-first" proc to attempt to send to the "recv-first" proc before the HNP had actually updated its routes.
Let this be a further reminder to all - message ordering is NOT guaranteed in the OOB
5. Repair the ompi/dpm/orte module to correctly init routes during connect/accept.
Reminder to all: messages sent to procs in another job family (i.e., started by a different mpirun) are ALWAYS routed through the respective HNPs. As per the comments in orte/routed, this is REQUIRED to maintain connect/accept (where only the root proc on each side is capable of init'ing the routes), allow communication between mpirun's using different routing modules, and to minimize connections on tools such as ompi-server. It is all taken care of "under the covers" by the OOB to ensure that a route back to the sender is maintained, even when the different mpirun's are using different routed modules.
6. corrections in the orte/odls to ensure proper identification of daemons participating in a dynamic launch
7. corrections in build/nidmap to support update of an existing nidmap during dynamic launch
8. corrected implementation of the update_arch function in the ESS, along with consolidation of a number of ESS operations into base functions for easier maintenance. The ability to support info from multiple jobs was added, although we don't currently do so - this will come later to support further fault recovery strategies
9. minor updates to several functions to remove unnecessary and/or no longer used variables and envar's, add some debugging output, etc.
10. addition of a new macro ORTE_PROC_IS_DAEMON that resolves to true if the provided proc is a daemon
There is still more cleanup to be done for efficiency, but this at least works.
Tested on single-node Mac, multi-node SLURM via odin. Tests included connect/accept, publish/lookup/unpublish, comm_spawn, comm_spawn_multiple, and singleton comm_spawn.
Fixes ticket #1256
This commit was SVN r18804.
2008-07-03 21:53:37 +04:00
|
|
|
ORTE_ERROR_LOG(ORTE_ERR_NOT_FOUND);
|
|
|
|
return 0;
|
2008-06-24 21:53:10 +04:00
|
|
|
}
|
Repair the MPI-2 dynamic operations. This includes:
1. repair of the linear and direct routed modules
2. repair of the ompi/pubsub/orte module to correctly init routes to the ompi-server, and correctly handle failure to correctly parse the provided ompi-server URI
3. modification of orterun to accept both "file" and "FILE" for designating where the ompi-server URI is to be found - purely a convenience feature
4. resolution of a message ordering problem during the connect/accept handshake that allowed the "send-first" proc to attempt to send to the "recv-first" proc before the HNP had actually updated its routes.
Let this be a further reminder to all - message ordering is NOT guaranteed in the OOB
5. Repair the ompi/dpm/orte module to correctly init routes during connect/accept.
Reminder to all: messages sent to procs in another job family (i.e., started by a different mpirun) are ALWAYS routed through the respective HNPs. As per the comments in orte/routed, this is REQUIRED to maintain connect/accept (where only the root proc on each side is capable of init'ing the routes), allow communication between mpirun's using different routing modules, and to minimize connections on tools such as ompi-server. It is all taken care of "under the covers" by the OOB to ensure that a route back to the sender is maintained, even when the different mpirun's are using different routed modules.
6. corrections in the orte/odls to ensure proper identification of daemons participating in a dynamic launch
7. corrections in build/nidmap to support update of an existing nidmap during dynamic launch
8. corrected implementation of the update_arch function in the ESS, along with consolidation of a number of ESS operations into base functions for easier maintenance. The ability to support info from multiple jobs was added, although we don't currently do so - this will come later to support further fault recovery strategies
9. minor updates to several functions to remove unnecessary and/or no longer used variables and envar's, add some debugging output, etc.
10. addition of a new macro ORTE_PROC_IS_DAEMON that resolves to true if the provided proc is a daemon
There is still more cleanup to be done for efficiency, but this at least works.
Tested on single-node Mac, multi-node SLURM via odin. Tests included connect/accept, publish/lookup/unpublish, comm_spawn, comm_spawn_multiple, and singleton comm_spawn.
Fixes ticket #1256
This commit was SVN r18804.
2008-07-03 21:53:37 +04:00
|
|
|
|
|
|
|
OPAL_OUTPUT_VERBOSE((2, orte_ess_base_output,
|
|
|
|
"%s ess:singleton: proc %s has arch %0x",
|
|
|
|
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
|
|
|
|
ORTE_NAME_PRINT(proc),
|
|
|
|
nid->arch));
|
|
|
|
|
|
|
|
return nid->arch;
|
2008-04-30 23:49:53 +04:00
|
|
|
}
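
/*
 * Record a new architecture word in the nidmap entry for the node
 * hosting the given proc - used when updated arch info arrives, e.g.
 * during a dynamic launch.
 */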
static int update_arch(orte_process_name_t *proc, uint32_t arch)
{
    orte_nid_t *nid;

    if (NULL == (nid = orte_util_lookup_nid(proc))) {
        ORTE_ERROR_LOG(ORTE_ERR_NOT_FOUND);
        return ORTE_ERR_NOT_FOUND;
    }

    OPAL_OUTPUT_VERBOSE((2, orte_ess_base_output,
                         "%s ess:singleton: updating proc %s to arch %0x",
                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                         ORTE_NAME_PRINT(proc),
                         arch));

    nid->arch = arch;

    return ORTE_SUCCESS;
}
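
/*
 * Return the proc's local rank (its ordering among procs of the same
 * job on the same node) from the pidmap, or ORTE_LOCAL_RANK_INVALID if
 * the proc is unknown.
 */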
static orte_local_rank_t proc_get_local_rank(orte_process_name_t *proc)
{
    orte_pmap_t *pmap;

    if (NULL == (pmap = orte_util_lookup_pmap(proc))) {
        ORTE_ERROR_LOG(ORTE_ERR_NOT_FOUND);
        return ORTE_LOCAL_RANK_INVALID;
    }

    OPAL_OUTPUT_VERBOSE((2, orte_ess_base_output,
                         "%s ess:singleton: proc %s has local rank %d",
                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                         ORTE_NAME_PRINT(proc),
                         (int)pmap->local_rank));

    return pmap->local_rank;
}
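
/*
 * Return the proc's node rank (its ordering among all procs sharing
 * the node, regardless of job) from the pidmap, or
 * ORTE_NODE_RANK_INVALID if the proc is unknown.
 */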
static orte_node_rank_t proc_get_node_rank(orte_process_name_t *proc)
{
    orte_pmap_t *pmap;

    if (NULL == (pmap = orte_util_lookup_pmap(proc))) {
        ORTE_ERROR_LOG(ORTE_ERR_NOT_FOUND);
        return ORTE_NODE_RANK_INVALID;
    }

    OPAL_OUTPUT_VERBOSE((2, orte_ess_base_output,
                         "%s ess:singleton: proc %s has node rank %d",
                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                         ORTE_NAME_PRINT(proc),
                         (int)pmap->node_rank));

    return pmap->node_rank;
}
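
/*
 * Rebuild the local pidmap from an encoded byte object, e.g. one
 * received when procs are added by a dynamic launch.
 */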
static int update_pidmap(opal_byte_object_t *bo)
{
    int ret;

    /* build the pmap */
    if (ORTE_SUCCESS != (ret = orte_util_decode_pidmap(bo))) {
        ORTE_ERROR_LOG(ret);
    }

    return ret;
}
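
/*
 * Rebuild the local nidmap from an encoded byte object; the decode
 * utility handles merging with any existing map.
 */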
static int update_nidmap(opal_byte_object_t *bo)
{
    int rc;

    /* decode the nidmap - the util will know what to do */
    if (ORTE_SUCCESS != (rc = orte_util_decode_nodemap(bo))) {
        ORTE_ERROR_LOG(rc);
    }

    return rc;
}