/* -*- C -*-
 *
 * Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation.  All rights reserved.
 * Copyright (c) 2004-2006 The University of Tennessee and The University
 *                         of Tennessee Research Foundation.  All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart.  All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * Copyright (c) 2006-2007 Cisco Systems, Inc.  All rights reserved.
 * Copyright (c) 2007      Sun Microsystems, Inc.  All rights reserved.
 * Copyright (c) 2007      Los Alamos National Security, LLC.  All rights
 *                         reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */
# include "orte_config.h"
2007-07-19 23:00:06 +04:00
# include "orte/orte_constants.h"
2005-03-14 23:57:21 +03:00
# include <stdio.h>
# ifdef HAVE_UNISTD_H
# include <unistd.h>
# endif
# ifdef HAVE_SYS_PARAM_H
# include <sys/param.h>
# endif
# include <errno.h>
# include <signal.h>
# include <ctype.h>
2005-12-18 01:05:10 +03:00
# ifdef HAVE_SYS_TYPES_H
2005-04-01 04:30:37 +04:00
# include <sys/types.h>
2005-12-18 01:05:10 +03:00
# endif /* HAVE_SYS_TYPES_H */
# ifdef HAVE_SYS_WAIT_H
2005-04-01 04:30:37 +04:00
# include <sys/wait.h>
2005-12-18 01:05:10 +03:00
# endif /* HAVE_SYS_WAIT_H */
2007-01-25 17:17:44 +03:00
# ifdef HAVE_SYS_TIME_H
# include <sys/time.h>
2007-04-01 20:16:54 +04:00
# endif /* HAVE_SYS_TIME_H */
2005-03-14 23:57:21 +03:00
2005-07-04 03:09:55 +04:00
# include "opal/event/event.h"
2007-04-21 04:15:05 +04:00
# include "opal/mca/installdirs/installdirs.h"
2005-09-19 21:20:01 +04:00
# include "opal/mca/base/base.h"
# include "opal/threads/condition.h"
2005-07-04 04:13:44 +04:00
# include "opal/util/argv.h"
2005-09-19 21:20:01 +04:00
# include "opal/util/basename.h"
2005-07-04 04:13:44 +04:00
# include "opal/util/cmd_line.h"
2005-09-19 21:20:01 +04:00
# include "opal/util/opal_environ.h"
2005-07-04 03:31:27 +04:00
# include "opal/util/output.h"
2005-07-04 06:38:44 +04:00
# include "opal/util/show_help.h"
2005-09-19 21:20:01 +04:00
# include "opal/util/trace.h"
2007-03-17 02:11:45 +03:00
# if OPAL_ENABLE_FT == 1
# include "opal/runtime/opal_cr.h"
# endif
2006-06-09 21:21:23 +04:00
# include "opal/version.h"
2007-04-21 04:15:05 +04:00
# include "opal/runtime/opal.h"
2007-07-19 23:00:06 +04:00
# include "opal/util/os_path.h"
2005-09-19 21:20:01 +04:00
# include "orte/class/orte_pointer_array.h"
# include "orte/util/proc_info.h"
# include "orte/util/sys_info.h"
# include "orte/util/universe_setup_file_io.h"
2006-09-14 19:27:17 +04:00
# include "orte/util/pre_condition_transports.h"
2005-03-14 23:57:21 +03:00
2005-09-19 21:20:01 +04:00
# include "orte/mca/ns/ns.h"
# include "orte/mca/gpr/gpr.h"
2007-04-24 23:19:14 +04:00
# include "orte/mca/odls/odls_types.h"
2006-09-15 01:29:51 +04:00
# include "orte/mca/pls/pls.h"
2006-11-01 01:16:51 +03:00
# include "orte/mca/rmaps/rmaps_types.h"
2005-09-19 21:20:01 +04:00
# include "orte/mca/rmgr/rmgr.h"
2007-07-12 23:53:18 +04:00
# include "orte/mca/rml/rml.h"
2005-09-19 21:20:01 +04:00
# include "orte/mca/schema/schema.h"
2006-09-15 01:29:51 +04:00
# include "orte/mca/smr/smr.h"
2005-09-19 21:20:01 +04:00
# include "orte/mca/errmgr/errmgr.h"
2005-03-14 23:57:21 +03:00
2005-09-19 21:20:01 +04:00
# include "orte/runtime/runtime.h"
2007-01-25 17:17:44 +03:00
# include "orte/runtime/params.h"
2005-09-19 21:20:01 +04:00
# include "orte/runtime/orte_wait.h"
2005-03-14 23:57:21 +03:00
2007-07-12 23:53:18 +04:00
/* ensure I can behave like a daemon */
# include "orte/orted/orted.h"
2005-08-31 20:15:59 +04:00
# include "orterun.h"
2005-08-31 04:47:52 +04:00
# include "totalview.h"
2005-03-14 23:57:21 +03:00
/*
 * Globals
 */
static struct opal_event term_handler;
static struct opal_event int_handler;
#ifndef __WINDOWS__
static struct opal_event sigusr1_handler;
static struct opal_event sigusr2_handler;
#endif  /* __WINDOWS__ */
static orte_jobid_t jobid = ORTE_JOBID_INVALID;
static orte_pointer_array_t *apps_pa;
static bool wait_for_job_completion = true;
static char *orterun_basename = NULL;
static int max_display_aborted = 1;
static int num_aborted = 0;
static int num_killed = 0;
static char **global_mca_env = NULL;
static bool have_zero_np = false;
static orte_std_cntr_t total_num_apps = 0;
static bool want_prefix_by_default = (bool) ORTE_WANT_ORTERUN_PREFIX_BY_DEFAULT;

/*
 * Globals
 */
struct globals_t orterun_globals;
bool globals_init = false;
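
/* Command-line option table: each entry binds an option either to a local
   variable (e.g., &orterun_globals.num_procs) or - when the first three
   fields name an MCA framework/component/parameter - to that MCA parameter
   (e.g., "rmaps", "base", "pernode" feeds the rmaps_base_pernode param). */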
opal_cmd_line_init_t cmd_line_init[] = {
    /* Various "obvious" options */
    { NULL, NULL, NULL, 'h', NULL, "help", 0,
      &orterun_globals.help, OPAL_CMD_LINE_TYPE_BOOL,
      "This help message" },
    { NULL, NULL, NULL, 'V', NULL, "version", 0,
      &orterun_globals.version, OPAL_CMD_LINE_TYPE_BOOL,
      "Print version and exit" },
    { NULL, NULL, NULL, 'v', NULL, "verbose", 0,
      &orterun_globals.verbose, OPAL_CMD_LINE_TYPE_BOOL,
      "Be verbose" },
    { NULL, NULL, NULL, 'q', NULL, "quiet", 0,
      &orterun_globals.quiet, OPAL_CMD_LINE_TYPE_BOOL,
      "Suppress helpful messages" },

    /* Preload the binary on the remote machine */
    { NULL, NULL, NULL, 's', NULL, "preload-binary", 0,
      &orterun_globals.preload_binary, OPAL_CMD_LINE_TYPE_BOOL,
      "Preload the binary on the remote machine before starting the remote process." },
    /* Preload files on the remote machine */
    { NULL, NULL, NULL, '\0', NULL, "preload-files", 1,
      &orterun_globals.preload_files, OPAL_CMD_LINE_TYPE_STRING,
      "Preload the comma-separated list of files to the remote machine's current working directory before starting the remote process." },
    /* Where to preload files on the remote machine */
    { NULL, NULL, NULL, '\0', NULL, "preload-files-dest-dir", 1,
      &orterun_globals.preload_files_dest_dir, OPAL_CMD_LINE_TYPE_STRING,
      "The destination directory to use in conjunction with --preload-files. By default, the absolute and relative paths provided by --preload-files are used." },

    /* Use an appfile */
    { NULL, NULL, NULL, '\0', NULL, "app", 1,
      &orterun_globals.appfile, OPAL_CMD_LINE_TYPE_STRING,
      "Provide an appfile; ignore all other command line options" },

    /* Number of processes; -c, -n, --n, -np, and --np are all
       synonyms */
    { NULL, NULL, NULL, 'c', "np", "np", 1,
      &orterun_globals.num_procs, OPAL_CMD_LINE_TYPE_INT,
      "Number of processes to run" },
    { NULL, NULL, NULL, '\0', "n", "n", 1,
      &orterun_globals.num_procs, OPAL_CMD_LINE_TYPE_INT,
      "Number of processes to run" },

    /* Set a hostfile */
    { "rds", "hostfile", "path", '\0', "hostfile", "hostfile", 1,
      NULL, OPAL_CMD_LINE_TYPE_STRING,
      "Provide a hostfile" },
    { "rds", "hostfile", "path", '\0', "machinefile", "machinefile", 1,
      NULL, OPAL_CMD_LINE_TYPE_STRING,
      "Provide a hostfile" },

    /* Don't wait for the process to finish before exiting */
#if 0
    { NULL, NULL, NULL, '\0', "nw", "nw", 0,
      &orterun_globals.no_wait_for_job_completion, OPAL_CMD_LINE_TYPE_BOOL,
      "Launch the processes and do not wait for their completion (i.e., let orterun complete as soon as a successful launch occurs)" },
#endif

    /* Set the max number of aborted processes to show */
    { NULL, NULL, NULL, '\0', "aborted", "aborted", 1,
      &max_display_aborted, OPAL_CMD_LINE_TYPE_INT,
      "The maximum number of aborted processes to display" },

    /* Export environment variables; potentially used multiple times,
       so it does not make sense to set into a variable */
    { NULL, NULL, NULL, 'x', NULL, NULL, 1,
      NULL, OPAL_CMD_LINE_TYPE_NULL,
      "Export an environment variable, optionally specifying a value (e.g., \"-x foo\" exports the environment variable foo and takes its value from the current environment; \"-x foo=bar\" exports the environment variable name foo and sets its value to \"bar\" in the started processes)" },

    /* Specific mapping (C, cX, N, nX) */
#if 0
    /* JJH --map is not currently implemented so don't advertise it until it is */
    { NULL, NULL, NULL, '\0', NULL, "map", 1,
      NULL, OPAL_CMD_LINE_TYPE_STRING,
      "Mapping of processes to nodes/CPUs" },
#endif
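
    /* Process-layout options. Two decisions are involved: slot allocation
       (how many processes each node gets - declared "slots" are filled
       first, and nodes are oversubscribed up to "max-slots" only if
       necessary) and process mapping (where the MPI_COMM_WORLD ranks are
       placed). "-bynode" maps ranks round-robin across the nodes, while
       "-byslot" (the default) fills each node's slots before moving on to
       the next node. */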
    { NULL, NULL, NULL, '\0', "bynode", "bynode", 0,
      &orterun_globals.by_node, OPAL_CMD_LINE_TYPE_BOOL,
      "Whether to allocate/map processes round-robin by node" },
    { NULL, NULL, NULL, '\0', "byslot", "byslot", 0,
      &orterun_globals.by_slot, OPAL_CMD_LINE_TYPE_BOOL,
      "Whether to allocate/map processes round-robin by slot (the default)" },
{ " rmaps " , " base " , " pernode " , ' \0 ' , " pernode " , " pernode " , 0 ,
2006-12-13 07:51:38 +03:00
NULL , OPAL_CMD_LINE_TYPE_BOOL ,
2006-12-12 03:54:05 +03:00
" Launch one process per available node on the specified number of nodes [no -np => use all allocated nodes] " } ,
2007-01-17 17:56:22 +03:00
{ " rmaps " , " base " , " n_pernode " , ' \0 ' , " npernode " , " npernode " , 1 ,
2006-12-13 07:51:38 +03:00
NULL , OPAL_CMD_LINE_TYPE_INT ,
2006-12-12 03:54:05 +03:00
" Launch n processes per node on all allocated nodes " } ,
2007-01-17 17:56:22 +03:00
{ " rmaps " , " base " , " no_oversubscribe " , ' \0 ' , " nooversubscribe " , " nooversubscribe " , 0 ,
2006-12-13 07:51:38 +03:00
NULL , OPAL_CMD_LINE_TYPE_BOOL ,
2006-07-11 01:25:33 +04:00
" Nodes are not to be oversubscribed, even if the system supports such operation " } ,
2006-12-13 16:49:15 +03:00
{ " rmaps " , " base " , " display_map " , ' \0 ' , " display-map " , " display-map " , 0 ,
2006-12-03 16:59:23 +03:00
NULL , OPAL_CMD_LINE_TYPE_BOOL ,
" Display the process map just before launch " } ,
2006-11-01 01:16:51 +03:00
2005-03-14 23:57:21 +03:00
    /* mpiexec-like arguments */
    { NULL, NULL, NULL, '\0', "wdir", "wdir", 1,
      &orterun_globals.wdir, OPAL_CMD_LINE_TYPE_STRING,
      "Set the working directory of the started processes" },
    { NULL, NULL, NULL, '\0', "wd", "wd", 1,
      &orterun_globals.wdir, OPAL_CMD_LINE_TYPE_STRING,
      "Synonym for --wdir" },
    { NULL, NULL, NULL, '\0', "path", "path", 1,
      &orterun_globals.path, OPAL_CMD_LINE_TYPE_STRING,
      "PATH to be used to look for executables to start processes" },

    /* These arguments can be specified multiple times */
#if 0
    /* JMS: Removed because it's not really implemented */
    { NULL, NULL, NULL, '\0', "arch", "arch", 1,
      NULL, OPAL_CMD_LINE_TYPE_STRING,
      "Architecture to start processes on" },
#endif
    { NULL, NULL, NULL, 'H', "host", "host", 1,
      NULL, OPAL_CMD_LINE_TYPE_STRING,
      "List of hosts to invoke processes on" },
    /* OSC mpiexec-like arguments */
    { "rmaps", "base", "no_schedule_local", '\0', "nolocal", "nolocal", 0,
      NULL, OPAL_CMD_LINE_TYPE_BOOL,
      "Do not run any MPI applications on the local node" },

    /* User-level debugger arguments */
    { NULL, NULL, NULL, '\0', "tv", "tv", 0,
      &orterun_globals.debugger, OPAL_CMD_LINE_TYPE_BOOL,
      "Deprecated backwards compatibility flag; synonym for \"--debug\"" },
    { NULL, NULL, NULL, '\0', "debug", "debug", 0,
      &orterun_globals.debugger, OPAL_CMD_LINE_TYPE_BOOL,
      "Invoke the user-level debugger indicated by the orte_base_user_debugger MCA parameter" },
    { "orte", "base", "user_debugger", '\0', "debugger", "debugger", 1,
      NULL, OPAL_CMD_LINE_TYPE_STRING,
      "Sequence of debuggers to search for when \"--debug\" is used" },
    /* OpenRTE arguments */
    { "orte", "debug", NULL, 'd', NULL, "debug-devel", 0,
      NULL, OPAL_CMD_LINE_TYPE_BOOL,
      "Enable debugging of OpenRTE" },

    { "orte", "debug", "daemons", '\0', NULL, "debug-daemons", 0,
      NULL, OPAL_CMD_LINE_TYPE_INT,
      "Enable debugging of any OpenRTE daemons used by this application" },

    { "orte", "debug", "daemons_file", '\0', NULL, "debug-daemons-file", 0,
      NULL, OPAL_CMD_LINE_TYPE_BOOL,
      "Enable debugging of any OpenRTE daemons used by this application, storing output in files" },

    { "orte", "no_daemonize", NULL, '\0', NULL, "no-daemonize", 0,
      NULL, OPAL_CMD_LINE_TYPE_BOOL,
      "Do not detach OpenRTE daemons used by this application" },

    { "universe", NULL, NULL, '\0', NULL, "universe", 1,
      NULL, OPAL_CMD_LINE_TYPE_STRING,
      "Set the universe name as username@hostname:universe_name for this application" },

    { NULL, NULL, NULL, '\0', NULL, "tmpdir", 1,
      &orte_process_info.tmpdir_base, OPAL_CMD_LINE_TYPE_STRING,
      "Set the root for the session directory tree for orterun ONLY" },

    { NULL, NULL, NULL, '\0', NULL, "do-not-launch", 0,
      &orterun_globals.do_not_launch, OPAL_CMD_LINE_TYPE_BOOL,
      "Perform all necessary operations to prepare to launch the application, but do not actually launch it" },

    { NULL, NULL, NULL, '\0', NULL, "prefix", 1,
      NULL, OPAL_CMD_LINE_TYPE_STRING,
      "Prefix where Open MPI is installed on remote nodes" },
    { NULL, NULL, NULL, '\0', NULL, "noprefix", 0,
      NULL, OPAL_CMD_LINE_TYPE_STRING,
      "Disable automatic --prefix behavior" },
    /* End of list */
    { NULL, NULL, NULL, '\0', NULL, NULL, 0,
      NULL, OPAL_CMD_LINE_TYPE_NULL, NULL }
};

/*
 * Local functions
 */
static void exit_callback(int fd, short event, void *arg);
static void abort_signal_callback(int fd, short event, void *arg);
static void signal_forward_callback(int fd, short event, void *arg);
static int create_app(int argc, char *argv[], orte_app_context_t **app,
                      bool *made_app, char ***app_env);
static int init_globals(void);
static int parse_globals(int argc, char *argv[], opal_cmd_line_t *cmd_line);
static int parse_locals(int argc, char *argv[]);
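/* Note on MCA params (see parse_locals() and parse_appfile() below):
   repeated "--mca foo ..." values for a single app are merged into one
   comma-separated value, whereas "--mca" values given on different app
   command lines (":"-separated, or separate appfile lines) apply only to
   their respective app; "--mca" params supplied alongside "--app" act as
   global defaults for every line of the appfile. */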
static int parse_appfile(char *filename, char ***env);
static void job_state_callback(orte_jobid_t jobid, orte_proc_state_t state);
static void dump_aborted_procs(orte_jobid_t jobid, orte_app_context_t **apps, orte_job_state_t state);

int orterun(int argc, char *argv[])
{
    orte_app_context_t **apps;
    int rc, ret, i, num_apps, array_size;
    orte_proc_state_t cb_states;
    orte_job_state_t exit_state;
    opal_list_t attributes;
    opal_list_item_t *item;
    uint8_t flow;
    opal_cmd_line_t cmd_line;

    /* find our basename (the name of the executable) so that we can
       use it in pretty-print error messages */
    orterun_basename = opal_basename(argv[0]);

    /* Setup and parse the command line */
    init_globals();
    opal_cmd_line_create(&cmd_line, cmd_line_init);
    mca_base_cmd_line_setup(&cmd_line);
    if (ORTE_SUCCESS != (ret = opal_cmd_line_parse(&cmd_line, true,
                                                   argc, argv))) {
        return ret;
    }

    /* Need to initialize OPAL so that install_dirs are filled in */
    /*
     * NOTE: (JJH)
     * We need to allow 'mca_base_cmd_line_process_args()' to process command
     * line arguments *before* calling opal_init_util() since the command
     * line could contain MCA parameters that affect the way opal_init_util()
     * functions. AMCA parameters are one such option normally received on the
     * command line that affects the way opal_init_util() behaves.
     * It is "safe" to call mca_base_cmd_line_process_args() before
     * opal_init_util() since mca_base_cmd_line_process_args() does *not*
     * depend upon opal_init_util() functionality.
     */
    opal_init_util();

    /* save the environment for launch purposes */
    orte_launch_environ = opal_argv_copy(environ);

    /* setup the daemon communication subsystem flags */
    OBJ_CONSTRUCT(&orted_comm_mutex, opal_mutex_t);
    OBJ_CONSTRUCT(&orted_comm_cond, opal_condition_t);
    orte_orterun = true;

    /* Setup MCA params */

    /* Check for some "global" command line params */
    parse_globals(argc, argv, &cmd_line);
    OBJ_DESTRUCT(&cmd_line);

    /* If we're still here, parse each app */
    parse_locals(argc, argv);

    /* Convert the list of apps to an array of orte_app_context_t
       pointers */
    array_size = orte_pointer_array_get_size(apps_pa);
    apps = (orte_app_context_t **) malloc(sizeof(orte_app_context_t *) * array_size);
    if (NULL == apps) {
        opal_show_help("help-orterun.txt", "orterun:call-failed",
                       true, orterun_basename, "system", "malloc returned NULL", errno);
        exit(1);
    }
    num_apps = 0;
    for (i = 0; i < array_size; ++i) {
        apps[num_apps] = (orte_app_context_t *)
            orte_pointer_array_get_item(apps_pa, i);
        if (NULL != apps[num_apps]) {
            num_apps++;
        }
    }
    if (0 == num_apps) {
        /* This should never happen -- this case should be caught in
           create_app(), but let's just double check... */
        opal_show_help("help-orterun.txt", "orterun:nothing-to-do",
                       true, orterun_basename);
        exit(1);
    }

#if OPAL_ENABLE_FT == 1
    /* Disable OPAL CR notifications for this tool */
    opal_cr_set_enabled(false);
#endif

    /* Initialize our Open RTE environment */
    /* Set the flag telling orte_init that I am NOT a
     * singleton, but am "infrastructure" - prevents setting
     * up incorrect infrastructure that only a singleton would
     * require
     */
    if (ORTE_SUCCESS != (rc = orte_init(ORTE_INFRASTRUCTURE))) {
        ORTE_ERROR_LOG(rc);
        free(apps);
        return rc;
    }

    /* If we have a prefix, then modify the PATH and
       LD_LIBRARY_PATH environment variables in our copy. This
       will ensure that any locally-spawned children will
       have our executables and libraries in their path.

       For now, default to the prefix_dir provided in the first app_context.
       Since there always MUST be at least one app_context, we are safe in
       doing this.
    */
    if (NULL != apps[0]->prefix_dir) {
        char *oldenv, *newenv, *lib_base, *bin_base;

        lib_base = opal_basename(opal_install_dirs.libdir);
        bin_base = opal_basename(opal_install_dirs.bindir);

        /* Reset PATH */
        newenv = opal_os_path(false, apps[0]->prefix_dir, bin_base, NULL);
        oldenv = getenv("PATH");
        if (NULL != oldenv) {
            char *temp;
            asprintf(&temp, "%s:%s", newenv, oldenv);
            free(newenv);
            newenv = temp;
        }
        opal_setenv("PATH", newenv, true, &orte_launch_environ);
        if (orte_debug_flag) {
            opal_output(0, "%s: reset PATH: %s", orterun_basename, newenv);
        }
        free(newenv);
        free(bin_base);

        /* Reset LD_LIBRARY_PATH */
        newenv = opal_os_path(false, apps[0]->prefix_dir, lib_base, NULL);
        oldenv = getenv("LD_LIBRARY_PATH");
        if (NULL != oldenv) {
            char *temp;
            asprintf(&temp, "%s:%s", newenv, oldenv);
            free(newenv);
            newenv = temp;
        }
        opal_setenv("LD_LIBRARY_PATH", newenv, true, &orte_launch_environ);
        if (orte_debug_flag) {
            opal_output(0, "%s: reset LD_LIBRARY_PATH: %s",
                        orterun_basename, newenv);
        }
        free(newenv);
        free(lib_base);
    }
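    /* For example, with "--prefix /opt/openmpi" (a hypothetical path) and
       the usual install layout, the launch environment typically ends up
       with PATH=/opt/openmpi/bin:$PATH and
       LD_LIBRARY_PATH=/opt/openmpi/lib:$LD_LIBRARY_PATH. */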

    /* since we are a daemon, we should *always* yield the processor when idle */
    opal_progress_set_yield_when_idle(true);

    /* pre-condition any network transports that require it */
    if (ORTE_SUCCESS != (rc = orte_pre_condition_transports(apps, num_apps))) {
        ORTE_ERROR_LOG(rc);
        opal_show_help("help-orterun.txt", "orterun:precondition", false,
                       orterun_basename, NULL, NULL, rc);
        return rc;
    }
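    /* (Pre-conditioning covers transports that need job-wide state - for
       example, a unique key - placed in the launch environment before any
       process starts.) */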

    /* setup our receive functions so we can fully participate in daemon
     * communications - this will allow us to relay messages
     * during start for better scalability
     */
    /* register the daemon main receive functions */
    /* setup to listen for broadcast commands via routed messaging algorithms */
    rc = orte_rml.recv_buffer_nb(ORTE_NAME_WILDCARD, ORTE_RML_TAG_ORTED_ROUTED,
                                 ORTE_RML_NON_PERSISTENT, orte_daemon_recv_routed, NULL);
    if (rc != ORTE_SUCCESS && rc != ORTE_ERR_NOT_IMPLEMENTED) {
        ORTE_ERROR_LOG(rc);
        return rc;
    }
    /* setup to listen for commands sent specifically to me, even though I would probably
     * be the one sending them! Unfortunately, since I am a participating daemon,
     * there are times I need to send a command to "all daemons", and that means *I* have
     * to receive it too
     */
    rc = orte_rml.recv_buffer_nb(ORTE_NAME_WILDCARD, ORTE_RML_TAG_DAEMON, ORTE_RML_NON_PERSISTENT, orte_daemon_recv, NULL);
    if (rc != ORTE_SUCCESS && rc != ORTE_ERR_NOT_IMPLEMENTED) {
        ORTE_ERROR_LOG(rc);
        return rc;
    }

    /* Prep to start the application */

    /* construct the list of attributes */
    OBJ_CONSTRUCT(&attributes, opal_list_t);

    if (orterun_globals.do_not_launch) {
        flow = ORTE_RMGR_SETUP | ORTE_RMGR_RES_DISC | ORTE_RMGR_ALLOC | ORTE_RMGR_MAP | ORTE_RMGR_SETUP_TRIGS;
        orte_rmgr.add_attribute(&attributes, ORTE_RMGR_SPAWN_FLOW, ORTE_UINT8, &flow, ORTE_RMGR_ATTR_OVERRIDE);
    }
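    /* With --do-not-launch, the spawn-flow attribute limits the resource
       manager to the setup, resource discovery, allocation, mapping, and
       trigger-setup stages - everything short of actually launching. */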

    /** setup callbacks for abort signals */
    opal_signal_set(&term_handler, SIGTERM,
                    abort_signal_callback, &term_handler);
    opal_signal_add(&term_handler, NULL);
    opal_signal_set(&int_handler, SIGINT,
                    abort_signal_callback, &int_handler);
    opal_signal_add(&int_handler, NULL);

#ifndef __WINDOWS__
    /** setup callbacks for signals we should forward */
    opal_signal_set(&sigusr1_handler, SIGUSR1,
                    signal_forward_callback, &sigusr1_handler);
    opal_signal_add(&sigusr1_handler, NULL);
    opal_signal_set(&sigusr2_handler, SIGUSR2,
                    signal_forward_callback, &sigusr2_handler);
    opal_signal_add(&sigusr2_handler, NULL);
#endif  /* __WINDOWS__ */
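    /* Net effect: SIGTERM and SIGINT abort the job, while SIGUSR1 and
       SIGUSR2 are forwarded to the application processes. */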

    orte_totalview_init_before_spawn();

    /* Spawn the job */
    cb_states = ORTE_PROC_STATE_TERMINATED | ORTE_PROC_STATE_AT_STG1;
    rc = orte_rmgr.spawn_job(apps, num_apps, &jobid, 0, NULL, job_state_callback, cb_states, &attributes);
    while (NULL != (item = opal_list_remove_first(&attributes))) OBJ_RELEASE(item);
    OBJ_DESTRUCT(&attributes);

    if (orterun_globals.do_not_launch) {
        /* we are done! */
        goto DONE;
    }

    OPAL_THREAD_LOCK(&orterun_globals.lock);

    /* If the spawn was successful, wait for the app to complete */
    if (ORTE_SUCCESS == rc) {
        while (!orterun_globals.exit) {
            opal_condition_wait(&orterun_globals.cond,
                                &orterun_globals.lock);
        }
    }

    /* check to see if the job was aborted */
    if (ORTE_JOBID_INVALID != jobid &&
        ORTE_SUCCESS != (rc = orte_smr.get_job_state(&exit_state, jobid))) {
        if (ORTE_SUCCESS != rc) {
            ORTE_ERROR_LOG(rc);
        }
        /* define the exit state as abnormal by default */
        exit_state = ORTE_JOB_STATE_ABORTED;
    }

    if (ORTE_JOB_STATE_TERMINATED != exit_state) {
        /* abnormal termination of some kind */
        dump_aborted_procs(jobid, apps, exit_state);

        /* If we showed more abort messages than were allowed,
           show a followup message here */
        if (num_aborted > max_display_aborted) {
            i = num_aborted - max_display_aborted;
            printf("%d additional process%s aborted (not shown)\n",
                   i, ((i > 1) ? "es" : ""));
        }
        if (num_killed > 0) {
            printf("%d process%s killed (possibly by Open MPI)\n",
                   num_killed, ((num_killed > 1) ? "es" : ""));
        }
    }

    /* Make sure we propagate the exit code */
    if (WIFEXITED(orterun_globals.exit_status)) {
        rc = WEXITSTATUS(orterun_globals.exit_status);
    } else if (ORTE_JOB_STATE_FAILED_TO_START == exit_state) {
        /* ensure we don't treat this like a signal */
        rc = orterun_globals.exit_status;
    } else {
        /* If a process was killed by a signal, then make the
         * exit code of orterun be "signo + 128" so that "prog"
         * and "orterun prog" will both set the same status
         * value for the shell */
        rc = WTERMSIG(orterun_globals.exit_status) + 128;
    }
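    /* For example, a job killed by SIGSEGV (signal 11) makes orterun exit
       with status 11 + 128 = 139, the same status a shell would report had
       the program been run directly. */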

    /* the job is complete - now tell the orteds that it is
     * okay to finalize and exit, we are done with them.
     * Issue this as a "soft kill" so the daemons won't die
     * if they are part of a virtual machine - since that is
     * the default mode, we can just leave the attributes as NULL
     */
    if (ORTE_JOBID_INVALID != jobid) {
        if (ORTE_SUCCESS != (ret = orte_pls.terminate_orteds(&orte_abort_timeout, NULL))) {
            opal_show_help("help-orterun.txt", "orterun:daemon-die", true,
                           orterun_basename, ORTE_ERROR_NAME(ret));
        }
    }
    OPAL_THREAD_UNLOCK(&orterun_globals.lock);

    /* If we were forcibly killed, print a warning that the
       user may still have some manual cleanup to do. */
    if (ORTE_JOBID_INVALID == jobid) {
        opal_show_help("help-orterun.txt", "orterun:abnormal-exit",
                       true, orterun_basename, orterun_basename);
    }
DONE:
2005-03-14 23:57:21 +03:00
    for (i = 0; i < num_apps; ++i) {
        OBJ_RELEASE(apps[i]);
    }
    free(apps);
2005-07-03 08:02:01 +04:00
    OBJ_RELEASE(apps_pa);
2006-10-17 20:06:17 +04:00
2007-07-23 22:36:33 +04:00
    /* cleanup the orted communication mutex and condition objects */
    OBJ_DESTRUCT(&orted_comm_mutex);
    OBJ_DESTRUCT(&orted_comm_cond);
2005-03-14 23:57:21 +03:00
    orte_finalize();
2005-04-13 19:26:33 +04:00
    free(orterun_basename);
2005-03-14 23:57:21 +03:00
    return rc;
}
2005-03-31 23:39:02 +04:00
/*
2005-09-05 00:54:19 +04:00
* On abnormal termination - dump the
2005-03-31 23:39:02 +04:00
 * exit status of the aborted procs.
*/
2007-04-24 23:19:14 +04:00
static void dump_aborted_procs(orte_jobid_t jobid, orte_app_context_t **apps, orte_job_state_t state)
2005-03-31 23:39:02 +04:00
{
    char *segment;
    orte_gpr_value_t **values = NULL;
2006-08-15 23:54:10 +04:00
    orte_std_cntr_t i, k, num_values = 0;
2005-03-31 23:39:02 +04:00
    int rc;
2005-04-29 04:36:07 +04:00
    int32_t exit_status = 0;
    bool exit_status_set;
2007-04-24 23:19:14 +04:00
    bool abort_reported = false;
2005-03-31 23:39:02 +04:00
    char *keys[] = {
        ORTE_PROC_NAME_KEY,
2007-02-07 20:46:19 +03:00
        ORTE_PROC_LOCAL_PID_KEY,
2005-03-31 23:39:02 +04:00
        ORTE_PROC_RANK_KEY,
        ORTE_PROC_EXIT_CODE_KEY,
        ORTE_NODE_NAME_KEY,
2007-04-24 23:19:14 +04:00
        ORTE_PROC_APP_CONTEXT_KEY,
        ORTE_PROC_STATE_KEY,
2005-03-31 23:39:02 +04:00
        NULL
    };
2007-04-24 23:19:14 +04:00
2005-09-20 21:09:11 +04:00
    OPAL_TRACE_ARG1(1, jobid);
2007-04-24 23:19:14 +04:00
2005-03-31 23:39:02 +04:00
    /* query the job segment on the registry */
    if (ORTE_SUCCESS != (rc = orte_schema.get_job_segment_name(&segment, jobid))) {
        ORTE_ERROR_LOG(rc);
        return;
    }
2007-04-24 23:19:14 +04:00
    rc = orte_gpr.get(ORTE_GPR_KEYS_OR | ORTE_GPR_TOKENS_OR,
                      segment,
                      NULL,
                      keys,
                      &num_values,
                      &values);
2005-03-31 23:39:02 +04:00
    if (rc != ORTE_SUCCESS) {
        ORTE_ERROR_LOG(rc);
        free(segment);
        return;
    }
2007-04-24 23:19:14 +04:00
2005-04-28 17:18:52 +04:00
    for (i = 0; i < num_values; i++) {
2005-03-31 23:39:02 +04:00
        orte_gpr_value_t *value = values[i];
2006-02-07 06:32:36 +03:00
        orte_process_name_t name, *nptr;
        pid_t pid = 0, *pidptr;
These changes were mostly captured in a prior RFC (except for #2 below) and are aimed specifically at improving startup performance and setting up the remaining modifications described in that RFC.
The commit has been tested for C/R and Cray operations, and on Odin (SLURM, rsh) and RoadRunner (TM). I tried to update all environments, but obviously could not test them. I know that Windows needs some work, and have highlighted what is known to be needed in the odls process component.
This represents a lot of work by Brian, Tim P, Josh, and myself, with much advice from Jeff and others. For posterity, I have appended a copy of the email describing the work that was done:
As we have repeatedly noted, the modex operation in MPI_Init is the single greatest consumer of time during startup. To-date, we have executed that operation as an ORTE stage gate that held the process until a startup message containing all required modex (and OOB contact info - see #3 below) info could be sent to it. Each process would send its data to the HNP's registry, which assembled and sent the message when all processes had reported in.
In addition, ORTE had taken responsibility for monitoring process status as it progressed through a series of "stage gates". The process reported its status at each gate, and ORTE would then send a "release" message once all procs had reported in.
The incoming changes revamp these procedures in three ways:
1. eliminating the ORTE stage gate system and cleanly delineating responsibility between the OMPI and ORTE layers for MPI init/finalize. The modex stage gate (STG1) has been replaced by a collective operation in the modex itself that performs an allgather on the required modex info. The allgather is implemented using the orte_grpcomm framework since the BTL's are not active at that point. At the moment, the grpcomm framework only has a "basic" component analogous to OMPI's "basic" coll framework - I would recommend that the MPI team create additional, more advanced components to improve performance of this step.
The other stage gates have been replaced by orte_grpcomm barrier functions. We tried to use MPI barriers instead (since the BTL's are active at that point), but - as we discussed on the telecon - these are not currently true barriers so the job would hang when we fell through while messages were still in flight. Note that the grpcomm barrier doesn't actually resolve that problem, but Brian has pointed out that we are unlikely to ever see it violated. Again, you might want to spend a little time on an advanced barrier algorithm as the one in "basic" is very simplistic.
Summarizing this change: ORTE no longer tracks process state nor has direct responsibility for synchronizing jobs. This is now done via collective operations within the MPI layer, albeit using ORTE collective communication services. I -strongly- urge the MPI team to implement advanced collective algorithms to improve the performance of this critical procedure.
2. reducing the volume of data exchanged during modex. Data in the modex consisted of the process name, the name of the node where that process is located (expressed as a string), plus a string representation of all contact info. The nodename was required in order for the modex to determine if the process was local or not - in addition, some people like to have it to print pretty error messages when a connection failed.
The size of this data has been reduced in three ways:
(a) reducing the size of the process name itself. The process name consisted of two 32-bit fields for the jobid and vpid. This is far larger than any current system, or system likely to exist in the near future, can support. Accordingly, the default size of these fields has been reduced to 16-bits, which means you can have 32k procs in each of 32k jobs. Since the daemons must have a vpid, and we require one daemon/node, this also restricts the default configuration to 32k nodes.
To support any future "mega-clusters", a configuration option --enable-jumbo-apps has been added. This option increases the jobid and vpid field sizes to 32-bits. Someday, if necessary, someone can add yet another option to increase them to 64-bits, I suppose. (A small standalone sketch of this size change follows this numbered list.)
(b) replacing the string nodename with an integer nodeid. Since we have one daemon/node, the nodeid corresponds to the local daemon's vpid. This replaces an often lengthy string with only 2 (or at most 4) bytes, a substantial reduction.
(c) when the mca param requesting that nodenames be sent (to support pretty error messages) is set, a second mca param is now used to request FQDN - otherwise, the domain name is stripped (by default) from the message to save space. If someone wants to combine those into a single param somehow (perhaps with an argument?), they are welcome to do so - I didn't want to alter what people are already using.
While these may seem like small savings, they actually amount to a significant impact when aggregated across the entire modex operation. Since every proc must receive the modex data regardless of the collective used to send it, just reducing the size of the process name removes nearly 400MBytes of communication from a 32k proc job (admittedly, much of this comm may occur in parallel). So it does add up pretty quickly.
3. routing RML messages to reduce connections. The default messaging system remains point-to-point - i.e., each proc opens a socket to every proc it communicates with and sends its messages directly. A new option uses the orteds as routers - i.e., each proc only opens a single socket to its local orted. All messages are sent from the proc to the orted, which forwards the message to the orted on the node where the intended recipient proc is located - that orted then forwards the message to its local proc (the recipient). This greatly reduces the connection storm we have encountered during startup.
It also has the benefit of removing the sharing of every proc's OOB contact with every other proc. The orted routing tables are populated during launch since every orted gets a map of where every proc is being placed. Each proc, therefore, only needs to know the contact info for its local daemon, which is passed in via the environment when the proc is fork/exec'd by the daemon. This alone removes ~50 bytes/process of communication that was in the current STG1 startup message - so for our 32k proc job, this saves us roughly 32k*50 = 1.6MBytes sent to 32k procs = 51GBytes of messaging.
Note that you can use the new routing method by specifying -mca routed tree - if you so desire. This mode will become the default at some point in the future.
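For illustration, here is a minimal standalone sketch of the size change described in 2(a). The typedef names and the JUMBO_APPS macro are stand-ins invented for this example, not the actual ORTE definitions or configure machinery:

#include <stdint.h>
#include <stdio.h>

#ifdef JUMBO_APPS                     /* stand-in for --enable-jumbo-apps */
typedef uint32_t sketch_jobid_t;
typedef uint32_t sketch_vpid_t;
#else
typedef uint16_t sketch_jobid_t;      /* ~32k usable jobids */
typedef uint16_t sketch_vpid_t;       /* ~32k usable procs per job */
#endif

typedef struct {
    sketch_jobid_t jobid;
    sketch_vpid_t  vpid;
} sketch_process_name_t;

int main(void)
{
    /* 4 bytes per name by default vs. 8 bytes with jumbo apps; every
       proc receives every other proc's name during the modex, so the
       per-name saving multiplies out across the whole job */
    printf("process name size: %zu bytes\n", sizeof(sketch_process_name_t));
    return 0;
}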
There are a few minor additional changes in the commit that I'll just note in passing:
* propagation of command line mca params to the orteds - fixes ticket #1073. See note there for details.
* requiring of "finalize" prior to "exit" for MPI procs - fixes ticket #1144. See note there for details.
* cleanup of some stale header files
This commit was SVN r16364.
2007-10-05 23:48:23 +04:00
        orte_std_cntr_t *sptr, app_idx = 0;
        orte_vpid_t rank = 0, *vptr;
2005-05-01 04:47:35 +04:00
        bool rank_found = false;
2005-03-31 23:39:02 +04:00
        char *node_name = NULL;
2006-02-07 06:32:36 +03:00
        orte_exit_code_t *ecptr;
2007-04-24 23:19:14 +04:00
        orte_proc_state_t *pst_ptr, pst;
2005-04-29 04:36:07 +04:00
        exit_status = 0;
        exit_status_set = false;
2005-03-31 23:39:02 +04:00
        for (k = 0; k < value->cnt; k++) {
            orte_gpr_keyval_t *keyval = value->keyvals[k];
            if (strcmp(keyval->key, ORTE_PROC_NAME_KEY) == 0) {
2006-02-07 06:32:36 +03:00
                if (ORTE_SUCCESS != (rc = orte_dss.get((void**)&nptr, keyval->value, ORTE_NAME))) {
                    ORTE_ERROR_LOG(rc);
                    continue;
                }
                name = *nptr;
2005-03-31 23:39:02 +04:00
                continue;
            }
2007-02-07 20:46:19 +03:00
            if (strcmp(keyval->key, ORTE_PROC_LOCAL_PID_KEY) == 0) {
2006-02-07 06:32:36 +03:00
                if (ORTE_SUCCESS != (rc = orte_dss.get((void**)&pidptr, keyval->value, ORTE_PID))) {
                    ORTE_ERROR_LOG(rc);
                    continue;
                }
                pid = *pidptr;
2005-03-31 23:39:02 +04:00
                continue;
            }
            if (strcmp(keyval->key, ORTE_PROC_RANK_KEY) == 0) {
2007-10-05 23:48:23 +04:00
                if (ORTE_SUCCESS != (rc = orte_dss.get((void**)&vptr, keyval->value, ORTE_VPID))) {
2006-02-07 06:32:36 +03:00
                    ORTE_ERROR_LOG(rc);
                    continue;
                }
2005-05-01 04:47:35 +04:00
                rank_found = true;
2007-10-05 23:48:23 +04:00
                rank = *vptr;
2005-03-31 23:39:02 +04:00
                continue;
            }
            if (strcmp(keyval->key, ORTE_PROC_EXIT_CODE_KEY) == 0) {
2006-02-07 06:32:36 +03:00
                if (ORTE_SUCCESS != (rc = orte_dss.get((void**)&ecptr, keyval->value, ORTE_EXIT_CODE))) {
                    ORTE_ERROR_LOG(rc);
                    continue;
                }
                exit_status = *ecptr;
2005-04-29 04:36:07 +04:00
                exit_status_set = true;
2005-03-31 23:39:02 +04:00
                continue;
            }
            if (strcmp(keyval->key, ORTE_NODE_NAME_KEY) == 0) {
2006-02-07 06:32:36 +03:00
                node_name = (char*)(keyval->value->data);
2005-03-31 23:39:02 +04:00
                continue;
            }
2007-04-24 23:19:14 +04:00
            if (strcmp(keyval->key, ORTE_PROC_APP_CONTEXT_KEY) == 0) {
                if (ORTE_SUCCESS != (rc = orte_dss.get((void**)&sptr, keyval->value, ORTE_STD_CNTR))) {
                    ORTE_ERROR_LOG(rc);
                    continue;
                }
                app_idx = *sptr;
                continue;
            }
            if (strcmp(keyval->key, ORTE_PROC_STATE_KEY) == 0) {
                if (ORTE_SUCCESS != (rc = orte_dss.get((void**)&pst_ptr, keyval->value, ORTE_PROC_STATE))) {
                    ORTE_ERROR_LOG(rc);
                    continue;
                }
                pst = *pst_ptr;
                continue;
            }
2005-03-31 23:39:02 +04:00
        }
2007-04-24 23:19:14 +04:00
2006-10-02 19:03:43 +04:00
        if (rank_found) {
2007-04-24 23:19:14 +04:00
            if (ORTE_JOB_STATE_FAILED_TO_START == state) {
                if (num_aborted < max_display_aborted) {
                    if (ORTE_ERR_SYS_LIMITS_PIPES == exit_status) {
                        opal_show_help("help-orterun.txt", "orterun:sys-limit-pipe", true,
                                       orterun_basename, node_name, (unsigned long)rank);
                    } else if (ORTE_ERR_PIPE_SETUP_FAILURE == exit_status) {
                        opal_show_help("help-orterun.txt", "orterun:pipe-setup-failure", true,
                                       orterun_basename, node_name, (unsigned long)rank);
                    } else if (ORTE_ERR_SYS_LIMITS_CHILDREN == exit_status) {
                        opal_show_help("help-orterun.txt", "orterun:sys-limit-children", true,
                                       orterun_basename, node_name, (unsigned long)rank);
                    } else if (ORTE_ERR_FAILED_GET_TERM_ATTRS == exit_status) {
                        opal_show_help("help-orterun.txt", "orterun:failed-term-attrs", true,
                                       orterun_basename, node_name, (unsigned long)rank);
                    } else if (ORTE_ERR_WDIR_NOT_FOUND == exit_status) {
                        opal_show_help("help-orterun.txt", "orterun:wdir-not-found", true,
                                       orterun_basename, apps[app_idx]->cwd, node_name, (unsigned long)rank);
                    } else if (ORTE_ERR_EXE_NOT_FOUND == exit_status) {
                        opal_show_help("help-orterun.txt", "orterun:exe-not-found", true,
                                       orterun_basename, apps[app_idx]->app, node_name, (unsigned long)rank);
                    } else if (ORTE_ERR_EXE_NOT_ACCESSIBLE == exit_status) {
                        opal_show_help("help-orterun.txt", "orterun:exe-not-accessible", true,
                                       orterun_basename, apps[app_idx]->app, node_name, (unsigned long)rank);
                    } else if (ORTE_ERR_PIPE_READ_FAILURE == exit_status) {
                        opal_show_help("help-orterun.txt", "orterun:pipe-read-failure", true,
                                       orterun_basename, node_name, (unsigned long)rank);
                    } else if (0 != exit_status) {
                        opal_show_help("help-orterun.txt", "orterun:proc-failed-to-start", true,
                                       orterun_basename, ORTE_ERROR_NAME(exit_status), node_name,
                                       (unsigned long)rank);
                    } else {
                        opal_show_help("help-orterun.txt", "orterun:proc-failed-to-start-no-status", true,
                                       orterun_basename, node_name);
                    }
                }
                ++num_aborted;
            } else {
                if (ORTE_PROC_STATE_ABORTED == pst) {
                    if (!abort_reported) {
                        opal_show_help("help-orterun.txt", "orterun:proc-ordered-abort", true,
2006-12-17 23:01:11 +03:00
                                       orterun_basename, (unsigned long)rank, (unsigned long)pid,
2007-04-24 23:19:14 +04:00
                                       node_name, orterun_basename);
                        abort_reported = true;
                    }
                    ++num_aborted;
                } else if (WIFSIGNALED(exit_status)) {
                    if (SIGKILL == WTERMSIG(exit_status)) {
                        ++num_killed;
                    } else {
                        if (num_aborted < max_display_aborted) {
#ifdef HAVE_STRSIGNAL
                            if (NULL != strsignal(WTERMSIG(exit_status))) {
                                opal_show_help("help-orterun.txt", "orterun:proc-aborted-strsignal", false,
                                               orterun_basename, (unsigned long)rank, (unsigned long)pid,
                                               node_name, WTERMSIG(exit_status),
                                               strsignal(WTERMSIG(exit_status)));
                            } else {
2007-02-09 19:39:30 +03:00
#endif
2007-04-24 23:19:14 +04:00
                                opal_show_help("help-orterun.txt", "orterun:proc-aborted", false,
                                               orterun_basename, (unsigned long)rank, (unsigned long)pid,
                                               node_name, WTERMSIG(exit_status));
2007-02-09 19:39:30 +03:00
#ifdef HAVE_STRSIGNAL
2007-04-24 23:19:14 +04:00
                            }
2006-12-17 23:01:11 +03:00
#endif
2007-04-24 23:19:14 +04:00
                        }
                        ++num_aborted;
2006-02-16 23:40:23 +03:00
                    }
2005-04-16 01:52:58 +04:00
                }
            }
2005-04-28 17:18:52 +04:00
        }
2007-04-24 23:19:14 +04:00
2005-05-01 13:53:30 +04:00
        /* If we haven't done so already, hold the exit_status so we
2007-04-24 23:19:14 +04:00
           can return it when exiting. Specifically, keep the first
           non-zero entry. If they all return zero, we'll return
           zero. We already have the globals.lock (from
2007-10-05 23:48:23 +04:00
           job_state_callback), so don't try to get it again. */
2007-04-24 23:19:14 +04:00
        if (ORTE_JOB_STATE_FAILED_TO_START == state) {
            /* if the job failed to start, then there cannot be
             * an exit state set, so we force the exit state
             * to be 1 so that scripts can tell we failed. Keep
             * this BEFORE the exit_status_set "if" so that we
             * can detect some procs failing to start while
             * others did.
             *
             * Any exit state we find is actually just the ORTE error
             * code we set so that orterun can output an intelligible
             * error message. Hence, there is no sense in trying to
             * propagate any reported exit states - just set it to "1"
             */
            orterun_globals.exit_status = 1;
        } else if (0 == orterun_globals.exit_status && exit_status_set) {
2005-04-29 04:36:07 +04:00
            orterun_globals.exit_status = exit_status;
2005-03-31 23:39:02 +04:00
        }
2007-04-24 23:19:14 +04:00
2005-03-31 23:39:02 +04:00
        OBJ_RELEASE(value);
    }
2005-04-28 17:18:52 +04:00
    if (NULL != values) {
2005-03-31 23:39:02 +04:00
        free(values);
    }
2005-09-03 05:22:11 +04:00
    free(segment);
2005-03-31 23:39:02 +04:00
}
2005-03-14 23:57:21 +03:00
/*
* signal main thread when application completes
*/
static void job_state_callback(orte_jobid_t jobid, orte_proc_state_t state)
{
2005-09-20 21:09:11 +04:00
    OPAL_TRACE_ARG2(1, jobid, state);
2005-07-04 02:45:48 +04:00
    OPAL_THREAD_LOCK(&orterun_globals.lock);
2005-04-16 01:52:58 +04:00
2006-02-16 23:40:23 +03:00
    /* Note that there's only two states that we're interested in
2005-04-16 01:52:58 +04:00
       here:
       TERMINATED: which means that all the processes in the job have
       completed (normally and/or abnormally).
2006-02-16 23:40:23 +03:00
       AT_STG1: which means that everyone has hit stage gate 1, so we
       can do the parallel debugger startup stuff.
2005-04-16 01:52:58 +04:00
       Remember that the rmgr itself will also be called for the
       ABORTED state and call the pls.terminate_job, which will result
       in killing all the other processes. */
2005-09-05 00:54:19 +04:00
2005-08-30 21:29:43 +04:00
    if (orte_debug_flag) {
        opal_output(0, "spawn: in job_state_callback(jobid = %d, state = 0x%x)\n",
                    jobid, state);
    }
2005-03-14 23:57:21 +03:00
    switch (state) {
2005-03-31 23:39:02 +04:00
    case ORTE_PROC_STATE_TERMINATED:
2006-08-16 20:35:09 +04:00
        orterun_globals.exit_status = 0;   /* set the exit status to indicate normal termination */
2005-03-14 23:57:21 +03:00
        orterun_globals.exit = true;
2005-07-04 02:45:48 +04:00
        opal_condition_signal(&orterun_globals.cond);
2005-03-14 23:57:21 +03:00
        break;
2005-08-30 21:29:43 +04:00
    case ORTE_PROC_STATE_AT_STG1:
        orte_totalview_init_after_spawn(jobid);
        break;
2006-02-08 20:40:11 +03:00
    default:
2007-01-31 07:24:56 +03:00
        opal_output(0, "orterun: job state callback in unexpected state - jobid %lu, state 0x%04x\n",
                    (long unsigned int)jobid, state);
2006-02-08 20:40:11 +03:00
        break;
2005-03-14 23:57:21 +03:00
    }
2005-07-04 02:45:48 +04:00
    OPAL_THREAD_UNLOCK(&orterun_globals.lock);
2005-03-14 23:57:21 +03:00
}
2005-03-31 23:39:02 +04:00
/*
2005-09-05 00:54:19 +04:00
 * Fail-safe in the event the job hangs and doesn't
2005-03-31 23:39:02 +04:00
 * clean up correctly.
*/
2005-03-14 23:57:21 +03:00
static void exit_callback(int fd, short event, void *arg)
{
2005-09-19 21:20:01 +04:00
    OPAL_TRACE(1);
2005-09-20 21:09:11 +04:00
2005-09-11 03:22:37 +04:00
    /* Remove the TERM and INT signal handlers */
    opal_signal_del(&term_handler);
    opal_signal_del(&int_handler);
2006-07-11 09:24:08 +04:00
#ifndef __WINDOWS__
2006-06-08 22:27:17 +04:00
    /** Remove the USR signal handlers */
    opal_signal_del(&sigusr1_handler);
    opal_signal_del(&sigusr2_handler);
2006-07-11 09:24:08 +04:00
#endif /* __WINDOWS__ */
2006-06-08 22:27:17 +04:00
2005-08-27 00:36:11 +04:00
    /* Trigger the normal exit conditions */
    orterun_globals.exit = true;
    orterun_globals.exit_status = 1;
    opal_condition_signal(&orterun_globals.cond);
2005-03-14 23:57:21 +03:00
}
2005-03-31 23:39:02 +04:00
/*
* Attempt to terminate the job and wait for callback indicating
2005-09-05 00:54:19 +04:00
 * the job has been aborted.
2005-03-31 23:39:02 +04:00
*/
2007-01-08 23:25:26 +03:00
typedef enum {
    ABORT_SIGNAL_FIRST,
    ABORT_SIGNAL_PROCESSING,
    ABORT_SIGNAL_WARNED,
    ABORT_SIGNAL_DONE
} abort_signal_state_t;
2006-06-08 22:27:17 +04:00
static void abort_signal_callback(int fd, short flags, void *arg)
2005-03-14 23:57:21 +03:00
{
    int ret;
2005-07-04 03:09:55 +04:00
    opal_event_t *event;
2006-11-14 22:34:59 +03:00
    opal_list_t attrs;
    opal_list_item_t *item;
2007-01-25 17:17:44 +03:00
    static abort_signal_state_t state = ABORT_SIGNAL_FIRST;
2007-01-08 23:25:26 +03:00
    static struct timeval invoked, now;
    double a, b;
2007-01-25 17:17:44 +03:00
2005-09-19 21:20:01 +04:00
    OPAL_TRACE(1);
2007-01-25 17:17:44 +03:00
2007-01-08 23:25:26 +03:00
    /* If this whole process has already completed, then bail */
    switch (state) {
2007-01-31 02:03:13 +03:00
    case ABORT_SIGNAL_FIRST:
        /* This is the first time through */
        state = ABORT_SIGNAL_PROCESSING;
        break;
2007-01-25 17:17:44 +03:00
2007-01-31 02:03:13 +03:00
    case ABORT_SIGNAL_WARNED:
        gettimeofday(&now, NULL);
        a = invoked.tv_sec * 1000000 + invoked.tv_usec;
        b = now.tv_sec * 1000000 + now.tv_usec;
        if (b - a <= 1000000) {
            if (!orterun_globals.quiet) {
                fprintf(stderr, "%s: forcibly killing job...\n",
                        orterun_basename);
            }
            /* We are in an event handler; exit_callback() will delete
               the handler that is currently running (which is a Bad
               Thing), so we can't call it directly. Instead, we have
               to exit this handler and set up to call exit_callback()
               after this. */
            if (NULL != (event = (opal_event_t*)
                         malloc(sizeof(opal_event_t)))) {
                opal_evtimer_set(event, exit_callback, NULL);
                now.tv_sec = 0;
                now.tv_usec = 0;
                opal_evtimer_add(event, &now);
                state = ABORT_SIGNAL_DONE;
            }
            return;
        }
        /* Otherwise fall through to PROCESSING and warn again */
2007-01-25 17:17:44 +03:00
2007-01-31 02:03:13 +03:00
    case ABORT_SIGNAL_PROCESSING:
        opal_show_help("help-orterun.txt", "orterun:sigint-while-processing",
                       true, orterun_basename, orterun_basename,
                       orterun_basename);
        gettimeofday(&invoked, NULL);
        state = ABORT_SIGNAL_WARNED;
        return;
    case ABORT_SIGNAL_DONE:
        /* Nothing to do -- return */
        return;
2005-03-14 23:57:21 +03:00
}
2007-01-08 23:25:26 +03:00
2006-06-26 22:21:45 +04:00
    if (!orterun_globals.quiet) {
2006-09-15 01:29:51 +04:00
        fprintf(stderr, "%s: killing job...\n\n", orterun_basename);
2006-06-26 22:21:45 +04:00
    }
2007-01-25 17:17:44 +03:00
2006-09-15 01:29:51 +04:00
    /* terminate the job - this will also wake up orterun so
2006-11-14 22:34:59 +03:00
     * it can kill all the orteds. Be sure to kill all the job's
     * descendants, if any, so nothing is left hanging
2006-09-15 01:29:51 +04:00
     */
2006-10-02 04:46:31 +04:00
    if (jobid != ORTE_JOBID_INVALID) {
2006-11-14 22:34:59 +03:00
        OBJ_CONSTRUCT(&attrs, opal_list_t);
        orte_rmgr.add_attribute(&attrs, ORTE_NS_INCLUDE_DESCENDANTS, ORTE_UNDEF, NULL, ORTE_RMGR_ATTR_OVERRIDE);
2007-01-25 17:17:44 +03:00
        ret = orte_pls.terminate_job(jobid, &orte_abort_timeout, &attrs);
2007-01-31 02:03:13 +03:00
        while (NULL != (item = opal_list_remove_first(&attrs))) {
            OBJ_RELEASE(item);
        }
2006-11-14 22:34:59 +03:00
        OBJ_DESTRUCT(&attrs);
2005-03-14 23:57:21 +03:00
        if (ORTE_SUCCESS != ret) {
2007-01-31 02:03:13 +03:00
            /* If we failed the terminate_job() above, then the
               condition variable in the main loop in orterun won't
               wake up. So signal it. */
            if (NULL != (event = (opal_event_t*)
                         malloc(sizeof(opal_event_t)))) {
                opal_evtimer_set(event, exit_callback, NULL);
                now.tv_sec = 0;
                now.tv_usec = 0;
                opal_evtimer_add(event, &now);
            } else {
                /* We really don't want to do this, but everything
                   else has failed... */
                orterun_globals.exit = true;
                orterun_globals.exit_status = 1;
                opal_condition_signal(&orterun_globals.cond);
            }
2006-10-02 04:46:31 +04:00
            jobid = ORTE_JOBID_INVALID;
2005-03-14 23:57:21 +03:00
}
}
2007-01-31 02:03:13 +03:00
2007-01-08 23:25:26 +03:00
    state = ABORT_SIGNAL_DONE;
2005-03-14 23:57:21 +03:00
}
2006-06-08 22:27:17 +04:00
/**
* Pass user signals to the remote application processes
*/
2006-06-26 19:12:52 +04:00
static void signal_forward_callback(int fd, short event, void *arg)
2006-06-08 22:27:17 +04:00
{
2006-08-23 06:35:00 +04:00
    struct opal_event *signal = (struct opal_event*)arg;
2006-06-26 19:12:52 +04:00
    int signum, ret;
2006-11-14 22:34:59 +03:00
    opal_list_t attrs;
    opal_list_item_t *item;
2006-06-08 22:27:17 +04:00
    OPAL_TRACE(1);
2006-06-26 19:12:52 +04:00
    signum = OPAL_EVENT_SIGNAL(signal);
2006-06-26 22:21:45 +04:00
    if (!orterun_globals.quiet) {
2007-04-05 21:45:03 +04:00
        fprintf(stderr, "%s: Forwarding signal %d to job\n",
2006-06-26 19:12:52 +04:00
                orterun_basename, signum);
2006-06-26 22:21:45 +04:00
    }
2006-06-08 22:27:17 +04:00
2006-11-14 22:34:59 +03:00
    /** send the signal out to the processes, including any descendants */
    OBJ_CONSTRUCT(&attrs, opal_list_t);
    orte_rmgr.add_attribute(&attrs, ORTE_NS_INCLUDE_DESCENDANTS, ORTE_UNDEF, NULL, ORTE_RMGR_ATTR_OVERRIDE);
    if (ORTE_SUCCESS != (ret = orte_pls.signal_job(jobid, signum, &attrs))) {
2006-06-26 19:12:52 +04:00
fprintf ( stderr , " Signal %d could not be sent to the job (returned %d) " ,
signum , ret ) ;
2006-06-08 22:27:17 +04:00
}
2006-11-14 22:34:59 +03:00
    while (NULL != (item = opal_list_remove_first(&attrs))) OBJ_RELEASE(item);
    OBJ_DESTRUCT(&attrs);
2006-06-08 22:27:17 +04:00
}
2005-09-05 00:54:19 +04:00
static int init_globals(void)
2005-03-14 23:57:21 +03:00
{
2005-03-19 02:58:36 +03:00
    /* Only CONSTRUCT things once */
    if (!globals_init) {
2005-07-04 02:45:48 +04:00
        OBJ_CONSTRUCT(&orterun_globals.lock, opal_mutex_t);
        OBJ_CONSTRUCT(&orterun_globals.cond, opal_condition_t);
2006-10-23 07:34:08 +04:00
        orterun_globals.hostfile = NULL;
        orterun_globals.env_val = NULL;
        orterun_globals.appfile = NULL;
        orterun_globals.wdir = NULL;
        orterun_globals.path = NULL;
2005-03-19 02:58:36 +03:00
}
2006-07-11 01:25:33 +04:00
/* Reset the other fields every time */
2005-03-19 02:58:36 +03:00
2006-10-23 07:34:08 +04:00
    orterun_globals.help = false;
    orterun_globals.version = false;
    orterun_globals.verbose = false;
    orterun_globals.quiet = false;
    orterun_globals.exit = false;
    orterun_globals.no_wait_for_job_completion = false;
    orterun_globals.by_node = false;
    orterun_globals.by_slot = false;
    orterun_globals.debugger = false;
2006-12-13 07:51:38 +03:00
    orterun_globals.do_not_launch = false;
2006-12-12 03:54:05 +03:00
    orterun_globals.num_procs = 0;
    orterun_globals.exit_status = 0;
2006-11-16 01:59:01 +03:00
    if (NULL != orterun_globals.hostfile)
2006-10-23 07:34:08 +04:00
        free(orterun_globals.hostfile);
    orterun_globals.hostfile = NULL;
2006-11-16 01:59:01 +03:00
    if (NULL != orterun_globals.env_val)
2006-10-23 07:34:08 +04:00
        free(orterun_globals.env_val);
    orterun_globals.env_val = NULL;
2006-11-16 01:59:01 +03:00
    if (NULL != orterun_globals.appfile)
2006-10-23 07:34:08 +04:00
        free(orterun_globals.appfile);
    orterun_globals.appfile = NULL;
2006-11-16 01:59:01 +03:00
    if (NULL != orterun_globals.wdir)
2006-10-23 07:34:08 +04:00
        free(orterun_globals.wdir);
    orterun_globals.wdir = NULL;
    if (NULL != orterun_globals.path)
        free(orterun_globals.path);
    orterun_globals.path = NULL;
2005-03-19 02:58:36 +03:00
2007-03-17 02:11:45 +03:00
    orterun_globals.preload_binary = false;
    orterun_globals.preload_files = NULL;
    orterun_globals.preload_files_dest_dir = NULL;
2005-03-19 02:58:36 +03:00
/* All done */
globals_init = true ;
2005-03-14 23:57:21 +03:00
return ORTE_SUCCESS ;
}
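init_globals() above follows a construct-once / reset-always pattern: the OBJ_CONSTRUCT calls run only on the first invocation, while plain flags are cleared and owned strings are freed and NULLed on every invocation. A minimal standalone sketch of the same idiom (the names are illustrative, not the orterun ones):

/* Construct-once / reset-always idiom, reduced to a sketch. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct globals { char *hostfile; bool verbose; };
static struct globals g;
static bool g_init = false;

static void reset_globals(void) {
    if (!g_init) {                 /* one-time "construction" */
        g.hostfile = NULL;
        g_init = true;
    }
    g.verbose = false;             /* flags are reset every call */
    if (NULL != g.hostfile) {      /* owned strings freed and cleared */
        free(g.hostfile);
    }
    g.hostfile = NULL;
}

int main(void) {
    reset_globals();
    g.hostfile = strdup("myhosts");
    reset_globals();               /* second call releases the old string */
    printf("verbose=%d hostfile=%p\n", g.verbose, (void *)g.hostfile);
    return 0;
}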
2007-06-27 05:03:31 +04:00
static int parse_globals ( int argc , char * argv [ ] , opal_cmd_line_t * cmd_line )
2005-03-14 23:57:21 +03:00
{
2007-06-27 05:03:31 +04:00
int id ;
2005-09-05 00:54:19 +04:00
2006-06-09 21:21:23 +04:00
/* print version if requested. Do this before check for help so
that - - version - - help works as one might expect . */
2006-06-22 23:48:27 +04:00
if ( orterun_globals . version & &
! ( 1 = = argc | | orterun_globals . help ) ) {
2006-06-09 21:21:23 +04:00
char * project_name = NULL ;
if ( 0 = = strcmp ( orterun_basename , " mpirun " ) ) {
project_name = " Open MPI " ;
} else {
project_name = " OpenRTE " ;
}
opal_show_help ( " help-orterun.txt " , " orterun:version " , false ,
2006-06-22 23:48:27 +04:00
orterun_basename , project_name , OPAL_VERSION ,
PACKAGE_BUGREPORT ) ;
2006-06-09 21:21:23 +04:00
/* if we were the only argument, exit */
if ( 2 = = argc ) exit ( 0 ) ;
}
2005-07-29 01:17:48 +04:00
/* Check for help request */
2005-04-12 20:01:30 +04:00
if ( 1 = = argc | | orterun_globals . help ) {
2005-03-14 23:57:21 +03:00
char * args = NULL ;
2006-06-22 23:48:27 +04:00
char * project_name = NULL ;
if ( 0 = = strcmp ( orterun_basename , " mpirun " ) ) {
project_name = " Open MPI " ;
} else {
project_name = " OpenRTE " ;
}
2007-06-27 05:03:31 +04:00
args = opal_cmd_line_get_usage_msg ( cmd_line ) ;
2005-07-04 06:38:44 +04:00
opal_show_help ( " help-orterun.txt " , " orterun:usage " , false ,
2006-06-22 23:48:27 +04:00
orterun_basename , project_name , OPAL_VERSION ,
orterun_basename , args ,
PACKAGE_BUGREPORT ) ;
2005-03-14 23:57:21 +03:00
free ( args ) ;
2005-09-05 00:54:19 +04:00
2005-03-14 23:57:21 +03:00
/* If someone asks for help, that should be all we do */
exit ( 0 ) ;
}
2005-11-20 19:06:53 +03:00
/* Do we want a user-level debugger? */
2005-10-05 14:24:34 +04:00
2005-11-20 19:06:53 +03:00
if ( orterun_globals . debugger ) {
2007-07-10 16:53:48 +04:00
orte_run_debugger ( orterun_basename , cmd_line , argc , argv ) ;
2005-11-20 19:06:53 +03:00
}
2005-10-05 14:24:34 +04:00
2005-06-06 17:43:20 +04:00
/* Allocate and map by node or by slot? Shortcut for setting an
MCA param . */
(copied from a mail that has a lengthy description of this commit)
I spoke with Tim about this the other day -- he gave me the green
light to go ahead with this, but it turned into a bigger job than I
thought it would be. I revamped how the default RAS scheduling and
round_robin RMAPS mapping occurs. The previous algorithms were pretty
brain dead, and ignored the "slots" and "max_slots" tokens in
hostfiles. I considered this a big enough problem to fix it for the
beta (because there is currently no way to control where processes are
launched on SMPs).
There's still some more bells and whistles that I'd like to implement,
but there's no hurry, and they can go on the trunk at any time. My
patches below are for what I considered "essential", and do the
following:
- honor the "slots" and "max-slots" tokens in the hostfile (and all
their synonyms), meaning that we allocate/map until we fill slots,
and if there are still more processes to allocate/map, we keep going
until we fill max-slots (i.e., only oversubscribe a node if we have
to).
- offer two different algorithms, currently supported by two new
options to orterun. Remember that there are two parts here -- slot
allocation and process mapping. Slot allocation controls how many
processes we'll be running on a node. After that decision has been
made, process mapping effectively controls where the ranks of
MPI_COMM_WORLD (MCW) are placed. Some of the examples given below
don't make sense unless you remember that there is a difference
between the two (which makes total sense, but you have to think
about it in terms of both things):
1. "-bynode": allocates/maps one process per node in a round-robin
fashion until all slots on the node are taken. If we still have more
processes after all slots are taken, then keep going until all
max-slots are taken. Examples:
- The hostfile:
eddie slots=2 max-slots=4
vogon slots=4 max-slots=8
- orterun -bynode -np 6 -hostfile hostfile a.out
eddie: MCW ranks 0, 2
vogon: MCW ranks 1, 3, 4, 5
- orterun -bynode -np 8 -hostfile hostfile a.out
eddie: MCW ranks 0, 2, 4
vogon: MCW ranks 1, 3, 5, 6, 7
-> the algorithm oversubscribes all nodes "equally" (until each
node's max_slots is hit, of course)
- orterun -bynode -np 12 -hostfile hostfile a.out
eddie: MCW ranks 0, 2, 4, 6
vogon: MCW ranks 1, 3, 5, 7, 8, 9, 10, 11
2. "-byslot" (this is the default if you don't specify -bynode):
greedily takes all available slots on a node for a job before moving
on to the next node. If we still have processes to allocate/schedule,
then oversubscribe all nodes equally (i.e., go round robin on all
nodes until each node's max_slots is hit). Examples:
- The hostfile
eddie slots=2 max-slots=4
vogon slots=4 max-slots=8
- orterun -np 6 -hostfile hostfile a.out
eddie: MCW ranks 0, 1
vogon: MCW ranks 2, 3, 4, 5
- orterun -np 8 -hostfile hostfile a.out
eddie: MCW ranks 0, 1, 2
vogon: MCW ranks 3, 4, 5, 6, 7
-> the algorithm oversubscribes all nodes "equally" (until max_slots
is hit)
- orterun -np 12 -hostfile hostfile a.out
eddie: MCW ranks 0, 1, 2, 3
vogon: MCW ranks 4, 5, 6, 7, 8, 9, 10, 11
The above examples are fairly contrived, and it's not clear from them
that you can get different allocation answers in all cases (the
mapping differences are obvious). Consider the following allocation
example:
- The hostfile
eddie count=4
vogon count=4
earth count=4
deep-thought count=4
- orterun -np 8 -hostfile hostfile a.out
eddie: 4 slots will be allocated
vogon: 4 slots will be allocated
earth: no slots allocated
deep-thought: no slots allocated
- orterun -bynode -np 8 -hostfile hostfile a.out
eddie: 2 slots will be allocated
vogon: 2 slots will be allocated
earth: 2 slots will be allocated
deep-thought: 2 slots will be allocated
This commit was SVN r5894.
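The placement rules described above can be reproduced with a short standalone sketch. This is a deliberate simplification for illustration -- not the actual RMAPS round_robin mapper -- and the hostnames and slot counts are taken from the examples in the message:

/* Toy model of -byslot / -bynode placement (illustrative only). */
#include <stdio.h>

struct node { const char *name; int slots; int max_slots; int used; };

/* Fill each node up to "slots" before moving on; if ranks remain,
   sweep again up to "max_slots" (oversubscription). */
static void map_byslot(struct node *n, int nnodes, int np) {
    int rank = 0, pass, i;
    for (pass = 0; pass < 2 && rank < np; ++pass) {
        for (i = 0; i < nnodes && rank < np; ++i) {
            int limit = pass ? n[i].max_slots : n[i].slots;
            while (n[i].used < limit && rank < np) {
                printf("%s: MCW rank %d\n", n[i].name, rank++);
                n[i].used++;
            }
        }
    }
}

/* One rank per node per sweep, round-robin, with the same
   slots-then-max_slots two-pass rule. */
static void map_bynode(struct node *n, int nnodes, int np) {
    int rank = 0, pass, i, progress;
    for (pass = 0; pass < 2 && rank < np; ++pass) {
        do {
            progress = 0;
            for (i = 0; i < nnodes && rank < np; ++i) {
                int limit = pass ? n[i].max_slots : n[i].slots;
                if (n[i].used < limit) {
                    printf("%s: MCW rank %d\n", n[i].name, rank++);
                    n[i].used++;
                    progress = 1;
                }
            }
        } while (rank < np && progress);
    }
}

int main(void) {
    struct node a[] = { { "eddie", 2, 4, 0 }, { "vogon", 4, 8, 0 } };
    struct node b[] = { { "eddie", 2, 4, 0 }, { "vogon", 4, 8, 0 } };
    puts("-bynode -np 6:");  map_bynode(a, 2, 6);
    puts("-byslot -np 6:");  map_byslot(b, 2, 6);
    return 0;
}

Running the sketch reproduces the placements given above: -bynode puts MCW ranks 0 and 2 on eddie and 1, 3, 4, 5 on vogon, while -byslot puts 0 and 1 on eddie and 2 through 5 on vogon.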
2005-05-31 20:36:53 +04:00
2005-09-27 06:54:15 +04:00
/* Don't initialize the MCA parameter here unless we have to,
2005-10-08 02:24:52 +04:00
* since it really should be initialized in rmaps_base_open */
2005-09-27 06:54:15 +04:00
if ( orterun_globals . by_node | | orterun_globals . by_slot ) {
char * policy = NULL ;
2006-07-05 00:12:35 +04:00
id = mca_base_param_reg_string_name ( " rmaps " , " base_schedule_policy " ,
2005-10-08 02:24:52 +04:00
" Scheduling policy for RMAPS. [slot | node] " ,
2005-09-27 06:54:15 +04:00
false , false , " slot " , & policy ) ;
if ( orterun_globals . by_node ) {
orterun_globals . by_slot = false ;
mca_base_param_set_string ( id , " node " ) ;
} else {
orterun_globals . by_slot = true ;
mca_base_param_set_string ( id , " slot " ) ;
}
free ( policy ) ;
}
else {
/* Default */
2005-05-31 20:36:53 +04:00
orterun_globals . by_slot = true ;
}
2006-10-07 23:50:12 +04:00
2007-01-25 17:17:44 +03:00
/* If we don't want to wait, we don't want to wait */
2005-03-14 23:57:21 +03:00
if ( orterun_globals . no_wait_for_job_completion ) {
wait_for_job_completion = false ;
}
return ORTE_SUCCESS ;
}
static int parse_locals ( int argc , char * argv [ ] )
{
int i , rc , app_num ;
int temp_argc ;
2005-08-08 20:42:28 +04:00
char * * temp_argv , * * env ;
2005-03-14 23:57:21 +03:00
orte_app_context_t * app ;
bool made_app ;
2006-08-15 23:54:10 +04:00
orte_std_cntr_t j , size1 ;
2005-03-14 23:57:21 +03:00
/* Make the apps */
temp_argc = 0 ;
temp_argv = NULL ;
2005-07-04 04:13:44 +04:00
opal_argv_append ( & temp_argc , & temp_argv , argv [ 0 ] ) ;
2006-03-24 18:39:09 +03:00
/* Make the max size of the array be INT_MAX because we may be
parsing an app file , in which case we don ' t know how many
entries there will be . The max size of an orte_pointer_array
is only a safety net ; it only initially allocates block_size
entries ( 2 , in this case ) */
2006-03-23 20:55:25 +03:00
orte_pointer_array_init ( & apps_pa , 1 , INT_MAX , 2 ) ;
While waiting for fortran compiles...
Fixes for orterun in handling different MCA params for different
processes (reviewed by Brian):
- By design, if you run the following:
mpirun --mca foo aaa --mca foo bbb a.out
a.out will get a single MCA param for foo with value "aaa,bbb".
- However, if you specify multiple apps with different values for the
same MCA param, you should expect to get the different values for
each app. For example:
mpirun --mca foo aaa a.out : --mca foo bbb b.out
Should yield a.out with a "foo" param with value "aaa" and b.out
with a "foo" param with a value "bbb".
- This did not work -- both a.out and b.out would get a "foo" with
"aaa,bbb".
- This commit fixes this behavior -- now a.out will get aaa and b.out
will get bbb.
- Additionally, if you mix --mca and an app file, you can have
"global" params and per-line-in-the-appfile params. For example:
mpirun --mca foo zzzz --app appfile
where "appfile" contains:
-np 1 --mca bar aaa a.out
-np 1 --mca bar bbb b.out
In this case, a.out will get foo=zzzz and bar=aaa, and b.out will
get foo=zzzz and bar=bbb.
Spiffy.
Ok, fortran build is done... back to Fortran... sigh...
This commit was SVN r5710.
2005-05-13 18:36:36 +04:00
2005-08-08 20:42:28 +04:00
/* NOTE: This bogus env variable is necessary in the calls to
create_app ( ) , below . See comment immediately before the
create_app ( ) function for an explanation . */
2005-05-13 18:36:36 +04:00
env = NULL ;
2005-03-14 23:57:21 +03:00
for ( app_num = 0 , i = 1 ; i < argc ; + + i ) {
if ( 0 = = strcmp ( argv [ i ] , " : " ) ) {
/* Make an app with this argv */
2005-07-04 04:13:44 +04:00
if ( opal_argv_count ( temp_argv ) > 1 ) {
2005-05-13 18:36:36 +04:00
if ( NULL ! = env ) {
2005-07-04 04:13:44 +04:00
opal_argv_free ( env ) ;
2005-05-13 18:36:36 +04:00
env = NULL ;
}
2006-03-24 18:28:42 +03:00
app = NULL ;
2005-05-13 18:36:36 +04:00
rc = create_app ( temp_argc , temp_argv , & app , & made_app , & env ) ;
2006-03-23 19:53:11 +03:00
/** keep track of the number of apps - point this app_context to that index */
2005-03-14 23:57:21 +03:00
if ( ORTE_SUCCESS ! = rc ) {
/* Assume that the error message has already been
printed ; no need to cleanup - - we can just
exit */
exit ( 1 ) ;
}
if ( made_app ) {
2006-08-15 23:54:10 +04:00
orte_std_cntr_t dummy ;
2006-03-24 18:28:42 +03:00
app - > idx = app_num ;
+ + app_num ;
2005-07-03 08:02:01 +04:00
orte_pointer_array_add ( & dummy , apps_pa , app ) ;
2005-03-14 23:57:21 +03:00
}
2005-09-05 00:54:19 +04:00
2005-03-14 23:57:21 +03:00
/* Reset the temps */
2005-09-05 00:54:19 +04:00
2005-03-14 23:57:21 +03:00
temp_argc = 0 ;
temp_argv = NULL ;
2005-07-04 04:13:44 +04:00
opal_argv_append ( & temp_argc , & temp_argv , argv [ 0 ] ) ;
2005-03-14 23:57:21 +03:00
}
} else {
2005-07-04 04:13:44 +04:00
opal_argv_append ( & temp_argc , & temp_argv , argv [ i ] ) ;
2005-03-14 23:57:21 +03:00
}
}
2005-07-04 04:13:44 +04:00
if ( opal_argv_count ( temp_argv ) > 1 ) {
2006-03-24 18:28:42 +03:00
app = NULL ;
2005-05-13 18:36:36 +04:00
rc = create_app ( temp_argc , temp_argv , & app , & made_app , & env ) ;
2005-03-14 23:57:21 +03:00
if ( ORTE_SUCCESS ! = rc ) {
/* Assume that the error message has already been printed;
no need to cleanup - - we can just exit */
exit ( 1 ) ;
}
if ( made_app ) {
2006-08-15 23:54:10 +04:00
orte_std_cntr_t dummy ;
2006-03-24 18:28:42 +03:00
app - > idx = app_num ;
+ + app_num ;
2005-07-03 08:02:01 +04:00
orte_pointer_array_add ( & dummy , apps_pa , app ) ;
2005-03-14 23:57:21 +03:00
}
}
2005-05-13 18:36:36 +04:00
if ( NULL ! = env ) {
2005-07-04 04:13:44 +04:00
opal_argv_free ( env ) ;
2005-05-13 18:36:36 +04:00
}
2005-07-04 04:13:44 +04:00
opal_argv_free ( temp_argv ) ;
2005-03-14 23:57:21 +03:00
2005-08-08 20:42:28 +04:00
/* Once we've created all the apps, add the global MCA params to
each app ' s environment ( checking for duplicates , of
course - - yay opal_environ_merge ( ) ) . */
if ( NULL ! = global_mca_env ) {
size1 = orte_pointer_array_get_size ( apps_pa ) ;
/* Iterate through all the apps */
for ( j = 0 ; j < size1 ; + + j ) {
2005-09-05 00:54:19 +04:00
app = ( orte_app_context_t * )
2005-08-08 20:42:28 +04:00
orte_pointer_array_get_item ( apps_pa , j ) ;
if ( NULL ! = app ) {
/* Use handy utility function */
env = opal_environ_merge ( global_mca_env , app - > env ) ;
opal_argv_free ( app - > env ) ;
app - > env = env ;
}
}
}
/* Now take a subset of the MCA params and set them as MCA
overrides here in orterun ( so that when we orte_init ( ) later ,
all the components see these MCA params ) . Here ' s how we decide
which subset of the MCA params we set here in orterun :
1. If any global MCA params were set , use those
2. If no global MCA params were set and there was only one app ,
then use its app MCA params
3. Otherwise , don ' t set any
*/
env = NULL ;
if ( NULL ! = global_mca_env ) {
env = global_mca_env ;
} else {
if ( orte_pointer_array_get_size ( apps_pa ) > = 1 ) {
/* Remember that pointer_array's can be padded with NULL
entries ; so only use the app ' s env if there is exactly
1 non - NULL entry */
2005-09-05 00:54:19 +04:00
app = ( orte_app_context_t * )
2005-08-08 20:42:28 +04:00
orte_pointer_array_get_item ( apps_pa , 0 ) ;
if ( NULL ! = app ) {
env = app - > env ;
for ( j = 1 ; j < orte_pointer_array_get_size ( apps_pa ) ; + + j ) {
if ( NULL ! = orte_pointer_array_get_item ( apps_pa , j ) ) {
env = NULL ;
break ;
}
}
}
}
}
2005-09-05 00:54:19 +04:00
2005-08-08 20:42:28 +04:00
if ( NULL ! = env ) {
size1 = opal_argv_count ( env ) ;
for ( j = 0 ; j < size1 ; + + j ) {
putenv ( env [ j ] ) ;
}
}
2005-03-14 23:57:21 +03:00
/* All done */
return ORTE_SUCCESS ;
}
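parse_locals() above folds the global MCA environment into each app's environment via opal_environ_merge(). The following simplified stand-in shows only the duplicate handling; the tie-breaking rule used here (the second array wins on a key collision) is an assumption for illustration, not the documented opal_environ_merge contract:

/* Simplified stand-in for the environ-merge step (illustrative only). */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int has_key(char **env, const char *entry) {
    size_t klen = strcspn(entry, "=");   /* length of the NAME in NAME=VALUE */
    int i;
    for (i = 0; env && env[i]; ++i)
        if (0 == strncmp(env[i], entry, klen) && '=' == env[i][klen])
            return 1;
    return 0;
}

static char **merge_env(char **first, char **second) {
    char **out = NULL;
    int n = 0, i;
    /* take everything from the second array... */
    for (i = 0; second && second[i]; ++i) {
        out = realloc(out, (n + 2) * sizeof(char *));
        out[n++] = strdup(second[i]);
        out[n] = NULL;
    }
    /* ...then add entries from the first, skipping duplicate keys */
    for (i = 0; first && first[i]; ++i) {
        if (!has_key(out, first[i])) {
            out = realloc(out, (n + 2) * sizeof(char *));
            out[n++] = strdup(first[i]);
            out[n] = NULL;
        }
    }
    return out;
}

int main(void) {
    char *global[] = { "OMPI_MCA_foo=zzzz", NULL };
    char *perapp[] = { "OMPI_MCA_bar=aaa", NULL };
    char **merged = merge_env(global, perapp);
    int i;
    for (i = 0; merged[i]; ++i) puts(merged[i]);
    return 0;
}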
2005-08-08 20:42:28 +04:00
/*
* This function takes a " char ***app_env " parameter to handle the
* specific case :
*
* orterun - - mca foo bar - app appfile
*
* That is , we ' ll need to keep foo = bar , but the presence of the app
* file will cause an invocation of parse_appfile ( ) , which will cause
* one or more recursive calls back to create_app ( ) . Since the
* foo = bar value applies globally to all apps in the appfile , we need
* to pass in the " base " environment ( that contains the foo = bar value )
* when we parse each line in the appfile .
*
* This is really just a special case - - when we have a simple case like :
*
* orterun - - mca foo bar - np 4 hostname
*
* Then the upper - level function ( parse_locals ( ) ) calls create_app ( )
* with a NULL value for app_env , meaning that there is no " base "
* environment that the app needs to be created from .
*/
2005-03-14 23:57:21 +03:00
static int create_app ( int argc , char * argv [ ] , orte_app_context_t * * app_ptr ,
2005-08-08 20:42:28 +04:00
bool * made_app , char * * * app_env )
2005-03-14 23:57:21 +03:00
{
2005-07-04 04:13:44 +04:00
opal_cmd_line_t cmd_line ;
2005-03-14 23:57:21 +03:00
char cwd [ OMPI_PATH_MAX ] ;
2006-02-07 06:32:36 +03:00
int i , j , count , rc ;
2005-03-14 23:57:21 +03:00
char * param , * value , * value2 ;
orte_app_context_t * app = NULL ;
2005-11-03 21:15:47 +03:00
#if 0 /* Used only in the C/N notion case, remove to silence compiler warnings */
2006-08-15 23:54:10 +04:00
orte_std_cntr_t l , len ;
2005-11-03 21:15:47 +03:00
# endif
2005-04-19 09:45:25 +04:00
bool map_data = false , save_arg , cmd_line_made = false ;
2005-03-14 23:57:21 +03:00
int new_argc = 0 ;
char * * new_argv = NULL ;
* made_app = false ;
/* Pre-process the command line:
2005-09-05 00:54:19 +04:00
2005-03-14 23:57:21 +03:00
- convert C , cX , N , nX arguments to " -rawmap <id> <arg> " so
that the parser can pick it up nicely .
- convert - host to - rawmap < id > < arg >
- convert - arch to - rawmap < id > < arg >
Converting these to the same argument type will a ) simplify the
logic down below , and b ) allow us to preserve the ordering of
these arguments as the user specified them on the command
line . */
for ( i = 0 ; i < argc ; + + i ) {
map_data = false ;
save_arg = true ;
2005-11-03 21:15:47 +03:00
/* JJH To fix in the future
* Currently C / N notation is not supported so don ' t execute this check
2006-02-07 06:32:36 +03:00
* Bug : Make this context sensitive since it will not behave properly
2005-11-03 21:15:47 +03:00
* with the following argument set :
* $ orterun - np 2 - host c2 , c3 , c12 hostname
* Since it will see the hosts c2 , c3 , and c12 as C options instead
* of hostnames .
*/
if ( false ) { ; } /* Wrapper to preserve logic continuation while the below
is commented out */
#if 0
2005-03-14 23:57:21 +03:00
if ( 0 = = strcmp ( argv [ i ] , " C " ) | |
0 = = strcmp ( argv [ i ] , " N " ) ) {
map_data = true ;
2005-09-05 00:54:19 +04:00
}
2005-03-14 23:57:21 +03:00
2005-09-05 00:54:19 +04:00
/* Heuristic: if the string fits "[cn][0-9]+" or "[cn][0-9],",
2005-03-14 23:57:21 +03:00
then accept it as mapping data */
else if ( ' c ' = = argv [ i ] [ 0 ] | | ' n ' = = argv [ i ] [ 0 ] ) {
len = strlen ( argv [ i ] ) ;
if ( len > 1 ) {
for ( l = 1 ; l < len ; + + l ) {
if ( ' , ' = = argv [ i ] [ l ] ) {
map_data = true ;
break ;
} else if ( ! isdigit ( argv [ i ] [ l ] ) ) {
break ;
}
}
if ( l > = len ) {
map_data = true ;
}
}
}
2005-11-03 21:15:47 +03:00
# endif
2005-03-14 23:57:21 +03:00
2005-09-20 12:56:02 +04:00
#if 0
/* JMS commented out because we don't handle this in any
mapper */
2005-03-14 23:57:21 +03:00
/* Save -arch args */
else if ( 0 = = strcmp ( " -arch " , argv [ i ] ) ) {
char str [ 2 ] = { ' 0 ' + ORTE_APP_CONTEXT_MAP_ARCH , ' \0 ' } ;
2005-07-04 04:13:44 +04:00
opal_argv_append ( & new_argc , & new_argv , " -rawmap " ) ;
opal_argv_append ( & new_argc , & new_argv , str ) ;
2005-03-14 23:57:21 +03:00
save_arg = false ;
}
2005-09-20 12:56:02 +04:00
# endif
2005-03-14 23:57:21 +03:00
/* Save -host args */
2006-02-07 06:32:36 +03:00
else if ( 0 = = strcmp ( " --host " , argv [ i ] ) | |
0 = = strcmp ( " -host " , argv [ i ] ) | |
2005-09-22 20:08:40 +04:00
0 = = strcmp ( " -H " , argv [ i ] ) ) {
2005-03-14 23:57:21 +03:00
char str [ 2 ] = { ' 0 ' + ORTE_APP_CONTEXT_MAP_HOSTNAME , ' \0 ' } ;
2005-07-04 04:13:44 +04:00
opal_argv_append ( & new_argc , & new_argv , " -rawmap " ) ;
opal_argv_append ( & new_argc , & new_argv , str ) ;
2005-03-14 23:57:21 +03:00
save_arg = false ;
}
These changes were mostly captured in a prior RFC (except for #2 below) and are aimed specifically at improving startup performance and setting up the remaining modifications described in that RFC.
The commit has been tested for C/R and Cray operations, and on Odin (SLURM, rsh) and RoadRunner (TM). I tried to update all environments, but obviously could not test them. I know that Windows needs some work, and have highlighted what is know to be needed in the odls process component.
This represents a lot of work by Brian, Tim P, Josh, and myself, with much advice from Jeff and others. For posterity, I have appended a copy of the email describing the work that was done:
As we have repeatedly noted, the modex operation in MPI_Init is the single greatest consumer of time during startup. To-date, we have executed that operation as an ORTE stage gate that held the process until a startup message containing all required modex (and OOB contact info - see #3 below) info could be sent to it. Each process would send its data to the HNP's registry, which assembled and sent the message when all processes had reported in.
In addition, ORTE had taken responsibility for monitoring process status as it progressed through a series of "stage gates". The process reported its status at each gate, and ORTE would then send a "release" message once all procs had reported in.
The incoming changes revamp these procedures in three ways:
1. eliminating the ORTE stage gate system and cleanly delineating responsibility between the OMPI and ORTE layers for MPI init/finalize. The modex stage gate (STG1) has been replaced by a collective operation in the modex itself that performs an allgather on the required modex info. The allgather is implemented using the orte_grpcomm framework since the BTL's are not active at that point. At the moment, the grpcomm framework only has a "basic" component analogous to OMPI's "basic" coll framework - I would recommend that the MPI team create additional, more advanced components to improve performance of this step.
The other stage gates have been replaced by orte_grpcomm barrier functions. We tried to use MPI barriers instead (since the BTL's are active at that point), but - as we discussed on the telecon - these are not currently true barriers so the job would hang when we fell through while messages were still in process. Note that the grpcomm barrier doesn't actually resolve that problem, but Brian has pointed out that we are unlikely to ever see it violated. Again, you might want to spend a little time on an advanced barrier algorithm as the one in "basic" is very simplistic.
Summarizing this change: ORTE no longer tracks process state nor has direct responsibility for synchronizing jobs. This is now done via collective operations within the MPI layer, albeit using ORTE collective communication services. I -strongly- urge the MPI team to implement advanced collective algorithms to improve the performance of this critical procedure.
2. reducing the volume of data exchanged during modex. Data in the modex consisted of the process name, the name of the node where that process is located (expressed as a string), plus a string representation of all contact info. The nodename was required in order for the modex to determine if the process was local or not - in addition, some people like to have it to print pretty error messages when a connection failed.
The size of this data has been reduced in three ways:
(a) reducing the size of the process name itself. The process name consisted of two 32-bit fields for the jobid and vpid. This is far larger than any current system, or system likely to exist in the near future, can support. Accordingly, the default size of these fields has been reduced to 16-bits, which means you can have 32k procs in each of 32k jobs. Since the daemons must have a vpid, and we require one daemon/node, this also restricts the default configuration to 32k nodes.
To support any future "mega-clusters", a configuration option --enable-jumbo-apps has been added. This option increases the jobid and vpid field sizes to 32-bits. Someday, if necessary, someone can add yet another option to increase them to 64-bits, I suppose.
(b) replacing the string nodename with an integer nodeid. Since we have one daemon/node, the nodeid corresponds to the local daemon's vpid. This replaces an often lengthy string with only 2 (or at most 4) bytes, a substantial reduction.
(c) when the mca param requesting that nodenames be sent (to support pretty error messages) is set, a second mca param is now used to request the FQDN - otherwise, the domain name is stripped (by default) from the message to save space. If someone wants to combine those into a single param somehow (perhaps with an argument?), they are welcome to do so - I didn't want to alter what people are already using.
While these may seem like small savings, they actually amount to a significant impact when aggregated across the entire modex operation. Since every proc must receive the modex data regardless of the collective used to send it, just reducing the size of the process name removes nearly 400MBytes of communication from a 32k proc job (admittedly, much of this comm may occur in parallel). So it does add up pretty quickly.
3. routing RML messages to reduce connections. The default messaging system remains point-to-point - i.e., each proc opens a socket to every proc it communicates with and sends its messages directly. A new option uses the orteds as routers - i.e., each proc only opens a single socket to its local orted. All messages are sent from the proc to the orted, which forwards the message to the orted on the node where the intended recipient proc is located - that orted then forwards the message to its local proc (the recipient). This greatly reduces the connection storm we have encountered during startup.
It also has the benefit of removing the sharing of every proc's OOB contact with every other proc. The orted routing tables are populated during launch since every orted gets a map of where every proc is being placed. Each proc, therefore, only needs to know the contact info for its local daemon, which is passed in via the environment when the proc is fork/exec'd by the daemon. This alone removes ~50 bytes/process of communication that was in the current STG1 startup message - so for our 32k proc job, this saves us roughly 32k*50 = 1.6MBytes sent to 32k procs = 51GBytes of messaging.
Note that you can use the new routing method by specifying -mca routed tree - if you so desire. This mode will become the default at some point in the future.
There are a few minor additional changes in the commit that I'll just note in passing:
* propagation of command line mca params to the orteds - fixes ticket #1073. See note there for details.
* requiring of "finalize" prior to "exit" for MPI procs - fixes ticket #1144. See note there for details.
* cleanup of some stale header files
This commit was SVN r16364.
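Point 2(a) of the message -- shrinking the process-name fields from 32 to 16 bits -- can be sized up with a trivial sketch. The structs below are illustrative, not the actual orte_process_name_t definition:

/* Back-of-the-envelope look at the name-size reduction (illustrative). */
#include <stdint.h>
#include <stdio.h>

typedef struct { uint32_t jobid; uint32_t vpid; } wide_name_t;   /* jumbo  */
typedef struct { uint16_t jobid; uint16_t vpid; } narrow_name_t; /* default */

int main(void) {
    printf("per-name saving: %zu bytes\n",
           sizeof(wide_name_t) - sizeof(narrow_name_t));
    /* 16-bit fields allow 32k procs in each of 32k jobs, per the message */
    printf("procs per job with 16-bit fields: %u\n", 1u << 15);
    return 0;
}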
2007-10-05 23:48:23 +04:00
/* save any mca command line args so they can be passed
* separately to the daemons
*/
else if ( 0 = = strcmp ( " -mca " , argv [ i ] ) | |
( 0 = = strcmp ( " --mca " , argv [ i ] ) ) ) {
opal_argv_append_nosize ( & orted_cmd_line , argv [ i ] ) ;
opal_argv_append_nosize ( & orted_cmd_line , argv [ i + 1 ] ) ;
opal_argv_append_nosize ( & orted_cmd_line , argv [ i + 2 ] ) ;
}
2005-03-14 23:57:21 +03:00
/* If this token was C/N map data, save it */
if ( map_data ) {
char str [ 2 ] = { ' 0 ' + ORTE_APP_CONTEXT_MAP_CN , ' \0 ' } ;
2005-07-04 04:13:44 +04:00
opal_argv_append ( & new_argc , & new_argv , " -rawmap " ) ;
opal_argv_append ( & new_argc , & new_argv , str ) ;
2005-03-14 23:57:21 +03:00
}
if ( save_arg ) {
2005-07-04 04:13:44 +04:00
opal_argv_append ( & new_argc , & new_argv , argv [ i ] ) ;
2005-03-14 23:57:21 +03:00
}
}
/* Parse application command line options. Add the -rawmap option
separately so that the user doesn ' t see it in the - - help
message . */
init_globals ( ) ;
2005-07-04 04:13:44 +04:00
opal_cmd_line_create ( & cmd_line , cmd_line_init ) ;
2005-03-18 06:43:59 +03:00
mca_base_cmd_line_setup ( & cmd_line ) ;
2005-03-14 23:57:21 +03:00
cmd_line_made = true ;
2005-07-04 04:13:44 +04:00
opal_cmd_line_make_opt3 ( & cmd_line , ' \0 ' , NULL , " rawmap " , 2 ,
2005-03-14 23:57:21 +03:00
" Hidden / internal parameter -- users should not use this! " ) ;
2005-07-04 04:13:44 +04:00
rc = opal_cmd_line_parse ( & cmd_line , true , new_argc , new_argv ) ;
opal_argv_free ( new_argv ) ;
2005-03-14 23:57:21 +03:00
new_argv = NULL ;
2006-02-12 04:33:29 +03:00
if ( ORTE_SUCCESS ! = rc ) {
2005-03-14 23:57:21 +03:00
goto cleanup ;
}
2005-08-08 20:42:28 +04:00
mca_base_cmd_line_process_args ( & cmd_line , app_env , & global_mca_env ) ;
2005-03-14 23:57:21 +03:00
/* Is there an appfile in here? */
if ( NULL ! = orterun_globals . appfile ) {
OBJ_DESTRUCT ( & cmd_line ) ;
2005-08-08 20:42:28 +04:00
return parse_appfile ( strdup ( orterun_globals . appfile ) , app_env ) ;
2005-03-14 23:57:21 +03:00
}
/* Setup application context */
app = OBJ_NEW ( orte_app_context_t ) ;
2006-02-07 06:32:36 +03:00
opal_cmd_line_get_tail ( & cmd_line , & count , & app - > argv ) ;
2005-03-14 23:57:21 +03:00
/* See if we have anything left */
2006-02-07 06:32:36 +03:00
if ( 0 = = count ) {
2005-07-04 06:38:44 +04:00
opal_show_help ( " help-orterun.txt " , " orterun:executable-not-specified " ,
2005-04-12 20:01:30 +04:00
true , orterun_basename , orterun_basename ) ;
2005-03-14 23:57:21 +03:00
rc = ORTE_ERR_NOT_FOUND ;
goto cleanup ;
}
2005-04-09 05:26:17 +04:00
/* Grab all OMPI_* environment variables */
2005-03-14 23:57:21 +03:00
2005-08-08 20:42:28 +04:00
app - > env = opal_argv_copy ( * app_env ) ;
2005-03-14 23:57:21 +03:00
for ( i = 0 ; NULL ! = environ [ i ] ; + + i ) {
2005-04-06 05:58:30 +04:00
if ( 0 = = strncmp ( " OMPI_ " , environ [ i ] , 5 ) ) {
2005-07-04 04:13:44 +04:00
opal_argv_append_nosize ( & app - > env , environ [ i ] ) ;
2005-03-14 23:57:21 +03:00
}
}
/* Did the user request to export any environment variables? */
2005-07-04 04:13:44 +04:00
if ( opal_cmd_line_is_taken ( & cmd_line , " x " ) ) {
j = opal_cmd_line_get_ninsts ( & cmd_line , " x " ) ;
2005-03-14 23:57:21 +03:00
for ( i = 0 ; i < j ; + + i ) {
2005-07-04 04:13:44 +04:00
param = opal_cmd_line_get_param ( & cmd_line , " x " , i , 0 ) ;
2005-03-14 23:57:21 +03:00
if ( NULL ! = strchr ( param , ' = ' ) ) {
2005-07-04 04:13:44 +04:00
opal_argv_append_nosize ( & app - > env , param ) ;
2005-03-14 23:57:21 +03:00
} else {
value = getenv ( param ) ;
if ( NULL ! = value ) {
if ( NULL ! = strchr ( value , ' = ' ) ) {
2005-07-04 04:13:44 +04:00
opal_argv_append_nosize ( & app - > env , value ) ;
2005-03-14 23:57:21 +03:00
} else {
asprintf ( & value2 , " %s=%s " , param , value ) ;
2005-07-04 04:13:44 +04:00
opal_argv_append_nosize ( & app - > env , value2 ) ;
2005-05-13 01:44:23 +04:00
free ( value2 ) ;
2005-03-14 23:57:21 +03:00
}
} else {
2005-07-04 03:31:27 +04:00
opal_output ( 0 , " Warning: could not find environment variable \" %s \" \n " , param ) ;
2005-03-14 23:57:21 +03:00
}
}
}
}
/* Did the user request a specific path? */
if ( NULL ! = orterun_globals . path ) {
asprintf ( & value , " PATH=%s " , orterun_globals . path ) ;
2005-07-04 04:13:44 +04:00
opal_argv_append_nosize ( & app - > env , value ) ;
2005-03-14 23:57:21 +03:00
free ( value ) ;
}
/* Did the user request a specific wdir? */
if ( NULL ! = orterun_globals . wdir ) {
app - > cwd = strdup ( orterun_globals . wdir ) ;
2006-02-16 23:40:23 +03:00
app - > user_specified_cwd = true ;
2005-03-14 23:57:21 +03:00
} else {
getcwd ( cwd , sizeof ( cwd ) ) ;
app - > cwd = strdup ( cwd ) ;
2006-02-16 23:40:23 +03:00
app - > user_specified_cwd = false ;
2005-03-14 23:57:21 +03:00
}
2006-09-15 06:52:08 +04:00
/* Check to see if the user explicitly wanted to disable automatic
- - prefix behavior */
if ( opal_cmd_line_is_taken ( & cmd_line , " noprefix " ) ) {
want_prefix_by_default = false ;
}
2006-02-28 14:52:12 +03:00
/* Did the user specify a specific prefix for this app_context_t
or provide an absolute path name to argv [ 0 ] ? */
if ( opal_cmd_line_is_taken ( & cmd_line , " prefix " ) | |
2006-09-15 06:52:08 +04:00
' / ' = = argv [ 0 ] [ 0 ] | | want_prefix_by_default ) {
2005-09-06 20:10:05 +04:00
size_t param_len ;
2006-02-28 17:44:40 +03:00
/* The --prefix option takes precedence over /path/to/orterun */
if ( opal_cmd_line_is_taken ( & cmd_line , " prefix " ) ) {
param = opal_cmd_line_get_param ( & cmd_line , " prefix " , 0 , 0 ) ;
2006-09-15 06:52:08 +04:00
}
/* /path/to/orterun */
else if ( ' / ' = = argv [ 0 ] [ 0 ] ) {
2006-08-23 06:35:00 +04:00
char * tmp_basename = NULL ;
2006-02-28 17:44:40 +03:00
/* If they specified an absolute path, strip off the
/ bin / < exec_name > " and leave just the prefix */
2006-08-23 06:35:00 +04:00
param = opal_dirname ( argv [ 0 ] ) ;
2006-02-28 14:52:12 +03:00
/* Quick sanity check to ensure we got
something / bin / < exec_name > and that the installation
tree is at least more or less what we expect it to
be */
2006-08-23 06:35:00 +04:00
tmp_basename = opal_basename ( param ) ;
if ( 0 = = strcmp ( " bin " , tmp_basename ) ) {
char * tmp = param ;
param = opal_dirname ( tmp ) ;
free ( tmp ) ;
2006-02-28 14:52:12 +03:00
} else {
free ( param ) ;
param = NULL ;
2005-09-06 20:10:05 +04:00
}
2006-08-23 06:35:00 +04:00
free ( tmp_basename ) ;
2005-09-06 20:10:05 +04:00
}
2006-09-15 06:52:08 +04:00
/* --enable-orterun-prefix-default was given to orterun */
else {
2007-04-21 04:15:05 +04:00
param = strdup ( opal_install_dirs . prefix ) ;
2006-09-15 06:52:08 +04:00
}
2005-09-06 20:10:05 +04:00
2006-02-28 14:52:12 +03:00
if ( NULL ! = param ) {
2006-08-24 20:18:42 +04:00
/* "Parse" the param, aka remove superfluous path_sep. */
2006-02-28 14:52:12 +03:00
param_len = strlen ( param ) ;
2006-08-22 01:55:41 +04:00
while ( 0 = = strcmp ( OPAL_PATH_SEP , & ( param [ param_len - 1 ] ) ) ) {
2006-02-28 14:52:12 +03:00
param [ param_len - 1 ] = ' \0 ' ;
param_len - - ;
if ( 0 = = param_len ) {
opal_show_help ( " help-orterun.txt " , " orterun:empty-prefix " ,
true , orterun_basename , orterun_basename ) ;
return ORTE_ERR_FATAL ;
}
}
app - > prefix_dir = strdup ( param ) ;
}
2005-09-06 20:10:05 +04:00
}
2005-03-14 23:57:21 +03:00
/* Did the user request any mappings? They were all converted to
- - rawmap items , above . */
2005-07-04 04:13:44 +04:00
if ( opal_cmd_line_is_taken ( & cmd_line , " rawmap " ) ) {
j = opal_cmd_line_get_ninsts ( & cmd_line , " rawmap " ) ;
2006-08-23 06:35:00 +04:00
app - > map_data = ( orte_app_context_map_t * * ) malloc ( sizeof ( orte_app_context_map_t * ) * j ) ;
2005-03-14 23:57:21 +03:00
if ( NULL = = app - > map_data ) {
rc = ORTE_ERR_OUT_OF_RESOURCE ;
goto cleanup ;
}
app - > num_map = j ;
for ( i = 0 ; i < j ; + + i ) {
app - > map_data [ i ] = NULL ;
}
for ( i = 0 ; i < j ; + + i ) {
2005-07-04 04:13:44 +04:00
value = opal_cmd_line_get_param ( & cmd_line , " rawmap " , i , 0 ) ;
value2 = opal_cmd_line_get_param ( & cmd_line , " rawmap " , i , 1 ) ;
2005-03-14 23:57:21 +03:00
app - > map_data [ i ] = OBJ_NEW ( orte_app_context_map_t ) ;
if ( NULL = = app - > map_data [ i ] ) {
rc = ORTE_ERR_OUT_OF_RESOURCE ;
goto cleanup ;
}
app - > map_data [ i ] - > map_type = value [ 0 ] - ' 0 ' ;
app - > map_data [ i ] - > map_data = strdup ( value2 ) ;
2006-02-07 06:32:36 +03:00
/* map_data = true;
2005-10-08 02:24:52 +04:00
* JJH - This activates the C / N mapping stuff ,
* or at least allows us to pass the ' num_procs ' check below .
* since it is not implemented yet , leave commented . */
2005-03-14 23:57:21 +03:00
}
}
/* Get the numprocs */
2006-09-25 23:41:54 +04:00
app - > num_procs = ( orte_std_cntr_t ) orterun_globals . num_procs ;
2005-04-09 05:26:17 +04:00
2006-07-11 01:25:33 +04:00
/* If the user didn't specify the number of processes to run, then we
default to launching an app process using every slot . We can ' t do
anything about that here - we leave it to the RMAPS framework ' s
components to note this and deal with it later .
HOWEVER , we ONLY support this mode of operation if the number of
app_contexts is equal to ONE . If the user provides multiple applications ,
we simply must have more information - in this case , generate an
error .
*/
if ( app - > num_procs = = 0 ) {
have_zero_np = true ; /** flag that we have a zero_np situation */
2005-03-14 23:57:21 +03:00
}
2007-03-17 02:11:45 +03:00
2006-07-11 01:25:33 +04:00
if ( 0 < total_num_apps & & have_zero_np ) {
/** we have more than one app and a zero_np - that's no good.
* note that we have to do this as a two step logic check since
* the user may fail to specify num_procs for the first app , but
* then give us another application .
*/
opal_show_help ( " help-orterun.txt " , " orterun:multi-apps-and-zero-np " ,
true , orterun_basename , NULL ) ;
return ORTE_ERR_FATAL ;
}
total_num_apps + + ;
2007-03-17 02:11:45 +03:00
/* Preserve if we are to preload the binary */
app - > preload_binary = orterun_globals . preload_binary ;
if ( NULL ! = orterun_globals . preload_files )
app - > preload_files = strdup ( orterun_globals . preload_files ) ;
else
app - > preload_files = NULL ;
if ( NULL ! = orterun_globals . preload_files_dest_dir )
app - > preload_files_dest_dir = strdup ( orterun_globals . preload_files_dest_dir ) ;
else
app - > preload_files_dest_dir = NULL ;
2006-02-16 23:40:23 +03:00
/* Do not try to find argv[0] here -- the starter is responsible
for that because it may not be relevant to try to find it on
the node where orterun is executing . So just strdup ( ) argv [ 0 ]
into app . */
2005-03-14 23:57:21 +03:00
2006-02-16 23:40:23 +03:00
app - > app = strdup ( app - > argv [ 0 ] ) ;
2005-03-14 23:57:21 +03:00
if ( NULL = = app - > app ) {
2006-02-16 23:40:23 +03:00
opal_show_help ( " help-orterun.txt " , " orterun:call-failed " ,
true , orterun_basename , " library " , " strdup returned NULL " , errno ) ;
2005-03-14 23:57:21 +03:00
rc = ORTE_ERR_NOT_FOUND ;
goto cleanup ;
}
* app_ptr = app ;
app = NULL ;
* made_app = true ;
/* All done */
cleanup :
if ( NULL ! = app ) {
OBJ_RELEASE ( app ) ;
}
if ( NULL ! = new_argv ) {
2005-07-04 04:13:44 +04:00
opal_argv_free ( new_argv ) ;
2005-03-14 23:57:21 +03:00
}
if ( cmd_line_made ) {
OBJ_DESTRUCT ( & cmd_line ) ;
}
return rc ;
}
2005-05-13 18:36:36 +04:00
static int parse_appfile ( char * filename , char * * * env )
2005-03-14 23:57:21 +03:00
{
size_t i , len ;
FILE * fp ;
char line [ BUFSIZ ] ;
2006-03-23 20:55:25 +03:00
int rc , argc , app_num ;
2005-03-14 23:57:21 +03:00
char * * argv ;
orte_app_context_t * app ;
bool blank , made_app ;
char bogus [ ] = " bogus " ;
2005-05-13 18:36:36 +04:00
char * * tmp_env ;
2005-03-14 23:57:21 +03:00
/* Try to open the file */
fp = fopen ( filename , " r " ) ;
if ( NULL = = fp ) {
2005-07-04 06:38:44 +04:00
opal_show_help ( " help-orterun.txt " , " orterun:appfile-not-found " , true ,
2005-03-14 23:57:21 +03:00
filename ) ;
return ORTE_ERR_NOT_FOUND ;
}
/* Read in line by line */
line [ sizeof ( line ) - 1 ] = ' \0 ' ;
2006-03-23 20:55:25 +03:00
app_num = 0 ;
2005-03-14 23:57:21 +03:00
do {
/* We need a bogus argv[0] (because when argv comes in from
the command line , argv [ 0 ] is " orterun " , so the parsing
logic ignores it ) . So create one here rather than making
an argv and then pre - pending a new argv [ 0 ] ( which would be
rather inefficient ) . */
line [ 0 ] = ' \0 ' ;
strcat ( line , bogus ) ;
2005-09-05 00:54:19 +04:00
if ( NULL = = fgets ( line + sizeof ( bogus ) - 1 ,
2005-03-14 23:57:21 +03:00
sizeof ( line ) - sizeof ( bogus ) - 1 , fp ) ) {
break ;
}
2005-04-12 22:42:34 +04:00
/* Remove a trailing newline */
2005-03-14 23:57:21 +03:00
len = strlen ( line ) ;
2005-04-12 22:42:34 +04:00
if ( len > 0 & & ' \n ' = = line [ len - 1 ] ) {
line [ len - 1 ] = ' \0 ' ;
if ( len > 0 ) {
- - len ;
}
}
/* Remove comments */
2005-03-14 23:57:21 +03:00
for ( i = 0 ; i < len ; + + i ) {
if ( ' # ' = = line [ i ] ) {
line [ i ] = ' \0 ' ;
break ;
} else if ( i + 1 < len & & ' / ' = = line [ i ] & & ' / ' = = line [ i + 1 ] ) {
line [ i ] = ' \0 ' ;
break ;
}
}
/* Is this a blank line? */
len = strlen ( line ) ;
for ( blank = true , i = sizeof ( bogus ) ; i < len ; + + i ) {
if ( ! isspace ( line [ i ] ) ) {
blank = false ;
break ;
}
}
if ( blank ) {
continue ;
}
/* We got a line with *something* on it. So process it */
2005-07-04 04:13:44 +04:00
argv = opal_argv_split ( line , ' ' ) ;
argc = opal_argv_count ( argv ) ;
2005-03-14 23:57:21 +03:00
if ( argc > 0 ) {
2005-05-13 18:36:36 +04:00
2005-08-08 20:42:28 +04:00
/* Create a temporary env to use in the recursive call --
that is : don ' t disturb the original env so that we can
have a consistent global env . This allows for the
case :
2005-09-05 00:54:19 +04:00
orterun - - mca foo bar - - appfile file
2005-08-08 20:42:28 +04:00
where the " file " contains multiple apps . In this case ,
each app in " file " will get * only * foo = bar as the base
environment from which its specific environment is
constructed . */
2005-05-13 18:36:36 +04:00
if ( NULL ! = * env ) {
2005-07-04 04:13:44 +04:00
tmp_env = opal_argv_copy ( * env ) ;
2005-05-13 18:36:36 +04:00
if (NULL == tmp_env) {
return ORTE_ERR_OUT_OF_RESOURCE;
}
} else {
tmp_env = NULL;
}
rc = create_app(argc, argv, &app, &made_app, &tmp_env);
2005-03-14 23:57:21 +03:00
if (ORTE_SUCCESS != rc) {
/* Assume that the error message has already been
printed; no need to cleanup -- we can just exit */
exit(1);
}
2005-05-13 18:36:36 +04:00
if (NULL != tmp_env) {
2005-07-04 04:13:44 +04:00
opal_argv_free(tmp_env);
2005-05-13 18:36:36 +04:00
}
2005-03-14 23:57:21 +03:00
if (made_app) {
2006-08-15 23:54:10 +04:00
orte_std_cntr_t dummy;
2006-03-24 18:28:42 +03:00
app->idx = app_num;
++app_num;
2005-07-03 08:02:01 +04:00
orte_pointer_array_add(&dummy, apps_pa, app);  /* slot index lands in dummy, which is unused */
2005-03-14 23:57:21 +03:00
}
}
} while (!feof(fp));
fclose(fp);
/* All done */
free(filename);
return ORTE_SUCCESS;
}
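A side note on the loop tail above: feof() only turns true after a read has already come up short, so a do/while(!feof(fp)) loop is safe only because the (elided) body checks its own read result. Here is a minimal sketch of that shape in plain stdio; read_lines() and its fixed-size buffer are illustrative only, not the ORTE reader.
#include <stdio.h>
/* fgets() returns NULL at EOF or on error, so testing the read
   result -- rather than feof() alone -- keeps a final, empty
   iteration from processing a stale buffer */
static int read_lines(const char *path)
{
    FILE *fp = fopen(path, "r");
    if (NULL == fp) {
        return -1;
    }
    char line[1024];
    while (NULL != fgets(line, sizeof(line), fp)) {
        printf("line: %s", line);   /* parse one appfile line here */
    }
    fclose(fp);
    return 0;
}
int main(int argc, char *argv[])
{
    return (argc > 1) ? read_lines(argv[1]) : 0;
}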