/*
 * Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation.  All rights reserved.
 * Copyright (c) 2004-2006 The University of Tennessee and The University
 *                         of Tennessee Research Foundation.  All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart.  All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 *
 * These symbols are in a file by themselves to provide nice linker
 * semantics.  Since linkers generally pull in symbols by object
 * files, keeping these symbols as the only symbols in this file
 * prevents utility programs such as "ompi_info" from having to import
 * entire components just to query their version and parameters.
 */
#include "orte_config.h"
#include "orte/orte_constants.h"

#include <stdlib.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <ctype.h>
#include <limits.h>    /* PATH_MAX, used by search() below */

#include "opal/util/argv.h"
#include "opal/util/path.h"
#include "opal/util/basename.h"
#include "opal/util/show_help.h"
#include "opal/mca/base/mca_base_param.h"
#include "orte/mca/errmgr/errmgr.h"
#include "orte/mca/rml/rml.h"

#include "orte/mca/pls/pls.h"
#include "orte/mca/pls/base/pls_private.h"
#include "orte/mca/pls/rsh/pls_rsh.h"

#if !defined(__WINDOWS__)
extern char **environ;
#endif  /* !defined(__WINDOWS__) */

/*
* Local function
*/
static char **search(const char *agent_list);
/*
* Public string showing the pls ompi_rsh component version number
*/
const char *mca_pls_rsh_component_version_string =
    "Open MPI rsh pls MCA component version " ORTE_VERSION;
/*
* Instantiate the public struct with all of our public information
* and pointers to our public functions in it
*/
orte_pls_rsh_component_t mca_pls_rsh_component = {
{
/* First, the mca_component_t struct containing meta information
about the component itself */
{
        /* Indicate that we are a pls v1.3.0 component (which also
           implies a specific MCA version) */

        ORTE_PLS_BASE_VERSION_1_3_0,

        /* Component name and version */

        "rsh",
        ORTE_MAJOR_VERSION,
        ORTE_MINOR_VERSION,
        ORTE_RELEASE_VERSION,

        /* Component open and close functions */

        orte_pls_rsh_component_open,
        orte_pls_rsh_component_close
    },

    /* Next the MCA v1.0.0 component meta data */
    {
        /* The component is checkpoint ready */
        MCA_BASE_METADATA_PARAM_CHECKPOINT
    },

    /* Initialization / querying functions */

    orte_pls_rsh_component_init
    }
};


int orte_pls_rsh_component_open(void)
{
    int tmp, value;
    mca_base_component_t *c = &mca_pls_rsh_component.super.pls_version;

    /* initialize globals */
    OBJ_CONSTRUCT(&mca_pls_rsh_component.lock, opal_mutex_t);
    OBJ_CONSTRUCT(&mca_pls_rsh_component.cond, opal_condition_t);
    mca_pls_rsh_component.num_children = 0;
    mca_pls_rsh_component.agent_argv = NULL;
    mca_pls_rsh_component.agent_argc = 0;
    mca_pls_rsh_component.agent_path = NULL;

    /* lookup parameters */
    mca_base_param_reg_int(c, "debug",
                           "Whether or not to enable debugging output for the rsh pls component (0 or 1)",
                           false, false, false, &tmp);
    mca_pls_rsh_component.debug = OPAL_INT_TO_BOOL(tmp);

    mca_base_param_reg_int(c, "num_concurrent",
                           "How many pls_rsh_agent instances to invoke concurrently (must be > 0)",
                           false, false, 128, &tmp);
    if (tmp <= 0) {
        opal_show_help("help-pls-rsh.txt", "concurrency-less-than-zero",
                       true, tmp);
        tmp = 1;
    }
    mca_pls_rsh_component.num_concurrent = tmp;

    mca_base_param_reg_int(c, "force_rsh",
                           "Force the launcher to always use rsh, even for local daemons",
                           false, false, false, &tmp);
    mca_pls_rsh_component.force_rsh = OPAL_INT_TO_BOOL(tmp);

    if (mca_pls_rsh_component.debug == 0) {
        mca_base_param_reg_int_name("orte", "debug",
                                    "Whether or not to enable debugging output for all ORTE components (0 or 1)",
                                    false, false, false, &tmp);
        mca_pls_rsh_component.debug = OPAL_INT_TO_BOOL(tmp);
    }

    mca_base_param_reg_int_name("orte", "debug_daemons",
                                "Whether or not to enable debugging of daemons (0 or 1)",
                                false, false, false, &tmp);
    mca_pls_rsh_component.debug_daemons = OPAL_INT_TO_BOOL(tmp);

    tmp = mca_base_param_reg_int_name("orte", "timing",
                                      "Request that critical timing loops be measured",
                                      false, false, 0, &value);
    if (value != 0) {
        mca_pls_rsh_component.timing = true;
    } else {
        mca_pls_rsh_component.timing = false;
    }

    mca_base_param_reg_string(c, "orted",
                              "The command name that the rsh pls component will invoke for the ORTE daemon",
                              false, false, "orted",
                              &mca_pls_rsh_component.orted);

    mca_base_param_reg_int(c, "priority",
                           "Priority of the rsh pls component",
                           false, false, 10,
                           &mca_pls_rsh_component.priority);
    mca_base_param_reg_int(c, "delay",
                           "Delay (in seconds) between invocations of the remote agent, but only used when the \"debug\" MCA parameter is true, or the top-level MCA debugging is enabled (otherwise this value is ignored)",
                           false, false, 1,
                           &mca_pls_rsh_component.delay);
    mca_base_param_reg_int(c, "reap",
                           "If set to 1, wait for all the processes to complete before exiting.  Otherwise, quit immediately -- without waiting for confirmation that all other processes in the job have completed.",
                           false, false, 1, &tmp);
    mca_pls_rsh_component.reap = OPAL_INT_TO_BOOL(tmp);

    mca_base_param_reg_int(c, "assume_same_shell",
                           "If set to 1, assume that the shell on the remote node is the same as the shell on the local node.  Otherwise, probe to determine what the remote shell is.",
                           false, false, 1, &tmp);
    mca_pls_rsh_component.assume_same_shell = OPAL_INT_TO_BOOL(tmp);

    mca_base_param_reg_string(c, "agent",
                              "The command used to launch executables on remote nodes (typically either \"ssh\" or \"rsh\")",
                              false, false, "ssh : rsh",
                              &mca_pls_rsh_component.agent_param);
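
    /* Note: the resulting "pls_rsh_agent" MCA parameter is a
       colon-delimited list of candidate agents; the first one found in
       the PATH wins (see search() below).  For example (an illustrative
       invocation, not taken from this file):

           mpirun --mca pls_rsh_agent "ssh -x" ...

       would force the use of ssh as the remote agent. */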

    return ORTE_SUCCESS;
}


orte_pls_base_module_t *orte_pls_rsh_component_init(int *priority)
{
    char *bname;
    size_t i;

    /* if we are not an HNP (head node process), then don't select us */
    if (!orte_process_info.seed) {
        return NULL;
    }

    /* Take the string that was given to us by the pls_rsh_agent MCA
       param and search for it */
    mca_pls_rsh_component.agent_argv =
        search(mca_pls_rsh_component.agent_param);
    mca_pls_rsh_component.agent_argc =
        opal_argv_count(mca_pls_rsh_component.agent_argv);
    mca_pls_rsh_component.agent_path = NULL;
    if (mca_pls_rsh_component.agent_argc > 0) {
        /* If the agent is ssh, and debug was not selected, then
           automatically add "-x" */

        bname = opal_basename(mca_pls_rsh_component.agent_argv[0]);
        if (NULL != bname && 0 == strcmp(bname, "ssh") &&
            mca_pls_rsh_component.debug == 0) {
            for (i = 1; NULL != mca_pls_rsh_component.agent_argv[i]; ++i) {
                if (0 == strcasecmp("-x",
                                    mca_pls_rsh_component.agent_argv[i])) {
                    break;
                }
            }
            if (NULL == mca_pls_rsh_component.agent_argv[i]) {
                opal_argv_append(&mca_pls_rsh_component.agent_argc,
                                 &mca_pls_rsh_component.agent_argv, "-x");
            }
        }
        if (NULL != bname) {
            free(bname);
        }
    }

    /* If we didn't find the agent in the path, then don't use this
       component */
    if (NULL == mca_pls_rsh_component.agent_argv ||
        NULL == mca_pls_rsh_component.agent_argv[0]) {
        return NULL;
    }

    mca_pls_rsh_component.agent_path =
        opal_path_findv(mca_pls_rsh_component.agent_argv[0], X_OK,
                        environ, NULL);
    if (NULL == mca_pls_rsh_component.agent_path) {
        return NULL;
    }
    *priority = mca_pls_rsh_component.priority;

    return &orte_pls_rsh_module;
}


int orte_pls_rsh_component_close(void)
{
    /* cleanup state */
    OBJ_DESTRUCT(&mca_pls_rsh_component.lock);
    OBJ_DESTRUCT(&mca_pls_rsh_component.cond);
    if (NULL != mca_pls_rsh_component.orted) {
        free(mca_pls_rsh_component.orted);
    }
    if (NULL != mca_pls_rsh_component.agent_param) {
        free(mca_pls_rsh_component.agent_param);
    }
    if (NULL != mca_pls_rsh_component.agent_argv) {
        opal_argv_free(mca_pls_rsh_component.agent_argv);
    }
    if (NULL != mca_pls_rsh_component.agent_path) {
        free(mca_pls_rsh_component.agent_path);
    }

    return ORTE_SUCCESS;
}


/*
 * Take a colon-delimited list of agents and locate the first one that
 * we are able to find in the PATH.  Split that one into argv and
 * return it.  If nothing is found, then return NULL.
 */
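/*
 * For example (an illustrative sketch, not a call made anywhere in this
 * file): with the default agent list, search("ssh : rsh") would return
 * something like { "/usr/bin/ssh", NULL } on a system where ssh is in
 * the PATH.  The caller owns the returned argv and releases it with
 * opal_argv_free().
 */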
static char **search(const char *agent_list)
{
    int i, j;
    char *line, **lines = opal_argv_split(agent_list, ':');
    char **tokens, *tmp;
    char cwd[PATH_MAX];

    getcwd(cwd, PATH_MAX);
    for (i = 0; NULL != lines[i]; ++i) {
        line = lines[i];

        /* Trim whitespace at the beginning and end of the line */
        for (j = 0; '\0' != line[j] && isspace(line[j]); ++line) {
            continue;
        }
        for (j = strlen(line) - 1; j > 0 && isspace(line[j]); --j) {
            line[j] = '\0';
        }
        if (strlen(line) <= 0) {
            continue;
        }

        /* Split it */
        tokens = opal_argv_split(line, ' ');

        /* Look for the first token in the PATH */
        tmp = opal_path_findv(tokens[0], X_OK, environ, cwd);
        if (NULL != tmp) {
            free(tokens[0]);
            tokens[0] = tmp;
            opal_argv_free(lines);
            return tokens;
        }

        /* Didn't find it */
        opal_argv_free(tokens);
    }

    /* Doh -- didn't find anything */
    opal_argv_free(lines);
    return NULL;
}