/*
 * Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation.  All rights reserved.
 * Copyright (c) 2004-2006 The University of Tennessee and The University
 *                         of Tennessee Research Foundation.  All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart.  All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */

/** @file:
 */

#ifndef MCA_ODLS_PRIVATE_H
#define MCA_ODLS_PRIVATE_H

/*
 * includes
 */
#include "orte_config.h"
#include "orte/types.h"

#include "opal/class/opal_list.h"
#include "opal/threads/mutex.h"
#include "opal/threads/condition.h"
#include "opal/dss/dss_types.h"

#include "orte/mca/plm/plm_types.h"
#include "orte/mca/rmaps/rmaps_types.h"
#include "orte/mca/rml/rml_types.h"
#include "orte/runtime/orte_globals.h"

#include "orte/mca/odls/odls_types.h"

BEGIN_C_DECLS

/*
 * General ODLS types
 */

/*
 * List object to locally store the process names and pids of
 * our children. This can subsequently be used to order termination
 * or pass signals without looking the info up again.
 */
typedef struct orte_odls_child_t {
    opal_list_item_t super;      /* required to place this on a list */
    orte_process_name_t *name;   /* the OpenRTE name of the proc */
    /* The local_rank is computed relative to the other procs of the same
     * job on this node. For example, if host1 hosts the procs with vpids
     * 0, 2, 4, and 6, those procs get local ranks 0-3 and the number of
     * local procs on host1 is four. A proc subsequently started via
     * comm_spawn belongs to a separate jobid, so it gets its own
     * local_rank (starting at 0) and its own num_local_procs; the values
     * for the parent job remain unchanged. */
    orte_vpid_t local_rank;      /* local rank of the proc on this node */
    pid_t pid;                   /* local pid of the proc */
    orte_std_cntr_t app_idx;     /* index of the app_context for this proc */
    bool alive;                  /* is this proc alive? */
    orte_proc_state_t state;     /* the state of the process */
    orte_exit_code_t exit_code;  /* process exit code */
    unsigned long cpu_set;       /* cpus this proc should be bound to, if affinity is in use */
    char *rml_uri;               /* contact info for this child */
} orte_odls_child_t;
ORTE_DECLSPEC OBJ_CLASS_DECLARATION(orte_odls_child_t);

typedef struct orte_odls_globals_t {
    /** Verbose/debug output stream */
    int output;
    /** Time to allow process to forcibly die */
    int timeout_before_sigkill;
    /* mutex */
    opal_mutex_t mutex;
    /* condition variable */
    opal_condition_t cond;
    /* list of children for this orted */
    opal_list_t children;
} orte_odls_globals_t;

ORTE_DECLSPEC extern orte_odls_globals_t orte_odls_globals;
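
/*
 * The children list is shared between the launch, signal, and cleanup
 * paths, so it should only be walked while holding the mutex above.
 * A minimal illustrative sketch (not part of this interface) of looking
 * up a child by process name, where "proc" is a caller-supplied
 * orte_process_name_t pointer:
 *
 *     orte_odls_child_t *child = NULL;
 *     opal_list_item_t *item;
 *
 *     OPAL_THREAD_LOCK(&orte_odls_globals.mutex);
 *     for (item  = opal_list_get_first(&orte_odls_globals.children);
 *          item != opal_list_get_end(&orte_odls_globals.children);
 *          item  = opal_list_get_next(item)) {
 *         orte_odls_child_t *c = (orte_odls_child_t*)item;
 *         if (c->name->jobid == proc->jobid && c->name->vpid == proc->vpid) {
 *             child = c;
 *             break;
 *         }
 *     }
 *     OPAL_THREAD_UNLOCK(&orte_odls_globals.mutex);
 */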

/*
 * Default functions that are common to most environments - can
 * be overridden by specific environments if they need something
 * different (e.g., bproc)
 */
ORTE_DECLSPEC int
orte_odls_base_default_get_add_procs_data(opal_buffer_t *data,
                                          orte_jobid_t job);

/*
 * The launch message unpacked here is constructed "by hand" by
 * get_add_procs_data() above rather than from a GPR trigger, so its
 * contents appear in a fixed, known order and carry no string keys.
 * Job-level data (jobid, override_oversubscribed flag, total number of
 * procs, total slots allocated) appears once, and node-level data (e.g.,
 * the daemon's name, whether the node is oversubscribed) appears once
 * per node, followed by the per-proc entries for that node. This keeps
 * the launch message small at scale.
 */
ORTE_DECLSPEC int
orte_odls_base_default_construct_child_list(opal_buffer_t *data,
                                            orte_jobid_t *job,
                                            orte_std_cntr_t *num_local_procs,
                                            orte_vpid_t *vpid_range,
                                            orte_std_cntr_t *total_slots_allocated,
                                            bool *node_included,
                                            bool *oversubscribed,
                                            bool *override_oversubscribed,
                                            orte_std_cntr_t *num_contexts,
                                            orte_app_context_t ***app_contexts);
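
/*
 * Because the launch message is built "by hand", the packing code in
 * get_add_procs_data() and the unpacking code here must agree on the
 * exact field order - no string keys are carried in the message. A
 * heavily simplified, illustrative sketch of that pattern using the
 * opal_dss API (the real message contains many more fields):
 *
 *     int32_t cnt = 1;
 *     orte_jobid_t job;
 *
 *     sender:    opal_dss.pack(data, &job, 1, ORTE_JOBID);
 *     receiver:  opal_dss.unpack(data, &job, &cnt, ORTE_JOBID);
 */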

/* define a function that will fork a local proc */
typedef int (*orte_odls_base_fork_local_proc_fn_t)(orte_app_context_t *context,
                                                   orte_odls_child_t *child,
                                                   char **environ_copy);
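
/*
 * Environment-specific components supply a function of this type to the
 * base launch routine below. A minimal illustrative sketch of a
 * fork/exec based version - error handling, I/O setup, and session
 * directory handling are omitted, and the real default implementation
 * does considerably more:
 *
 *     static int my_fork_local_proc(orte_app_context_t *context,
 *                                   orte_odls_child_t *child,
 *                                   char **environ_copy)
 *     {
 *         pid_t pid = fork();
 *
 *         if (pid < 0) {
 *             return ORTE_ERROR;
 *         }
 *         if (0 == pid) {
 *             execve(context->app, context->argv, environ_copy);
 *             exit(1);    (only reached if execve fails)
 *         }
 *         child->pid = pid;     (parent records the child pid)
 *         child->alive = true;
 *         return ORTE_SUCCESS;
 *     }
 */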

ORTE_DECLSPEC int
orte_odls_base_default_launch_local(orte_jobid_t job,
                                    orte_std_cntr_t num_apps,
                                    orte_app_context_t **apps,
                                    orte_std_cntr_t num_local_procs,
                                    orte_vpid_t vpid_range,
                                    orte_std_cntr_t total_slots_allocated,
                                    bool oversubscribed,
                                    bool override_oversubscribed,
                                    orte_odls_base_fork_local_proc_fn_t fork_local);
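
/*
 * A component's launch entry point typically unpacks the launch message
 * with orte_odls_base_default_construct_child_list() and then hands its
 * own fork function to this routine. An abbreviated, illustrative call
 * (my_fork_local_proc is the hypothetical function sketched above, and
 * the other variables are assumed to come from construct_child_list):
 *
 *     rc = orte_odls_base_default_launch_local(job, num_contexts, app_contexts,
 *                                              num_local_procs, vpid_range,
 *                                              total_slots_allocated,
 *                                              oversubscribed,
 *                                              override_oversubscribed,
 *                                              my_fork_local_proc);
 */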

ORTE_DECLSPEC int
orte_odls_base_default_deliver_message(orte_jobid_t job, opal_buffer_t *buffer, orte_rml_tag_t tag);

ORTE_DECLSPEC void odls_base_default_wait_local_proc(pid_t pid, int status, void* cbdata);
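
/*
 * Callers typically register this routine as a wait callback after a
 * successful fork so the daemon is notified when the child exits - a
 * sketch, assuming the orte_wait_cb() helper from
 * orte/runtime/orte_wait.h:
 *
 *     orte_wait_cb(child->pid, odls_base_default_wait_local_proc, NULL);
 */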

/* define a function type to signal a local proc */
typedef int (*orte_odls_base_signal_local_fn_t)(pid_t pid, int signum);

ORTE_DECLSPEC int
orte_odls_base_default_signal_local_procs(const orte_process_name_t *proc, int32_t signal,
                                          orte_odls_base_signal_local_fn_t signal_local);
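
/*
 * The signal_local callback only needs to deliver a signal to a single
 * local pid; a minimal illustrative version based on kill(2):
 *
 *     static int my_signal_local(pid_t pid, int signum)
 *     {
 *         return (0 == kill(pid, signum)) ? ORTE_SUCCESS : ORTE_ERROR;
 *     }
 */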

/* define a function type for killing a local proc */
typedef int (*orte_odls_base_kill_local_fn_t)(pid_t pid, int signum);

/* define a function type to detect that a child died */
typedef bool (*orte_odls_base_child_died_fn_t)(pid_t pid, unsigned int timeout, int *exit_status);

ORTE_DECLSPEC int
orte_odls_base_default_kill_local_procs(orte_jobid_t job, bool set_state,
                                        orte_odls_base_kill_local_fn_t kill_local,
                                        orte_odls_base_child_died_fn_t child_died);
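
/*
 * The kill_local callback delivers the termination signal, while
 * child_died polls for the child to actually exit within the given
 * timeout so the caller can escalate (e.g., follow SIGTERM with SIGKILL
 * once orte_odls_globals.timeout_before_sigkill expires). An
 * illustrative sketch:
 *
 *     static int my_kill_local(pid_t pid, int signum)
 *     {
 *         return (0 == kill(pid, signum)) ? ORTE_SUCCESS : ORTE_ERROR;
 *     }
 *
 *     static bool my_child_died(pid_t pid, unsigned int timeout, int *exit_status)
 *     {
 *         unsigned int i;
 *         int status;
 *
 *         for (i = 0; i < timeout; i++) {
 *             if (pid == waitpid(pid, &status, WNOHANG)) {
 *                 *exit_status = status;
 *                 return true;
 *             }
 *             sleep(1);
 *         }
 *         return false;
 *     }
 */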

ORTE_DECLSPEC int orte_odls_base_default_require_sync(orte_process_name_t *proc, opal_buffer_t *buf);

/*
 * Preload binary/files functions
 */
ORTE_DECLSPEC int orte_odls_base_preload_files_app_context(orte_app_context_t* context);

END_C_DECLS

#endif