openmpi/orte/mca/odls/odls.h
Ralph Castain 3dbd4d9be7 Squeeeeeeze the launch message. This is the message sent to the daemons that provides all the data required for launching their local procs. In reorganizing the ODLS framework, I discovered that we were sending a significant amount of unnecessary and repeated data. This commit resolves this by:
1. taking advantage of the fact that we no longer create the launch message via a GPR trigger. In earlier times, we had the GPR create the launch message based on a subscription. In that mode of operation, we could not guarantee the order in which the data was stored in the message - hence, we had no choice but to parse the message in a loop that checked each value against a list of possible "keys" until the corresponding value was found.

Now, however, we construct the message "by hand", so we know precisely what data is in each location in the message. Thus, we no longer need to send the character string "keys" for each data value. This represents a rather large savings in message size - to give you an example, we typically would use a 30-char "key" for a 2-byte data value. As you can see, the overhead can become very large.

2. sending node-specific data only once. Again, because we used to construct the message via subscriptions that were done on a per-proc basis, the data for each node (e.g., the daemon's name, whether or not the node was oversubscribed) would be included in the data for each proc. Thus, the node-specific data was repeated for every proc.

Now that we construct the message "by hand", there is no reason to do this any more. Instead, we can insert the data for a specific node only once, and then provide the per-proc data for that node. We therefore not only save all that extra data in the message, but we also only need to parse the per-node data once.
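To make the new structure concrete, here is a rough sketch of how such a message might be packed. This is illustrative only - the helper name, the fields, and their ordering are examples rather than the actual launch message format - and it assumes the orte_dss.pack interface and its dss type codes (error checks omitted):

    static int pack_launch_msg_sketch(orte_buffer_t *buf, orte_jobid_t jobid,
                                      orte_std_cntr_t num_nodes,
                                      orte_process_name_t *daemon,   /* [num_nodes] */
                                      bool *oversub,                 /* [num_nodes] */
                                      orte_std_cntr_t *nprocs,       /* [num_nodes] */
                                      orte_vpid_t **vpid)            /* [node][proc] */
    {
        orte_std_cntr_t n, p;
        /* job-level data, packed exactly once */
        orte_dss.pack(buf, &jobid, 1, ORTE_JOBID);
        orte_dss.pack(buf, &num_nodes, 1, ORTE_STD_CNTR);
        for (n = 0; n < num_nodes; n++) {
            /* node-level data, packed once per node */
            orte_dss.pack(buf, &daemon[n], 1, ORTE_NAME);
            orte_dss.pack(buf, &oversub[n], 1, ORTE_BOOL);
            orte_dss.pack(buf, &nprocs[n], 1, ORTE_STD_CNTR);
            /* per-proc data for this node - no "key" strings anywhere */
            for (p = 0; p < nprocs[n]; p++) {
                orte_dss.pack(buf, &vpid[n][p], 1, ORTE_VPID);
            }
        }
        /* the orteds unpack in this same fixed order, so keys are unnecessary */
        return ORTE_SUCCESS;
    }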

The savings become significant at scale. Here is a comparison between the revised trunk and the trunk prior to this commit (all data was taken on odin, using openib, 64 nodes, unity message routing, tested with application consisting of mpi_init/mpi_barrier/mpi_finalize, all execution times given in seconds, all launch message sizes in bytes):

Per-node scaling, taken at 1ppn:

#nodes           original trunk                         revised trunk
             time               size                time               size
      1      0.10                819                0.09                564
      2      0.14               1070                0.14                677
      3      0.15               1321                0.14                790
      4      0.15               1572                0.15                903
      8      0.17               2576                0.20               1355
     16      0.25               4584                0.21               2259
     32      0.28               8600                0.27               4067
     64      0.50              16632                0.39               7683

Per-proc scaling, taken at 64 nodes:

   ppn             original trunk                         revised trunk
              time               size                time               size
      1       0.50              16669                0.40               7720
      2       0.55              32733                0.54              11048
      3       0.87              48797                0.81              14376
      4       1.0               64861                0.85              17704


Condensing those numbers, it appears we gained:

per-node message size: 251 bytes/node -> 113 bytes/node

per-proc message size: 251 bytes/proc  -> 52 bytes/proc

per-job message size:  568 bytes/job -> 399 bytes/job 
(job-specific data such as jobid, override oversubscribe flag, total #procs in job, total slots allocated)
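(For reference, the per-node and per-proc figures above are just the slopes of the two tables: per-node at 1ppn, (16632 - 819)/63 = 251 bytes/node originally vs (7683 - 564)/63 = 113 bytes/node revised; per-proc at 64 nodes, (64861 - 16669)/192 = 251 bytes/proc originally vs (17704 - 7720)/192 = 52 bytes/proc revised.)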

The fact that the two pre-commit trunk numbers are identical confirms that each proc's data also carried the node data. It isn't quite the 10x message reduction I had hoped for, but it is significant and gives much better scaling.

Note that the timing info was, as usual, pretty chaotic - the numbers cited here were typical across several runs taken after the initial one to avoid NFS file positioning influences.

Also note that this commit removes the orte_process_info.vpid_start field and the handful of places that passed that useless value. By definition, all jobs start at vpid=0, so all we were doing was passing "0" around. In fact, many places simply hardwired it to "0" anyway rather than deal with it.

This commit was SVN r16428.
2007-10-11 15:57:26 +00:00


/* -*- C -*-
 *
 * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation. All rights reserved.
 * Copyright (c) 2004-2005 The University of Tennessee and The University
 *                         of Tennessee Research Foundation. All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart. All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */
/**
 * @file
 *
 * The OpenRTE Daemon's Local Launch Subsystem
 *
 */
#ifndef ORTE_MCA_ODLS_H
#define ORTE_MCA_ODLS_H
#include "orte_config.h"
#include "opal/mca/mca.h"
#include "opal/class/opal_list.h"
#include "orte/dss/dss_types.h"
#include "orte/mca/gpr/gpr_types.h"
#include "orte/mca/ns/ns_types.h"
#include "orte/mca/rmaps/rmaps_types.h"
#include "orte/mca/rml/rml_types.h"
#include "orte/mca/odls/odls_types.h"
/*
* odls module functions
*/
#if defined(c_plusplus) || defined(__cplusplus)
extern "C" {
#endif
/*
* Construct a notify data object for use in adding local processes
* In order to reuse daemons, we need a way for the HNP to construct a notify_data object that
* contains the data needed by the active ODLS component to launch a local process. Since the
* only one that knows what a particular ODLS component needs is that component, we require an
* entry point that the HNP can call to get the required notify_data object. This is constructed
* for *all* nodes - the individual orteds then parse that data to find the specific launch info
* for procs on their node
*/
typedef int (*orte_odls_base_module_get_add_procs_data_fn_t)(orte_gpr_notify_data_t **data,
                                                             orte_job_map_t *map);
/**
* Locally launch the provided processes
*/
typedef int (*orte_odls_base_module_launch_local_processes_fn_t)(orte_gpr_notify_data_t *data);
/**
* Kill the local processes on this node
*/
typedef int (*orte_odls_base_module_kill_local_processes_fn_t)(orte_jobid_t job, bool set_state);
/**
* Signal local processes
*/
typedef int (*orte_odls_base_module_signal_local_process_fn_t)(const orte_process_name_t *proc,
                                                               int32_t signal);
/**
* Deliver a message to local processes
*/
typedef int (*orte_odls_base_module_deliver_message_fn_t)(orte_jobid_t job, orte_buffer_t *buffer,
                                                          orte_rml_tag_t tag);
/**
* Extract the mapping info for a daemon-proc pair
*/
typedef int (*orte_odls_base_module_extract_proc_map_info_fn_t)(orte_process_name_t *daemon,
                                                                opal_list_t *proc_list,
                                                                orte_gpr_value_t *value);
/**
* Register to require sync before termination
*/
typedef int (*orte_odls_base_module_require_sync_fn_t)(orte_process_name_t *proc);
/**
* odls module version 1.3.0
*/
struct orte_odls_base_module_1_3_0_t {
    orte_odls_base_module_get_add_procs_data_fn_t get_add_procs_data;
    orte_odls_base_module_launch_local_processes_fn_t launch_local_procs;
    orte_odls_base_module_kill_local_processes_fn_t kill_local_procs;
    orte_odls_base_module_signal_local_process_fn_t signal_local_procs;
    orte_odls_base_module_deliver_message_fn_t deliver_message;
    orte_odls_base_module_extract_proc_map_info_fn_t extract_proc_map_info;
    orte_odls_base_module_require_sync_fn_t require_sync;
};
/** shorten orte_odls_base_module_1_3_0_t declaration */
typedef struct orte_odls_base_module_1_3_0_t orte_odls_base_module_1_3_0_t;
/** shorten orte_odls_base_module_t declaration */
typedef struct orte_odls_base_module_1_3_0_t orte_odls_base_module_t;
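/*
 * Example (illustrative only - "foo" is a hypothetical component, not part
 * of this interface): a component typically provides a statically
 * initialized module of this type and returns a pointer to it from its
 * init function:
 *
 *     orte_odls_base_module_t orte_odls_foo_module = {
 *         orte_odls_foo_get_add_procs_data,
 *         orte_odls_foo_launch_local_procs,
 *         orte_odls_foo_kill_local_procs,
 *         orte_odls_foo_signal_local_procs,
 *         orte_odls_foo_deliver_message,
 *         orte_odls_foo_extract_proc_map_info,
 *         orte_odls_foo_require_sync
 *     };
 */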
/**
* odls initialization function
*
* Called by the MCA framework to initialize the component. Invoked
* exactly once per process.
*
* @param priority (OUT) Relative priority or ranking used by MCA to
* select a module.
*/
typedef struct orte_odls_base_module_1_3_0_t*
    (*orte_odls_base_component_init_fn_t)(int *priority);
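/*
 * Illustrative only: a hypothetical "foo" component's init would set an
 * arbitrary example priority and hand back its module (or NULL to decline):
 *
 *     static orte_odls_base_module_t *orte_odls_foo_init(int *priority)
 *     {
 *         *priority = 10;
 *         return &orte_odls_foo_module;
 *     }
 */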
/**
* Cleanup all resources held by the component
*/
typedef int (*orte_odls_base_component_finalize_fn_t)(void);
/**
* odls component v1.3.0
*/
struct orte_odls_base_component_1_3_0_t {
    /** component version */
    mca_base_component_t version;
    /** component data */
    mca_base_component_data_1_0_0_t odls_data;
    /** Function called when component is initialized */
    orte_odls_base_component_init_fn_t init;
    /** Function called when component is finalized */
    orte_odls_base_component_finalize_fn_t finalize;
};
/** Convenience typedef */
typedef struct orte_odls_base_component_1_3_0_t orte_odls_base_component_1_3_0_t;
/** Convenience typedef */
typedef orte_odls_base_component_1_3_0_t orte_odls_base_component_t;
/**
* Macro for use in modules that are of type odls v1.3.0
*/
#define ORTE_ODLS_BASE_VERSION_1_3_0 \
    /* odls v1.3 is chained to MCA v1.0 */ \
    MCA_BASE_VERSION_1_0_0, \
    /* odls v1.3 */ \
    "odls", 1, 3, 0
/* Global structure for accessing ODLS functions */
ORTE_DECLSPEC extern orte_odls_base_module_t orte_odls; /* holds selected module's function pointers */
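/*
 * Usage sketch (illustrative only): callers go through this global to reach
 * whichever module was selected, e.g. an orted handling a launch message
 * might do:
 *
 *     if (ORTE_SUCCESS != (rc = orte_odls.launch_local_procs(data))) {
 *         ORTE_ERROR_LOG(rc);
 *     }
 *
 * where data is the orte_gpr_notify_data_t received from the HNP.
 */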
#if defined(c_plusplus) || defined(__cplusplus)
}
#endif
#endif /* ORTE_MCA_ODLS_H */