openmpi/orte/mca/odls/base/odls_base_frame.c

/*
 * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation. All rights reserved.
 * Copyright (c) 2004-2011 The University of Tennessee and The University
 *                         of Tennessee Research Foundation. All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart. All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * Copyright (c) 2010-2011 Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011      Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2011-2013 Los Alamos National Security, LLC.
 *                         All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */
#include "orte_config.h"
#include "orte/constants.h"
#ifdef HAVE_STRING_H
#include <string.h>
#endif
#include "opal/class/opal_ring_buffer.h"
#include "opal/mca/mca.h"
#include "opal/mca/base/base.h"
#include "opal/mca/hwloc/hwloc.h"
#include "opal/util/output.h"
#include "opal/util/path.h"
#include "opal/util/argv.h"
#include "orte/mca/errmgr/errmgr.h"
#include "orte/mca/plm/plm_types.h"
#include "orte/util/name_fns.h"
#include "orte/runtime/orte_globals.h"
#include "orte/util/show_help.h"
#include "orte/util/parse_options.h"
#include "orte/mca/ess/ess.h"
#include "orte/mca/odls/base/odls_private.h"
#include "orte/mca/odls/base/base.h"

/*
 * The following file was created by configure. It contains extern
 * statements and the definition of an array of pointers to each
 * component's public mca_base_component_t struct.
 */
#include "orte/mca/odls/base/static-components.h"

/*
 * Instantiate globals
 */
orte_odls_base_module_t orte_odls = {0};

/*
 * Framework global variables
 */
orte_odls_globals_t orte_odls_globals;
static int orte_odls_base_register(mca_base_register_flag_t flags)
{
    orte_odls_globals.timeout_before_sigkill = 1;
    (void) mca_base_var_register("orte", "odls", "base", "sigkill_timeout",
                                 "Time to wait for a process to die after issuing a kill signal to it",
                                 MCA_BASE_VAR_TYPE_INT, NULL, 0, 0,
                                 OPAL_INFO_LVL_9,
                                 MCA_BASE_VAR_SCOPE_READONLY,
                                 &orte_odls_globals.timeout_before_sigkill);

    return ORTE_SUCCESS;
}
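/*
 * Usage note (illustrative, not part of this file): registered as
 * ("orte", "odls", "base", "sigkill_timeout"), the variable is exposed
 * under the usual MCA naming convention, so the 1-second default above
 * can typically be overridden at run time, e.g.
 *
 *   export OMPI_MCA_odls_base_sigkill_timeout=5
 *   mpirun --mca odls_base_sigkill_timeout 5 ./a.out
 */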

static int orte_odls_base_close(void)
{
    int i;
    orte_proc_t *proc;
    opal_list_item_t *item;

    /* cleanup ODLS globals */
    while (NULL != (item = opal_list_remove_first(&orte_odls_globals.xterm_ranks))) {
        OBJ_RELEASE(item);
    }
    OBJ_DESTRUCT(&orte_odls_globals.xterm_ranks);

    /* cleanup the global list of local children and job data */
    for (i=0; i < orte_local_children->size; i++) {
        if (NULL != (proc = (orte_proc_t*)opal_pointer_array_get_item(orte_local_children, i))) {
            OBJ_RELEASE(proc);
        }
    }
    OBJ_RELEASE(orte_local_children);

    return mca_base_framework_components_close(&orte_odls_base_framework, NULL);
}

/**
 * Function for finding and opening either all MCA components, or the one
 * that was specifically requested via a MCA parameter.
 */
static int orte_odls_base_open(mca_base_open_flag_t flags)
{
    char **ranks=NULL, *tmp;
    int rc, i, rank;
    orte_namelist_t *nm;
    bool xterm_hold;
    /* initialize the global array of local children */
    orte_local_children = OBJ_NEW(opal_pointer_array_t);
    if (OPAL_SUCCESS != (rc = opal_pointer_array_init(orte_local_children,
                                                      1,
                                                      ORTE_GLOBAL_ARRAY_MAX_SIZE,
                                                      1))) {
        ORTE_ERROR_LOG(rc);
        return rc;
    }

    /* initialize ODLS globals */
    OBJ_CONSTRUCT(&orte_odls_globals.xterm_ranks, opal_list_t);
    orte_odls_globals.xtermcmd = NULL;

    /* check if the user requested that we display output in xterms */
    if (NULL != orte_xterm) {
        /* construct a list of ranks to be displayed */
        xterm_hold = false;
        orte_util_parse_range_options(orte_xterm, &ranks);
        for (i=0; i < opal_argv_count(ranks); i++) {
            if (0 == strcmp(ranks[i], "BANG")) {
                xterm_hold = true;
                continue;
            }
            nm = OBJ_NEW(orte_namelist_t);
            rank = strtol(ranks[i], NULL, 10);
            if (-1 == rank) {
                /* wildcard */
                nm->name.vpid = ORTE_VPID_WILDCARD;
            } else if (rank < 0) {
                /* error out on bozo case */
                orte_show_help("help-odls-base.txt",
                               "orte-odls-base:xterm-neg-rank",
                               true, rank);
                return ORTE_ERROR;
            } else {
                /* we can't check here if the rank is out of
                 * range as we don't yet know how many ranks
                 * will be in the job - we'll check later
                 */
                nm->name.vpid = rank;
            }
            opal_list_append(&orte_odls_globals.xterm_ranks, &nm->super);
        }
        opal_argv_free(ranks);

        /* construct the xtermcmd */
        orte_odls_globals.xtermcmd = NULL;
        tmp = opal_find_absolute_path("xterm");
        if (NULL == tmp) {
            return ORTE_ERROR;
        }
        opal_argv_append_nosize(&orte_odls_globals.xtermcmd, tmp);
        free(tmp);
        opal_argv_append_nosize(&orte_odls_globals.xtermcmd, "-T");
        opal_argv_append_nosize(&orte_odls_globals.xtermcmd, "save");
        if (xterm_hold) {
            opal_argv_append_nosize(&orte_odls_globals.xtermcmd, "-hold");
        }
        opal_argv_append_nosize(&orte_odls_globals.xtermcmd, "-e");
    }
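    /*
     * Illustrative example (not part of this file): for a request such as
     * "--xterm 1,3" the block above leaves xterm_ranks holding vpids 1 and 3,
     * and xtermcmd holding an argv prefix of roughly
     *
     *   { "/usr/bin/xterm", "-T", "save", "-e", NULL }
     *
     * with "-hold" inserted before "-e" when the hold marker ("BANG") was
     * parsed. The absolute path shown is an assumption - whatever
     * opal_find_absolute_path("xterm") returns on the host is used, and the
     * command to run inside the terminal is presumably appended after "-e"
     * by the local launcher.
     */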

    /* Open up all available components */
    return mca_base_framework_components_open(&orte_odls_base_framework, flags);
}

MCA_BASE_FRAMEWORK_DECLARE(orte, odls, "ORTE Daemon Launch Subsystem",
                           orte_odls_base_register, orte_odls_base_open, orte_odls_base_close,
                           mca_odls_base_static_components, 0);
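/*
 * Illustrative note (not part of this file): the declaration above produces
 * the orte_odls_base_framework object referenced earlier, wiring in the
 * register/open/close callbacks. Other code then drives the framework
 * through the generic MCA framework calls, e.g. (sketch, assuming the
 * standard mca_base_framework API of this code base):
 *
 *   mca_base_framework_open(&orte_odls_base_framework, 0);
 *   ...
 *   mca_base_framework_close(&orte_odls_base_framework);
 */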

static void launch_local_const(orte_odls_launch_local_t *ptr)
{
    ptr->ev = opal_event_alloc();
    ptr->job = ORTE_JOBID_INVALID;
    ptr->fork_local = NULL;
    ptr->retries = 0;
}
static void launch_local_dest(orte_odls_launch_local_t *ptr)
{
    opal_event_free(ptr->ev);
}

OBJ_CLASS_INSTANCE(orte_odls_launch_local_t,
                   opal_object_t,
                   launch_local_const,
                   launch_local_dest);
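/*
 * Illustrative usage (not part of this file): with the class instance above,
 * callers manage launch-local tracking objects through the OPAL object
 * system, e.g.
 *
 *   orte_odls_launch_local_t *ll = OBJ_NEW(orte_odls_launch_local_t);
 *   ll->job = jobid;                 // hypothetical jobid value
 *   ll->fork_local = my_fork_fn;     // hypothetical fork function
 *   ...
 *   OBJ_RELEASE(ll);                 // runs launch_local_dest when the
 *                                    // refcount drops to zero
 *
 * OBJ_NEW() runs launch_local_const(), so ptr->ev is allocated and the
 * remaining fields get the defaults shown in the constructor.
 */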