openmpi/orte/mca/ess/singleton/ess_singleton_module.c
Ralph Castain a523dba41d NOTE: this modifies the MPI-RTE interface
We have been getting several requests for new collectives that need to be inserted in various places of the MPI layer, all in support of either checkpoint/restart or various research efforts. Until now, this would have required that the collective id's be generated at launch, which required modifications to ORTE and other places. We chose not to make collectives reusable, as the race conditions associated with resetting collective counters are daunting.

This commit extends the collective system to allow self-generation of collective id's that the daemons need to support, thereby allowing developers to request any number of collectives for their work. There is one restriction: RTE collectives must occur at the process level - i.e., we don't currently have a way of tagging the collective to a specific thread. From the comment in the code:

 * In order to allow scalable
 * generation of collective id's, they are formed as:
 *
 * top 32-bits are the jobid of the procs involved in
 * the collective. For collectives across multiple jobs
 * (e.g., in a connect_accept), the daemon jobid will
 * be used as the id will be issued by mpirun. This
 * won't cause problems because daemons don't use the
 * collective_id
 *
 * bottom 32-bits are a rolling counter that recycles
 * when the max is hit. The daemon will cleanup each
 * collective upon completion, so this means a job can
 * never have more than 2**32 collectives going on at
 * a time. If someone needs more than that - they've got
 * a problem.
 *
 * Note that this means (for now) that RTE-level collectives
 * cannot be done by individual threads - they must be
 * done at the overall process level. This is required as
 * there is no guaranteed ordering for the collective id's,
 * and all the participants must agree on the id of the
 * collective they are executing. So if thread A on one
 * process asks for a collective id before thread B does,
 * but B asks before A on another process, the collectives will
 * be mixed and not result in the expected behavior. We may
 * find a way to relax this requirement in the future by
 * adding a thread context id to the jobid field (maybe taking the
 * lower 16-bits of that field).

This commit includes a test program (orte/test/mpi/coll_test.c) that cycles 100 times across barrier and modex collectives.
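
To make the id layout concrete, here is a minimal standalone sketch of the scheme described above. The names next_collective_id and coll_counter are hypothetical illustrations, not ORTE API, and the jobid value is an arbitrary example:

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

/* hypothetical per-job rolling counter; recycles when uint32_t overflows */
static uint32_t coll_counter = 0;

/* compose a 64-bit collective id: jobid in the top 32 bits,
 * rolling counter in the bottom 32 bits */
static uint64_t next_collective_id(uint32_t jobid)
{
    return ((uint64_t)jobid << 32) | (uint64_t)coll_counter++;
}

int main(void)
{
    uint32_t jobid = 0x4d100000;  /* example value, not a real ORTE jobid */
    printf("first id:  0x%016" PRIx64 "\n", next_collective_id(jobid));
    printf("second id: 0x%016" PRIx64 "\n", next_collective_id(jobid));
    return 0;
}

Because the counter simply wraps on overflow, a job is limited to 2**32 concurrent collectives, matching the restriction noted in the comment above.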

This commit was SVN r32203.
2014-07-10 18:53:12 +00:00

/*
 * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation.  All rights reserved.
 * Copyright (c) 2004-2011 The University of Tennessee and The University
 *                         of Tennessee Research Foundation.  All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart.  All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * Copyright (c) 2010      Oracle and/or its affiliates.  All rights reserved.
 * Copyright (c) 2011      Cisco Systems, Inc.  All rights reserved.
 * Copyright (c) 2013-2014 Intel, Inc.  All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 *
 */
#include "orte_config.h"
#include "orte/constants.h"
#ifdef HAVE_STRING_H
#include <string.h>
#endif
#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <signal.h>
#include <errno.h>
#include "opal/hash_string.h"
#include "opal/util/argv.h"
#include "opal/util/path.h"
#include "opal/mca/installdirs/installdirs.h"
#include "orte/util/show_help.h"
#include "orte/util/proc_info.h"
#include "orte/mca/errmgr/errmgr.h"
#include "orte/mca/routed/routed.h"
#include "orte/util/name_fns.h"
#include "orte/runtime/orte_globals.h"
#include "orte/util/nidmap.h"
#include "orte/util/session_dir.h"
#include "orte/mca/ess/ess.h"
#include "orte/mca/ess/base/base.h"
#include "orte/mca/ess/singleton/ess_singleton.h"
static int rte_init(void);
static int rte_finalize(void);
extern char *orte_ess_singleton_server_uri;
orte_ess_base_module_t orte_ess_singleton_module = {
    rte_init,
    rte_finalize,
    orte_ess_base_app_abort,
    NULL /* ft_event */
};
static int rte_init(void)
{
    int rc;
    char *param;
    uint16_t jobfam;
    uint32_t hash32;
    uint32_t bias;

    /* run the prolog */
    if (ORTE_SUCCESS != (rc = orte_ess_base_std_prolog())) {
        ORTE_ERROR_LOG(rc);
        return rc;
    }

    if (NULL != orte_ess_singleton_server_uri) {
        /* we are going to connect to a server HNP */
        if (0 == strncmp(orte_ess_singleton_server_uri, "file", strlen("file")) ||
            0 == strncmp(orte_ess_singleton_server_uri, "FILE", strlen("FILE"))) {
            char input[1024], *filename;
            FILE *fp;

            /* it is a file - get the filename */
            filename = strchr(orte_ess_singleton_server_uri, ':');
            if (NULL == filename) {
                /* filename is not correctly formatted */
                orte_show_help("help-orterun.txt", "orterun:ompi-server-filename-bad", true,
                               "singleton", orte_ess_singleton_server_uri);
                return ORTE_ERROR;
            }
            ++filename; /* space past the : */

            if (0 >= strlen(filename)) {
                /* they forgot to give us the name! */
                orte_show_help("help-orterun.txt", "orterun:ompi-server-filename-missing", true,
                               "singleton", orte_ess_singleton_server_uri);
                return ORTE_ERROR;
            }

            /* open the file and extract the uri */
            fp = fopen(filename, "r");
            if (NULL == fp) { /* can't find or read file! */
                orte_show_help("help-orterun.txt", "orterun:ompi-server-filename-access", true,
                               "singleton", orte_ess_singleton_server_uri);
                return ORTE_ERROR;
            }
            if (NULL == fgets(input, 1024, fp)) {
                /* something malformed about file */
                fclose(fp);
                orte_show_help("help-orterun.txt", "orterun:ompi-server-file-bad", true,
                               "singleton", orte_ess_singleton_server_uri, "singleton");
                return ORTE_ERROR;
            }
            fclose(fp);
            input[strlen(input)-1] = '\0';  /* remove newline */
            orte_process_info.my_hnp_uri = strdup(input);
        } else {
            orte_process_info.my_hnp_uri = strdup(orte_ess_singleton_server_uri);
        }
        /* save the daemon uri - we will process it later */
        orte_process_info.my_daemon_uri = strdup(orte_process_info.my_hnp_uri);
        /* for convenience, push the pubsub version of this param into the environ */
        asprintf(&param, "OMPI_MCA_pubsub_orte_server=%s", orte_process_info.my_hnp_uri);
        putenv(param);
    }

    /* indicate we are a singleton so orte_init knows what to do */
    orte_process_info.proc_type |= ORTE_PROC_SINGLETON;

    /* now define my own name */
    /* hash the nodename */
    OPAL_HASH_STR(orte_process_info.nodename, hash32);

    bias = (uint32_t)orte_process_info.pid;

    OPAL_OUTPUT_VERBOSE((5, orte_ess_base_framework.framework_output,
                         "ess:singleton: initial bias %ld nodename hash %lu",
                         (long)bias, (unsigned long)hash32));

    /* fold in the bias */
    hash32 = hash32 ^ bias;

    /* now compress to 16-bits */
    jobfam = (uint16_t)(((0x0000ffff & (0xffff0000 & hash32) >> 16)) ^ (0x0000ffff & hash32));

    OPAL_OUTPUT_VERBOSE((5, orte_ess_base_framework.framework_output,
                         "ess:singleton:: final jobfam %lu",
                         (unsigned long)jobfam));

    /* set the name - if we eventually spawn an HNP, it will use
     * local jobid 0, so offset us by 1
     */
    ORTE_PROC_MY_NAME->jobid = (0xffff0000 & ((uint32_t)jobfam << 16)) + 1;
    ORTE_PROC_MY_NAME->vpid = 0;

    orte_process_info.num_procs = 1;
    if (orte_process_info.max_procs < orte_process_info.num_procs) {
        orte_process_info.max_procs = orte_process_info.num_procs;
    }

    /* flag that we are not routing since we have no HNP */
    orte_routing_is_enabled = false;

    /* take a pass thru the session directory code to fillin the
     * tmpdir names - don't create anything yet
     */
    if (ORTE_SUCCESS != (rc = orte_session_dir(false,
                                               orte_process_info.tmpdir_base,
                                               orte_process_info.nodename, NULL,
                                               ORTE_PROC_MY_NAME))) {
        ORTE_ERROR_LOG(rc);
        return rc;
    }
    /* clear the session directory just in case there are
     * stale directories laying around
     */
    orte_session_dir_cleanup(ORTE_JOBID_WILDCARD);

    /* use the std app init to complete the procedure */
    if (ORTE_SUCCESS != (rc = orte_ess_base_app_setup(true))) {
        ORTE_ERROR_LOG(rc);
        return rc;
    }

    /* if one was provided, build my nidmap */
    if (ORTE_SUCCESS != (rc = orte_util_nidmap_init(orte_process_info.sync_buf))) {
        ORTE_ERROR_LOG(rc);
        return rc;
    }

    /* to the best of our knowledge, we are alone */
    orte_process_info.my_node_rank = 0;
    orte_process_info.my_local_rank = 0;

    /* set some envars */
    putenv("OMPI_NUM_APP_CTX=1");
    putenv("OMPI_FIRST_RANKS=0");
    putenv("OMPI_APP_CTX_NUM_PROCS=1");
    putenv("OMPI_MCA_orte_ess_num_procs=1");

    return ORTE_SUCCESS;
}
static int rte_finalize(void)
{
    int ret;

    /* deconstruct my nidmap and jobmap arrays */
    orte_util_nidmap_finalize();

    /* use the default procedure to finish */
    if (ORTE_SUCCESS != (ret = orte_ess_base_app_finalize())) {
        ORTE_ERROR_LOG(ret);
    }

    /* cleanup the environment */
    unsetenv("OMPI_NUM_APP_CTX");
    unsetenv("OMPI_FIRST_RANKS");
    unsetenv("OMPI_APP_CTX_NUM_PROCS");
    unsetenv("OMPI_MCA_orte_ess_num_procs");
    unsetenv("OMPI_MCA_pubsub_orte_server"); // just in case it is there

    return ret;
}