/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
 * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation.  All rights reserved.
 * Copyright (c) 2004-2011 The University of Tennessee and The University
 *                         of Tennessee Research Foundation.  All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart.  All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * Copyright (c) 2010      Oracle and/or its affiliates.  All rights reserved.
 * Copyright (c) 2011      Cisco Systems, Inc.  All rights reserved.
 * Copyright (c) 2013-2016 Intel, Inc.  All rights reserved.
 * Copyright (c) 2015      Los Alamos National Security, LLC.  All rights
 *                         reserved.
 * Copyright (c) 2016      Research Organization for Information Science
 *                         and Technology (RIST). All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 *
 */
#include "orte_config.h"
#include "orte/constants.h"

#include <string.h>

#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <signal.h>
#include <errno.h>
#include <assert.h>

#include "opal/hash_string.h"
#include "opal/util/argv.h"
#include "opal/util/opal_environ.h"
#include "opal/util/path.h"
#include "opal/runtime/opal_progress_threads.h"
#include "opal/mca/installdirs/installdirs.h"
#include "opal/mca/pmix/base/base.h"
#include "opal/mca/pmix/pmix.h"

#include "orte/util/show_help.h"
#include "orte/util/proc_info.h"
#include "orte/mca/errmgr/errmgr.h"
#include "orte/mca/plm/base/base.h"
#include "orte/util/name_fns.h"
#include "orte/runtime/orte_globals.h"
#include "orte/util/session_dir.h"
#include "orte/util/pre_condition_transports.h"

#include "orte/mca/ess/ess.h"
#include "orte/mca/ess/base/base.h"
#include "orte/mca/ess/singleton/ess_singleton.h"
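
/*
 * Added commentary (not part of the original source): this module implements
 * the "singleton" flavor of the ORTE Environment-Specific Services (ess)
 * framework. rte_init() either connects to an existing server HNP (via the
 * component's server_uri parameter), runs in isolated mode, or fork/exec's a
 * private orted acting as HNP (see fork_hnp() below), and then bootstraps the
 * process identity and local information through the OPAL pmix framework.
 * rte_finalize() undoes the environment changes made along the way.
 */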

static int rte_init(void);
static int rte_finalize(void);

orte_ess_base_module_t orte_ess_singleton_module = {
    rte_init,
    rte_finalize,
    orte_ess_base_app_abort,
    NULL /* ft_event */
};

extern char *orte_ess_singleton_server_uri;

static bool added_transport_keys = false;
static bool added_num_procs = false;
static bool added_app_ctx = false;
static bool added_pmix_envs = false;
static char *pmixenvars[4];
static bool progress_thread_running = false;

static int fork_hnp(void);

static int rte_init(void)
{
    int rc, ret;
    char *error = NULL;
    char *ev1, *ev2;
    opal_value_t *kv;
    char *val = NULL;
    int u32, *u32ptr;
    uint16_t u16, *u16ptr;
    orte_process_name_t name;

    /* run the prolog */
    if (ORTE_SUCCESS != (rc = orte_ess_base_std_prolog())) {
        ORTE_ERROR_LOG(rc);
        return rc;
    }
    u32ptr = &u32;
    u16ptr = &u16;

    if (NULL != mca_ess_singleton_component.server_uri) {
        /* we are going to connect to a server HNP */
        if (0 == strncmp(mca_ess_singleton_component.server_uri, "file", strlen("file")) ||
            0 == strncmp(mca_ess_singleton_component.server_uri, "FILE", strlen("FILE"))) {
            char input[1024], *filename;
            FILE *fp;

            /* it is a file - get the filename */
            filename = strchr(mca_ess_singleton_component.server_uri, ':');
            if (NULL == filename) {
                /* filename is not correctly formatted */
                orte_show_help("help-orterun.txt", "orterun:ompi-server-filename-bad", true,
                               "singleton", mca_ess_singleton_component.server_uri);
                return ORTE_ERROR;
            }
            ++filename; /* space past the : */

            if (0 >= strlen(filename)) {
                /* they forgot to give us the name! */
                orte_show_help("help-orterun.txt", "orterun:ompi-server-filename-missing", true,
                               "singleton", mca_ess_singleton_component.server_uri);
                return ORTE_ERROR;
            }

            /* open the file and extract the uri */
            fp = fopen(filename, "r");
            if (NULL == fp) { /* can't find or read file! */
                orte_show_help("help-orterun.txt", "orterun:ompi-server-filename-access", true,
                               "singleton", mca_ess_singleton_component.server_uri);
                return ORTE_ERROR;
            }
            memset(input, 0, 1024);  // initialize the array to ensure a NULL termination
            if (NULL == fgets(input, 1023, fp)) {
                /* something malformed about file */
                fclose(fp);
                orte_show_help("help-orterun.txt", "orterun:ompi-server-file-bad", true,
                               "singleton", mca_ess_singleton_component.server_uri, "singleton");
                return ORTE_ERROR;
            }
            fclose(fp);
            input[strlen(input)-1] = '\0';  /* remove newline */
            orte_process_info.my_hnp_uri = strdup(input);
        } else {
            orte_process_info.my_hnp_uri = strdup(mca_ess_singleton_component.server_uri);
        }
        /* save the daemon uri - we will process it later */
        orte_process_info.my_daemon_uri = strdup(orte_process_info.my_hnp_uri);
        /* construct our name - we are in their job family, so we know that
         * much. However, we cannot know how many other singletons and jobs
         * this HNP is running. Oh well - if someone really wants to use this
         * option, they can try to figure it out. For now, we'll just assume
         * we are the only ones */
        ORTE_PROC_MY_NAME->jobid = ORTE_CONSTRUCT_LOCAL_JOBID(ORTE_PROC_MY_HNP->jobid, 1);
        /* obviously, we are vpid=0 for this job */
        ORTE_PROC_MY_NAME->vpid = 0;
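        /* Illustration (added commentary, not from the original source): if the
         * server HNP runs as [1234,0], this singleton names itself [1234,1]
         * with vpid 0 - local jobid 1 within the server's job family. */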

        /* for convenience, push the pubsub version of this param into the environ */
        opal_setenv(OPAL_MCA_PREFIX"pubsub_orte_server", orte_process_info.my_hnp_uri, true, &environ);
    } else if (NULL != getenv("SINGULARITY_CONTAINER") ||
               mca_ess_singleton_component.isolated) {
        /* ensure we use the isolated pmix component */
        opal_setenv(OPAL_MCA_PREFIX"pmix", "isolated", true, &environ);
    } else {
        /* we want to use PMIX_NAMESPACE that will be sent by the hnp as a jobid */
        opal_setenv(OPAL_MCA_PREFIX"orte_launch", "1", true, &environ);
        /* spawn our very own HNP to support us */
        if (ORTE_SUCCESS != (rc = fork_hnp())) {
            ORTE_ERROR_LOG(rc);
            return rc;
        }
        /* our name was given to us by the HNP */
        opal_setenv(OPAL_MCA_PREFIX"pmix", "^s1,s2,cray,isolated", true, &environ);
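        /* Note (added commentary): the leading "^" in an MCA selection string
         * excludes the listed components, so "^s1,s2,cray,isolated" asks the
         * pmix framework to pick anything except the s1/s2/cray/isolated
         * components - i.e. a PMIx client able to talk to the HNP we just forked. */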
    }

    /* get an async event base - we use the opal_async one so
     * we don't startup extra threads if not needed */
    orte_event_base = opal_progress_thread_init(NULL);
    progress_thread_running = true;
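    /* Note (added commentary): opal_progress_thread_init(NULL) hands back an
     * event base serviced by an OPAL progress thread; rte_finalize() below
     * releases it via opal_progress_thread_finalize(NULL) when this flag is set. */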

    /* open and setup pmix */
    if (OPAL_SUCCESS != (ret = mca_base_framework_open(&opal_pmix_base_framework, 0))) {
        error = "opening pmix";
        goto error;
    }
    if (OPAL_SUCCESS != (ret = opal_pmix_base_select())) {
        error = "select pmix";
        goto error;
    }
    /* set the event base */
    opal_pmix_base_set_evbase(orte_event_base);
    /* initialize the selected module */
    if (!opal_pmix.initialized() && (OPAL_SUCCESS != (ret = opal_pmix.init()))) {
        /* we cannot run */
        error = "pmix init";
        goto error;
    }

    /* pmix.init set our process name down in the OPAL layer,
     * so carry it forward here */
    ORTE_PROC_MY_NAME->jobid = OPAL_PROC_MY_NAME.jobid;
    ORTE_PROC_MY_NAME->vpid = OPAL_PROC_MY_NAME.vpid;
    name.jobid = OPAL_PROC_MY_NAME.jobid;
    name.vpid = ORTE_VPID_WILDCARD;
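    /* Note (added commentary): "name" with a wildcard vpid is used below for
     * job-level lookups (max procs, local topology), while ORTE_PROC_MY_NAME
     * is used for per-process values such as the local and node rank. */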

    /* get our local rank from PMI */
    OPAL_MODEX_RECV_VALUE(ret, OPAL_PMIX_LOCAL_RANK,
                          ORTE_PROC_MY_NAME, &u16ptr, OPAL_UINT16);
    if (OPAL_SUCCESS != ret) {
        error = "getting local rank";
        goto error;
    }
    orte_process_info.my_local_rank = u16;

    /* get our node rank from PMI */
    OPAL_MODEX_RECV_VALUE(ret, OPAL_PMIX_NODE_RANK,
                          ORTE_PROC_MY_NAME, &u16ptr, OPAL_UINT16);
    if (OPAL_SUCCESS != ret) {
        error = "getting node rank";
        goto error;
    }
    orte_process_info.my_node_rank = u16;

    /* get max procs */
    OPAL_MODEX_RECV_VALUE(ret, OPAL_PMIX_MAX_PROCS,
                          &name, &u32ptr, OPAL_UINT32);
    if (OPAL_SUCCESS != ret) {
        error = "getting max procs";
        goto error;
    }
    orte_process_info.max_procs = u32;

    /* we are a singleton, so there is only one proc in the job */
    orte_process_info.num_procs = 1;
    /* push into the environ for pickup in MPI layer for
     * MPI-3 required info key
     */
    if (NULL == getenv(OPAL_MCA_PREFIX"orte_ess_num_procs")) {
        asprintf(&ev1, OPAL_MCA_PREFIX"orte_ess_num_procs=%d", orte_process_info.num_procs);
        putenv(ev1);
        added_num_procs = true;
    }
    if (NULL == getenv("OMPI_APP_CTX_NUM_PROCS")) {
        asprintf(&ev2, "OMPI_APP_CTX_NUM_PROCS=%d", orte_process_info.num_procs);
        putenv(ev2);
        added_app_ctx = true;
    }
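    /* Illustration (added commentary): for a singleton the environment now
     * carries the job size for the MPI layer, e.g. (assuming OPAL_MCA_PREFIX
     * expands to "OMPI_MCA_"):
     *     OMPI_MCA_orte_ess_num_procs=1
     *     OMPI_APP_CTX_NUM_PROCS=1
     */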

    /* get our app number from PMI - ok if not found */
    OPAL_MODEX_RECV_VALUE(ret, OPAL_PMIX_APPNUM,
                          ORTE_PROC_MY_NAME, &u32ptr, OPAL_UINT32);
    if (OPAL_SUCCESS == ret) {
        orte_process_info.app_num = u32;
    } else {
        orte_process_info.app_num = 0;
    }
    /* set some other standard values */
    orte_process_info.num_local_peers = 0;

    /* setup transport keys in case the MPI layer needs them -
     * we can use the jobfam and stepid as unique keys
     * because they are unique values assigned by the RM
     */
    assert(NULL != getenv(OPAL_MCA_PREFIX"orte_precondition_transports"));
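    /* Note (added commentary): the transport key is only asserted here, not
     * created - this path expects it to already be in the environment, so
     * added_transport_keys stays false and rte_finalize() leaves it alone. */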

    /* retrieve our topology */
    OPAL_MODEX_RECV_VALUE(ret, OPAL_PMIX_LOCAL_TOPO,
                          &name, &val, OPAL_STRING);
    if (OPAL_SUCCESS == ret && NULL != val) {
        /* load the topology */
        if (0 != hwloc_topology_init(&opal_hwloc_topology)) {
            ret = OPAL_ERROR;
            free(val);
            error = "setting topology";
            goto error;
        }
        if (0 != hwloc_topology_set_xmlbuffer(opal_hwloc_topology, val, strlen(val))) {
            ret = OPAL_ERROR;
            free(val);
            hwloc_topology_destroy(opal_hwloc_topology);
            error = "setting topology";
            goto error;
        }
        /* since we are loading this from an external source, we have to
         * explicitly set a flag so hwloc sets things up correctly
         */
        if (0 != hwloc_topology_set_flags(opal_hwloc_topology,
                                          (HWLOC_TOPOLOGY_FLAG_IS_THISSYSTEM |
                                           HWLOC_TOPOLOGY_FLAG_WHOLE_SYSTEM |
                                           HWLOC_TOPOLOGY_FLAG_IO_DEVICES))) {
            ret = OPAL_ERROR;
            hwloc_topology_destroy(opal_hwloc_topology);
            free(val);
            error = "setting topology";
            goto error;
        }
        /* now load the topology */
        if (0 != hwloc_topology_load(opal_hwloc_topology)) {
            ret = OPAL_ERROR;
            hwloc_topology_destroy(opal_hwloc_topology);
            free(val);
            error = "setting topology";
            goto error;
        }
        free(val);
    } else {
        /* it wasn't passed down to us, so go get it */
        if (OPAL_SUCCESS != (ret = opal_hwloc_base_get_topology())) {
            error = "topology discovery";
            goto error;
        }
        /* push it into the PMIx database in case someone
         * tries to retrieve it so we avoid an attempt to
         * get it again */
        kv = OBJ_NEW(opal_value_t);
        kv->key = strdup(OPAL_PMIX_LOCAL_TOPO);
        kv->type = OPAL_STRING;
        if (0 != (ret = hwloc_topology_export_xmlbuffer(opal_hwloc_topology, &kv->data.string, &u32))) {
            error = "topology export";
            goto error;
        }
        if (OPAL_SUCCESS != (ret = opal_pmix.store_local(ORTE_PROC_MY_NAME, kv))) {
            error = "topology store";
            goto error;
        }
        OBJ_RELEASE(kv);
    }

    /* use the std app init to complete the procedure */
    if (ORTE_SUCCESS != (rc = orte_ess_base_app_setup(true))) {
        ORTE_ERROR_LOG(rc);
        return rc;
    }

    /* push our hostname so others can find us, if they need to */
    OPAL_MODEX_SEND_VALUE(ret, OPAL_PMIX_GLOBAL, OPAL_PMIX_HOSTNAME, orte_process_info.nodename, OPAL_STRING);
    if (ORTE_SUCCESS != ret) {
        error = "db store hostname";
        goto error;
    }

    return ORTE_SUCCESS;

  error:
    if (ORTE_ERR_SILENT != ret && !orte_report_silent_errors) {
        orte_show_help("help-orte-runtime.txt",
                       "orte_init:startup:internal-failure",
                       true, error, ORTE_ERROR_NAME(ret), ret);
    }
    return ret;
}

static int rte_finalize(void)
{
    int ret;

    /* remove the envars that we pushed into environ
     * so we leave that structure intact
     */
    if (added_transport_keys) {
        unsetenv(OPAL_MCA_PREFIX"orte_precondition_transports");
    }
    if (added_num_procs) {
        unsetenv(OPAL_MCA_PREFIX"orte_ess_num_procs");
    }
    if (added_app_ctx) {
        unsetenv("OMPI_APP_CTX_NUM_PROCS");
    }
    if (added_pmix_envs) {
        unsetenv("PMIX_NAMESPACE");
        unsetenv("PMIX_RANK");
        unsetenv("PMIX_SERVER_URI");
        unsetenv("PMIX_SECURITY_MODE");
    }
    /* use the default procedure to finish */
    if (ORTE_SUCCESS != (ret = orte_ess_base_app_finalize())) {
        ORTE_ERROR_LOG(ret);
    }

    /* mark us as finalized */
    if (NULL != opal_pmix.finalize) {
        opal_pmix.finalize();
        (void) mca_base_framework_close(&opal_pmix_base_framework);
    }

    /* release the event base */
    if (progress_thread_running) {
        opal_progress_thread_finalize(NULL);
        progress_thread_running = false;
    }
    return ret;
}

#define ORTE_URI_MSG_LGTH 256

static void set_handler_default(int sig)
{
    struct sigaction act;

    act.sa_handler = SIG_DFL;
    act.sa_flags = 0;
    sigemptyset(&act.sa_mask);

    sigaction(sig, &act, (struct sigaction *)0);
}

static int fork_hnp(void)
{
    int p[2], death_pipe[2];
    char *cmd;
    char **argv = NULL;
    int argc;
    char *param, *cptr;
    sigset_t sigs;
    int buffer_length, num_chars_read, chunk;
    char *orted_uri;
    int rc, i;

    /* A pipe is used to communicate between the parent and child to
       indicate whether the exec ultimately succeeded or failed.  The
       child sets the pipe to be close-on-exec; the child only ever
       writes anything to the pipe if there is an error (e.g.,
       executable not found, exec() fails, etc.).  The parent does a
       blocking read on the pipe; if the pipe closed with no data,
       then the exec() succeeded.  If the parent reads something from
       the pipe, then the child was letting us know that it failed.
    */
    if (pipe(p) < 0) {
        ORTE_ERROR_LOG(ORTE_ERR_SYS_LIMITS_PIPES);
        return ORTE_ERR_SYS_LIMITS_PIPES;
    }

    /* we also have to give the HNP a pipe it can watch to know when
     * we terminated. Since the HNP is going to be a child of us, it
     * can't just use waitpid to see when we leave - so it will watch
     * the pipe instead
     */
    if (pipe(death_pipe) < 0) {
        ORTE_ERROR_LOG(ORTE_ERR_SYS_LIMITS_PIPES);
        return ORTE_ERR_SYS_LIMITS_PIPES;
    }

    /* find the orted binary using the install_dirs support - this also
     * checks to ensure that we can see this executable and it *is* executable by us
     */
    cmd = opal_path_access("orted", opal_install_dirs.bindir, X_OK);
    if (NULL == cmd) {
        /* guess we couldn't do it - best to abort */
        ORTE_ERROR_LOG(ORTE_ERR_FILE_NOT_EXECUTABLE);
        close(p[0]);
        close(p[1]);
        return ORTE_ERR_FILE_NOT_EXECUTABLE;
    }

    /* okay, setup an appropriate argv */
    opal_argv_append(&argc, &argv, "orted");

    /* tell the daemon it is to be the HNP */
    opal_argv_append(&argc, &argv, "--hnp");

    /* tell the daemon to get out of our process group */
    opal_argv_append(&argc, &argv, "--set-sid");

    /* tell the daemon to report back its uri so we can connect to it */
    opal_argv_append(&argc, &argv, "--report-uri");
    asprintf(&param, "%d", p[1]);
    opal_argv_append(&argc, &argv, param);
    free(param);

    /* give the daemon a pipe it can watch to tell when we have died */
    opal_argv_append(&argc, &argv, "--singleton-died-pipe");
    asprintf(&param, "%d", death_pipe[0]);
    opal_argv_append(&argc, &argv, param);
    free(param);

    /* add any debug flags */
    if (orte_debug_flag) {
        opal_argv_append(&argc, &argv, "--debug");
    }

    if (orte_debug_daemons_flag) {
        opal_argv_append(&argc, &argv, "--debug-daemons");
    }

    if (orte_debug_daemons_file_flag) {
        if (!orte_debug_daemons_flag) {
            opal_argv_append(&argc, &argv, "--debug-daemons");
        }
        opal_argv_append(&argc, &argv, "--debug-daemons-file");
    }

    /* indicate that it must use the novm state machine */
    opal_argv_append(&argc, &argv, "-"OPAL_MCA_CMD_LINE_ID);
    opal_argv_append(&argc, &argv, "state_novm_select");
    opal_argv_append(&argc, &argv, "1");

    /* direct the selection of the ess component */
    opal_argv_append(&argc, &argv, "-"OPAL_MCA_CMD_LINE_ID);
    opal_argv_append(&argc, &argv, "ess");
    opal_argv_append(&argc, &argv, "hnp");

    /* direct the selection of the pmix component */
    opal_argv_append(&argc, &argv, "-"OPAL_MCA_CMD_LINE_ID);
    opal_argv_append(&argc, &argv, "pmix");
    opal_argv_append(&argc, &argv, "^s1,s2,cray,isolated");
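
    /* Illustration (added commentary, not part of the original source): with
     * the default flags, and assuming OPAL_MCA_CMD_LINE_ID expands to "mca",
     * the child ends up exec'ing something like:
     *     orted --hnp --set-sid --report-uri <write-fd> \
     *           --singleton-died-pipe <read-fd> \
     *           -mca state_novm_select 1 -mca ess hnp \
     *           -mca pmix ^s1,s2,cray,isolated
     */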

    /* Fork off the child */
    orte_process_info.hnp_pid = fork();
    if (orte_process_info.hnp_pid < 0) {
        ORTE_ERROR_LOG(ORTE_ERR_SYS_LIMITS_CHILDREN);
        close(p[0]);
        close(p[1]);
        close(death_pipe[0]);
        close(death_pipe[1]);
        free(cmd);
        opal_argv_free(argv);
        return ORTE_ERR_SYS_LIMITS_CHILDREN;
    }

    if (orte_process_info.hnp_pid == 0) {
        close(p[0]);
        close(death_pipe[1]);
        /* I am the child - exec me */

        /* Set signal handlers back to the default.  Do this close
           to the execve() because the event library may (and likely
           will) reset them.  If we don't do this, the event
           library may have left some set that, at least on some
           OS's, don't get reset via fork() or exec().  Hence, the
           orted could be unkillable (for example). */
        set_handler_default(SIGTERM);
        set_handler_default(SIGINT);
        set_handler_default(SIGHUP);
        set_handler_default(SIGPIPE);
        set_handler_default(SIGCHLD);

        /* Unblock all signals, for many of the same reasons that
           we set the default handlers, above.  This is noticeable
           on Linux where the event library blocks SIGTERM, but we
           don't want that blocked by the orted (or, more
           specifically, we don't want it to be blocked by the
           orted and then inherited by the ORTE processes that it
           forks, making them unkillable by SIGTERM). */
        sigprocmask(0, 0, &sigs);
        sigprocmask(SIG_UNBLOCK, &sigs, 0);

        execv(cmd, argv);

        /* if I get here, the execv failed! */
        orte_show_help("help-ess-base.txt", "ess-base:execv-error",
                       true, cmd, strerror(errno));
        exit(1);

    } else {
        int count;

        free(cmd);
        /* I am the parent - wait to hear something back and
         * report results
         */
        close(p[1]);  /* parent closes the write - orted will write its contact info to it */
        close(death_pipe[0]);  /* parent closes the death_pipe's read */
        opal_argv_free(argv);

        /* setup the buffer to read the HNP's uri */
        buffer_length = ORTE_URI_MSG_LGTH;
        chunk = ORTE_URI_MSG_LGTH - 1;
        num_chars_read = 0;
        orted_uri = (char*)malloc(buffer_length);
        memset(orted_uri, 0, buffer_length);

        while (0 != (rc = read(p[0], &orted_uri[num_chars_read], chunk))) {
            if (rc < 0 && (EAGAIN == errno || EINTR == errno)) {
                continue;
            } else if (rc < 0) {
                num_chars_read = -1;
                break;
            }
            /* we read something - better get more */
            num_chars_read += rc;
            chunk -= rc;
            if (0 == chunk) {
                chunk = ORTE_URI_MSG_LGTH;
                orted_uri = realloc((void*)orted_uri, buffer_length + chunk);
                memset(&orted_uri[buffer_length], 0, chunk);
                buffer_length += chunk;
            }
        }
        close(p[0]);

        if (num_chars_read <= 0) {
            /* we didn't get anything back - this is bad */
            ORTE_ERROR_LOG(ORTE_ERR_HNP_COULD_NOT_START);
            free(orted_uri);
            return ORTE_ERR_HNP_COULD_NOT_START;
        }

        /* parse the sysinfo from the returned info - must
         * start from the end of the string as the uri itself
         * can contain brackets */
        if (NULL == (param = strrchr(orted_uri, '['))) {
            ORTE_ERROR_LOG(ORTE_ERR_COMM_FAILURE);
            free(orted_uri);
            return ORTE_ERR_COMM_FAILURE;
        }
        *param = '\0';  /* terminate the uri string */
        ++param;  /* point to the start of the sysinfo */

        /* find the end of the sysinfo */
        if (NULL == (cptr = strchr(param, ']'))) {
            ORTE_ERROR_LOG(ORTE_ERR_COMM_FAILURE);
            free(orted_uri);
            return ORTE_ERR_COMM_FAILURE;
        }
        *cptr = '\0';  /* terminate the sysinfo string */
        ++cptr;  /* point to the start of the pmix uri */
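
        /* Note (added commentary): the parsing above implies the daemon reports
         * back a single string of the form
         *     <hnp-uri>[<sysinfo>]<pmix-env>,<pmix-env>,...
         * where the sysinfo sits in the last bracketed section and everything
         * after the closing ']' is a comma-separated list of PMIx environment
         * settings. */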

        /* convert the sysinfo string */
        if (ORTE_SUCCESS != (rc = orte_util_convert_string_to_sysinfo(&orte_local_cpu_type,
                                                                      &orte_local_cpu_model, param))) {
            ORTE_ERROR_LOG(rc);
            free(orted_uri);
            return rc;
        }

        /* save the daemon uri - we will process it later */
        orte_process_info.my_daemon_uri = strdup(orted_uri);
        /* likewise, since this is also the HNP, set that uri too */
        orte_process_info.my_hnp_uri = orted_uri;

        /* split the pmix_uri into its parts */
        argv = opal_argv_split(cptr, ',');
        count = opal_argv_count(argv);
        /* push each piece into the environment */
        for (i = 0; i < count; i++) {
            pmixenvars[i] = strdup(argv[i]);
            putenv(pmixenvars[i]);
        }
        opal_argv_free(argv);
        added_pmix_envs = true;
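        /* Note (added commentary): pmixenvars[] has room for four entries,
         * which lines up with the four variables rte_finalize() unsets when
         * added_pmix_envs is true (PMIX_NAMESPACE, PMIX_RANK, PMIX_SERVER_URI,
         * PMIX_SECURITY_MODE); the strdup'd strings stay referenced by the
         * environment, which is why they are not freed here. */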

        /* all done - report success */
        return ORTE_SUCCESS;
    }
}