/*
 * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation.  All rights reserved.
 * Copyright (c) 2004-2011 The University of Tennessee and The University
 *                         of Tennessee Research Foundation.  All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart.  All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * Copyright (c) 2012      Los Alamos National Security, LLC.
 *                         All rights reserved.
 *
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */

#include "orte_config.h"
#include "orte/types.h"
#include "orte/constants.h"

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <fcntl.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifdef HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#endif
#ifdef HAVE_NETINET_IN_H
#include <netinet/in.h>
#endif
#ifdef HAVE_ARPA_INET_H
#include <arpa/inet.h>
#endif
#ifdef HAVE_NETDB_H
#include <netdb.h>
#endif
#ifdef HAVE_IFADDRS_H
#include <ifaddrs.h>
#endif

#include "opal/dss/dss.h"
#include "opal/runtime/opal.h"
#include "opal/class/opal_pointer_array.h"
#include "opal/mca/hwloc/base/base.h"
#include "opal/util/output.h"
#include "opal/util/argv.h"
#include "opal/datatype/opal_datatype.h"

#include "orte/mca/db/db.h"
#include "orte/mca/errmgr/errmgr.h"
#include "orte/mca/odls/base/odls_private.h"
#include "orte/util/show_help.h"
#include "orte/util/proc_info.h"
#include "orte/util/name_fns.h"
#include "orte/util/regex.h"
#include "orte/runtime/orte_globals.h"
#include "orte/mca/rml/base/rml_contact.h"
#include "orte/mca/state/state.h"

#include "orte/util/nidmap.h"

static int orte_nidmap_verbose, orte_nidmap_output=-1;

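/* Initialize nidmap support in this process. The verbosity MCA
 * parameter is registered first; then, if a launch buffer was given,
 * the local topology (when hwloc support is built in), the node map
 * byte object, and the process map byte object are extracted from it
 * and decoded.
 */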
int orte_util_nidmap_init(opal_buffer_t *buffer)
{
    int32_t cnt;
    int rc;
    opal_byte_object_t *bo;

    mca_base_param_reg_int_name("orte", "nidmap_verbose",
                                "Verbosity of the nidmap subsystem",
                                true, false, 0, &orte_nidmap_verbose);
    if (0 < orte_nidmap_verbose) {
        orte_nidmap_output = opal_output_open(NULL);
        opal_output_set_verbosity(orte_nidmap_output, orte_nidmap_verbose);
    }

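    /* note: the parameter registered above should be reachable as
     * "orte_nidmap_verbose" (e.g. "-mca orte_nidmap_verbose 2");
     * any level above zero opens a dedicated debug output stream */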

    /* it is okay if the buffer is empty */
    if (NULL == buffer || 0 == buffer->bytes_used) {
        return ORTE_SUCCESS;
    }

#if OPAL_HAVE_HWLOC
    {
        hwloc_topology_t topo;

        /* extract the topology */
        cnt=1;
        if (ORTE_SUCCESS != (rc = opal_dss.unpack(buffer, &topo, &cnt, OPAL_HWLOC_TOPO))) {
            ORTE_ERROR_LOG(rc);
            return rc;
        }
        if (NULL == opal_hwloc_topology) {
            opal_hwloc_topology = topo;
        } else {
            hwloc_topology_destroy(topo);
        }
    }
#endif

    /* extract the byte object holding the daemonmap */
    cnt=1;
    if (ORTE_SUCCESS != (rc = opal_dss.unpack(buffer, &bo, &cnt, OPAL_BYTE_OBJECT))) {
        ORTE_ERROR_LOG(rc);
        return rc;
    }
    /* unpack the node map */
    if (ORTE_SUCCESS != (rc = orte_util_decode_nodemap(bo))) {
        ORTE_ERROR_LOG(rc);
        return rc;
    }
    /* the bytes in the object were free'd by the decode */

    /* extract the byte object holding the process map */
    cnt=1;
    if (ORTE_SUCCESS != (rc = opal_dss.unpack(buffer, &bo, &cnt, OPAL_BYTE_OBJECT))) {
        ORTE_ERROR_LOG(rc);
        return rc;
    }
    /* unpack the process map */
    if (ORTE_SUCCESS != (rc = orte_util_decode_pidmap(bo))) {
        ORTE_ERROR_LOG(rc);
        return rc;
    }
    /* the bytes in the object were free'd by the decode */

    return ORTE_SUCCESS;
}

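/* Tear down whatever nidmap state this process holds - currently just
 * the hwloc topology, if one was stored.
 */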
void orte_util_nidmap_finalize(void)
{
#if OPAL_HAVE_HWLOC
    /* destroy the topology */
    if (NULL != opal_hwloc_topology) {
        hwloc_topology_destroy(opal_hwloc_topology);
        opal_hwloc_topology = NULL;
    }
#endif
}

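/* With static ports enabled, a daemon can compute its peers' contact
 * info without any exchange: daemon vpids follow the order of the node
 * list (the HNP is vpid 0, the first listed node gets vpid 1), all of
 * the peer daemons use this daemon's own port, and the address comes
 * from a hostname lookup.
 */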
#if ORTE_ENABLE_STATIC_PORTS
int orte_util_build_daemon_nidmap(char **nodes)
{
    int i, num_nodes;
    int rc;
    struct hostent *h;
    opal_buffer_t buf;
    orte_process_name_t proc;
    char *uri, *addr;
    char *proc_name;

    num_nodes = opal_argv_count(nodes);

    OPAL_OUTPUT_VERBOSE((2, orte_nidmap_output,
                         "%s orte:util:build:daemon:nidmap found %d nodes",
                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME), num_nodes));

    if (0 == num_nodes) {
        /* nothing to do */
        return ORTE_SUCCESS;
    }

    /* install the entry for the HNP */
    proc.jobid = ORTE_PROC_MY_NAME->jobid;
    proc.vpid = 0;
    if (ORTE_SUCCESS != (rc = orte_db.store(&proc, ORTE_DB_DAEMON_VPID, &proc.vpid, ORTE_VPID))) {
        ORTE_ERROR_LOG(rc);
        return rc;
    }
    addr = "HNP";
    if (ORTE_SUCCESS != (rc = orte_db.store(&proc, ORTE_DB_HOSTNAME, addr, OPAL_STRING))) {
        ORTE_ERROR_LOG(rc);
        return rc;
    }

    /* the daemon vpids will be assigned in order,
     * starting with vpid=1 for the first node in
     * the list
     */
    OBJ_CONSTRUCT(&buf, opal_buffer_t);
    for (i=0; i < num_nodes; i++) {
        /* define the vpid for this daemon */
        proc.vpid = i+1;
        /* store the hostname for the proc */
        if (ORTE_SUCCESS != (rc = orte_db.store(&proc, ORTE_DB_HOSTNAME, nodes[i], OPAL_STRING))) {
            ORTE_ERROR_LOG(rc);
            return rc;
        }
        /* the arch defaults to our arch so that non-hetero
         * case will yield correct behavior
         */
        if (ORTE_SUCCESS != (rc = orte_db.store(&proc, ORTE_DB_ARCH, &opal_local_arch, OPAL_UINT32))) {
            ORTE_ERROR_LOG(rc);
            return rc;
        }

        /* lookup the address of this node */
        if (NULL == (h = gethostbyname(nodes[i]))) {
            ORTE_ERROR_LOG(ORTE_ERR_NOT_FOUND);
            return ORTE_ERR_NOT_FOUND;
        }
        addr = inet_ntoa(*(struct in_addr*)h->h_addr_list[0]);

        /* since we are using static ports, all my fellow daemons will be on my
         * port. Setup the contact info for each daemon in my hash tables. Note
         * that this will -not- open a port to those daemons, but will only
         * define the info necessary for opening such a port if/when I communicate
         * to them
         */
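        /* note: the URI packed below takes the RML contact form
         * "<process-name>;tcp://<addr>:<port>", which is presumably
         * what orte_rml_base_update_contact_info() expects to parse */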
        /* construct the URI */
        orte_util_convert_process_name_to_string(&proc_name, &proc);
        asprintf(&uri, "%s;tcp://%s:%d", proc_name, addr, (int)orte_process_info.my_port);
        OPAL_OUTPUT_VERBOSE((2, orte_nidmap_output,
                             "%s orte:util:build:daemon:nidmap node %s daemon %d addr %s uri %s",
                             ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                             nodes[i], i+1, addr, uri));
        opal_dss.pack(&buf, &uri, 1, OPAL_STRING);
        free(proc_name);
        free(uri);
    }

    /* load the hash tables */
    if (ORTE_SUCCESS != (rc = orte_rml_base_update_contact_info(&buf))) {
        ORTE_ERROR_LOG(rc);
    }
    OBJ_DESTRUCT(&buf);

    return rc;
}
#endif

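/* Encode the global node pool into a byte object. The packed layout,
 * which the decode routines below must mirror, is:
 *
 *   int32       number of nodes
 *   then, for each node:
 *     vpid      daemon on that node (ORTE_VPID_INVALID if none)
 *     string    node name (stripped to the short form unless
 *               orte_keep_fqdn_hostnames is set)
 *     uint8     oversubscribed flag
 */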
int orte_util_encode_nodemap(opal_byte_object_t *boptr)
{
    orte_vpid_t vpid;
    orte_node_t *node;
    int32_t i, num_nodes;
    int rc;
    opal_buffer_t buf;
    char *ptr, *nodename;

    /* setup a buffer for tmp use */
    OBJ_CONSTRUCT(&buf, opal_buffer_t);

    /* determine the number of nodes in the global node array */
    num_nodes = 0;
    for (i=0; i < orte_node_pool->size; i++) {
        if (NULL == opal_pointer_array_get_item(orte_node_pool, i)) {
            continue;
        }
        ++num_nodes;
    }

    /* pack number of nodes */
    if (ORTE_SUCCESS != (rc = opal_dss.pack(&buf, &num_nodes, 1, OPAL_INT32))) {
        ORTE_ERROR_LOG(rc);
        return rc;
    }

    /* pack the data for each node by daemon */
    for (i=0; i < orte_node_pool->size; i++) {
        if (NULL == (node = (orte_node_t*)opal_pointer_array_get_item(orte_node_pool, i))) {
            continue;
        }
        if (NULL == node->daemon) {
            /* some nodes may not have daemons on them */
            vpid = ORTE_VPID_INVALID;
        } else {
            vpid = node->daemon->name.vpid;
        }
        if (ORTE_SUCCESS != (rc = opal_dss.pack(&buf, &vpid, 1, ORTE_VPID))) {
            ORTE_ERROR_LOG(rc);
            return rc;
        }
        /* pack the name of the node */
        if (!orte_keep_fqdn_hostnames) {
            nodename = strdup(node->name);
            if (NULL != (ptr = strchr(nodename, '.'))) {
                *ptr = '\0';
            }
            if (ORTE_SUCCESS != (rc = opal_dss.pack(&buf, &nodename, 1, OPAL_STRING))) {
                ORTE_ERROR_LOG(rc);
                return rc;
            }
            free(nodename);
        } else {
            if (ORTE_SUCCESS != (rc = opal_dss.pack(&buf, &node->name, 1, OPAL_STRING))) {
                ORTE_ERROR_LOG(rc);
                return rc;
            }
        }
        /* pack the oversubscribed flag */
        if (ORTE_SUCCESS != (rc = opal_dss.pack(&buf, &node->oversubscribed, 1, OPAL_UINT8))) {
            ORTE_ERROR_LOG(rc);
            return rc;
        }
    }

    /* transfer the payload to the byte object */
    opal_dss.unload(&buf, (void**)&boptr->bytes, &boptr->size);
    OBJ_DESTRUCT(&buf);

    return ORTE_SUCCESS;
}

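/* The application-side decode does not build orte_node_t objects; it
 * simply records each daemon's hostname in the database, copies the
 * entry for its own daemon under its own name, and counts how many
 * daemons were reported.
 */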
/* decode a nodemap for an application process */
int orte_util_decode_nodemap(opal_byte_object_t *bo)
{
    int n;
    int32_t num_nodes, i, num_daemons;
    orte_process_name_t daemon;
    opal_buffer_t buf;
    int rc;
    uint8_t oversub;
    char *nodename;

    OPAL_OUTPUT_VERBOSE((1, orte_nidmap_output,
                         "%s decode:nidmap decoding nodemap",
                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME)));

    /* xfer the byte object to a buffer for unpacking */
    OBJ_CONSTRUCT(&buf, opal_buffer_t);
    opal_dss.load(&buf, bo->bytes, bo->size);

    /* unpack number of nodes */
    n=1;
    if (ORTE_SUCCESS != (rc = opal_dss.unpack(&buf, &num_nodes, &n, OPAL_INT32))) {
        ORTE_ERROR_LOG(rc);
        return rc;
    }

    OPAL_OUTPUT_VERBOSE((1, orte_nidmap_output,
                         "%s decode:nidmap decoding %d nodes",
                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME), num_nodes));

    /* set the daemon jobid */
    daemon.jobid = ORTE_DAEMON_JOBID(ORTE_PROC_MY_NAME->jobid);

    num_daemons = 0;
    for (i=0; i < num_nodes; i++) {
        /* unpack the daemon vpid */
        n=1;
        if (ORTE_SUCCESS != (rc = opal_dss.unpack(&buf, &daemon.vpid, &n, ORTE_VPID))) {
            ORTE_ERROR_LOG(rc);
            return rc;
        }
        if (ORTE_VPID_INVALID != daemon.vpid) {
            ++num_daemons;
        }
        /* unpack and store the node's name */
        n=1;
        if (ORTE_SUCCESS != (rc = opal_dss.unpack(&buf, &nodename, &n, OPAL_STRING))) {
            ORTE_ERROR_LOG(rc);
            return rc;
        }
        if (ORTE_SUCCESS != (rc = orte_db.store(&daemon, ORTE_DB_HOSTNAME, nodename, OPAL_STRING))) {
            ORTE_ERROR_LOG(rc);
            return rc;
        }
        OPAL_OUTPUT_VERBOSE((2, orte_nidmap_output,
                             "%s orte:util:decode:nidmap daemon %s node %s",
                             ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                             ORTE_VPID_PRINT(daemon.vpid), nodename));
        /* if this is my daemon, then store the data for me too */
        if (daemon.vpid == ORTE_PROC_MY_DAEMON->vpid) {
            if (ORTE_SUCCESS != (rc = orte_db.store(ORTE_PROC_MY_NAME, ORTE_DB_HOSTNAME, nodename, OPAL_STRING))) {
                ORTE_ERROR_LOG(rc);
                return rc;
            }
            if (ORTE_SUCCESS != (rc = orte_db.store(ORTE_PROC_MY_NAME, ORTE_DB_DAEMON_VPID, &daemon.vpid, ORTE_VPID))) {
                ORTE_ERROR_LOG(rc);
                return rc;
            }
        }
        /* unpack and discard the oversubscribed flag - procs don't need it */
        n=1;
        if (ORTE_SUCCESS != (rc = opal_dss.unpack(&buf, &oversub, &n, OPAL_UINT8))) {
            ORTE_ERROR_LOG(rc);
            return rc;
        }
    }

    /* update num_daemons */
    orte_process_info.num_daemons = num_daemons;

    OBJ_DESTRUCT(&buf);
    return ORTE_SUCCESS;
}

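/* The daemon-side decode, in contrast, reconstructs the node pool:
 * it creates (or reuses) an orte_node_t per entry, creates the matching
 * orte_proc_t in the daemon job, cross-links the two with retained
 * references, and applies the oversubscription flag.
 */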
/* decode a nodemap for a daemon */
int orte_util_decode_daemon_nodemap(opal_byte_object_t *bo)
{
    int n;
    int32_t num_nodes, i;
    orte_vpid_t vpid;
    orte_node_t *node;
    opal_buffer_t buf;
    int rc;
    uint8_t oversub;
    char *name;
    orte_job_t *daemons;
    orte_proc_t *dptr;

    OPAL_OUTPUT_VERBOSE((1, orte_nidmap_output,
                         "%s decode:nidmap decoding daemon nodemap",
                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME)));

    /* xfer the byte object to a buffer for unpacking */
    OBJ_CONSTRUCT(&buf, opal_buffer_t);
    opal_dss.load(&buf, bo->bytes, bo->size);

    /* unpack number of nodes */
    n=1;
    if (ORTE_SUCCESS != (rc = opal_dss.unpack(&buf, &num_nodes, &n, OPAL_INT32))) {
        ORTE_ERROR_LOG(rc);
        return rc;
    }

    OPAL_OUTPUT_VERBOSE((1, orte_nidmap_output,
                         "%s decode:nidmap decoding %d nodes",
                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME), num_nodes));

    /* set the size of the node pool storage so we minimize realloc's */
    if (ORTE_SUCCESS != (rc = opal_pointer_array_set_size(orte_node_pool, num_nodes))) {
        ORTE_ERROR_LOG(rc);
        return rc;
    }

    /* transfer the data to the nodes, counting the number of
     * daemons in the system
     */
    daemons = orte_get_job_data_object(ORTE_PROC_MY_NAME->jobid);
    for (i=0; i < num_nodes; i++) {
        /* unpack the daemon vpid */
        n=1;
        if (ORTE_SUCCESS != (rc = opal_dss.unpack(&buf, &vpid, &n, ORTE_VPID))) {
            ORTE_ERROR_LOG(rc);
            return rc;
        }
        /* unpack and store the node's name */
        n=1;
        if (ORTE_SUCCESS != (rc = opal_dss.unpack(&buf, &name, &n, OPAL_STRING))) {
            ORTE_ERROR_LOG(rc);
            return rc;
        }
        /* do we already have this node? */
        if (NULL == (node = (orte_node_t*)opal_pointer_array_get_item(orte_node_pool, vpid))) {
            node = OBJ_NEW(orte_node_t);
            node->name = name;
            opal_pointer_array_set_item(orte_node_pool, vpid, node);
        } else {
            free(name);
        }
        /* unpack the oversubscribed flag */
        n=1;
        if (ORTE_SUCCESS != (rc = opal_dss.unpack(&buf, &oversub, &n, OPAL_UINT8))) {
            ORTE_ERROR_LOG(rc);
            return rc;
        }
        if (ORTE_VPID_INVALID == vpid) {
            /* no daemon on this node */
            continue;
        }
        if (NULL == (dptr = (orte_proc_t*)opal_pointer_array_get_item(daemons->procs, vpid))) {
            dptr = OBJ_NEW(orte_proc_t);
            dptr->name.jobid = ORTE_PROC_MY_NAME->jobid;
            dptr->name.vpid = vpid;
            opal_pointer_array_set_item(daemons->procs, vpid, dptr);
            daemons->num_procs++;
        }
        if (NULL != node->daemon) {
            OBJ_RELEASE(node->daemon);
        }
        OBJ_RETAIN(dptr);
        node->daemon = dptr;
        if (NULL != dptr->node) {
            OBJ_RELEASE(dptr->node);
        }
        OBJ_RETAIN(node);
        dptr->node = node;
        if (0 == oversub) {
            node->oversubscribed = false;
        } else {
            node->oversubscribed = true;
        }
    }

    orte_process_info.num_procs = daemons->num_procs;

    if (orte_process_info.max_procs < orte_process_info.num_procs) {
        orte_process_info.max_procs = orte_process_info.num_procs;
    }

    /* update num_daemons */
    orte_process_info.num_daemons = daemons->num_procs;

    if (0 < opal_output_get_verbosity(orte_nidmap_output)) {
        for (i=0; i < num_nodes; i++) {
            if (NULL == (node = (orte_node_t*)opal_pointer_array_get_item(orte_node_pool, i))) {
                continue;
            }
            opal_output(0, "%s node[%d].name %s daemon %s",
                        ORTE_NAME_PRINT(ORTE_PROC_MY_NAME), i,
                        (NULL == node->name) ? "NULL" : node->name,
                        (NULL == node->daemon) ? "NONE" : ORTE_VPID_PRINT(node->daemon->name.vpid));
        }
    }

    OBJ_DESTRUCT(&buf);
    return ORTE_SUCCESS;
}

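/* Encode the process map. For every job that has a map, the buffer
 * receives:
 *
 *   jobid
 *   number of procs in the job
 *   bind level of the map           (hwloc builds only)
 *   per-proc arrays, each packed in a single call:
 *     daemon vpid hosting the proc, local rank, node rank,
 *     bind index (hwloc builds only), proc state, app index, restarts
 */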
int orte_util_encode_pidmap(opal_byte_object_t *boptr)
{
    orte_proc_t *proc;
    opal_buffer_t buf;
    orte_local_rank_t *lrank = NULL;
    orte_node_rank_t *nrank = NULL;
    orte_job_t *jdata = NULL;
    orte_vpid_t *daemons = NULL;
    int i, j, k, rc = ORTE_SUCCESS;
#if OPAL_HAVE_HWLOC
    unsigned int *bind_idx=NULL;
#endif
    orte_proc_state_t *states=NULL;
    orte_app_idx_t *app_idx=NULL;
    int32_t *restarts=NULL;

    /* setup the working buffer */
    OBJ_CONSTRUCT(&buf, opal_buffer_t);

    for (j=1; j < orte_job_data->size; j++) {
        /* the job array is no longer left-justified and may
         * have holes in it as we recover resources at job
         * completion
         */
        if (NULL == (jdata = (orte_job_t*)opal_pointer_array_get_item(orte_job_data, j))) {
            continue;
        }
        /* if this job doesn't have a map, then it is a tool
         * and doesn't need to be included
         */
        if (NULL == jdata->map) {
            continue;
        }
        /* pack the jobid */
        if (ORTE_SUCCESS != (rc = opal_dss.pack(&buf, &jdata->jobid, 1, ORTE_JOBID))) {
            ORTE_ERROR_LOG(rc);
            goto cleanup_and_return;
        }
        /* pack the number of procs */
        if (ORTE_SUCCESS != (rc = opal_dss.pack(&buf, &jdata->num_procs, 1, ORTE_VPID))) {
            ORTE_ERROR_LOG(rc);
            goto cleanup_and_return;
        }
#if OPAL_HAVE_HWLOC
        /* pack the bind level */
        if (ORTE_SUCCESS != (rc = opal_dss.pack(&buf, &(jdata->map->bind_level), 1, OPAL_HWLOC_LEVEL_T))) {
            ORTE_ERROR_LOG(rc);
            goto cleanup_and_return;
        }
#endif

        /* allocate memory for the nodes, local ranks, node ranks, and bind_idx */
        daemons = (orte_vpid_t*)malloc(jdata->num_procs * sizeof(orte_vpid_t));
        lrank = (orte_local_rank_t*)malloc(jdata->num_procs*sizeof(orte_local_rank_t));
        nrank = (orte_node_rank_t*)malloc(jdata->num_procs*sizeof(orte_node_rank_t));
        states = (orte_proc_state_t*)malloc(jdata->num_procs*sizeof(orte_proc_state_t));
        app_idx = (orte_app_idx_t*)malloc(jdata->num_procs*sizeof(orte_app_idx_t));
        restarts = (int32_t*)malloc(jdata->num_procs*sizeof(int32_t));
#if OPAL_HAVE_HWLOC
        bind_idx = (unsigned int*)malloc(jdata->num_procs*sizeof(unsigned int));
#endif
        /* transfer and pack the node info in one pack */
        for (i=0, k=0; i < jdata->procs->size; i++) {
            if (NULL == (proc = (orte_proc_t *) opal_pointer_array_get_item(jdata->procs, i))) {
                continue;
            }
            if( k >= (int)jdata->num_procs ) {
                orte_show_help("help-orte-runtime.txt", "orte_nidmap:too_many_nodes",
                               true, jdata->num_procs);
                break;
            }
            daemons[k] = proc->node->daemon->name.vpid;
            lrank[k] = proc->local_rank;
            nrank[k] = proc->node_rank;
            states[k] = proc->state;
            app_idx[k] = proc->app_idx;
            restarts[k] = proc->restarts;
#if OPAL_HAVE_HWLOC
            bind_idx[k] = proc->bind_idx;
#endif
            ++k;
        }
        if (ORTE_SUCCESS != (rc = opal_dss.pack(&buf, daemons, jdata->num_procs, ORTE_VPID))) {
            ORTE_ERROR_LOG(rc);
            goto cleanup_and_return;
        }
        /* transfer and pack the local_ranks in one pack */
        if (ORTE_SUCCESS != (rc = opal_dss.pack(&buf, lrank, jdata->num_procs, ORTE_LOCAL_RANK))) {
            ORTE_ERROR_LOG(rc);
            goto cleanup_and_return;
        }
        /* transfer and pack the node ranks in one pack */
        if (ORTE_SUCCESS != (rc = opal_dss.pack(&buf, nrank, jdata->num_procs, ORTE_NODE_RANK))) {
            ORTE_ERROR_LOG(rc);
            goto cleanup_and_return;
        }
#if OPAL_HAVE_HWLOC
        /* transfer and pack the bind_idx in one pack */
        if (ORTE_SUCCESS != (rc = opal_dss.pack(&buf, bind_idx, jdata->num_procs, OPAL_UINT))) {
            ORTE_ERROR_LOG(rc);
            goto cleanup_and_return;
        }
#endif
        /* transfer and pack the states in one pack */
        if (ORTE_SUCCESS != (rc = opal_dss.pack(&buf, states, jdata->num_procs, ORTE_PROC_STATE))) {
            ORTE_ERROR_LOG(rc);
            goto cleanup_and_return;
        }
        /* transfer and pack the app_idx's in one pack */
        if (ORTE_SUCCESS != (rc = opal_dss.pack(&buf, app_idx, jdata->num_procs, ORTE_APP_IDX))) {
            ORTE_ERROR_LOG(rc);
            goto cleanup_and_return;
        }
        /* transfer and pack the restarts in one pack */
        if (ORTE_SUCCESS != (rc = opal_dss.pack(&buf, restarts, jdata->num_procs, OPAL_INT32))) {
            ORTE_ERROR_LOG(rc);
            goto cleanup_and_return;
        }
    }

    /* transfer the payload to the byte object */
    opal_dss.unload(&buf, (void**)&boptr->bytes, &boptr->size);

cleanup_and_return:

    if( NULL != lrank ) {
        free(lrank);
    }
    if( NULL != nrank ) {
        free(nrank);
    }
    if( NULL != daemons ) {
        free(daemons);
    }
#if OPAL_HAVE_HWLOC
    if( NULL != bind_idx ) {
        free(bind_idx);
    }
#endif
    if (NULL != states) {
        free(states);
    }
    if (NULL != app_idx) {
        free(app_idx);
    }
    if (NULL != restarts) {
        free(restarts);
    }
    OBJ_DESTRUCT(&buf);

    return rc;
}

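/* Walk the encoded pidmap job by job: for each jobid found in the
 * buffer, record the job's size and (on hwloc builds) its binding
 * level in the database before moving on to the per-proc data.
 */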
/* only APPS call this function - daemons have their own */
int orte_util_decode_pidmap(opal_byte_object_t *bo)
{
    orte_vpid_t i, num_procs, *vptr, daemon;
    orte_vpid_t *daemons=NULL;
    orte_local_rank_t *local_rank=NULL;
    orte_node_rank_t *node_rank=NULL;
#if OPAL_HAVE_HWLOC
    opal_hwloc_level_t bind_level = OPAL_HWLOC_NODE_LEVEL, pbind, *lvptr;
    unsigned int *bind_idx=NULL, pbidx, *uiptr;
    opal_hwloc_locality_t locality;
#endif
    orte_std_cntr_t n;
    opal_buffer_t buf;
    int rc;
    orte_proc_state_t *states = NULL;
    orte_app_idx_t *app_idx = NULL;
    int32_t *restarts = NULL;
    orte_process_name_t proc, dmn;
    orte_namelist_t *nm;
    opal_list_t jobs;
    char *hostname;

    /* xfer the byte object to a buffer for unpacking */
    OBJ_CONSTRUCT(&buf, opal_buffer_t);
    if (ORTE_SUCCESS != (rc = opal_dss.load(&buf, bo->bytes, bo->size))) {
        ORTE_ERROR_LOG(rc);
        goto cleanup;
    }

    n = 1;
    /* cycle through the buffer */
    OBJ_CONSTRUCT(&jobs, opal_list_t);
    while (ORTE_SUCCESS == (rc = opal_dss.unpack(&buf, &proc.jobid, &n, ORTE_JOBID))) {
        OPAL_OUTPUT_VERBOSE((2, orte_nidmap_output,
                             "%s orte:util:decode:pidmap working job %s",
                             ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                             ORTE_JOBID_PRINT(proc.jobid)));
        /* record the jobid */
        nm = OBJ_NEW(orte_namelist_t);
        nm->name.jobid = proc.jobid;
        opal_list_append(&jobs, &nm->super);

        /* unpack and store the number of procs */
        n=1;
        if (ORTE_SUCCESS != (rc = opal_dss.unpack(&buf, &num_procs, &n, ORTE_VPID))) {
            ORTE_ERROR_LOG(rc);
            goto cleanup;
        }
        proc.vpid = ORTE_VPID_INVALID;
        if (ORTE_SUCCESS != (rc = orte_db.store(&proc, ORTE_DB_NPROCS, &num_procs, ORTE_VPID))) {
            ORTE_ERROR_LOG(rc);
            goto cleanup;
        }

#if OPAL_HAVE_HWLOC
        /* unpack and store the binding level */
        n=1;
        if (ORTE_SUCCESS != (rc = opal_dss.unpack(&buf, &bind_level, &n, OPAL_HWLOC_LEVEL_T))) {
            ORTE_ERROR_LOG(rc);
            goto cleanup;
        }
        /* store it */
        proc.vpid = ORTE_VPID_INVALID;
        if (ORTE_SUCCESS != (rc = orte_db.store(&proc, ORTE_DB_BIND_LEVEL, &bind_level, OPAL_HWLOC_LEVEL_T))) {
            ORTE_ERROR_LOG(rc);
            goto cleanup;
        }
|
At long last, the fabled revision to the affinity system has arrived. A more detailed explanation of how this all works will be presented here:
https://svn.open-mpi.org/trac/ompi/wiki/ProcessPlacement
The wiki page is incomplete at the moment, but I hope to complete it over the next few days. I will provide updates on the devel list. As the wiki page states, the default and most commonly used options remain unchanged (except as noted below). New, esoteric and complex options have been added, but unless you are a true masochist, you are unlikely to use many of them beyond perhaps an initial curiosity-motivated experimentation.
In a nutshell, this commit revamps the map/rank/bind procedure to take into account topology info on the compute nodes. I have, for the most part, preserved the default behaviors, with three notable exceptions:
1. I have at long last bowed my head in submission to the system admin's of managed clusters. For years, they have complained about our default of allowing users to oversubscribe nodes - i.e., to run more processes on a node than allocated slots. Accordingly, I have modified the default behavior: if you are running off of hostfile/dash-host allocated nodes, then the default is to allow oversubscription. If you are running off of RM-allocated nodes, then the default is to NOT allow oversubscription. Flags to override these behaviors are provided, so this only affects the default behavior.
2. both cpus/rank and stride have been removed. The latter was demanded by those who didn't understand the purpose behind it - and I agreed as the users who requested it are no longer using it. The former was removed temporarily pending implementation.
3. vm launch is now the sole method for starting OMPI. It was just too darned hard to maintain multiple launch procedures - maybe someday, provided someone can demonstrate a reason to do so.
As Jeff stated, it is impossible to fully test a change of this size. I have tested it on Linux and Mac, covering all the default and simple options, singletons, and comm_spawn. That said, I'm sure others will find problems, so I'll be watching MTT results until this stabilizes.
This commit was SVN r25476.
2011-11-15 07:40:11 +04:00
|
|
|
/* set mine */
|
2012-06-27 18:53:55 +04:00
|
|
|
if (proc.jobid == ORTE_PROC_MY_NAME->jobid) {
|
2012-05-27 20:21:38 +04:00
|
|
|
orte_process_info.bind_level = bind_level;
|
|
|
|
}
|
At long last, the fabled revision to the affinity system has arrived. A more detailed explanation of how this all works will be presented here:
https://svn.open-mpi.org/trac/ompi/wiki/ProcessPlacement
The wiki page is incomplete at the moment, but I hope to complete it over the next few days. I will provide updates on the devel list. As the wiki page states, the default and most commonly used options remain unchanged (except as noted below). New, esoteric and complex options have been added, but unless you are a true masochist, you are unlikely to use many of them beyond perhaps an initial curiosity-motivated experimentation.
In a nutshell, this commit revamps the map/rank/bind procedure to take into account topology info on the compute nodes. I have, for the most part, preserved the default behaviors, with three notable exceptions:
1. I have at long last bowed my head in submission to the system admin's of managed clusters. For years, they have complained about our default of allowing users to oversubscribe nodes - i.e., to run more processes on a node than allocated slots. Accordingly, I have modified the default behavior: if you are running off of hostfile/dash-host allocated nodes, then the default is to allow oversubscription. If you are running off of RM-allocated nodes, then the default is to NOT allow oversubscription. Flags to override these behaviors are provided, so this only affects the default behavior.
2. both cpus/rank and stride have been removed. The latter was demanded by those who didn't understand the purpose behind it - and I agreed as the users who requested it are no longer using it. The former was removed temporarily pending implementation.
3. vm launch is now the sole method for starting OMPI. It was just too darned hard to maintain multiple launch procedures - maybe someday, provided someone can demonstrate a reason to do so.
As Jeff stated, it is impossible to fully test a change of this size. I have tested it on Linux and Mac, covering all the default and simple options, singletons, and comm_spawn. That said, I'm sure others will find problems, so I'll be watching MTT results until this stabilizes.
This commit was SVN r25476.
2011-11-15 07:40:11 +04:00
|
|
|
#endif
|
2012-07-04 04:04:16 +04:00
|
|
|
        OPAL_OUTPUT_VERBOSE((2, orte_nidmap_output,
                             "%s orte:util:decode:pidmap nprocs %s bind level %s",
                             ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                             ORTE_VPID_PRINT(num_procs),
                             opal_hwloc_base_print_level(bind_level)));

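        /* the daemon array is indexed by proc vpid: daemons[i] is
         * understood to be the vpid of the daemon hosting proc i of this job
         */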
        /* allocate memory for the daemon info */
        daemons = (orte_vpid_t*)malloc(num_procs * sizeof(orte_vpid_t));
        /* unpack it in one shot */
        n=num_procs;
        if (ORTE_SUCCESS != (rc = opal_dss.unpack(&buf, daemons, &n, ORTE_VPID))) {
            ORTE_ERROR_LOG(rc);
            goto cleanup;
        }

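        /* local rank is understood to be the proc's rank among its own job's
         * procs on the node, while node rank (unpacked below) spans all procs
         * on that node
         */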
        /* allocate memory for local ranks */
        local_rank = (orte_local_rank_t*)malloc(num_procs*sizeof(orte_local_rank_t));
        /* unpack them in one shot */
        n=num_procs;
        if (ORTE_SUCCESS != (rc = opal_dss.unpack(&buf, local_rank, &n, ORTE_LOCAL_RANK))) {
            ORTE_ERROR_LOG(rc);
            goto cleanup;
        }
        if (proc.jobid == ORTE_PROC_MY_NAME->jobid) {
            /* set mine */
            orte_process_info.my_local_rank = local_rank[ORTE_PROC_MY_NAME->vpid];
            if (ORTE_SUCCESS != (rc = orte_db.store(ORTE_PROC_MY_NAME, ORTE_DB_LOCALRANK,
                                                    &orte_process_info.my_local_rank, ORTE_LOCAL_RANK))) {
                ORTE_ERROR_LOG(rc);
                goto cleanup;
            }
        }

        /* allocate memory for node ranks */
        node_rank = (orte_node_rank_t*)malloc(num_procs*sizeof(orte_node_rank_t));
        /* unpack node ranks in one shot */
        n=num_procs;
        if (ORTE_SUCCESS != (rc = opal_dss.unpack(&buf, node_rank, &n, ORTE_NODE_RANK))) {
            ORTE_ERROR_LOG(rc);
            goto cleanup;
        }
        if (proc.jobid == ORTE_PROC_MY_NAME->jobid) {
            /* set mine */
            orte_process_info.my_node_rank = node_rank[ORTE_PROC_MY_NAME->vpid];
            if (ORTE_SUCCESS != (rc = orte_db.store(ORTE_PROC_MY_NAME, ORTE_DB_NODERANK,
                                                    &orte_process_info.my_node_rank, ORTE_NODE_RANK))) {
                ORTE_ERROR_LOG(rc);
                goto cleanup;
            }
        }

#if OPAL_HAVE_HWLOC
        /* allocate memory for bind_idx */
        bind_idx = (unsigned int*)malloc(num_procs*sizeof(unsigned int));
        /* unpack bind_idx in one shot */
        n=num_procs;
        if (ORTE_SUCCESS != (rc = opal_dss.unpack(&buf, bind_idx, &n, OPAL_UINT))) {
            ORTE_ERROR_LOG(rc);
            goto cleanup;
        }
        if (proc.jobid == ORTE_PROC_MY_NAME->jobid) {
            /* set mine */
            orte_process_info.bind_idx = bind_idx[ORTE_PROC_MY_NAME->vpid];
            if (ORTE_SUCCESS != (rc = orte_db.store(ORTE_PROC_MY_NAME, ORTE_DB_BIND_INDEX,
                                                    &orte_process_info.bind_idx, OPAL_UINT))) {
                ORTE_ERROR_LOG(rc);
                goto cleanup;
            }
        }
#endif

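        /* the next three arrays (states, app indices, restarts) are unpacked
         * only to advance the buffer position - apps discard them right away
         */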
        /* allocate memory for states */
        states = (orte_proc_state_t*)malloc(num_procs*sizeof(orte_proc_state_t));
        /* unpack states in one shot */
        n=num_procs;
        if (OPAL_SUCCESS != (rc = opal_dss.unpack(&buf, states, &n, ORTE_PROC_STATE))) {
            ORTE_ERROR_LOG(rc);
            goto cleanup;
        }
        /* dump this info - apps don't need it */
        free(states);
        states = NULL;

        /* allocate memory for app_idx's */
        app_idx = (orte_app_idx_t*)malloc(num_procs*sizeof(orte_app_idx_t));
        /* unpack app_idx's in one shot */
        n=num_procs;
        if (OPAL_SUCCESS != (rc = opal_dss.unpack(&buf, app_idx, &n, ORTE_APP_IDX))) {
            ORTE_ERROR_LOG(rc);
            goto cleanup;
        }
        /* dump this info - apps don't need it */
        free(app_idx);
        app_idx = NULL;

        /* allocate memory for restarts */
        restarts = (int32_t*)malloc(num_procs*sizeof(int32_t));
        /* unpack restarts in one shot */
        n=num_procs;
        if (OPAL_SUCCESS != (rc = opal_dss.unpack(&buf, restarts, &n, OPAL_INT32))) {
            ORTE_ERROR_LOG(rc);
            goto cleanup;
        }
        /* dump this info - apps don't need it */
        free(restarts);
        restarts = NULL;

        /* set the daemon jobid */
        dmn.jobid = ORTE_DAEMON_JOBID(ORTE_PROC_MY_NAME->jobid);

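        /* transfer the per-proc data into the local database, keyed by
         * process name; the loop skips our own entry, which was handled above
         */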
        /* xfer the data */
        for (i=0; i < num_procs; i++) {
            if (proc.jobid == ORTE_PROC_MY_NAME->jobid &&
                i == ORTE_PROC_MY_NAME->vpid) {
                continue;
            }
            proc.vpid = i;
            if (ORTE_SUCCESS != (rc = orte_db.store(&proc, ORTE_DB_DAEMON_VPID, &daemons[i], ORTE_VPID))) {
                ORTE_ERROR_LOG(rc);
                goto cleanup;
            }
            /* lookup and store the hostname for this proc */
            dmn.vpid = daemons[i];
            if (ORTE_SUCCESS != (rc = orte_db.fetch_pointer(&dmn, ORTE_DB_HOSTNAME, (void**)&hostname, OPAL_STRING))) {
                ORTE_ERROR_LOG(rc);
                goto cleanup;
            }
            if (ORTE_SUCCESS != (rc = orte_db.store(&proc, ORTE_DB_HOSTNAME, hostname, OPAL_STRING))) {
                ORTE_ERROR_LOG(rc);
                goto cleanup;
            }
            if (ORTE_SUCCESS != (rc = orte_db.store(&proc, ORTE_DB_LOCALRANK, &local_rank[i], ORTE_LOCAL_RANK))) {
                ORTE_ERROR_LOG(rc);
                goto cleanup;
            }
            if (ORTE_SUCCESS != (rc = orte_db.store(&proc, ORTE_DB_NODERANK, &node_rank[i], ORTE_NODE_RANK))) {
                ORTE_ERROR_LOG(rc);
                goto cleanup;
            }
#if OPAL_HAVE_HWLOC
            if (ORTE_SUCCESS != (rc = orte_db.store(&proc, ORTE_DB_BIND_INDEX, &bind_idx[i], OPAL_UINT))) {
                ORTE_ERROR_LOG(rc);
                goto cleanup;
            }
#endif
            OPAL_OUTPUT_VERBOSE((10, orte_nidmap_output,
                                 "%s orte:util:decode:pidmap proc %s host %s lrank %d nrank %d bindidx %u",
                                 ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                                 ORTE_NAME_PRINT(&proc), hostname,
                                 (int)local_rank[i], (int)node_rank[i], bind_idx[i]));
        }
        /* release data */
        free(daemons);
        daemons = NULL;
        free(local_rank);
        local_rank = NULL;
        free(node_rank);
        node_rank = NULL;
#if OPAL_HAVE_HWLOC
        free(bind_idx);
        bind_idx = NULL;
#endif
        /* setup for next cycle */
        n = 1;
    }

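    /* the unpack loop above terminates by reading past the end of the
     * buffer, so that return code is expected; anything else is an error
     */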
    if (ORTE_ERR_UNPACK_READ_PAST_END_OF_BUFFER != rc) {
        ORTE_ERROR_LOG(rc);
        goto cleanup;
    }
    rc = ORTE_SUCCESS;

    /* now that we have all the data, we are guaranteed
     * to know our own node, so go back and record the
     * locality of each proc relative to me
     */
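    /* the jobs list is assumed to have been populated earlier in this
     * routine, one entry per jobid found in the buffer
     */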
    while (NULL != (nm = (orte_namelist_t*)opal_list_remove_first(&jobs))) {
        proc.jobid = nm->name.jobid;
        /* recover the number of procs in this job */
        vptr = &num_procs;
        proc.vpid = ORTE_VPID_INVALID;
        if (ORTE_SUCCESS != (rc = orte_db.fetch(&proc, ORTE_DB_NPROCS, (void**)&vptr, ORTE_VPID))) {
            ORTE_ERROR_LOG(rc);
            goto cleanup;
        }

        for (i=0; i < num_procs; i++) {
            if (ORTE_PROC_MY_NAME->vpid == i &&
                ORTE_PROC_MY_NAME->jobid == proc.jobid) {
                /* this is me */
                continue;
            }
            proc.vpid = i;
            /* recover the daemon for this proc */
            vptr = &daemon;
            if (ORTE_SUCCESS != (rc = orte_db.fetch(&proc, ORTE_DB_DAEMON_VPID, (void**)&vptr, ORTE_VPID))) {
                ORTE_ERROR_LOG(rc);
                goto cleanup;
            }
            if (daemon == ORTE_PROC_MY_DAEMON->vpid) {
                OPAL_OUTPUT_VERBOSE((2, orte_nidmap_output,
                                     "%s orte:util:decode:pidmap proc %s shares node",
                                     ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                                     ORTE_NAME_PRINT(&proc)));
                /* we share a node, so add them to the count of peers
                 * sharing the node with me
                 */
                orte_process_info.num_local_peers++;
#if OPAL_HAVE_HWLOC
                /* retrieve the bind level for the other proc's job */
                lvptr = &pbind;
                proc.vpid = ORTE_VPID_INVALID;
                if (ORTE_SUCCESS != (rc = orte_db.fetch(&proc, ORTE_DB_BIND_LEVEL, (void**)&lvptr, OPAL_HWLOC_LEVEL_T))) {
                    ORTE_ERROR_LOG(rc);
                    goto cleanup;
                }

                /* retrieve the other proc's bind idx */
                uiptr = &pbidx;
                proc.vpid = i;
                if (ORTE_SUCCESS != (rc = orte_db.fetch(&proc, ORTE_DB_BIND_INDEX, (void**)&uiptr, OPAL_UINT))) {
                    ORTE_ERROR_LOG(rc);
                    goto cleanup;
                }

                /* we share a node - see what else we share */
                locality = opal_hwloc_base_get_relative_locality(opal_hwloc_topology,
                                                                 orte_process_info.bind_level,
                                                                 orte_process_info.bind_idx,
                                                                 pbind, pbidx);
#else
                locality = OPAL_PROC_ON_NODE;
#endif
            } else {
                /* we don't share a node */
                OPAL_OUTPUT_VERBOSE((2, orte_nidmap_output,
                                     "%s orte:util:decode:pidmap proc %s does NOT share node [my daemon %s, their daemon %s]",
                                     ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                                     ORTE_NAME_PRINT(&proc),
                                     ORTE_VPID_PRINT(ORTE_PROC_MY_DAEMON->vpid),
                                     ORTE_VPID_PRINT(daemon)));
                locality = OPAL_PROC_NON_LOCAL;
            }
            /* store the locality */
            OPAL_OUTPUT_VERBOSE((2, orte_nidmap_output,
                                 "%s orte:util:decode:pidmap set proc %s locality to %s",
                                 ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                                 ORTE_NAME_PRINT(&proc),
                                 opal_hwloc_base_print_locality(locality)));
            if (ORTE_SUCCESS != (rc = orte_db.store(&proc, ORTE_DB_LOCALITY, &locality, OPAL_HWLOC_LOCALITY_T))) {
                ORTE_ERROR_LOG(rc);
                goto cleanup;
            }
        }
    }

 cleanup:
    if (NULL != daemons) {
        free(daemons);
    }
    if (NULL != local_rank) {
        free(local_rank);
    }
    if (NULL != node_rank) {
        free(node_rank);
    }
#if OPAL_HAVE_HWLOC
    if (NULL != bind_idx) {
        free(bind_idx);
    }
#endif
    if (NULL != states) {
        free(states);
    }
    if (NULL != app_idx) {
        free(app_idx);
    }
    if (NULL != restarts) {
        free(restarts);
    }
    OBJ_DESTRUCT(&buf);
    return rc;
}

int orte_util_decode_daemon_pidmap(opal_byte_object_t *bo)
{
    orte_jobid_t jobid;
    orte_vpid_t i, num_procs;
    orte_vpid_t *nodes=NULL;
    orte_local_rank_t *local_rank=NULL;
    orte_node_rank_t *node_rank=NULL;
#if OPAL_HAVE_HWLOC
    opal_hwloc_level_t bind_level = OPAL_HWLOC_NODE_LEVEL;
    unsigned int *bind_idx=NULL;
#endif
    orte_std_cntr_t n;
    opal_buffer_t buf;
    int rc, j, k;
    orte_job_t *jdata, *daemons;
    orte_proc_t *proc, *pptr;
    orte_node_t *node, *nptr;
    orte_proc_state_t *states=NULL;
    orte_app_idx_t *app_idx=NULL;
    int32_t *restarts=NULL;
    orte_job_map_t *map;
    bool found;

    /* xfer the byte object to a buffer for unpacking */
    OBJ_CONSTRUCT(&buf, opal_buffer_t);
    if (ORTE_SUCCESS != (rc = opal_dss.load(&buf, bo->bytes, bo->size))) {
        ORTE_ERROR_LOG(rc);
        goto cleanup;
    }

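    /* this routine runs in a daemon/HNP, so the job object for our own
     * jobid is presumed to hold the daemon procs; it is used below when
     * wiring nodes to their daemons
     */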
    daemons = orte_get_job_data_object(ORTE_PROC_MY_NAME->jobid);

    n = 1;
    /* cycle through the buffer */
    while (ORTE_SUCCESS == (rc = opal_dss.unpack(&buf, &jobid, &n, ORTE_JOBID))) {
        /* see if we have this job object - could be a restart scenario */
        if (NULL == (jdata = orte_get_job_data_object(jobid))) {
            /* need to create this job */
            jdata = OBJ_NEW(orte_job_t);
            jdata->jobid = jobid;
            opal_pointer_array_set_item(orte_job_data, ORTE_LOCAL_JOBID(jobid), jdata);
        }

        /* unpack the number of procs */
        n=1;
        if (ORTE_SUCCESS != (rc = opal_dss.unpack(&buf, &num_procs, &n, ORTE_VPID))) {
            ORTE_ERROR_LOG(rc);
            goto cleanup;
        }
        jdata->num_procs = num_procs;

#if OPAL_HAVE_HWLOC
        /* unpack the binding level */
        n=1;
        if (ORTE_SUCCESS != (rc = opal_dss.unpack(&buf, &bind_level, &n, OPAL_HWLOC_LEVEL_T))) {
            ORTE_ERROR_LOG(rc);
            goto cleanup;
        }
#endif

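        /* nodes[i] is understood to be the index of the node (i.e. the vpid
         * of the daemon) hosting proc i; it is used below to look up entries
         * in orte_node_pool and in the daemon job's proc array
         */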
        /* allocate memory for the node info */
        nodes = (orte_vpid_t*)malloc(num_procs * sizeof(orte_vpid_t));
        /* unpack it in one shot */
        n=num_procs;
        if (ORTE_SUCCESS != (rc = opal_dss.unpack(&buf, nodes, &n, ORTE_VPID))) {
            ORTE_ERROR_LOG(rc);
            goto cleanup;
        }

        /* allocate memory for local ranks */
        local_rank = (orte_local_rank_t*)malloc(num_procs*sizeof(orte_local_rank_t));
        /* unpack them in one shot */
        n=num_procs;
        if (ORTE_SUCCESS != (rc = opal_dss.unpack(&buf, local_rank, &n, ORTE_LOCAL_RANK))) {
            ORTE_ERROR_LOG(rc);
            goto cleanup;
        }

        /* allocate memory for node ranks */
        node_rank = (orte_node_rank_t*)malloc(num_procs*sizeof(orte_node_rank_t));
        /* unpack node ranks in one shot */
        n=num_procs;
        if (ORTE_SUCCESS != (rc = opal_dss.unpack(&buf, node_rank, &n, ORTE_NODE_RANK))) {
            ORTE_ERROR_LOG(rc);
            goto cleanup;
        }

#if OPAL_HAVE_HWLOC
        /* allocate memory for bind_idx */
        bind_idx = (unsigned int*)malloc(num_procs*sizeof(unsigned int));
        /* unpack bind_idx in one shot */
        n=num_procs;
        if (ORTE_SUCCESS != (rc = opal_dss.unpack(&buf, bind_idx, &n, OPAL_UINT))) {
            ORTE_ERROR_LOG(rc);
            goto cleanup;
        }
#endif

        /* allocate memory for states */
        states = (orte_proc_state_t*)malloc(num_procs*sizeof(orte_proc_state_t));
        /* unpack states in one shot */
        n=num_procs;
        if (ORTE_SUCCESS != (rc = opal_dss.unpack(&buf, states, &n, ORTE_PROC_STATE))) {
            ORTE_ERROR_LOG(rc);
            goto cleanup;
        }

        /* allocate memory for app_idx's */
        app_idx = (orte_app_idx_t*)malloc(num_procs*sizeof(orte_app_idx_t));
        /* unpack app_idx's in one shot */
        n=num_procs;
        if (ORTE_SUCCESS != (rc = opal_dss.unpack(&buf, app_idx, &n, ORTE_APP_IDX))) {
            ORTE_ERROR_LOG(rc);
            goto cleanup;
        }

        /* allocate memory for restarts */
        restarts = (int32_t*)malloc(num_procs*sizeof(int32_t));
        /* unpack restarts in one shot */
        n=num_procs;
        if (ORTE_SUCCESS != (rc = opal_dss.unpack(&buf, restarts, &n, OPAL_INT32))) {
            ORTE_ERROR_LOG(rc);
            goto cleanup;
        }

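        /* walk the unpacked arrays: make sure each proc object exists in the
         * job, attach it to its node in the job map, and record its ranks,
         * app index, restart count, and state
         */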
        /* xfer the data */
        map = jdata->map;
        if (NULL == map) {
            jdata->map = OBJ_NEW(orte_job_map_t);
            map = jdata->map;
        }
        for (i=0; i < num_procs; i++) {
            if (NULL == (proc = (orte_proc_t*)opal_pointer_array_get_item(jdata->procs, i))) {
                proc = OBJ_NEW(orte_proc_t);
                proc->name.jobid = jdata->jobid;
                proc->name.vpid = i;
                opal_pointer_array_set_item(jdata->procs, i, proc);
            }
            /* lookup the node - should always be present */
            if (NULL == (node = (orte_node_t*)opal_pointer_array_get_item(orte_node_pool, nodes[i]))) {
                /* this should never happen, but protect ourselves anyway */
                node = OBJ_NEW(orte_node_t);
                /* get the daemon */
                if (NULL == (pptr = (orte_proc_t*)opal_pointer_array_get_item(daemons->procs, nodes[i]))) {
                    pptr = OBJ_NEW(orte_proc_t);
                    pptr->name.jobid = ORTE_PROC_MY_NAME->jobid;
                    pptr->name.vpid = nodes[i];
                    opal_pointer_array_set_item(daemons->procs, nodes[i], pptr);
                }
                node->daemon = pptr;
                opal_pointer_array_add(orte_node_pool, node);
            }
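            /* if the proc was previously attached to a different node
             * (e.g. after a restart), drop it from that node's proc array
             * and remove the node from the map if it is now empty
             */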
            if (NULL != proc->node) {
                if (node != proc->node) {
                    /* proc has moved - cleanup the prior node proc array */
                    for (j=0; j < proc->node->procs->size; j++) {
                        if (NULL == (pptr = (orte_proc_t*)opal_pointer_array_get_item(proc->node->procs, j))) {
                            continue;
                        }
                        if (pptr == proc) {
                            /* maintain accounting */
                            OBJ_RELEASE(pptr);
                            opal_pointer_array_set_item(proc->node->procs, j, NULL);
                            proc->node->num_procs--;
                            if (0 == proc->node->num_procs) {
                                /* remove node from the map */
                                for (k=0; k < map->nodes->size; k++) {
                                    if (NULL == (nptr = (orte_node_t*)opal_pointer_array_get_item(map->nodes, k))) {
                                        continue;
                                    }
                                    if (nptr == proc->node) {
                                        /* maintain accounting */
                                        OBJ_RELEASE(nptr);
                                        opal_pointer_array_set_item(map->nodes, k, NULL);
                                        map->num_nodes--;
                                        break;
                                    }
                                }
                            }
                            break;
                        }
                    }
                }
                OBJ_RELEASE(proc->node);
            }
            /* see if this node is already in the map */
            found = false;
            for (j=0; j < map->nodes->size; j++) {
                if (NULL == (nptr = (orte_node_t*)opal_pointer_array_get_item(map->nodes, j))) {
                    continue;
                }
                if (nptr == node) {
                    found = true;
                    break;
                }
            }
            if (!found) {
                opal_pointer_array_add(map->nodes, node);
                map->num_nodes++;
            }
            /* add the node to the proc */
            OBJ_RETAIN(node);
            proc->node = node;
            /* add the proc to the node */
            OBJ_RETAIN(proc);
            opal_pointer_array_add(node->procs, proc);
            /* update proc values */
            proc->local_rank = local_rank[i];
            proc->node_rank = node_rank[i];
            proc->app_idx = app_idx[i];
            proc->restarts = restarts[i];
            proc->state = states[i];
        }

        /* release data */
        free(nodes);
        nodes = NULL;
        free(local_rank);
        local_rank = NULL;
        free(node_rank);
        node_rank = NULL;
#if OPAL_HAVE_HWLOC
        free(bind_idx);
        bind_idx = NULL;
#endif
        free(states);
        states = NULL;
        free(app_idx);
        app_idx = NULL;
        free(restarts);
        restarts = NULL;
        /* setup for next cycle */
        n = 1;
    }

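    /* running past the end of the buffer is the normal way the unpack
     * loop ends, so treat that code as success
     */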
    if (ORTE_ERR_UNPACK_READ_PAST_END_OF_BUFFER == rc) {
        rc = ORTE_SUCCESS;
    }

 cleanup:
    if (NULL != nodes) {
        free(nodes);
    }
    if (NULL != local_rank) {
        free(local_rank);
    }
    if (NULL != node_rank) {
        free(node_rank);
    }
#if OPAL_HAVE_HWLOC
    if (NULL != bind_idx) {
        free(bind_idx);
    }
#endif
    if (NULL != states) {
        free(states);
    }
    if (NULL != app_idx) {
        free(app_idx);
    }
    if (NULL != restarts) {
        free(restarts);
    }
    OBJ_DESTRUCT(&buf);
    return rc;
}