openmpi/orte/util/proc_info.h
Ralph Castain 6310361532 At long last, the fabled revision to the affinity system has arrived. A more detailed explanation of how this all works will be presented here:
https://svn.open-mpi.org/trac/ompi/wiki/ProcessPlacement

The wiki page is incomplete at the moment, but I hope to complete it over the next few days. I will provide updates on the devel list. As the wiki page states, the default and most commonly used options remain unchanged (except as noted below). New, esoteric, and complex options have been added, but unless you are a true masochist, you are unlikely to use many of them beyond perhaps some initial curiosity-driven experimentation.

In a nutshell, this commit revamps the map/rank/bind procedure to take into account topology info on the compute nodes. I have, for the most part, preserved the default behaviors, with three notable exceptions:

1. I have at long last bowed my head in submission to the system admins of managed clusters. For years, they have complained about our default of allowing users to oversubscribe nodes - i.e., to run more processes on a node than allocated slots. Accordingly, I have modified the default behavior: if you are running from hostfile/dash-host allocated nodes, the default is to allow oversubscription. If you are running from RM-allocated nodes, the default is to NOT allow oversubscription. Flags to override these behaviors are provided, so this only affects the default behavior.

2. Both cpus/rank and stride have been removed. The latter was demanded by those who didn't understand the purpose behind it - and I agreed to remove it, as the users who requested it are no longer using it. The former was removed temporarily, pending implementation.

3. VM launch is now the sole method for starting OMPI. It was just too darned hard to maintain multiple launch procedures - maybe someday that will change, provided someone can demonstrate a reason to do so.

As Jeff stated, it is impossible to fully test a change of this size. I have tested it on Linux and Mac, covering all the default and simple options, singletons, and comm_spawn. That said, I'm sure others will find problems, so I'll be watching MTT results until this stabilizes.

This commit was SVN r25476.
2011-11-15 03:40:11 +00:00

164 lines
6.4 KiB
C

/*
 * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation. All rights reserved.
 * Copyright (c) 2004-2011 The University of Tennessee and The University
 *                         of Tennessee Research Foundation. All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart. All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */
/** @file:
 *
 * Populates global structure with process-specific information.
 *
 */
#ifndef _ORTE_PROC_INFO_H_
#define _ORTE_PROC_INFO_H_
#include "orte_config.h"
#ifdef HAVE_STDINT_H
#include <stdint.h>
#endif
#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#include "orte/types.h"
#include "opal/dss/dss_types.h"
#include "opal/mca/hwloc/hwloc.h"
BEGIN_C_DECLS
#define ORTE_MAX_HOSTNAME_SIZE 512
typedef uint32_t orte_proc_type_t;
#define ORTE_PROC_TYPE_NONE 0x0000
#define ORTE_PROC_SINGLETON 0x0001
#define ORTE_PROC_DAEMON 0x0002
#define ORTE_PROC_HNP 0x0004
#define ORTE_PROC_TOOL 0x0008
#define ORTE_PROC_NON_MPI 0x0010
#define ORTE_PROC_MPI 0x0020
#define ORTE_PROC_APP 0x0030
#define ORTE_PROC_CM 0x0040
#define ORTE_PROC_IOF_ENDPT 0x1000
#define ORTE_PROC_SCHEDULER 0x2000
#define ORTE_PROC_IS_SINGLETON (ORTE_PROC_SINGLETON & orte_process_info.proc_type)
#define ORTE_PROC_IS_DAEMON (ORTE_PROC_DAEMON & orte_process_info.proc_type)
#define ORTE_PROC_IS_HNP (ORTE_PROC_HNP & orte_process_info.proc_type)
#define ORTE_PROC_IS_TOOL (ORTE_PROC_TOOL & orte_process_info.proc_type)
#define ORTE_PROC_IS_NON_MPI (ORTE_PROC_NON_MPI & orte_process_info.proc_type)
#define ORTE_PROC_IS_MPI (ORTE_PROC_MPI & orte_process_info.proc_type)
#define ORTE_PROC_IS_APP (ORTE_PROC_APP & orte_process_info.proc_type)
#define ORTE_PROC_IS_CM (ORTE_PROC_CM & orte_process_info.proc_type)
#define ORTE_PROC_IS_IOF_ENDPT (ORTE_PROC_IOF_ENDPT & orte_process_info.proc_type)
#define ORTE_PROC_IS_SCHEDULER (ORTE_PROC_SCHEDULER & orte_process_info.proc_type)
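/*
 * Illustrative sketch: proc_type is a bit mask, so the ORTE_PROC_IS_xxx
 * macros above can be combined with ordinary boolean logic once
 * orte_process_info has been populated. For example, a daemon that is
 * not the HNP (i.e., a remote orted) could be detected as:
 *
 *   if (ORTE_PROC_IS_DAEMON && !ORTE_PROC_IS_HNP) {
 *       // handle remote daemon case
 *   }
 *
 * Note that ORTE_PROC_APP (0x0030) covers both the MPI (0x0020) and
 * NON_MPI (0x0010) bits, so ORTE_PROC_IS_APP is true for either kind of
 * application process.
 */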
/**
 * Process information structure
 *
 * The orte_proc_info() function fills the pid field and obtains the
 * process name, storing that information in the global structure. The
 * structure also holds path names to the universe, job, and process
 * session directories, and to the stdin, stdout, and stderr temp
 * files - however, these are all initialized elsewhere.
 */
struct orte_proc_info_t {
    orte_process_name_t my_name;     /**< My official process name */
    orte_process_name_t my_daemon;   /**< Name of my local daemon */
    char *my_daemon_uri;             /**< Contact info to local daemon */
    orte_process_name_t my_hnp;      /**< Name of my hnp */
    char *my_hnp_uri;                /**< Contact info for my hnp */
    orte_process_name_t my_parent;   /**< Name of my parent (or my HNP if no parent was specified) */
    pid_t hnp_pid;                   /**< hnp pid - used if singleton */
    orte_app_idx_t app_num;          /**< our index into the app_context array */
    orte_vpid_t num_procs;           /**< number of processes in this job */
    orte_vpid_t max_procs;           /**< Maximum number of processes ever in the job */
    orte_vpid_t num_daemons;         /**< number of daemons in system */
    int num_nodes;                   /**< number of nodes in the job */
    char *nodename;                  /**< string name for this node */
    pid_t pid;                       /**< Local process ID for this process */
    orte_proc_type_t proc_type;      /**< Type of process */
    opal_buffer_t *sync_buf;         /**< buffer to store sync response */
    uint16_t my_port;                /**< TCP port for out-of-band comm */
    int32_t num_restarts;            /**< number of times this proc has restarted */
    orte_node_rank_t my_node_rank;   /**< node rank */
    /* The session directory has the form
     * <prefix>/<openmpi-sessions-user>/<jobid>/<procid>, where the prefix
     * can either be provided by the user via the
     * --tmpdir command-line flag, the use of one of several
     * environmental variables, or else a default location.
     */
    char *tmpdir_base;               /**< Base directory of the session dir tree */
    char *top_session_dir;           /**< Top-most directory of the session tree */
    char *job_session_dir;           /**< Session directory for job */
    char *proc_session_dir;          /**< Session directory for the process */
    char *sock_stdin;                /**< Path name to temp file for stdin. */
    char *sock_stdout;               /**< Path name to temp file for stdout. */
    char *sock_stderr;               /**< Path name to temp file for stderr. */
#if OPAL_HAVE_HWLOC
    opal_hwloc_level_t bind_level;   /**< topology level at which this proc is bound */
    unsigned int bind_idx;           /**< index of the object to which this proc is bound */
#endif
    /* name/instance info for debug support */
    char *job_name;
    char *job_instance;
    char *executable;
    int32_t app_rank;
};
typedef struct orte_proc_info_t orte_proc_info_t;
/**
 *
 * Global process info descriptor. Initialized to almost no
 * meaningful information - data is provided by calling \c
 * orte_rte_init() (which calls \c orte_proc_info() to fill in the
 * structure).
 *
 * The exception to this rule is the \c orte_process_info.seed field,
 * which will be initialized to \c false, but should be set to \c true
 * before calling \c orte_rte_init() if the caller is a seed daemon.
 */
ORTE_DECLSPEC extern orte_proc_info_t orte_process_info;
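/*
 * Illustrative usage sketch (not from this header): once orte_proc_info()
 * has run, callers typically read fields directly from the global.
 * Assumes orte/util/name_fns.h is included for ORTE_NAME_PRINT and that
 * the opal output system is initialized:
 *
 *   if (ORTE_SUCCESS == orte_proc_info()) {
 *       opal_output(0, "proc %s on node %s (pid %lu)",
 *                   ORTE_NAME_PRINT(&orte_process_info.my_name),
 *                   orte_process_info.nodename,
 *                   (unsigned long)orte_process_info.pid);
 *   }
 */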
/**
 * \internal
 *
 * Global structure to store a wide range of information about the
 * process. orte_proc_info() populates the global variable with
 * information about the executing process. This function should
 * be called only once, from orte_rte_init().
 *
 * @param None.
 *
 * @retval ORTE_SUCCESS Successfully initialized the various fields.
 * @retval OMPI_ERROR Failed to initialize one or more fields.
 */
ORTE_DECLSPEC int orte_proc_info(void);
ORTE_DECLSPEC int orte_proc_info_finalize(void);
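/*
 * Lifecycle sketch (illustrative, assuming the usual RTE startup path):
 * orte_proc_info() is called once during initialization and paired with
 * orte_proc_info_finalize() at shutdown to release what was set up in
 * orte_process_info:
 *
 *   int rc = orte_proc_info();
 *   if (ORTE_SUCCESS != rc) {
 *       return rc;
 *   }
 *   // ... run ...
 *   orte_proc_info_finalize();
 */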
END_C_DECLS
#endif