openmpi/orte/util/proc_info.h
Ralph Castain aec5cd08bd Per the PMIx RFC:
WHAT:    Merge the PMIx branch into the devel repo, creating a new
               OPAL “pmix” framework to abstract PMI support for all RTEs.
               Replace the ORTE daemon-level collectives with a new PMIx
               server and update the ORTE grpcomm framework to support
               server-to-server collectives

WHY:      We’ve had problems dealing with variations in PMI implementations,
               and need to extend the existing PMI definitions to meet exascale
               requirements.

WHEN:   Mon, Aug 25

WHERE:  https://github.com/rhc54/ompi-svn-mirror.git

Several community members have been working on a refactoring of the current PMI support within OMPI. Although the APIs are common, Slurm and Cray implement a different range of capabilities, and package them differently. For example, Cray provides an integrated PMI-1/2 library, while Slurm separates the two and requires the user to specify the one to be used at runtime. In addition, several bugs in the Slurm implementations have caused problems requiring extra coding.

All this has led to a slew of #if’s in the PMI code and bugs when the corner-case logic for one implementation accidentally traps the other. Extending this support to other implementations would have increased this complexity to an unacceptable level.

Accordingly, we have:

* created a new OPAL “pmix” framework to abstract the PMI support, with separate components for Cray, Slurm PMI-1, and Slurm PMI-2 implementations.

* Replaced the current ORTE grpcomm daemon-based collective operation with an integrated PMIx server, and updated the grpcomm APIs to provide more flexible, multi-algorithm support for collective operations. At this time, only the xcast and allgather operations are supported.

* Replaced the current global collective id with a signature based on the names of the participating procs. This allows an unlimited number of collectives to be executed by any group of processes, subject to the requirement that only one collective can be active at a time for a unique combination of procs. Note that a proc can be involved in any number of simultaneous collectives - it is the specific combination of procs that is subject to the constraint (a small illustrative sketch follows this list).

* removed the prior OMPI/OPAL modex code

* added new macros for executing modex send/recv to simplify use of the new APIs. The send macros allow the caller to specify whether or not the BTL supports async modex operations - if so, then the non-blocking “fence” operation is used, if the active PMIx component supports it. Otherwise, the default is a full blocking modex exchange as we currently perform.

* retained the current flag that directs us to use a blocking fence operation, but only to retrieve data upon demand
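
To illustrate the signature idea from the collective-id bullet above, here is a minimal sketch (the type and field names are hypothetical, chosen for illustration rather than taken from the grpcomm code): a signature is just the list of participating proc names, and two collectives occupy the same "slot" only when those lists match exactly.

    /* hypothetical sketch - illustrative names, not the actual grpcomm API */
    #include <stdbool.h>
    #include <string.h>

    typedef struct {
        orte_process_name_t *signature;  /* the participating procs */
        size_t sz;                       /* number of procs in the signature */
    } coll_sig_t;

    /* only one collective can be active at a time for a given combination
     * of procs, so "same collective slot" means "identical proc list" */
    static bool same_procs(const coll_sig_t *a, const coll_sig_t *b)
    {
        return (a->sz == b->sz) &&
               (0 == memcmp(a->signature, b->signature,
                            a->sz * sizeof(orte_process_name_t)));
    }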

This commit was SVN r32570.
2014-08-21 18:56:47 +00:00

172 lines
7.1 KiB
C

/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2011 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2011-2012 Los Alamos National Security, LLC.
* All rights reserved.
* Copyright (c) 2013-2014 Intel, Inc. All rights reserved
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
/** @file:
*
* Populates global structure with process-specific information.
*
*
*/
#ifndef _ORTE_PROC_INFO_H_
#define _ORTE_PROC_INFO_H_
#include "orte_config.h"
#ifdef HAVE_STDINT_H
#include <stdint.h>
#endif
#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#include "orte/types.h"
#include "opal/dss/dss_types.h"
#include "opal/mca/hwloc/hwloc.h"
#include "opal/util/proc.h"
BEGIN_C_DECLS
#define ORTE_MAX_HOSTNAME_SIZE 512
typedef uint32_t orte_proc_type_t;
#define ORTE_PROC_TYPE_NONE 0x0000
#define ORTE_PROC_SINGLETON 0x0001
#define ORTE_PROC_DAEMON 0x0002
#define ORTE_PROC_HNP 0x0004
#define ORTE_PROC_TOOL 0x0008
#define ORTE_PROC_NON_MPI 0x0010
#define ORTE_PROC_MPI 0x0020
#define ORTE_PROC_APP 0x0030
#define ORTE_PROC_CM 0x0040
#define ORTE_PROC_AGGREGATOR 0x0080
#define ORTE_PROC_IOF_ENDPT 0x1000
#define ORTE_PROC_SCHEDULER 0x2000
#define ORTE_PROC_MASTER 0x4000
#define ORTE_PROC_IS_SINGLETON (ORTE_PROC_SINGLETON & orte_process_info.proc_type)
#define ORTE_PROC_IS_DAEMON (ORTE_PROC_DAEMON & orte_process_info.proc_type)
#define ORTE_PROC_IS_HNP (ORTE_PROC_HNP & orte_process_info.proc_type)
#define ORTE_PROC_IS_TOOL (ORTE_PROC_TOOL & orte_process_info.proc_type)
#define ORTE_PROC_IS_NON_MPI (ORTE_PROC_NON_MPI & orte_process_info.proc_type)
#define ORTE_PROC_IS_MPI (ORTE_PROC_MPI & orte_process_info.proc_type)
#define ORTE_PROC_IS_APP (ORTE_PROC_APP & orte_process_info.proc_type)
#define ORTE_PROC_IS_CM (ORTE_PROC_CM & orte_process_info.proc_type)
#define ORTE_PROC_IS_AGGREGATOR (ORTE_PROC_AGGREGATOR & orte_process_info.proc_type)
#define ORTE_PROC_IS_IOF_ENDPT (ORTE_PROC_IOF_ENDPT & orte_process_info.proc_type)
#define ORTE_PROC_IS_SCHEDULER (ORTE_PROC_SCHEDULER & orte_process_info.proc_type)
#define ORTE_PROC_IS_MASTER (ORTE_PROC_MASTER & orte_process_info.proc_type)
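/*
 * Usage note: proc_type is a bitmask, so a single process may carry
 * several roles at once.  For example, a process whose proc_type has
 * both ORTE_PROC_DAEMON and ORTE_PROC_HNP set will satisfy both
 * ORTE_PROC_IS_DAEMON and ORTE_PROC_IS_HNP.  Likewise, since
 * ORTE_PROC_APP is (ORTE_PROC_NON_MPI | ORTE_PROC_MPI), the
 * ORTE_PROC_IS_APP test is true for any application process, MPI or not.
 */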
/**
* Process information structure
*
* The orte_proc_info() function fills the pid field and obtains the
* process name, storing that information in the global structure. The
* structure also holds path names to the universe, job, and process
* session directories, and to the stdin, stdout, and stderr temp
* files - however, these are all initialized elsewhere.
*/
struct orte_proc_info_t {
    opal_proc_t super;
    orte_process_name_t my_name;        /**< My official process name */
    orte_process_name_t my_daemon;      /**< Name of my local daemon */
    char *my_daemon_uri;                /**< Contact info to local daemon */
    orte_process_name_t my_hnp;         /**< Name of my hnp */
    char *my_hnp_uri;                   /**< Contact info for my hnp */
    orte_process_name_t my_parent;      /**< Name of my parent (or my HNP if no parent was specified) */
    orte_process_name_t my_scheduler;   /**< name of the scheduler for this system */
    pid_t hnp_pid;                      /**< hnp pid - used if singleton */
    orte_app_idx_t app_num;             /**< our index into the app_context array */
    orte_vpid_t num_procs;              /**< number of processes in this job */
    orte_vpid_t max_procs;              /**< Maximum number of processes ever in the job */
    orte_vpid_t num_daemons;            /**< number of daemons in system */
    int num_nodes;                      /**< number of nodes in the job */
    char *nodename;                     /**< string name for this node */
    pid_t pid;                          /**< Local process ID for this process */
    orte_proc_type_t proc_type;         /**< Type of process */
    opal_buffer_t *sync_buf;            /**< buffer to store sync response */
    uint16_t my_port;                   /**< TCP port for out-of-band comm */
    int num_restarts;                   /**< number of times this proc has restarted */
    orte_node_rank_t my_node_rank;      /**< node rank */
    orte_local_rank_t my_local_rank;    /**< local rank */
    int32_t num_local_peers;            /**< number of procs from my job that share my node with me */
    /* The session directory has the form
     * <prefix>/<openmpi-sessions-user>/<jobid>/<procid>, where the prefix
     * can either be provided by the user via the
     * --tmpdir command-line flag, the use of one of several
     * environmental variables, or else a default location.
     */
    char *tmpdir_base;                  /**< Base directory of the session dir tree */
    char *top_session_dir;              /**< Top-most directory of the session tree */
    char *job_session_dir;              /**< Session directory for job */
    char *proc_session_dir;             /**< Session directory for the process */
    char *sock_stdin;                   /**< Path name to temp file for stdin. */
    char *sock_stdout;                  /**< Path name to temp file for stdout. */
    char *sock_stderr;                  /**< Path name to temp file for stderr. */
#if OPAL_HAVE_HWLOC
    char *cpuset;                       /**< String-representation of bitmap where we are bound */
#endif
    int app_rank;                       /**< rank within my app_context */
    orte_vpid_t my_hostid;              /**< identifies the local host for a coprocessor */
};
typedef struct orte_proc_info_t orte_proc_info_t;
/**
*
* Global process info descriptor. Initialized to almost no
* meaningful information - data is provided by calling \c
* orte_rte_init() (which calls \c orte_proc_info() to fill in the
* structure).
*
* The exception to this rule is the \c orte_process_info.seed field,
* which will be initialized to \c false, but should be set to \c true
* before calling \c orte_rte_info() if the caller is a seed daemon.
*/
ORTE_DECLSPEC extern orte_proc_info_t orte_process_info;
/**
* \internal
*
* Global structure to store a wide range of information about the
* process. orte_proc_info populates a global variable with
* information about the executing process. This function should
* be called only once, from orte_rte_init().
*
* @param None.
*
* @retval ORTE_SUCCESS Successfully initialized the various fields.
* @retval OMPI_ERROR Failed to initialize one or more fields.
*/
ORTE_DECLSPEC int orte_proc_info(void);
ORTE_DECLSPEC int orte_proc_info_finalize(void);
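/*
 * Illustrative usage sketch (assumed typical flow; the call site shown is
 * hypothetical): orte_rte_init() invokes orte_proc_info() once, after which
 * the global descriptor can be read directly, e.g.
 *
 *     if (ORTE_SUCCESS != orte_proc_info()) {
 *         // error - the global structure was not fully populated
 *     }
 *     printf("proc %lu of %lu on node %s\n",
 *            (unsigned long)orte_process_info.my_name.vpid,
 *            (unsigned long)orte_process_info.num_procs,
 *            orte_process_info.nodename);
 *     ...
 *     orte_proc_info_finalize();  // assumed to release storage held by the structure
 */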
END_C_DECLS
#endif