
This change contains a non-mandatory modification of the MPI-RTE interface. Anyone wishing to support coprocessors such as the Xeon Phi may wish to add the required definition and underlying support.

****************************************************************

Add locality support for coprocessors such as the Intel Xeon Phi.

Detecting that we are on a coprocessor inside of a host node isn't straightforward. There are no good "hooks" provided for programmatically detecting that "we are on a coprocessor running its own OS", and the ORTE daemon just thinks it is on another node. However, in order to properly use the Phi's public interface for MPI transport, the daemon must detect that it is colocated with procs on the host.

So we have to split the locality to separately record "on the same host" vs "on the same board". We already have the board-level locality flag, but not quite enough flexibility to handle this use-case. Thus, do the following:

1. Add an OPAL_PROC_ON_HOST flag to indicate that we share a host, but not necessarily the same board.

2. Modify OPAL_PROC_ON_NODE to indicate that we share both a host AND the same board. Note that we have to modify the OPAL_PROC_ON_LOCAL_NODE macro to explicitly check both conditions (see the sketch following this message).

3. Add support in opal/mca/hwloc/base/hwloc_base_util.c for the host to check for coprocessors, and for daemons to check whether they are on a coprocessor. The former is done via hwloc, but support for the latter is not yet provided by hwloc, so the code for detecting that we are on a coprocessor is currently Xeon Phi specific - hopefully, we will find more generic methods in the future.

4. Modify the orted and HNP startup so they check for coprocessors and whether they themselves are on a coprocessor, and have the orteds pass that info back in their callback message. Automatically detect that coprocessors have been found and identify which coprocessors are on which hosts. Note that this algorithm isn't scalable at the moment - this will hopefully be improved over time.

5. Modify the ompi proc locality detection function to look for coprocessor host info IF the OMPI_RTE_HOST_ID database key has been defined (a usage sketch appears after the header below). RTEs that choose not to provide this support do not have to do anything - the associated code will simply be ignored.

6. Include some cleanup of the hwloc open/close code so it conforms to how we did things in other frameworks (e.g., having a single "frame" file instead of open/close). Also, fix the locality flags - e.g., being on the same node means you must also be on the same cluster/CU, so ensure those flags are also set.

cmr:v1.7.4:reviewer=hjelmn

This commit was SVN r29435.
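The flag split in items 1 and 2 can be pictured with a minimal sketch. The flag names come from the commit message itself, but the typedef width and the bit values shown here are hypothetical placeholders, not the definitions in the OPAL tree:

#include <stdint.h>

typedef uint16_t opal_hwloc_locality_t;   /* hypothetical width */

#define OPAL_PROC_ON_CLUSTER  0x0001      /* share a cluster */
#define OPAL_PROC_ON_CU       0x0002      /* share a computing unit */
#define OPAL_PROC_ON_HOST     0x0004      /* share a host, but not necessarily a board */
#define OPAL_PROC_ON_BOARD    0x0008      /* share a board */

/* "on node" now requires sharing both the host AND the board */
#define OPAL_PROC_ON_NODE     (OPAL_PROC_ON_HOST | OPAL_PROC_ON_BOARD)

/* the macro must explicitly check both conditions */
#define OPAL_PROC_ON_LOCAL_NODE(n)  (((n) & OPAL_PROC_ON_NODE) == OPAL_PROC_ON_NODE)

With this layout, a daemon on a Xeon Phi can set OPAL_PROC_ON_HOST for procs on its host CPU without setting OPAL_PROC_ON_BOARD, so OPAL_PROC_ON_LOCAL_NODE() correctly reports them as non-local at the board level. And per item 6, code that sets OPAL_PROC_ON_NODE should also set the cluster/CU bits, since sharing a node implies sharing those as well.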
/*
 * Copyright (c) 2012-2013 Los Alamos National Security, LLC.
 *                         All rights reserved.
 * Copyright (c) 2013      Intel, Inc. All rights reserved
 *
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 *
 * When this component is used, this file is included in the rest of
 * the OPAL/ORTE/OMPI code base via ompi/mca/rte/rte.h. As such,
 * this header represents the public interface to this static component.
 */

#ifndef MCA_OMPI_RTE_ORTE_H
#define MCA_OMPI_RTE_ORTE_H

#include "ompi_config.h"
#include "ompi/constants.h"

#include "ompi/info/info.h"

#include "orte/types.h"
#include "orte/mca/errmgr/errmgr.h"
#include "orte/mca/grpcomm/grpcomm.h"
#include "orte/mca/rml/base/rml_contact.h"
#include "orte/mca/rml/rml.h"
#include "orte/mca/routed/routed.h"
#include "orte/runtime/orte_data_server.h"
#include "orte/runtime/runtime.h"
#include "orte/util/name_fns.h"
#include "orte/util/proc_info.h"

BEGIN_C_DECLS

/* Process name objects and operations */
typedef orte_process_name_t ompi_process_name_t;
typedef orte_jobid_t ompi_jobid_t;
typedef orte_vpid_t ompi_vpid_t;
typedef orte_ns_cmp_bitmask_t ompi_rte_cmp_bitmask_t;
#define OMPI_PROC_MY_NAME ORTE_PROC_MY_NAME
#define OMPI_NAME_PRINT(a) ORTE_NAME_PRINT(a)
#define ompi_rte_compare_name_fields(a, b, c) orte_util_compare_name_fields(a, b, c)
#define OMPI_NAME_WILDCARD ORTE_NAME_WILDCARD
#define OMPI_NODE_RANK_INVALID ORTE_NODE_RANK_INVALID
#define OMPI_LOCAL_RANK_INVALID ORTE_LOCAL_RANK_INVALID
#define OMPI_RTE_CMP_JOBID ORTE_NS_CMP_JOBID
#define OMPI_RTE_CMP_VPID ORTE_NS_CMP_VPID
#define OMPI_RTE_CMP_ALL ORTE_NS_CMP_ALL
#define ompi_rte_hash_name(a) orte_util_hash_name(a)
/* This is the DSS tag to serialize a proc name */
#define OMPI_NAME ORTE_NAME
#define OMPI_PROCESS_NAME_HTON ORTE_PROCESS_NAME_HTON
#define OMPI_PROCESS_NAME_NTOH ORTE_PROCESS_NAME_NTOH
#define OMPI_RTE_NODE_ID ORTE_DB_DAEMON_VPID
#define OMPI_RTE_MY_NODEID ORTE_PROC_MY_DAEMON->vpid
#define OMPI_RTE_HOST_ID ORTE_DB_HOSTID

/* Collective objects and operations */
#define ompi_rte_collective_t orte_grpcomm_collective_t
typedef orte_grpcomm_coll_id_t ompi_rte_collective_id_t;
#define ompi_rte_modex(a) orte_grpcomm.modex(a)
#define ompi_rte_barrier(a) orte_grpcomm.barrier(a)

/* Process info struct and values */
typedef orte_node_rank_t ompi_node_rank_t;
typedef orte_local_rank_t ompi_local_rank_t;
#define ompi_process_info orte_process_info
#define ompi_rte_proc_is_bound orte_proc_is_bound

/* Error handling objects and operations */
OMPI_DECLSPEC void ompi_rte_abort(int error_code, char *fmt, ...);
#define ompi_rte_abort_peers(a, b, c) orte_errmgr.abort_peers(a, b, c)
#define OMPI_RTE_ERRHANDLER_FIRST ORTE_ERRMGR_CALLBACK_FIRST
#define OMPI_RTE_ERRHANDLER_LAST ORTE_ERRMGR_CALLBACK_LAST
#define OMPI_RTE_ERRHANDLER_PREPEND ORTE_ERRMGR_CALLBACK_PREPEND
#define OMPI_RTE_ERRHANDLER_APPEND ORTE_ERRMGR_CALLBACK_APPEND
typedef orte_error_t ompi_rte_error_report_t;
#define ompi_rte_register_errhandler(a, b) orte_errmgr.register_error_callback(a, b)
#define OMPI_ERROR_LOG ORTE_ERROR_LOG

/* Init and finalize objects and operations */
#define ompi_rte_init(a, b) orte_init(a, b, ORTE_PROC_MPI)
#define ompi_rte_finalize() orte_finalize()
OMPI_DECLSPEC void ompi_rte_wait_for_debugger(void);

/* Database operations */
OMPI_DECLSPEC int ompi_rte_db_store(const ompi_process_name_t *nm, const char* key,
                                    const void *data, opal_data_type_t type);
OMPI_DECLSPEC int ompi_rte_db_fetch(const ompi_process_name_t *nm,
                                    const char *key,
                                    void **data, opal_data_type_t type);
OMPI_DECLSPEC int ompi_rte_db_fetch_pointer(const ompi_process_name_t *nm,
                                            const char *key,
                                            void **data, opal_data_type_t type);
OMPI_DECLSPEC int ompi_rte_db_fetch_multiple(const ompi_process_name_t *nm,
                                             const char *key,
                                             opal_list_t *kvs);
OMPI_DECLSPEC int ompi_rte_db_remove(const ompi_process_name_t *nm,
                                     const char *key);
#define OMPI_DB_HOSTNAME ORTE_DB_HOSTNAME
#define OMPI_DB_LOCALITY ORTE_DB_LOCALITY

/* Communications */
typedef orte_rml_tag_t ompi_rml_tag_t;
#define ompi_rte_send_buffer_nb(a, b, c, d, e) orte_rml.send_buffer_nb(a, b, c, d, e)
#define ompi_rte_recv_buffer_nb(a, b, c, d, e) orte_rml.recv_buffer_nb(a, b, c, d, e)
#define ompi_rte_recv_cancel(a, b) orte_rml.recv_cancel(a, b)
#define ompi_rte_parse_uris(a, b, c) orte_rml_base_parse_uris(a, b, c)
#define ompi_rte_send_cbfunc orte_rml_send_callback

/* Communication tags */
/* carry over the INVALID def */
#define OMPI_RML_TAG_INVALID ORTE_RML_TAG_INVALID
/* define a starting point to avoid conflicts */
#define OMPI_RML_TAG_BASE ORTE_RML_TAG_MAX

#define OMPI_RML_PERSISTENT ORTE_RML_PERSISTENT
#define OMPI_RML_NON_PERSISTENT ORTE_RML_NON_PERSISTENT

END_C_DECLS

#endif /* MCA_OMPI_RTE_ORTE_H */
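As a usage note for item 5 of the commit message: an RTE that defines OMPI_RTE_HOST_ID lets the proc locality code compare host IDs through the database interface declared above. The sketch below is illustrative only - the helper name, the uint32_t host-id type, and the OPAL_UINT32 data-type tag are assumptions, not the actual ompi/proc implementation:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical helper: does a peer share our physical host? */
static bool peer_on_same_host(const ompi_process_name_t *peer,
                              uint32_t my_hostid)
{
#ifdef OMPI_RTE_HOST_ID
    uint32_t *peer_hostid = NULL;

    /* fetch the peer's host id; OPAL_UINT32 is an assumed type tag */
    if (OMPI_SUCCESS == ompi_rte_db_fetch_pointer(peer, OMPI_RTE_HOST_ID,
                                                  (void**)&peer_hostid,
                                                  OPAL_UINT32)) {
        return (NULL != peer_hostid && *peer_hostid == my_hostid);
    }
#endif
    /* RTEs that do not define OMPI_RTE_HOST_ID skip this check entirely */
    return false;
}

A true result here corresponds to the host-level locality of items 1 and 2: the peer shares our physical host, even if it sits on a different board (e.g., a Xeon Phi coprocessor inside the host).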