http://www.open-mpi.org/community/lists/devel/2014/05/14838.php

Remove stale component

This commit was SVN r31917.
Ralph Castain 2014-06-01 17:03:03 +00:00
parent 8736a1c138
commit 9305756276
10 changed files with 0 additions and 1616 deletions

ompi/mca/rte/pmi/Makefile.am

@@ -1,45 +0,0 @@
#
# Copyright (c) 2011 Cisco Systems, Inc. All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
AM_CPPFLAGS = $(rte_pmi_CPPFLAGS)
# Make the output library in this directory, and name it either
# mca_<type>_<name>.la (for DSO builds) or libmca_<type>_<name>.la
# (for static builds).
if MCA_BUILD_ompi_rte_pmi_DSO
component_noinst =
component_install = mca_rte_pmi.la
else
component_noinst = libmca_rte_pmi.la
component_install =
endif
local_sources = \
rte_pmi.h \
rte_pmi_internal.h \
rte_pmi_coll.c \
rte_pmi_component.c \
rte_pmi_name.c \
rte_pmi_error.c \
rte_pmi_db.c \
rte_pmi_comm.c
mcacomponentdir = $(ompilibdir)
mcacomponent_LTLIBRARIES = $(component_install)
mca_rte_pmi_la_SOURCES = $(local_sources)
mca_rte_pmi_la_LDFLAGS = -module -avoid-version $(rte_pmi_LDFLAGS)
mca_rte_pmi_la_LIBADD = $(rte_pmi_LIBS) \
$(OMPI_TOP_BUILDDIR)/orte/mca/common/pmi/libmca_common_pmi.la
noinst_LTLIBRARIES = $(component_noinst)
libmca_rte_pmi_la_SOURCES = $(local_sources)
libmca_rte_pmi_la_LIBADD = $(rte_pmi_LIBS)
libmca_rte_pmi_la_LDFLAGS = -module -avoid-version $(rte_pmi_LDFLAGS)

ompi/mca/rte/pmi/configure.m4

@@ -1,43 +0,0 @@
# -*- shell-script -*-
#
# Copyright (c) 2011 Cisco Systems, Inc. All rights reserved.
# Copyright (c) 2013 Sandia National Laboratories. All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
# set our priority to be low (want to be after ORTE)
AC_DEFUN([MCA_ompi_rte_pmi_PRIORITY], [1])
# Force this component to compile in static-only mode
AC_DEFUN([MCA_ompi_rte_pmi_COMPILE_MODE], [
AC_MSG_CHECKING([for MCA component $2:$3 compile mode])
$4="static"
AC_MSG_RESULT([$$4])
])
# If component was selected, $1 will be 1 and we should set the base header
AC_DEFUN([MCA_ompi_rte_pmi_POST_CONFIG],[
AS_IF([test "$1" = "1"], [ompi_rte_base_include="pmi/rte_pmi.h"])
])dnl
#
# MCA_ompi_rte_pmi_CONFIG([action-if-found], [action-if-not-found])
# -----------------------------------------------------------
AC_DEFUN([MCA_ompi_rte_pmi_CONFIG], [
AC_CONFIG_FILES([ompi/mca/rte/pmi/Makefile])
OPAL_CHECK_PMI([rte_pmi], [rte_pmi_good=1], [rte_pmi_good=0])
# Evaluate succeed / fail
AS_IF([test "$rte_pmi_good" = 1],
[$1],
[$2])
# set build flags to use in makefile
AC_SUBST([rte_pmi_CPPFLAGS])
AC_SUBST([rte_pmi_LDFLAGS])
AC_SUBST([rte_pmi_LIBS])
])

ompi/mca/rte/pmi/rte_pmi.h

@@ -1,187 +0,0 @@
/*
* Copyright (c) 2012-2013 Los Alamos National Security, LLC.
* All rights reserved.
* Copyright (c) 2013 Sandia National Laboratories. All rights reserved.
* Copyright (c) 2014 Intel, Inc. All rights reserved
*
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*
*/
#ifndef MCA_OMPI_RTE_PMI_H
#define MCA_OMPI_RTE_PMI_H
#include "opal/dss/dss_types.h"
#include "opal/class/opal_pointer_array.h"
struct ompi_proc_t;
BEGIN_C_DECLS
/* Process name objects and operations */
typedef uint32_t ompi_jobid_t;
typedef uint32_t ompi_vpid_t;
struct ompi_process_name_t {
ompi_jobid_t jobid;
ompi_vpid_t vpid;
};
typedef struct ompi_process_name_t ompi_process_name_t;
#define OMPI_PROCESS_NAME_HTON(n) \
do { \
n.jobid = htonl(n.jobid); \
n.vpid = htonl(n.vpid); \
} while (0)
#define OMPI_PROCESS_NAME_NTOH(n) \
do { \
n.jobid = ntohl(n.jobid); \
n.vpid = ntohl(n.vpid); \
} while (0)
typedef int ompi_rte_cmp_bitmask_t;
OMPI_DECLSPEC extern ompi_process_name_t ompi_rte_my_process_name;
#define OMPI_PROC_MY_NAME (&ompi_rte_my_process_name)
OMPI_DECLSPEC char* ompi_rte_print_process_name(const ompi_process_name_t *name);
#define OMPI_NAME_PRINT(a) ompi_rte_print_process_name(a)
OMPI_DECLSPEC int ompi_rte_compare_name_fields(ompi_rte_cmp_bitmask_t fields,
const ompi_process_name_t *name1,
const ompi_process_name_t *name2);
#define OMPI_RTE_CMP_JOBID 0x01
#define OMPI_RTE_CMP_VPID 0x02
#define OMPI_RTE_CMP_ALL 0x03
OMPI_DECLSPEC extern ompi_process_name_t ompi_rte_wildcard_process_name;
#define OMPI_NAME_WILDCARD (&ompi_rte_wildcard_process_name)
OMPI_DECLSPEC uint64_t ompi_rte_hash_name(const ompi_process_name_t *name);
#define OMPI_NAME (OPAL_DSS_ID_DYNAMIC + 25)
/* Collective objects and operations */
struct ompi_rte_collective_t {
opal_object_t super;
int id;
bool active;
};
typedef struct ompi_rte_collective_t ompi_rte_collective_t;
OBJ_CLASS_DECLARATION(ompi_rte_collective_t);
OMPI_DECLSPEC int ompi_rte_modex(ompi_rte_collective_t *coll);
OMPI_DECLSPEC int ompi_rte_barrier(ompi_rte_collective_t *coll);
/* Process info struct and values */
typedef int ompi_node_rank_t;
typedef int ompi_local_rank_t;
struct ompi_process_info_t {
int app_num;
pid_t pid;
ompi_vpid_t num_procs;
ompi_local_rank_t my_local_rank;
ompi_node_rank_t my_node_rank;
ompi_node_rank_t num_local_peers;
char *my_hnp_uri;
int peer_modex;
int peer_init_barrier;
int peer_fini_barrier;
char *job_session_dir;
char *proc_session_dir;
char nodename[100]; /* BWB: FIX ME: This really needs to be a rational constant */
char *cpuset;
};
typedef struct ompi_process_info_t ompi_process_info_t;
#define OMPI_LOCAL_RANK_INVALID (-1)
#define OMPI_NODE_RANK_INVALID (-1)
OMPI_DECLSPEC extern ompi_process_info_t ompi_process_info;
OMPI_DECLSPEC extern bool ompi_rte_proc_is_bound;
/* Error handling objects and operations */
OMPI_DECLSPEC void ompi_rte_abort(int error_code, char *fmt, ...);
OMPI_DECLSPEC int ompi_rte_abort_peers(ompi_process_name_t *procs, size_t nprocs, int status);
OMPI_DECLSPEC int ompi_rte_error_log(const char *file, int line,
const char *func, int ret);
#define OMPI_ERROR_LOG(ret) ompi_rte_error_log(__FILE__, __LINE__, __func__, ret)
struct ompi_rte_error_report_t {
int errcode;
};
typedef struct ompi_rte_error_report_t ompi_rte_error_report_t;
#define OMPI_RTE_ERRHANDLER_LAST 0
void ompi_rte_register_errhandler(int (*)(opal_pointer_array_t*), int);
/* Init and finalize objects and operations */
OMPI_DECLSPEC int ompi_rte_init(int *argc, char ***argv);
OMPI_DECLSPEC int ompi_rte_finalize(void);
OMPI_DECLSPEC void ompi_rte_wait_for_debugger(void);
/* Database operations */
struct opal_buffer_t;
OMPI_DECLSPEC int ompi_rte_db_store(const ompi_process_name_t *proc,
const char *key,
const void *data,
opal_data_type_t type);
OMPI_DECLSPEC int ompi_rte_db_fetch(const struct ompi_proc_t *proc,
const char *key,
void **data,
opal_data_type_t type);
OMPI_DECLSPEC int ompi_rte_db_fetch_pointer(const struct ompi_proc_t *proc,
const char *key,
void **data,
opal_data_type_t type);
#define OMPI_DB_HOSTNAME "ompi.hostname"
#define OMPI_DB_LOCALITY "ompi.locality"
/* Communications */
typedef int ompi_rml_tag_t;
OMPI_DECLSPEC void ompi_rte_send_cbfunc(int, ompi_process_name_t*,
opal_buffer_t*, ompi_rml_tag_t,
void*);
OMPI_DECLSPEC int ompi_rte_send_buffer_nb(const ompi_process_name_t *peer,
struct opal_buffer_t *buffer,
ompi_rml_tag_t tag,
void (*cbfunc)(int, ompi_process_name_t*,
opal_buffer_t*, ompi_rml_tag_t,
void*),
void *cbdata);
OMPI_DECLSPEC int ompi_rte_recv_buffer_nb(const ompi_process_name_t *peer,
ompi_rml_tag_t tag,
int flags,
void (*cbfunc)(int, ompi_process_name_t*,
opal_buffer_t*, ompi_rml_tag_t,
void*),
void *cbdata);
OMPI_DECLSPEC int ompi_rte_recv_cancel(const ompi_process_name_t *peer,
ompi_rml_tag_t tag);
OMPI_DECLSPEC int ompi_rte_parse_uris(const char* contact_info,
ompi_process_name_t *peer,
char ***uris);
/* Communication tags */
/* carry over the INVALID def */
#define OMPI_RML_TAG_INVALID -1
/* define a starting point to avoid conflicts */
#define OMPI_RML_TAG_BASE 0
#define OMPI_RML_PERSISTENT true
#define OMPI_RML_NON_PERSISTENT false
/* BWB: FIX ME: THis is not the right way to do this... */
#define ORTE_ERR_NO_MATCH_YET OMPI_ERROR
#define OMPI_RTE_NODE_ID "rte.nodeid"
#define OMPI_RTE_MY_NODEID 0
END_C_DECLS
#endif /* MCA_OMPI_RTE_PMI_H */
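
The OMPI_PROCESS_NAME_HTON / OMPI_PROCESS_NAME_NTOH macros above fix the wire format for process names: two uint32_t fields byte-swapped to network order before packing. A standalone sketch of that convention (plain C, not OMPI code; the name_t type and the jobid/vpid values are made up for illustration):

/* Standalone sketch: the process-name wire format implied by the
 * OMPI_PROCESS_NAME_HTON/NTOH macros above. */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

typedef struct { uint32_t jobid; uint32_t vpid; } name_t;

#define NAME_HTON(n) do { (n).jobid = htonl((n).jobid); (n).vpid = htonl((n).vpid); } while (0)
#define NAME_NTOH(n) do { (n).jobid = ntohl((n).jobid); (n).vpid = ntohl((n).vpid); } while (0)

int main(void)
{
    name_t n = { .jobid = 42, .vpid = 7 };   /* made-up values */

    NAME_HTON(n);   /* convert to network order before packing/sending */
    NAME_NTOH(n);   /* convert back to host order after receipt */

    /* same "[jobid, vpid]" form that ompi_rte_print_process_name produces */
    printf("[%u, %u]\n", (unsigned int) n.jobid, (unsigned int) n.vpid);
    return 0;
}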

ompi/mca/rte/pmi/rte_pmi_coll.c

@@ -1,62 +0,0 @@
/*
* Copyright (c) 2013 Sandia National Laboratories. All rights reserved.
*
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*
*/
#include "ompi_config.h"
#include <stdio.h>
#include "opal/mca/common/pmi/common_pmi.h"
#include "opal/threads/tsd.h"
#include "ompi/constants.h"
#include "ompi/mca/rte/rte.h"
#include "rte_pmi.h"
static void
coll_construct(ompi_rte_collective_t *coll)
{
coll->id = 0;
coll->active = false;
}
OBJ_CLASS_INSTANCE(ompi_rte_collective_t, opal_object_t, coll_construct, NULL);
int ompi_rte_modex(ompi_rte_collective_t *coll)
{
int len, ret;
char *kvs;
len = mca_common_pmi_kvslen();
kvs = malloc(len);
if (NULL == kvs) {
return OMPI_ERR_OUT_OF_RESOURCE;
}
mca_common_pmi_kvsname(kvs, len);
return mca_common_pmi_commit(kvs);
}
int
ompi_rte_barrier(ompi_rte_collective_t *coll)
{
int ret;
ret = mca_common_pmi_barrier();
if (OPAL_SUCCESS != ret)
return OMPI_ERROR;
coll->active = false;
return OMPI_SUCCESS;
}

ompi/mca/rte/pmi/rte_pmi_comm.c

@@ -1,68 +0,0 @@
/*
* Copyright (c) 2013 Sandia National Laboratories. All rights reserved.
*
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*
*/
#include "ompi_config.h"
#include "opal/threads/tsd.h"
#include "ompi/constants.h"
#include "ompi/mca/rte/rte.h"
#include "rte_pmi.h"
#include "rte_pmi_internal.h"
void ompi_rte_send_cbfunc(int status, ompi_process_name_t* sender,
opal_buffer_t* buf, ompi_rml_tag_t tag,
void* cbdata)
{
}
int
ompi_rte_send_buffer_nb(const ompi_process_name_t *peer,
struct opal_buffer_t *buffer,
ompi_rml_tag_t tag,
void (*cbfunc)(int, ompi_process_name_t*,
opal_buffer_t*, ompi_rml_tag_t,
void*),
void *cbdata)
{
return OMPI_ERR_NOT_SUPPORTED;
}
int
ompi_rte_recv_buffer_nb(const ompi_process_name_t *peer,
ompi_rml_tag_t tag,
int flags,
void (*cbfunc)(int, ompi_process_name_t*,
opal_buffer_t*, ompi_rml_tag_t,
void*),
void *cbdata)
{
return OMPI_ERR_NOT_SUPPORTED;
}
int
ompi_rte_recv_cancel(const ompi_process_name_t *peer,
ompi_rml_tag_t tag)
{
return OMPI_ERR_NOT_SUPPORTED;
}
int
ompi_rte_parse_uris(const char* contact_info,
ompi_process_name_t *peer,
char ***uris)
{
return OMPI_ERR_NOT_SUPPORTED;
}

ompi/mca/rte/pmi/rte_pmi_component.c

@@ -1,213 +0,0 @@
/*
* Copyright (c) 2013 Sandia National Laboratories. All rights reserved.
*
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*
*/
#include "ompi_config.h"
#include <sys/types.h>
#include <unistd.h>
#include <stdio.h>
#include "opal/runtime/opal_params.h"
#include "opal/mca/common/pmi/common_pmi.h"
#include "opal/mca/hwloc/base/base.h"
#include "opal/runtime/opal.h"
#include "ompi/constants.h"
#include "ompi/mca/rte/rte.h"
#include "ompi/debuggers/debuggers.h"
#include "rte_pmi.h"
#include "rte_pmi_internal.h"
/*
* Public string showing the component version number
*/
const char *ompi_rte_pmi_component_version_string =
"OMPI pmi rte MCA component version " OMPI_VERSION;
/*
* Instantiate the public struct with all of our public information
* and pointers to our public functions in it
*/
const ompi_rte_component_t mca_rte_pmi_component = {
{
OMPI_RTE_BASE_VERSION_1_0_0,
/* Component name and version */
"pmi",
OMPI_MAJOR_VERSION,
OMPI_MINOR_VERSION,
OMPI_RELEASE_VERSION,
/* Component open and close functions */
},
{
/* The component is checkpoint ready */
MCA_BASE_METADATA_PARAM_CHECKPOINT
}
};
ompi_process_info_t ompi_process_info;
bool ompi_rte_proc_is_bound = false;
int
ompi_rte_init(int *argc, char ***argv)
{
int tmp, i, rank, size, ret;
int *node_ranks;
char *node_info;
hwloc_obj_t root;
hwloc_cpuset_t boundset, rootset;
char *tmp_str, *error;
// Initialize PMI
int rc = mca_common_pmi_init (opal_pmi_version);
if ( OPAL_SUCCESS != rc ) {
return rc;
}
/* be kind, set line buffering */
setvbuf(stdout, NULL, _IONBF, 0);
ret = opal_init(argc, argv);
if (OMPI_SUCCESS != ret) {
return ret;
}
// Setup job name
tmp = mca_common_pmi_appnum();
ompi_rte_my_process_name.jobid = tmp;
ompi_process_info.app_num = ompi_rte_my_process_name.jobid;
ompi_process_info.pid = getpid();
// Setup rank information
rank = mca_common_pmi_rank();
ompi_rte_my_process_name.vpid = rank;
// Setup process groups size
size = mca_common_pmi_size();
ompi_process_info.num_procs = size;
rc = mca_common_pmi_local_info(rank, &node_ranks, &tmp, &error);
if( OPAL_SUCCESS != rc ){
// FIX ME: maybe we somehow should use error message to
// help user understand the reason of failure?
return rc;
}
ompi_process_info.num_local_peers = tmp;
for (i = 0 ; i < ompi_process_info.num_local_peers ; ++i) {
if (rank == node_ranks[i]) {
ompi_process_info.my_local_rank = i;
ompi_process_info.my_node_rank = i;
break;
}
}
ompi_process_info.my_hnp_uri = NULL;
ompi_process_info.peer_modex = 0;
ompi_process_info.peer_init_barrier = 0;
ompi_process_info.peer_fini_barrier = 0;
ompi_process_info.job_session_dir = NULL; /* BWB: FIX ME */
ompi_process_info.proc_session_dir = NULL; /* BWB: FIX ME */
gethostname(ompi_process_info.nodename, sizeof(ompi_process_info.nodename));
ompi_process_info.cpuset = NULL;
/* setup hwloc */
if (NULL == opal_hwloc_topology) {
if (OPAL_SUCCESS != (ret = opal_hwloc_base_get_topology())) {
return ret;
}
}
root = hwloc_get_root_obj(opal_hwloc_topology);
/* get our bindings */
rootset = opal_hwloc_base_get_available_cpus(opal_hwloc_topology, root);
boundset = hwloc_bitmap_alloc();
if (hwloc_get_cpubind(opal_hwloc_topology, boundset,
HWLOC_CPUBIND_PROCESS) >= 0) {
/* we are bound if the two cpusets are not equal, or if there
is only ONE PU available to us */
if (0 != hwloc_bitmap_compare(boundset, rootset) ||
opal_hwloc_base_single_cpu(rootset) ||
opal_hwloc_base_single_cpu(boundset)) {
hwloc_bitmap_list_asprintf(&ompi_process_info.cpuset, boundset);
ompi_rte_proc_is_bound = true;
}
}
hwloc_bitmap_free(boundset);
ret = ompi_rte_pmi_name_init();
if (OMPI_SUCCESS != ret) return ret;
ret = ompi_rte_pmi_db_init();
if (OMPI_SUCCESS != ret) return ret;
/* Fill in things the attributes want to know... */
tmp = mca_common_pmi_universe();
asprintf(&tmp_str, "%d", tmp);
setenv("OMPI_UNIVERSE_SIZE", tmp_str, 1);
free(tmp_str);
/* BWB: FIX ME: Why is info looking at this instead of ompi_process_info.num_procs? */
asprintf(&tmp_str, "%d", ompi_process_info.num_procs);
setenv("OMPI_MCA_orte_ess_num_procs", tmp_str, 1);
free(tmp_str);
if (NULL != (tmp_str = (char*)hwloc_obj_get_info_by_name(root, "CPUType"))) {
setenv("OMPI_MCA_orte_cpu_type", tmp_str, 1);
}
asprintf(&node_info, "%s,%d",
ompi_process_info.nodename,
ompi_process_info.my_local_rank);
ret = ompi_rte_db_store(OMPI_PROC_MY_NAME, OMPI_DB_RTE_INFO, node_info, OPAL_STRING);
if (OMPI_SUCCESS != ret) return ret;
free(node_info);
return OMPI_SUCCESS;
}
int
ompi_rte_finalize(void)
{
ompi_rte_pmi_db_fini();
ompi_rte_pmi_name_fini();
mca_common_pmi_finalize();
opal_finalize();
return OMPI_SUCCESS;
}
void
ompi_rte_wait_for_debugger(void)
{
if (1 != MPIR_being_debugged) {
return;
}
/* if we are being debugged, then we need to find
* the correct plug-ins
*/
ompi_debugger_setup_dlls();
/* spin until debugger attaches and releases us */
while (MPIR_debug_gate == 0) {
#if defined(HAVE_USLEEP)
usleep(100000); /* microseconds */
#else
sleep(1); /* seconds */
#endif
}
}
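
The binding check in ompi_rte_init above compares the process cpuset against the cpus available at the topology root, and also treats a single-PU allocation as bound. The same test can be reproduced with plain hwloc; this is only a sketch, using hwloc_topology_get_allowed_cpuset and hwloc_bitmap_weight as stand-ins for the opal_hwloc_base_get_available_cpus and opal_hwloc_base_single_cpu helpers called above:

/* Standalone sketch of the "are we bound?" test; build: cc sketch.c -lhwloc */
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <hwloc.h>

int main(void)
{
    hwloc_topology_t topo;
    hwloc_topology_init(&topo);
    hwloc_topology_load(topo);

    hwloc_const_cpuset_t rootset = hwloc_topology_get_allowed_cpuset(topo);
    hwloc_cpuset_t boundset = hwloc_bitmap_alloc();
    bool bound = false;

    if (hwloc_get_cpubind(topo, boundset, HWLOC_CPUBIND_PROCESS) >= 0) {
        /* bound if the two cpusets differ, or if only one PU is available */
        if (!hwloc_bitmap_isequal(boundset, rootset) ||
            1 == hwloc_bitmap_weight(rootset) ||
            1 == hwloc_bitmap_weight(boundset)) {
            char *list = NULL;
            hwloc_bitmap_list_asprintf(&list, boundset);
            printf("bound to PUs %s\n", list);
            free(list);
            bound = true;
        }
    }
    if (!bound) {
        printf("not bound\n");
    }

    hwloc_bitmap_free(boundset);
    hwloc_topology_destroy(topo);
    return 0;
}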

ompi/mca/rte/pmi/rte_pmi_db.c

@@ -1,672 +0,0 @@
/*
* Copyright (c) 2013 Sandia National Laboratories. All rights reserved.
*
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*
*/
#include "ompi_config.h"
#include <stdio.h>
#include "opal/mca/common/pmi/common_pmi.h"
#include "opal/util/argv.h"
#include "opal/util/output.h"
#include "opal/dss/dss.h"
#include "opal/threads/tsd.h"
#include "opal/mca/hwloc/hwloc.h"
#include "opal/mca/hwloc/base/base.h"
#include "ompi/constants.h"
#include "ompi/mca/rte/rte.h"
#include "ompi/mca/rte/base/base.h"
#include "ompi/proc/proc.h"
#include "rte_pmi.h"
#include "rte_pmi_internal.h"
#define OMPI_PMI_PAD 10
/* Local variables */
static char *pmi_kvs_name = NULL;
static int pmi_vallen_max = -1;
static int pmi_keylen_max = -1;
static opal_pointer_array_t local_data;
/* local data storage */
typedef struct {
opal_object_t super;
char *nodename;
ompi_node_rank_t node_rank;
} local_data_t;
static void ld_con(local_data_t *ptr)
{
ptr->nodename = NULL;
}
static void ld_des(local_data_t *ptr)
{
if (NULL != ptr->nodename) {
free(ptr->nodename);
}
}
OBJ_CLASS_INSTANCE(local_data_t,
opal_object_t,
ld_con, ld_des);
/* Because Cray uses PMI2 extensions for some, but not all,
* PMI functions, we define a set of wrappers for those
* common functions we will use
*/
static int kvs_put(const char *key, const char *value)
{
return mca_common_pmi_put(pmi_kvs_name, key, value);
}
static int kvs_get(const char *key, char *value, int valuelen)
{
return mca_common_pmi_get(pmi_kvs_name, key, value, valuelen);
}
static int setup_pmi(void)
{
int max_length, rc;
pmi_vallen_max = mca_common_pmi_vallen();
max_length = mca_common_pmi_kvslen();
pmi_keylen_max = mca_common_pmi_keylen();
pmi_kvs_name = (char*)malloc(max_length);
if (NULL == pmi_kvs_name) {
return OMPI_ERR_OUT_OF_RESOURCE;
}
rc = mca_common_pmi_kvsname(pmi_kvs_name, max_length);
if( OPAL_SUCCESS != rc ){
return rc;
}
return OMPI_SUCCESS;
}
static char* setup_key(const ompi_process_name_t *name, const char *key)
{
char *pmi_kvs_key;
if (pmi_keylen_max <= asprintf(&pmi_kvs_key, "%s-%s",
OMPI_NAME_PRINT(name), key)) {
free(pmi_kvs_key);
return NULL;
}
return pmi_kvs_key;
}
static inline unsigned char pmi_base64_encsym (unsigned char value) {
assert (value < 64);
if (value < 26) {
return 'A' + value;
} else if (value < 52) {
return 'a' + (value - 26);
} else if (value < 62) {
return '0' + (value - 52);
}
return (62 == value) ? '+' : '/';
}
static inline unsigned char pmi_base64_decsym (unsigned char value) {
if ('+' == value) {
return 62;
} else if ('/' == value) {
return 63;
} else if (' ' == value) {
return 64;
} else if (value <= '9') {
return (value - '0') + 52;
} else if (value <= 'Z') {
return (value - 'A');
} else if (value <= 'z') {
return (value - 'a') + 26;
}
return 64;
}
static inline void pmi_base64_encode_block (unsigned char in[3], unsigned char out[4], int len) {
out[0] = pmi_base64_encsym (in[0] >> 2);
out[1] = pmi_base64_encsym (((in[0] & 0x03) << 4) | ((in[1] & 0xf0) >> 4));
/* Cray PMI doesn't allow = in PMI attributes so pad with spaces */
out[2] = 1 < len ? pmi_base64_encsym(((in[1] & 0x0f) << 2) | ((in[2] & 0xc0) >> 6)) : ' ';
out[3] = 2 < len ? pmi_base64_encsym(in[2] & 0x3f) : ' ';
}
static inline int pmi_base64_decode_block (unsigned char in[4], unsigned char out[3]) {
char in_dec[4];
in_dec[0] = pmi_base64_decsym (in[0]);
in_dec[1] = pmi_base64_decsym (in[1]);
in_dec[2] = pmi_base64_decsym (in[2]);
in_dec[3] = pmi_base64_decsym (in[3]);
out[0] = in_dec[0] << 2 | in_dec[1] >> 4;
if (64 == in_dec[2]) {
return 1;
}
out[1] = in_dec[1] << 4 | in_dec[2] >> 2;
if (64 == in_dec[3]) {
return 2;
}
out[2] = ((in_dec[2] << 6) & 0xc0) | in_dec[3];
return 3;
}
static int pmi_encode(char *outdata, const void *val, size_t vallen) {
unsigned char *tmp = (unsigned char*)outdata;
size_t i;
/* check for size */
if ((size_t)pmi_vallen_max < (2 + vallen * 4) / 3 + 1) {
return OMPI_ERR_BAD_PARAM;
}
for (i = 0 ; i < vallen ; i += 3, tmp += 4) {
pmi_base64_encode_block((unsigned char *) val + i, tmp, vallen - i);
}
tmp[0] = (unsigned char)'\0';
return OMPI_SUCCESS;
}
static uint8_t* pmi_decode (char *data, size_t *retlen) {
size_t input_len = strlen (data) / 4;
unsigned char *ret, *val;
int out_len;
size_t i;
ret = calloc (1, 3 * input_len + 1);
if (NULL == ret) {
return ret;
}
val = (unsigned char *) data;
for (i = 0, out_len = 0 ; i < input_len ; i++, val += 4) {
out_len += pmi_base64_decode_block(val, ret + 3 * i);
}
ret[out_len] = '\0';
*retlen = out_len;
return ret;
}
static char* fetch_string(const char *key)
{
char *tmp_val, *ptr, *tmpkey;
int i, nsections;
char *data;
/* create our sandbox */
tmp_val = (char*)malloc(pmi_vallen_max * sizeof(char));
/* the first section of the string has the original key, so fetch it */
if (OPAL_SUCCESS != kvs_get(key, tmp_val, pmi_vallen_max)) {
OMPI_ERROR_LOG(OMPI_ERR_NOT_FOUND);
free(tmp_val);
return NULL;
}
opal_output_verbose(5, ompi_rte_base_framework.framework_output,
"%s db:pmi:fetch_string: received key %s DATA %s",
OMPI_NAME_PRINT(OMPI_PROC_MY_NAME),
key, tmp_val);
/* the data in this section was prepended with the number of sections
* required to hold the entire string - get it
*/
ptr = strchr(tmp_val, ':');
*ptr = '\0';
nsections = strtol(tmp_val, NULL, 10);
/* save the actual data */
ptr++;
data = strdup(ptr);
/* get any remaining sections */
for (i=1; i < nsections; i++) {
/* create the key */
asprintf(&tmpkey, "%s:%d", key, i);
/* fetch it */
if (OPAL_SUCCESS != kvs_get(tmpkey, tmp_val, pmi_vallen_max)) {
OMPI_ERROR_LOG(OMPI_ERR_NOT_FOUND);
free(tmp_val);
free(tmpkey);
free(data);
return NULL;
}
opal_output_verbose(5, ompi_rte_base_framework.framework_output,
"%s db:pmi:fetch_string: received key %s DATA %s",
OMPI_NAME_PRINT(OMPI_PROC_MY_NAME),
tmpkey, tmp_val);
/* add it to our data */
asprintf(&ptr, "%s%s", data, tmp_val);
free(data);
data = ptr;
/* cleanup */
free(tmpkey);
}
/* cleanup */
free(tmp_val);
return data;
}
static local_data_t* fetch_rtedat(const ompi_process_name_t *proc)
{
local_data_t *pdat;
char *pmikey, **fields;
char *tmp_val;
/* see if we already fetched the data for this proc */
if (NULL != (pdat = (local_data_t*)opal_pointer_array_get_item(&local_data, proc->vpid))) {
return pdat;
}
/* nope - go get it and break it down */
if (NULL == (pmikey = setup_key(proc, OMPI_DB_RTE_INFO))) {
OMPI_ERROR_LOG(OMPI_ERR_BAD_PARAM);
return NULL;
}
if (NULL == (tmp_val = fetch_string(pmikey))) {
OMPI_ERROR_LOG(OMPI_ERR_NOT_FOUND);
return NULL;
}
/* split on commas */
fields = opal_argv_split(tmp_val, ',');
free(tmp_val);
/* sanity check */
if (2 != opal_argv_count(fields)) {
OMPI_ERROR_LOG(OMPI_ERR_BAD_PARAM);
return NULL;
}
/* setup the data object */
pdat = OBJ_NEW(local_data_t);
/* next is the hostname */
pdat->nodename = strdup(fields[0]);
/* node rank */
pdat->node_rank = strtoul(fields[1], NULL, 10);
/* insert into the right place */
opal_pointer_array_set_item(&local_data, proc->vpid, pdat);
/* cleanup */
opal_argv_free(fields);
return pdat;
}
int
ompi_rte_pmi_db_init(void)
{
int rc;
if (OMPI_SUCCESS != (rc = setup_pmi())) {
OMPI_ERROR_LOG(rc);
}
OBJ_CONSTRUCT(&local_data, opal_pointer_array_t);
opal_pointer_array_init(&local_data, 1, INT_MAX, 2);
return rc;
}
void
ompi_rte_pmi_db_fini(void)
{
int i;
local_data_t *pdat;
if (NULL != pmi_kvs_name) {
free(pmi_kvs_name);
pmi_kvs_name = NULL;
}
for (i=0; i < local_data.size; i++) {
if (NULL != (pdat = (local_data_t*)opal_pointer_array_get_item(&local_data, i))) {
OBJ_RELEASE(pdat);
}
}
OBJ_DESTRUCT(&local_data);
}
int
ompi_rte_db_store(const ompi_process_name_t *proc,
const char *key,
const void *data,
opal_data_type_t type)
{
int i, rc;
char *pmidata, *str;
int64_t i64;
uint64_t ui64;
opal_byte_object_t *bo;
char *pmikey, *tmpkey, *tmp, sav;
char **strdata=NULL;
opal_output_verbose(5, ompi_rte_base_framework.framework_output,
"%s db:pmi:store: storing key %s[%s] for proc %s",
OMPI_NAME_PRINT(OMPI_PROC_MY_NAME),
key, opal_dss.lookup_data_type(type), OMPI_NAME_PRINT(proc));
if (NULL == (pmikey = setup_key(OMPI_PROC_MY_NAME, key))) {
OMPI_ERROR_LOG(OMPI_ERR_BAD_PARAM);
return OMPI_ERR_BAD_PARAM;
}
switch (type) {
case OPAL_STRING:
str = (char*)data;
while (pmi_vallen_max < (int)(OMPI_PMI_PAD + strlen(str))) {
/* the string is too long, so we need to break it into
* multiple sections
*/
tmp = str + pmi_vallen_max - OMPI_PMI_PAD;
sav = *tmp;
*tmp = '\0';
opal_argv_append_nosize(&strdata, str);
*tmp = sav;
str = tmp;
}
/* put whatever remains on the stack */
opal_argv_append_nosize(&strdata, str);
/* the first value we put uses the original key, but
* the data is prepended with the number of sections
* required to hold the entire string
*/
asprintf(&pmidata, "%d:%s", opal_argv_count(strdata), strdata[0]);
opal_output_verbose(5, ompi_rte_base_framework.framework_output,
"%s db:pmi:store: storing key %s data %s",
OMPI_NAME_PRINT(OMPI_PROC_MY_NAME),
pmikey, pmidata);
if ( OPAL_SUCCESS != (rc = kvs_put(pmikey, pmidata))) {
free(pmidata);
free(pmikey);
opal_argv_free(strdata);
return rc;
}
free(pmidata);
/* for each remaining segment, augment the key with the index */
for (i=1; NULL != strdata[i]; i++) {
asprintf(&tmpkey, "%s:%d", pmikey, i);
opal_output_verbose(5, ompi_rte_base_framework.framework_output,
"%s db:pmi:store: storing key %s data %s",
OMPI_NAME_PRINT(OMPI_PROC_MY_NAME),
pmikey, strdata[i]);
if (OPAL_SUCCESS != (rc = kvs_put(tmpkey, strdata[i]))) {
free(pmikey);
opal_argv_free(strdata);
return rc;
}
free(tmpkey);
}
free(pmikey);
opal_argv_free(strdata);
return OMPI_SUCCESS;
case OPAL_INT:
i64 = (int64_t)(*((int*)data));
asprintf(&pmidata, "%ld", (long)i64);
break;
case OPAL_INT32:
i64 = (int64_t)(*((int32_t*)data));
asprintf(&pmidata, "%ld", (long)i64);
break;
case OPAL_INT64:
i64 = *((int64_t*)data);
asprintf(&pmidata, "%ld", (long)i64);
break;
case OPAL_UINT64:
ui64 = *((uint64_t*)data);
asprintf(&pmidata, "%lu", (unsigned long)ui64);
break;
case OPAL_UINT32:
ui64 = (uint64_t)(*((uint32_t*)data));
asprintf(&pmidata, "%lu", (unsigned long)ui64);
break;
case OPAL_UINT16:
ui64 = (uint64_t)(*((uint16_t*)data));
asprintf(&pmidata, "%lu", (unsigned long)ui64);
break;
case OPAL_BYTE_OBJECT:
bo = (opal_byte_object_t*)data;
pmidata = (char*)malloc(pmi_vallen_max*sizeof(char));
if (OMPI_SUCCESS != (rc = pmi_encode(pmidata, bo->bytes, bo->size))) {
OMPI_ERROR_LOG(rc);
free(pmidata);
return rc;
}
break;
default:
OMPI_ERROR_LOG(OMPI_ERR_NOT_SUPPORTED);
return OMPI_ERR_NOT_SUPPORTED;
}
opal_output_verbose(5, ompi_rte_base_framework.framework_output,
"%s PUTTING KEY %s DATA %s",
OMPI_NAME_PRINT(OMPI_PROC_MY_NAME),
pmikey, pmidata);
rc = kvs_put(pmikey, pmidata);
if (OPAL_SUCCESS != rc) {
return rc;
}
free(pmidata);
free(pmikey);
return OMPI_SUCCESS;
}
int
ompi_rte_db_fetch(const struct ompi_proc_t *pptr,
const char *key,
void **data,
opal_data_type_t type)
{
local_data_t *pdat;
opal_byte_object_t *boptr;
uint16_t ui16;
uint32_t ui32;
int ival;
unsigned int uival;
char *pmikey;
char tmp_val[1024];
opal_hwloc_locality_t locality;
size_t sval;
ompi_process_name_t *proc;
proc = &((ompi_proc_t*)pptr)->proc_name;
opal_output_verbose(5, ompi_rte_base_framework.framework_output,
"%s db:pmi:fetch: searching for key %s[%s] on proc %s",
OMPI_NAME_PRINT(OMPI_PROC_MY_NAME),
(NULL == key) ? "NULL" : key,
opal_dss.lookup_data_type(type),
OMPI_NAME_PRINT(proc));
/* if the key is NULL, that is an error */
if (NULL == key) {
OMPI_ERROR_LOG(OMPI_ERR_BAD_PARAM);
return OMPI_ERR_BAD_PARAM;
}
/* a few keys are consolidated to reduce the number of entries being
* pushed to PMI. This is an unfortunate requirement when running at
* scale on a Cray as the default max number of keys is set too low.
* See the corresponding entry in orte/mca/grpcomm/pmi where the
* consolidation occurs.
*/
if (0 == strcmp(key, OMPI_DB_HOSTNAME)) {
if (NULL == (pdat = fetch_rtedat(proc))) {
return OMPI_ERR_NOT_FOUND;
}
*data = strdup(pdat->nodename);
return OMPI_SUCCESS;
} else if (0 == strcmp(key, OMPI_DB_NODERANK)) {
if (NULL == (pdat = fetch_rtedat(proc))) {
return OMPI_ERR_NOT_FOUND;
}
memcpy(*data, &pdat->node_rank, sizeof(ompi_node_rank_t));
return OMPI_SUCCESS;
}
/* if it is the locality key, then compute that value as it
* isn't something that gets pushed to PMI
*/
if (0 == strcmp(key, OMPI_DB_LOCALITY)) {
if (proc->jobid == OMPI_PROC_MY_NAME->jobid &&
proc->vpid == OMPI_PROC_MY_NAME->vpid) {
/* if this is for myself, then set locality to all */
locality = OPAL_PROC_ALL_LOCAL;
} else {
if (NULL == (pdat = fetch_rtedat(proc))) {
return OMPI_ERR_NOT_FOUND;
}
if (0 != strcmp(pdat->nodename, ompi_process_info.nodename)) {
/* this is on a different node, then mark as non-local */
locality = OPAL_PROC_NON_LOCAL;
} else {
/* BWB: FiX ME: Might want to say more than on node... */
locality = OPAL_PROC_ON_NODE;
}
}
memcpy(*data, &locality, sizeof(opal_hwloc_locality_t));
opal_output_verbose(5, ompi_rte_base_framework.framework_output,
"%s db:pmi:fetch: done searching for key %s[%s] on proc %s",
OMPI_NAME_PRINT(OMPI_PROC_MY_NAME),
(NULL == key) ? "NULL" : key,
opal_dss.lookup_data_type(type),
OMPI_NAME_PRINT(proc));
return OMPI_SUCCESS;
}
/* setup the key */
if (NULL == (pmikey = setup_key(proc, key))) {
OMPI_ERROR_LOG(OMPI_ERR_BAD_PARAM);
return OMPI_ERR_BAD_PARAM;
}
/* if it isn't an RTE key, then check to see if they are looking for a string */
if (OPAL_STRING == type) {
/* might have been passed in multiple sections */
*data = fetch_string(pmikey);
free(pmikey);
return OMPI_SUCCESS;
}
/* otherwise, retrieve the pmi keyval */
if (NULL == (pmikey = setup_key(proc, key))) {
OMPI_ERROR_LOG(OMPI_ERR_BAD_PARAM);
return OMPI_ERR_BAD_PARAM;
}
if (OPAL_SUCCESS != kvs_get(pmikey, tmp_val, pmi_vallen_max)) {
OMPI_ERROR_LOG(OMPI_ERR_NOT_FOUND);
free(pmikey);
return OMPI_ERR_NOT_FOUND;
}
free(pmikey);
/* return the value according to the provided type */
switch (type) {
case OPAL_UINT32:
ui32 = (uint32_t)strtoul(tmp_val, NULL, 10);
memcpy(*data, &ui32, sizeof(uint32_t));
break;
case OPAL_UINT16:
ui16 = (uint16_t)strtoul(tmp_val, NULL, 10);
memcpy(*data, &ui16, sizeof(uint16_t));
break;
case OPAL_INT:
ival = (int)strtol(tmp_val, NULL, 10);
memcpy(*data, &ival, sizeof(int));
break;
case OPAL_UINT:
uival = (unsigned int)strtoul(tmp_val, NULL, 10);
memcpy(*data, &uival, sizeof(unsigned int));
break;
case OPAL_BYTE_OBJECT:
boptr = (opal_byte_object_t*)malloc(sizeof(opal_byte_object_t));
boptr->bytes = (uint8_t*)pmi_decode(tmp_val, &sval);
boptr->size = sval;
*data = boptr;
break;
default:
OMPI_ERROR_LOG(OMPI_ERR_NOT_SUPPORTED);
return OMPI_ERR_NOT_SUPPORTED;
}
opal_output_verbose(5, ompi_rte_base_framework.framework_output,
"%s db:pmi:fetch: done searching for key %s[%s] on proc %s",
OMPI_NAME_PRINT(OMPI_PROC_MY_NAME),
(NULL == key) ? "NULL" : key,
opal_dss.lookup_data_type(type),
OMPI_NAME_PRINT(proc));
return OMPI_SUCCESS;
}
int
ompi_rte_db_fetch_pointer(const struct ompi_proc_t *pptr,
const char *key,
void **data,
opal_data_type_t type)
{
local_data_t *pdat;
ompi_process_name_t *proc;
proc = &((ompi_proc_t*)pptr)->proc_name;
opal_output_verbose(5, ompi_rte_base_framework.framework_output,
"%s db:pmi:fetch_pointer: searching for key %s on proc %s",
OMPI_NAME_PRINT(OMPI_PROC_MY_NAME),
(NULL == key) ? "NULL" : key, OMPI_NAME_PRINT(proc));
/* if the key is NULL, that is an error */
if (NULL == key) {
OMPI_ERROR_LOG(OMPI_ERR_BAD_PARAM);
return OMPI_ERR_BAD_PARAM;
}
/* we only support hostname for now */
if (0 != strcmp(key, OMPI_DB_HOSTNAME)) {
return OMPI_ERR_NOT_SUPPORTED;
}
if (NULL == (pdat = fetch_rtedat(proc))) {
return OMPI_ERR_NOT_FOUND;
}
*data = pdat->nodename;
return OMPI_SUCCESS;
}
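
Two conventions in the db code above deserve a note: values longer than the PMI limit are split across keys named "key", "key:1", "key:2", ... with the first chunk prefixed by the section count, and binary blobs are base64-encoded with trailing spaces instead of '=' because Cray PMI rejects '=' in attribute values. The following standalone sketch (not part of the removed files; function names and the test blob are illustrative, and the last input block is copied into a zero-padded temporary instead of being read past the end of the buffer) round-trips a blob through that space-padded encoding:

/* Standalone sketch of the space-padded base64 used by pmi_encode/pmi_decode. */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static unsigned char encsym(unsigned char v)
{
    assert(v < 64);
    if (v < 26) return 'A' + v;
    if (v < 52) return 'a' + (v - 26);
    if (v < 62) return '0' + (v - 52);
    return (62 == v) ? '+' : '/';
}

static unsigned char decsym(unsigned char c)
{
    if ('+' == c) return 62;
    if ('/' == c) return 63;
    if (' ' == c) return 64;            /* space acts as the pad marker */
    if (c <= '9') return (c - '0') + 52;
    if (c <= 'Z') return c - 'A';
    if (c <= 'z') return (c - 'a') + 26;
    return 64;
}

/* Encode vallen bytes; out must hold 4*((vallen+2)/3)+1 chars. */
static void encode(char *out, const unsigned char *val, size_t vallen)
{
    unsigned char *o = (unsigned char *) out;
    for (size_t i = 0; i < vallen; i += 3, o += 4) {
        unsigned char in[3] = { 0, 0, 0 };      /* zero-pad the final block */
        size_t len = vallen - i;
        memcpy(in, val + i, len > 3 ? 3 : len);
        o[0] = encsym(in[0] >> 2);
        o[1] = encsym(((in[0] & 0x03) << 4) | ((in[1] & 0xf0) >> 4));
        o[2] = len > 1 ? encsym(((in[1] & 0x0f) << 2) | ((in[2] & 0xc0) >> 6)) : ' ';
        o[3] = len > 2 ? encsym(in[2] & 0x3f) : ' ';
    }
    *o = '\0';
}

/* Decode into a malloc'd buffer; *retlen receives the decoded length. */
static unsigned char *decode(const char *data, size_t *retlen)
{
    size_t blocks = strlen(data) / 4, out_len = 0;
    unsigned char *ret = calloc(1, 3 * blocks + 1);
    if (NULL == ret) return NULL;
    for (size_t i = 0; i < blocks; i++) {
        const unsigned char *in = (const unsigned char *) data + 4 * i;
        unsigned char d[4] = { decsym(in[0]), decsym(in[1]),
                               decsym(in[2]), decsym(in[3]) };
        unsigned char *out = ret + out_len;
        out[0] = d[0] << 2 | d[1] >> 4;
        if (64 == d[2]) { out_len += 1; break; }   /* "x   " style pad block */
        out[1] = d[1] << 4 | d[2] >> 2;
        if (64 == d[3]) { out_len += 2; break; }
        out[2] = ((d[2] << 6) & 0xc0) | d[3];
        out_len += 3;
    }
    *retlen = out_len;
    return ret;
}

int main(void)
{
    const unsigned char blob[] = { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x42, 0x7f };
    char enc[4 * ((sizeof(blob) + 2) / 3) + 1];
    size_t declen = 0;

    encode(enc, blob, sizeof(blob));
    unsigned char *dec = decode(enc, &declen);
    printf("encoded: \"%s\"  round-trip ok: %s\n", enc,
           (declen == sizeof(blob) && 0 == memcmp(dec, blob, declen)) ? "yes" : "no");
    free(dec);
    return 0;
}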

ompi/mca/rte/pmi/rte_pmi_error.c

@@ -1,65 +0,0 @@
/*
* Copyright (c) 2013 Sandia National Laboratories. All rights reserved.
*
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*
*/
#include "ompi_config.h"
#include <stdio.h>
#include <stdarg.h>
#include "opal/mca/common/pmi/common_pmi.h"
#include "opal/util/error.h"
#include "opal/util/output.h"
#include "ompi/constants.h"
#include "ompi/mca/rte/rte.h"
#include "rte_pmi.h"
void
ompi_rte_abort(int error_code, char *fmt, ...)
{
char *msg;
int ret;
va_list ap;
va_start(ap, fmt);
ret = vasprintf(&msg, fmt, ap);
if (-1 == ret) msg = "";
va_end(ap);
mca_common_pmi_abort(error_code, msg);
}
int
ompi_rte_abort_peers(ompi_process_name_t *procs, size_t nprocs, int status)
{
mca_common_pmi_abort(status, "N/A");
return OMPI_SUCCESS;
}
int
ompi_rte_error_log(const char *file, int line,
const char *func, int ret)
{
opal_output(0, "%s:%d:%s: Error: %s\n", file, line, func, opal_strerror(ret));
return OMPI_SUCCESS;
}
void
ompi_rte_register_errhandler(int (*callback)(opal_pointer_array_t*), int order)
{
/* This is intentionally a no-op. We don't get async errors from PMI. */
}

ompi/mca/rte/pmi/rte_pmi_internal.h

@@ -1,30 +0,0 @@
/*
* Copyright (c) 2013 Sandia National Laboratories. All rights reserved.
*
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*
*/
#ifndef MCA_OMPI_RTE_PMI_INTERNAL_H
#define MCA_OMPI_RTE_PMI_INTERNAL_H
BEGIN_C_DECLS
extern int ompi_rte_pmi_name_init(void);
extern int ompi_rte_pmi_name_fini(void);
extern int ompi_rte_pmi_db_init(void);
extern void ompi_rte_pmi_db_fini(void);
#define OMPI_DB_BIND_LEVEL "ompi.bind_level"
#define OMPI_DB_BIND_INDEX "ompi.bind_index"
#define OMPI_DB_NODERANK "ompi.noderank"
#define OMPI_DB_RTE_INFO "ompi.rte-info"
END_C_DECLS
#endif

ompi/mca/rte/pmi/rte_pmi_name.c

@@ -1,231 +0,0 @@
/*
* Copyright (c) 2013 Sandia National Laboratories. All rights reserved.
*
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*
*/
#include "ompi_config.h"
#include <stdio.h>
#include "opal/mca/common/pmi/common_pmi.h"
#include "opal/dss/dss.h"
#include "opal/threads/tsd.h"
#include "ompi/constants.h"
#include "ompi/mca/rte/rte.h"
#include "rte_pmi.h"
#include "rte_pmi_internal.h"
ompi_process_name_t ompi_rte_my_process_name;
ompi_process_name_t ompi_rte_wildcard_process_name = { UINT32_MAX, UINT32_MAX };
static opal_tsd_key_t print_tsd;
static int
dt_pack_name(opal_buffer_t *buffer, const void *src, int32_t num_vals, opal_data_type_t type)
{
int ret;
int32_t i;
uint32_t *tmp;
ompi_process_name_t* name = (ompi_process_name_t*) src;
tmp = malloc(2 * sizeof(uint32_t) * num_vals);
if (NULL == tmp) return OMPI_ERR_OUT_OF_RESOURCE;
for (i = 0 ; i < num_vals ; ++i) {
tmp[i * 2] = name[i].jobid;
tmp[i * 2 + 1] = name[i].vpid;
}
ret = opal_dss.pack(buffer, tmp, 2 * num_vals, OPAL_UINT32);
free(tmp);
return ret;
}
static int
dt_unpack_name(opal_buffer_t *buffer, void *dest, int32_t *num_vals, opal_data_type_t type)
{
int ret;
int32_t i;
uint32_t *tmp;
ompi_process_name_t* name = (ompi_process_name_t*) dest;
int32_t num = *num_vals;
tmp = malloc(2 * sizeof(uint32_t) * *num_vals);
if (NULL == tmp) return OMPI_ERR_OUT_OF_RESOURCE;
num = 2 * *num_vals;
ret = opal_dss.unpack(buffer, tmp, &num, OPAL_UINT32);
for (i = 0 ; i < *num_vals ; ++i) {
name[i].jobid = tmp[i * 2];
name[i].vpid = tmp[i * 2 + 1];
}
free(tmp);
return ret;
}
static int
dt_copy_name(void **dest, void *src, opal_data_type_t type)
{
ompi_process_name_t *a, *b;
*dest = malloc(sizeof(ompi_process_name_t));
if (NULL == *dest) {
return OMPI_ERR_OUT_OF_RESOURCE;
}
a = *((ompi_process_name_t**) dest);
b = (ompi_process_name_t*) src;
a->jobid = b->jobid;
a->vpid = b->vpid;
return OMPI_SUCCESS;
}
static int
dt_compare_name(const void *value1,
const void *value2,
opal_data_type_t type)
{
return ompi_rte_compare_name_fields(OMPI_RTE_CMP_ALL,
(ompi_process_name_t*) value1,
(ompi_process_name_t*) value2);
}
static int
dt_print_name(char **output, char *prefix, void *name, opal_data_type_t type)
{
asprintf(output, "%sData type: ompi_process_name\tData Value: %s",
(NULL == prefix ? " " : prefix),
ompi_rte_print_process_name((ompi_process_name_t*) name));
return OMPI_SUCCESS;
}
static void
print_tsd_destructor(void *value)
{
if (NULL != value) free(value);
}
int
ompi_rte_pmi_name_init(void)
{
int ret;
opal_data_type_t tmp;
tmp = OMPI_NAME;
ret = opal_dss.register_type(dt_pack_name,
dt_unpack_name,
dt_copy_name,
dt_compare_name,
dt_print_name,
OPAL_DSS_UNSTRUCTURED,
"ompi_process_name",
&tmp);
if (OMPI_SUCCESS != ret) return ret;
ret = opal_tsd_key_create(&print_tsd, print_tsd_destructor);
if (OMPI_SUCCESS != ret) return ret;
return OMPI_SUCCESS;
}
int
ompi_rte_pmi_name_fini(void)
{
opal_tsd_key_delete(print_tsd);
return OMPI_SUCCESS;
}
char*
ompi_rte_print_process_name(const ompi_process_name_t *name)
{
int ret;
char *buf;
ret = opal_tsd_getspecific(print_tsd, (void**) &buf);
if (OMPI_SUCCESS != ret) return NULL;
if (NULL == buf) {
buf = malloc(sizeof(char) * 32);
if (NULL == buf) return NULL;
}
ret = opal_tsd_setspecific(print_tsd, buf);
if (OMPI_SUCCESS != ret) return NULL;
snprintf(buf, 32, "[%u, %u]",
(unsigned int) name->jobid,
(unsigned int) name->vpid);
return buf;
}
int
ompi_rte_compare_name_fields(ompi_rte_cmp_bitmask_t fields,
const ompi_process_name_t *name1,
const ompi_process_name_t *name2)
{
if (NULL == name1 && NULL == name2) {
return OPAL_EQUAL;
} else if (NULL == name1) {
return OPAL_VALUE2_GREATER;
} else if (NULL == name2) {
return OPAL_VALUE1_GREATER;
}
if (OMPI_RTE_CMP_JOBID & fields) {
if (name1->jobid > name2->jobid) {
return OPAL_VALUE1_GREATER;
} else if (name1->jobid < name2->jobid) {
return OPAL_VALUE2_GREATER;
}
}
if (OMPI_RTE_CMP_VPID & fields) {
if (name1->vpid > name2->vpid) {
return OPAL_VALUE1_GREATER;
} else if (name1->vpid < name2->vpid) {
return OPAL_VALUE2_GREATER;
}
}
return OPAL_EQUAL;
}
uint64_t
ompi_rte_hash_name(const ompi_process_name_t *name)
{
uint64_t hash;
hash = name->jobid;
hash <<= sizeof(name->jobid) * 8;
hash += name->vpid;
return hash;
}
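
For reference, ompi_rte_hash_name above packs the jobid into the upper 32 bits of the hash and the vpid into the lower 32, and ompi_rte_compare_name_fields orders names by jobid first, then vpid. A standalone sketch of both (plain C; it returns memcmp-style -1/0/1 rather than the OPAL_VALUE1_GREATER/OPAL_VALUE2_GREATER/OPAL_EQUAL constants, and the example names are made up):

/* Standalone sketch of the name hash and ordering used above. */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

typedef struct { uint32_t jobid; uint32_t vpid; } name_t;

/* jobid in the upper 32 bits, vpid in the lower 32 bits */
static uint64_t hash_name(const name_t *n)
{
    uint64_t h = n->jobid;
    h <<= 32;
    h += n->vpid;
    return h;
}

/* memcmp-style result; jobid is compared before vpid */
static int compare_names(const name_t *a, const name_t *b)
{
    if (a->jobid != b->jobid) return a->jobid > b->jobid ? 1 : -1;
    if (a->vpid  != b->vpid)  return a->vpid  > b->vpid  ? 1 : -1;
    return 0;
}

int main(void)
{
    name_t me   = { .jobid = 1, .vpid = 0 };   /* made-up values */
    name_t peer = { .jobid = 1, .vpid = 3 };

    printf("hash(me)   = 0x%016" PRIx64 "\n", hash_name(&me));
    printf("hash(peer) = 0x%016" PRIx64 "\n", hash_name(&peer));
    printf("compare    = %d\n", compare_names(&me, &peer));  /* -1: peer has the higher vpid */
    return 0;
}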