1
1

As per the RFC:

http://www.open-mpi.org/community/lists/devel/2014/04/14496.php

Revamp the opal database framework, including renaming it to "dstore" to reflect that it isn't a "database". Move the "db" framework to ORTE for now, soon to move to ORCM

This commit was SVN r31557.
This commit is contained in:
Ralph Castain 2014-04-29 21:49:23 +00:00
parent fc0a75da91
commit c4c9bc1573
101 changed files with 5214 additions and 4178 deletions

View File

@ -1638,7 +1638,6 @@ ompi_proc_t **ompi_comm_get_rprocs ( ompi_communicator_t *local_comm,
}
/* And now add the information into the database */
/* Store the remote processes into the opal_db */
if (OMPI_SUCCESS != (rc = MCA_PML_CALL(add_procs(rprocs, rsize)))) {
OMPI_ERROR_LOG(rc);
goto err_exit;

View File

@ -3,6 +3,7 @@
* Copyright (c) 2011-2013 Los Alamos National Security, LLC. All rights
* reserved.
* Copyright (c) 2011 UT-Battelle, LLC. All rights reserved.
* Copyright (c) 2014 Intel, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -14,7 +15,9 @@
#include "common_ugni.h"
#include "ompi/proc/proc.h"
#include "opal/mca/db/db.h"
#include "opal/mca/dstore/dstore.h"
#include "opal/class/opal_list.h"
#include "opal/dss/dss.h"
/* NTH: we need some options from the btl */
#include "ompi/mca/btl/ugni/btl_ugni.h"
@ -240,11 +243,25 @@ int ompi_common_ugni_init (void)
/* get a unique id from the runtime */
#if defined(OMPI_DB_GLOBAL_RANK)
ptr = &my_rank;
rc = opal_db.fetch ((opal_identifier_t *) &my_proc->proc_name, OMPI_DB_GLOBAL_RANK,
(void **) &ptr, OPAL_UINT32);
if (OPAL_SUCCESS != rc) {
my_rank = my_proc->proc_name.vpid;
{
opal_list_t myvals;
opal_value_t *kv;
ptr = &my_rank;
OBJ_CONSTRUCT(&myvals, opal_list_t);
rc = opal_dstore.fetch (opal_dstore_internal,
ORTE_NAME_PRINT(&my_proc->proc_name),
OMPI_DB_GLOBAL_RANK,
&myvals);
if (OPAL_SUCCESS == rc) {
kv = (opal_value_t*)opal_list_get_first(&myvals):
if (OPAL_SUCCESS != opal_value_unload(&kv, (void**)&ptr, OPAL_UINT32)) {
my_rank = my_proc->proc_name.vpid;
}
} else {
my_rank = my_proc->proc_name.vpid;
}
OPAL_LIST_DESTRUCT(&myvals);
}
#else
my_rank = my_proc->proc_name.vpid;

View File

@ -15,7 +15,7 @@
* Copyright (c) 2009 Sun Microsystems, Inc. All rights reserved.
* Copyright (c) 2011-2013 Los Alamos National Security, LLC. All rights
* reserved.
* Copyright (c) 2013 Intel, Inc. All rights reserved
* Copyright (c) 2013-2014 Intel, Inc. All rights reserved
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -39,7 +39,6 @@
#include "opal/util/argv.h"
#include "opal/util/opal_getcwd.h"
#include "opal/dss/dss.h"
#include "opal/mca/db/db.h"
#include "opal/mca/hwloc/base/base.h"
#include "orte/mca/errmgr/errmgr.h"

View File

@ -2,8 +2,7 @@
* Copyright (c) 2012-2013 Los Alamos National Security, LLC.
* All rights reserved.
* Copyright (c) 2013-2014 Intel, Inc. All rights reserved
*
* Copyright (c) 2014 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2014 Cisco Systems, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -99,14 +98,6 @@ OMPI_DECLSPEC int ompi_rte_db_store(const ompi_process_name_t *nm, const char* k
OMPI_DECLSPEC int ompi_rte_db_fetch(const struct ompi_proc_t *proc,
const char *key,
void **data, opal_data_type_t type);
OMPI_DECLSPEC int ompi_rte_db_fetch_pointer(const struct ompi_proc_t *proc,
const char *key,
void **data, opal_data_type_t type);
OMPI_DECLSPEC int ompi_rte_db_fetch_multiple(const struct ompi_proc_t *proc,
const char *key,
opal_list_t *kvs);
OMPI_DECLSPEC int ompi_rte_db_remove(const ompi_process_name_t *nm,
const char *key);
#define OMPI_DB_HOSTNAME ORTE_DB_HOSTNAME
#define OMPI_DB_LOCALITY ORTE_DB_LOCALITY
#define OMPI_DB_GLOBAL_RANK ORTE_DB_GLOBAL_RANK

View File

@ -2,7 +2,7 @@
* Copyright (c) 2012-2013 Los Alamos National Security, LLC.
* All rights reserved.
* Copyright (c) 2013-2014 Intel, Inc. All rights reserved
* Copyright (c) 2014 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2014 Cisco Systems, Inc. All rights reserved.
* $COPYRIGHT$
*/
#include "ompi_config.h"
@ -15,8 +15,10 @@
#include "opal/dss/dss.h"
#include "opal/util/argv.h"
#include "opal/util/opal_getcwd.h"
#include "opal/mca/db/db.h"
#include "opal/mca/dstore/dstore.h"
#include "opal/threads/threads.h"
#include "opal/class/opal_list.h"
#include "opal/dss/dss.h"
#include "orte/mca/errmgr/errmgr.h"
#include "orte/mca/ess/ess.h"
@ -157,6 +159,11 @@ int ompi_rte_modex(ompi_rte_collective_t *coll)
/* mark that this process reached modex */
orte_grpcomm_base.modex_ready = true;
/* let the datastore commit any data we provided that needs
* to be shared with our peers, if required
*/
opal_dstore.commit(opal_dstore_peer, (opal_identifier_t*)ORTE_PROC_MY_NAME);
if ((orte_process_info.num_procs < ompi_hostname_cutoff) ||
!mca_rte_orte_component.direct_modex ||
orte_standalone_operation) {
@ -209,11 +216,23 @@ int ompi_rte_modex(ompi_rte_collective_t *coll)
int ompi_rte_db_store(const orte_process_name_t *nm, const char* key,
const void *data, opal_data_type_t type)
{
opal_value_t kv;
int rc;
OBJ_CONSTRUCT(&kv, opal_value_t);
kv.key = strdup(key);
if (OPAL_SUCCESS != (rc = opal_value_load(&kv, (void*)data, type))) {
OBJ_DESTRUCT(&kv);
return rc;
}
/* MPI connection data is to be shared with ALL other processes */
return opal_db.store((opal_identifier_t*)nm, OPAL_SCOPE_GLOBAL, key, data, type);
rc = opal_dstore.store(opal_dstore_peer, (opal_identifier_t*)nm, &kv);
OBJ_DESTRUCT(&kv);
return rc;
}
static int direct_modex(orte_process_name_t *peer, opal_scope_t scope)
static int direct_modex(orte_process_name_t *peer)
{
int rc;
ompi_orte_tracker_t *req;
@ -225,12 +244,6 @@ static int direct_modex(orte_process_name_t *peer, opal_scope_t scope)
ORTE_NAME_PRINT(peer)));
buf = OBJ_NEW(opal_buffer_t);
/* pack the scope of the request */
if (OPAL_SUCCESS != (rc = opal_dss.pack(buf, &scope, 1, OPAL_DATA_SCOPE_T))) {
ORTE_ERROR_LOG(rc);
OBJ_RELEASE(buf);
return rc;
}
/* create a tracker for this request */
req = OBJ_NEW(ompi_orte_tracker_t);
@ -272,13 +285,22 @@ int ompi_rte_db_fetch(const struct ompi_proc_t *proc,
void **data, opal_data_type_t type)
{
int rc;
opal_list_t myvals;
opal_value_t *kv;
OPAL_OUTPUT_VERBOSE((2, orte_grpcomm_base_framework.framework_output,
"%s fetch data from %s for %s",
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
ORTE_NAME_PRINT(&proc->proc_name), key));
if (OPAL_SUCCESS != (rc = opal_db.fetch((opal_identifier_t*)(&proc->proc_name), key, data, type))) {
OBJ_CONSTRUCT(&myvals, opal_list_t);
/* the peer dstore contains our own data that will be shared
* with our peers - the nonpeer dstore contains data we received
* that would only be shared with nonpeer procs
*/
if (OPAL_SUCCESS != (rc = opal_dstore.fetch(opal_dstore_nonpeer,
(opal_identifier_t*)(&proc->proc_name),
key, &myvals))) {
if (direct_modex_enabled) {
OPAL_OUTPUT_VERBOSE((2, orte_grpcomm_base_framework.framework_output,
"%s requesting direct modex from %s for %s",
@ -287,115 +309,63 @@ int ompi_rte_db_fetch(const struct ompi_proc_t *proc,
/* if we couldn't fetch the data via the db, then we will attempt
* to retrieve it from the target proc
*/
if (ORTE_SUCCESS != (rc = direct_modex((orte_process_name_t*)&proc->proc_name, OPAL_SCOPE_PEER))) {
if (ORTE_SUCCESS != (rc = direct_modex((orte_process_name_t*)(&proc->proc_name)))) {
ORTE_ERROR_LOG(rc);
OPAL_LIST_DESTRUCT(&myvals);
return rc;
}
/* now retrieve the requested piece */
if (OPAL_SUCCESS != (rc = opal_db.fetch((opal_identifier_t*)(&proc->proc_name), key, data, type))) {
if (OPAL_SUCCESS != (rc = opal_dstore.fetch(opal_dstore_nonpeer,
(opal_identifier_t*)(&proc->proc_name),
key, &myvals))) {
ORTE_ERROR_LOG(rc);
OPAL_LIST_DESTRUCT(&myvals);
return rc;
}
} else {
return rc;
}
}
/* update the hostname upon first call to modex-recv for this proc */
if (NULL == proc->proc_hostname) {
opal_db.fetch_pointer((opal_identifier_t*)(&proc->proc_name), ORTE_DB_HOSTNAME, (void**)&proc->proc_hostname, OPAL_STRING);
}
return OMPI_SUCCESS;
}
int ompi_rte_db_fetch_pointer(const struct ompi_proc_t *proc,
const char *key,
void **data, opal_data_type_t type)
{
int rc;
OPAL_OUTPUT_VERBOSE((2, orte_grpcomm_base_framework.framework_output,
"%s fetch data pointer from %s for %s",
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
ORTE_NAME_PRINT(&proc->proc_name), key));
if (OPAL_SUCCESS != (rc = opal_db.fetch_pointer((opal_identifier_t*)(&proc->proc_name), key, data, type))) {
if (direct_modex_enabled) {
/* if we couldn't fetch the data via the db, then we will attempt
* to retrieve it from the target proc
*/
/* see if we can find it in the internal dstore */
OPAL_OUTPUT_VERBOSE((2, orte_grpcomm_base_framework.framework_output,
"%s requesting direct modex from %s for %s",
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
ORTE_NAME_PRINT(&proc->proc_name), key));
if (ORTE_SUCCESS != (rc = direct_modex((orte_process_name_t*)&proc->proc_name, OPAL_SCOPE_PEER))) {
ORTE_ERROR_LOG(rc);
return rc;
"%s searching nonpeer dstore for %s",
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME), key));
if (OPAL_SUCCESS != (rc = opal_dstore.fetch(opal_dstore_internal,
(opal_identifier_t*)(&proc->proc_name),
key, &myvals))) {
/* try one last place - the peer dstore in case it got stuck there for some reason */
OPAL_OUTPUT_VERBOSE((2, orte_grpcomm_base_framework.framework_output,
"%s searching internal dstore for %s",
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME), key));
if (OPAL_SUCCESS != (rc = opal_dstore.fetch(opal_dstore_peer,
(opal_identifier_t*)(&proc->proc_name),
key, &myvals))) {
ORTE_ERROR_LOG(rc);
OPAL_LIST_DESTRUCT(&myvals);
return rc;
}
}
/* now retrieve the requested piece */
if (OPAL_SUCCESS != (rc = opal_db.fetch_pointer((opal_identifier_t*)(&proc->proc_name), key, data, type))) {
ORTE_ERROR_LOG(rc);
return rc;
}
} else {
return rc;
}
}
/* only one value should have been returned */
kv = (opal_value_t*)opal_list_get_first(&myvals);
if (NULL == kv) {
return OMPI_ERROR;
}
opal_value_unload(kv, data, type);
OPAL_LIST_DESTRUCT(&myvals);
/* update the hostname upon first call to modex-recv for this proc */
if (NULL == proc->proc_hostname) {
opal_db.fetch_pointer((opal_identifier_t*)(&proc->proc_name), ORTE_DB_HOSTNAME, (void**)&proc->proc_hostname, OPAL_STRING);
}
return OMPI_SUCCESS;
}
int ompi_rte_db_fetch_multiple(const struct ompi_proc_t *proc,
const char *key,
opal_list_t *kvs)
{
int rc;
OPAL_OUTPUT_VERBOSE((2, orte_grpcomm_base_framework.framework_output,
"%s fetch multiple from %s for %s",
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
ORTE_NAME_PRINT(&proc->proc_name), key));
/* MPI processes are only concerned with shared info */
if (OPAL_SUCCESS != (rc = opal_db.fetch_multiple((opal_identifier_t*)(&proc->proc_name),
OPAL_SCOPE_GLOBAL, key, kvs))) {
if (direct_modex_enabled) {
/* if we couldn't fetch the data via the db, then we will attempt
* to retrieve it from the target proc
*/
OPAL_OUTPUT_VERBOSE((2, orte_grpcomm_base_framework.framework_output,
"%s requesting direct modex from %s for %s",
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
ORTE_NAME_PRINT(&proc->proc_name), key));
if (ORTE_SUCCESS != (rc = direct_modex((orte_process_name_t*)&proc->proc_name, OPAL_SCOPE_GLOBAL))) {
ORTE_ERROR_LOG(rc);
return rc;
OBJ_CONSTRUCT(&myvals, opal_list_t);
if (OPAL_SUCCESS == opal_dstore.fetch(opal_dstore_internal, (opal_identifier_t*)(&proc->proc_name), ORTE_DB_HOSTNAME, &myvals)) {
kv = (opal_value_t*)opal_list_get_first(&myvals);
if (NULL != kv) {
opal_value_unload(kv, (void**)&proc->proc_hostname, OPAL_STRING);
}
/* now retrieve the requested pieces */
if (OPAL_SUCCESS != (rc = opal_db.fetch_multiple((opal_identifier_t*)(&proc->proc_name),
OPAL_SCOPE_GLOBAL, key, kvs))) {
ORTE_ERROR_LOG(rc);
return rc;
}
} else {
return rc;
}
}
/* update the hostname upon first call to modex-recv for this proc */
if (NULL == proc->proc_hostname) {
opal_db.fetch_pointer((opal_identifier_t*)(&proc->proc_name), ORTE_DB_HOSTNAME, (void**)&proc->proc_hostname, OPAL_STRING);
OPAL_LIST_DESTRUCT(&myvals);
}
return OMPI_SUCCESS;
}
int ompi_rte_db_remove(const orte_process_name_t *nm,
const char *key)
{
return opal_db.remove((opal_identifier_t*)nm, key);
}
/* this function executes in the RML event base, and so
* we must take care to protect against threading conflicts

View File

@ -31,7 +31,7 @@
#include "opal/dss/dss.h"
#include "opal/util/arch.h"
#include "opal/util/show_help.h"
#include "opal/mca/db/db.h"
#include "opal/mca/dstore/dstore.h"
#include "opal/mca/hwloc/base/base.h"
#include "ompi/proc/proc.h"
@ -83,9 +83,9 @@ void ompi_proc_destruct(ompi_proc_t* proc)
* destroyed here. It will be destroyed later when the ompi_datatype_finalize is called.
*/
OBJ_RELEASE( proc->proc_convertor );
/* DO NOT FREE THE HOSTNAME FIELD AS THIS POINTS
* TO AN AREA ALLOCATED/FREE'D ELSEWHERE
*/
if (NULL != proc->proc_hostname) {
free(proc->proc_hostname);
}
OPAL_THREAD_LOCK(&ompi_proc_lock);
opal_list_remove_item(&ompi_proc_list, (opal_list_item_t*)proc);
OPAL_THREAD_UNLOCK(&ompi_proc_lock);
@ -113,7 +113,7 @@ int ompi_proc_init(void)
if (i == OMPI_PROC_MY_NAME->vpid) {
ompi_proc_local_proc = proc;
proc->proc_flags = OPAL_PROC_ALL_LOCAL;
proc->proc_hostname = ompi_process_info.nodename;
proc->proc_hostname = strdup(ompi_process_info.nodename);
proc->proc_arch = opal_local_arch;
#if OPAL_ENABLE_HETEROGENEOUS_SUPPORT
/* add our arch to the modex */
@ -129,26 +129,39 @@ int ompi_proc_init(void)
int ompi_proc_set_locality(ompi_proc_t *proc)
{
opal_hwloc_locality_t *hwlocale, locality;
ompi_vpid_t vpid, *vptr;
opal_hwloc_locality_t locality;
ompi_vpid_t vpid;
int ret;
opal_list_t myvals;
opal_value_t *kv, kvn;
/* get the locality information - do not use modex recv for
* this request as that will automatically cause the hostname
* to be loaded as well
*/
hwlocale = &(proc->proc_flags);
if (OMPI_SUCCESS == opal_db.fetch((opal_identifier_t*)&proc->proc_name, OPAL_DB_LOCALITY,
(void**)&hwlocale, OPAL_HWLOC_LOCALITY_T)) {
OBJ_CONSTRUCT(&myvals, opal_list_t);
if (OMPI_SUCCESS == opal_dstore.fetch(opal_dstore_internal,
(opal_identifier_t*)&proc->proc_name,
OPAL_DSTORE_LOCALITY, &myvals)) {
kv = (opal_value_t*)opal_list_get_first(&myvals);
proc->proc_flags = kv->data.uint16;
OPAL_LIST_DESTRUCT(&myvals);
return OMPI_SUCCESS;
}
OPAL_LIST_DESTRUCT(&myvals);
/* if we don't already have it, compute and save it for future use */
vptr = &vpid;
if (OMPI_SUCCESS != (ret = opal_db.fetch((opal_identifier_t*)&proc->proc_name, OMPI_RTE_NODE_ID,
(void**)&vptr, OPAL_UINT32))) {
OBJ_CONSTRUCT(&myvals, opal_list_t);
if (OMPI_SUCCESS != (ret = opal_dstore.fetch(opal_dstore_nonpeer,
(opal_identifier_t*)&proc->proc_name,
OMPI_RTE_NODE_ID, &myvals))) {
OPAL_LIST_DESTRUCT(&myvals);
return ret;
}
kv = (opal_value_t*)opal_list_get_first(&myvals);
vpid = kv->data.uint32;
OPAL_LIST_DESTRUCT(&myvals);
/* if we are on different nodes, then we are probably non-local */
if (vpid != OMPI_RTE_MY_NODEID) {
locality = OPAL_PROC_NON_LOCAL;
@ -157,9 +170,12 @@ int ompi_proc_set_locality(ompi_proc_t *proc)
* present, then no coprocessors were detected and we can
* ignore this test
*/
vptr = &vpid;
if (OMPI_SUCCESS == opal_db.fetch((opal_identifier_t*)&proc->proc_name, OMPI_RTE_HOST_ID,
(void**)&vptr, OPAL_UINT32)) {
OBJ_CONSTRUCT(&myvals, opal_list_t);
if (OMPI_SUCCESS == opal_dstore.fetch(opal_dstore_internal,
(opal_identifier_t*)&proc->proc_name,
OMPI_RTE_HOST_ID, &myvals)) {
kv = (opal_value_t*)opal_list_get_first(&myvals);
vpid = kv->data.uint32;
/* if this matches my host id, then we are on the same host,
* but not on the same board
*/
@ -169,6 +185,7 @@ int ompi_proc_set_locality(ompi_proc_t *proc)
locality = OPAL_PROC_NON_LOCAL;
}
}
OPAL_LIST_DESTRUCT(&myvals);
#endif
} else {
#if OPAL_HAVE_HWLOC
@ -176,29 +193,40 @@ int ompi_proc_set_locality(ompi_proc_t *proc)
char *cpu_bitmap;
/* retrieve the binding for the other proc */
if (OMPI_SUCCESS != opal_db.fetch((opal_identifier_t*)&proc->proc_name, OPAL_DB_CPUSET,
(void**)&cpu_bitmap, OPAL_STRING)) {
OBJ_CONSTRUCT(&myvals, opal_list_t);
if (OMPI_SUCCESS != opal_dstore.fetch(opal_dstore_internal,
(opal_identifier_t*)&proc->proc_name,
OPAL_DSTORE_CPUSET, &myvals)) {
/* we don't know their cpuset, so nothing more we can say */
locality = OPAL_PROC_ON_NODE;
} else if (NULL == cpu_bitmap || NULL == ompi_process_info.cpuset) {
/* one or both of us is not bound, so all we can say is we are on the
* same node
*/
locality = OPAL_PROC_ON_NODE;
} else {
/* we share a node - see what else we share */
locality = opal_hwloc_base_get_relative_locality(opal_hwloc_topology,
ompi_process_info.cpuset,
cpu_bitmap);
kv = (opal_value_t*)opal_list_get_first(&myvals);
cpu_bitmap = kv->data.string;
if (NULL == cpu_bitmap || NULL == ompi_process_info.cpuset) {
/* one or both of us is not bound, so all we can say is we are on the
* same node
*/
locality = OPAL_PROC_ON_NODE;
} else {
/* we share a node - see what else we share */
locality = opal_hwloc_base_get_relative_locality(opal_hwloc_topology,
ompi_process_info.cpuset,
cpu_bitmap);
}
}
OPAL_LIST_DESTRUCT(&myvals);
}
#else
/* all we know is that we share this node */
locality = OPAL_PROC_ON_NODE;
#endif
}
ret = opal_db.store((opal_identifier_t*)&proc, OPAL_SCOPE_INTERNAL,
OPAL_DB_LOCALITY, &locality, OPAL_HWLOC_LOCALITY_T);
OBJ_CONSTRUCT(&kvn, opal_value_t);
kvn.key = strdup(OPAL_DSTORE_LOCALITY);
kvn.type = OPAL_HWLOC_LOCALITY_T;
kvn.data.uint16 = locality;
ret = opal_dstore.store(opal_dstore_internal, (opal_identifier_t*)&proc, &kvn);
OBJ_DESTRUCT(&kvn);
/* set the proc's local value as well */
proc->proc_flags = locality;
return ret;
@ -241,8 +269,9 @@ int ompi_proc_complete_init(void)
* ALL modex info for this proc) will have no appreciable
* impact on launch scaling
*/
ret = ompi_modex_recv_string_pointer(OMPI_DB_HOSTNAME, proc, (void**)&(proc->proc_hostname), OPAL_STRING);
ret = ompi_modex_recv_key_value(OMPI_DB_HOSTNAME, proc, (void**)&(proc->proc_hostname), OPAL_STRING);
if (OMPI_SUCCESS != ret) {
errcode = ret;
break;
}
} else {
@ -486,7 +515,7 @@ int ompi_proc_refresh(void) {
* ALL modex info for this proc) will have no appreciable
* impact on launch scaling
*/
ret = ompi_modex_recv_string_pointer(OMPI_DB_HOSTNAME, proc, (void**)&(proc->proc_hostname), OPAL_STRING);
ret = ompi_modex_recv_key_value(OMPI_DB_HOSTNAME, proc, (void**)&(proc->proc_hostname), OPAL_STRING);
if (OMPI_SUCCESS != ret) {
break;
}
@ -505,6 +534,9 @@ int ompi_proc_refresh(void) {
/* get the remote architecture */
uint32_t* uiptr = &(proc->proc_arch);
ret = ompi_modex_recv_key_value("OMPI_ARCH", proc, (void**)&uiptr, OPAL_UINT32);
if (OMPI_SUCCESS != ret) {
break;
}
/* if arch is different than mine, create a new convertor for this proc */
if (proc->proc_arch != opal_local_arch) {
OBJ_RELEASE(proc->proc_convertor);
@ -563,14 +595,23 @@ ompi_proc_pack(ompi_proc_t **proclist, int proclistsize,
* data that each process computes about its peers
*/
OBJ_CONSTRUCT(&data, opal_list_t);
rc = opal_db.fetch_multiple((opal_identifier_t*)&proclist[i]->proc_name,
OPAL_SCOPE_GLOBAL, NULL, &data);
rc = opal_dstore.fetch(opal_dstore_peer,
(opal_identifier_t*)&proclist[i]->proc_name,
NULL, &data);
if (OPAL_SUCCESS != rc) {
OMPI_ERROR_LOG(rc);
num_entries = 0;
} else {
/* count the number of entries we will send */
num_entries = opal_list_get_size(&data);
rc = opal_dstore.fetch(opal_dstore_nonpeer,
(opal_identifier_t*)&proclist[i]->proc_name,
NULL, &data);
if (OPAL_SUCCESS != rc) {
OMPI_ERROR_LOG(rc);
num_entries = 0;
} else {
/* count the number of entries we will send */
num_entries = opal_list_get_size(&data);
}
}
/* put the number of entries into the buffer */
@ -657,6 +698,8 @@ ompi_proc_unpack(opal_buffer_t* buf,
int i;
size_t newprocs_len = 0;
ompi_proc_t **plist=NULL, **newprocs = NULL;
opal_list_t myvals;
opal_value_t *kv;
/* do not free plist *ever*, since it is used in the remote group
structure of a communicator */
@ -730,41 +773,51 @@ ompi_proc_unpack(opal_buffer_t* buf,
* Extract the attribute names and values
*/
for (j = 0; j < num_recvd_entries; j++) {
opal_value_t *kv;
cnt = 1;
if (OPAL_SUCCESS != (rc = opal_dss.unpack(buf, &kv, &cnt, OPAL_VALUE))) {
OMPI_ERROR_LOG(rc);
break;
}
/* if this is me, dump the data - we already have it in the db */
if (OPAL_EQUAL == ompi_rte_compare_name_fields(OMPI_RTE_CMP_ALL,
/* if this is me, ignore the data - we already have it in the db */
if (OPAL_EQUAL != ompi_rte_compare_name_fields(OMPI_RTE_CMP_ALL,
OMPI_PROC_MY_NAME, &new_name)) {
OBJ_RELEASE(kv);
} else {
/* store it in the database */
if (OPAL_SUCCESS != (rc = opal_db.store_pointer((opal_identifier_t*)&new_name, kv))) {
if (OPAL_SUCCESS != (rc = opal_dstore.store(opal_dstore_peer,
(opal_identifier_t*)&new_name, kv))) {
OMPI_ERROR_LOG(rc);
OBJ_RELEASE(kv);
}
/* do not release the kv - the db holds that pointer */
}
OBJ_RELEASE(kv);
}
#if OPAL_ENABLE_HETEROGENEOUS_SUPPORT
rc = opal_db.fetch((opal_identifier_t*)&new_name, "OMPI_ARCH",
OBJ_CONSTRUCT(&myvals, opal_list_t);
rc = opal_dstore.fetch(opal_dstore_peer,
(opal_identifier_t*)&new_name,
"OMPI_ARCH", &myvals);
(void**)&new_arch, OPAL_UINT32);
if( OPAL_SUCCESS == rc ) {
kv = (opal_value_t*)opal_list_get_first(&myvals);
new_arch = kv->data.uint32;
} else {
new_arch = opal_local_arch;
}
OPAL_LIST_DESTRUCT(&myvals);
#else
new_arch = opal_local_arch;
#endif
if (ompi_process_info.num_procs < ompi_hostname_cutoff) {
/* retrieve the hostname */
rc = opal_db.fetch_pointer((opal_identifier_t*)&new_name, OMPI_DB_HOSTNAME,
(void**)&new_hostname, OPAL_STRING);
if( OPAL_SUCCESS != rc ) {
OBJ_CONSTRUCT(&myvals, opal_list_t);
rc = opal_dstore.fetch(opal_dstore_peer,
(opal_identifier_t*)&new_name,
OMPI_DB_HOSTNAME, &myvals);
if( OPAL_SUCCESS == rc ) {
kv = (opal_value_t*)opal_list_get_first(&myvals);
new_hostname = strdup(kv->data.string);
} else {
new_hostname = NULL;
}
OPAL_LIST_DESTRUCT(&myvals);
} else {
/* just set the hostname to NULL for now - we'll fill it in
* as modex_recv's are called for procs we will talk to

View File

@ -11,6 +11,7 @@
* All rights reserved.
* Copyright (c) 2006-2012 Los Alamos National Security, LLC. All rights
* reserved.
* Copyright (c) 2014 Intel, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -86,27 +87,6 @@ ompi_modex_recv(const mca_base_component_t *component,
return rc;
}
/* Return a pointer to the modex data that the given component published
 * for the given peer proc, without creating a new copy of it.  The
 * caller must NOT free the returned buffer - it is owned by the db. */
int ompi_modex_recv_pointer(const mca_base_component_t *component,
                            const ompi_proc_t *proc,
                            void **buffer, opal_data_type_t type)
{
    int rc;
    /* the component name is the db key under which the data was stored;
     * mca_base_component_to_string allocates, so it must be freed below */
    char *name = mca_base_component_to_string(component);
    /* set defaults */
    *buffer = NULL;
    if (NULL == name) {
        /* could not stringify the component name */
        return OMPI_ERR_OUT_OF_RESOURCE;
    }
    /* the fetch_pointer API returns a pointer to the data */
    rc = ompi_rte_db_fetch_pointer(proc, name, buffer, type);
    free(name);
    return rc;
}
int
ompi_modex_send_string(const char* key,
@ -152,22 +132,6 @@ ompi_modex_recv_string(const char* key,
return rc;
}
/* Return a pointer to the modex data stored under an arbitrary string
 * key for the given peer proc, without creating a new copy of it.  The
 * caller must NOT free the returned buffer - it is owned by the db. */
int ompi_modex_recv_string_pointer(const char* key,
                                   const ompi_proc_t *source_proc,
                                   void **buffer, opal_data_type_t type)
{
    int rc;
    /* set defaults */
    *buffer = NULL;
    /* the fetch_pointer API returns a pointer to the data */
    rc = ompi_rte_db_fetch_pointer(source_proc, key, (void**)buffer, type);
    return rc;
}
int
ompi_modex_send_key_value(const char* key,
const void *value,

View File

@ -12,6 +12,7 @@
* Copyright (c) 2006-2012 Los Alamos National Security, LLC. All rights
* reserved.
* Copyright (c) 2008 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2014 Intel, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -191,10 +192,6 @@ OMPI_DECLSPEC int ompi_modex_recv(const mca_base_component_t *dest_component,
const ompi_proc_t *source_proc,
void **buffer, size_t *size);
OMPI_DECLSPEC int ompi_modex_recv_pointer(const mca_base_component_t *component,
const ompi_proc_t *proc,
void **buffer, opal_data_type_t type);
/**
* Receive a buffer from a given peer
*
@ -227,10 +224,6 @@ OMPI_DECLSPEC int ompi_modex_recv_string(const char* key,
const ompi_proc_t *source_proc,
void **buffer, size_t *size);
OMPI_DECLSPEC int ompi_modex_recv_string_pointer(const char* key,
const ompi_proc_t *source_proc,
void **buffer, opal_data_type_t type);
/**
* Recv a value from a given peer
*

View File

@ -11,6 +11,7 @@
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2012 Los Alamos National Security, Inc. All rights reserved.
* Copyright (c) 2014 Intel, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -33,8 +34,17 @@
BEGIN_C_DECLS
/* A non-API function for something that happens in a number
* of places throughout the code base - loading a value into
* an opal_value_t structure
*/
OPAL_DECLSPEC int opal_value_load(opal_value_t *kv,
void *data, opal_data_type_t type);
OPAL_DECLSPEC int opal_value_unload(opal_value_t *kv,
void **data, opal_data_type_t type);
/**
* Top-level itnerface function to pack one or more values into a
* Top-level interface function to pack one or more values into a
* buffer.
*
* The pack function packs one or more values of a specified type into

View File

@ -261,7 +261,6 @@ int opal_dss_copy_value(opal_value_t **dest, opal_value_t *src,
if (NULL != src->key) {
p->key = strdup(src->key);
}
p->scope = src->scope;
p->type = src->type;
/* copy the right field */

View File

@ -9,6 +9,7 @@
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2014 Intel, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -21,6 +22,8 @@
*/
#include "opal_config.h"
#include "opal/util/error.h"
#include "opal/dss/dss_internal.h"
@ -145,3 +148,157 @@ int opal_dss_copy_payload(opal_buffer_t *dest, opal_buffer_t *src)
return OPAL_SUCCESS;
}
/**
 * Load a datum into an opal_value_t container.
 *
 * @param kv    Previously constructed opal_value_t to populate; its
 *              type field is set to the requested type.
 * @param data  Pointer to the value to copy in.  NULL is accepted for
 *              OPAL_STRING and OPAL_BYTE_OBJECT (yielding an empty
 *              value) but rejected for the numeric types.
 * @param type  opal data type of the datum being loaded.
 *
 * @return OPAL_SUCCESS, OPAL_ERR_BAD_PARAM for a NULL numeric datum,
 *         OPAL_ERR_OUT_OF_RESOURCE on allocation failure, or
 *         OPAL_ERR_NOT_SUPPORTED for a type this function does not
 *         handle.
 *
 * Strings and byte objects are duplicated - the container owns the
 * copies and they are released when the opal_value_t is destructed.
 */
int opal_value_load(opal_value_t *kv,
                    void *data, opal_data_type_t type)
{
    opal_byte_object_t *boptr;

    switch (type) {
    case OPAL_STRING:
        kv->type = OPAL_STRING;
        if (NULL != data) {
            kv->data.string = strdup((const char *) data);
            if (NULL == kv->data.string) {
                /* strdup returns NULL on allocation failure */
                OPAL_ERROR_LOG(OPAL_ERR_OUT_OF_RESOURCE);
                return OPAL_ERR_OUT_OF_RESOURCE;
            }
        } else {
            kv->data.string = NULL;
        }
        break;
    case OPAL_UINT64:
        if (NULL == data) {
            OPAL_ERROR_LOG(OPAL_ERR_BAD_PARAM);
            return OPAL_ERR_BAD_PARAM;
        }
        kv->type = OPAL_UINT64;
        kv->data.uint64 = *(uint64_t*)(data);
        break;
    case OPAL_UINT32:
        if (NULL == data) {
            OPAL_ERROR_LOG(OPAL_ERR_BAD_PARAM);
            return OPAL_ERR_BAD_PARAM;
        }
        kv->type = OPAL_UINT32;
        kv->data.uint32 = *(uint32_t*)data;
        break;
    case OPAL_UINT16:
        if (NULL == data) {
            OPAL_ERROR_LOG(OPAL_ERR_BAD_PARAM);
            return OPAL_ERR_BAD_PARAM;
        }
        kv->type = OPAL_UINT16;
        kv->data.uint16 = *(uint16_t*)(data);
        break;
    case OPAL_INT:
        if (NULL == data) {
            OPAL_ERROR_LOG(OPAL_ERR_BAD_PARAM);
            return OPAL_ERR_BAD_PARAM;
        }
        kv->type = OPAL_INT;
        kv->data.integer = *(int*)(data);
        break;
    case OPAL_UINT:
        if (NULL == data) {
            OPAL_ERROR_LOG(OPAL_ERR_BAD_PARAM);
            return OPAL_ERR_BAD_PARAM;
        }
        kv->type = OPAL_UINT;
        kv->data.uint = *(unsigned int*)(data);
        break;
    case OPAL_FLOAT:
        if (NULL == data) {
            OPAL_ERROR_LOG(OPAL_ERR_BAD_PARAM);
            return OPAL_ERR_BAD_PARAM;
        }
        kv->type = OPAL_FLOAT;
        kv->data.fval = *(float*)(data);
        break;
    case OPAL_BYTE_OBJECT:
        kv->type = OPAL_BYTE_OBJECT;
        boptr = (opal_byte_object_t*)data;
        if (NULL != boptr && NULL != boptr->bytes && 0 < boptr->size) {
            kv->data.bo.bytes = (uint8_t *) malloc(boptr->size);
            if (NULL == kv->data.bo.bytes) {
                /* previously unchecked - a failed malloc would have
                 * caused memcpy to write through a NULL pointer */
                OPAL_ERROR_LOG(OPAL_ERR_OUT_OF_RESOURCE);
                return OPAL_ERR_OUT_OF_RESOURCE;
            }
            memcpy(kv->data.bo.bytes, boptr->bytes, boptr->size);
            kv->data.bo.size = boptr->size;
        } else {
            /* a NULL/empty byte object is stored as an empty value */
            kv->data.bo.bytes = NULL;
            kv->data.bo.size = 0;
        }
        break;
    default:
        OPAL_ERROR_LOG(OPAL_ERR_NOT_SUPPORTED);
        return OPAL_ERR_NOT_SUPPORTED;
    }
    return OPAL_SUCCESS;
}
/**
 * Unload a datum from an opal_value_t container into caller storage.
 *
 * @param kv    Container to read from; its stored type must match the
 *              requested type or OPAL_ERR_TYPE_MISMATCH is returned.
 * @param data  For the numeric types, *data must already point to
 *              storage large enough for the value - the bytes are
 *              copied in place.  For OPAL_STRING and OPAL_BYTE_OBJECT
 *              a fresh copy is allocated and *data is set to it; the
 *              caller owns (and must free) that copy.
 * @param type  Expected opal data type.
 *
 * @return OPAL_SUCCESS, OPAL_ERR_TYPE_MISMATCH on a type conflict,
 *         OPAL_ERR_OUT_OF_RESOURCE on allocation failure, or
 *         OPAL_ERR_NOT_SUPPORTED for an unhandled type.
 */
int opal_value_unload(opal_value_t *kv,
                      void **data, opal_data_type_t type)
{
    opal_byte_object_t *boptr;

    switch (type) {
    case OPAL_STRING:
        if (OPAL_STRING != kv->type) {
            return OPAL_ERR_TYPE_MISMATCH;
        }
        if (NULL != kv->data.string) {
            *data = strdup(kv->data.string);
            if (NULL == *data) {
                /* strdup returns NULL on allocation failure */
                OPAL_ERROR_LOG(OPAL_ERR_OUT_OF_RESOURCE);
                return OPAL_ERR_OUT_OF_RESOURCE;
            }
        } else {
            *data = NULL;
        }
        break;
    case OPAL_UINT64:
        if (OPAL_UINT64 != kv->type) {
            return OPAL_ERR_TYPE_MISMATCH;
        }
        /* use sizeof instead of the magic constant 8 */
        memcpy(*data, &kv->data.uint64, sizeof(uint64_t));
        break;
    case OPAL_UINT32:
        if (OPAL_UINT32 != kv->type) {
            return OPAL_ERR_TYPE_MISMATCH;
        }
        /* use sizeof instead of the magic constant 4 */
        memcpy(*data, &kv->data.uint32, sizeof(uint32_t));
        break;
    case OPAL_UINT16:
        if (OPAL_UINT16 != kv->type) {
            return OPAL_ERR_TYPE_MISMATCH;
        }
        /* use sizeof instead of the magic constant 2 */
        memcpy(*data, &kv->data.uint16, sizeof(uint16_t));
        break;
    case OPAL_INT:
        if (OPAL_INT != kv->type) {
            return OPAL_ERR_TYPE_MISMATCH;
        }
        memcpy(*data, &kv->data.integer, sizeof(int));
        break;
    case OPAL_UINT:
        if (OPAL_UINT != kv->type) {
            return OPAL_ERR_TYPE_MISMATCH;
        }
        memcpy(*data, &kv->data.uint, sizeof(unsigned int));
        break;
    case OPAL_FLOAT:
        if (OPAL_FLOAT != kv->type) {
            return OPAL_ERR_TYPE_MISMATCH;
        }
        memcpy(*data, &kv->data.fval, sizeof(float));
        break;
    case OPAL_BYTE_OBJECT:
        if (OPAL_BYTE_OBJECT != kv->type) {
            return OPAL_ERR_TYPE_MISMATCH;
        }
        boptr = (opal_byte_object_t*)malloc(sizeof(opal_byte_object_t));
        if (NULL == boptr) {
            /* previously unchecked - a failed malloc would have been
             * dereferenced immediately below */
            OPAL_ERROR_LOG(OPAL_ERR_OUT_OF_RESOURCE);
            return OPAL_ERR_OUT_OF_RESOURCE;
        }
        if (NULL != kv->data.bo.bytes && 0 < kv->data.bo.size) {
            boptr->bytes = (uint8_t *) malloc(kv->data.bo.size);
            if (NULL == boptr->bytes) {
                free(boptr);
                OPAL_ERROR_LOG(OPAL_ERR_OUT_OF_RESOURCE);
                return OPAL_ERR_OUT_OF_RESOURCE;
            }
            memcpy(boptr->bytes, kv->data.bo.bytes, kv->data.bo.size);
            boptr->size = kv->data.bo.size;
        } else {
            /* an empty stored value unloads as an empty byte object */
            boptr->bytes = NULL;
            boptr->size = 0;
        }
        *data = boptr;
        break;
    default:
        OPAL_ERROR_LOG(OPAL_ERR_NOT_SUPPORTED);
        return OPAL_ERR_NOT_SUPPORTED;
    }
    return OPAL_SUCCESS;
}

View File

@ -72,7 +72,6 @@ static void opal_value_construct(opal_value_t* ptr)
{
ptr->key = NULL;
ptr->type = OPAL_UNDEF;
ptr->scope = OPAL_SCOPE_UNDEF;
}
static void opal_value_destruct(opal_value_t* ptr)
{

View File

@ -688,9 +688,6 @@ int opal_dss_pack_value(opal_buffer_t *buffer, const void *src,
if (OPAL_SUCCESS != (ret = opal_dss_pack_string(buffer, &ptr[i]->key, 1, OPAL_STRING))) {
return ret;
}
if (OPAL_SUCCESS != (ret = opal_dss_pack_data_type(buffer, &ptr[i]->scope, 1, OPAL_DATA_SCOPE_T))) {
return ret;
}
if (OPAL_SUCCESS != (ret = opal_dss_pack_data_type(buffer, &ptr[i]->type, 1, OPAL_DATA_TYPE))) {
return ret;
}

View File

@ -538,7 +538,6 @@ int opal_dss_print_node_stat(char **output, char *prefix, opal_node_stats_t *src
int opal_dss_print_value(char **output, char *prefix, opal_value_t *src, opal_data_type_t type)
{
char *prefx;
char *scope;
/* deal with NULL prefix */
if (NULL == prefix) asprintf(&prefx, " ");
@ -551,50 +550,34 @@ int opal_dss_print_value(char **output, char *prefix, opal_value_t *src, opal_da
return OPAL_SUCCESS;
}
if (OPAL_SCOPE_UNDEF == src->scope) {
scope = "UNDEF";
} else if (OPAL_SCOPE_PEER == src->scope) {
scope = "PEER";
} else if (OPAL_SCOPE_NON_PEER == src->scope) {
scope = "NON_PEER";
} else if (OPAL_SCOPE_GLOBAL == src->scope) {
scope = "GLOBAL";
} else if (OPAL_SCOPE_INTERNAL == src->scope) {
scope = "INTERNAL";
} else if (OPAL_SCOPE_ALL == src->scope) {
scope = "ALL";
} else {
scope = "INTERNAL";
}
switch (src->type) {
case OPAL_STRING:
asprintf(output, "%sOPAL_VALUE: Data type: OPAL_STRING\tKey: %s\tScope:%s\tValue: %s",
prefx, src->key, scope, src->data.string);
asprintf(output, "%sOPAL_VALUE: Data type: OPAL_STRING\tKey: %s\tValue: %s",
prefx, src->key, src->data.string);
break;
case OPAL_INT16:
asprintf(output, "%sOPAL_VALUE: Data type: OPAL_STRING\tKey: %s\tScope:%s\tValue: %d",
prefx, src->key, scope, (int)src->data.int16);
asprintf(output, "%sOPAL_VALUE: Data type: OPAL_STRING\tKey: %s\tValue: %d",
prefx, src->key, (int)src->data.int16);
break;
case OPAL_INT32:
asprintf(output, "%sOPAL_VALUE: Data type: OPAL_INT32\tKey: %s\tScope:%s\tValue: %d",
prefx, src->key, scope, src->data.int32);
asprintf(output, "%sOPAL_VALUE: Data type: OPAL_INT32\tKey: %s\tValue: %d",
prefx, src->key, src->data.int32);
break;
case OPAL_PID:
asprintf(output, "%sOPAL_VALUE: Data type: OPAL_STRING\tKey: %s\tScope:%s\tValue: %lu",
prefx, src->key, scope, (unsigned long)src->data.pid);
asprintf(output, "%sOPAL_VALUE: Data type: OPAL_STRING\tKey: %s\tValue: %lu",
prefx, src->key, (unsigned long)src->data.pid);
break;
case OPAL_FLOAT:
asprintf(output, "%sOPAL_VALUE: Data type: OPAL_FLOAT\tKey: %s\tScope:%s\tValue: %f",
prefx, src->key, scope, src->data.fval);
asprintf(output, "%sOPAL_VALUE: Data type: OPAL_FLOAT\tKey: %s\tValue: %f",
prefx, src->key, src->data.fval);
break;
case OPAL_TIMEVAL:
asprintf(output, "%sOPAL_VALUE: Data type: OPAL_TIMEVAL\tKey: %s\tScope:%s\tValue: %ld.%06ld", prefx,
src->key, scope, (long)src->data.tv.tv_sec, (long)src->data.tv.tv_usec);
asprintf(output, "%sOPAL_VALUE: Data type: OPAL_TIMEVAL\tKey: %s\tValue: %ld.%06ld", prefx,
src->key, (long)src->data.tv.tv_sec, (long)src->data.tv.tv_usec);
break;
default:
asprintf(output, "%sOPAL_VALUE: Data type: UNKNOWN\tKey: %s\tScope:%s\tValue: UNPRINTABLE",
prefx, src->key, scope);
asprintf(output, "%sOPAL_VALUE: Data type: UNKNOWN\tKey: %s\tValue: UNPRINTABLE",
prefx, src->key);
break;
}
free(prefx);

View File

@ -96,33 +96,10 @@ typedef uint64_t opal_identifier_t;
#define OPAL_VALUE2_GREATER -1
#define OPAL_EQUAL 0
/* define a flag to indicate the scope of data being
* stored in the database. The following options are supported:
*
* PEER: data to be shared with our peers
* NON_PEER: data to be shared only with non-peer
* processes (i.e., processes from other jobs)
* GLOBAL: data to be shared with all processes
* INTERNAL: data is to be internally stored in this app
* ALL: any of the above
*
* REFER: indicates the value is stored by reference
*/
typedef uint8_t opal_scope_t;
#define OPAL_SCOPE_UNDEF 0x00
#define OPAL_SCOPE_PEER 0x01
#define OPAL_SCOPE_NON_PEER 0x02
#define OPAL_SCOPE_GLOBAL 0x03
#define OPAL_SCOPE_INTERNAL 0x08
#define OPAL_SCOPE_ALL 0x0f
#define OPAL_SCOPE_REFER 0x10
#define OPAL_DATA_SCOPE_T OPAL_UINT8
/* Data value object */
typedef struct {
opal_list_item_t super; /* required for this to be on lists */
char *key; /* key string */
opal_scope_t scope;
opal_data_type_t type; /* the type of value stored */
union {
uint8_t byte;

View File

@ -920,10 +920,6 @@ int opal_dss_unpack_value(opal_buffer_t *buffer, void *dest,
return ret;
}
m=1;
if (OPAL_SUCCESS != (ret = opal_dss_unpack_data_type(buffer, &ptr[i]->scope, &m, OPAL_DATA_SCOPE_T))) {
return ret;
}
m=1;
if (OPAL_SUCCESS != (ret = opal_dss_unpack_data_type(buffer, &ptr[i]->type, &m, OPAL_DATA_TYPE))) {
return ret;
}

View File

@ -5,7 +5,7 @@
* Copyright (c) 2011 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2011-2013 Los Alamos National Security, LLC. All
* rights reserved.
* Copyright (c) 2013 Intel, Inc. All rights reserved.
* Copyright (c) 2013-2014 Intel, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -17,6 +17,8 @@
#include "opal/constants.h"
#include "opal/types.h"
#include "opal/util/output.h"
#include <string.h>
#include <pmi.h>
#if WANT_PMI2_SUPPORT
@ -38,6 +40,8 @@ bool mca_common_pmi_init (void) {
{
int spawned, size, rank, appnum;
opal_output(0, "INIT PMI");
/* if we can't startup PMI, we can't be used */
if (PMI2_Initialized ()) {
return true;

View File

@ -1,78 +0,0 @@
/*
* Copyright (c) 2010 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2012-2013 Los Alamos National Security, Inc. All rights reserved.
* Copyright (c) 2013 Intel, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
/** @file:
*/
#ifndef MCA_DB_BASE_H
#define MCA_DB_BASE_H
#include "opal_config.h"
#include "opal/types.h"
#include "opal/mca/mca.h"
#include "opal/mca/base/mca_base_framework.h"
#include "opal/class/opal_list.h"
#include "opal/dss/dss.h"
#include "opal/mca/db/db.h"
BEGIN_C_DECLS
OPAL_DECLSPEC extern mca_base_framework_t opal_db_base_framework;
/**
* Select a db module
*/
OPAL_DECLSPEC int opal_db_base_select(bool restrict_local);
typedef struct {
opal_list_item_t super;
int pri;
opal_db_base_module_t *module;
opal_db_base_component_t *component;
} opal_db_active_module_t;
OBJ_CLASS_DECLARATION(opal_db_active_module_t);
typedef struct {
opal_identifier_t my_id;
bool id_set;
opal_list_t store_order;
opal_list_t fetch_order;
} opal_db_base_t;
OPAL_DECLSPEC extern opal_db_base_t opal_db_base;
OPAL_DECLSPEC void opal_db_base_set_id(const opal_identifier_t *proc);
OPAL_DECLSPEC int opal_db_base_store(const opal_identifier_t *proc,
opal_scope_t scope,
const char *key, const void *object,
opal_data_type_t type);
OPAL_DECLSPEC int opal_db_base_store_pointer(const opal_identifier_t *proc,
opal_value_t *kv);
OPAL_DECLSPEC int opal_db_base_fetch(const opal_identifier_t *proc,
const char *key, void **data,
opal_data_type_t type);
OPAL_DECLSPEC int opal_db_base_fetch_pointer(const opal_identifier_t *proc,
const char *key,
void **data, opal_data_type_t type);
OPAL_DECLSPEC int opal_db_base_fetch_multiple(const opal_identifier_t *proc,
opal_scope_t scope,
const char *key,
opal_list_t *kvs);
OPAL_DECLSPEC int opal_db_base_remove_data(const opal_identifier_t *proc,
const char *key);
OPAL_DECLSPEC int opal_db_base_add_log(const char *table,
const opal_value_t *kvs, int nkvs);
OPAL_DECLSPEC void opal_db_base_commit(const opal_identifier_t *proc);
END_C_DECLS
#endif

View File

@ -1,272 +0,0 @@
/*
* Copyright (c) 2012-2013 Los Alamos National Security, Inc. All rights reserved.
* Copyright (c) 2013 Intel Inc. All rights reserved
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "opal_config.h"
#include "opal/constants.h"
#include "opal_stdint.h"
#include "opal/mca/mca.h"
#include "opal/util/error.h"
#include "opal/util/output.h"
#include "opal/mca/base/base.h"
#include "opal/dss/dss_types.h"
#include "opal/mca/db/base/base.h"
void opal_db_base_set_id(const opal_identifier_t *proc)
{
/* to protect alignment, copy the data across */
memcpy(&opal_db_base.my_id, proc, sizeof(opal_identifier_t));
opal_db_base.id_set = true;
}
int opal_db_base_store(const opal_identifier_t *proc,
opal_scope_t scope,
const char *key, const void *object,
opal_data_type_t type)
{
opal_db_active_module_t *mod;
int rc;
if (!opal_db_base.id_set) {
return OPAL_ERR_FATAL;
}
/* cycle thru the active modules until one agrees to perform the op */
OPAL_LIST_FOREACH(mod, &opal_db_base.store_order, opal_db_active_module_t) {
if (NULL == mod->module->store) {