openmpi/ompi/info/info.c

/* -*- Mode: C; c-basic-offset:4 ; -*- */
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2007 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2007-2012 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2009 Sun Microsystems, Inc. All rights reserved.
* Copyright (c) 2012 Los Alamos National Security, LLC.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "ompi/constants.h"
#ifdef HAVE_STRING_H
#include <string.h>
#endif
#include <errno.h>
#ifdef HAVE_STDLIB_H
#include <stdlib.h>
#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <limits.h>
#include <ctype.h>
#ifdef HAVE_SYS_UTSNAME_H
#include <sys/utsname.h>
#endif
#include <assert.h>
#include "opal/util/argv.h"
#include "opal/util/opal_getcwd.h"
#include "opal/util/output.h"
#include "opal/util/strncpy.h"
#include "ompi/info/info.h"
#include "ompi/runtime/mpiruntime.h"
#include "ompi/runtime/params.h"
/*
* Global variables
*/
ompi_predefined_info_t ompi_mpi_info_null;
ompi_predefined_info_t *ompi_mpi_info_null_addr = &ompi_mpi_info_null;
ompi_predefined_info_t ompi_mpi_info_env;
/*
* Local functions
*/
static void info_constructor(ompi_info_t *info);
static void info_destructor(ompi_info_t *info);
static void info_entry_constructor(ompi_info_entry_t *entry);
static void info_entry_destructor(ompi_info_entry_t *entry);
static ompi_info_entry_t *info_find_key (ompi_info_t *info, char *key);
/*
* ompi_info_t classes
*/
OBJ_CLASS_INSTANCE(ompi_info_t,
opal_list_t,
info_constructor,
info_destructor);
/*
* ompi_info_entry_t classes
*/
OBJ_CLASS_INSTANCE(ompi_info_entry_t,
opal_list_item_t,
info_entry_constructor,
info_entry_destructor);
/*
* The global fortran <-> C translation table
*/
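/*
* Entries are added by info_constructor() and removed by
* info_destructor(); the Fortran handle value of an info object is its
* index in this array (stored in i_f_to_c_index).
*/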
opal_pointer_array_t ompi_info_f_to_c_table;
/*
* This function is called during ompi_init and initializes the
* fortran to C translation table. It also fills in the values
* for the MPI_INFO_ENV object
*/
int ompi_info_init(void)
{
char val[MPI_MAX_INFO_VAL];
char *cptr;
/* initialize table */
OBJ_CONSTRUCT(&ompi_info_f_to_c_table, opal_pointer_array_t);
if( OPAL_SUCCESS != opal_pointer_array_init(&ompi_info_f_to_c_table, 0,
OMPI_FORTRAN_HANDLE_MAX, 64) ) {
return OMPI_ERROR;
}
/* Create MPI_INFO_NULL */
OBJ_CONSTRUCT(&ompi_mpi_info_null.info, ompi_info_t);
assert(ompi_mpi_info_null.info.i_f_to_c_index == 0);
/* Create MPI_INFO_ENV */
OBJ_CONSTRUCT(&ompi_mpi_info_env.info, ompi_info_t);
assert(ompi_mpi_info_env.info.i_f_to_c_index == 1);
/* fill the env info object */
/* command for this app_context */
if (NULL != (cptr = getenv("OMPI_COMMAND"))) {
ompi_info_set(&ompi_mpi_info_env.info, "command", cptr);
}
/* space-separated list of argv for this command */
if (NULL != (cptr = getenv("OMPI_ARGV"))) {
ompi_info_set(&ompi_mpi_info_env.info, "argv", cptr);
}
/* max procs for the entire job */
if (NULL != (cptr = getenv("OMPI_MCA_orte_ess_num_procs"))) {
ompi_info_set(&ompi_mpi_info_env.info, "maxprocs", cptr);
/* Open MPI does not support the "soft" option, so set it to maxprocs */
ompi_info_set(&ompi_mpi_info_env.info, "soft", cptr);
}
/* local host name */
gethostname(val, MPI_MAX_INFO_VAL);
ompi_info_set(&ompi_mpi_info_env.info, "host", val);
/* architecture name */
if (NULL != (cptr = getenv("OMPI_MCA_orte_cpu_type"))) {
ompi_info_set(&ompi_mpi_info_env.info, "arch", cptr);
}
#ifdef HAVE_SYS_UTSNAME_H
else {
struct utsname sysname;
uname(&sysname);
cptr = sysname.machine;
ompi_info_set(&ompi_mpi_info_env.info, "arch", cptr);
}
#endif
/* initial working dir of this process - only set when
* run by mpiexec as we otherwise have no reliable way
* of determining the value
*/
if (NULL != (cptr = getenv("OMPI_MCA_initial_wdir"))) {
ompi_info_set(&ompi_mpi_info_env.info, "wdir", cptr);
}
/* provide the REQUESTED thread level - may be different
* than the ACTUAL thread level you get.
* ugly, but have to do a switch to find the string representation */
switch (ompi_mpi_thread_requested) {
case MPI_THREAD_SINGLE:
ompi_info_set(&ompi_mpi_info_env.info, "thread_level", "MPI_THREAD_SINGLE");
break;
case MPI_THREAD_FUNNELED:
ompi_info_set(&ompi_mpi_info_env.info, "thread_level", "MPI_THREAD_FUNNELED");
break;
case MPI_THREAD_SERIALIZED:
ompi_info_set(&ompi_mpi_info_env.info, "thread_level", "MPI_THREAD_SERIALIZED");
break;
case MPI_THREAD_MULTIPLE:
ompi_info_set(&ompi_mpi_info_env.info, "thread_level", "MPI_THREAD_MULTIPLE");
break;
default:
/* do nothing - don't know the value */
break;
}
/**** now some OMPI-specific values that other MPIs may not provide ****/
/* the number of app_contexts in this job */
if (NULL != (cptr = getenv("OMPI_NUM_APP_CTX"))) {
ompi_info_set(&ompi_mpi_info_env.info, "ompi_num_apps", cptr);
}
/* space-separated list of first MPI rank of each app_context */
if (NULL != (cptr = getenv("OMPI_FIRST_RANKS"))) {
ompi_info_set(&ompi_mpi_info_env.info, "ompi_first_rank", cptr);
}
/* space-separated list of num procs for each app_context */
if (NULL != (cptr = getenv("OMPI_APP_CTX_NUM_PROCS"))) {
ompi_info_set(&ompi_mpi_info_env.info, "ompi_np", cptr);
}
/* location of the directory containing any prepositioned files
* the user may have requested
*/
if (NULL != (cptr = getenv("OMPI_FILE_LOCATION"))) {
ompi_info_set(&ompi_mpi_info_env.info, "ompi_positioned_file_dir", cptr);
}
/* All done */
return OMPI_SUCCESS;
}
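/*
* For illustration only: once ompi_info_init() has run, an application
* can read the keys set above through the predefined MPI_INFO_ENV
* handle. A minimal sketch (assumes the "command" key was set for this
* job, i.e. the process was launched by mpiexec):
*
*   char cmd[MPI_MAX_INFO_VAL];
*   int flag;
*   MPI_Info_get(MPI_INFO_ENV, "command", MPI_MAX_INFO_VAL - 1, cmd, &flag);
*   if (flag) printf("launched as: %s\n", cmd);
*/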
/*
* Duplicate an info
*/
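/*
* Note: the caller must pass an already-constructed info object in
* *newinfo; this routine only copies the (key, value) entries into it.
*/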
int ompi_info_dup (ompi_info_t *info, ompi_info_t **newinfo)
{
int err;
opal_list_item_t *item;
ompi_info_entry_t *iterator;
OPAL_THREAD_LOCK(info->i_lock);
for (item = opal_list_get_first(&(info->super));
item != opal_list_get_end(&(info->super));
item = opal_list_get_next(iterator)) {
iterator = (ompi_info_entry_t *) item;
err = ompi_info_set(*newinfo, iterator->ie_key, iterator->ie_value);
if (MPI_SUCCESS != err) {
OPAL_THREAD_UNLOCK(info->i_lock);
return err;
}
}
OPAL_THREAD_UNLOCK(info->i_lock);
return MPI_SUCCESS;
}
/*
* Set a value on the info
*/
int ompi_info_set (ompi_info_t *info, char *key, char *value)
{
char *new_value;
ompi_info_entry_t *new_info;
ompi_info_entry_t *old_info;
new_value = strdup(value);
if (NULL == new_value) {
return MPI_ERR_NO_MEM;
}
OPAL_THREAD_LOCK(info->i_lock);
old_info = info_find_key (info, key);
if (NULL != old_info) {
/*
* key already exists. remove the value associated with it
*/
free(old_info->ie_value);
old_info->ie_value = new_value;
} else {
new_info = OBJ_NEW(ompi_info_entry_t);
if (NULL == new_info) {
OPAL_THREAD_UNLOCK(info->i_lock);
return MPI_ERR_NO_MEM;
}
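/* ie_key holds MPI_MAX_INFO_KEY + 1 bytes and the entry constructor
* guarantees the final byte is '\0', so an over-length key is silently
* truncated here rather than overflowing */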
strncpy (new_info->ie_key, key, MPI_MAX_INFO_KEY);
new_info->ie_value = new_value;
opal_list_append (&(info->super), (opal_list_item_t *) new_info);
}
OPAL_THREAD_UNLOCK(info->i_lock);
return MPI_SUCCESS;
}
/*
* Free an info handle and all of its keys and values.
*/
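/*
* The i_freed flag is recorded so that, when ompi_debug_no_free_handles
* keeps the object alive (see the extra RETAIN in info_constructor()),
* ompi_info_finalize() can tell "freed but retained" handles apart from
* genuine leaks.
*/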
int ompi_info_free (ompi_info_t **info)
{
(*info)->i_freed = true;
OBJ_RELEASE(*info);
*info = MPI_INFO_NULL;
return MPI_SUCCESS;
}
/*
* Get a value from an info
*/
int ompi_info_get (ompi_info_t *info, char *key, int valuelen,
char *value, int *flag)
{
ompi_info_entry_t *search;
int value_length;
OPAL_THREAD_LOCK(info->i_lock);
search = info_find_key (info, key);
if (NULL == search){
*flag = 0;
} else {
/*
* We have found the element, so we can return the value
* Set the flag, value_length and value
*/
*flag = 1;
value_length = strlen(search->ie_value);
/*
* If the stored value is shorter than valuelen, then
* we can copy the entire value out. Else, we have to
* copy ONLY valuelen bytes out
*/
if (value_length < valuelen ) {
strcpy(value, search->ie_value);
} else {
opal_strncpy(value, search->ie_value, valuelen);
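/* Terminate the copied string explicitly. Normally the '\0' goes at
* value[valuelen]; when valuelen is already MPI_MAX_INFO_VAL the
* terminator is kept inside the first MPI_MAX_INFO_VAL bytes so we
* never write past a buffer of that size */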
if (MPI_MAX_INFO_VAL == valuelen) {
value[valuelen-1] = 0;
} else {
value[valuelen] = 0;
}
}
}
OPAL_THREAD_UNLOCK(info->i_lock);
return MPI_SUCCESS;
}
/*
* Similar to ompi_info_get(), but cast the result into a boolean.
* The rules: leading and trailing whitespace are ignored; a value that
* starts with a digit is true iff atoi() yields a nonzero number;
* "yes" and "true" (case-insensitive) are true; "no" and "false" are
* false; any other value leaves *value set to false.
*/
int ompi_info_get_bool(ompi_info_t *info, char *key, bool *value, int *flag)
{
char *ptr;
char str[256];
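/* defensively terminate the local buffer; ompi_info_get() is asked
* for at most sizeof(str) - 1 characters below */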
str[sizeof(str) - 1] = '\0';
ompi_info_get(info, key, sizeof(str) - 1, str, flag);
if (*flag) {
*value = false;
/* Trim trailing whitespace, starting from the end of the string that
* was actually returned rather than the end of the buffer; the next
* loop then skips leading whitespace */
ptr = str + strlen(str);
while (ptr > str && isspace((unsigned char) ptr[-1])) {
*--ptr = '\0';
}
ptr = str;
while (ptr < str + sizeof(str) - 1 && *ptr != '\0' &&
isspace(*ptr)) {
++ptr;
}
if ('\0' != *ptr) {
if (isdigit(*ptr)) {
*value = (bool) atoi(ptr);
} else if (0 == strcasecmp(ptr, "yes") ||
0 == strcasecmp(ptr, "true")) {
*value = true;
} else if (0 != strcasecmp(ptr, "no") &&
0 != strcasecmp(ptr, "false")) {
/* RHC unrecognized value -- print a warning? */
}
}
}
return MPI_SUCCESS;
}
/*
* Delete a key from an info
*/
int ompi_info_delete (ompi_info_t *info, char *key)
{
ompi_info_entry_t *search;
OPAL_THREAD_LOCK(info->i_lock);
search = info_find_key (info, key);
if (NULL == search){
OPAL_THREAD_UNLOCK(info->i_lock);
return MPI_ERR_INFO_NOKEY;
} else {
/*
* An entry with this key value was found. Remove the item
* and free the memory allocated to it.
* As this key *must* be available, we do not check for errors.
*/
opal_list_remove_item (&(info->super),
(opal_list_item_t *)search);
OBJ_RELEASE(search);
}
OPAL_THREAD_UNLOCK(info->i_lock);
return MPI_SUCCESS;
}
/*
* Return the length of a value
*/
int ompi_info_get_valuelen (ompi_info_t *info, char *key, int *valuelen,
int *flag)
{
ompi_info_entry_t *search;
OPAL_THREAD_LOCK(info->i_lock);
search = info_find_key (info, key);
if (NULL == search){
*flag = 0;
} else {
/*
* We have found the element, so we can return the length of its value.
* Set the flag and *valuelen
*/
*flag = 1;
*valuelen = strlen(search->ie_value);
}
OPAL_THREAD_UNLOCK(info->i_lock);
return MPI_SUCCESS;
}
/*
* Get the nth key
*/
int ompi_info_get_nthkey (ompi_info_t *info, int n, char *key)
{
ompi_info_entry_t *iterator;
/*
* Iterate over and over till we get to the nth key
*/
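/* n is zero-based: with n == 0 the first key is returned without
* advancing. The MPI layer is assumed to have already range-checked n
* against the number of keys. */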
OPAL_THREAD_LOCK(info->i_lock);
for (iterator = (ompi_info_entry_t *)opal_list_get_first(&(info->super));
n > 0;
--n) {
iterator = (ompi_info_entry_t *)opal_list_get_next(iterator);
if (opal_list_get_end(&(info->super)) ==
(opal_list_item_t *) iterator) {
OPAL_THREAD_UNLOCK(info->i_lock);
return MPI_ERR_ARG;
}
}
/*
* iterator now points at the nth entry, so copy its key out for the
* caller (at most MPI_MAX_INFO_KEY characters)
*/
strncpy(key, iterator->ie_key, MPI_MAX_INFO_KEY);
OPAL_THREAD_UNLOCK(info->i_lock);
return MPI_SUCCESS;
}
/*
* Shut down MPI_Info handling
*/
int ompi_info_finalize(void)
{
size_t i, max;
ompi_info_t *info;
opal_list_item_t *item;
ompi_info_entry_t *entry;
bool found = false;
/* Release MPI_INFO_NULL. Do this so that we don't get a bogus
leak report on it. Plus, it's statically allocated, so we
don't want to call OBJ_RELEASE on it. */
OBJ_DESTRUCT(&ompi_mpi_info_null.info);
opal_pointer_array_set_item(&ompi_info_f_to_c_table, 0, NULL);
/* ditto for MPI_INFO_ENV */
OBJ_DESTRUCT(&ompi_mpi_info_env.info);
opal_pointer_array_set_item(&ompi_info_f_to_c_table, 1, NULL);
/* Go through the f2c table and see if anything is left. Free them
all. */
max = opal_pointer_array_get_size(&ompi_info_f_to_c_table);
for (i = 2; i < max; ++i) {
info = (ompi_info_t *)opal_pointer_array_get_item(&ompi_info_f_to_c_table, i);
/* If the info was freed but still exists because the user
told us to never free handles, then do an OBJ_RELEASE on it
and all is well. Then get the value again and see if it has
actually been freed. */
if (NULL != info && ompi_debug_no_free_handles && info->i_freed) {
OBJ_RELEASE(info);
info = (ompi_info_t *)opal_pointer_array_get_item(&ompi_info_f_to_c_table, i);
}
/* If it still exists here and was never freed, then it's an
orphan */
if (NULL != info) {
/* If the user wanted warnings about MPI object leaks, print out
a message */
if (!info->i_freed && ompi_debug_show_handle_leaks) {
opal_output(0, "WARNING: MPI_Info still allocated at MPI_FINALIZE");
for (item = opal_list_get_first(&(info->super));
opal_list_get_end(&(info->super)) != item;
item = opal_list_get_next(item)) {
entry = (ompi_info_entry_t *) item;
opal_output(0, "WARNING: key=\"%s\", value=\"%s\"",
entry->ie_key,
NULL != entry->ie_value ? entry->ie_value : "(null)");
found = true;
}
OBJ_RELEASE(info);
}
/* Don't bother setting each element back down to NULL; it
would just take a lot of thread locks / unlocks and
since we're destroying everything, it isn't worth it */
if (!found && ompi_debug_show_handle_leaks) {
opal_output(0, "WARNING: (no keys)");
}
}
}
/* All done -- destroy the table */
OBJ_DESTRUCT(&ompi_info_f_to_c_table);
return OMPI_SUCCESS;
}
/*
* This function is invoked when OBJ_NEW() is called. Here, we add this
* info pointer to the table and then store its index as the handle
*/
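/*
* Note that the predefined MPI_INFO_NULL and MPI_INFO_ENV objects are
* OBJ_CONSTRUCT'ed first in ompi_info_init(), which is why they occupy
* slots 0 and 1 of the translation table.
*/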
static void info_constructor(ompi_info_t *info)
{
info->i_f_to_c_index = opal_pointer_array_add(&ompi_info_f_to_c_table,
info);
info->i_lock = OBJ_NEW(opal_mutex_t);
info->i_freed = false;
/* If the user doesn't want us to ever free it, then add an extra
RETAIN here */
if (ompi_debug_no_free_handles) {
OBJ_RETAIN(&(info->super));
}
}
/*
* This function is called during OBJ_DESTRUCT of "info". When this
* is done, we need to remove the entry from the ompi fortran to C
* translation table
*/
static void info_destructor(ompi_info_t *info)
{
opal_list_item_t *item;
ompi_info_entry_t *iterator;
/* Remove every key in the list */
for (item = opal_list_remove_first(&(info->super));
NULL != item;
item = opal_list_remove_first(&(info->super))) {
iterator = (ompi_info_entry_t *) item;
OBJ_RELEASE(iterator);
}
/* reset the &ompi_info_f_to_c_table entry - make sure that the
entry is in the table */
if (MPI_UNDEFINED != info->i_f_to_c_index &&
NULL != opal_pointer_array_get_item(&ompi_info_f_to_c_table,
info->i_f_to_c_index)){
opal_pointer_array_set_item(&ompi_info_f_to_c_table,
info->i_f_to_c_index, NULL);
}
/* Release the lock */
OBJ_RELEASE(info->i_lock);
}
/*
* ompi_info_entry_t interface functions
*/
static void info_entry_constructor(ompi_info_entry_t *entry)
{
memset(entry->ie_key, 0, sizeof(entry->ie_key));
entry->ie_key[MPI_MAX_INFO_KEY] = 0;
}
static void info_entry_destructor(ompi_info_entry_t *entry)
{
if (NULL != entry->ie_value) {
free(entry->ie_value);
}
}
/*
* Find a key
*
* Do NOT thread lock in here -- the calling function is responsible
* for that.
*/
static ompi_info_entry_t *info_find_key (ompi_info_t *info, char *key)
{
ompi_info_entry_t *iterator;
/* No thread locking in here! */
/* Iterate over all the entries. If the key is found, then
* return immediately. Else, the loop will fall off the end
* and NULL is returned
*/
for (iterator = (ompi_info_entry_t *)opal_list_get_first(&(info->super));
opal_list_get_end(&(info->super)) != (opal_list_item_t*) iterator;
iterator = (ompi_info_entry_t *)opal_list_get_next(iterator)) {
if (0 == strcmp(key, iterator->ie_key)) {
return iterator;
}
}
return NULL;
}
int
ompi_info_value_to_int(char *value, int *interp)
{
long tmp;
char *endp;
if (NULL == value || '\0' == value[0]) return OMPI_ERR_BAD_PARAM;
errno = 0;
tmp = strtol(value, &endp, 10);
/* we found something not a number */
if (*endp != '\0') return OMPI_ERR_BAD_PARAM;
/* overflow or underflow */
if (ERANGE == errno) return OMPI_ERR_BAD_PARAM;
*interp = (int) tmp;
return OMPI_SUCCESS;
}
int
ompi_info_value_to_bool(char *value, bool *interp)
{
int tmp;
/* idiot case */
if (NULL == value || NULL == interp) return OMPI_ERR_BAD_PARAM;
/* is it true / false? */
if (0 == strcmp(value, "true")) {
*interp = true;
return OMPI_SUCCESS;
} else if (0 == strcmp(value, "false")) {
*interp = false;
return OMPI_SUCCESS;
/* is it a number? */
} else if (OMPI_SUCCESS == ompi_info_value_to_int(value, &tmp)) {
if (tmp == 0) {
*interp = false;
} else {
*interp = true;
}
return OMPI_SUCCESS;
}
return OMPI_ERR_BAD_PARAM;
}