
* rename locking code from ompi to opal

This commit was SVN r6327.
This commit is contained in:
Brian Barrett 2005-07-03 22:45:48 +00:00
parent ccd2624e3f
commit 39dbeeedfb
286 changed files with 1952 additions and 1952 deletions
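
The hunks below are a mechanical rename of the threading/locking layer from the ompi_ prefix to the opal_ prefix: headers move from "threads/..." to "opal/threads/...", ompi_mutex_t / ompi_condition_t / ompi_thread_t become opal_mutex_t / opal_condition_t / opal_thread_t, and the OMPI_THREAD_LOCK / OMPI_THREAD_UNLOCK / OMPI_THREAD_ADD32 wrappers and ompi_using_threads() become their OPAL_ / opal_ counterparts. A minimal sketch of the resulting usage pattern, assuming the Open MPI source tree and its headers; the struct and function names here are invented for illustration, while the types, macros, and includes are taken directly from the hunks:

#include "ompi_config.h"
#include "opal/class/opal_object.h"
#include "opal/threads/mutex.h"            /* was: "threads/mutex.h" */

/* hypothetical module state guarded by an opal mutex */
struct example_state_t {
    opal_mutex_t lock;                     /* was: ompi_mutex_t */
    int counter;
};

static void example_state_init(struct example_state_t *s)
{
    OBJ_CONSTRUCT(&s->lock, opal_mutex_t); /* was: ..., ompi_mutex_t */
    s->counter = 0;
}

static void example_state_increment(struct example_state_t *s)
{
    OPAL_THREAD_LOCK(&s->lock);            /* was: OMPI_THREAD_LOCK */
    s->counter++;
    OPAL_THREAD_UNLOCK(&s->lock);          /* was: OMPI_THREAD_UNLOCK */
}

These wrappers only take effect when threads are in use, which is why the free-list and shared-memory hunks below also switch the explicit ompi_using_threads() / ompi_mutex_lock() calls to opal_using_threads() / opal_mutex_lock().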


@ -190,7 +190,7 @@
#include "ompi_config.h"
#include "attribute/attribute.h"
#include "threads/mutex.h"
#include "opal/threads/mutex.h"
#include "include/constants.h"
#include "datatype/datatype.h"
#include "communicator/communicator.h"
@ -250,7 +250,7 @@
&f_key, &attr_val, keyval_obj->extra_state, &f_err); \
if (MPI_SUCCESS != OMPI_FINT_2_INT(f_err)) { \
if (need_lock) { \
OMPI_THREAD_UNLOCK(&alock); \
OPAL_THREAD_UNLOCK(&alock); \
} \
return OMPI_FINT_2_INT(f_err); \
} \
@ -263,7 +263,7 @@
&f_key, &attr_val, keyval_obj->extra_state, &f_err); \
if (MPI_SUCCESS != OMPI_FINT_2_INT(f_err)) { \
if (need_lock) { \
OMPI_THREAD_UNLOCK(&alock); \
OPAL_THREAD_UNLOCK(&alock); \
} \
return OMPI_FINT_2_INT(f_err); \
} \
@ -277,7 +277,7 @@
key, attr_val, \
keyval_obj->extra_state)) != MPI_SUCCESS) {\
if (need_lock) { \
OMPI_THREAD_UNLOCK(&alock); \
OPAL_THREAD_UNLOCK(&alock); \
} \
return err;\
} \
@ -300,7 +300,7 @@
&f_key, keyval_obj->extra_state, \
&in, &out, &f_flag, &f_err); \
if (MPI_SUCCESS != OMPI_FINT_2_INT(f_err)) { \
OMPI_THREAD_UNLOCK(&alock); \
OPAL_THREAD_UNLOCK(&alock); \
return OMPI_FINT_2_INT(f_err); \
} \
out_attr->av_value = (void*) 0; \
@ -316,7 +316,7 @@
&f_key, keyval_obj->extra_state, &in, &out, \
&f_flag, &f_err); \
if (MPI_SUCCESS != OMPI_FINT_2_INT(f_err)) { \
OMPI_THREAD_UNLOCK(&alock); \
OPAL_THREAD_UNLOCK(&alock); \
return OMPI_FINT_2_INT(f_err); \
} \
out_attr->av_value = (void *) out; \
@ -330,7 +330,7 @@
if ((err = (*((keyval_obj->copy_attr_fn).attr_##type##_copy_fn)) \
((ompi_##type##_t *)old_object, key, keyval_obj->extra_state, \
in, &out, &flag)) != MPI_SUCCESS) { \
OMPI_THREAD_UNLOCK(&alock); \
OPAL_THREAD_UNLOCK(&alock); \
return err; \
} \
out_attr->av_value = out; \
@ -411,7 +411,7 @@ static unsigned int int_pos = 12345;
* -- they're not in the performance-critical portions of the code.
* So why bother?
*/
static ompi_mutex_t alock;
static opal_mutex_t alock;
#endif /* OMPI_HAVE_THREAD_SUPPORT */
@ -442,10 +442,10 @@ ompi_attrkey_item_destruct(ompi_attrkey_item_t *item)
{
/* Remove the key entry from the hash and free the key */
OMPI_THREAD_LOCK(&alock);
OPAL_THREAD_LOCK(&alock);
opal_hash_table_remove_value_uint32(keyval_hash, item->key);
FREE_KEY(item->key);
OMPI_THREAD_UNLOCK(&alock);
OPAL_THREAD_UNLOCK(&alock);
}
@ -524,12 +524,12 @@ int ompi_attr_create_keyval(ompi_attribute_type_t type,
/* Create a new unique key and fill the hash */
OMPI_THREAD_LOCK(&alock);
OPAL_THREAD_LOCK(&alock);
ret = CREATE_KEY(key);
if (OMPI_SUCCESS == ret) {
ret = opal_hash_table_set_value_uint32(keyval_hash, *key, attr);
}
OMPI_THREAD_UNLOCK(&alock);
OPAL_THREAD_UNLOCK(&alock);
if (OMPI_SUCCESS != ret) {
return ret;
}
@ -563,10 +563,10 @@ int ompi_attr_free_keyval(ompi_attribute_type_t type, int *key,
/* Find the key-value pair */
OMPI_THREAD_LOCK(&alock);
OPAL_THREAD_LOCK(&alock);
ret = opal_hash_table_get_value_uint32(keyval_hash, *key,
(void **) &key_item);
OMPI_THREAD_UNLOCK(&alock);
OPAL_THREAD_UNLOCK(&alock);
if ((OMPI_SUCCESS != ret) || (NULL == key_item) ||
(key_item->attr_type != type) ||
@ -609,7 +609,7 @@ int ompi_attr_delete(ompi_attribute_type_t type, void *object,
keyval_lock, so we should not try to lock it again. */
if (need_lock) {
OMPI_THREAD_LOCK(&alock);
OPAL_THREAD_LOCK(&alock);
}
/* Check if the key is valid in the master keyval hash */
@ -667,7 +667,7 @@ int ompi_attr_delete(ompi_attribute_type_t type, void *object,
exit:
if (need_lock) {
OMPI_THREAD_UNLOCK(&alock);
OPAL_THREAD_UNLOCK(&alock);
}
/* Decrement the ref count for the key, and if ref count is 0,
@ -830,7 +830,7 @@ int ompi_attr_copy_all(ompi_attribute_type_t type, void *old_object,
thread modify the structure of the keyval hash or bitmap while
we're traversing it */
OMPI_THREAD_LOCK(&alock);
OPAL_THREAD_LOCK(&alock);
/* Get the first key-attr in the object's key hash */
ret = opal_hash_table_get_first_key_uint32(oldkeyhash, &key,
@ -900,7 +900,7 @@ int ompi_attr_copy_all(ompi_attribute_type_t type, void *old_object,
/* All done */
OMPI_THREAD_UNLOCK(&alock);
OPAL_THREAD_UNLOCK(&alock);
return MPI_SUCCESS;
}
@ -931,7 +931,7 @@ int ompi_attr_delete_all(ompi_attribute_type_t type, void *object,
thread modify the structure of the keyval hash or bitmap while
we're traversing it */
OMPI_THREAD_LOCK(&alock);
OPAL_THREAD_LOCK(&alock);
/* Get the first key in local object's key hash */
key_ret = opal_hash_table_get_first_key_uint32(keyhash,
@ -958,7 +958,7 @@ int ompi_attr_delete_all(ompi_attribute_type_t type, void *object,
/* All done */
OMPI_THREAD_UNLOCK(&alock);
OPAL_THREAD_UNLOCK(&alock);
return del_ret;
}
@ -993,7 +993,7 @@ static int set_value(ompi_attribute_type_t type, void *object,
so we should not try to lock it again. */
if (need_lock) {
OMPI_THREAD_LOCK(&alock);
OPAL_THREAD_LOCK(&alock);
}
ret = opal_hash_table_get_value_uint32(keyval_hash, key,
(void **) &key_item);
@ -1004,7 +1004,7 @@ static int set_value(ompi_attribute_type_t type, void *object,
(key_item->attr_type != type) ||
((!predefined) && (key_item->attr_flag & OMPI_KEYVAL_PREDEFINED))) {
if (need_lock) {
OMPI_THREAD_UNLOCK(&alock);
OPAL_THREAD_UNLOCK(&alock);
}
return OMPI_ERR_BAD_PARAM;
}
@ -1045,7 +1045,7 @@ static int set_value(ompi_attribute_type_t type, void *object,
ret = opal_hash_table_set_value_uint32(*keyhash, key, new_attr);
if (need_lock) {
OMPI_THREAD_UNLOCK(&alock);
OPAL_THREAD_UNLOCK(&alock);
}
if (OMPI_SUCCESS != ret) {
return ret;
@ -1080,12 +1080,12 @@ static int get_value(opal_hash_table_t *keyhash, int key,
FALSE in the flag argument */
*flag = 0;
OMPI_THREAD_LOCK(&alock);
OPAL_THREAD_LOCK(&alock);
ret = opal_hash_table_get_value_uint32(keyval_hash, key,
(void**) &key_item);
if (OMPI_ERR_NOT_FOUND == ret) {
OMPI_THREAD_UNLOCK(&alock);
OPAL_THREAD_UNLOCK(&alock);
return MPI_KEYVAL_INVALID;
}
@ -1093,12 +1093,12 @@ static int get_value(opal_hash_table_t *keyhash, int key,
been cached on this object yet. So just return *flag = 0. */
if (NULL == keyhash) {
OMPI_THREAD_UNLOCK(&alock);
OPAL_THREAD_UNLOCK(&alock);
return OMPI_SUCCESS;
}
ret = opal_hash_table_get_value_uint32(keyhash, key, &attr);
OMPI_THREAD_UNLOCK(&alock);
OPAL_THREAD_UNLOCK(&alock);
if (OMPI_SUCCESS == ret) {
*attribute = attr;
*flag = 1;


@ -34,7 +34,7 @@ opal_class_t ompi_free_list_t_class = {
static void ompi_free_list_construct(ompi_free_list_t* fl)
{
OBJ_CONSTRUCT(&fl->fl_lock, ompi_mutex_t);
OBJ_CONSTRUCT(&fl->fl_lock, opal_mutex_t);
fl->fl_max_to_alloc = 0;
fl->fl_num_allocated = 0;
fl->fl_num_per_alloc = 0;


@ -19,8 +19,8 @@
#include "ompi_config.h"
#include "opal/class/opal_list.h"
#include "threads/thread.h"
#include "threads/condition.h"
#include "opal/threads/thread.h"
#include "opal/threads/condition.h"
#include "include/constants.h"
#include "mca/mpool/mpool.h"
@ -41,8 +41,8 @@ struct ompi_free_list_t
size_t fl_elem_size;
opal_class_t* fl_elem_class;
mca_mpool_base_module_t* fl_mpool;
ompi_mutex_t fl_lock;
ompi_condition_t fl_condition;
opal_mutex_t fl_lock;
opal_condition_t fl_condition;
};
typedef struct ompi_free_list_t ompi_free_list_t;
@ -92,14 +92,14 @@ OMPI_DECLSPEC int ompi_free_list_grow(ompi_free_list_t* flist, size_t num_elemen
#define OMPI_FREE_LIST_GET(fl, item, rc) \
{ \
if(ompi_using_threads()) { \
ompi_mutex_lock(&((fl)->fl_lock)); \
if(opal_using_threads()) { \
opal_mutex_lock(&((fl)->fl_lock)); \
item = opal_list_remove_first(&((fl)->super)); \
if(NULL == item) { \
ompi_free_list_grow((fl), (fl)->fl_num_per_alloc); \
item = opal_list_remove_first(&((fl)->super)); \
} \
ompi_mutex_unlock(&((fl)->fl_lock)); \
opal_mutex_unlock(&((fl)->fl_lock)); \
} else { \
item = opal_list_remove_first(&((fl)->super)); \
if(NULL == item) { \
@ -126,19 +126,19 @@ OMPI_DECLSPEC int ompi_free_list_grow(ompi_free_list_t* flist, size_t num_elemen
#define OMPI_FREE_LIST_WAIT(fl, item, rc) \
{ \
OMPI_THREAD_LOCK(&((fl)->fl_lock)); \
OPAL_THREAD_LOCK(&((fl)->fl_lock)); \
item = opal_list_remove_first(&((fl)->super)); \
while(NULL == item) { \
if((fl)->fl_max_to_alloc <= (fl)->fl_num_allocated) { \
(fl)->fl_num_waiting++; \
ompi_condition_wait(&((fl)->fl_condition), &((fl)->fl_lock)); \
opal_condition_wait(&((fl)->fl_condition), &((fl)->fl_lock)); \
(fl)->fl_num_waiting--; \
} else { \
ompi_free_list_grow((fl), (fl)->fl_num_per_alloc); \
} \
item = opal_list_remove_first(&((fl)->super)); \
} \
OMPI_THREAD_UNLOCK(&((fl)->fl_lock)); \
OPAL_THREAD_UNLOCK(&((fl)->fl_lock)); \
rc = (NULL == item) ? OMPI_ERR_OUT_OF_RESOURCE : OMPI_SUCCESS; \
}
@ -153,12 +153,12 @@ OMPI_DECLSPEC int ompi_free_list_grow(ompi_free_list_t* flist, size_t num_elemen
#define OMPI_FREE_LIST_RETURN(fl, item) \
{ \
OMPI_THREAD_LOCK(&(fl)->fl_lock); \
OPAL_THREAD_LOCK(&(fl)->fl_lock); \
opal_list_prepend(&((fl)->super), (item)); \
if((fl)->fl_num_waiting > 0) { \
ompi_condition_signal(&((fl)->fl_condition)); \
opal_condition_signal(&((fl)->fl_condition)); \
} \
OMPI_THREAD_UNLOCK(&(fl)->fl_lock); \
OPAL_THREAD_UNLOCK(&(fl)->fl_lock); \
}
#if defined(c_plusplus) || defined(__cplusplus)
}


@ -39,7 +39,7 @@ OBJ_CLASS_INSTANCE(ompi_pointer_array_t, opal_object_t,
*/
void ompi_pointer_array_construct(ompi_pointer_array_t *array)
{
OBJ_CONSTRUCT(&array->lock, ompi_mutex_t);
OBJ_CONSTRUCT(&array->lock, opal_mutex_t);
array->lowest_free = 0;
array->number_free = 0;
array->size = 0;
@ -83,7 +83,7 @@ int ompi_pointer_array_add(ompi_pointer_array_t *table, void *ptr)
assert(table != NULL);
OMPI_THREAD_LOCK(&(table->lock));
OPAL_THREAD_LOCK(&(table->lock));
if (table->addr == NULL) {
@ -97,7 +97,7 @@ int ompi_pointer_array_add(ompi_pointer_array_t *table, void *ptr)
p = (void **) malloc(TABLE_INIT * sizeof(void *));
if (p == NULL) {
OMPI_THREAD_UNLOCK(&(table->lock));
OPAL_THREAD_UNLOCK(&(table->lock));
return OMPI_ERROR;
}
table->lowest_free = 0;
@ -114,7 +114,7 @@ int ompi_pointer_array_add(ompi_pointer_array_t *table, void *ptr)
if (!grow_table(table, table->size * TABLE_GROW,
OMPI_FORTRAN_HANDLE_MAX)) {
OMPI_THREAD_UNLOCK(&(table->lock));
OPAL_THREAD_UNLOCK(&(table->lock));
return OMPI_ERR_OUT_OF_RESOURCE;
}
}
@ -154,7 +154,7 @@ int ompi_pointer_array_add(ompi_pointer_array_t *table, void *ptr)
index, ptr);
}
OMPI_THREAD_UNLOCK(&(table->lock));
OPAL_THREAD_UNLOCK(&(table->lock));
return index;
}
@ -184,11 +184,11 @@ int ompi_pointer_array_set_item(ompi_pointer_array_t *table, int index,
/* expand table if required to set a specific index */
OMPI_THREAD_LOCK(&(table->lock));
OPAL_THREAD_LOCK(&(table->lock));
if (table->size <= index) {
if (!grow_table(table, ((index / TABLE_GROW) + 1) * TABLE_GROW,
index)) {
OMPI_THREAD_UNLOCK(&(table->lock));
OPAL_THREAD_UNLOCK(&(table->lock));
return OMPI_ERROR;
}
}
@ -255,7 +255,7 @@ int ompi_pointer_array_set_item(ompi_pointer_array_t *table, int index,
index, table->addr[index]);
#endif
OMPI_THREAD_UNLOCK(&(table->lock));
OPAL_THREAD_UNLOCK(&(table->lock));
return OMPI_SUCCESS;
}
@ -288,10 +288,10 @@ bool ompi_pointer_array_test_and_set_item (ompi_pointer_array_t *table,
#endif
/* expand table if required to set a specific index */
OMPI_THREAD_LOCK(&(table->lock));
OPAL_THREAD_LOCK(&(table->lock));
if ( index < table->size && table->addr[index] != NULL ) {
/* This element is already in use */
OMPI_THREAD_UNLOCK(&(table->lock));
OPAL_THREAD_UNLOCK(&(table->lock));
return false;
}
@ -300,7 +300,7 @@ bool ompi_pointer_array_test_and_set_item (ompi_pointer_array_t *table,
if (table->size <= index) {
if (!grow_table(table, (((index / TABLE_GROW) + 1) * TABLE_GROW),
index)) {
OMPI_THREAD_UNLOCK(&(table->lock));
OPAL_THREAD_UNLOCK(&(table->lock));
return false;
}
}
@ -331,7 +331,7 @@ bool ompi_pointer_array_test_and_set_item (ompi_pointer_array_t *table,
index, table->addr[index]);
#endif
OMPI_THREAD_UNLOCK(&(table->lock));
OPAL_THREAD_UNLOCK(&(table->lock));
return true;
}


@ -31,7 +31,7 @@
#include "ompi_config.h"
#include "threads/mutex.h"
#include "opal/threads/mutex.h"
#include "opal/class/opal_object.h"
#if defined(c_plusplus) || defined(__cplusplus)
@ -45,7 +45,7 @@ struct ompi_pointer_array_t {
/** base class */
opal_object_t super;
/** synchronization object */
ompi_mutex_t lock;
opal_mutex_t lock;
/** Index of lowest free element. NOTE: This is only an
optimization to know where to search for the first free slot.
It does \em not necessarily imply indices all above this index
@ -105,9 +105,9 @@ static inline void *ompi_pointer_array_get_item(ompi_pointer_array_t *table,
int index)
{
void *p;
OMPI_THREAD_LOCK(&(table->lock));
OPAL_THREAD_LOCK(&(table->lock));
p = table->addr[index];
OMPI_THREAD_UNLOCK(&(table->lock));
OPAL_THREAD_UNLOCK(&(table->lock));
return p;
}


@ -22,7 +22,7 @@
#include "include/constants.h"
#include "dps/dps.h"
#include "proc/proc.h"
#include "threads/mutex.h"
#include "opal/threads/mutex.h"
#include "util/bit_ops.h"
#include "util/output.h"
#include "util/convert.h"
@ -659,14 +659,14 @@ int ompi_comm_set_name (ompi_communicator_t *comm, char *name )
{
#ifdef USE_MUTEX_FOR_COMMS
OMPI_THREAD_LOCK(&(comm->c_lock));
OPAL_THREAD_LOCK(&(comm->c_lock));
#endif
memset(comm->c_name, 0, MPI_MAX_OBJECT_NAME);
strncpy(comm->c_name, name, MPI_MAX_OBJECT_NAME);
comm->c_name[MPI_MAX_OBJECT_NAME - 1] = 0;
comm->c_flags |= OMPI_COMM_NAMEISSET;
#ifdef USE_MUTEX_FOR_COMMS
OMPI_THREAD_UNLOCK(&(comm->c_lock));
OPAL_THREAD_UNLOCK(&(comm->c_lock));
#endif
return OMPI_SUCCESS;


@ -101,7 +101,7 @@ OBJ_CLASS_INSTANCE (ompi_comm_reg_t,
ompi_comm_reg_destructor );
#if OMPI_HAVE_THREAD_SUPPORT
static ompi_mutex_t ompi_cid_lock;
static opal_mutex_t ompi_cid_lock;
#endif /* OMPI_HAVE_THREAD_SUPPORT */
static opal_list_t ompi_registered_comms;
@ -148,22 +148,22 @@ int ompi_comm_nextcid ( ompi_communicator_t* newcomm,
}
OMPI_THREAD_LOCK(&ompi_cid_lock);
OPAL_THREAD_LOCK(&ompi_cid_lock);
ompi_comm_register_cid (comm->c_contextid);
OMPI_THREAD_UNLOCK(&ompi_cid_lock);
OPAL_THREAD_UNLOCK(&ompi_cid_lock);
while (!done) {
/**
* This is the real algorithm described in the doc
*/
OMPI_THREAD_LOCK(&ompi_cid_lock);
OPAL_THREAD_LOCK(&ompi_cid_lock);
if (comm->c_contextid != ompi_comm_lowest_cid() ) {
/* if not lowest cid, we do not continue, but sleep and try again */
OMPI_THREAD_UNLOCK(&ompi_cid_lock);
OPAL_THREAD_UNLOCK(&ompi_cid_lock);
continue;
}
OMPI_THREAD_UNLOCK(&ompi_cid_lock);
OPAL_THREAD_UNLOCK(&ompi_cid_lock);
for (i=start; i<OMPI_MAX_COMM ;i++) {
@ -214,9 +214,9 @@ int ompi_comm_nextcid ( ompi_communicator_t* newcomm,
newcomm->c_f_to_c_index = newcomm->c_contextid;
ompi_pointer_array_set_item (&ompi_mpi_communicators, nextcid, newcomm);
OMPI_THREAD_LOCK(&ompi_cid_lock);
OPAL_THREAD_LOCK(&ompi_cid_lock);
ompi_comm_unregister_cid (comm->c_contextid);
OMPI_THREAD_UNLOCK(&ompi_cid_lock);
OPAL_THREAD_UNLOCK(&ompi_cid_lock);
return (MPI_SUCCESS);
}


@ -31,7 +31,7 @@
#include "group/group.h"
#include "proc/proc.h"
#include "info/info.h"
#include "threads/mutex.h"
#include "opal/threads/mutex.h"
#include "util/proc_info.h"
#include "util/bit_ops.h"
#include "util/argv.h"


@ -29,7 +29,7 @@
#include "mca/rml/rml_types.h"
#if OMPI_HAVE_THREAD_SUPPORT
static ompi_mutex_t ompi_port_lock;
static opal_mutex_t ompi_port_lock;
#endif /* OMPI_HAVE_THREAD_SUPPORT */
#define OMPI_COMM_PORT_KEY "ompi-port-name"
@ -54,11 +54,11 @@ int ompi_open_port(char *port_name)
return rc;
}
OMPI_THREAD_LOCK(&ompi_port_lock);
OPAL_THREAD_LOCK(&ompi_port_lock);
if (ORTE_SUCCESS != (rc = orte_ns.assign_rml_tag(&lport_id, NULL))) {
return rc;
}
OMPI_THREAD_UNLOCK(&ompi_port_lock);
OPAL_THREAD_UNLOCK(&ompi_port_lock);
sprintf (port_name, "%s:%d", name, lport_id);
free ( myproc );


@ -20,7 +20,7 @@
#include "opal/class/opal_object.h"
#include "class/opal_hash_table.h"
#include "errhandler/errhandler.h"
#include "threads/mutex.h"
#include "opal/threads/mutex.h"
#include "util/output.h"
#include "mpi.h"
@ -90,7 +90,7 @@ OMPI_DECLSPEC extern ompi_pointer_array_t ompi_mpi_communicators;
struct ompi_communicator_t {
opal_object_t c_base;
ompi_mutex_t c_lock; /* mutex for name and potentially
opal_mutex_t c_lock; /* mutex for name and potentially
attributes */
char c_name[MPI_MAX_OBJECT_NAME];
uint32_t c_contextid;


@ -20,7 +20,7 @@
#include "mpi.h"
#include "opal/class/opal_list.h"
#include "errhandler/errhandler.h"
#include "threads/mutex.h"
#include "opal/threads/mutex.h"
#include "mca/io/io.h"
/*
@ -87,7 +87,7 @@ struct ompi_file_t {
opal_list_t f_io_requests;
/** Lock for the per-module io request freelist */
ompi_mutex_t f_io_requests_lock;
opal_mutex_t f_io_requests_lock;
};
/**
* Convenience typedef


@ -90,18 +90,18 @@ int ompi_info_dup (ompi_info_t *info, ompi_info_t **newinfo)
opal_list_item_t *item;
ompi_info_entry_t *iterator;
OMPI_THREAD_LOCK(info->i_lock);
OPAL_THREAD_LOCK(info->i_lock);
for (item = opal_list_get_first(&(info->super));
item != opal_list_get_end(&(info->super));
item = opal_list_get_next(iterator)) {
iterator = (ompi_info_entry_t *) item;
err = ompi_info_set(*newinfo, iterator->ie_key, iterator->ie_value);
if (MPI_SUCCESS != err) {
OMPI_THREAD_UNLOCK(info->i_lock);
OPAL_THREAD_UNLOCK(info->i_lock);
return err;
}
}
OMPI_THREAD_UNLOCK(info->i_lock);
OPAL_THREAD_UNLOCK(info->i_lock);
return MPI_SUCCESS;
}
@ -120,7 +120,7 @@ int ompi_info_set (ompi_info_t *info, char *key, char *value)
return MPI_ERR_NO_MEM;
}
OMPI_THREAD_LOCK(info->i_lock);
OPAL_THREAD_LOCK(info->i_lock);
old_info = info_find_key (info, key);
if (NULL != old_info) {
/*
@ -131,14 +131,14 @@ int ompi_info_set (ompi_info_t *info, char *key, char *value)
} else {
new_info = OBJ_NEW(ompi_info_entry_t);
if (NULL == new_info) {
OMPI_THREAD_UNLOCK(info->i_lock);
OPAL_THREAD_UNLOCK(info->i_lock);
return MPI_ERR_NO_MEM;
}
strcpy (new_info->ie_key, key);
new_info->ie_value = new_value;
opal_list_append (&(info->super), (opal_list_item_t *) new_info);
}
OMPI_THREAD_UNLOCK(info->i_lock);
OPAL_THREAD_UNLOCK(info->i_lock);
return MPI_SUCCESS;
}
@ -164,7 +164,7 @@ int ompi_info_get (ompi_info_t *info, char *key, int valuelen,
ompi_info_entry_t *search;
int value_length;
OMPI_THREAD_LOCK(info->i_lock);
OPAL_THREAD_LOCK(info->i_lock);
search = info_find_key (info, key);
if (NULL == search){
*flag = 0;
@ -187,7 +187,7 @@ int ompi_info_get (ompi_info_t *info, char *key, int valuelen,
value[valuelen] = 0;
}
}
OMPI_THREAD_UNLOCK(info->i_lock);
OPAL_THREAD_UNLOCK(info->i_lock);
return MPI_SUCCESS;
}
@ -200,10 +200,10 @@ int ompi_info_delete (ompi_info_t *info, char *key)
ompi_info_entry_t *search;
ompi_info_entry_t *found;
OMPI_THREAD_LOCK(info->i_lock);
OPAL_THREAD_LOCK(info->i_lock);
search = info_find_key (info, key);
if (NULL == search){
OMPI_THREAD_UNLOCK(info->i_lock);
OPAL_THREAD_UNLOCK(info->i_lock);
return MPI_ERR_INFO_NOKEY;
} else {
/*
@ -215,7 +215,7 @@ int ompi_info_delete (ompi_info_t *info, char *key)
(opal_list_item_t *)search);
OBJ_RELEASE(search);
}
OMPI_THREAD_UNLOCK(info->i_lock);
OPAL_THREAD_UNLOCK(info->i_lock);
return MPI_SUCCESS;
}
@ -228,7 +228,7 @@ int ompi_info_get_valuelen (ompi_info_t *info, char *key, int *valuelen,
{
ompi_info_entry_t *search;
OMPI_THREAD_LOCK(info->i_lock);
OPAL_THREAD_LOCK(info->i_lock);
search = info_find_key (info, key);
if (NULL == search){
*flag = 0;
@ -240,7 +240,7 @@ int ompi_info_get_valuelen (ompi_info_t *info, char *key, int *valuelen,
*flag = 1;
*valuelen = strlen(search->ie_value);
}
OMPI_THREAD_UNLOCK(info->i_lock);
OPAL_THREAD_UNLOCK(info->i_lock);
return MPI_SUCCESS;
}
@ -255,14 +255,14 @@ int ompi_info_get_nthkey (ompi_info_t *info, int n, char *key)
/*
* Iterate over and over till we get to the nth key
*/
OMPI_THREAD_LOCK(info->i_lock);
OPAL_THREAD_LOCK(info->i_lock);
for (iterator = (ompi_info_entry_t *)opal_list_get_first(&(info->super));
n > 0;
--n) {
iterator = (ompi_info_entry_t *)opal_list_get_next(iterator);
if (opal_list_get_end(&(info->super)) ==
(opal_list_item_t *) iterator) {
OMPI_THREAD_UNLOCK(info->i_lock);
OPAL_THREAD_UNLOCK(info->i_lock);
return MPI_ERR_ARG;
}
}
@ -272,7 +272,7 @@ int ompi_info_get_nthkey (ompi_info_t *info, int n, char *key)
* access the value
*/
strcpy(key, iterator->ie_key);
OMPI_THREAD_UNLOCK(info->i_lock);
OPAL_THREAD_UNLOCK(info->i_lock);
return MPI_SUCCESS;
}
@ -361,7 +361,7 @@ static void info_constructor(ompi_info_t *info)
{
info->i_f_to_c_index = ompi_pointer_array_add(&ompi_info_f_to_c_table,
info);
info->i_lock = OBJ_NEW(ompi_mutex_t);
info->i_lock = OBJ_NEW(opal_mutex_t);
info->i_freed = false;
/* If the user doesn't want us to ever free it, then add an extra


@ -23,7 +23,7 @@
#include "util/strncpy.h"
#include "opal/class/opal_list.h"
#include "class/ompi_pointer_array.h"
#include "threads/mutex.h"
#include "opal/threads/mutex.h"
/**
@ -37,7 +37,7 @@ struct ompi_info_t {
int i_f_to_c_index;
/**< fortran handle for info. This is needed for translation from
fortran to C and vice versa */
ompi_mutex_t *i_lock;
opal_mutex_t *i_lock;
/**< Mutex for thread safety */
bool i_freed;
/**< Whether this info has been freed or not */


@ -94,7 +94,7 @@ mca_allocator_base_module_t* mca_allocator_basic_component_init(
module->seg_alloc = segment_alloc;
module->seg_free = segment_free;
OBJ_CONSTRUCT(&module->seg_list, opal_list_t);
OBJ_CONSTRUCT(&module->seg_lock, ompi_mutex_t);
OBJ_CONSTRUCT(&module->seg_lock, opal_mutex_t);
OBJ_CONSTRUCT(&module->seg_descriptors, ompi_free_list_t);
ompi_free_list_init(&module->seg_descriptors,
@ -168,7 +168,7 @@ void *mca_allocator_basic_alloc(
opal_list_item_t* item;
unsigned char* addr;
size_t allocated_size;
OMPI_THREAD_LOCK(&module->seg_lock);
OPAL_THREAD_LOCK(&module->seg_lock);
/* search the list for a segment of the required size */
size += sizeof(size_t);
@ -182,14 +182,14 @@ void *mca_allocator_basic_alloc(
addr = seg->seg_addr;
seg->seg_addr += size;
seg->seg_size -= size;
OMPI_THREAD_UNLOCK(&module->seg_lock);
OPAL_THREAD_UNLOCK(&module->seg_lock);
*(size_t*)addr = size;
return addr+sizeof(size_t);
} else if (seg->seg_size == size) {
addr = seg->seg_addr;
opal_list_remove_item(&module->seg_list, item);
OMPI_FREE_LIST_RETURN(&module->seg_descriptors, item);
OMPI_THREAD_UNLOCK(&module->seg_lock);
OPAL_THREAD_UNLOCK(&module->seg_lock);
*(size_t*)addr = size;
return addr+sizeof(size_t);
}
@ -198,7 +198,7 @@ void *mca_allocator_basic_alloc(
/* request additional block */
allocated_size = (unsigned char)size;
if(NULL == (addr = (unsigned char *)module->seg_alloc(module->super.alc_mpool, &allocated_size, registration))) {
OMPI_THREAD_UNLOCK(&module->seg_lock);
OPAL_THREAD_UNLOCK(&module->seg_lock);
return NULL;
}
@ -207,7 +207,7 @@ void *mca_allocator_basic_alloc(
int rc;
OMPI_FREE_LIST_GET(&module->seg_descriptors, item, rc);
if(rc != OMPI_SUCCESS) {
OMPI_THREAD_UNLOCK(&module->seg_lock);
OPAL_THREAD_UNLOCK(&module->seg_lock);
return NULL;
}
seg = (mca_allocator_basic_segment_t*)item;
@ -217,7 +217,7 @@ void *mca_allocator_basic_alloc(
}
*(size_t*)addr = size;
OMPI_THREAD_UNLOCK(&module->seg_lock);
OPAL_THREAD_UNLOCK(&module->seg_lock);
return addr+sizeof(size_t);
}
@ -276,7 +276,7 @@ void mca_allocator_basic_free(
unsigned char* addr = (unsigned char*)ptr - sizeof(size_t);
size_t size = *(size_t*)addr;
int rc;
OMPI_THREAD_LOCK(&module->seg_lock);
OPAL_THREAD_LOCK(&module->seg_lock);
/* maintain the free list in sorted order by address */
for(item = opal_list_get_first(&module->seg_list);
@ -290,7 +290,7 @@ void mca_allocator_basic_free(
if(seg->seg_addr + seg->seg_size == addr) {
seg->seg_size += size;
mca_allocator_basic_combine_next(module, seg);
OMPI_THREAD_UNLOCK(&module->seg_lock);
OPAL_THREAD_UNLOCK(&module->seg_lock);
return;
}
/* otherwise continue to check next larger entry */
@ -302,7 +302,7 @@ void mca_allocator_basic_free(
seg->seg_addr = addr;
seg->seg_size += size;
mca_allocator_basic_combine_prev(module, seg);
OMPI_THREAD_UNLOCK(&module->seg_lock);
OPAL_THREAD_UNLOCK(&module->seg_lock);
return;
/* insert before larger entry */
@ -310,14 +310,14 @@ void mca_allocator_basic_free(
mca_allocator_basic_segment_t* new_seg;
OMPI_FREE_LIST_GET(&module->seg_descriptors, item, rc);
if(rc != OMPI_SUCCESS) {
OMPI_THREAD_UNLOCK(&module->seg_lock);
OPAL_THREAD_UNLOCK(&module->seg_lock);
return;
}
new_seg = (mca_allocator_basic_segment_t*)item;
new_seg->seg_addr = addr;
new_seg->seg_size = size;
opal_list_insert_pos(&module->seg_list, &seg->seg_item, item);
OMPI_THREAD_UNLOCK(&module->seg_lock);
OPAL_THREAD_UNLOCK(&module->seg_lock);
return;
}
}
@ -326,14 +326,14 @@ void mca_allocator_basic_free(
/* append to the end of the list */
OMPI_FREE_LIST_GET(&module->seg_descriptors, item, rc);
if(rc != OMPI_SUCCESS) {
OMPI_THREAD_UNLOCK(&module->seg_lock);
OPAL_THREAD_UNLOCK(&module->seg_lock);
return;
}
seg = (mca_allocator_basic_segment_t*)item;
seg->seg_addr = addr;
seg->seg_size = size;
opal_list_append(&module->seg_list, item);
OMPI_THREAD_UNLOCK(&module->seg_lock);
OPAL_THREAD_UNLOCK(&module->seg_lock);
}


@ -23,7 +23,7 @@
#include <stdlib.h>
#include <string.h>
#include "threads/mutex.h"
#include "opal/threads/mutex.h"
#include "opal/class/opal_object.h"
#include "class/ompi_free_list.h"
#include "mca/allocator/allocator.h"
@ -50,7 +50,7 @@ struct mca_allocator_basic_module_t {
mca_allocator_base_component_segment_alloc_fn_t seg_alloc;
mca_allocator_base_component_segment_free_fn_t seg_free;
opal_list_t seg_list;
ompi_mutex_t seg_lock;
opal_mutex_t seg_lock;
ompi_free_list_t seg_descriptors;
};
typedef struct mca_allocator_basic_module_t mca_allocator_basic_module_t;


@ -54,7 +54,7 @@ mca_allocator_bucket_t * mca_allocator_bucket_init(
for(i = 0; i < num_buckets; i++) {
mem_options->buckets[i].free_chunk = NULL;
mem_options->buckets[i].segment_head = NULL;
OBJ_CONSTRUCT(&(mem_options->buckets[i].lock), ompi_mutex_t);
OBJ_CONSTRUCT(&(mem_options->buckets[i].lock), opal_mutex_t);
}
mem_options->num_buckets = num_buckets;
mem_options->get_mem_fn = get_mem_funct;
@ -91,7 +91,7 @@ void * mca_allocator_bucket_alloc(
}
/* now that we know what bucket it will come from, we must get the lock */
OMPI_THREAD_LOCK(&(mem_options->buckets[bucket_num].lock));
OPAL_THREAD_LOCK(&(mem_options->buckets[bucket_num].lock));
/* see if there is already a free chunk */
if(NULL != mem_options->buckets[bucket_num].free_chunk) {
chunk = mem_options->buckets[bucket_num].free_chunk;
@ -100,7 +100,7 @@ void * mca_allocator_bucket_alloc(
/* go past the header */
chunk += 1;
/*release the lock */
OMPI_THREAD_UNLOCK(&(mem_options->buckets[bucket_num].lock));
OPAL_THREAD_UNLOCK(&(mem_options->buckets[bucket_num].lock));
return((void *) chunk);
}
/* figure out the size of bucket we need */
@ -113,7 +113,7 @@ void * mca_allocator_bucket_alloc(
mem_options->get_mem_fn(mem_options->super.alc_mpool, &allocated_size, registration);
if(NULL == segment_header) {
/* release the lock */
OMPI_THREAD_UNLOCK(&(mem_options->buckets[bucket_num].lock));
OPAL_THREAD_UNLOCK(&(mem_options->buckets[bucket_num].lock));
return(NULL);
}
/* if were allocated more memory then we actually need, then we will try to
@ -142,7 +142,7 @@ void * mca_allocator_bucket_alloc(
first_chunk->next_in_segment = first_chunk;
}
first_chunk->u.bucket = bucket_num;
OMPI_THREAD_UNLOCK(&(mem_options->buckets[bucket_num].lock));
OPAL_THREAD_UNLOCK(&(mem_options->buckets[bucket_num].lock));
/* return the memory moved past the header */
return((void *) (first_chunk + 1));
}
@ -203,7 +203,7 @@ void * mca_allocator_bucket_alloc_align(
allocated_size -= aligned_max_size;
chunk = segment_header->first_chunk = first_chunk;
/* we now need to get a lock on the bucket */
OMPI_THREAD_LOCK(&(mem_options->buckets[bucket_num].lock));
OPAL_THREAD_LOCK(&(mem_options->buckets[bucket_num].lock));
/* add the segment into the segment list */
segment_header->next_segment = mem_options->buckets[bucket_num].segment_head;
mem_options->buckets[bucket_num].segment_head = segment_header;
@ -225,7 +225,7 @@ void * mca_allocator_bucket_alloc_align(
first_chunk->next_in_segment = first_chunk;
}
first_chunk->u.bucket = bucket_num;
OMPI_THREAD_UNLOCK(&(mem_options->buckets[bucket_num].lock));
OPAL_THREAD_UNLOCK(&(mem_options->buckets[bucket_num].lock));
/* return the aligned memory */
return((void *) (aligned_memory));
}
@ -280,10 +280,10 @@ void mca_allocator_bucket_free(mca_allocator_base_module_t * mem, void * ptr)
mca_allocator_bucket_t * mem_options = (mca_allocator_bucket_t *) mem;
mca_allocator_bucket_chunk_header_t * chunk = (mca_allocator_bucket_chunk_header_t *) ptr - 1;
int bucket_num = chunk->u.bucket;
OMPI_THREAD_LOCK(&(mem_options->buckets[bucket_num].lock));
OPAL_THREAD_LOCK(&(mem_options->buckets[bucket_num].lock));
chunk->u.next_free = mem_options->buckets[bucket_num].free_chunk;
mem_options->buckets[bucket_num].free_chunk = chunk;
OMPI_THREAD_UNLOCK(&(mem_options->buckets[bucket_num].lock));
OPAL_THREAD_UNLOCK(&(mem_options->buckets[bucket_num].lock));
}
/*
@ -304,7 +304,7 @@ int mca_allocator_bucket_cleanup(mca_allocator_base_module_t * mem)
bool empty = true;
for(i = 0; i < mem_options->num_buckets; i++) {
OMPI_THREAD_LOCK(&(mem_options->buckets[i].lock));
OPAL_THREAD_LOCK(&(mem_options->buckets[i].lock));
segment_header = &(mem_options->buckets[i].segment_head);
/* traverse the list of segment headers until we hit NULL */
while(NULL != *segment_header) {
@ -346,7 +346,7 @@ int mca_allocator_bucket_cleanup(mca_allocator_base_module_t * mem)
empty = true;
}
/* relese the lock on the bucket */
OMPI_THREAD_UNLOCK(&(mem_options->buckets[i].lock));
OPAL_THREAD_UNLOCK(&(mem_options->buckets[i].lock));
}
return(OMPI_SUCCESS);
}


@ -23,7 +23,7 @@
#include <stdlib.h>
#include <string.h>
#include "threads/mutex.h"
#include "opal/threads/mutex.h"
#include "opal/class/opal_object.h"
#include "mca/allocator/allocator.h"
#if defined(c_plusplus) || defined(__cplusplus)
@ -69,7 +69,7 @@ typedef struct mca_allocator_bucket_segment_head_t mca_allocator_bucket_segment_
*/
struct mca_allocator_bucket_bucket_t {
mca_allocator_bucket_chunk_header_t * free_chunk; /**< the first free chunk of memory */
ompi_mutex_t lock; /**< the lock on the bucket */
opal_mutex_t lock; /**< the lock on the bucket */
mca_allocator_bucket_segment_head_t * segment_head; /**< the list of segment headers */
};
/**


@ -87,7 +87,7 @@ int mca_btl_gm_add_procs(
* don't bind this PTL instance to the proc.
*/
OMPI_THREAD_LOCK(&gm_proc->proc_lock);
OPAL_THREAD_LOCK(&gm_proc->proc_lock);
/* The btl_proc datastructure is shared by all GM PTL
* instances that are trying to reach this destination.
@ -95,7 +95,7 @@ int mca_btl_gm_add_procs(
*/
gm_endpoint = OBJ_NEW(mca_btl_gm_endpoint_t);
if(NULL == gm_endpoint) {
OMPI_THREAD_UNLOCK(&module_proc->proc_lock);
OPAL_THREAD_UNLOCK(&module_proc->proc_lock);
return OMPI_ERR_OUT_OF_RESOURCE;
}
@ -103,12 +103,12 @@ int mca_btl_gm_add_procs(
rc = mca_btl_gm_proc_insert(gm_proc, gm_endpoint);
if(rc != OMPI_SUCCESS) {
OBJ_RELEASE(gm_endpoint);
OMPI_THREAD_UNLOCK(&module_proc->proc_lock);
OPAL_THREAD_UNLOCK(&module_proc->proc_lock);
continue;
}
ompi_bitmap_set_bit(reachable, i);
OMPI_THREAD_UNLOCK(&module_proc->proc_lock);
OPAL_THREAD_UNLOCK(&module_proc->proc_lock);
peers[i] = gm_endpoint;
}
return OMPI_SUCCESS;
@ -529,7 +529,7 @@ mca_btl_base_descriptor_t* mca_btl_gm_prepare_dst(
static void mca_btl_gm_drop_callback( struct gm_port* port, void* context, gm_status_t status )
{
mca_btl_gm_module_t* btl = (mca_btl_gm_module_t*)context;
OMPI_THREAD_ADD32( &btl->gm_num_send_tokens, 1 );
OPAL_THREAD_ADD32( &btl->gm_num_send_tokens, 1 );
}
static void mca_btl_gm_send_callback( struct gm_port* port, void* context, gm_status_t status )
@ -556,23 +556,23 @@ static void mca_btl_gm_send_callback( struct gm_port* port, void* context, gm_st
break;
case GM_SEND_DROPPED:
/* release the send token */
OMPI_THREAD_ADD32(&btl->gm_num_send_tokens, 1);
OPAL_THREAD_ADD32(&btl->gm_num_send_tokens, 1);
/* retry the dropped fragment */
mca_btl_gm_send(&btl->super, frag->endpoint, &frag->base, frag->hdr->tag);
break;
case GM_SUCCESS:
/* release the send token */
OMPI_THREAD_ADD32( &btl->gm_num_send_tokens, 1 );
OPAL_THREAD_ADD32( &btl->gm_num_send_tokens, 1 );
/* call the completion callback */
frag->base.des_cbfunc(&btl->super, frag->endpoint, &frag->base, OMPI_SUCCESS);
/* check for pending fragments */
if(opal_list_get_size(&btl->gm_pending)) {
OMPI_THREAD_LOCK(&btl->gm_lock);
OPAL_THREAD_LOCK(&btl->gm_lock);
frag = (mca_btl_gm_frag_t*)opal_list_remove_first(&btl->gm_pending);
OMPI_THREAD_UNLOCK(&btl->gm_lock);
OPAL_THREAD_UNLOCK(&btl->gm_lock);
mca_btl_gm_send(&btl->super, frag->endpoint, &frag->base, frag->hdr->tag);
}
break;
@ -582,7 +582,7 @@ static void mca_btl_gm_send_callback( struct gm_port* port, void* context, gm_st
ompi_output(0, "[%s:%d] send completed with unhandled gm error %d\n", __FILE__,__LINE__,status);
/* release the send token */
OMPI_THREAD_ADD32( &btl->gm_num_send_tokens, 1 );
OPAL_THREAD_ADD32( &btl->gm_num_send_tokens, 1 );
/* call the completion callback */
frag->base.des_cbfunc(&btl->super, frag->endpoint, &frag->base, OMPI_ERROR);
@ -632,11 +632,11 @@ int mca_btl_gm_send(
frag->endpoint = endpoint;
/* queue the descriptor if there are no send tokens */
if(OMPI_THREAD_ADD32(&gm_btl->gm_num_send_tokens, -1) < 0) {
OMPI_THREAD_LOCK(&gm_btl->gm_lock);
if(OPAL_THREAD_ADD32(&gm_btl->gm_num_send_tokens, -1) < 0) {
OPAL_THREAD_LOCK(&gm_btl->gm_lock);
opal_list_append(&gm_btl->gm_pending, (opal_list_item_t*)frag);
OMPI_THREAD_UNLOCK(&gm_btl->gm_lock);
OMPI_THREAD_ADD32(&gm_btl->gm_num_send_tokens, 1);
OPAL_THREAD_UNLOCK(&gm_btl->gm_lock);
OPAL_THREAD_ADD32(&gm_btl->gm_num_send_tokens, 1);
return OMPI_SUCCESS;
}


@ -61,7 +61,7 @@ struct mca_btl_gm_component_t {
int gm_free_list_inc; /**< number of elements to alloc when growing free lists */
opal_list_t gm_procs; /**< list of gm proc structures */
ompi_mutex_t gm_lock; /**< lock for accessing module state */
opal_mutex_t gm_lock; /**< lock for accessing module state */
char* gm_mpool_name; /**< name of memory pool */
bool leave_pinned;
@ -97,7 +97,7 @@ struct mca_btl_gm_module_t {
/* lock for accessing module state */
opal_list_t gm_pending; /**< list of pending send descriptors */
ompi_mutex_t gm_lock;
opal_mutex_t gm_lock;
struct mca_mpool_base_module_t* gm_mpool;
};
typedef struct mca_btl_gm_module_t mca_btl_gm_module_t;


@ -108,7 +108,7 @@ int mca_btl_gm_component_open(void)
/* initialize objects */
OBJ_CONSTRUCT(&mca_btl_gm_component.gm_procs, opal_list_t);
OBJ_CONSTRUCT(&mca_btl_gm_component.gm_lock, ompi_mutex_t);
OBJ_CONSTRUCT(&mca_btl_gm_component.gm_lock, opal_mutex_t);
/* register GM component parameters */
mca_btl_gm_component.gm_free_list_num =
@ -179,7 +179,7 @@ mca_btl_gm_module_init (mca_btl_gm_module_t * btl)
OBJ_CONSTRUCT(&btl->gm_frag_max, ompi_free_list_t);
OBJ_CONSTRUCT(&btl->gm_frag_user, ompi_free_list_t);
OBJ_CONSTRUCT(&btl->gm_pending, opal_list_t);
OBJ_CONSTRUCT(&btl->gm_lock, ompi_mutex_t);
OBJ_CONSTRUCT(&btl->gm_lock, opal_mutex_t);
/* query nic tokens */
btl->gm_num_send_tokens = gm_num_send_tokens (btl->gm_port);


@ -35,11 +35,11 @@ void mca_btl_gm_proc_construct(mca_btl_gm_proc_t* proc)
proc->proc_addr_count = 0;
proc->proc_endpoints = 0;
proc->proc_endpoint_count = 0;
OBJ_CONSTRUCT(&proc->proc_lock, ompi_mutex_t);
OBJ_CONSTRUCT(&proc->proc_lock, opal_mutex_t);
/* add to list of all proc instance */
OMPI_THREAD_LOCK(&mca_btl_gm_component.gm_lock);
OPAL_THREAD_LOCK(&mca_btl_gm_component.gm_lock);
opal_list_append(&mca_btl_gm_component.gm_procs, &proc->super);
OMPI_THREAD_UNLOCK(&mca_btl_gm_component.gm_lock);
OPAL_THREAD_UNLOCK(&mca_btl_gm_component.gm_lock);
}
/*
@ -49,9 +49,9 @@ void mca_btl_gm_proc_construct(mca_btl_gm_proc_t* proc)
void mca_btl_gm_proc_destruct(mca_btl_gm_proc_t* proc)
{
/* remove from list of all proc instances */
OMPI_THREAD_LOCK(&mca_btl_gm_component.gm_lock);
OPAL_THREAD_LOCK(&mca_btl_gm_component.gm_lock);
opal_list_remove_item(&mca_btl_gm_component.gm_procs, &proc->super);
OMPI_THREAD_UNLOCK(&mca_btl_gm_component.gm_lock);
OPAL_THREAD_UNLOCK(&mca_btl_gm_component.gm_lock);
/* release resources */
if(NULL != proc->proc_endpoints) {
@ -68,7 +68,7 @@ static mca_btl_gm_proc_t* mca_btl_gm_proc_lookup_ompi(ompi_proc_t* ompi_proc)
{
mca_btl_gm_proc_t* gm_proc;
OMPI_THREAD_LOCK(&mca_btl_gm_component.gm_lock);
OPAL_THREAD_LOCK(&mca_btl_gm_component.gm_lock);
for(gm_proc = (mca_btl_gm_proc_t*)
opal_list_get_first(&mca_btl_gm_component.gm_procs);
@ -77,13 +77,13 @@ static mca_btl_gm_proc_t* mca_btl_gm_proc_lookup_ompi(ompi_proc_t* ompi_proc)
gm_proc = (mca_btl_gm_proc_t*)opal_list_get_next(gm_proc)) {
if(gm_proc->proc_ompi == ompi_proc) {
OMPI_THREAD_UNLOCK(&mca_btl_gm_component.gm_lock);
OPAL_THREAD_UNLOCK(&mca_btl_gm_component.gm_lock);
return gm_proc;
}
}
OMPI_THREAD_UNLOCK(&mca_btl_gm_component.gm_lock);
OPAL_THREAD_UNLOCK(&mca_btl_gm_component.gm_lock);
return NULL;
}


@ -56,7 +56,7 @@ struct mca_btl_gm_proc_t {
size_t proc_endpoint_count;
/**< number of endpoints */
ompi_mutex_t proc_lock;
opal_mutex_t proc_lock;
/**< lock to protect against concurrent access to proc state */
};
typedef struct mca_btl_gm_proc_t mca_btl_gm_proc_t;


@ -86,7 +86,7 @@ int mca_btl_mvapi_add_procs(
* don't bind this PTL instance to the proc.
*/
OMPI_THREAD_LOCK(&ib_proc->proc_lock);
OPAL_THREAD_LOCK(&ib_proc->proc_lock);
/* The btl_proc datastructure is shared by all IB PTL
* instances that are trying to reach this destination.
@ -94,7 +94,7 @@ int mca_btl_mvapi_add_procs(
*/
ib_peer = OBJ_NEW(mca_btl_mvapi_endpoint_t);
if(NULL == ib_peer) {
OMPI_THREAD_UNLOCK(&module_proc->proc_lock);
OPAL_THREAD_UNLOCK(&module_proc->proc_lock);
return OMPI_ERR_OUT_OF_RESOURCE;
}
@ -102,12 +102,12 @@ int mca_btl_mvapi_add_procs(
rc = mca_btl_mvapi_proc_insert(ib_proc, ib_peer);
if(rc != OMPI_SUCCESS) {
OBJ_RELEASE(ib_peer);
OMPI_THREAD_UNLOCK(&module_proc->proc_lock);
OPAL_THREAD_UNLOCK(&module_proc->proc_lock);
continue;
}
ompi_bitmap_set_bit(reachable, i);
OMPI_THREAD_UNLOCK(&module_proc->proc_lock);
OPAL_THREAD_UNLOCK(&module_proc->proc_lock);
peers[i] = ib_peer;
}
@ -134,10 +134,10 @@ int mca_btl_mvapi_register(
mca_btl_mvapi_module_t* mvapi_btl = (mca_btl_mvapi_module_t*) btl;
OMPI_THREAD_LOCK(&ib->btl.ib_lock);
OPAL_THREAD_LOCK(&ib->btl.ib_lock);
mvapi_btl->ib_reg[tag].cbfunc = cbfunc;
mvapi_btl->ib_reg[tag].cbdata = cbdata;
OMPI_THREAD_UNLOCK(&ib->btl.ib_lock);
OPAL_THREAD_UNLOCK(&ib->btl.ib_lock);
return OMPI_SUCCESS;
}


@ -74,7 +74,7 @@ struct mca_btl_mvapi_component_t {
ompi_event_t ib_recv_event;
/**< event structure for recvs */
ompi_mutex_t ib_lock;
opal_mutex_t ib_lock;
/**< lock for accessing module state */
int ib_mem_registry_hints_log_size;
@ -134,7 +134,7 @@ struct mca_btl_mvapi_module_t {
entries, this allows us to keep a working set of memory pinned */
opal_list_t repost; /**< list of buffers to repost */
ompi_mutex_t ib_lock; /**< module level lock */
opal_mutex_t ib_lock; /**< module level lock */
mca_mpool_base_module_t* ib_pool; /**< ib memory pool */


@ -283,7 +283,7 @@ mca_btl_base_module_t** mca_btl_mvapi_component_init(int *num_btl_modules,
a distinct btl module for each hca port */
OBJ_CONSTRUCT(&btl_list, opal_list_t);
OBJ_CONSTRUCT(&mca_btl_mvapi_component.ib_lock, ompi_mutex_t);
OBJ_CONSTRUCT(&mca_btl_mvapi_component.ib_lock, opal_mutex_t);
for(i = 0; i < num_hcas; i++){
@ -361,7 +361,7 @@ mca_btl_base_module_t** mca_btl_mvapi_component_init(int *num_btl_modules,
/* Initialize module state */
OBJ_CONSTRUCT(&mvapi_btl->ib_lock, ompi_mutex_t);
OBJ_CONSTRUCT(&mvapi_btl->ib_lock, opal_mutex_t);
OBJ_CONSTRUCT(&mvapi_btl->send_free_eager, ompi_free_list_t);
OBJ_CONSTRUCT(&mvapi_btl->send_free_max, ompi_free_list_t);
OBJ_CONSTRUCT(&mvapi_btl->send_free_frag, ompi_free_list_t);
@ -533,7 +533,7 @@ int mca_btl_mvapi_component_progress()
mvapi_btl->ib_reg[frag->hdr->tag].cbfunc(&mvapi_btl->super, frag->hdr->tag, &frag->base, mvapi_btl->ib_reg[frag->hdr->tag].cbdata);
OMPI_FREE_LIST_RETURN(&(mvapi_btl->recv_free_eager), (opal_list_item_t*) frag);
OMPI_THREAD_ADD32(&mvapi_btl->rr_posted_high, -1);
OPAL_THREAD_ADD32(&mvapi_btl->rr_posted_high, -1);
mca_btl_mvapi_endpoint_post_rr(((mca_btl_mvapi_frag_t*)comp.id)->endpoint, 0);
@ -579,7 +579,7 @@ int mca_btl_mvapi_component_progress()
mvapi_btl->ib_reg[frag->hdr->tag].cbfunc(&mvapi_btl->super, frag->hdr->tag, &frag->base, mvapi_btl->ib_reg[frag->hdr->tag].cbdata);
OMPI_FREE_LIST_RETURN(&(mvapi_btl->recv_free_max), (opal_list_item_t*) frag);
OMPI_THREAD_ADD32(&mvapi_btl->rr_posted_low, -1);
OPAL_THREAD_ADD32(&mvapi_btl->rr_posted_low, -1);
mca_btl_mvapi_endpoint_post_rr(((mca_btl_mvapi_frag_t*)comp.id)->endpoint, 0);


@ -111,8 +111,8 @@ static void mca_btl_mvapi_endpoint_construct(mca_btl_base_endpoint_t* endpoint)
endpoint->endpoint_tstamp = 0.0;
endpoint->endpoint_state = MCA_BTL_IB_CLOSED;
endpoint->endpoint_retries = 0;
OBJ_CONSTRUCT(&endpoint->endpoint_send_lock, ompi_mutex_t);
OBJ_CONSTRUCT(&endpoint->endpoint_recv_lock, ompi_mutex_t);
OBJ_CONSTRUCT(&endpoint->endpoint_send_lock, opal_mutex_t);
OBJ_CONSTRUCT(&endpoint->endpoint_recv_lock, opal_mutex_t);
OBJ_CONSTRUCT(&endpoint->pending_send_frags, opal_list_t);
}
@ -510,7 +510,7 @@ int mca_btl_mvapi_endpoint_send(
int rc;
mca_btl_mvapi_module_t *mvapi_btl;
OMPI_THREAD_LOCK(&endpoint->endpoint_send_lock);
OPAL_THREAD_LOCK(&endpoint->endpoint_send_lock);
switch(endpoint->endpoint_state) {
case MCA_BTL_IB_CONNECTING:
@ -568,7 +568,7 @@ int mca_btl_mvapi_endpoint_send(
rc = OMPI_ERR_UNREACH;
}
OMPI_THREAD_UNLOCK(&endpoint->endpoint_send_lock);
OPAL_THREAD_UNLOCK(&endpoint->endpoint_send_lock);
return rc;
}


@ -79,10 +79,10 @@ struct mca_btl_base_endpoint_t {
double endpoint_tstamp;
/**< timestamp of when the first connection was attempted */
ompi_mutex_t endpoint_send_lock;
opal_mutex_t endpoint_send_lock;
/**< lock for concurrent access to endpoint state */
ompi_mutex_t endpoint_recv_lock;
opal_mutex_t endpoint_recv_lock;
/**< lock for concurrent access to endpoint state */
opal_list_t pending_send_frags;
@ -154,14 +154,14 @@ static inline int mca_btl_mvapi_endpoint_post_rr_sub(int cnt,
MCA_BTL_IB_VAPI_ERROR(frag->ret, "EVAPI_post_rr_list");
return OMPI_ERROR;
}
OMPI_THREAD_ADD32(rr_posted, cnt);
OPAL_THREAD_ADD32(rr_posted, cnt);
return OMPI_SUCCESS;
}
static inline int mca_btl_mvapi_endpoint_post_rr( mca_btl_mvapi_endpoint_t * endpoint, int additional){
mca_btl_mvapi_module_t * mvapi_btl = endpoint->endpoint_btl;
int rc;
OMPI_THREAD_LOCK(&mvapi_btl->ib_lock);
OPAL_THREAD_LOCK(&mvapi_btl->ib_lock);
if(mvapi_btl->rr_posted_high <= mca_btl_mvapi_component.ib_rr_buf_min+additional && mvapi_btl->rr_posted_high < mca_btl_mvapi_component.ib_rr_buf_max){
@ -173,7 +173,7 @@ static inline int mca_btl_mvapi_endpoint_post_rr( mca_btl_mvapi_endpoint_t * end
endpoint->lcl_qp_hndl_high
);
if(rc != OMPI_SUCCESS){
OMPI_THREAD_UNLOCK(&mvapi_btl->ib_lock);
OPAL_THREAD_UNLOCK(&mvapi_btl->ib_lock);
return rc;
}
}
@ -187,12 +187,12 @@ static inline int mca_btl_mvapi_endpoint_post_rr( mca_btl_mvapi_endpoint_t * end
endpoint->lcl_qp_hndl_low
);
if(rc != OMPI_SUCCESS) {
OMPI_THREAD_UNLOCK(&mvapi_btl->ib_lock);
OPAL_THREAD_UNLOCK(&mvapi_btl->ib_lock);
return rc;
}
}
OMPI_THREAD_UNLOCK(&mvapi_btl->ib_lock);
OPAL_THREAD_UNLOCK(&mvapi_btl->ib_lock);
return OMPI_SUCCESS;


@ -35,11 +35,11 @@ void mca_btl_mvapi_proc_construct(mca_btl_mvapi_proc_t* proc)
proc->proc_addr_count = 0;
proc->proc_endpoints = 0;
proc->proc_endpoint_count = 0;
OBJ_CONSTRUCT(&proc->proc_lock, ompi_mutex_t);
OBJ_CONSTRUCT(&proc->proc_lock, opal_mutex_t);
/* add to list of all proc instance */
OMPI_THREAD_LOCK(&mca_btl_mvapi_component.ib_lock);
OPAL_THREAD_LOCK(&mca_btl_mvapi_component.ib_lock);
opal_list_append(&mca_btl_mvapi_component.ib_procs, &proc->super);
OMPI_THREAD_UNLOCK(&mca_btl_mvapi_component.ib_lock);
OPAL_THREAD_UNLOCK(&mca_btl_mvapi_component.ib_lock);
}
/*
@ -49,9 +49,9 @@ void mca_btl_mvapi_proc_construct(mca_btl_mvapi_proc_t* proc)
void mca_btl_mvapi_proc_destruct(mca_btl_mvapi_proc_t* proc)
{
/* remove from list of all proc instances */
OMPI_THREAD_LOCK(&mca_btl_mvapi_component.ib_lock);
OPAL_THREAD_LOCK(&mca_btl_mvapi_component.ib_lock);
opal_list_remove_item(&mca_btl_mvapi_component.ib_procs, &proc->super);
OMPI_THREAD_UNLOCK(&mca_btl_mvapi_component.ib_lock);
OPAL_THREAD_UNLOCK(&mca_btl_mvapi_component.ib_lock);
/* release resources */
if(NULL != proc->proc_endpoints) {
@ -68,7 +68,7 @@ static mca_btl_mvapi_proc_t* mca_btl_mvapi_proc_lookup_ompi(ompi_proc_t* ompi_pr
{
mca_btl_mvapi_proc_t* ib_proc;
OMPI_THREAD_LOCK(&mca_btl_mvapi_component.ib_lock);
OPAL_THREAD_LOCK(&mca_btl_mvapi_component.ib_lock);
for(ib_proc = (mca_btl_mvapi_proc_t*)
opal_list_get_first(&mca_btl_mvapi_component.ib_procs);
@ -77,13 +77,13 @@ static mca_btl_mvapi_proc_t* mca_btl_mvapi_proc_lookup_ompi(ompi_proc_t* ompi_pr
ib_proc = (mca_btl_mvapi_proc_t*)opal_list_get_next(ib_proc)) {
if(ib_proc->proc_ompi == ompi_proc) {
OMPI_THREAD_UNLOCK(&mca_btl_mvapi_component.ib_lock);
OPAL_THREAD_UNLOCK(&mca_btl_mvapi_component.ib_lock);
return ib_proc;
}
}
OMPI_THREAD_UNLOCK(&mca_btl_mvapi_component.ib_lock);
OPAL_THREAD_UNLOCK(&mca_btl_mvapi_component.ib_lock);
return NULL;
}


@ -53,7 +53,7 @@ struct mca_btl_mvapi_proc_t {
size_t proc_endpoint_count;
/**< number of endpoints */
ompi_mutex_t proc_lock;
opal_mutex_t proc_lock;
/**< lock to protect against concurrent access to proc state */
};
typedef struct mca_btl_mvapi_proc_t mca_btl_mvapi_proc_t;


@ -86,7 +86,7 @@ int mca_btl_openib_add_procs(
* don't bind this PTL instance to the proc.
*/
OMPI_THREAD_LOCK(&ib_proc->proc_lock);
OPAL_THREAD_LOCK(&ib_proc->proc_lock);
/* The btl_proc datastructure is shared by all IB PTL
* instances that are trying to reach this destination.
@ -94,7 +94,7 @@ int mca_btl_openib_add_procs(
*/
ib_peer = OBJ_NEW(mca_btl_openib_endpoint_t);
if(NULL == ib_peer) {
OMPI_THREAD_UNLOCK(&module_proc->proc_lock);
OPAL_THREAD_UNLOCK(&module_proc->proc_lock);
return OMPI_ERR_OUT_OF_RESOURCE;
}
@ -102,12 +102,12 @@ int mca_btl_openib_add_procs(
rc = mca_btl_openib_proc_insert(ib_proc, ib_peer);
if(rc != OMPI_SUCCESS) {
OBJ_RELEASE(ib_peer);
OMPI_THREAD_UNLOCK(&module_proc->proc_lock);
OPAL_THREAD_UNLOCK(&module_proc->proc_lock);
continue;
}
ompi_bitmap_set_bit(reachable, i);
OMPI_THREAD_UNLOCK(&module_proc->proc_lock);
OPAL_THREAD_UNLOCK(&module_proc->proc_lock);
peers[i] = ib_peer;
}
@ -134,10 +134,10 @@ int mca_btl_openib_register(
mca_btl_openib_module_t* mvapi_btl = (mca_btl_openib_module_t*) btl;
OMPI_THREAD_LOCK(&ib->btl.ib_lock);
OPAL_THREAD_LOCK(&ib->btl.ib_lock);
mvapi_btl->ib_reg[tag].cbfunc = cbfunc;
mvapi_btl->ib_reg[tag].cbdata = cbdata;
OMPI_THREAD_UNLOCK(&ib->btl.ib_lock);
OPAL_THREAD_UNLOCK(&ib->btl.ib_lock);
return OMPI_SUCCESS;
}


@ -74,7 +74,7 @@ struct mca_btl_openib_component_t {
ompi_event_t ib_recv_event;
/**< event structure for recvs */
ompi_mutex_t ib_lock;
opal_mutex_t ib_lock;
/**< lock for accessing module state */
int ib_mem_registry_hints_log_size;
@ -136,7 +136,7 @@ struct mca_btl_openib_module_t {
entries, this allows us to keep a working set of memory pinned */
opal_list_t repost; /**< list of buffers to repost */
ompi_mutex_t ib_lock; /**< module level lock */
opal_mutex_t ib_lock; /**< module level lock */
mca_mpool_base_module_t* ib_pool; /**< ib memory pool */


@ -294,7 +294,7 @@ mca_btl_base_module_t** mca_btl_openib_component_init(int *num_btl_modules,
a distinct btl module for each hca port */
OBJ_CONSTRUCT(&btl_list, opal_list_t);
OBJ_CONSTRUCT(&mca_btl_openib_component.ib_lock, ompi_mutex_t);
OBJ_CONSTRUCT(&mca_btl_openib_component.ib_lock, opal_mutex_t);
for(i = 0; i < num_devs; i++){
@ -370,7 +370,7 @@ mca_btl_base_module_t** mca_btl_openib_component_init(int *num_btl_modules,
/* Initialize module state */
OBJ_CONSTRUCT(&mvapi_btl->ib_lock, ompi_mutex_t);
OBJ_CONSTRUCT(&mvapi_btl->ib_lock, opal_mutex_t);
OBJ_CONSTRUCT(&mvapi_btl->send_free_eager, ompi_free_list_t);
OBJ_CONSTRUCT(&mvapi_btl->send_free_max, ompi_free_list_t);
OBJ_CONSTRUCT(&mvapi_btl->send_free_frag, ompi_free_list_t);
@ -542,7 +542,7 @@ int mca_btl_openib_component_progress()
mvapi_btl->ib_reg[frag->hdr->tag].cbfunc(&mvapi_btl->super, frag->hdr->tag, &frag->base, mvapi_btl->ib_reg[frag->hdr->tag].cbdata);
OMPI_FREE_LIST_RETURN(&(mvapi_btl->recv_free_eager), (opal_list_item_t*) frag);
OMPI_THREAD_ADD32(&mvapi_btl->rr_posted_high, -1);
OPAL_THREAD_ADD32(&mvapi_btl->rr_posted_high, -1);
mca_btl_openib_endpoint_post_rr(((mca_btl_openib_frag_t*)comp.id)->endpoint, 0);
@ -588,7 +588,7 @@ int mca_btl_openib_component_progress()
mvapi_btl->ib_reg[frag->hdr->tag].cbfunc(&mvapi_btl->super, frag->hdr->tag, &frag->base, mvapi_btl->ib_reg[frag->hdr->tag].cbdata);
OMPI_FREE_LIST_RETURN(&(mvapi_btl->recv_free_max), (opal_list_item_t*) frag);
OMPI_THREAD_ADD32(&mvapi_btl->rr_posted_low, -1);
OPAL_THREAD_ADD32(&mvapi_btl->rr_posted_low, -1);
mca_btl_openib_endpoint_post_rr(((mca_btl_openib_frag_t*)comp.id)->endpoint, 0);


@ -111,8 +111,8 @@ static void mca_btl_openib_endpoint_construct(mca_btl_base_endpoint_t* endpoint)
endpoint->endpoint_tstamp = 0.0;
endpoint->endpoint_state = MCA_BTL_IB_CLOSED;
endpoint->endpoint_retries = 0;
OBJ_CONSTRUCT(&endpoint->endpoint_send_lock, ompi_mutex_t);
OBJ_CONSTRUCT(&endpoint->endpoint_recv_lock, ompi_mutex_t);
OBJ_CONSTRUCT(&endpoint->endpoint_send_lock, opal_mutex_t);
OBJ_CONSTRUCT(&endpoint->endpoint_recv_lock, opal_mutex_t);
OBJ_CONSTRUCT(&endpoint->pending_send_frags, opal_list_t);
}
@ -510,7 +510,7 @@ int mca_btl_openib_endpoint_send(
int rc;
mca_btl_openib_module_t *mvapi_btl;
OMPI_THREAD_LOCK(&endpoint->endpoint_send_lock);
OPAL_THREAD_LOCK(&endpoint->endpoint_send_lock);
switch(endpoint->endpoint_state) {
case MCA_BTL_IB_CONNECTING:
@ -568,7 +568,7 @@ int mca_btl_openib_endpoint_send(
rc = OMPI_ERR_UNREACH;
}
OMPI_THREAD_UNLOCK(&endpoint->endpoint_send_lock);
OPAL_THREAD_UNLOCK(&endpoint->endpoint_send_lock);
return rc;
}


@ -79,10 +79,10 @@ struct mca_btl_base_endpoint_t {
double endpoint_tstamp;
/**< timestamp of when the first connection was attempted */
ompi_mutex_t endpoint_send_lock;
opal_mutex_t endpoint_send_lock;
/**< lock for concurrent access to endpoint state */
ompi_mutex_t endpoint_recv_lock;
opal_mutex_t endpoint_recv_lock;
/**< lock for concurrent access to endpoint state */
opal_list_t pending_send_frags;
@ -154,14 +154,14 @@ static inline int mca_btl_openib_endpoint_post_rr_sub(int cnt,
MCA_BTL_IB_VAPI_ERROR(frag->ret, "EVAPI_post_rr_list");
return OMPI_ERROR;
}
OMPI_THREAD_ADD32(rr_posted, cnt);
OPAL_THREAD_ADD32(rr_posted, cnt);
return OMPI_SUCCESS;
}
static inline int mca_btl_openib_endpoint_post_rr( mca_btl_openib_endpoint_t * endpoint, int additional){
mca_btl_openib_module_t * mvapi_btl = endpoint->endpoint_btl;
int rc;
OMPI_THREAD_LOCK(&mvapi_btl->ib_lock);
OPAL_THREAD_LOCK(&mvapi_btl->ib_lock);
if(mvapi_btl->rr_posted_high <= mca_btl_openib_component.ib_rr_buf_min+additional && mvapi_btl->rr_posted_high < mca_btl_openib_component.ib_rr_buf_max){
@ -173,7 +173,7 @@ static inline int mca_btl_openib_endpoint_post_rr( mca_btl_openib_endpoint_t * e
endpoint->lcl_qp_hndl_high
);
if(rc != OMPI_SUCCESS){
OMPI_THREAD_UNLOCK(&mvapi_btl->ib_lock);
OPAL_THREAD_UNLOCK(&mvapi_btl->ib_lock);
return rc;
}
}
@ -187,12 +187,12 @@ static inline int mca_btl_openib_endpoint_post_rr( mca_btl_openib_endpoint_t * e
endpoint->lcl_qp_hndl_low
);
if(rc != OMPI_SUCCESS) {
OMPI_THREAD_UNLOCK(&mvapi_btl->ib_lock);
OPAL_THREAD_UNLOCK(&mvapi_btl->ib_lock);
return rc;
}
}
OMPI_THREAD_UNLOCK(&mvapi_btl->ib_lock);
OPAL_THREAD_UNLOCK(&mvapi_btl->ib_lock);
return OMPI_SUCCESS;


@ -35,11 +35,11 @@ void mca_btl_openib_proc_construct(mca_btl_openib_proc_t* proc)
proc->proc_addr_count = 0;
proc->proc_endpoints = 0;
proc->proc_endpoint_count = 0;
OBJ_CONSTRUCT(&proc->proc_lock, ompi_mutex_t);
OBJ_CONSTRUCT(&proc->proc_lock, opal_mutex_t);
/* add to list of all proc instance */
OMPI_THREAD_LOCK(&mca_btl_openib_component.ib_lock);
OPAL_THREAD_LOCK(&mca_btl_openib_component.ib_lock);
opal_list_append(&mca_btl_openib_component.ib_procs, &proc->super);
OMPI_THREAD_UNLOCK(&mca_btl_openib_component.ib_lock);
OPAL_THREAD_UNLOCK(&mca_btl_openib_component.ib_lock);
}
/*
@ -49,9 +49,9 @@ void mca_btl_openib_proc_construct(mca_btl_openib_proc_t* proc)
void mca_btl_openib_proc_destruct(mca_btl_openib_proc_t* proc)
{
/* remove from list of all proc instances */
OMPI_THREAD_LOCK(&mca_btl_openib_component.ib_lock);
OPAL_THREAD_LOCK(&mca_btl_openib_component.ib_lock);
opal_list_remove_item(&mca_btl_openib_component.ib_procs, &proc->super);
OMPI_THREAD_UNLOCK(&mca_btl_openib_component.ib_lock);
OPAL_THREAD_UNLOCK(&mca_btl_openib_component.ib_lock);
/* release resources */
if(NULL != proc->proc_endpoints) {
@ -68,7 +68,7 @@ static mca_btl_openib_proc_t* mca_btl_openib_proc_lookup_ompi(ompi_proc_t* ompi_
{
mca_btl_openib_proc_t* ib_proc;
OMPI_THREAD_LOCK(&mca_btl_openib_component.ib_lock);
OPAL_THREAD_LOCK(&mca_btl_openib_component.ib_lock);
for(ib_proc = (mca_btl_openib_proc_t*)
opal_list_get_first(&mca_btl_openib_component.ib_procs);
@ -77,13 +77,13 @@ static mca_btl_openib_proc_t* mca_btl_openib_proc_lookup_ompi(ompi_proc_t* ompi_
ib_proc = (mca_btl_openib_proc_t*)opal_list_get_next(ib_proc)) {
if(ib_proc->proc_ompi == ompi_proc) {
OMPI_THREAD_UNLOCK(&mca_btl_openib_component.ib_lock);
OPAL_THREAD_UNLOCK(&mca_btl_openib_component.ib_lock);
return ib_proc;
}
}
OMPI_THREAD_UNLOCK(&mca_btl_openib_component.ib_lock);
OPAL_THREAD_UNLOCK(&mca_btl_openib_component.ib_lock);
return NULL;
}


@ -53,7 +53,7 @@ struct mca_btl_openib_proc_t {
size_t proc_endpoint_count;
/**< number of endpoints */
ompi_mutex_t proc_lock;
opal_mutex_t proc_lock;
/**< lock to protect against concurrent access to proc state */
};
typedef struct mca_btl_openib_proc_t mca_btl_openib_proc_t;


@ -68,7 +68,7 @@ struct mca_btl_portals_component_t {
int portals_free_list_inc_num;
/* lock for accessing component */
ompi_mutex_t portals_lock;
opal_mutex_t portals_lock;
};
typedef struct mca_btl_portals_component_t mca_btl_portals_component_t;


@ -23,7 +23,7 @@
#include "include/constants.h"
#include "util/output.h"
#include "threads/thread.h"
#include "opal/threads/thread.h"
#include "btl_portals.h"
#include "btl_portals_compat.h"
@ -113,7 +113,7 @@ mca_btl_portals_component_open(void)
/* initalize component objects */
OBJ_CONSTRUCT(&mca_btl_portals_component.portals_lock,
ompi_mutex_t);
opal_mutex_t);
/* get configured state for component */
#if PTL_PORTALS_UTCP


@ -23,7 +23,7 @@
#include <fcntl.h>
#include <errno.h>
#include "threads/mutex.h"
#include "opal/threads/mutex.h"
#include "datatype/convertor.h"
#include "include/sys/atomic.h"
#include "util/output.h"


@ -44,7 +44,7 @@ struct mca_btl_self_component_t {
int free_list_num; /**< initial size of free lists */
int free_list_max; /**< maximum size of free lists */
int free_list_inc; /**< number of elements to alloc when growing free lists */
ompi_mutex_t self_lock;
opal_mutex_t self_lock;
ompi_free_list_t self_frags_eager; /**< free list of self first */
ompi_free_list_t self_frags_send; /**< free list of self second */
ompi_free_list_t self_frags_rdma; /**< free list of self second */

Просмотреть файл

@ -122,7 +122,7 @@ int mca_btl_self_component_open(void)
mca_btl_self_param_register_int("flags", MCA_BTL_FLAGS_RDMA);
/* initialize objects */
OBJ_CONSTRUCT(&mca_btl_self_component.self_lock, ompi_mutex_t);
OBJ_CONSTRUCT(&mca_btl_self_component.self_lock, opal_mutex_t);
OBJ_CONSTRUCT(&mca_btl_self_component.self_frags_eager, ompi_free_list_t);
OBJ_CONSTRUCT(&mca_btl_self_component.self_frags_send, ompi_free_list_t);
OBJ_CONSTRUCT(&mca_btl_self_component.self_frags_rdma, ompi_free_list_t);


@ -23,7 +23,7 @@
#include <fcntl.h>
#include <errno.h>
#include "threads/mutex.h"
#include "opal/threads/mutex.h"
#include "datatype/convertor.h"
#include "include/sys/atomic.h"
#include "util/output.h"


@ -81,7 +81,7 @@ struct mca_btl_sm_component_t {
void* sm_mpool_base; /**< base address of shared memory pool */
size_t eager_limit; /**< first fragment size */
size_t max_frag_size; /**< maximum (second and beyone) fragment size */
ompi_mutex_t sm_lock;
opal_mutex_t sm_lock;
char* sm_resouce_ctl_file; /**< name of shared memory file used
to coordinate resource usage */
mca_common_sm_mmap_t *mmap_file; /**< description of mmap'ed file */
@ -126,7 +126,7 @@ struct mca_btl_sm_component_t {
#if OMPI_ENABLE_PROGRESS_THREADS == 1
char sm_fifo_path[PATH_MAX]; /**< path to fifo used to signal this process */
int sm_fifo_fd; /**< file descriptor corresponding to opened fifo */
ompi_thread_t sm_fifo_thread;
opal_thread_t sm_fifo_thread;
#endif
};
typedef struct mca_btl_sm_component_t mca_btl_sm_component_t;


@ -150,7 +150,7 @@ int mca_btl_sm_component_open(void)
mca_btl_sm_param_register_int("sm_extra_procs", 2);
/* initialize objects */
OBJ_CONSTRUCT(&mca_btl_sm_component.sm_lock, ompi_mutex_t);
OBJ_CONSTRUCT(&mca_btl_sm_component.sm_lock, opal_mutex_t);
OBJ_CONSTRUCT(&mca_btl_sm_component.sm_frags1, ompi_free_list_t);
OBJ_CONSTRUCT(&mca_btl_sm_component.sm_frags2, ompi_free_list_t);
return OMPI_SUCCESS;
@ -198,7 +198,7 @@ int mca_btl_sm_component_close(void)
ompi_output(0, "mca_btl_sm_component_close: write fifo failed: errno=%d\n",
errno);
}
ompi_thread_join(&mca_btl_sm_component.sm_fifo_thread, NULL);
opal_thread_join(&mca_btl_sm_component.sm_fifo_thread, NULL);
close(mca_btl_sm_component.sm_fifo_fd);
unlink(mca_btl_sm_component.sm_fifo_path);
}
@ -250,9 +250,9 @@ mca_btl_base_module_t** mca_btl_sm_component_init(
return NULL;
}
OBJ_CONSTRUCT(&mca_btl_sm_component.sm_fifo_thread, ompi_thread_t);
mca_btl_sm_component.sm_fifo_thread.t_run = (ompi_thread_fn_t) mca_btl_sm_component_event_thread;
ompi_thread_start(&mca_btl_sm_component.sm_fifo_thread);
OBJ_CONSTRUCT(&mca_btl_sm_component.sm_fifo_thread, opal_thread_t);
mca_btl_sm_component.sm_fifo_thread.t_run = (opal_thread_fn_t) mca_btl_sm_component_event_thread;
opal_thread_start(&mca_btl_sm_component.sm_fifo_thread);
#endif
/* allocate the Shared Memory PTL */
@ -346,7 +346,7 @@ int mca_btl_sm_component_progress(void)
}
/* acquire thread lock */
if( ompi_using_threads() ) {
if( opal_using_threads() ) {
opal_atomic_lock( &(fifo->tail_lock) );
}
@ -356,14 +356,14 @@ int mca_btl_sm_component_progress(void)
ompi_fifo_read_from_tail_same_base_addr( fifo );
if( OMPI_CB_FREE == frag ) {
/* release thread lock */
if( ompi_using_threads() ) {
if( opal_using_threads() ) {
opal_atomic_unlock(&(fifo->tail_lock));
}
continue;
}
/* release thread lock */
if( ompi_using_threads() ) {
if( opal_using_threads() ) {
opal_atomic_unlock(&(fifo->tail_lock));
}
@ -423,7 +423,7 @@ int mca_btl_sm_component_progress(void)
}
/* acquire thread lock */
if( ompi_using_threads() ) {
if( opal_using_threads() ) {
opal_atomic_lock(&(fifo->tail_lock));
}
@ -433,14 +433,14 @@ int mca_btl_sm_component_progress(void)
mca_btl_sm_component.sm_offset[peer_smp_rank]);
if( OMPI_CB_FREE == frag ) {
/* release thread lock */
if( ompi_using_threads() ) {
if( opal_using_threads() ) {
opal_atomic_unlock(&(fifo->tail_lock));
}
continue;
}
/* release thread lock */
if( ompi_using_threads() ) {
if( opal_using_threads() ) {
opal_atomic_unlock(&(fifo->tail_lock));
}
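The progress loop above only takes the fifo tail lock when the process is actually running with threads (opal_using_threads()), so a single-threaded run skips the atomic lock entirely. A minimal sketch of that conditional-locking idiom in plain C11, with an invented using_threads flag and an atomic_flag spinlock standing in for the opal atomic lock:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool using_threads = false;            /* would be true once a second thread exists */
static atomic_flag tail_lock = ATOMIC_FLAG_INIT;

static void maybe_lock(void)
{
    if (using_threads)                        /* single-threaded: skip the atomic entirely */
        while (atomic_flag_test_and_set_explicit(&tail_lock, memory_order_acquire))
            ;                                 /* spin until the lock is free */
}

static void maybe_unlock(void)
{
    if (using_threads)
        atomic_flag_clear_explicit(&tail_lock, memory_order_release);
}

int main(void)
{
    int tail = 0;

    maybe_lock();                             /* no-op here because using_threads is false */
    tail++;                                   /* pretend to pop a fragment off the fifo tail */
    maybe_unlock();

    printf("tail = %d\n", tail);
    return 0;
}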


@ -11,7 +11,7 @@ do { \
fifo=&(mca_btl_sm_component.fifo[my_smp_rank][peer_smp_rank]); \
\
/* thread lock */ \
if(ompi_using_threads()) \
if(opal_using_threads()) \
opal_atomic_lock(&fifo->head_lock); \
if(OMPI_CB_FREE == fifo->head) { \
/* no queues have been allocated - allocate now */ \
@ -23,7 +23,7 @@ do { \
0,0,0, \
fifo, mca_btl_sm_component.sm_mpool); \
if( rc != OMPI_SUCCESS ) { \
if(ompi_using_threads()) \
if(opal_using_threads()) \
opal_atomic_unlock(&(fifo->head_lock)); \
break; \
} \
@ -36,7 +36,7 @@ do { \
MCA_BTL_SM_SIGNAL_PEER(btl_peer); \
rc=OMPI_SUCCESS; \
} \
if(ompi_using_threads()) \
if(opal_using_threads()) \
opal_atomic_unlock(&fifo->head_lock); \
} while(0)
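The send macro above wraps its whole body in do { ... } while(0) so that it expands to a single statement, lazily sets up the shared FIFO on first use under the head lock, and uses break to bail out of the macro when allocation fails. A tiny sketch of the same shape with invented names (capacity checks omitted for brevity):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
static int *queue = NULL;                     /* lazily allocated on first push */
static int  q_len = 0;

/* Statement-like macro: do/while(0) lets it sit safely under an unbraced
   if/else, and break exits the macro early on the failure path. */
#define QUEUE_PUSH(value, rc)                                   \
do {                                                            \
    (rc) = 0;                                                   \
    pthread_mutex_lock(&q_lock);                                \
    if (NULL == queue) {                                        \
        queue = malloc(16 * sizeof(int));                       \
        if (NULL == queue) {                                    \
            (rc) = -1;                                          \
            pthread_mutex_unlock(&q_lock);                      \
            break;                                              \
        }                                                       \
    }                                                           \
    queue[q_len++] = (value);                                   \
    pthread_mutex_unlock(&q_lock);                              \
} while(0)

int main(void)
{
    int rc;
    QUEUE_PUSH(7, rc);
    printf("rc=%d len=%d\n", rc, q_len);
    free(queue);
    return 0;
}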


@ -86,7 +86,7 @@ int mca_btl_template_add_procs(
* don't bind this PTL instance to the proc.
*/
OMPI_THREAD_LOCK(&template_proc->proc_lock);
OPAL_THREAD_LOCK(&template_proc->proc_lock);
/* The btl_proc data structure is shared by all TEMPLATE PTL
* instances that are trying to reach this destination.
@ -94,7 +94,7 @@ int mca_btl_template_add_procs(
*/
template_endpoint = OBJ_NEW(mca_btl_template_endpoint_t);
if(NULL == template_endpoint) {
OMPI_THREAD_UNLOCK(&module_proc->proc_lock);
OPAL_THREAD_UNLOCK(&module_proc->proc_lock);
return OMPI_ERR_OUT_OF_RESOURCE;
}
@ -102,12 +102,12 @@ int mca_btl_template_add_procs(
rc = mca_btl_template_proc_insert(template_proc, template_endpoint);
if(rc != OMPI_SUCCESS) {
OBJ_RELEASE(template_endpoint);
OMPI_THREAD_UNLOCK(&module_proc->proc_lock);
OPAL_THREAD_UNLOCK(&module_proc->proc_lock);
continue;
}
ompi_bitmap_set_bit(reachable, i);
OMPI_THREAD_UNLOCK(&module_proc->proc_lock);
OPAL_THREAD_UNLOCK(&module_proc->proc_lock);
peers[i] = template_endpoint;
}


@ -66,7 +66,7 @@ struct mca_btl_template_component_t {
opal_list_t template_procs;
/**< list of template proc structures */
ompi_mutex_t template_lock;
opal_mutex_t template_lock;
/**< lock for accessing module state */
char* template_mpool_name;
@ -94,7 +94,7 @@ struct mca_btl_template_module_t {
ompi_free_list_t template_frag_user;
/* lock for accessing module state */
ompi_mutex_t template_lock;
opal_mutex_t template_lock;
#if MCA_BTL_HAS_MPOOL
struct mca_mpool_base_module_t* template_mpool;


@ -35,11 +35,11 @@ void mca_btl_template_proc_construct(mca_btl_template_proc_t* proc)
proc->proc_addr_count = 0;
proc->proc_endpoints = 0;
proc->proc_endpoint_count = 0;
OBJ_CONSTRUCT(&proc->proc_lock, ompi_mutex_t);
OBJ_CONSTRUCT(&proc->proc_lock, opal_mutex_t);
/* add to list of all proc instances */
OMPI_THREAD_LOCK(&mca_btl_template_component.template_lock);
OPAL_THREAD_LOCK(&mca_btl_template_component.template_lock);
opal_list_append(&mca_btl_template_component.template_procs, &proc->super);
OMPI_THREAD_UNLOCK(&mca_btl_template_component.template_lock);
OPAL_THREAD_UNLOCK(&mca_btl_template_component.template_lock);
}
/*
@ -49,9 +49,9 @@ void mca_btl_template_proc_construct(mca_btl_template_proc_t* proc)
void mca_btl_template_proc_destruct(mca_btl_template_proc_t* proc)
{
/* remove from list of all proc instances */
OMPI_THREAD_LOCK(&mca_btl_template_component.template_lock);
OPAL_THREAD_LOCK(&mca_btl_template_component.template_lock);
opal_list_remove_item(&mca_btl_template_component.template_procs, &proc->super);
OMPI_THREAD_UNLOCK(&mca_btl_template_component.template_lock);
OPAL_THREAD_UNLOCK(&mca_btl_template_component.template_lock);
/* release resources */
if(NULL != proc->proc_endpoints) {
@ -68,7 +68,7 @@ static mca_btl_template_proc_t* mca_btl_template_proc_lookup_ompi(ompi_proc_t* o
{
mca_btl_template_proc_t* template_proc;
OMPI_THREAD_LOCK(&mca_btl_template_component.template_lock);
OPAL_THREAD_LOCK(&mca_btl_template_component.template_lock);
for(template_proc = (mca_btl_template_proc_t*)
opal_list_get_first(&mca_btl_template_component.template_procs);
@ -77,13 +77,13 @@ static mca_btl_template_proc_t* mca_btl_template_proc_lookup_ompi(ompi_proc_t* o
template_proc = (mca_btl_template_proc_t*)opal_list_get_next(template_proc)) {
if(template_proc->proc_ompi == ompi_proc) {
OMPI_THREAD_UNLOCK(&mca_btl_template_component.template_lock);
OPAL_THREAD_UNLOCK(&mca_btl_template_component.template_lock);
return template_proc;
}
}
OMPI_THREAD_UNLOCK(&mca_btl_template_component.template_lock);
OPAL_THREAD_UNLOCK(&mca_btl_template_component.template_lock);
return NULL;
}


@ -53,7 +53,7 @@ struct mca_btl_template_proc_t {
size_t proc_endpoint_count;
/**< number of endpoints */
ompi_mutex_t proc_lock;
opal_mutex_t proc_lock;
/**< lock to protect against concurrent access to proc state */
};
typedef struct mca_btl_template_proc_t mca_btl_template_proc_t;


@ -17,7 +17,7 @@
#include "ompi_config.h"
#include "opal/class/opal_list.h"
#include "threads/mutex.h"
#include "opal/threads/mutex.h"
#include "mca/base/base.h"
#include "mca/io/io.h"
#include "mca/io/base/base.h"
@ -29,7 +29,7 @@
static bool initialized = false;
static opal_list_t components_in_use;
#if OMPI_HAVE_THREAD_SUPPORT
static ompi_mutex_t mutex;
static opal_mutex_t mutex;
#endif /* OMPI_HAVE_THREAD_SUPPORT */
struct component_item_t {
@ -67,7 +67,7 @@ int mca_io_base_component_add(mca_io_base_components_t *comp)
component_item_t *citem;
mca_base_component_t *c;
OMPI_THREAD_LOCK(&mutex);
OPAL_THREAD_LOCK(&mutex);
/* Save the component in the ref-counted list of components in use.
This is used for the progression of non-blocking IO requests.
@ -114,7 +114,7 @@ int mca_io_base_component_add(mca_io_base_components_t *comp)
opal_list_append(&components_in_use, (opal_list_item_t *) citem);
}
OMPI_THREAD_UNLOCK(&mutex);
OPAL_THREAD_UNLOCK(&mutex);
/* All done */
@ -131,7 +131,7 @@ int mca_io_base_component_del(mca_io_base_components_t *comp)
opal_list_item_t *item;
component_item_t *citem;
OMPI_THREAD_LOCK(&mutex);
OPAL_THREAD_LOCK(&mutex);
/* Find the component in the list */
@ -158,7 +158,7 @@ int mca_io_base_component_del(mca_io_base_components_t *comp)
}
}
OMPI_THREAD_UNLOCK(&mutex);
OPAL_THREAD_UNLOCK(&mutex);
/* All done */
@ -175,7 +175,7 @@ int mca_io_base_component_run_progress(void)
if (! initialized) return 0;
OMPI_THREAD_LOCK(&mutex);
OPAL_THREAD_LOCK(&mutex);
/* Go through all the components and call their progress
function */
@ -198,7 +198,7 @@ int mca_io_base_component_run_progress(void)
}
}
OMPI_THREAD_UNLOCK(&mutex);
OPAL_THREAD_UNLOCK(&mutex);
return count;
}
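The component add and run_progress routines above keep a ref-counted list of active IO components behind one mutex so that registration cannot race with the progress sweep. A compact sketch of that shape, using a fixed-size array and invented names in place of opal_list_t:

#include <pthread.h>
#include <stdio.h>

#define MAX_COMPONENTS 8

typedef int (*progress_fn_t)(void);

static struct { progress_fn_t fn; int refcount; } registry[MAX_COMPONENTS];
static int num_registered = 0;
static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;

/* Register a progress callback, or bump its refcount if already present. */
static void component_add(progress_fn_t fn)
{
    pthread_mutex_lock(&registry_lock);
    for (int i = 0; i < num_registered; ++i) {
        if (registry[i].fn == fn) {
            registry[i].refcount++;
            pthread_mutex_unlock(&registry_lock);
            return;
        }
    }
    if (num_registered < MAX_COMPONENTS) {
        registry[num_registered].fn = fn;
        registry[num_registered].refcount = 1;
        num_registered++;
    }
    pthread_mutex_unlock(&registry_lock);
}

/* Call every registered callback; return how many reported progress. */
static int run_progress(void)
{
    int count = 0;
    pthread_mutex_lock(&registry_lock);
    for (int i = 0; i < num_registered; ++i)
        count += registry[i].fn();
    pthread_mutex_unlock(&registry_lock);
    return count;
}

static int fake_progress(void) { return 1; }

int main(void)
{
    component_add(fake_progress);
    printf("progressed: %d\n", run_progress());
    return 0;
}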


@ -126,14 +126,14 @@ int mca_io_base_request_alloc(ompi_file_t *file,
avoid locking and unlocking. */
if (opal_list_get_size(&file->f_io_requests) > 0) {
OMPI_THREAD_LOCK(&file->f_io_requests_lock);
OPAL_THREAD_LOCK(&file->f_io_requests_lock);
if (opal_list_get_size(&file->f_io_requests) > 0) {
*req = (mca_io_base_request_t*)
opal_list_remove_first(&file->f_io_requests);
} else {
*req = NULL;
}
OMPI_THREAD_UNLOCK(&file->f_io_requests_lock);
OPAL_THREAD_UNLOCK(&file->f_io_requests_lock);
} else {
*req = NULL;
}
@ -207,9 +207,9 @@ void mca_io_base_request_free(ompi_file_t *file,
/* Put the request back on the per-module freelist, since it's
been initialized for that module */
OMPI_THREAD_LOCK(&file->f_io_requests_lock);
OPAL_THREAD_LOCK(&file->f_io_requests_lock);
opal_list_prepend(&file->f_io_requests, (opal_list_item_t*) req);
OMPI_THREAD_UNLOCK(&file->f_io_requests_lock);
OPAL_THREAD_UNLOCK(&file->f_io_requests_lock);
}
@ -220,22 +220,22 @@ void mca_io_base_request_return(ompi_file_t *file)
{
opal_list_item_t *p, *next;
OMPI_THREAD_LOCK(&file->f_io_requests_lock);
OPAL_THREAD_LOCK(&file->f_io_requests_lock);
for (p = opal_list_get_first(&file->f_io_requests);
p != opal_list_get_end(&file->f_io_requests);
p = next) {
next = opal_list_get_next(p);
OMPI_FREE_LIST_RETURN(&mca_io_base_requests, p);
}
OMPI_THREAD_UNLOCK(&file->f_io_requests_lock);
OPAL_THREAD_UNLOCK(&file->f_io_requests_lock);
}
#if OMPI_ENABLE_PROGRESS_THREADS
static volatile bool thread_running = false;
static volatile bool thread_done = false;
static ompi_thread_t progress_thread;
static ompi_mutex_t progress_mutex;
static ompi_condition_t progress_cond;
static opal_thread_t progress_thread;
static opal_mutex_t progress_mutex;
static opal_condition_t progress_cond;
static void*
request_progress_thread(opal_object_t *arg)
@ -252,7 +252,7 @@ request_progress_thread(opal_object_t *arg)
mca_io_base_component_run_progress();
sleep(2);
}
ompi_condition_timedwait(&progress_cond, &progress_mutex, &abstime);
opal_condition_timedwait(&progress_cond, &progress_mutex, &abstime);
}
return NULL;
@ -268,9 +268,9 @@ mca_io_base_request_progress_init()
thread_running = false;
thread_done = false;
OBJ_CONSTRUCT(&progress_mutex, ompi_mutex_t);
OBJ_CONSTRUCT(&progress_cond, ompi_condition_t);
OBJ_CONSTRUCT(&progress_thread, ompi_thread_t);
OBJ_CONSTRUCT(&progress_mutex, opal_mutex_t);
OBJ_CONSTRUCT(&progress_cond, opal_condition_t);
OBJ_CONSTRUCT(&progress_thread, opal_thread_t);
progress_thread.t_run = request_progress_thread;
progress_thread.t_arg = NULL;
@ -285,19 +285,19 @@ mca_io_base_request_progress_add()
/* if we don't have a progress thread, make us have a progress
thread */
if (! thread_running) {
OMPI_THREAD_LOCK(&progress_mutex);
OPAL_THREAD_LOCK(&progress_mutex);
if (! thread_running) {
thread_running = true;
ompi_thread_start(&progress_thread);
opal_thread_start(&progress_thread);
}
OMPI_THREAD_UNLOCK(&progress_mutex);
OPAL_THREAD_UNLOCK(&progress_mutex);
}
#endif /* OMPI_ENABLE_PROGRESS_THREADS */
OMPI_THREAD_ADD32(&mca_io_base_request_num_pending, 1);
OPAL_THREAD_ADD32(&mca_io_base_request_num_pending, 1);
#if OMPI_ENABLE_PROGRESS_THREADS
ompi_condition_signal(&progress_cond);
opal_condition_signal(&progress_cond);
#endif /* OMPI_ENABLE_PROGRESS_THREADS */
}
@ -305,7 +305,7 @@ mca_io_base_request_progress_add()
void
mca_io_base_request_progress_del()
{
OMPI_THREAD_ADD32(&mca_io_base_request_num_pending, -1);
OPAL_THREAD_ADD32(&mca_io_base_request_num_pending, -1);
}
@ -318,8 +318,8 @@ mca_io_base_request_progress_fini()
/* make the helper thread die */
thread_done = true;
if (thread_running) {
ompi_condition_signal(&progress_cond);
ompi_thread_join(&progress_thread, &ret);
opal_condition_signal(&progress_cond);
opal_thread_join(&progress_thread, &ret);
}
/* clean up */
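The progress-thread code above starts its helper thread lazily (the double-checked thread_running test), nudges it with a condition signal whenever a request becomes pending, and shuts it down by setting thread_done, signalling, and joining. A runnable sketch of that life cycle with plain pthreads; every name below is invented:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_t       helper;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static bool running = false;   /* has the helper been started?        */
static bool done    = false;   /* shutdown flag checked by the helper */

static void *helper_main(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&lock);
    while (!done) {
        /* real code would poll for I/O progress here, then sleep */
        pthread_cond_wait(&cond, &lock);
    }
    pthread_mutex_unlock(&lock);
    return NULL;
}

/* Called on the first pending request: start the helper exactly once. */
static void progress_add(void)
{
    if (!running) {                       /* cheap unlocked check      */
        pthread_mutex_lock(&lock);
        if (!running) {                   /* re-check under the lock   */
            running = true;
            pthread_create(&helper, NULL, helper_main, NULL);
        }
        pthread_mutex_unlock(&lock);
    }
    pthread_cond_signal(&cond);           /* wake the helper           */
}

static void progress_fini(void)
{
    pthread_mutex_lock(&lock);
    done = true;
    pthread_cond_signal(&cond);
    pthread_mutex_unlock(&lock);
    if (running)
        pthread_join(helper, NULL);
}

int main(void)
{
    progress_add();
    progress_fini();
    puts("helper started and joined");
    return 0;
}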


@ -120,7 +120,7 @@ extern "C" {
/*
* count of number of pending requests in the IO subsystem. Should
* only be modified with OMPI_THREAD_ADD32. Probably should not be
* only be modified with OPAL_THREAD_ADD32. Probably should not be
* used outside of IO components. Here only for the progress check
* optimization.
*/


@ -19,7 +19,7 @@
#include "request/request.h"
#include "file/file.h"
#include "threads/mutex.h"
#include "opal/threads/mutex.h"
#include "romio-dist/adio/include/romioconf.h"
#include "romio-dist/include/mpio.h"
#include "mca/io/io.h"
@ -32,7 +32,7 @@ extern "C" {
/*
* global variables, instantiated in module.c
*/
extern ompi_mutex_t mca_io_romio_mutex;
extern opal_mutex_t mca_io_romio_mutex;
extern mca_io_base_module_1_0_0_t mca_io_romio_module;
extern opal_list_t mca_io_romio_pending_requests;


@ -17,7 +17,7 @@
#include "ompi_config.h"
#include "mpi.h"
#include "opal/class/opal_list.h"
#include "threads/mutex.h"
#include "opal/threads/mutex.h"
#include "mca/base/base.h"
#include "mca/io/io.h"
#include "io_romio.h"
@ -55,7 +55,7 @@ static int delete_priority_param = -1;
/*
* Global, component-wide ROMIO mutex because ROMIO is not thread safe
*/
ompi_mutex_t mca_io_romio_mutex;
opal_mutex_t mca_io_romio_mutex;
/*
@ -226,9 +226,9 @@ static int delete_select(char *filename, struct ompi_info_t *info,
{
int ret;
OMPI_THREAD_LOCK (&mca_io_romio_mutex);
OPAL_THREAD_LOCK (&mca_io_romio_mutex);
ret = ROMIO_PREFIX(MPI_File_delete)(filename, info);
OMPI_THREAD_UNLOCK (&mca_io_romio_mutex);
OPAL_THREAD_UNLOCK (&mca_io_romio_mutex);
return ret;
}
@ -245,7 +245,7 @@ static int progress()
If a request finishes, remove it from the list. */
count = 0;
OMPI_THREAD_LOCK (&mca_io_romio_mutex);
OPAL_THREAD_LOCK (&mca_io_romio_mutex);
for (item = opal_list_get_first(&mca_io_romio_pending_requests);
item != opal_list_get_end(&mca_io_romio_pending_requests);
item = next) {
@ -256,7 +256,7 @@ static int progress()
ret = ROMIO_PREFIX(MPIO_Test)(&romio_rq, &flag,
&(((ompi_request_t *) item)->req_status));
if (ret < 0) {
OMPI_THREAD_UNLOCK (&mca_io_romio_mutex);
OPAL_THREAD_UNLOCK (&mca_io_romio_mutex);
return ret;
} else if (1 == flag) {
++count;
@ -272,13 +272,13 @@ static int progress()
if (ioreq->free_called) {
ret = ioreq->super.req_fini((ompi_request_t**) &ioreq);
if (OMPI_SUCCESS != ret) {
OMPI_THREAD_UNLOCK(&mca_io_romio_mutex);
OPAL_THREAD_UNLOCK(&mca_io_romio_mutex);
return ret;
}
}
}
}
OMPI_THREAD_UNLOCK (&mca_io_romio_mutex);
OPAL_THREAD_UNLOCK (&mca_io_romio_mutex);
/* Return how many requests completed */
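Every ROMIO entry point in this component is bracketed by the same component-wide mutex because ROMIO itself is not thread safe, so at most one thread is ever inside the library. The pattern reduces to a thin locking wrapper around each unsafe call; a minimal sketch with a made-up library function:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lib_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for a call into a library that is not thread safe. */
static int unsafe_lib_call(int x)
{
    static int internal_state;      /* unsynchronized state inside the library */
    internal_state += x;
    return internal_state;
}

/* Thread-safe wrapper: serialize every call through one global mutex. */
static int safe_lib_call(int x)
{
    pthread_mutex_lock(&lib_mutex);
    int ret = unsafe_lib_call(x);
    pthread_mutex_unlock(&lib_mutex);
    return ret;
}

int main(void)
{
    printf("%d\n", safe_lib_call(1));
    printf("%d\n", safe_lib_call(2));
    return 0;
}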


@ -34,10 +34,10 @@ mca_io_romio_file_open (ompi_communicator_t *comm,
mca_io_romio_data_t *data;
data = (mca_io_romio_data_t *) fh->f_io_selected_data;
OMPI_THREAD_LOCK (&mca_io_romio_mutex);
OPAL_THREAD_LOCK (&mca_io_romio_mutex);
ret = ROMIO_PREFIX(MPI_File_open)(comm, filename, amode, info,
&data->romio_fh);
OMPI_THREAD_UNLOCK (&mca_io_romio_mutex);
OPAL_THREAD_UNLOCK (&mca_io_romio_mutex);
return ret;
}
@ -51,9 +51,9 @@ mca_io_romio_file_close (ompi_file_t *fh)
data = (mca_io_romio_data_t *) fh->f_io_selected_data;
OMPI_THREAD_LOCK (&mca_io_romio_mutex);
OPAL_THREAD_LOCK (&mca_io_romio_mutex);
ret = ROMIO_PREFIX(MPI_File_close) (&data->romio_fh);
OMPI_THREAD_UNLOCK (&mca_io_romio_mutex);
OPAL_THREAD_UNLOCK (&mca_io_romio_mutex);
return ret;
}
@ -67,9 +67,9 @@ mca_io_romio_file_set_size (ompi_file_t *fh,
mca_io_romio_data_t *data;
data = (mca_io_romio_data_t *) fh->f_io_selected_data;
OMPI_THREAD_LOCK (&mca_io_romio_mutex);
OPAL_THREAD_LOCK (&mca_io_romio_mutex);
ret = ROMIO_PREFIX(MPI_File_set_size) (data->romio_fh, size);
OMPI_THREAD_UNLOCK (&mca_io_romio_mutex);
OPAL_THREAD_UNLOCK (&mca_io_romio_mutex);
return ret;
}
@ -82,9 +82,9 @@ mca_io_romio_file_preallocate (ompi_file_t *fh,
mca_io_romio_data_t *data;
data = (mca_io_romio_data_t *) fh->f_io_selected_data;
OMPI_THREAD_LOCK (&mca_io_romio_mutex);
OPAL_THREAD_LOCK (&mca_io_romio_mutex);
ret = ROMIO_PREFIX(MPI_File_preallocate) (data->romio_fh, size);
OMPI_THREAD_UNLOCK (&mca_io_romio_mutex);
OPAL_THREAD_UNLOCK (&mca_io_romio_mutex);
return ret;
}
@ -98,9 +98,9 @@ mca_io_romio_file_get_size (ompi_file_t *fh,
mca_io_romio_data_t *data;
data = (mca_io_romio_data_t *) fh->f_io_selected_data;
OMPI_THREAD_LOCK (&mca_io_romio_mutex);
OPAL_THREAD_LOCK (&mca_io_romio_mutex);
ret = ROMIO_PREFIX(MPI_File_get_size) (data->romio_fh, size);
OMPI_THREAD_UNLOCK (&mca_io_romio_mutex);
OPAL_THREAD_UNLOCK (&mca_io_romio_mutex);
return ret;
}
@ -114,9 +114,9 @@ mca_io_romio_file_get_amode (ompi_file_t *fh,
mca_io_romio_data_t *data;
data = (mca_io_romio_data_t *) fh->f_io_selected_data;
OMPI_THREAD_LOCK (&mca_io_romio_mutex);
OPAL_THREAD_LOCK (&mca_io_romio_mutex);
ret = ROMIO_PREFIX(MPI_File_get_amode) (data->romio_fh, amode);
OMPI_THREAD_UNLOCK (&mca_io_romio_mutex);
OPAL_THREAD_UNLOCK (&mca_io_romio_mutex);
return ret;
}
@ -130,9 +130,9 @@ mca_io_romio_file_set_info (ompi_file_t *fh,
mca_io_romio_data_t *data;
data = (mca_io_romio_data_t *) fh->f_io_selected_data;
OMPI_THREAD_LOCK (&mca_io_romio_mutex);
OPAL_THREAD_LOCK (&mca_io_romio_mutex);
ret = ROMIO_PREFIX(MPI_File_set_info) (data->romio_fh, info);
OMPI_THREAD_UNLOCK (&mca_io_romio_mutex);
OPAL_THREAD_UNLOCK (&mca_io_romio_mutex);
return ret;
}
@ -146,9 +146,9 @@ mca_io_romio_file_get_info (ompi_file_t *fh,
mca_io_romio_data_t *data;
data = (mca_io_romio_data_t *) fh->f_io_selected_data;
OMPI_THREAD_LOCK (&mca_io_romio_mutex);
OPAL_THREAD_LOCK (&mca_io_romio_mutex);
ret = ROMIO_PREFIX(MPI_File_get_info) (data->romio_fh, info_used);
OMPI_THREAD_UNLOCK (&mca_io_romio_mutex);
OPAL_THREAD_UNLOCK (&mca_io_romio_mutex);
return ret;
}
@ -166,11 +166,11 @@ mca_io_romio_file_set_view (ompi_file_t *fh,
mca_io_romio_data_t *data;
data = (mca_io_romio_data_t *) fh->f_io_selected_data;
OMPI_THREAD_LOCK (&mca_io_romio_mutex);
OPAL_THREAD_LOCK (&mca_io_romio_mutex);
ret =
ROMIO_PREFIX(MPI_File_set_view) (data->romio_fh, disp, etype, filetype,
datarep, info);
OMPI_THREAD_UNLOCK (&mca_io_romio_mutex);
OPAL_THREAD_UNLOCK (&mca_io_romio_mutex);
return ret;
}
@ -187,11 +187,11 @@ mca_io_romio_file_get_view (ompi_file_t *fh,
mca_io_romio_data_t *data;
data = (mca_io_romio_data_t *) fh->f_io_selected_data;
OMPI_THREAD_LOCK (&mca_io_romio_mutex);
OPAL_THREAD_LOCK (&mca_io_romio_mutex);
ret =
ROMIO_PREFIX(MPI_File_get_view) (data->romio_fh, disp, etype, filetype,
datarep);
OMPI_THREAD_UNLOCK (&mca_io_romio_mutex);
OPAL_THREAD_UNLOCK (&mca_io_romio_mutex);
return ret;
@ -207,10 +207,10 @@ mca_io_romio_file_get_type_extent (ompi_file_t *fh,
mca_io_romio_data_t *data;
data = (mca_io_romio_data_t *) fh->f_io_selected_data;
OMPI_THREAD_LOCK (&mca_io_romio_mutex);
OPAL_THREAD_LOCK (&mca_io_romio_mutex);
ret =
ROMIO_PREFIX(MPI_File_get_type_extent) (data->romio_fh, datatype, extent);
OMPI_THREAD_UNLOCK (&mca_io_romio_mutex);
OPAL_THREAD_UNLOCK (&mca_io_romio_mutex);
return ret;
}
@ -224,9 +224,9 @@ mca_io_romio_file_set_atomicity (ompi_file_t *fh,
mca_io_romio_data_t *data;
data = (mca_io_romio_data_t *) fh->f_io_selected_data;
OMPI_THREAD_LOCK (&mca_io_romio_mutex);
OPAL_THREAD_LOCK (&mca_io_romio_mutex);
ret = ROMIO_PREFIX(MPI_File_set_atomicity) (data->romio_fh, flag);
OMPI_THREAD_UNLOCK (&mca_io_romio_mutex);
OPAL_THREAD_UNLOCK (&mca_io_romio_mutex);
return ret;
}
@ -239,9 +239,9 @@ mca_io_romio_file_get_atomicity (ompi_file_t *fh,
mca_io_romio_data_t *data;
data = (mca_io_romio_data_t *) fh->f_io_selected_data;
OMPI_THREAD_LOCK (&mca_io_romio_mutex);
OPAL_THREAD_LOCK (&mca_io_romio_mutex);
ret = ROMIO_PREFIX(MPI_File_get_atomicity) (data->romio_fh, flag);
OMPI_THREAD_UNLOCK (&mca_io_romio_mutex);
OPAL_THREAD_UNLOCK (&mca_io_romio_mutex);
return ret;
}
@ -253,9 +253,9 @@ mca_io_romio_file_sync (ompi_file_t *fh)
mca_io_romio_data_t *data;
data = (mca_io_romio_data_t *) fh->f_io_selected_data;
OMPI_THREAD_LOCK (&mca_io_romio_mutex);
OPAL_THREAD_LOCK (&mca_io_romio_mutex);
ret = ROMIO_PREFIX(MPI_File_sync) (data->romio_fh);
OMPI_THREAD_UNLOCK (&mca_io_romio_mutex);
OPAL_THREAD_UNLOCK (&mca_io_romio_mutex);
return ret;
}
@ -270,9 +270,9 @@ mca_io_romio_file_seek_shared (ompi_file_t *fh,
mca_io_romio_data_t *data;
data = (mca_io_romio_data_t *) fh->f_io_selected_data;
OMPI_THREAD_LOCK (&mca_io_romio_mutex);
OPAL_THREAD_LOCK (&mca_io_romio_mutex);
ret = ROMIO_PREFIX(MPI_File_seek_shared) (data->romio_fh, offset, whence);
OMPI_THREAD_UNLOCK (&mca_io_romio_mutex);
OPAL_THREAD_UNLOCK (&mca_io_romio_mutex);
return ret;
}
@ -286,9 +286,9 @@ mca_io_romio_file_get_position_shared (ompi_file_t *fh,
mca_io_romio_data_t *data;
data = (mca_io_romio_data_t *) fh->f_io_selected_data;
OMPI_THREAD_LOCK (&mca_io_romio_mutex);
OPAL_THREAD_LOCK (&mca_io_romio_mutex);
ret = ROMIO_PREFIX(MPI_File_get_position_shared) (data->romio_fh, offset);
OMPI_THREAD_UNLOCK (&mca_io_romio_mutex);
OPAL_THREAD_UNLOCK (&mca_io_romio_mutex);
return ret;
}
@ -303,9 +303,9 @@ mca_io_romio_file_seek (ompi_file_t *fh,
mca_io_romio_data_t *data;
data = (mca_io_romio_data_t *) fh->f_io_selected_data;
OMPI_THREAD_LOCK (&mca_io_romio_mutex);
OPAL_THREAD_LOCK (&mca_io_romio_mutex);
ret = ROMIO_PREFIX(MPI_File_seek) (data->romio_fh, offset, whence);
OMPI_THREAD_UNLOCK (&mca_io_romio_mutex);
OPAL_THREAD_UNLOCK (&mca_io_romio_mutex);
return ret;
}
@ -319,9 +319,9 @@ mca_io_romio_file_get_position (ompi_file_t *fh,
mca_io_romio_data_t *data;
data = (mca_io_romio_data_t *) fh->f_io_selected_data;
OMPI_THREAD_LOCK (&mca_io_romio_mutex);
OPAL_THREAD_LOCK (&mca_io_romio_mutex);
ret = ROMIO_PREFIX(MPI_File_get_position) (data->romio_fh, offset);
OMPI_THREAD_UNLOCK (&mca_io_romio_mutex);
OPAL_THREAD_UNLOCK (&mca_io_romio_mutex);
return ret;
}
@ -336,9 +336,9 @@ mca_io_romio_file_get_byte_offset (ompi_file_t *fh,
mca_io_romio_data_t *data;
data = (mca_io_romio_data_t *) fh->f_io_selected_data;
OMPI_THREAD_LOCK (&mca_io_romio_mutex);
OPAL_THREAD_LOCK (&mca_io_romio_mutex);
ret = ROMIO_PREFIX(MPI_File_get_byte_offset) (data->romio_fh, offset, disp);
OMPI_THREAD_UNLOCK (&mca_io_romio_mutex);
OPAL_THREAD_UNLOCK (&mca_io_romio_mutex);
return ret;
}


@ -33,11 +33,11 @@ mca_io_romio_file_read_at (ompi_file_t *fh,
mca_io_romio_data_t *data;
data = (mca_io_romio_data_t *) fh->f_io_selected_data;
OMPI_THREAD_LOCK (&mca_io_romio_mutex);
OPAL_THREAD_LOCK (&mca_io_romio_mutex);
ret =
ROMIO_PREFIX(MPI_File_read_at) (data->romio_fh, offset, buf, count,
datatype, status);
OMPI_THREAD_UNLOCK (&mca_io_romio_mutex);
OPAL_THREAD_UNLOCK (&mca_io_romio_mutex);
return ret;
}
@ -55,11 +55,11 @@ mca_io_romio_file_read_at_all (ompi_file_t *fh,
mca_io_romio_data_t *data;
data = (mca_io_romio_data_t *) fh->f_io_selected_data;
OMPI_THREAD_LOCK (&mca_io_romio_mutex);
OPAL_THREAD_LOCK (&mca_io_romio_mutex);
ret =
ROMIO_PREFIX(MPI_File_read_at_all) (data->romio_fh, offset, buf, count,
datatype, status);
OMPI_THREAD_UNLOCK (&mca_io_romio_mutex);
OPAL_THREAD_UNLOCK (&mca_io_romio_mutex);
return ret;
}
@ -79,14 +79,14 @@ mca_io_romio_file_iread_at (ompi_file_t *fh,
data = (mca_io_romio_data_t *) fh->f_io_selected_data;
req = (mca_io_romio_request_t *) request;
OMPI_THREAD_LOCK (&mca_io_romio_mutex);
OPAL_THREAD_LOCK (&mca_io_romio_mutex);
ret =
ROMIO_PREFIX(MPI_File_iread_at) (data->romio_fh, offset, buf, count,
datatype, &req->romio_rq);
if (MPI_SUCCESS == ret) {
MCA_IO_ROMIO_REQUEST_ADD(request);
}
OMPI_THREAD_UNLOCK (&mca_io_romio_mutex);
OPAL_THREAD_UNLOCK (&mca_io_romio_mutex);
return ret;
}
@ -103,11 +103,11 @@ mca_io_romio_file_read (ompi_file_t *fh,
mca_io_romio_data_t *data;
data = (mca_io_romio_data_t *) fh->f_io_selected_data;
OMPI_THREAD_LOCK (&mca_io_romio_mutex);
OPAL_THREAD_LOCK (&mca_io_romio_mutex);
ret =
ROMIO_PREFIX(MPI_File_read) (data->romio_fh, buf, count, datatype,
status);
OMPI_THREAD_UNLOCK (&mca_io_romio_mutex);
OPAL_THREAD_UNLOCK (&mca_io_romio_mutex);
return ret;
}
@ -124,11 +124,11 @@ mca_io_romio_file_read_all (ompi_file_t *fh,
mca_io_romio_data_t *data;
data = (mca_io_romio_data_t *) fh->f_io_selected_data;
OMPI_THREAD_LOCK (&mca_io_romio_mutex);
OPAL_THREAD_LOCK (&mca_io_romio_mutex);
ret =
ROMIO_PREFIX(MPI_File_read_all) (data->romio_fh, buf, count, datatype,
status);
OMPI_THREAD_UNLOCK (&mca_io_romio_mutex);
OPAL_THREAD_UNLOCK (&mca_io_romio_mutex);
return ret;
}
@ -147,14 +147,14 @@ mca_io_romio_file_iread (ompi_file_t *fh,
data = (mca_io_romio_data_t *) fh->f_io_selected_data;
req = (mca_io_romio_request_t *) request;
OMPI_THREAD_LOCK (&mca_io_romio_mutex);
OPAL_THREAD_LOCK (&mca_io_romio_mutex);
ret =
ROMIO_PREFIX(MPI_File_iread) (data->romio_fh, buf, count, datatype,
&req->romio_rq);
if (MPI_SUCCESS == ret) {
MCA_IO_ROMIO_REQUEST_ADD(request);
}
OMPI_THREAD_UNLOCK (&mca_io_romio_mutex);
OPAL_THREAD_UNLOCK (&mca_io_romio_mutex);
return ret;
}
@ -171,11 +171,11 @@ mca_io_romio_file_read_shared (ompi_file_t *fh,
mca_io_romio_data_t *data;
data = (mca_io_romio_data_t *) fh->f_io_selected_data;
OMPI_THREAD_LOCK (&mca_io_romio_mutex);
OPAL_THREAD_LOCK (&mca_io_romio_mutex);
ret =
ROMIO_PREFIX(MPI_File_read_shared) (data->romio_fh, buf, count,
datatype, status);
OMPI_THREAD_UNLOCK (&mca_io_romio_mutex);
OPAL_THREAD_UNLOCK (&mca_io_romio_mutex);
return ret;
}
@ -194,14 +194,14 @@ mca_io_romio_file_iread_shared (ompi_file_t *fh,
data = (mca_io_romio_data_t *) fh->f_io_selected_data;
req = (mca_io_romio_request_t *) request;
OMPI_THREAD_LOCK (&mca_io_romio_mutex);
OPAL_THREAD_LOCK (&mca_io_romio_mutex);
ret =
ROMIO_PREFIX(MPI_File_iread_shared) (data->romio_fh, buf, count,
datatype, &req->romio_rq);
if (MPI_SUCCESS == ret) {
MCA_IO_ROMIO_REQUEST_ADD(request);
}
OMPI_THREAD_UNLOCK (&mca_io_romio_mutex);
OPAL_THREAD_UNLOCK (&mca_io_romio_mutex);
return ret;
}
@ -218,11 +218,11 @@ mca_io_romio_file_read_ordered (ompi_file_t *fh,
mca_io_romio_data_t *data;
data = (mca_io_romio_data_t *) fh->f_io_selected_data;
OMPI_THREAD_LOCK (&mca_io_romio_mutex);
OPAL_THREAD_LOCK (&mca_io_romio_mutex);
ret =
ROMIO_PREFIX(MPI_File_read_ordered) (data->romio_fh, buf, count,
datatype, status);
OMPI_THREAD_UNLOCK (&mca_io_romio_mutex);
OPAL_THREAD_UNLOCK (&mca_io_romio_mutex);
return ret;
}
@ -239,11 +239,11 @@ mca_io_romio_file_read_at_all_begin (ompi_file_t *fh,
mca_io_romio_data_t *data;
data = (mca_io_romio_data_t *) fh->f_io_selected_data;
OMPI_THREAD_LOCK (&mca_io_romio_mutex);
OPAL_THREAD_LOCK (&mca_io_romio_mutex);
ret =
ROMIO_PREFIX(MPI_File_read_at_all_begin) (data->romio_fh, offset, buf,
count, datatype);
OMPI_THREAD_UNLOCK (&mca_io_romio_mutex);
OPAL_THREAD_UNLOCK (&mca_io_romio_mutex);
return ret;
}
@ -258,9 +258,9 @@ mca_io_romio_file_read_at_all_end (ompi_file_t *fh,
mca_io_romio_data_t *data;
data = (mca_io_romio_data_t *) fh->f_io_selected_data;
OMPI_THREAD_LOCK (&mca_io_romio_mutex);
OPAL_THREAD_LOCK (&mca_io_romio_mutex);
ret = ROMIO_PREFIX(MPI_File_read_at_all_end) (data->romio_fh, buf, status);
OMPI_THREAD_UNLOCK (&mca_io_romio_mutex);
OPAL_THREAD_UNLOCK (&mca_io_romio_mutex);
return ret;
}
@ -276,11 +276,11 @@ mca_io_romio_file_read_all_begin (ompi_file_t *fh,
mca_io_romio_data_t *data;
data = (mca_io_romio_data_t *) fh->f_io_selected_data;
OMPI_THREAD_LOCK (&mca_io_romio_mutex);
OPAL_THREAD_LOCK (&mca_io_romio_mutex);
ret =
ROMIO_PREFIX(MPI_File_read_all_begin) (data->romio_fh, buf, count,
datatype);
OMPI_THREAD_UNLOCK (&mca_io_romio_mutex);
OPAL_THREAD_UNLOCK (&mca_io_romio_mutex);
return ret;
}
@ -295,9 +295,9 @@ mca_io_romio_file_read_all_end (ompi_file_t *fh,
mca_io_romio_data_t *data;
data = (mca_io_romio_data_t *) fh->f_io_selected_data;
OMPI_THREAD_LOCK (&mca_io_romio_mutex);
OPAL_THREAD_LOCK (&mca_io_romio_mutex);
ret = ROMIO_PREFIX(MPI_File_read_all_end) (data->romio_fh, buf, status);
OMPI_THREAD_UNLOCK (&mca_io_romio_mutex);
OPAL_THREAD_UNLOCK (&mca_io_romio_mutex);
return ret;
}
@ -313,11 +313,11 @@ mca_io_romio_file_read_ordered_begin (ompi_file_t *fh,
mca_io_romio_data_t *data;
data = (mca_io_romio_data_t *) fh->f_io_selected_data;
OMPI_THREAD_LOCK (&mca_io_romio_mutex);
OPAL_THREAD_LOCK (&mca_io_romio_mutex);
ret =
ROMIO_PREFIX(MPI_File_read_ordered_begin) (data->romio_fh, buf, count,
datatype);
OMPI_THREAD_UNLOCK (&mca_io_romio_mutex);
OPAL_THREAD_UNLOCK (&mca_io_romio_mutex);
return ret;
}
@ -332,10 +332,10 @@ mca_io_romio_file_read_ordered_end (ompi_file_t *fh,
mca_io_romio_data_t *data;
data = (mca_io_romio_data_t *) fh->f_io_selected_data;
OMPI_THREAD_LOCK (&mca_io_romio_mutex);
OPAL_THREAD_LOCK (&mca_io_romio_mutex);
ret = ROMIO_PREFIX(MPI_File_read_ordered_end) (data->romio_fh, buf,
status);
OMPI_THREAD_UNLOCK (&mca_io_romio_mutex);
OPAL_THREAD_UNLOCK (&mca_io_romio_mutex);
return ret;
}


@ -33,11 +33,11 @@ mca_io_romio_file_write_at (ompi_file_t *fh,
mca_io_romio_data_t *data;
data = (mca_io_romio_data_t *) fh->f_io_selected_data;
OMPI_THREAD_LOCK (&mca_io_romio_mutex);
OPAL_THREAD_LOCK (&mca_io_romio_mutex);
ret =
ROMIO_PREFIX(MPI_File_write_at) (data->romio_fh, offset, buf, count,
datatype, status);
OMPI_THREAD_UNLOCK (&mca_io_romio_mutex);
OPAL_THREAD_UNLOCK (&mca_io_romio_mutex);
return ret;
}
@ -56,11 +56,11 @@ mca_io_romio_file_write_at_all (ompi_file_t *fh,
mca_io_romio_data_t *data;
data = (mca_io_romio_data_t *) fh->f_io_selected_data;
OMPI_THREAD_LOCK (&mca_io_romio_mutex);
OPAL_THREAD_LOCK (&mca_io_romio_mutex);
ret =
ROMIO_PREFIX(MPI_File_write_at_all) (data->romio_fh, offset, buf,
count, datatype, status);
OMPI_THREAD_UNLOCK (&mca_io_romio_mutex);
OPAL_THREAD_UNLOCK (&mca_io_romio_mutex);
return ret;
}
@ -81,14 +81,14 @@ mca_io_romio_file_iwrite_at (ompi_file_t *fh,
data = (mca_io_romio_data_t *) fh->f_io_selected_data;
req = (mca_io_romio_request_t *) request;
OMPI_THREAD_LOCK (&mca_io_romio_mutex);
OPAL_THREAD_LOCK (&mca_io_romio_mutex);
ret =
ROMIO_PREFIX(MPI_File_iwrite_at) (data->romio_fh, offset, buf, count,
datatype, &req->romio_rq);
if (MPI_SUCCESS == ret) {
MCA_IO_ROMIO_REQUEST_ADD(request);
}
OMPI_THREAD_UNLOCK (&mca_io_romio_mutex);
OPAL_THREAD_UNLOCK (&mca_io_romio_mutex);
return ret;
}
@ -108,11 +108,11 @@ mca_io_romio_file_write (ompi_file_t *fh,
mca_io_romio_data_t *data;
data = (mca_io_romio_data_t *) fh->f_io_selected_data;
OMPI_THREAD_LOCK (&mca_io_romio_mutex);
OPAL_THREAD_LOCK (&mca_io_romio_mutex);
ret =
ROMIO_PREFIX(MPI_File_write) (data->romio_fh, buf, count, datatype,
status);
OMPI_THREAD_UNLOCK (&mca_io_romio_mutex);
OPAL_THREAD_UNLOCK (&mca_io_romio_mutex);
return ret;
}
@ -128,11 +128,11 @@ mca_io_romio_file_write_all (ompi_file_t *fh,
mca_io_romio_data_t *data;
data = (mca_io_romio_data_t *) fh->f_io_selected_data;
OMPI_THREAD_LOCK (&mca_io_romio_mutex);
OPAL_THREAD_LOCK (&mca_io_romio_mutex);
ret =
ROMIO_PREFIX(MPI_File_write_all) (data->romio_fh, buf, count, datatype,
status);
OMPI_THREAD_UNLOCK (&mca_io_romio_mutex);
OPAL_THREAD_UNLOCK (&mca_io_romio_mutex);
return ret;
}
@ -150,14 +150,14 @@ mca_io_romio_file_iwrite (ompi_file_t *fh,
data = (mca_io_romio_data_t *) fh->f_io_selected_data;
req = (mca_io_romio_request_t *) request;
OMPI_THREAD_LOCK (&mca_io_romio_mutex);
OPAL_THREAD_LOCK (&mca_io_romio_mutex);
ret =
ROMIO_PREFIX(MPI_File_iwrite) (data->romio_fh, buf, count, datatype,
&req->romio_rq);
if (MPI_SUCCESS == ret) {
MCA_IO_ROMIO_REQUEST_ADD(request);
}
OMPI_THREAD_UNLOCK (&mca_io_romio_mutex);
OPAL_THREAD_UNLOCK (&mca_io_romio_mutex);
return ret;
}
@ -174,11 +174,11 @@ mca_io_romio_file_write_shared (ompi_file_t *fh,
mca_io_romio_data_t *data;
data = (mca_io_romio_data_t *) fh->f_io_selected_data;
OMPI_THREAD_LOCK (&mca_io_romio_mutex);
OPAL_THREAD_LOCK (&mca_io_romio_mutex);
ret =
ROMIO_PREFIX(MPI_File_write_shared) (data->romio_fh, buf, count,
datatype, status);
OMPI_THREAD_UNLOCK (&mca_io_romio_mutex);
OPAL_THREAD_UNLOCK (&mca_io_romio_mutex);
return ret;
}
@ -196,14 +196,14 @@ mca_io_romio_file_iwrite_shared (ompi_file_t *fh,
data = (mca_io_romio_data_t *) fh->f_io_selected_data;
req = (mca_io_romio_request_t *) request;
OMPI_THREAD_LOCK (&mca_io_romio_mutex);
OPAL_THREAD_LOCK (&mca_io_romio_mutex);
ret =
ROMIO_PREFIX(MPI_File_iwrite_shared) (data->romio_fh, buf, count,
datatype, &req->romio_rq);
if (MPI_SUCCESS == ret) {
MCA_IO_ROMIO_REQUEST_ADD(request);
}
OMPI_THREAD_UNLOCK (&mca_io_romio_mutex);
OPAL_THREAD_UNLOCK (&mca_io_romio_mutex);
return ret;
}
@ -219,11 +219,11 @@ mca_io_romio_file_write_ordered (ompi_file_t *fh,
mca_io_romio_data_t *data;
data = (mca_io_romio_data_t *) fh->f_io_selected_data;
OMPI_THREAD_LOCK (&mca_io_romio_mutex);
OPAL_THREAD_LOCK (&mca_io_romio_mutex);
ret =
ROMIO_PREFIX(MPI_File_write_ordered) (data->romio_fh, buf, count,
datatype, status);
OMPI_THREAD_UNLOCK (&mca_io_romio_mutex);
OPAL_THREAD_UNLOCK (&mca_io_romio_mutex);
return ret;
}
@ -239,10 +239,10 @@ mca_io_romio_file_write_at_all_begin (ompi_file_t *fh,
mca_io_romio_data_t *data;
data = (mca_io_romio_data_t *) fh->f_io_selected_data;
OMPI_THREAD_LOCK (&mca_io_romio_mutex);
OPAL_THREAD_LOCK (&mca_io_romio_mutex);
ret = ROMIO_PREFIX(MPI_File_write_at_all_begin) (data->romio_fh, offset,
buf, count, datatype);
OMPI_THREAD_UNLOCK (&mca_io_romio_mutex);
OPAL_THREAD_UNLOCK (&mca_io_romio_mutex);
return ret;
}
@ -256,10 +256,10 @@ mca_io_romio_file_write_at_all_end (ompi_file_t *fh,
mca_io_romio_data_t *data;
data = (mca_io_romio_data_t *) fh->f_io_selected_data;
OMPI_THREAD_LOCK (&mca_io_romio_mutex);
OPAL_THREAD_LOCK (&mca_io_romio_mutex);
ret = ROMIO_PREFIX(MPI_File_write_at_all_end) (data->romio_fh, buf,
status);
OMPI_THREAD_UNLOCK (&mca_io_romio_mutex);
OPAL_THREAD_UNLOCK (&mca_io_romio_mutex);
return ret;
}
@ -274,10 +274,10 @@ mca_io_romio_file_write_all_begin (ompi_file_t *fh,
mca_io_romio_data_t *data;
data = (mca_io_romio_data_t *) fh->f_io_selected_data;
OMPI_THREAD_LOCK (&mca_io_romio_mutex);
OPAL_THREAD_LOCK (&mca_io_romio_mutex);
ret = ROMIO_PREFIX(MPI_File_write_all_begin) (data->romio_fh, buf, count,
datatype);
OMPI_THREAD_UNLOCK (&mca_io_romio_mutex);
OPAL_THREAD_UNLOCK (&mca_io_romio_mutex);
return ret;
}
@ -291,9 +291,9 @@ mca_io_romio_file_write_all_end (ompi_file_t *fh,
mca_io_romio_data_t *data;
data = (mca_io_romio_data_t *) fh->f_io_selected_data;
OMPI_THREAD_LOCK (&mca_io_romio_mutex);
OPAL_THREAD_LOCK (&mca_io_romio_mutex);
ret = ROMIO_PREFIX(MPI_File_write_all_end) (data->romio_fh, buf, status);
OMPI_THREAD_UNLOCK (&mca_io_romio_mutex);
OPAL_THREAD_UNLOCK (&mca_io_romio_mutex);
return ret;
}
@ -308,10 +308,10 @@ mca_io_romio_file_write_ordered_begin (ompi_file_t *fh,
mca_io_romio_data_t *data;
data = (mca_io_romio_data_t *) fh->f_io_selected_data;
OMPI_THREAD_LOCK (&mca_io_romio_mutex);
OPAL_THREAD_LOCK (&mca_io_romio_mutex);
ret = ROMIO_PREFIX(MPI_File_write_ordered_begin) (data->romio_fh, buf,
count, datatype);
OMPI_THREAD_UNLOCK (&mca_io_romio_mutex);
OPAL_THREAD_UNLOCK (&mca_io_romio_mutex);
return ret;
}
@ -325,10 +325,10 @@ mca_io_romio_file_write_ordered_end (ompi_file_t *fh,
mca_io_romio_data_t *data;
data = (mca_io_romio_data_t *) fh->f_io_selected_data;
OMPI_THREAD_LOCK (&mca_io_romio_mutex);
OPAL_THREAD_LOCK (&mca_io_romio_mutex);
ret = ROMIO_PREFIX(MPI_File_write_ordered_end) (data->romio_fh, buf,
status);
OMPI_THREAD_UNLOCK (&mca_io_romio_mutex);
OPAL_THREAD_UNLOCK (&mca_io_romio_mutex);
return ret;
}


@ -16,7 +16,7 @@
#include "ompi_config.h"
#include "mpi.h"
#include "threads/mutex.h"
#include "opal/threads/mutex.h"
#include "mca/io/io.h"
#include "io_romio.h"


@ -27,14 +27,14 @@ int mca_io_romio_request_fini(ompi_request_t **req)
mca_io_base_request_t *ioreq = *((mca_io_base_request_t**) req);
int ret = OMPI_SUCCESS;
OMPI_THREAD_LOCK(&mca_io_romio_mutex);
OPAL_THREAD_LOCK(&mca_io_romio_mutex);
/* clean up the fortran stuff, mark us as invalid */
OMPI_REQUEST_FINI(*req);
/* and shove us back in the free list */
mca_io_base_request_free(ioreq->req_file, ioreq);
OMPI_THREAD_UNLOCK(&mca_io_romio_mutex);
OPAL_THREAD_UNLOCK(&mca_io_romio_mutex);
*req = MPI_REQUEST_NULL;
return ret;
@ -46,7 +46,7 @@ int mca_io_romio_request_free(ompi_request_t **req)
mca_io_base_request_t *ioreq = *((mca_io_base_request_t**) req);
int ret = OMPI_SUCCESS;
OMPI_THREAD_LOCK(&mca_io_romio_mutex);
OPAL_THREAD_LOCK(&mca_io_romio_mutex);
ioreq->free_called = true;
@ -55,7 +55,7 @@ int mca_io_romio_request_free(ompi_request_t **req)
ret = ioreq->super.req_fini(req);
}
OMPI_THREAD_UNLOCK(&mca_io_romio_mutex);
OPAL_THREAD_UNLOCK(&mca_io_romio_mutex);
*req = MPI_REQUEST_NULL;
return ret;


@ -25,7 +25,7 @@
#include "class/ompi_rb_tree.h"
#include "mca/mca.h"
#include "mca/mpool/mpool.h"
#include "threads/mutex.h"
#include "opal/threads/mutex.h"
#if defined(c_plusplus) || defined(__cplusplus)
extern "C" {
@ -115,7 +115,7 @@ OMPI_DECLSPEC extern opal_list_t mca_mpool_base_components;
OMPI_DECLSPEC extern opal_list_t mca_mpool_base_modules;
OMPI_DECLSPEC extern ompi_free_list_t mca_mpool_base_mem_list;
OMPI_DECLSPEC extern ompi_rb_tree_t mca_mpool_base_tree;
OMPI_DECLSPEC extern ompi_mutex_t mca_mpool_base_tree_lock;
OMPI_DECLSPEC extern opal_mutex_t mca_mpool_base_tree_lock;
#if defined(c_plusplus) || defined(__cplusplus)
}


@ -23,11 +23,11 @@
#endif /* HAVE_STRING_H */
#include "mca/mpool/mpool.h"
#include "mca/mpool/base/base.h"
#include "threads/mutex.h"
#include "opal/threads/mutex.h"
ompi_rb_tree_t mca_mpool_base_tree;
ompi_free_list_t mca_mpool_base_mem_list;
ompi_mutex_t mca_mpool_base_tree_lock;
opal_mutex_t mca_mpool_base_tree_lock;
/**
@ -61,7 +61,7 @@ struct mca_mpool_base_chunk_t * mca_mpool_base_find(void * base)
mca_mpool_base_chunk_t* found;
mca_mpool_base_chunk_t* copy;
OMPI_THREAD_LOCK(&mca_mpool_base_tree_lock);
OPAL_THREAD_LOCK(&mca_mpool_base_tree_lock);
if(NULL != (found = mca_mpool_base_find(base))) {
mca_mpool_base_reg_mpool_t* reg;
copy = OBJ_NEW(mca_mpool_base_chunk_t);
@ -74,7 +74,7 @@ struct mca_mpool_base_chunk_t * mca_mpool_base_find(void * base)
} else {
copy = NULL;
}
OMPI_THREAD_UNLOCK(&mca_mpool_base_tree_lock);
OPAL_THREAD_UNLOCK(&mca_mpool_base_tree_lock);
return copy;
}
@ -152,10 +152,10 @@ int mca_mpool_base_insert(void * addr, size_t size,
((mca_mpool_base_chunk_t *) item)->mpools[0].user_data = user_data;
((mca_mpool_base_chunk_t *) item)->mpools[0].mpool_registration = registration;
OMPI_THREAD_LOCK(&mca_mpool_base_tree_lock);
OPAL_THREAD_LOCK(&mca_mpool_base_tree_lock);
rc = ompi_rb_tree_insert(&mca_mpool_base_tree,
&((mca_mpool_base_chunk_t *)item)->key, item);
OMPI_THREAD_UNLOCK(&mca_mpool_base_tree_lock);
OPAL_THREAD_UNLOCK(&mca_mpool_base_tree_lock);
if(OMPI_SUCCESS != rc) {
OMPI_FREE_LIST_RETURN(&mca_mpool_base_mem_list, item);
return rc;
@ -176,13 +176,13 @@ int mca_mpool_base_remove(void * base)
int rc;
mca_mpool_base_chunk_t *chunk;
OMPI_THREAD_LOCK(&mca_mpool_base_tree_lock);
OPAL_THREAD_LOCK(&mca_mpool_base_tree_lock);
if(NULL == (chunk = mca_mpool_base_find_nl(base))) {
OMPI_THREAD_UNLOCK(&mca_mpool_base_tree_lock);
OPAL_THREAD_UNLOCK(&mca_mpool_base_tree_lock);
return OMPI_ERR_BAD_PARAM;
}
rc = ompi_rb_tree_delete(&mca_mpool_base_tree, &chunk->key);
OMPI_THREAD_UNLOCK(&mca_mpool_base_tree_lock);
OPAL_THREAD_UNLOCK(&mca_mpool_base_tree_lock);
return rc;
}
@ -300,11 +300,11 @@ void * mca_mpool_base_alloc(size_t size, ompi_info_t * info)
((mca_mpool_base_chunk_t *) item)->key.bottom = mem;
((mca_mpool_base_chunk_t *) item)->key.top = (void *)
((char *) mem + size - 1);
OMPI_THREAD_LOCK(&mca_mpool_base_tree_lock);
OPAL_THREAD_LOCK(&mca_mpool_base_tree_lock);
ompi_rb_tree_insert(&mca_mpool_base_tree,
&((mca_mpool_base_chunk_t *)item)->key, item);
OMPI_THREAD_UNLOCK(&mca_mpool_base_tree_lock);
OPAL_THREAD_UNLOCK(&mca_mpool_base_tree_lock);
return mem;
}
}
@ -367,10 +367,10 @@ void * mca_mpool_base_alloc(size_t size, ompi_info_t * info)
{
((mca_mpool_base_chunk_t *) item)->mpools[num_modules].mpool = NULL;
}
OMPI_THREAD_LOCK(&mca_mpool_base_tree_lock);
OPAL_THREAD_LOCK(&mca_mpool_base_tree_lock);
ompi_rb_tree_insert(&mca_mpool_base_tree,
&((mca_mpool_base_chunk_t *)item)->key, item);
OMPI_THREAD_UNLOCK(&mca_mpool_base_tree_lock);
OPAL_THREAD_UNLOCK(&mca_mpool_base_tree_lock);
free(has_reg_function);
return mem;
}
@ -389,10 +389,10 @@ int mca_mpool_base_free(void * base)
int i = 0;
int rc;
OMPI_THREAD_LOCK(&mca_mpool_base_tree_lock);
OPAL_THREAD_LOCK(&mca_mpool_base_tree_lock);
if(NULL == (chunk = mca_mpool_base_find_nl(base)))
{
OMPI_THREAD_UNLOCK(&mca_mpool_base_tree_lock);
OPAL_THREAD_UNLOCK(&mca_mpool_base_tree_lock);
return OMPI_ERR_BAD_PARAM;
}
@ -402,7 +402,7 @@ int mca_mpool_base_free(void * base)
free(chunk->key.bottom);
OMPI_FREE_LIST_RETURN(&mca_mpool_base_mem_list, (opal_list_item_t*) chunk);
rc = ompi_rb_tree_delete(&mca_mpool_base_tree, &chunk->key);
OMPI_THREAD_UNLOCK(&mca_mpool_base_tree_lock);
OPAL_THREAD_UNLOCK(&mca_mpool_base_tree_lock);
return rc;
}
@ -421,7 +421,7 @@ int mca_mpool_base_free(void * base)
OMPI_FREE_LIST_RETURN(&mca_mpool_base_mem_list, (opal_list_item_t *) chunk);
rc = ompi_rb_tree_delete(&mca_mpool_base_tree, &chunk->key);
OMPI_THREAD_UNLOCK(&mca_mpool_base_tree_lock);
OPAL_THREAD_UNLOCK(&mca_mpool_base_tree_lock);
return rc;
}
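The mpool code above records every registered chunk in a tree keyed by its address range and takes mca_mpool_base_tree_lock around each find, insert, and delete. A simplified sketch of that bookkeeping, using a flat array instead of a red-black tree and invented names throughout:

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

#define MAX_CHUNKS 64

typedef struct { char *bottom; char *top; } chunk_t;   /* inclusive address range */

static chunk_t chunks[MAX_CHUNKS];
static int nchunks = 0;
static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;

/* Record an allocation's address range under the lock. */
static int chunk_insert(void *base, size_t size)
{
    pthread_mutex_lock(&tree_lock);
    if (nchunks == MAX_CHUNKS) {
        pthread_mutex_unlock(&tree_lock);
        return -1;
    }
    chunks[nchunks].bottom = (char *)base;
    chunks[nchunks].top    = (char *)base + size - 1;
    nchunks++;
    pthread_mutex_unlock(&tree_lock);
    return 0;
}

/* Find the chunk whose range contains addr, or NULL if it is unknown. */
static chunk_t *chunk_find(void *addr)
{
    char *p = (char *)addr;
    chunk_t *found = NULL;

    pthread_mutex_lock(&tree_lock);
    for (int i = 0; i < nchunks; ++i) {
        if (p >= chunks[i].bottom && p <= chunks[i].top) {
            found = &chunks[i];
            break;
        }
    }
    pthread_mutex_unlock(&tree_lock);
    return found;
}

int main(void)
{
    char buf[128];
    chunk_insert(buf, sizeof(buf));
    printf("hit: %s\n", chunk_find(buf + 10) ? "yes" : "no");
    return 0;
}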


@ -24,7 +24,7 @@
#include "mca/mpool/base/base.h"
#include "class/ompi_rb_tree.h"
#include "class/ompi_free_list.h"
#include "threads/mutex.h"
#include "opal/threads/mutex.h"
OBJ_CLASS_INSTANCE(mca_mpool_base_selected_module_t, opal_list_item_t, NULL, NULL);
static bool mca_mpool_enable_progress_threads = true;
@ -48,7 +48,7 @@ int mca_mpool_base_init(bool enable_progress_threads, bool enable_mpi_threads)
ompi_free_list_init(&mca_mpool_base_mem_list, sizeof(mca_mpool_base_chunk_t),
OBJ_CLASS(mca_mpool_base_chunk_t), 0, -1 , 128, NULL);
OBJ_CONSTRUCT(&mca_mpool_base_tree, ompi_rb_tree_t);
OBJ_CONSTRUCT(&mca_mpool_base_tree_lock, ompi_mutex_t);
OBJ_CONSTRUCT(&mca_mpool_base_tree_lock, opal_mutex_t);
return ompi_rb_tree_init(&mca_mpool_base_tree, mca_mpool_base_tree_node_compare);
}


@ -15,8 +15,8 @@
*/
#include "ompi_config.h"
#include "threads/mutex.h"
#include "threads/condition.h"
#include "opal/threads/mutex.h"
#include "opal/threads/condition.h"
#include "mca/allocator/base/base.h"
#include "mca/allocator/allocator.h"
#include "mca/base/mca_base_param.h"
@ -27,8 +27,8 @@
#include "mca/mpool/mpool.h"
static ompi_mutex_t mca_pml_bsend_mutex; /* lock for thread safety */
static ompi_condition_t mca_pml_bsend_condition; /* condition variable to block on detach */
static opal_mutex_t mca_pml_bsend_mutex; /* lock for thread safety */
static opal_condition_t mca_pml_bsend_condition; /* condition variable to block on detach */
static mca_allocator_base_component_t* mca_pml_bsend_allocator_component;
static mca_allocator_base_module_t* mca_pml_bsend_allocator; /* sub-allocator to manage users buffer */
static unsigned char *mca_pml_bsend_base; /* base address of users buffer */
@ -73,12 +73,12 @@ int mca_pml_base_bsend_init(bool thread_safe)
char *name;
size_t tmp;
if(OMPI_THREAD_ADD32(&mca_pml_bsend_init, 1) > 1)
if(OPAL_THREAD_ADD32(&mca_pml_bsend_init, 1) > 1)
return OMPI_SUCCESS;
/* initialize static objects */
OBJ_CONSTRUCT(&mca_pml_bsend_mutex, ompi_mutex_t);
OBJ_CONSTRUCT(&mca_pml_bsend_condition, ompi_condition_t);
OBJ_CONSTRUCT(&mca_pml_bsend_mutex, opal_mutex_t);
OBJ_CONSTRUCT(&mca_pml_bsend_condition, opal_condition_t);
/* lookup name of the allocator to use for buffered sends */
mca_base_param_lookup_string(id, &name);
@ -104,7 +104,7 @@ int mca_pml_base_bsend_init(bool thread_safe)
*/
int mca_pml_base_bsend_fini()
{
if(OMPI_THREAD_ADD32(&mca_pml_bsend_init,-1) > 0)
if(OPAL_THREAD_ADD32(&mca_pml_bsend_init,-1) > 0)
return OMPI_SUCCESS;
if(NULL != mca_pml_bsend_allocator)
@ -128,16 +128,16 @@ int mca_pml_base_bsend_attach(void* addr, int size)
}
/* check for buffer already attached */
OMPI_THREAD_LOCK(&mca_pml_bsend_mutex);
OPAL_THREAD_LOCK(&mca_pml_bsend_mutex);
if(NULL != mca_pml_bsend_allocator) {
OMPI_THREAD_UNLOCK(&mca_pml_bsend_mutex);
OPAL_THREAD_UNLOCK(&mca_pml_bsend_mutex);
return OMPI_ERR_BUFFER;
}
/* try to create an instance of the allocator - to determine thread safety level */
mca_pml_bsend_allocator = mca_pml_bsend_allocator_component->allocator_init(thread_safe, mca_pml_bsend_alloc_segment, NULL, NULL);
if(NULL == mca_pml_bsend_allocator) {
OMPI_THREAD_UNLOCK(&mca_pml_bsend_mutex);
OPAL_THREAD_UNLOCK(&mca_pml_bsend_mutex);
return OMPI_ERR_BUFFER;
}
@ -146,7 +146,7 @@ int mca_pml_base_bsend_attach(void* addr, int size)
mca_pml_bsend_addr = addr;
mca_pml_bsend_size = size;
mca_pml_bsend_count = 0;
OMPI_THREAD_UNLOCK(&mca_pml_bsend_mutex);
OPAL_THREAD_UNLOCK(&mca_pml_bsend_mutex);
return OMPI_SUCCESS;
}
@ -155,17 +155,17 @@ int mca_pml_base_bsend_attach(void* addr, int size)
*/
int mca_pml_base_bsend_detach(void* addr, int* size)
{
OMPI_THREAD_LOCK(&mca_pml_bsend_mutex);
OPAL_THREAD_LOCK(&mca_pml_bsend_mutex);
/* is buffer attached */
if(NULL == mca_pml_bsend_allocator) {
OMPI_THREAD_UNLOCK(&mca_pml_bsend_mutex);
OPAL_THREAD_UNLOCK(&mca_pml_bsend_mutex);
return OMPI_ERR_BUFFER;
}
/* wait on any pending requests */
while(mca_pml_bsend_count != 0)
ompi_condition_wait(&mca_pml_bsend_condition, &mca_pml_bsend_mutex);
opal_condition_wait(&mca_pml_bsend_condition, &mca_pml_bsend_mutex);
/* free resources associated with the allocator */
mca_pml_bsend_allocator->alc_finalize(mca_pml_bsend_allocator);
@ -182,7 +182,7 @@ int mca_pml_base_bsend_detach(void* addr, int* size)
mca_pml_bsend_addr = NULL;
mca_pml_bsend_size = 0;
mca_pml_bsend_count = 0;
OMPI_THREAD_UNLOCK(&mca_pml_bsend_mutex);
OPAL_THREAD_UNLOCK(&mca_pml_bsend_mutex);
return OMPI_SUCCESS;
}
@ -202,10 +202,10 @@ int mca_pml_base_bsend_request_start(ompi_request_t* request)
if(sendreq->req_count > 0) {
/* has a buffer been provided */
OMPI_THREAD_LOCK(&mca_pml_bsend_mutex);
OPAL_THREAD_LOCK(&mca_pml_bsend_mutex);
if(NULL == mca_pml_bsend_addr) {
sendreq->req_addr = NULL;
OMPI_THREAD_UNLOCK(&mca_pml_bsend_mutex);
OPAL_THREAD_UNLOCK(&mca_pml_bsend_mutex);
return OMPI_ERR_BUFFER;
}
@ -215,7 +215,7 @@ int mca_pml_base_bsend_request_start(ompi_request_t* request)
if(NULL == sendreq->req_addr) {
/* release resources when request is freed */
sendreq->req_base.req_pml_complete = true;
OMPI_THREAD_UNLOCK(&mca_pml_bsend_mutex);
OPAL_THREAD_UNLOCK(&mca_pml_bsend_mutex);
return OMPI_ERR_BUFFER;
}
@ -225,7 +225,7 @@ int mca_pml_base_bsend_request_start(ompi_request_t* request)
/* increment count of pending requests */
mca_pml_bsend_count++;
OMPI_THREAD_UNLOCK(&mca_pml_bsend_mutex);
OPAL_THREAD_UNLOCK(&mca_pml_bsend_mutex);
/* The convertor is already initialized in the beginning so we just have to
* pack the data in the newly allocated buffer.
@ -258,7 +258,7 @@ int mca_pml_base_bsend_request_fini(ompi_request_t* request)
return OMPI_SUCCESS;
/* remove from list of pending requests */
OMPI_THREAD_LOCK(&mca_pml_bsend_mutex);
OPAL_THREAD_LOCK(&mca_pml_bsend_mutex);
/* free buffer */
mca_pml_bsend_allocator->alc_free(mca_pml_bsend_allocator, sendreq->req_addr);
@ -266,9 +266,9 @@ int mca_pml_base_bsend_request_fini(ompi_request_t* request)
/* decrement count of buffered requests */
if(--mca_pml_bsend_count == 0)
ompi_condition_signal(&mca_pml_bsend_condition);
opal_condition_signal(&mca_pml_bsend_condition);
OMPI_THREAD_UNLOCK(&mca_pml_bsend_mutex);
OPAL_THREAD_UNLOCK(&mca_pml_bsend_mutex);
return OMPI_SUCCESS;
}
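bsend_detach above refuses to tear down the buffer while buffered sends are still in flight: it sleeps on a condition variable until the pending count reaches zero, and the request-completion path signals when it decrements the count to zero. A stripped-down sketch of that drain pattern (names invented for the example):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t buf_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  buf_cond = PTHREAD_COND_INITIALIZER;
static int pending = 0;                /* requests currently using the buffer */

/* Completion path: drop the count and wake a detacher when it hits zero. */
static void request_done(void)
{
    pthread_mutex_lock(&buf_lock);
    if (--pending == 0)
        pthread_cond_signal(&buf_cond);
    pthread_mutex_unlock(&buf_lock);
}

/* Detach: wait until nothing is using the buffer any more. */
static void buffer_detach(void)
{
    pthread_mutex_lock(&buf_lock);
    while (pending != 0)
        pthread_cond_wait(&buf_cond, &buf_lock);
    /* safe to free / hand the buffer back to the user here */
    pthread_mutex_unlock(&buf_lock);
}

int main(void)
{
    pending = 1;          /* pretend one buffered send is in flight */
    request_done();       /* ... and that it just completed         */
    buffer_detach();
    puts("buffer detached");
    return 0;
}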


@ -17,7 +17,7 @@
#include "ompi_config.h"
#include "class/opal_hash_table.h"
#include "threads/condition.h"
#include "opal/threads/condition.h"
#include "util/output.h"
#include "util/proc_info.h"
@ -50,13 +50,13 @@ struct mca_base_modex_module_t {
void *module_data;
size_t module_data_size;
bool module_data_avail;
ompi_condition_t module_data_cond;
opal_condition_t module_data_cond;
};
typedef struct mca_base_modex_module_t mca_base_modex_module_t;
static void mca_base_modex_module_construct(mca_base_modex_module_t *module)
{
OBJ_CONSTRUCT(&module->module_data_cond, ompi_condition_t);
OBJ_CONSTRUCT(&module->module_data_cond, opal_condition_t);
memset(&module->component, 0, sizeof(module->component));
module->module_data = NULL;
module->module_data_size = 0;
@ -127,7 +127,7 @@ OBJ_CLASS_INSTANCE(
*/
static opal_list_t mca_base_modex_subscriptions;
static ompi_mutex_t mca_base_modex_lock;
static opal_mutex_t mca_base_modex_lock;
/**
@ -136,7 +136,7 @@ static ompi_mutex_t mca_base_modex_lock;
int mca_base_modex_init(void)
{
OBJ_CONSTRUCT(&mca_base_modex_subscriptions, opal_list_t);
OBJ_CONSTRUCT(&mca_base_modex_lock, ompi_mutex_t);
OBJ_CONSTRUCT(&mca_base_modex_lock, opal_mutex_t);
return OMPI_SUCCESS;
}
@ -250,12 +250,12 @@ orte_gpr_base_dump_notify_data(data,0);
* Lookup the modex data structure.
*/
OMPI_THREAD_LOCK(&proc->proc_lock);
OPAL_THREAD_LOCK(&proc->proc_lock);
if(NULL == (modex = (mca_base_modex_t*)proc->proc_modex)) {
modex = OBJ_NEW(mca_base_modex_t);
if(NULL == modex) {
ompi_output(0, "mca_base_modex_registry_callback: unable to allocate mca_base_modex_t\n");
OMPI_THREAD_UNLOCK(&proc->proc_lock);
OPAL_THREAD_UNLOCK(&proc->proc_lock);
return;
}
proc->proc_modex = &modex->super;
@ -335,7 +335,7 @@ orte_gpr_base_dump_notify_data(data,0);
if(NULL == (modex_module = mca_base_modex_create_module(modex, &component))) {
ompi_output(0, "mca_base_modex_registry_callback: mca_base_modex_create_module failed\n");
OBJ_RELEASE(data);
OMPI_THREAD_UNLOCK(&proc->proc_lock);
OPAL_THREAD_UNLOCK(&proc->proc_lock);
return;
}
@ -351,9 +351,9 @@ ompi_output(0, "[%lu,%lu,%lu] mca_base_modex_registry_callback: %s-%s-%d-%d rece
component.mca_component_minor_version,
num_bytes);
#endif
ompi_condition_signal(&modex_module->module_data_cond);
opal_condition_signal(&modex_module->module_data_cond);
}
OMPI_THREAD_UNLOCK(&proc->proc_lock);
OPAL_THREAD_UNLOCK(&proc->proc_lock);
} /* convert string to process name */
} /* if value[i]->cnt > 0 */
@ -382,19 +382,19 @@ static int mca_base_modex_subscribe(orte_process_name_t* name)
int rc;
/* check for an existing subscription */
OMPI_LOCK(&mca_base_modex_lock);
OPAL_LOCK(&mca_base_modex_lock);
if (!opal_list_is_empty(&mca_base_modex_subscriptions)) {
for(item = opal_list_get_first(&mca_base_modex_subscriptions);
item != opal_list_get_end(&mca_base_modex_subscriptions);
item = opal_list_get_next(item)) {
subscription = (mca_base_modex_subscription_t*)item;
if(subscription->jobid == name->jobid) {
OMPI_UNLOCK(&mca_base_modex_lock);
OPAL_UNLOCK(&mca_base_modex_lock);
return OMPI_SUCCESS;
}
}
}
OMPI_UNLOCK(&mca_base_modex_lock);
OPAL_UNLOCK(&mca_base_modex_lock);
/* otherwise - subscribe to get this jobid's ptl contact info */
if (ORTE_SUCCESS != (rc = orte_ns.get_jobid(&jobid, name))) {
@ -500,11 +500,11 @@ static int mca_base_modex_subscribe(orte_process_name_t* name)
}
/* add this jobid to our list of subscriptions */
OMPI_LOCK(&mca_base_modex_lock);
OPAL_LOCK(&mca_base_modex_lock);
subscription = OBJ_NEW(mca_base_modex_subscription_t);
subscription->jobid = name->jobid;
opal_list_append(&mca_base_modex_subscriptions, &subscription->item);
OMPI_UNLOCK(&mca_base_modex_lock);
OPAL_UNLOCK(&mca_base_modex_lock);
OBJ_DESTRUCT(&sub);
OBJ_DESTRUCT(&trig);
return OMPI_SUCCESS;
@ -627,24 +627,24 @@ int mca_base_modex_recv(
mca_base_modex_module_t* modex_module;
/* check the proc for cached data */
OMPI_THREAD_LOCK(&proc->proc_lock);
OPAL_THREAD_LOCK(&proc->proc_lock);
if(NULL == (modex = (mca_base_modex_t*)proc->proc_modex)) {
modex = OBJ_NEW(mca_base_modex_t);
if(modex == NULL) {
OMPI_THREAD_UNLOCK(&proc->proc_lock);
OPAL_THREAD_UNLOCK(&proc->proc_lock);
return OMPI_ERR_OUT_OF_RESOURCE;
}
proc->proc_modex = &modex->super;
/* verify that we have subscribed to this segment */
OMPI_THREAD_UNLOCK(&proc->proc_lock);
OPAL_THREAD_UNLOCK(&proc->proc_lock);
mca_base_modex_subscribe(&proc->proc_name);
OMPI_THREAD_LOCK(&proc->proc_lock);
OPAL_THREAD_LOCK(&proc->proc_lock);
}
/* lookup/create the module */
if(NULL == (modex_module = mca_base_modex_create_module(modex, component))) {
OMPI_THREAD_UNLOCK(&proc->proc_lock);
OPAL_THREAD_UNLOCK(&proc->proc_lock);
return OMPI_ERR_OUT_OF_RESOURCE;
}
@ -658,7 +658,7 @@ ompi_output(0, "[%lu,%lu,%lu] mca_base_modex_registry_callback: waiting for %s-%
component->mca_component_major_version,
component->mca_component_minor_version);
#endif
ompi_condition_wait(&modex_module->module_data_cond, &proc->proc_lock);
opal_condition_wait(&modex_module->module_data_cond, &proc->proc_lock);
}
/* copy the data out to the user */
@ -674,7 +674,7 @@ ompi_output(0, "[%lu,%lu,%lu] mca_base_modex_registry_callback: waiting for %s-%
*buffer = copy;
*size = modex_module->module_data_size;
}
OMPI_THREAD_UNLOCK(&proc->proc_lock);
OPAL_THREAD_UNLOCK(&proc->proc_lock);
return OMPI_SUCCESS;
}


@ -11,8 +11,8 @@
#ifndef PML_EXAMPLE_H_HAS_BEEN_INCLUDED
#define PML_EXAMPLE_H_HAS_BEEN_INCLUDED
#include "threads/thread.h"
#include "threads/condition.h"
#include "opal/threads/thread.h"
#include "opal/threads/condition.h"
#include "class/ompi_free_list.h"
#include "util/cmd_line.h"
#include "request/request.h"
@ -33,7 +33,7 @@ struct mca_pml_example_t {
size_t example_num_ptl_modules;
opal_list_t example_procs;
ompi_mutex_t example_lock;
opal_mutex_t example_lock;
/* free list of requests */
ompi_free_list_t example_send_requests;


@ -21,8 +21,8 @@
#define MCA_PML_OB1_H
#include "ompi_config.h"
#include "threads/thread.h"
#include "threads/condition.h"
#include "opal/threads/thread.h"
#include "opal/threads/condition.h"
#include "class/ompi_free_list.h"
#include "util/cmd_line.h"
#include "request/request.h"
@ -63,7 +63,7 @@ struct mca_pml_ob1_t {
bool leave_pinned;
/* lock queue access */
ompi_mutex_t lock;
opal_mutex_t lock;
/* free lists */
ompi_free_list_t send_requests;


@ -49,7 +49,7 @@ static OBJ_CLASS_INSTANCE(
static void mca_pml_ob1_comm_construct(mca_pml_ob1_comm_t* comm)
{
OBJ_CONSTRUCT(&comm->wild_receives, opal_list_t);
OBJ_CONSTRUCT(&comm->matching_lock, ompi_mutex_t);
OBJ_CONSTRUCT(&comm->matching_lock, opal_mutex_t);
comm->recv_sequence = 0;
comm->procs = NULL;
comm->num_procs = 0;


@ -19,8 +19,8 @@
#ifndef MCA_PML_OB1_COMM_H
#define MCA_PML_OB1_COMM_H
#include "threads/mutex.h"
#include "threads/condition.h"
#include "opal/threads/mutex.h"
#include "opal/threads/condition.h"
#include "mca/ptl/ptl.h"
#include "opal/class/opal_list.h"
#if defined(c_plusplus) || defined(__cplusplus)
@ -45,7 +45,7 @@ typedef struct mca_pml_ob1_comm_proc_t mca_pml_ob1_comm_proc_t;
struct mca_pml_comm_t {
opal_object_t super;
mca_ptl_sequence_t recv_sequence; /**< recv request sequence number - receiver side */
ompi_mutex_t matching_lock; /**< matching lock */
opal_mutex_t matching_lock; /**< matching lock */
opal_list_t wild_receives; /**< queue of unmatched wild (source process not specified) receives */
mca_pml_ob1_comm_proc_t* procs;
size_t num_procs;


@ -78,7 +78,7 @@ static inline int mca_pml_ob1_param_register_int(
int mca_pml_ob1_component_open(void)
{
int param, value;
OBJ_CONSTRUCT(&mca_pml_ob1.lock, ompi_mutex_t);
OBJ_CONSTRUCT(&mca_pml_ob1.lock, opal_mutex_t);
/* requests */
OBJ_CONSTRUCT(&mca_pml_ob1.send_requests, ompi_free_list_t);


@ -64,17 +64,17 @@ int mca_pml_ob1_probe(int src,
if (recvreq.req_recv.req_base.req_ompi.req_complete == false) {
/* give up and sleep until completion */
if (ompi_using_threads()) {
ompi_mutex_lock(&ompi_request_lock);
if (opal_using_threads()) {
opal_mutex_lock(&ompi_request_lock);
ompi_request_waiting++;
while (recvreq.req_recv.req_base.req_ompi.req_complete == false)
ompi_condition_wait(&ompi_request_cond, &ompi_request_lock);
opal_condition_wait(&ompi_request_cond, &ompi_request_lock);
ompi_request_waiting--;
ompi_mutex_unlock(&ompi_request_lock);
opal_mutex_unlock(&ompi_request_lock);
} else {
ompi_request_waiting++;
while (recvreq.req_recv.req_base.req_ompi.req_complete == false)
ompi_condition_wait(&ompi_request_cond, &ompi_request_lock);
opal_condition_wait(&ompi_request_cond, &ompi_request_lock);
ompi_request_waiting--;
}
}
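The hunk above, and the identical ones in the recv and send paths that follow, show the renamed completion-wait idiom: the request lock and condition variable are taken only when opal_using_threads() reports a threaded build, and sleepers are later woken through opal_condition_broadcast() only if ompi_request_waiting is non-zero. Below is a minimal, self-contained sketch of that idiom using plain POSIX threads instead of the opal wrappers; the request type, the progress() stub and the function names are hypothetical stand-ins, not code from this commit.

#include <pthread.h>
#include <stdbool.h>

/* hypothetical stand-ins for ompi_request_lock / ompi_request_cond */
static pthread_mutex_t request_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  request_cond = PTHREAD_COND_INITIALIZER;
static int             request_waiting = 0;

struct request { volatile bool complete; };      /* hypothetical request object */

static void progress(void) { /* stand-in for the progress engine */ }

/* Sleep until the request completes, locking only in a threaded build --
 * the same shape as the opal_using_threads()/opal_mutex_lock() branch above. */
static void wait_for_completion(struct request *req, bool using_threads)
{
    if (using_threads) {
        pthread_mutex_lock(&request_lock);
        request_waiting++;
        while (!req->complete)
            pthread_cond_wait(&request_cond, &request_lock);
        request_waiting--;
        pthread_mutex_unlock(&request_lock);
    } else {
        /* single-threaded build: no lock is needed; keep driving progress */
        request_waiting++;
        while (!req->complete)
            progress();
        request_waiting--;
    }
}

/* Completion side, mirroring "if (ompi_request_waiting) opal_condition_broadcast(...)". */
static void mark_complete(struct request *req)
{
    pthread_mutex_lock(&request_lock);
    req->complete = true;
    if (request_waiting)
        pthread_cond_broadcast(&request_cond);
    pthread_mutex_unlock(&request_lock);
}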

View file

@ -87,17 +87,17 @@ int mca_pml_ob1_recv(void *addr,
MCA_PML_OB1_RECV_REQUEST_START(recvreq);
if (recvreq->req_recv.req_base.req_ompi.req_complete == false) {
/* give up and sleep until completion */
if (ompi_using_threads()) {
ompi_mutex_lock(&ompi_request_lock);
if (opal_using_threads()) {
opal_mutex_lock(&ompi_request_lock);
ompi_request_waiting++;
while (recvreq->req_recv.req_base.req_ompi.req_complete == false)
ompi_condition_wait(&ompi_request_cond, &ompi_request_lock);
opal_condition_wait(&ompi_request_cond, &ompi_request_lock);
ompi_request_waiting--;
ompi_mutex_unlock(&ompi_request_lock);
opal_mutex_unlock(&ompi_request_lock);
} else {
ompi_request_waiting++;
while (recvreq->req_recv.req_base.req_ompi.req_complete == false)
ompi_condition_wait(&ompi_request_cond, &ompi_request_lock);
opal_condition_wait(&ompi_request_cond, &ompi_request_lock);
ompi_request_waiting--;
}
}

View file

@ -107,17 +107,17 @@ int mca_pml_ob1_send(void *buf,
if (sendreq->req_send.req_base.req_ompi.req_complete == false) {
/* give up and sleep until completion */
if (ompi_using_threads()) {
ompi_mutex_lock(&ompi_request_lock);
if (opal_using_threads()) {
opal_mutex_lock(&ompi_request_lock);
ompi_request_waiting++;
while (sendreq->req_send.req_base.req_ompi.req_complete == false)
ompi_condition_wait(&ompi_request_cond, &ompi_request_lock);
opal_condition_wait(&ompi_request_cond, &ompi_request_lock);
ompi_request_waiting--;
ompi_mutex_unlock(&ompi_request_lock);
opal_mutex_unlock(&ompi_request_lock);
} else {
ompi_request_waiting++;
while (sendreq->req_send.req_base.req_ompi.req_complete == false)
ompi_condition_wait(&ompi_request_cond, &ompi_request_lock);
opal_condition_wait(&ompi_request_cond, &ompi_request_lock);
ompi_request_waiting--;
}
}

View file

@ -25,7 +25,7 @@ static void mca_pml_ob1_proc_construct(mca_pml_ob1_proc_t* proc)
{
proc->proc_ompi = NULL;
proc->proc_sequence = 0;
OBJ_CONSTRUCT(&proc->proc_lock, ompi_mutex_t);
OBJ_CONSTRUCT(&proc->proc_lock, opal_mutex_t);
OBJ_CONSTRUCT(&proc->btl_eager, mca_pml_ob1_ep_array_t);
OBJ_CONSTRUCT(&proc->btl_send, mca_pml_ob1_ep_array_t);
OBJ_CONSTRUCT(&proc->btl_rdma, mca_pml_ob1_ep_array_t);

View file

@ -19,7 +19,7 @@
#ifndef MCA_PML_PROC_H
#define MCA_PML_PROC_H
#include "threads/mutex.h"
#include "opal/threads/mutex.h"
#include "communicator/communicator.h"
#include "group/group.h"
#include "proc/proc.h"
@ -35,7 +35,7 @@ extern "C" {
struct mca_pml_proc_t {
opal_object_t super;
ompi_proc_t *proc_ompi; /**< back-pointer to ompi_proc_t */
ompi_mutex_t proc_lock; /**< lock to protect against concurrent access */
opal_mutex_t proc_lock; /**< lock to protect against concurrent access */
int proc_flags; /**< prefered method of accessing this peer */
volatile uint32_t proc_sequence; /**< sequence number for send */
mca_pml_ob1_ep_array_t btl_eager; /**< array of endpoints to use for first fragments */

View file

@ -21,7 +21,7 @@
#include "ompi_config.h"
#include "opal/class/opal_list.h"
#include "threads/mutex.h"
#include "opal/threads/mutex.h"
#include "include/constants.h"
#include "communicator/communicator.h"
#include "mca/pml/pml.h"
@ -440,7 +440,7 @@ int mca_pml_ob1_recv_frag_match(
* end points) from being processed, and potentially "loosing"
* the fragment.
*/
OMPI_THREAD_LOCK(&comm->matching_lock);
OPAL_THREAD_LOCK(&comm->matching_lock);
/* get sequence number of next message that can be processed */
next_msg_seq_expected = (uint16_t)proc->expected_sequence;
@ -494,7 +494,7 @@ int mca_pml_ob1_recv_frag_match(
mca_pml_ob1_recv_frag_t* frag;
MCA_PML_OB1_RECV_FRAG_ALLOC(frag, rc);
if(OMPI_SUCCESS != rc) {
OMPI_THREAD_UNLOCK(&pml_comm->matching_lock);
OPAL_THREAD_UNLOCK(&pml_comm->matching_lock);
return rc;
}
MCA_PML_OB1_RECV_FRAG_INIT(frag,btl,hdr,segments,num_segments);
@ -519,14 +519,14 @@ int mca_pml_ob1_recv_frag_match(
mca_pml_ob1_recv_frag_t* frag;
MCA_PML_OB1_RECV_FRAG_ALLOC(frag, rc);
if(OMPI_SUCCESS != rc) {
OMPI_THREAD_UNLOCK(&pml_comm->matching_lock);
OPAL_THREAD_UNLOCK(&pml_comm->matching_lock);
return rc;
}
MCA_PML_OB1_RECV_FRAG_INIT(frag,btl,hdr,segments,num_segments);
opal_list_append(&proc->frags_cant_match, (opal_list_item_t *)frag);
}
OMPI_THREAD_UNLOCK(&pml_comm->matching_lock);
OPAL_THREAD_UNLOCK(&pml_comm->matching_lock);
/* release matching lock before processing fragment */
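In the matching path above, the per-communicator lock is held across the sequence check and any queueing of out-of-order fragments, every early-return path (the MCA_PML_OB1_RECV_FRAG_ALLOC failures) releases it with OPAL_THREAD_UNLOCK, and the fragment itself is processed only after the final unlock. A compact POSIX-threads sketch of that locking discipline follows; match_one(), queue_frag(), deliver() and struct frag are hypothetical stubs standing in for the real matching logic.

#include <pthread.h>

static pthread_mutex_t matching_lock = PTHREAD_MUTEX_INITIALIZER;  /* like comm->matching_lock */

struct frag { int seq; };                                      /* hypothetical fragment    */
static int  match_one(struct frag *f)  { (void)f; return 1; }  /* stub: 1 = matched now    */
static int  queue_frag(struct frag *f) { (void)f; return 0; }  /* stub: 0 = queued OK      */
static void deliver(struct frag *f)    { (void)f; }            /* stub: runs unlocked      */

int frag_match(struct frag *f)
{
    pthread_mutex_lock(&matching_lock);
    if (!match_one(f)) {                          /* out of order: park it for later       */
        if (queue_frag(f) != 0) {
            pthread_mutex_unlock(&matching_lock); /* unlock on the error path too          */
            return -1;
        }
        pthread_mutex_unlock(&matching_lock);
        return 0;
    }
    pthread_mutex_unlock(&matching_lock);         /* release before processing the fragment */
    deliver(f);
    return 0;
}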

View file

@ -51,7 +51,7 @@ static int mca_pml_ob1_recv_request_cancel(struct ompi_request_t* ompi_request,
}
/* The rest should be protected behind the match logic lock */
OMPI_THREAD_LOCK(&comm->matching_lock);
OPAL_THREAD_LOCK(&comm->matching_lock);
if( OMPI_ANY_TAG == ompi_request->req_status.MPI_TAG ) { /* the match has not been already done */
if( request->req_recv.req_base.req_peer == OMPI_ANY_SOURCE ) {
opal_list_remove_item( &comm->wild_receives, (opal_list_item_t*)request );
@ -60,9 +60,9 @@ static int mca_pml_ob1_recv_request_cancel(struct ompi_request_t* ompi_request,
opal_list_remove_item(&proc->specific_receives, (opal_list_item_t*)request);
}
}
OMPI_THREAD_UNLOCK(&comm->matching_lock);
OPAL_THREAD_UNLOCK(&comm->matching_lock);
OMPI_THREAD_LOCK(&ompi_request_lock);
OPAL_THREAD_LOCK(&ompi_request_lock);
ompi_request->req_status._cancelled = true;
ompi_request->req_complete = true; /* mark it as completed so all the test/wait functions
* on this particular request will finish */
@ -71,9 +71,9 @@ static int mca_pml_ob1_recv_request_cancel(struct ompi_request_t* ompi_request,
* to complete their test/wait functions.
*/
if(ompi_request_waiting) {
ompi_condition_broadcast(&ompi_request_cond);
opal_condition_broadcast(&ompi_request_cond);
}
OMPI_THREAD_UNLOCK(&ompi_request_lock);
OPAL_THREAD_UNLOCK(&ompi_request_lock);
return OMPI_SUCCESS;
}
@ -274,7 +274,7 @@ void mca_pml_ob1_recv_request_progress(
case MCA_PML_OB1_HDR_TYPE_FIN:
bytes_delivered = bytes_received = hdr->hdr_fin.hdr_rdma_length;
OMPI_THREAD_ADD32(&recvreq->req_pipeline_depth,-1);
OPAL_THREAD_ADD32(&recvreq->req_pipeline_depth,-1);
break;
default:
@ -282,7 +282,7 @@ void mca_pml_ob1_recv_request_progress(
}
/* check completion status */
OMPI_THREAD_LOCK(&ompi_request_lock);
OPAL_THREAD_LOCK(&ompi_request_lock);
recvreq->req_bytes_received += bytes_received;
recvreq->req_bytes_delivered += bytes_delivered;
if (recvreq->req_bytes_received >= recvreq->req_recv.req_bytes_packed) {
@ -308,13 +308,13 @@ void mca_pml_ob1_recv_request_progress(
}
#endif
if(ompi_request_waiting) {
ompi_condition_broadcast(&ompi_request_cond);
opal_condition_broadcast(&ompi_request_cond);
}
schedule = false;
} else if (recvreq->req_rdma_offset < recvreq->req_recv.req_bytes_packed) {
schedule = true;
}
OMPI_THREAD_UNLOCK(&ompi_request_lock);
OPAL_THREAD_UNLOCK(&ompi_request_lock);
/* schedule additional rdma operations */
if(schedule) {
@ -330,7 +330,7 @@ void mca_pml_ob1_recv_request_progress(
void mca_pml_ob1_recv_request_schedule(mca_pml_ob1_recv_request_t* recvreq)
{
if(OMPI_THREAD_ADD32(&recvreq->req_lock,1) == 1) {
if(OPAL_THREAD_ADD32(&recvreq->req_lock,1) == 1) {
mca_pml_ob1_proc_t* proc = recvreq->req_proc;
size_t num_btl_avail = mca_pml_ob1_ep_array_get_size(&proc->btl_rdma);
do {
@ -406,9 +406,9 @@ void mca_pml_ob1_recv_request_schedule(mca_pml_ob1_recv_request_t* recvreq)
recvreq->pin_index++;
#endif
if(dst == NULL) {
OMPI_THREAD_LOCK(&mca_pml_ob1.lock);
OPAL_THREAD_LOCK(&mca_pml_ob1.lock);
opal_list_append(&mca_pml_ob1.recv_pending, (opal_list_item_t*)recvreq);
OMPI_THREAD_UNLOCK(&mca_pml_ob1.lock);
OPAL_THREAD_UNLOCK(&mca_pml_ob1.lock);
break;
}
dst->des_cbdata = recvreq;
@ -422,9 +422,9 @@ void mca_pml_ob1_recv_request_schedule(mca_pml_ob1_recv_request_t* recvreq)
MCA_PML_OB1_ENDPOINT_DES_ALLOC(ep, ctl, hdr_size);
if(ctl == NULL) {
ep->btl_free(ep->btl,dst);
OMPI_THREAD_LOCK(&mca_pml_ob1.lock);
OPAL_THREAD_LOCK(&mca_pml_ob1.lock);
opal_list_append(&mca_pml_ob1.recv_pending, (opal_list_item_t*)recvreq);
OMPI_THREAD_UNLOCK(&mca_pml_ob1.lock);
OPAL_THREAD_UNLOCK(&mca_pml_ob1.lock);
break;
}
ctl->des_flags |= MCA_BTL_DES_FLAGS_PRIORITY;
@ -443,7 +443,7 @@ void mca_pml_ob1_recv_request_schedule(mca_pml_ob1_recv_request_t* recvreq)
/* update request state */
recvreq->req_rdma_offset += size;
OMPI_THREAD_ADD32(&recvreq->req_pipeline_depth,1);
OPAL_THREAD_ADD32(&recvreq->req_pipeline_depth,1);
/* send rdma request to peer */
rc = ep->btl_send(ep->btl, ep->btl_endpoint, ctl, MCA_BTL_TAG_PML);
@ -453,17 +453,17 @@ void mca_pml_ob1_recv_request_schedule(mca_pml_ob1_recv_request_t* recvreq)
ep->btl_free(ep->btl,ctl);
ep->btl_free(ep->btl,dst);
recvreq->req_rdma_offset -= size;
OMPI_THREAD_ADD32(&recvreq->req_pipeline_depth,-1);
OMPI_THREAD_LOCK(&mca_pml_ob1.lock);
OPAL_THREAD_ADD32(&recvreq->req_pipeline_depth,-1);
OPAL_THREAD_LOCK(&mca_pml_ob1.lock);
opal_list_append(&mca_pml_ob1.recv_pending, (opal_list_item_t*)recvreq);
OMPI_THREAD_UNLOCK(&mca_pml_ob1.lock);
OPAL_THREAD_UNLOCK(&mca_pml_ob1.lock);
break;
}
/* run progress as the prepare (pinning) can take some time */
mca_pml_ob1_progress();
}
} while(OMPI_THREAD_ADD32(&recvreq->req_lock,-1) > 0);
} while(OPAL_THREAD_ADD32(&recvreq->req_lock,-1) > 0);
}
}
@ -479,14 +479,14 @@ void mca_pml_ob1_recv_request_match_specific(mca_pml_ob1_recv_request_t* request
mca_pml_ob1_recv_frag_t* frag;
/* check for a specific match */
OMPI_THREAD_LOCK(&comm->matching_lock);
OPAL_THREAD_LOCK(&comm->matching_lock);
/* assign sequence number */
request->req_recv.req_base.req_sequence = comm->recv_sequence++;
if (opal_list_get_size(&proc->unexpected_frags) > 0 &&
(frag = mca_pml_ob1_recv_request_match_specific_proc(request, proc)) != NULL) {
OMPI_THREAD_UNLOCK(&comm->matching_lock);
OPAL_THREAD_UNLOCK(&comm->matching_lock);
mca_pml_ob1_recv_request_progress(request,frag->btl,frag->segments,frag->num_segments);
if( !((MCA_PML_REQUEST_IPROBE == request->req_recv.req_base.req_type) ||
@ -502,7 +502,7 @@ void mca_pml_ob1_recv_request_match_specific(mca_pml_ob1_recv_request_t* request
if(request->req_recv.req_base.req_type != MCA_PML_REQUEST_IPROBE) {
opal_list_append(&proc->specific_receives, (opal_list_item_t*)request);
}
OMPI_THREAD_UNLOCK(&comm->matching_lock);
OPAL_THREAD_UNLOCK(&comm->matching_lock);
}
@ -524,7 +524,7 @@ void mca_pml_ob1_recv_request_match_wild(mca_pml_ob1_recv_request_t* request)
* process, then an inner loop over the messages from the
* process.
*/
OMPI_THREAD_LOCK(&pml_comm->c_matching_lock);
OPAL_THREAD_LOCK(&pml_comm->c_matching_lock);
/* assign sequence number */
request->req_recv.req_base.req_sequence = comm->recv_sequence++;
@ -540,7 +540,7 @@ void mca_pml_ob1_recv_request_match_wild(mca_pml_ob1_recv_request_t* request)
/* loop over messages from the current proc */
if ((frag = mca_pml_ob1_recv_request_match_specific_proc(request, proc)) != NULL) {
OMPI_THREAD_UNLOCK(&comm->matching_lock);
OPAL_THREAD_UNLOCK(&comm->matching_lock);
mca_pml_ob1_recv_request_progress(request,frag->btl,frag->segments,frag->num_segments);
if( !((MCA_PML_REQUEST_IPROBE == request->req_recv.req_base.req_type) ||
@ -558,7 +558,7 @@ void mca_pml_ob1_recv_request_match_wild(mca_pml_ob1_recv_request_t* request)
if(request->req_recv.req_base.req_type != MCA_PML_REQUEST_IPROBE)
opal_list_append(&comm->wild_receives, (opal_list_item_t*)request);
OMPI_THREAD_UNLOCK(&comm->matching_lock);
OPAL_THREAD_UNLOCK(&comm->matching_lock);
}
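The mca_pml_ob1_recv_request_schedule hunk above also shows the OPAL_THREAD_ADD32 reentrancy guard: only the caller that raises req_lock from 0 to 1 runs the scheduling loop, later callers merely bump the counter, and the loop repeats until the counter drains back to zero. Below is a small analogue of that guard written with C11 atomics; schedule_one_pass() and struct sched_req are hypothetical, and since atomic_fetch_add returns the previous value, the explicit +1/-1 recovers the add-and-fetch result of OPAL_THREAD_ADD32.

#include <stdatomic.h>

struct sched_req { atomic_int lock; };                         /* hypothetical request      */

static void schedule_one_pass(struct sched_req *req) { (void)req; /* hypothetical work */ }

static void schedule(struct sched_req *req)
{
    if (atomic_fetch_add(&req->lock, 1) + 1 == 1) {            /* first caller wins         */
        do {
            schedule_one_pass(req);                            /* one pass of the scheduler */
        } while (atomic_fetch_add(&req->lock, -1) - 1 > 0);    /* rerun if others queued up */
    }
    /* other callers return immediately; their increment forces another pass above */
}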

View file

@ -95,10 +95,10 @@ static void mca_pml_ob1_short_completion(
MCA_PML_OB1_ENDPOINT_DES_RETURN(btl_ep,descriptor);
/* signal request completion */
OMPI_THREAD_LOCK(&ompi_request_lock);
OPAL_THREAD_LOCK(&ompi_request_lock);
sendreq->req_bytes_delivered = sendreq->req_send.req_bytes_packed;
MCA_PML_OB1_SEND_REQUEST_COMPLETE(sendreq);
OMPI_THREAD_UNLOCK(&ompi_request_lock);
OPAL_THREAD_UNLOCK(&ompi_request_lock);
}
/**
@ -152,12 +152,12 @@ static void mca_pml_ob1_send_completion(
#endif
/* check for request completion */
OMPI_THREAD_LOCK(&ompi_request_lock);
if (OMPI_THREAD_ADD32(&sendreq->req_pipeline_depth,-1) == 0 &&
OPAL_THREAD_LOCK(&ompi_request_lock);
if (OPAL_THREAD_ADD32(&sendreq->req_pipeline_depth,-1) == 0 &&
sendreq->req_bytes_delivered == sendreq->req_send.req_bytes_packed) {
MCA_PML_OB1_SEND_REQUEST_COMPLETE(sendreq);
}
OMPI_THREAD_UNLOCK(&ompi_request_lock);
OPAL_THREAD_UNLOCK(&ompi_request_lock);
/* return the descriptor */
@ -172,9 +172,9 @@ static void mca_pml_ob1_send_completion(
default:
break;
}
OMPI_THREAD_LOCK(&mca_pml_ob1.ob1_lock);
OPAL_THREAD_LOCK(&mca_pml_ob1.ob1_lock);
sendreq = (mca_pml_ob1_send_request_t*)opal_list_remove_first(&mca_pml_ob1.send_pending);
OMPI_THREAD_UNLOCK(&mca_pml_ob1.ob1_lock);
OPAL_THREAD_UNLOCK(&mca_pml_ob1.ob1_lock);
}
}
@ -347,7 +347,7 @@ int mca_pml_ob1_send_request_start(
}
descriptor->des_flags |= MCA_BTL_DES_FLAGS_PRIORITY;
descriptor->des_cbdata = sendreq;
OMPI_THREAD_ADD32(&sendreq->req_pipeline_depth,1);
OPAL_THREAD_ADD32(&sendreq->req_pipeline_depth,1);
/* send */
#if MCA_PML_OB1_TIMESTAMPS
@ -377,7 +377,7 @@ int mca_pml_ob1_send_request_schedule(mca_pml_ob1_send_request_t* sendreq)
* of the number of times the routine has been called and run through
* the scheduling logic once for every call.
*/
if(OMPI_THREAD_ADD32(&sendreq->req_lock,1) == 1) {
if(OPAL_THREAD_ADD32(&sendreq->req_lock,1) == 1) {
mca_pml_ob1_proc_t* proc = sendreq->req_proc;
size_t num_btl_avail = mca_pml_ob1_ep_array_get_size(&proc->btl_send);
do {
@ -422,9 +422,9 @@ int mca_pml_ob1_send_request_schedule(mca_pml_ob1_send_request_t* sendreq)
sizeof(mca_pml_ob1_frag_hdr_t),
&size);
if(des == NULL) {
OMPI_THREAD_LOCK(&mca_pml_ob1.lock);
OPAL_THREAD_LOCK(&mca_pml_ob1.lock);
opal_list_append(&mca_pml_ob1.send_pending, (opal_list_item_t*)sendreq);
OMPI_THREAD_UNLOCK(&mca_pml_ob1.lock);
OPAL_THREAD_UNLOCK(&mca_pml_ob1.lock);
break;
}
des->des_cbfunc = mca_pml_ob1_send_completion;
@ -441,7 +441,7 @@ int mca_pml_ob1_send_request_schedule(mca_pml_ob1_send_request_t* sendreq)
/* update state */
sendreq->req_send_offset += size;
OMPI_THREAD_ADD32(&sendreq->req_pipeline_depth,1);
OPAL_THREAD_ADD32(&sendreq->req_pipeline_depth,1);
/* initiate send - note that this may complete before the call returns */
rc = ep->btl_send(ep->btl, ep->btl_endpoint, des, MCA_BTL_TAG_PML);
@ -449,11 +449,11 @@ int mca_pml_ob1_send_request_schedule(mca_pml_ob1_send_request_t* sendreq)
bytes_remaining -= size;
} else {
sendreq->req_send_offset -= size;
OMPI_THREAD_ADD32(&sendreq->req_pipeline_depth,-1);
OPAL_THREAD_ADD32(&sendreq->req_pipeline_depth,-1);
ep->btl_free(ep->btl,des);
OMPI_THREAD_LOCK(&mca_pml_ob1.lock);
OPAL_THREAD_LOCK(&mca_pml_ob1.lock);
opal_list_append(&mca_pml_ob1.send_pending, (opal_list_item_t*)sendreq);
OMPI_THREAD_UNLOCK(&mca_pml_ob1.lock);
OPAL_THREAD_UNLOCK(&mca_pml_ob1.lock);
break;
}
#if MCA_PML_OB1_TIMESTAMPS
@ -461,7 +461,7 @@ int mca_pml_ob1_send_request_schedule(mca_pml_ob1_send_request_t* sendreq)
sendreq->t_scheduled = get_profiler_timestamp();
#endif
}
} while (OMPI_THREAD_ADD32(&sendreq->req_lock,-1) > 0);
} while (OPAL_THREAD_ADD32(&sendreq->req_lock,-1) > 0);
}
return OMPI_SUCCESS;
}
@ -517,12 +517,12 @@ static void mca_pml_ob1_put_completion(
#endif
/* check for request completion */
OMPI_THREAD_LOCK(&ompi_request_lock);
OPAL_THREAD_LOCK(&ompi_request_lock);
sendreq->req_bytes_delivered += frag->rdma_length;
if(sendreq->req_bytes_delivered >= sendreq->req_send.req_bytes_packed) {
MCA_PML_OB1_SEND_REQUEST_COMPLETE(sendreq);
}
OMPI_THREAD_UNLOCK(&ompi_request_lock);
OPAL_THREAD_UNLOCK(&ompi_request_lock);
/* allocate descriptor for fin control message - note that
* the rdma descriptor cannot be reused as it points directly
@ -532,9 +532,9 @@ static void mca_pml_ob1_put_completion(
MCA_PML_OB1_ENDPOINT_DES_ALLOC(frag->rdma_ep, fin, sizeof(mca_pml_ob1_fin_hdr_t));
if(NULL == fin) {
OMPI_THREAD_LOCK(&mca_pml_ob1.lock);
OPAL_THREAD_LOCK(&mca_pml_ob1.lock);
opal_list_append(&mca_pml_ob1.rdma_pending, (opal_list_item_t*)frag);
OMPI_THREAD_LOCK(&mca_pml_ob1.lock);
OPAL_THREAD_LOCK(&mca_pml_ob1.lock);
goto cleanup;
}
fin->des_flags |= MCA_BTL_DES_FLAGS_PRIORITY;
@ -559,9 +559,9 @@ static void mca_pml_ob1_put_completion(
if(OMPI_SUCCESS != rc) {
btl->btl_free(btl, fin);
if(rc == OMPI_ERR_OUT_OF_RESOURCE) {
OMPI_THREAD_LOCK(&mca_pml_ob1.lock);
OPAL_THREAD_LOCK(&mca_pml_ob1.lock);
opal_list_append(&mca_pml_ob1.rdma_pending, (opal_list_item_t*)frag);
OMPI_THREAD_LOCK(&mca_pml_ob1.lock);
OPAL_THREAD_LOCK(&mca_pml_ob1.lock);
} else {
/* TSW - FIX */
ORTE_ERROR_LOG(rc);
@ -646,9 +646,9 @@ void mca_pml_ob1_send_request_put(
0,
&size);
if(NULL == des) {
OMPI_THREAD_LOCK(&mca_pml_ob1.lock);
OPAL_THREAD_LOCK(&mca_pml_ob1.lock);
opal_list_append(&mca_pml_ob1.rdma_pending, (opal_list_item_t*)frag);
OMPI_THREAD_UNLOCK(&mca_pml_ob1.lock);
OPAL_THREAD_UNLOCK(&mca_pml_ob1.lock);
}
frag->rdma_state = MCA_PML_OB1_RDMA_PUT;
frag->rdma_length = size;
@ -667,9 +667,9 @@ void mca_pml_ob1_send_request_put(
if(OMPI_SUCCESS != (rc = btl->btl_put(btl, ep->btl_endpoint, des))) {
if(rc == OMPI_ERR_OUT_OF_RESOURCE) {
OMPI_THREAD_LOCK(&mca_pml_ob1.lock);
OPAL_THREAD_LOCK(&mca_pml_ob1.lock);
opal_list_append(&mca_pml_ob1.rdma_pending, (opal_list_item_t*)frag);
OMPI_THREAD_UNLOCK(&mca_pml_ob1.lock);
OPAL_THREAD_UNLOCK(&mca_pml_ob1.lock);
} else {
/* TSW - FIX */
ORTE_ERROR_LOG(rc);

View file

@ -181,7 +181,7 @@ OBJ_CLASS_DECLARATION(mca_pml_ob1_send_request_t);
sendreq->req_state = MCA_PML_OB1_SR_START; \
sendreq->req_send.req_base.req_ompi.req_complete = false; \
sendreq->req_send.req_base.req_ompi.req_state = OMPI_REQUEST_ACTIVE; \
sendreq->req_send.req_base.req_sequence = OMPI_THREAD_ADD32(&proc->proc_sequence,1); \
sendreq->req_send.req_base.req_sequence = OPAL_THREAD_ADD32(&proc->proc_sequence,1); \
sendreq->req_endpoint = endpoint; \
\
/* handle buffered send */ \
@ -210,7 +210,7 @@ OBJ_CLASS_DECLARATION(mca_pml_ob1_send_request_t);
(sendreq)->req_state = MCA_PML_OB1_SR_COMPLETE; \
MCA_PML_OB1_SEND_REQUEST_TSTAMPS_DUMP(sendreq); \
if(ompi_request_waiting) { \
ompi_condition_broadcast(&ompi_request_cond); \
opal_condition_broadcast(&ompi_request_cond); \
} \
} else if((sendreq)->req_send.req_base.req_free_called) { \
MCA_PML_OB1_FREE((ompi_request_t**)&sendreq); \

View file

@ -45,13 +45,13 @@ int mca_pml_ob1_start(size_t count, ompi_request_t** requests)
case OMPI_REQUEST_ACTIVE: {
ompi_request_t *request;
OMPI_THREAD_LOCK(&ompi_request_lock);
OPAL_THREAD_LOCK(&ompi_request_lock);
if (pml_request->req_pml_complete == false) {
/* free request after it completes */
pml_request->req_free_called = true;
} else {
/* can reuse the existing request */
OMPI_THREAD_UNLOCK(&ompi_request_lock);
OPAL_THREAD_UNLOCK(&ompi_request_lock);
break;
}
@ -85,7 +85,7 @@ int mca_pml_ob1_start(size_t count, ompi_request_t** requests)
rc = OMPI_ERR_REQUEST;
break;
}
OMPI_THREAD_UNLOCK(&ompi_request_lock);
OPAL_THREAD_UNLOCK(&ompi_request_lock);
if(OMPI_SUCCESS != rc)
return rc;
pml_request = (mca_pml_base_request_t*)request;

View file

@ -20,8 +20,8 @@
#ifndef MCA_PML_TEG_H
#define MCA_PML_TEG_H
#include "threads/thread.h"
#include "threads/condition.h"
#include "opal/threads/thread.h"
#include "opal/threads/condition.h"
#include "class/ompi_free_list.h"
#include "util/cmd_line.h"
#include "request/request.h"
@ -52,7 +52,7 @@ struct mca_pml_teg_t {
size_t teg_num_ptl_progress;
opal_list_t teg_procs;
ompi_mutex_t teg_lock;
opal_mutex_t teg_lock;
int teg_priority;

View file

@ -81,7 +81,7 @@ int mca_pml_teg_component_open(void)
return OMPI_ERROR;
}
#endif
OBJ_CONSTRUCT(&mca_pml_teg.teg_lock, ompi_mutex_t);
OBJ_CONSTRUCT(&mca_pml_teg.teg_lock, opal_mutex_t);
OBJ_CONSTRUCT(&mca_pml_teg.teg_send_requests, ompi_free_list_t);
OBJ_CONSTRUCT(&mca_pml_teg.teg_recv_requests, ompi_free_list_t);
OBJ_CONSTRUCT(&mca_pml_teg.teg_procs, opal_list_t);

View file

@ -69,17 +69,17 @@ int mca_pml_teg_probe(int src,
if (recvreq.req_recv.req_base.req_ompi.req_complete == false) {
/* give up and sleep until completion */
if (ompi_using_threads()) {
ompi_mutex_lock(&ompi_request_lock);
if (opal_using_threads()) {
opal_mutex_lock(&ompi_request_lock);
ompi_request_waiting++;
while (recvreq.req_recv.req_base.req_ompi.req_complete == false)
ompi_condition_wait(&ompi_request_cond, &ompi_request_lock);
opal_condition_wait(&ompi_request_cond, &ompi_request_lock);
ompi_request_waiting--;
ompi_mutex_unlock(&ompi_request_lock);
opal_mutex_unlock(&ompi_request_lock);
} else {
ompi_request_waiting++;
while (recvreq.req_recv.req_base.req_ompi.req_complete == false)
ompi_condition_wait(&ompi_request_cond, &ompi_request_lock);
opal_condition_wait(&ompi_request_cond, &ompi_request_lock);
ompi_request_waiting--;
}
}

View file

@ -93,17 +93,17 @@ int mca_pml_teg_recv(void *addr,
if (recvreq->req_recv.req_base.req_ompi.req_complete == false) {
/* give up and sleep until completion */
if (ompi_using_threads()) {
ompi_mutex_lock(&ompi_request_lock);
if (opal_using_threads()) {
opal_mutex_lock(&ompi_request_lock);
ompi_request_waiting++;
while (recvreq->req_recv.req_base.req_ompi.req_complete == false)
ompi_condition_wait(&ompi_request_cond, &ompi_request_lock);
opal_condition_wait(&ompi_request_cond, &ompi_request_lock);
ompi_request_waiting--;
ompi_mutex_unlock(&ompi_request_lock);
opal_mutex_unlock(&ompi_request_lock);
} else {
ompi_request_waiting++;
while (recvreq->req_recv.req_base.req_ompi.req_complete == false)
ompi_condition_wait(&ompi_request_cond, &ompi_request_lock);
opal_condition_wait(&ompi_request_cond, &ompi_request_lock);
ompi_request_waiting--;
}
}

View file

@ -106,17 +106,17 @@ int mca_pml_teg_send(void *buf,
if (sendreq->req_send.req_base.req_ompi.req_complete == false) {
/* give up and sleep until completion */
if (ompi_using_threads()) {
ompi_mutex_lock(&ompi_request_lock);
if (opal_using_threads()) {
opal_mutex_lock(&ompi_request_lock);
ompi_request_waiting++;
while (sendreq->req_send.req_base.req_ompi.req_complete == false)
ompi_condition_wait(&ompi_request_cond, &ompi_request_lock);
opal_condition_wait(&ompi_request_cond, &ompi_request_lock);
ompi_request_waiting--;
ompi_mutex_unlock(&ompi_request_lock);
opal_mutex_unlock(&ompi_request_lock);
} else {
ompi_request_waiting++;
while (sendreq->req_send.req_base.req_ompi.req_complete == false)
ompi_condition_wait(&ompi_request_cond, &ompi_request_lock);
opal_condition_wait(&ompi_request_cond, &ompi_request_lock);
ompi_request_waiting--;
}
}

View file

@ -26,21 +26,21 @@ static void mca_pml_teg_proc_construct(mca_pml_proc_t* proc)
{
proc->proc_ompi = NULL;
proc->proc_ptl_flags = 0;
OBJ_CONSTRUCT(&proc->proc_lock, ompi_mutex_t);
OBJ_CONSTRUCT(&proc->proc_lock, opal_mutex_t);
OBJ_CONSTRUCT(&proc->proc_ptl_first, mca_pml_teg_ptl_array_t);
OBJ_CONSTRUCT(&proc->proc_ptl_next, mca_pml_teg_ptl_array_t);
OMPI_THREAD_LOCK(&mca_pml_teg.teg_lock);
OPAL_THREAD_LOCK(&mca_pml_teg.teg_lock);
opal_list_append(&mca_pml_teg.teg_procs, (opal_list_item_t*)proc);
OMPI_THREAD_UNLOCK(&mca_pml_teg.teg_lock);
OPAL_THREAD_UNLOCK(&mca_pml_teg.teg_lock);
}
static void mca_pml_teg_proc_destruct(mca_pml_proc_t* proc)
{
OMPI_THREAD_LOCK(&mca_pml_teg.teg_lock);
OPAL_THREAD_LOCK(&mca_pml_teg.teg_lock);
opal_list_remove_item(&mca_pml_teg.teg_procs, (opal_list_item_t*)proc);
OMPI_THREAD_UNLOCK(&mca_pml_teg.teg_lock);
OPAL_THREAD_UNLOCK(&mca_pml_teg.teg_lock);
OBJ_DESTRUCT(&proc->proc_lock);
OBJ_DESTRUCT(&proc->proc_ptl_first);
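The constructor/destructor pair above gives each proc its own opal_mutex_t via OBJ_CONSTRUCT and registers the object on the component-wide teg_procs list under teg_lock; the destructor takes the same lock to remove it before destroying the per-object lock. A rough POSIX-threads analogue of that lifecycle is sketched below; the list layout and the names are hypothetical simplifications, not the OBJ/opal_list machinery itself.

#include <pthread.h>
#include <stdlib.h>

struct proc {
    pthread_mutex_t lock;                       /* per-object lock, like proc_lock */
    struct proc    *next;
};

static pthread_mutex_t component_lock = PTHREAD_MUTEX_INITIALIZER;  /* like teg_lock  */
static struct proc    *proc_list = NULL;                            /* like teg_procs */

struct proc *proc_construct(void)
{
    struct proc *p = calloc(1, sizeof(*p));
    if (p == NULL)
        return NULL;
    pthread_mutex_init(&p->lock, NULL);         /* OBJ_CONSTRUCT(&proc->proc_lock, opal_mutex_t) */
    pthread_mutex_lock(&component_lock);        /* OPAL_THREAD_LOCK(&mca_pml_teg.teg_lock)       */
    p->next   = proc_list;                      /* opal_list_append(&mca_pml_teg.teg_procs, ...) */
    proc_list = p;
    pthread_mutex_unlock(&component_lock);
    return p;
}

void proc_destruct(struct proc *p)
{
    pthread_mutex_lock(&component_lock);        /* opal_list_remove_item(...) under the lock */
    for (struct proc **it = &proc_list; *it != NULL; it = &(*it)->next) {
        if (*it == p) { *it = p->next; break; }
    }
    pthread_mutex_unlock(&component_lock);
    pthread_mutex_destroy(&p->lock);            /* OBJ_DESTRUCT(&proc->proc_lock) */
    free(p);
}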

View file

@ -19,7 +19,7 @@
#ifndef MCA_PML_PROC_H
#define MCA_PML_PROC_H
#include "threads/mutex.h"
#include "opal/threads/mutex.h"
#include "communicator/communicator.h"
#include "group/group.h"
#include "proc/proc.h"
@ -35,7 +35,7 @@ extern "C" {
struct mca_pml_proc_t {
opal_list_item_t super;
ompi_proc_t *proc_ompi; /**< back-pointer to ompi_proc_t */
ompi_mutex_t proc_lock; /**< lock to protect against concurrent access */
opal_mutex_t proc_lock; /**< lock to protect against concurrent access */
mca_ptl_array_t proc_ptl_first; /**< array of ptls to use for first fragments */
mca_ptl_array_t proc_ptl_next; /**< array of ptls to use for remaining fragments */
uint32_t proc_ptl_flags; /**< aggregate ptl flags */

View file

@ -22,7 +22,7 @@
static void mca_pml_base_ptl_construct(mca_pml_base_ptl_t* ptl)
{
OBJ_CONSTRUCT(&ptl->ptl_cache, opal_list_t);
OBJ_CONSTRUCT(&ptl->ptl_cache_lock, ompi_mutex_t);
OBJ_CONSTRUCT(&ptl->ptl_cache_lock, opal_mutex_t);
ptl->ptl = NULL;
ptl->ptl_cache_size = 0;
ptl->ptl_cache_alloc = 0;

View file

@ -19,7 +19,7 @@
#include "mca/pml/pml.h"
#include "mca/ptl/ptl.h"
#include "threads/condition.h"
#include "opal/threads/condition.h"
#if defined(c_plusplus) || defined(__cplusplus)
extern "C" {
#endif
@ -29,7 +29,7 @@ struct mca_pml_base_ptl_t {
opal_list_t ptl_cache; /**< cache of send requests */
size_t ptl_cache_size; /**< maximum size of cache */
size_t ptl_cache_alloc; /**< current number of allocated items */
ompi_mutex_t ptl_cache_lock; /**< lock for queue access */
opal_mutex_t ptl_cache_lock; /**< lock for queue access */
struct mca_ptl_base_module_t* ptl; /**< back pointer to ptl */
};
typedef struct mca_pml_base_ptl_t mca_pml_base_ptl_t;
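This structure keeps a per-PTL free list of send requests behind its own opal_mutex_t, and the send-request allocation/return macros shown further down pop from and push back to that cache under OPAL_THREAD_LOCK, falling back to the global free list when the cache is empty and already at its size limit. The following is a condensed sketch of the same cache discipline in plain C with pthreads; the types, field names and the calloc-based fallback are hypothetical simplifications of the real free-list machinery.

#include <pthread.h>
#include <stdlib.h>

struct send_req { struct send_req *next; int cached; };   /* hypothetical request type */

struct req_cache {
    pthread_mutex_t  lock;        /* like ptl_cache_lock  */
    struct send_req *head;        /* like ptl_cache       */
    size_t           alloc;       /* like ptl_cache_alloc */
    size_t           max;         /* like ptl_cache_size  */
};

struct send_req *req_alloc(struct req_cache *c)
{
    struct send_req *r = NULL;
    pthread_mutex_lock(&c->lock);
    if (c->head != NULL) {                    /* fast path: reuse a cached request   */
        r = c->head;
        c->head = r->next;
    } else if (c->alloc < c->max) {           /* grow the cache up to its limit      */
        r = calloc(1, sizeof(*r));
        if (r != NULL) { r->cached = 1; c->alloc++; }
    }
    pthread_mutex_unlock(&c->lock);
    if (r == NULL)                            /* else fall back to the "global pool" */
        r = calloc(1, sizeof(*r));
    return r;
}

void req_return(struct req_cache *c, struct send_req *r)
{
    if (r != NULL && r->cached) {             /* cached requests go back on the list */
        pthread_mutex_lock(&c->lock);
        r->next = c->head;
        c->head = r;
        pthread_mutex_unlock(&c->lock);
    } else {
        free(r);                              /* pool requests are simply released   */
    }
}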

View file

@ -49,7 +49,7 @@ static int mca_pml_teg_recv_request_cancel(struct ompi_request_t* request, int c
}
/* The rest should be protected behind the match logic lock */
OMPI_THREAD_LOCK(&pml_comm->c_matching_lock);
OPAL_THREAD_LOCK(&pml_comm->c_matching_lock);
if( OMPI_ANY_TAG == request->req_status.MPI_TAG ) { /* the match have not been already done */
@ -62,7 +62,7 @@ static int mca_pml_teg_recv_request_cancel(struct ompi_request_t* request, int c
}
}
OMPI_THREAD_UNLOCK(&pml_comm->c_matching_lock);
OPAL_THREAD_UNLOCK(&pml_comm->c_matching_lock);
request->req_status._cancelled = true;
request->req_complete = true; /* mark it as completed so all the test/wait functions
@ -72,7 +72,7 @@ static int mca_pml_teg_recv_request_cancel(struct ompi_request_t* request, int c
* to complete their test/wait functions.
*/
if(ompi_request_waiting) {
ompi_condition_broadcast(&ompi_request_cond);
opal_condition_broadcast(&ompi_request_cond);
}
return OMPI_SUCCESS;
}
@ -107,7 +107,7 @@ void mca_pml_teg_recv_request_progress(
size_t bytes_received,
size_t bytes_delivered)
{
OMPI_THREAD_LOCK(&ompi_request_lock);
OPAL_THREAD_LOCK(&ompi_request_lock);
req->req_bytes_received += bytes_received;
req->req_bytes_delivered += bytes_delivered;
if (req->req_bytes_received >= req->req_recv.req_bytes_packed) {
@ -116,10 +116,10 @@ void mca_pml_teg_recv_request_progress(
req->req_recv.req_base.req_pml_complete = true;
req->req_recv.req_base.req_ompi.req_complete = true;
if(ompi_request_waiting) {
ompi_condition_broadcast(&ompi_request_cond);
opal_condition_broadcast(&ompi_request_cond);
}
}
OMPI_THREAD_UNLOCK(&ompi_request_lock);
OPAL_THREAD_UNLOCK(&ompi_request_lock);
}
@ -137,7 +137,7 @@ void mca_pml_teg_recv_request_match_specific(mca_ptl_base_recv_request_t* reques
mca_ptl_base_recv_frag_t* frag;
/* check for a specific match */
OMPI_THREAD_LOCK(&pml_comm->c_matching_lock);
OPAL_THREAD_LOCK(&pml_comm->c_matching_lock);
/* assign sequence number */
request->req_recv.req_base.req_sequence = pml_comm->c_recv_seq++;
@ -148,7 +148,7 @@ void mca_pml_teg_recv_request_match_specific(mca_ptl_base_recv_request_t* reques
/* setup pointer to ptls peer */
if(NULL == frag->frag_base.frag_peer)
frag->frag_base.frag_peer = mca_pml_teg_proc_lookup_remote_peer(comm,req_peer,ptl);
OMPI_THREAD_UNLOCK(&pml_comm->c_matching_lock);
OPAL_THREAD_UNLOCK(&pml_comm->c_matching_lock);
if( !((MCA_PML_REQUEST_IPROBE == request->req_recv.req_base.req_type) ||
(MCA_PML_REQUEST_PROBE == request->req_recv.req_base.req_type)) ) {
MCA_PML_TEG_RECV_MATCHED( ptl, frag );
@ -162,7 +162,7 @@ void mca_pml_teg_recv_request_match_specific(mca_ptl_base_recv_request_t* reques
if(request->req_recv.req_base.req_type != MCA_PML_REQUEST_IPROBE) {
opal_list_append(pml_comm->c_specific_receives+req_peer, (opal_list_item_t*)request);
}
OMPI_THREAD_UNLOCK(&pml_comm->c_matching_lock);
OPAL_THREAD_UNLOCK(&pml_comm->c_matching_lock);
}
@ -184,7 +184,7 @@ void mca_pml_teg_recv_request_match_wild(mca_ptl_base_recv_request_t* request)
* process, then an inner loop over the messages from the
* process.
*/
OMPI_THREAD_LOCK(&pml_comm->c_matching_lock);
OPAL_THREAD_LOCK(&pml_comm->c_matching_lock);
/* assign sequence number */
request->req_recv.req_base.req_sequence = pml_comm->c_recv_seq++;
@ -202,7 +202,7 @@ void mca_pml_teg_recv_request_match_wild(mca_ptl_base_recv_request_t* request)
/* if required - setup pointer to ptls peer */
if(NULL == frag->frag_base.frag_peer)
frag->frag_base.frag_peer = mca_pml_teg_proc_lookup_remote_peer(comm,proc,ptl);
OMPI_THREAD_UNLOCK(&pml_comm->c_matching_lock);
OPAL_THREAD_UNLOCK(&pml_comm->c_matching_lock);
if( !((MCA_PML_REQUEST_IPROBE == request->req_recv.req_base.req_type) ||
(MCA_PML_REQUEST_PROBE == request->req_recv.req_base.req_type)) ) {
MCA_PML_TEG_RECV_MATCHED( ptl, frag );
@ -217,7 +217,7 @@ void mca_pml_teg_recv_request_match_wild(mca_ptl_base_recv_request_t* request)
if(request->req_recv.req_base.req_type != MCA_PML_REQUEST_IPROBE)
opal_list_append(&pml_comm->c_wild_receives, (opal_list_item_t*)request);
OMPI_THREAD_UNLOCK(&pml_comm->c_matching_lock);
OPAL_THREAD_UNLOCK(&pml_comm->c_matching_lock);
}

View file

@ -92,7 +92,7 @@ int mca_pml_teg_send_request_schedule(mca_ptl_base_send_request_t* req)
* of the number of times the routine has been called and run through
* the scheduling logic once for every call.
*/
if(OMPI_THREAD_ADD32(&req->req_lock,1) == 1) {
if(OPAL_THREAD_ADD32(&req->req_lock,1) == 1) {
proc = ompi_comm_peer_lookup(req->req_send.req_base.req_comm, req->req_send.req_base.req_peer);
proc_pml = proc->proc_pml;
do {
@ -135,15 +135,15 @@ int mca_pml_teg_send_request_schedule(mca_ptl_base_send_request_t* req)
/* unable to complete send - queue for later */
if(send_count == 0) {
OMPI_THREAD_LOCK(&mca_pml_teg.teg_lock);
OPAL_THREAD_LOCK(&mca_pml_teg.teg_lock);
opal_list_append(&mca_pml_teg.teg_send_pending, (opal_list_item_t*)req);
OMPI_THREAD_UNLOCK(&mca_pml_teg.teg_lock);
OPAL_THREAD_UNLOCK(&mca_pml_teg.teg_lock);
req->req_lock = 0;
return OMPI_ERR_OUT_OF_RESOURCE;
}
/* fragments completed while scheduling - so retry */
} while(OMPI_THREAD_ADD32(&req->req_lock,-1) > 0);
} while(OPAL_THREAD_ADD32(&req->req_lock,-1) > 0);
/* free the request if completed while in the scheduler */
if (req->req_send.req_base.req_free_called && req->req_send.req_base.req_pml_complete) {
@ -171,7 +171,7 @@ void mca_pml_teg_send_request_progress(
{
bool schedule = false;
OMPI_THREAD_LOCK(&ompi_request_lock);
OPAL_THREAD_LOCK(&ompi_request_lock);
req->req_bytes_sent += bytes_sent;
if (req->req_bytes_sent >= req->req_send.req_bytes_packed) {
req->req_send.req_base.req_pml_complete = true;
@ -182,7 +182,7 @@ void mca_pml_teg_send_request_progress(
req->req_send.req_base.req_ompi.req_status._count = req->req_bytes_sent;
req->req_send.req_base.req_ompi.req_complete = true;
if(ompi_request_waiting) {
ompi_condition_broadcast(&ompi_request_cond);
opal_condition_broadcast(&ompi_request_cond);
}
} else if(req->req_send.req_base.req_free_called) {
/* don't free the request if in the scheduler */
@ -196,7 +196,7 @@ void mca_pml_teg_send_request_progress(
} else if (req->req_offset < req->req_send.req_bytes_packed) {
schedule = true;
}
OMPI_THREAD_UNLOCK(&ompi_request_lock);
OPAL_THREAD_UNLOCK(&ompi_request_lock);
/* schedule remaining fragments of this request */
if(schedule) {
@ -205,9 +205,9 @@ void mca_pml_teg_send_request_progress(
/* check for pending requests that need to be progressed */
while(opal_list_get_size(&mca_pml_teg.teg_send_pending) != 0) {
OMPI_THREAD_LOCK(&mca_pml_teg.teg_lock);
OPAL_THREAD_LOCK(&mca_pml_teg.teg_lock);
req = (mca_ptl_base_send_request_t*)opal_list_remove_first(&mca_pml_teg.teg_send_pending);
OMPI_THREAD_UNLOCK(&mca_pml_teg.teg_lock);
OPAL_THREAD_UNLOCK(&mca_pml_teg.teg_lock);
if(req == NULL)
break;
if(mca_pml_teg_send_request_schedule(req) != OMPI_SUCCESS)

View file

@ -48,7 +48,7 @@ OBJ_CLASS_DECLARATION(mca_pml_teg_send_request_t);
if(NULL == proc) { \
return OMPI_ERR_OUT_OF_RESOURCE; \
} \
OMPI_THREAD_SCOPED_LOCK(&proc->proc_lock, \
OPAL_THREAD_SCOPED_LOCK(&proc->proc_lock, \
(ptl_proc = mca_ptl_array_get_next(&proc->proc_ptl_first))); \
ptl_base = ptl_proc->ptl_base; \
/* \
@ -56,11 +56,11 @@ OBJ_CLASS_DECLARATION(mca_pml_teg_send_request_t);
* this ptl - if so try the allocation from there. \
*/ \
if(NULL != ptl_base) { \
OMPI_THREAD_LOCK(&ptl_base->ptl_cache_lock); \
OPAL_THREAD_LOCK(&ptl_base->ptl_cache_lock); \
sendreq = (mca_pml_teg_send_request_t*) \
opal_list_remove_first(&ptl_base->ptl_cache); \
if(NULL != sendreq) { \
OMPI_THREAD_UNLOCK(&ptl_base->ptl_cache_lock); \
OPAL_THREAD_UNLOCK(&ptl_base->ptl_cache_lock); \
rc = OMPI_SUCCESS; \
} else if (ptl_base->ptl_cache_alloc < ptl_base->ptl_cache_size) { \
/* \
@ -75,13 +75,13 @@ OBJ_CLASS_DECLARATION(mca_pml_teg_send_request_t);
sendreq->req_cached = true; \
ptl_base->ptl_cache_alloc++; \
} \
OMPI_THREAD_UNLOCK(&ptl_base->ptl_cache_lock); \
OPAL_THREAD_UNLOCK(&ptl_base->ptl_cache_lock); \
} else { \
/* \
* take a request from the global pool \
*/ \
opal_list_item_t* item; \
OMPI_THREAD_UNLOCK(&ptl_base->ptl_cache_lock); \
OPAL_THREAD_UNLOCK(&ptl_base->ptl_cache_lock); \
OMPI_FREE_LIST_WAIT(&mca_pml_teg.teg_send_requests, item, rc); \
sendreq = (mca_pml_teg_send_request_t*)item; \
sendreq->req_ptl = ptl_proc->ptl; \
@ -135,10 +135,10 @@ OBJ_CLASS_DECLARATION(mca_pml_teg_send_request_t);
* to return the send descriptor to the cache. \
*/ \
if(NULL != ptl->ptl_base && (sendreq)->req_cached) { \
OMPI_THREAD_LOCK(&ptl_base->ptl_cache_lock); \
OPAL_THREAD_LOCK(&ptl_base->ptl_cache_lock); \
opal_list_prepend(&ptl_base->ptl_cache, \
(opal_list_item_t*)sendreq); \
OMPI_THREAD_UNLOCK(&ptl_base->ptl_cache_lock); \
OPAL_THREAD_UNLOCK(&ptl_base->ptl_cache_lock); \
} else { \
OMPI_FREE_LIST_RETURN( \
&mca_pml_teg.teg_send_requests, (opal_list_item_t*)sendreq); \

View file

@ -45,13 +45,13 @@ int mca_pml_teg_start(size_t count, ompi_request_t** requests)
case OMPI_REQUEST_ACTIVE: {
ompi_request_t *request;
OMPI_THREAD_LOCK(&ompi_request_lock);
OPAL_THREAD_LOCK(&ompi_request_lock);
if (pml_request->req_pml_complete == false) {
/* free request after it completes */
pml_request->req_free_called = true;
} else {
/* can reuse the existing request */
OMPI_THREAD_UNLOCK(&ompi_request_lock);
OPAL_THREAD_UNLOCK(&ompi_request_lock);
break;
}
@ -85,7 +85,7 @@ int mca_pml_teg_start(size_t count, ompi_request_t** requests)
rc = OMPI_ERR_REQUEST;
break;
}
OMPI_THREAD_UNLOCK(&ompi_request_lock);
OPAL_THREAD_UNLOCK(&ompi_request_lock);
if(OMPI_SUCCESS != rc)
return rc;
pml_request = (mca_pml_base_request_t*)request;

View file

@ -20,8 +20,8 @@
#ifndef MCA_PML_UNIQ_H
#define MCA_PML_UNIQ_H
#include "threads/thread.h"
#include "threads/condition.h"
#include "opal/threads/thread.h"
#include "opal/threads/condition.h"
#include "class/ompi_free_list.h"
#include "util/cmd_line.h"
#include "request/request.h"
@ -52,7 +52,7 @@ struct mca_pml_uniq_t {
size_t uniq_num_ptl_progress;
opal_list_t uniq_procs;
ompi_mutex_t uniq_lock;
opal_mutex_t uniq_lock;
int uniq_free_list_num; /* initial size of free list */
int uniq_free_list_max; /* maximum size of free list */

View file

@ -81,7 +81,7 @@ int mca_pml_uniq_component_open(void)
return OMPI_ERROR;
}
#endif
OBJ_CONSTRUCT(&mca_pml_uniq.uniq_lock, ompi_mutex_t);
OBJ_CONSTRUCT(&mca_pml_uniq.uniq_lock, opal_mutex_t);
OBJ_CONSTRUCT(&mca_pml_uniq.uniq_send_requests, ompi_free_list_t);
OBJ_CONSTRUCT(&mca_pml_uniq.uniq_recv_requests, ompi_free_list_t);
OBJ_CONSTRUCT(&mca_pml_uniq.uniq_procs, opal_list_t);

View file

@ -69,17 +69,17 @@ int mca_pml_uniq_probe(int src,
if (recvreq.req_recv.req_base.req_ompi.req_complete == false) {
/* give up and sleep until completion */
if (ompi_using_threads()) {
ompi_mutex_lock(&ompi_request_lock);
if (opal_using_threads()) {
opal_mutex_lock(&ompi_request_lock);
ompi_request_waiting++;
while (recvreq.req_recv.req_base.req_ompi.req_complete == false)
ompi_condition_wait(&ompi_request_cond, &ompi_request_lock);
opal_condition_wait(&ompi_request_cond, &ompi_request_lock);
ompi_request_waiting--;
ompi_mutex_unlock(&ompi_request_lock);
opal_mutex_unlock(&ompi_request_lock);
} else {
ompi_request_waiting++;
while (recvreq.req_recv.req_base.req_ompi.req_complete == false)
ompi_condition_wait(&ompi_request_cond, &ompi_request_lock);
opal_condition_wait(&ompi_request_cond, &ompi_request_lock);
ompi_request_waiting--;
}
}

View file

@ -93,17 +93,17 @@ int mca_pml_uniq_recv(void *addr,
if (recvreq->req_recv.req_base.req_ompi.req_complete == false) {
/* give up and sleep until completion */
if (ompi_using_threads()) {
ompi_mutex_lock(&ompi_request_lock);
if (opal_using_threads()) {
opal_mutex_lock(&ompi_request_lock);
ompi_request_waiting++;
while (recvreq->req_recv.req_base.req_ompi.req_complete == false)
ompi_condition_wait(&ompi_request_cond, &ompi_request_lock);
opal_condition_wait(&ompi_request_cond, &ompi_request_lock);
ompi_request_waiting--;
ompi_mutex_unlock(&ompi_request_lock);
opal_mutex_unlock(&ompi_request_lock);
} else {
ompi_request_waiting++;
while (recvreq->req_recv.req_base.req_ompi.req_complete == false)
ompi_condition_wait(&ompi_request_cond, &ompi_request_lock);
opal_condition_wait(&ompi_request_cond, &ompi_request_lock);
ompi_request_waiting--;
}
}

View file

@ -106,17 +106,17 @@ int mca_pml_uniq_send(void *buf,
if (sendreq->req_send.req_base.req_ompi.req_complete == false) {
/* give up and sleep until completion */
if (ompi_using_threads()) {
ompi_mutex_lock(&ompi_request_lock);
if (opal_using_threads()) {
opal_mutex_lock(&ompi_request_lock);
ompi_request_waiting++;
while (sendreq->req_send.req_base.req_ompi.req_complete == false)
ompi_condition_wait(&ompi_request_cond, &ompi_request_lock);
opal_condition_wait(&ompi_request_cond, &ompi_request_lock);
ompi_request_waiting--;
ompi_mutex_unlock(&ompi_request_lock);
opal_mutex_unlock(&ompi_request_lock);
} else {
ompi_request_waiting++;
while (sendreq->req_send.req_base.req_ompi.req_complete == false)
ompi_condition_wait(&ompi_request_cond, &ompi_request_lock);
opal_condition_wait(&ompi_request_cond, &ompi_request_lock);
ompi_request_waiting--;
}
}

View file

@ -25,7 +25,7 @@ static void mca_pml_uniq_proc_construct(mca_pml_proc_t* proc)
{
proc->proc_ompi = NULL;
proc->proc_ptl_flags = 0;
OBJ_CONSTRUCT(&proc->proc_lock, ompi_mutex_t);
OBJ_CONSTRUCT(&proc->proc_lock, opal_mutex_t);
proc->proc_ptl_first.ptl_peer = NULL;
proc->proc_ptl_first.ptl_base = NULL;
@ -35,17 +35,17 @@ static void mca_pml_uniq_proc_construct(mca_pml_proc_t* proc)
proc->proc_ptl_next.ptl_base = NULL;
proc->proc_ptl_next.ptl = NULL;
#endif /* PML_UNIQ_ACCEPT_NEXT_PTL */
OMPI_THREAD_LOCK(&mca_pml_uniq.uniq_lock);
OPAL_THREAD_LOCK(&mca_pml_uniq.uniq_lock);
opal_list_append(&mca_pml_uniq.uniq_procs, (opal_list_item_t*)proc);
OMPI_THREAD_UNLOCK(&mca_pml_uniq.uniq_lock);
OPAL_THREAD_UNLOCK(&mca_pml_uniq.uniq_lock);
}
static void mca_pml_uniq_proc_destruct(mca_pml_proc_t* proc)
{
OMPI_THREAD_LOCK(&mca_pml_uniq.uniq_lock);
OPAL_THREAD_LOCK(&mca_pml_uniq.uniq_lock);
opal_list_remove_item(&mca_pml_uniq.uniq_procs, (opal_list_item_t*)proc);
OMPI_THREAD_UNLOCK(&mca_pml_uniq.uniq_lock);
OPAL_THREAD_UNLOCK(&mca_pml_uniq.uniq_lock);
OBJ_DESTRUCT(&proc->proc_lock);
}

View file

@ -19,7 +19,7 @@
#ifndef MCA_PML_PROC_H
#define MCA_PML_PROC_H
#include "threads/mutex.h"
#include "opal/threads/mutex.h"
#include "communicator/communicator.h"
#include "group/group.h"
#include "proc/proc.h"
@ -52,7 +52,7 @@ extern "C" {
struct mca_pml_proc_t {
opal_list_item_t super;
ompi_proc_t *proc_ompi; /**< back-pointer to ompi_proc_t */
ompi_mutex_t proc_lock; /**< lock to protect against concurrent access */
opal_mutex_t proc_lock; /**< lock to protect against concurrent access */
mca_ptl_proc_t proc_ptl_first; /**< ptl for the first fragment */
#if PML_UNIQ_ACCEPT_NEXT_PTL
mca_ptl_proc_t proc_ptl_next; /**< ptl for the remaining fragments */

View file

@ -22,7 +22,7 @@
static void mca_pml_base_ptl_construct(mca_pml_base_ptl_t* ptl)
{
OBJ_CONSTRUCT(&ptl->ptl_cache, opal_list_t);
OBJ_CONSTRUCT(&ptl->ptl_cache_lock, ompi_mutex_t);
OBJ_CONSTRUCT(&ptl->ptl_cache_lock, opal_mutex_t);
ptl->ptl = NULL;
ptl->ptl_cache_size = 0;
ptl->ptl_cache_alloc = 0;

Some files were not shown because too many files changed in this diff.