changed THREAD_LOCK/THREAD_UNLOCK to OMPI_THREAD_LOCK/OMPI_THREAD_UNLOCK
to prevent conflicts w/ external libraries/headers

This commit was SVN r1467.

This commit is contained in:
parent 7616c4dc01
commit 9b830472fd
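Background on the rename: an unprefixed macro such as THREAD_LOCK can collide with an identically named macro pulled in from an external header, so these macros gain an OMPI_ prefix. A minimal usage sketch follows; the ompi_mutex_t type, the header path, and the locks-only-when-threads-are-enabled semantics are assumptions for illustration, not part of this commit:

    /* Hypothetical sketch of the renamed lock macros in use. */
    #include "threads/mutex.h"   /* assumed header providing ompi_mutex_t */

    static ompi_mutex_t table_lock;

    static void update_table(void)
    {
        /* Assumed semantics: a no-op unless ompi_using_threads() is true. */
        OMPI_THREAD_LOCK(&table_lock);
        /* ... critical section ... */
        OMPI_THREAD_UNLOCK(&table_lock);
    }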
@@ -155,7 +155,7 @@ main(int argc, char **argv)
 
     if (pipe(fd) == -1)
         exit(1);
-    if (fcntl(fd[1], F_SETFL, O_NONBLOCK) == -1)
+    if (fcntl(fd[1], F_SETFL, O_NONBOMPI_LOCK) == -1)
         exit(1);
 
     while ((n = write(fd[1], buf, sizeof(buf))) == sizeof(buf))
@@ -115,7 +115,7 @@ extern "C" {
 #define MPI_MAX_NAME_LEN          MPI_MAX_PORT_NAME /* max port name length */
 #define MPI_ORDER_C               0        /* C row major order */
 #define MPI_ORDER_FORTRAN         1        /* Fortran column major order */
-#define MPI_DISTRIBUTE_BLOCK      0        /* block distribution */
+#define MPI_DISTRIBUTE_BOMPI_LOCK 0        /* block distribution */
 #define MPI_DISTRIBUTE_CYCLIC     1        /* cyclic distribution */
 #define MPI_DISTRIBUTE_NONE       2        /* not distributed */
 #define MPI_DISTRIBUTE_DFLT_DARG  (-1)     /* default distribution arg */
@@ -201,7 +201,7 @@ enum {
 #define MPI_ERR_INFO              34
 #define MPI_ERR_IO                35
 #define MPI_ERR_KEYVAL            36
-#define MPI_ERR_LOCKTYPE          37
+#define MPI_ERR_OMPI_LOCKTYPE     37
 #define MPI_ERR_NAME              38
 #define MPI_ERR_NO_MEM            39
 #define MPI_ERR_NOT_SAME          40
@@ -257,7 +257,7 @@ enum {
   MPI_COMBINER_INDEXED,
   MPI_COMBINER_HINDEXED_INTEGER,
   MPI_COMBINER_HINDEXED,
-  MPI_COMBINER_INDEXED_BLOCK,
+  MPI_COMBINER_INDEXED_BOMPI_LOCK,
   MPI_COMBINER_STRUCT_INTEGER,
   MPI_COMBINER_STRUCT,
   MPI_COMBINER_SUBARRAY,
@@ -71,7 +71,7 @@
       integer MPI_MAX_INFO_KEY, MPI_MAX_INFO_VAL
       integer MPI_MAX_PORT_NAME, MPI_MAX_OBJECT_NAME
       integer MPI_ORDER_C, MPI_ORDER_FORTRAN
-      integer MPI_DISTRIBUTE_BLOCK, MPI_DISTRIBUTE_CYCLIC
+      integer MPI_DISTRIBUTE_BOMPI_LOCK, MPI_DISTRIBUTE_CYCLIC
       integer MPI_DISTRIBUTE_NONE, MPI_DISTRIBUTE_DFLT_DARG
 
       parameter (MPI_ANY_SOURCE=-1)
@@ -103,7 +103,7 @@
       parameter (MPI_MAX_OBJECT_NAME=63)
       parameter (MPI_ORDER_C=0)
       parameter (MPI_ORDER_FORTRAN=1)
-      parameter (MPI_DISTRIBUTE_BLOCK=0)
+      parameter (MPI_DISTRIBUTE_BOMPI_LOCK=0)
       parameter (MPI_DISTRIBUTE_CYCLIC=1)
       parameter (MPI_DISTRIBUTE_NONE=2)
       parameter (MPI_DISTRIBUTE_DFLT_DARG=-1)
@@ -183,7 +183,7 @@
       integer MPI_ERR_INFO
       integer MPI_ERR_IO
       integer MPI_ERR_KEYVAL
-      integer MPI_ERR_LOCKTYPE
+      integer MPI_ERR_OMPI_LOCKTYPE
       integer MPI_ERR_NAME
       integer MPI_ERR_NO_MEM
       integer MPI_ERR_NOT_SAME
@@ -241,7 +241,7 @@
       parameter( MPI_ERR_INFO = 34)
       parameter( MPI_ERR_IO = 35)
       parameter( MPI_ERR_KEYVAL = 36)
-      parameter( MPI_ERR_LOCKTYPE = 37)
+      parameter( MPI_ERR_OMPI_LOCKTYPE = 37)
       parameter( MPI_ERR_NAME = 38)
       parameter( MPI_ERR_NO_MEM = 39)
       parameter( MPI_ERR_NOT_SAME = 40)
@@ -62,7 +62,7 @@ int ompi_free_list_grow(ompi_free_list_t* flist, size_t num_elements);
 
 
 #define OMPI_FREE_LIST_RETURN(fl, item) \
-    THREAD_SCOPED_LOCK(&((fl)->fl_lock), ompi_list_append(&((fl)->super), (item)));
+    OMPI_THREAD_SCOPED_LOCK(&((fl)->fl_lock), ompi_list_append(&((fl)->super), (item)));
 
 #endif
 
@@ -75,7 +75,7 @@ size_t ompi_pointer_array_add(ompi_pointer_array_t *table, void *ptr)
 
     assert(table != NULL);
 
-    THREAD_LOCK(&(table->lock));
+    OMPI_THREAD_LOCK(&(table->lock));
 
     if (table->addr == NULL) {
 
@@ -89,7 +89,7 @@ size_t ompi_pointer_array_add(ompi_pointer_array_t *table, void *ptr)
 
         p = malloc(TABLE_INIT * sizeof(void *));
         if (p == NULL) {
-            THREAD_UNLOCK(&(table->lock));
+            OMPI_THREAD_UNLOCK(&(table->lock));
            return OMPI_ERROR;
         }
         table->lowest_free = 0;
@@ -113,7 +113,7 @@ size_t ompi_pointer_array_add(ompi_pointer_array_t *table, void *ptr)
 
         p = realloc(table->addr, TABLE_GROW * table->size * sizeof(void *));
         if (p == NULL) {
-            THREAD_UNLOCK(&(table->lock));
+            OMPI_THREAD_UNLOCK(&(table->lock));
            return OMPI_ERROR;
         }
         table->lowest_free = table->size;
@@ -160,7 +160,7 @@ size_t ompi_pointer_array_add(ompi_pointer_array_t *table, void *ptr)
                 index, ptr);
     }
 
-    THREAD_UNLOCK(&(table->lock));
+    OMPI_THREAD_UNLOCK(&(table->lock));
 
     return index;
 }
@@ -190,12 +190,12 @@ int ompi_pointer_array_set_item(ompi_pointer_array_t *table, size_t index,
 #endif
 
     /* expand table if required to set a specific index */
-    THREAD_LOCK(&(table->lock));
+    OMPI_THREAD_LOCK(&(table->lock));
     if(table->size <= index) {
         size_t i, new_size = (((index / TABLE_GROW) + 1) * TABLE_GROW);
         void *p = realloc(table->addr, new_size * sizeof(void *));
         if (p == NULL) {
-            THREAD_UNLOCK(&(table->lock));
+            OMPI_THREAD_UNLOCK(&(table->lock));
            return OMPI_ERROR;
         }
         table->number_free += new_size - table->size;
@@ -239,7 +239,7 @@ int ompi_pointer_array_set_item(ompi_pointer_array_t *table, size_t index,
            index, table->addr[index]);
 #endif
 
-    THREAD_UNLOCK(&(table->lock));
+    OMPI_THREAD_UNLOCK(&(table->lock));
     return OMPI_SUCCESS;
 }
 
@@ -274,11 +274,11 @@ int ompi_pointer_array_test_and_set_item (ompi_pointer_array_t *table, size_t in
 #endif
 
     /* expand table if required to set a specific index */
-    THREAD_LOCK(&(table->lock));
+    OMPI_THREAD_LOCK(&(table->lock));
     if ( index < table->size && table->addr[index] != NULL ) {
         /* This element is already in use */
         flag = false;
-        THREAD_UNLOCK(&(table->lock));
+        OMPI_THREAD_UNLOCK(&(table->lock));
         return flag;
     }
 
@@ -286,7 +286,7 @@ int ompi_pointer_array_test_and_set_item (ompi_pointer_array_t *table, size_t in
         size_t i, new_size = (((index / TABLE_GROW) + 1) * TABLE_GROW);
         void *p = realloc(table->addr, new_size * sizeof(void *));
         if (p == NULL) {
-            THREAD_UNLOCK(&(table->lock));
+            OMPI_THREAD_UNLOCK(&(table->lock));
            return OMPI_ERROR;
         }
         table->number_free += new_size - table->size;
@@ -322,6 +322,6 @@ int ompi_pointer_array_test_and_set_item (ompi_pointer_array_t *table, size_t in
            index, table->addr[index]);
 #endif
 
-    THREAD_UNLOCK(&(table->lock));
+    OMPI_THREAD_UNLOCK(&(table->lock));
     return flag;
 }
@@ -68,9 +68,9 @@ int ompi_pointer_array_set_item(ompi_pointer_array_t *array,
 static inline void *ompi_pointer_array_get_item(ompi_pointer_array_t *table, size_t index)
 {
     void *p;
-    THREAD_LOCK(&(table->lock));
+    OMPI_THREAD_LOCK(&(table->lock));
     p = table->addr[index];
-    THREAD_UNLOCK(&(table->lock));
+    OMPI_THREAD_UNLOCK(&(table->lock));
     return p;
 }
 
@@ -89,7 +89,7 @@ int ompi_ddt_set_args( ompi_datatype_t* pData,
         memcpy( pArgs->i + 1, i, i[0][0] * sizeof(int) );
         break;
         /******************************************************************/
-    case MPI_COMBINER_INDEXED_BLOCK:
+    case MPI_COMBINER_INDEXED_BOMPI_LOCK:
         pArgs->i[0] = i[0][0];
         pArgs->i[1] = i[1][0];
         memcpy( pArgs->i + 2, i[2], i[0][0] * sizeof(int) );
@@ -181,7 +181,7 @@ static ompi_convertor_t* pDumpConv = NULL;
         strncpy( (PDATA)->name, MPIDDTNAME, MPI_MAX_OBJECT_NAME ); \
     } while(0)
 
-#define DECLARE_MPI2_COMPOSED_BLOCK_DDT( PDATA, MPIDDT, MPIDDTNAME, MPIType ) \
+#define DECLARE_MPI2_COMPOSED_BOMPI_LOCK_DDT( PDATA, MPIDDT, MPIDDTNAME, MPIType ) \
     do { \
         ompi_datatype_t *ptype; \
         ompi_ddt_create_contiguous( 2, &(basicDatatypes[MPIType]), &ptype ); \
@@ -232,12 +232,12 @@ int ompi_ddt_init( void )
     DECLARE_MPI2_COMPOSED_STRUCT_DDT( ompi_mpi_short_int, DT_SHORT_INT, "MPI_SHORT_INT", short, int, DT_SHORT, DT_INT );
     DECLARE_MPI2_COMPOSED_STRUCT_DDT( ompi_mpi_longdbl_int, DT_LONG_DOUBLE_INT, "MPI_LONG_DOUBLE_INT", long double, int, DT_LONG_DOUBLE, DT_INT );
 
-    DECLARE_MPI2_COMPOSED_BLOCK_DDT( ompi_mpi_2int, DT_2INT, "MPI_2INT", DT_INT );
-    DECLARE_MPI2_COMPOSED_BLOCK_DDT( ompi_mpi_2integer, DT_2INTEGER, "MPI_2INTEGER", DT_INT );
-    DECLARE_MPI2_COMPOSED_BLOCK_DDT( ompi_mpi_2real, DT_2REAL, "MPI_2REAL", DT_FLOAT );
-    DECLARE_MPI2_COMPOSED_BLOCK_DDT( ompi_mpi_2dblprec, DT_2DBLPREC, "MPI_2DOUBLE_PRECISION", DT_DOUBLE );
-    DECLARE_MPI2_COMPOSED_BLOCK_DDT( ompi_mpi_2cplex, DT_2COMPLEX, "MPI_2COMPLEX", DT_COMPLEX_FLOAT );
-    DECLARE_MPI2_COMPOSED_BLOCK_DDT( ompi_mpi_2dblcplex, DT_2DOUBLE_COMPLEX, "MPI_2DOUBLE_COMPLEX", DT_COMPLEX_DOUBLE );
+    DECLARE_MPI2_COMPOSED_BOMPI_LOCK_DDT( ompi_mpi_2int, DT_2INT, "MPI_2INT", DT_INT );
+    DECLARE_MPI2_COMPOSED_BOMPI_LOCK_DDT( ompi_mpi_2integer, DT_2INTEGER, "MPI_2INTEGER", DT_INT );
+    DECLARE_MPI2_COMPOSED_BOMPI_LOCK_DDT( ompi_mpi_2real, DT_2REAL, "MPI_2REAL", DT_FLOAT );
+    DECLARE_MPI2_COMPOSED_BOMPI_LOCK_DDT( ompi_mpi_2dblprec, DT_2DBLPREC, "MPI_2DOUBLE_PRECISION", DT_DOUBLE );
+    DECLARE_MPI2_COMPOSED_BOMPI_LOCK_DDT( ompi_mpi_2cplex, DT_2COMPLEX, "MPI_2COMPLEX", DT_COMPLEX_FLOAT );
+    DECLARE_MPI2_COMPOSED_BOMPI_LOCK_DDT( ompi_mpi_2dblcplex, DT_2DOUBLE_COMPLEX, "MPI_2DOUBLE_COMPLEX", DT_COMPLEX_DOUBLE );
 
     for( i = 0; i < DT_MAX_PREDEFINED; i++ )
         local_sizes[i] = basicDatatypes[i].size;
@@ -237,8 +237,8 @@ int ompi_errclass_init (void)
     ompi_pointer_array_set_item(&ompi_errclasses, MPI_ERR_KEYVAL, &ompi_errclass_keyval);
 
     OBJ_CONSTRUCT(&ompi_errclass_locktype, ompi_errclass_t);
-    ompi_errclass_locktype.class = MPI_ERR_LOCKTYPE;
-    ompi_pointer_array_set_item(&ompi_errclasses, MPI_ERR_LOCKTYPE, &ompi_errclass_locktype);
+    ompi_errclass_locktype.class = MPI_ERR_OMPI_LOCKTYPE;
+    ompi_pointer_array_set_item(&ompi_errclasses, MPI_ERR_OMPI_LOCKTYPE, &ompi_errclass_locktype);
 
     OBJ_CONSTRUCT(&ompi_errclass_name, ompi_errclass_t);
     ompi_errclass_name.class = MPI_ERR_NAME;
@@ -152,10 +152,10 @@ int ompi_errcode_intern_init (void)
                                 &ompi_err_interupted);
 
     OBJ_CONSTRUCT(&ompi_err_would_block, ompi_errcode_intern_t);
-    ompi_err_would_block.code = OMPI_ERR_WOULD_BLOCK;
+    ompi_err_would_block.code = OMPI_ERR_WOULD_BOMPI_LOCK;
     ompi_err_would_block.mpi_code = MPI_ERR_INTERN;
     ompi_err_would_block.index = pos++;
-    strcpy(ompi_err_would_block.errstring, "OMPI_ERR_WOULD_BLOCK");
+    strcpy(ompi_err_would_block.errstring, "OMPI_ERR_WOULD_BOMPI_LOCK");
     ompi_pointer_array_set_item(&ompi_errcodes_intern, ompi_err_would_block.index,
                                 &ompi_err_would_block);
 
@@ -310,10 +310,10 @@ int ompi_mpi_errcode_init (void)
     ompi_pointer_array_set_item(&ompi_mpi_errcodes, MPI_ERR_KEYVAL, &ompi_err_keyval);
 
     OBJ_CONSTRUCT(&ompi_err_locktype, ompi_mpi_errcode_t);
-    ompi_err_locktype.code = MPI_ERR_LOCKTYPE;
-    ompi_err_locktype.class = MPI_ERR_LOCKTYPE;
-    strcpy(ompi_err_locktype.errstring, "MPI_ERR_LOCKTYPE: invalid lock");
-    ompi_pointer_array_set_item(&ompi_mpi_errcodes, MPI_ERR_LOCKTYPE, &ompi_err_locktype);
+    ompi_err_locktype.code = MPI_ERR_OMPI_LOCKTYPE;
+    ompi_err_locktype.class = MPI_ERR_OMPI_LOCKTYPE;
+    strcpy(ompi_err_locktype.errstring, "MPI_ERR_OMPI_LOCKTYPE: invalid lock");
+    ompi_pointer_array_set_item(&ompi_mpi_errcodes, MPI_ERR_OMPI_LOCKTYPE, &ompi_err_locktype);
 
     OBJ_CONSTRUCT(&ompi_err_name, ompi_mpi_errcode_t);
     ompi_err_name.code = MPI_ERR_NAME;
@@ -151,9 +151,9 @@ struct clockinfo {
     int profhz;     /* profiling clock frequency */
 };
 
-#define CLOCK_REALTIME  0
-#define CLOCK_VIRTUAL   1
-#define CLOCK_PROF      2
+#define COMPI_LOCK_REALTIME  0
+#define COMPI_LOCK_VIRTUAL   1
+#define COMPI_LOCK_PROF      2
 
 #define TIMER_RELTIME   0x0 /* relative timer */
 #define TIMER_ABSTIME   0x1 /* absolute timer */
@@ -141,7 +141,7 @@ function provides an interface for single pass execution of pending
 events. The flags
 .Va EVLOOP_ONCE
 and
-.Va EVLOOP_NONBLOCK
+.Va EVLOOP_NONBOMPI_LOCK
 are recognized.
 .Pp
 It is the responsibility of the caller to provide these functions with
@@ -320,13 +320,13 @@ ompi_event_loop(int flags)
     int res, done;
 
     if(ompi_using_threads()) {
-        THREAD_LOCK(&ompi_event_lock);
+        OMPI_THREAD_LOCK(&ompi_event_lock);
     }
 
     /* Calculate the initial events that we are waiting for */
     if (ompi_evsel->recalc && ompi_evsel->recalc(ompi_evbase, 0) == -1) {
         ompi_output(0, "ompi_event_loop: ompi_evsel->recalc() failed.");
-        THREAD_UNLOCK(&ompi_event_lock);
+        OMPI_THREAD_UNLOCK(&ompi_event_lock);
         return (-1);
     }
 
@@ -339,13 +339,13 @@ ompi_event_loop(int flags)
             if (res == -1) {
                 ompi_output(0, "ompi_event_loop: ompi_event_sigcb() failed.");
                 errno = EINTR;
-                THREAD_UNLOCK(&ompi_event_lock);
+                OMPI_THREAD_UNLOCK(&ompi_event_lock);
                 return (-1);
             }
         }
     }
 
-    if (!(flags & OMPI_EVLOOP_NONBLOCK)) {
+    if (!(flags & OMPI_EVLOOP_NONBOMPI_LOCK)) {
         static struct timeval dflt = OMPI_TIMEOUT_DEFAULT;
         tv = dflt;
     } else
@@ -360,7 +360,7 @@ ompi_event_loop(int flags)
 #endif
         if (res == -1) {
             ompi_output(0, "ompi_event_loop: ompi_evesel->dispatch() failed.");
-            THREAD_UNLOCK(&ompi_event_lock);
+            OMPI_THREAD_UNLOCK(&ompi_event_lock);
             return (-1);
         }
 
@@ -384,16 +384,16 @@ ompi_event_loop(int flags)
             ompi_event_process_active();
             if (flags & OMPI_EVLOOP_ONCE)
                 done = 1;
-        } else if (flags & (OMPI_EVLOOP_NONBLOCK|OMPI_EVLOOP_ONCE))
+        } else if (flags & (OMPI_EVLOOP_NONBOMPI_LOCK|OMPI_EVLOOP_ONCE))
             done = 1;
 
         if (ompi_evsel->recalc && ompi_evsel->recalc(ompi_evbase, 0) == -1) {
             ompi_output(0, "ompi_event_loop: ompi_evesel->recalc() failed.");
-            THREAD_UNLOCK(&ompi_event_lock);
+            OMPI_THREAD_UNLOCK(&ompi_event_lock);
             return (-1);
         }
     }
-    THREAD_UNLOCK(&ompi_event_lock);
+    OMPI_THREAD_UNLOCK(&ompi_event_lock);
     return (0);
 }
 
 
@@ -139,7 +139,7 @@ struct ompi_eventop {
 
 #define OMPI_TIMEOUT_DEFAULT    {10, 0}
 #define OMPI_EVLOOP_ONCE        0x01
-#define OMPI_EVLOOP_NONBLOCK    0x02
+#define OMPI_EVLOOP_NONBOMPI_LOCK    0x02
 
 
 int ompi_event_init(void);
@@ -161,7 +161,7 @@ rtsig_init(void)
     sigemptyset(&op->sigs);
     sigaddset(&op->sigs, SIGIO);
     sigaddset(&op->sigs, SIGRTMIN);
-    sigprocmask(SIG_BLOCK, &op->sigs, NULL);
+    sigprocmask(SIG_BOMPI_LOCK, &op->sigs, NULL);
 
     return (op);
 }
@@ -177,7 +177,7 @@ rtsig_add(void *arg, struct event *ev)
 
     if (ev->ev_events & EV_SIGNAL) {
         sigaddset(&op->sigs, EVENT_SIGNAL(ev));
-        return sigprocmask(SIG_BLOCK, &op->sigs, NULL);
+        return sigprocmask(SIG_BOMPI_LOCK, &op->sigs, NULL);
     }
 
     if (!(ev->ev_events & (EV_READ | EV_WRITE))) return 0;
@@ -232,7 +232,7 @@ rtsig_del(void *arg, struct event *ev)
 
         sigemptyset(&sigs);
         sigaddset(&sigs, EVENT_SIGNAL(ev));
-        return (sigprocmask(SIG_UNBLOCK, &sigs, NULL));
+        return (sigprocmask(SIG_UNBOMPI_LOCK, &sigs, NULL));
     }
 
     if (!(ev->ev_events & (EV_READ | EV_WRITE)))
@@ -101,9 +101,9 @@ main (int argc, char **argv)
 
     /* Linux pipes are broken, we need O_RDWR instead of O_RDONLY */
 #ifdef __linux
-    socket = open (fifo, O_RDWR | O_NONBLOCK, 0);
+    socket = open (fifo, O_RDWR | O_NONBOMPI_LOCK, 0);
 #else
-    socket = open (fifo, O_RDONLY | O_NONBLOCK, 0);
+    socket = open (fifo, O_RDONLY | O_NONBOMPI_LOCK, 0);
 #endif
 
     if (socket == -1) {
@@ -88,7 +88,7 @@ run_once(void)
         ompi_event_add(&events[i], NULL);
     }
 
-    ompi_event_loop(OMPI_EVLOOP_ONCE | OMPI_EVLOOP_NONBLOCK);
+    ompi_event_loop(OMPI_EVLOOP_ONCE | OMPI_EVLOOP_NONBOMPI_LOCK);
 
     fired = 0;
     space = num_pipes / num_active;
@@ -101,7 +101,7 @@ run_once(void)
     { int xcount = 0;
     gettimeofday(&ts, NULL);
     do {
-        ompi_event_loop(OMPI_EVLOOP_ONCE | OMPI_EVLOOP_NONBLOCK);
+        ompi_event_loop(OMPI_EVLOOP_ONCE | OMPI_EVLOOP_NONBOMPI_LOCK);
         xcount++;
     } while (count != fired);
     gettimeofday(&te, NULL);
@@ -20,7 +20,7 @@ enum {
     OMPI_ERR_NOT_IMPLEMENTED = -10,
     OMPI_ERR_NOT_SUPPORTED = -11,
     OMPI_ERR_INTERUPTED = -12,
-    OMPI_ERR_WOULD_BLOCK = -13,
+    OMPI_ERR_WOULD_BOMPI_LOCK = -13,
     OMPI_ERR_IN_ERRNO = -14,
     OMPI_ERR_UNREACH = -15,
     OMPI_ERR_NOT_FOUND = -16,
@@ -11,10 +11,10 @@
 
 
 #ifdef HAVE_SMP
-#define SMPLOCK "lock; "
+#define SMPOMPI_LOCK "lock; "
 #define MB() __asm__ __volatile__("": : :"memory")
 #else
-#define SMPLOCK
+#define SMPOMPI_LOCK
 #define MB()
 #endif
 
@@ -44,7 +44,7 @@ static inline int ompi_atomic_cmpset_32(volatile uint32_t *addr,
    uint32_t ret = oldval;
 
    __asm__ __volatile (
-SMPLOCK "cmpxchgl %1,%2   \n\
+SMPOMPI_LOCK "cmpxchgl %1,%2   \n\
         setz     %%al     \n\
         movzbl   %%al,%0  \n"
    : "+a" (ret)
@@ -78,7 +78,7 @@ static inline int ompi_atomic_cmpset_64(volatile uint64_t *addr,
    uint64_t ret = oldval;
 
    __asm__ __volatile (
-SMPLOCK "cmpxchgq %1,%2   \n\
+SMPOMPI_LOCK "cmpxchgq %1,%2   \n\
         setz     %%al     \n\
         movzbl   %%al,%0  \n"
    : "+a" (ret)
@@ -11,10 +11,10 @@
 
 
 #ifdef HAVE_SMP
-#define SMPLOCK "lock; "
+#define SMPOMPI_LOCK "lock; "
 #define MB() __asm__ __volatile__("": : :"memory")
 #else
-#define SMPLOCK
+#define SMPOMPI_LOCK
 #define MB()
 #endif
 
@@ -44,7 +44,7 @@ static inline int ompi_atomic_cmpset_32(volatile uint32_t *addr,
    uint32_t ret = oldval;
 
    __asm__ __volatile (
-SMPLOCK "cmpxchgl %1,%2   \n\
+SMPOMPI_LOCK "cmpxchgl %1,%2   \n\
         setz     %%al     \n\
         movzbl   %%al,%0  \n"
    : "+a" (ret)
@@ -85,7 +85,7 @@ static inline int ompi_atomic_cmpset_64(volatile uint64_t *addr,
    struct { uint32_t lo; uint32_t hi; } *p = (struct lwords *) &newval;
 
    __asm__ __volatile(
-SMPLOCK "cmpxchg8b %1\n"
+SMPOMPI_LOCK "cmpxchg8b %1\n"
    : "+A" (ret)
    : "m" (*addr), "b" (p->lo), "c" (p->hi)
    : "memory");
@@ -72,7 +72,7 @@ void * mca_allocator_bucket_alloc(mca_allocator_t * mem, size_t size)
         bucket_num++;
     }
     /* now that we know what bucket it will come from, we must get the lock */
-    THREAD_LOCK(&(mem_options->buckets[bucket_num].lock));
+    OMPI_THREAD_LOCK(&(mem_options->buckets[bucket_num].lock));
     /* see if there is already a free chunk */
     if(NULL != mem_options->buckets[bucket_num].free_chunk) {
         chunk = mem_options->buckets[bucket_num].free_chunk;
@@ -81,7 +81,7 @@ void * mca_allocator_bucket_alloc(mca_allocator_t * mem, size_t size)
         /* go past the header */
         chunk += 1;
         /*release the lock */
-        THREAD_UNLOCK(&(mem_options->buckets[bucket_num].lock));
+        OMPI_THREAD_UNLOCK(&(mem_options->buckets[bucket_num].lock));
         return((void *) chunk);
     }
     /* figure out the size of bucket we need */
@@ -95,7 +95,7 @@ void * mca_allocator_bucket_alloc(mca_allocator_t * mem, size_t size)
                  mem_options->get_mem_fn(&allocated_size);
     if(NULL == segment_header) {
         /* release the lock */
-        THREAD_UNLOCK(&(mem_options->buckets[bucket_num].lock));
+        OMPI_THREAD_UNLOCK(&(mem_options->buckets[bucket_num].lock));
         return(NULL);
     }
     /* if were allocated more memory then we actually need, then we will try to
@@ -124,7 +124,7 @@ void * mca_allocator_bucket_alloc(mca_allocator_t * mem, size_t size)
         first_chunk->next_in_segment = first_chunk;
     }
     first_chunk->u.bucket = bucket_num;
-    THREAD_UNLOCK(&(mem_options->buckets[bucket_num].lock));
+    OMPI_THREAD_UNLOCK(&(mem_options->buckets[bucket_num].lock));
     /* return the memory moved past the header */
     return((void *) (first_chunk + 1));
 }
@@ -185,7 +185,7 @@ void * mca_allocator_bucket_alloc_align(mca_allocator_t * mem, size_t size, size
     allocated_size -= aligned_max_size;
     chunk = segment_header->first_chunk = first_chunk;
     /* we now need to get a lock on the bucket */
-    THREAD_LOCK(&(mem_options->buckets[bucket_num].lock));
+    OMPI_THREAD_LOCK(&(mem_options->buckets[bucket_num].lock));
     /* add the segment into the segment list */
     segment_header->next_segment = mem_options->buckets[bucket_num].segment_head;
     mem_options->buckets[bucket_num].segment_head = segment_header;
@@ -209,7 +209,7 @@ void * mca_allocator_bucket_alloc_align(mca_allocator_t * mem, size_t size, size
         first_chunk->next_in_segment = first_chunk;
     }
     first_chunk->u.bucket = bucket_num;
-    THREAD_UNLOCK(&(mem_options->buckets[bucket_num].lock));
+    OMPI_THREAD_UNLOCK(&(mem_options->buckets[bucket_num].lock));
     /* return the aligned memory */
     return((void *) (aligned_memory));
 }
@@ -260,10 +260,10 @@ void mca_allocator_bucket_free(mca_allocator_t * mem, void * ptr)
     mca_allocator_bucket_t * mem_options = (mca_allocator_bucket_t *) mem;
     mca_allocator_bucket_chunk_header_t * chunk = (mca_allocator_bucket_chunk_header_t *) ptr - 1;
     int bucket_num = chunk->u.bucket;
-    THREAD_LOCK(&(mem_options->buckets[bucket_num].lock));
+    OMPI_THREAD_LOCK(&(mem_options->buckets[bucket_num].lock));
     chunk->u.next_free = mem_options->buckets[bucket_num].free_chunk;
     mem_options->buckets[bucket_num].free_chunk = chunk;
-    THREAD_UNLOCK(&(mem_options->buckets[bucket_num].lock));
+    OMPI_THREAD_UNLOCK(&(mem_options->buckets[bucket_num].lock));
 }
 
 /*
@@ -284,7 +284,7 @@ int mca_allocator_bucket_cleanup(mca_allocator_t * mem)
     bool empty = true;
 
     for(i = 0; i < mem_options->num_buckets; i++) {
-        THREAD_LOCK(&(mem_options->buckets[i].lock));
+        OMPI_THREAD_LOCK(&(mem_options->buckets[i].lock));
         segment_header = &(mem_options->buckets[i].segment_head);
         /* traverse the list of segment headers until we hit NULL */
         while(NULL != *segment_header) {
@@ -326,7 +326,7 @@ int mca_allocator_bucket_cleanup(mca_allocator_t * mem)
             empty = true;
         }
         /* relese the lock on the bucket */
-        THREAD_UNLOCK(&(mem_options->buckets[i].lock));
+        OMPI_THREAD_UNLOCK(&(mem_options->buckets[i].lock));
     }
     return(OMPI_SUCCESS);
 }
@@ -131,24 +131,24 @@ int mca_base_modex_send(mca_base_module_t *source_module, const void *buffer, si
     if(NULL == self)
         return OMPI_ERROR;
 
-    THREAD_LOCK(&self->proc_lock);
+    OMPI_THREAD_LOCK(&self->proc_lock);
     if(NULL == (modex = self->proc_modex)) {
         self->proc_modex = modex = OBJ_NEW(mca_base_modex_t);
     }
 
     if(NULL == (modex_module = mca_base_modex_create_module(modex, source_module))) {
-        THREAD_UNLOCK(&self->proc_lock);
+        OMPI_THREAD_UNLOCK(&self->proc_lock);
         return OMPI_ERROR;
     }
 
     modex_module->module_data = malloc(size);
     if(NULL == modex_module->module_data) {
-        THREAD_UNLOCK(&self->proc_lock);
+        OMPI_THREAD_UNLOCK(&self->proc_lock);
         return OMPI_ERR_OUT_OF_RESOURCE;
     }
     memcpy(modex_module->module_data, buffer, size);
     modex_module->module_data_size = size;
-    THREAD_UNLOCK(&self->proc_lock);
+    OMPI_THREAD_UNLOCK(&self->proc_lock);
     return OMPI_SUCCESS;
 }
 
@@ -165,29 +165,29 @@ int mca_base_modex_recv(mca_base_module_t *module, ompi_proc_t *source_proc, voi
     mca_base_modex_module_t* modex_module;
     void *copy;
 
-    THREAD_LOCK(&source_proc->proc_lock);
+    OMPI_THREAD_LOCK(&source_proc->proc_lock);
     if(NULL == (modex = source_proc->proc_modex) ||
        NULL == (modex_module = mca_base_modex_lookup_module(modex, module))) {
-        THREAD_UNLOCK(&source_proc->proc_lock);
+        OMPI_THREAD_UNLOCK(&source_proc->proc_lock);
         return OMPI_ERR_NOT_FOUND;
     }
 
     if(0 == modex_module->module_data_size) {
         *buffer = NULL;
         *size = 0;
-        THREAD_UNLOCK(&source_proc->proc_lock);
+        OMPI_THREAD_UNLOCK(&source_proc->proc_lock);
         return OMPI_SUCCESS;
     }
 
     copy = malloc(modex_module->module_data_size);
     if(NULL == copy) {
-        THREAD_UNLOCK(&source_proc->proc_lock);
+        OMPI_THREAD_UNLOCK(&source_proc->proc_lock);
         return OMPI_ERR_OUT_OF_RESOURCE;
     }
     memcpy(copy, modex_module->module_data, modex_module->module_data_size);
     *buffer = copy;
     *size = modex_module->module_data_size;
-    THREAD_UNLOCK(&source_proc->proc_lock);
+    OMPI_THREAD_UNLOCK(&source_proc->proc_lock);
     return OMPI_SUCCESS;
 }
 
@@ -221,7 +221,7 @@ int mca_base_modex_exchange(void)
     }
 
     /* loop through all modules with data cached on local process and send to all peers */
-    THREAD_LOCK(&self->proc_lock);
+    OMPI_THREAD_LOCK(&self->proc_lock);
     for(self_module = (mca_base_modex_module_t*)ompi_list_get_first(&modex->modex_modules);
         self_module != (mca_base_modex_module_t*)ompi_list_get_end(&modex->modex_modules);
         self_module = (mca_base_modex_module_t*)ompi_list_get_next(self_module)) {
@@ -241,7 +241,7 @@ int mca_base_modex_exchange(void)
             self_module->module_data_size);
         if(rc != OMPI_SUCCESS) {
             free(procs);
-            THREAD_UNLOCK(&self->proc_lock);
+            OMPI_THREAD_UNLOCK(&self->proc_lock);
             return rc;
         }
     }
@@ -261,20 +261,20 @@ int mca_base_modex_exchange(void)
         if(proc == self)
             continue;
 
-        THREAD_LOCK(&proc->proc_lock);
+        OMPI_THREAD_LOCK(&proc->proc_lock);
         if(NULL == proc->proc_modex) {
             proc->proc_modex = OBJ_NEW(mca_base_modex_t);
             if(NULL == proc->proc_modex) {
                 free(procs);
-                THREAD_UNLOCK(&self->proc_lock);
+                OMPI_THREAD_UNLOCK(&self->proc_lock);
                 return OMPI_ERR_OUT_OF_RESOURCE;
             }
         }
         proc_module = mca_base_modex_create_module(proc->proc_modex, self_module->module);
         if(NULL == proc_module) {
            free(procs);
-            THREAD_UNLOCK(&proc->proc_lock);
-            THREAD_UNLOCK(&self->proc_lock);
+            OMPI_THREAD_UNLOCK(&proc->proc_lock);
+            OMPI_THREAD_UNLOCK(&self->proc_lock);
            return OMPI_ERR_OUT_OF_RESOURCE;
         }
 
@@ -286,15 +286,15 @@ int mca_base_modex_exchange(void)
             &proc_module->module_data_size);
         if(rc != OMPI_SUCCESS) {
             free(procs);
-            THREAD_UNLOCK(&proc->proc_lock);
-            THREAD_UNLOCK(&self->proc_lock);
+            OMPI_THREAD_UNLOCK(&proc->proc_lock);
+            OMPI_THREAD_UNLOCK(&self->proc_lock);
             return rc;
         }
-        THREAD_UNLOCK(&proc->proc_lock);
+        OMPI_THREAD_UNLOCK(&proc->proc_lock);
         }
     }
     free(procs);
-    THREAD_UNLOCK(&self->proc_lock);
+    OMPI_THREAD_UNLOCK(&self->proc_lock);
     return OMPI_SUCCESS;
 }
 
@@ -24,9 +24,9 @@ int mca_io_romio_File_open(MPI_Comm comm, char *filename, int amode,
 
     romio_fh = mca_romio_fh->romio_fh;
 
-    THREAD_LOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_LOCK(&mca_io_romio_mutex);
     ret=mca_io_romio_MPI_File_open(comm,filename,amode,info,&romio_fh);
-    THREAD_UNLOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_UNLOCK(&mca_io_romio_mutex);
 
     return ret;
 }
@@ -41,9 +41,9 @@ int mca_io_romio_File_close(MPI_File *fh) {
     mca_romio_fh = (mca_io_romio_file_t *)(*fh);
     romio_fh = mca_romio_fh->romio_fh;
 
-    THREAD_LOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_LOCK(&mca_io_romio_mutex);
     ret=mca_io_romio_MPI_File_close(&romio_fh);
-    THREAD_UNLOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_UNLOCK(&mca_io_romio_mutex);
 
     free(*fh);
 
@@ -55,9 +55,9 @@ int mca_io_romio_File_close(MPI_File *fh) {
 int mca_io_romio_File_delete(char *filename, MPI_Info info) {
     int ret;
 
-    THREAD_LOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_LOCK(&mca_io_romio_mutex);
     ret=mca_io_romio_MPI_File_delete(filename, info);
-    THREAD_UNLOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_UNLOCK(&mca_io_romio_mutex);
 
     return ret;
 }
@@ -72,9 +72,9 @@ int mca_io_romio_File_set_size(MPI_File fh, MPI_Offset size){
     mca_romio_fh = (mca_io_romio_file_t *) fh;
     romio_fh = mca_romio_fh->romio_fh;
 
-    THREAD_LOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_LOCK(&mca_io_romio_mutex);
     ret=mca_io_romio_MPI_File_set_size(romio_fh, size);
-    THREAD_UNLOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_UNLOCK(&mca_io_romio_mutex);
 
     return ret;
 
@@ -90,9 +90,9 @@ int mca_io_romio_File_preallocate(MPI_File fh, MPI_Offset size){
     mca_romio_fh = (mca_io_romio_file_t *) fh;
     romio_fh = mca_romio_fh->romio_fh;
 
-    THREAD_LOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_LOCK(&mca_io_romio_mutex);
     ret=mca_io_romio_MPI_File_preallocate(romio_fh,size);
-    THREAD_UNLOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_UNLOCK(&mca_io_romio_mutex);
 
     return ret;
 }
@@ -107,9 +107,9 @@ int mca_io_romio_File_get_size(MPI_File fh, MPI_Offset *size){
     mca_romio_fh = (mca_io_romio_file_t *) fh;
     romio_fh = mca_romio_fh->romio_fh;
 
-    THREAD_LOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_LOCK(&mca_io_romio_mutex);
     ret=mca_io_romio_MPI_File_get_size(romio_fh,size);
-    THREAD_UNLOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_UNLOCK(&mca_io_romio_mutex);
 
     return ret;
 }
@@ -124,9 +124,9 @@ int mca_io_romio_File_get_group(MPI_File fh, MPI_Group *group){
     mca_romio_fh = (mca_io_romio_file_t *) fh;
     romio_fh = mca_romio_fh->romio_fh;
 
-    THREAD_LOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_LOCK(&mca_io_romio_mutex);
     ret=mca_io_romio_MPI_File_get_group(romio_fh,group);
-    THREAD_UNLOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_UNLOCK(&mca_io_romio_mutex);
 
     return ret;
 }
@@ -141,9 +141,9 @@ int mca_io_romio_File_get_amode(MPI_File fh, int *amode){
     mca_romio_fh = (mca_io_romio_file_t *) fh;
     romio_fh = mca_romio_fh->romio_fh;
 
-    THREAD_LOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_LOCK(&mca_io_romio_mutex);
     ret=mca_io_romio_MPI_File_get_amode(romio_fh, amode);
-    THREAD_UNLOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_UNLOCK(&mca_io_romio_mutex);
 
     return ret;
 }
@@ -158,9 +158,9 @@ int mca_io_romio_File_set_info(MPI_File fh, MPI_Info info){
     mca_romio_fh = (mca_io_romio_file_t *) fh;
     romio_fh = mca_romio_fh->romio_fh;
 
-    THREAD_LOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_LOCK(&mca_io_romio_mutex);
     ret=mca_io_romio_MPI_File_set_info(romio_fh,info);
-    THREAD_UNLOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_UNLOCK(&mca_io_romio_mutex);
 
     return ret;
 }
@@ -175,9 +175,9 @@ int mca_io_romio_File_get_info(MPI_File fh, MPI_Info *info_used){
     mca_romio_fh = (mca_io_romio_file_t *) fh;
     romio_fh = mca_romio_fh->romio_fh;
 
-    THREAD_LOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_LOCK(&mca_io_romio_mutex);
     ret=mca_io_romio_MPI_File_get_info(romio_fh,info_used);
-    THREAD_UNLOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_UNLOCK(&mca_io_romio_mutex);
 
     return ret;
 }
@@ -194,9 +194,9 @@ int mca_io_romio_File_set_view(MPI_File fh, MPI_Offset disp, MPI_Datatype etype,
     mca_romio_fh = (mca_io_romio_file_t *) fh;
     romio_fh = mca_romio_fh->romio_fh;
 
-    THREAD_LOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_LOCK(&mca_io_romio_mutex);
     ret=mca_io_romio_MPI_File_set_view(romio_fh,disp,etype,filetype,datarep,info);
-    THREAD_UNLOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_UNLOCK(&mca_io_romio_mutex);
 
     return ret;
 
@@ -211,9 +211,9 @@ int mca_io_romio_File_get_view(MPI_File fh, MPI_Offset *disp, MPI_Datatype *etyp
     mca_romio_fh = (mca_io_romio_file_t *) fh;
     romio_fh = mca_romio_fh->romio_fh;
 
-    THREAD_LOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_LOCK(&mca_io_romio_mutex);
     ret=mca_io_romio_MPI_File_get_view(romio_fh,disp,etype,filetype,datarep);
-    THREAD_UNLOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_UNLOCK(&mca_io_romio_mutex);
 
     return ret;
 
@@ -231,9 +231,9 @@ int mca_io_romio_File_get_type_extent(MPI_File fh, MPI_Datatype datatype, MPI_Ai
     mca_romio_fh = (mca_io_romio_file_t *) fh;
     romio_fh = mca_romio_fh->romio_fh;
 
-    THREAD_LOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_LOCK(&mca_io_romio_mutex);
     ret=mca_io_romio_MPI_File_get_type_extent(romio_fh,datatype,extent);
-    THREAD_UNLOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_UNLOCK(&mca_io_romio_mutex);
 
     return ret;
 }
@@ -248,9 +248,9 @@ int mca_io_romio_File_set_atomicity(MPI_File fh, int flag){
     mca_romio_fh = (mca_io_romio_file_t *) fh;
     romio_fh = mca_romio_fh->romio_fh;
 
-    THREAD_LOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_LOCK(&mca_io_romio_mutex);
     ret=mca_io_romio_MPI_File_set_atomicity(romio_fh,flag);
-    THREAD_UNLOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_UNLOCK(&mca_io_romio_mutex);
 
     return ret;
 }
@@ -264,9 +264,9 @@ int mca_io_romio_File_get_atomicity(MPI_File fh, int *flag){
     mca_romio_fh = (mca_io_romio_file_t *) fh;
     romio_fh = mca_romio_fh->romio_fh;
 
-    THREAD_LOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_LOCK(&mca_io_romio_mutex);
     ret=mca_io_romio_MPI_File_get_atomicity(romio_fh,flag);
-    THREAD_UNLOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_UNLOCK(&mca_io_romio_mutex);
 
     return ret;
 }
@@ -280,9 +280,9 @@ int mca_io_romio_File_sync(MPI_File fh){
     mca_romio_fh = (mca_io_romio_file_t *) fh;
     romio_fh = mca_romio_fh->romio_fh;
 
-    THREAD_LOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_LOCK(&mca_io_romio_mutex);
     ret=mca_io_romio_MPI_File_sync(romio_fh);
-    THREAD_UNLOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_UNLOCK(&mca_io_romio_mutex);
 
     return ret;
 }
@@ -300,9 +300,9 @@ int mca_io_romio_File_seek_shared(MPI_File fh, MPI_Offset offset, int whence){
     mca_romio_fh = (mca_io_romio_file_t *) fh;
     romio_fh = mca_romio_fh->romio_fh;
 
-    THREAD_LOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_LOCK(&mca_io_romio_mutex);
     ret=mca_io_romio_MPI_File_seek_shared(romio_fh, offset, whence);
-    THREAD_UNLOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_UNLOCK(&mca_io_romio_mutex);
 
     return ret;
 }
@@ -315,9 +315,9 @@ int mca_io_romio_File_get_position_shared(MPI_File fh, MPI_Offset *offset){
     mca_romio_fh = (mca_io_romio_file_t *) fh;
     romio_fh = mca_romio_fh->romio_fh;
 
-    THREAD_LOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_LOCK(&mca_io_romio_mutex);
     ret=mca_io_romio_MPI_File_get_position_shared(romio_fh,offset);
-    THREAD_UNLOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_UNLOCK(&mca_io_romio_mutex);
 
     return ret;
 }
@@ -331,9 +331,9 @@ int mca_io_romio_File_seek(MPI_File fh, MPI_Offset offset, int whence){
     mca_romio_fh = (mca_io_romio_file_t *) fh;
     romio_fh = mca_romio_fh->romio_fh;
 
-    THREAD_LOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_LOCK(&mca_io_romio_mutex);
     ret=mca_io_romio_MPI_File_seek(romio_fh,offset,whence);
-    THREAD_UNLOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_UNLOCK(&mca_io_romio_mutex);
 
     return ret;
 }
@@ -346,9 +346,9 @@ int mca_io_romio_File_get_position(MPI_File fh, MPI_Offset *offset){
     mca_romio_fh = (mca_io_romio_file_t *) fh;
     romio_fh = mca_romio_fh->romio_fh;
 
-    THREAD_LOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_LOCK(&mca_io_romio_mutex);
     ret=mca_io_romio_MPI_File_get_position(romio_fh,offset);
-    THREAD_UNLOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_UNLOCK(&mca_io_romio_mutex);
 
     return ret;
 }
@@ -361,9 +361,9 @@ int mca_io_romio_File_get_byte_offset(MPI_File fh, MPI_Offset offset, MPI_Offset
     mca_romio_fh = (mca_io_romio_file_t *) fh;
     romio_fh = mca_romio_fh->romio_fh;
 
-    THREAD_LOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_LOCK(&mca_io_romio_mutex);
     ret=mca_io_romio_MPI_File_get_byte_offset(romio_fh,offset,disp);
-    THREAD_UNLOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_UNLOCK(&mca_io_romio_mutex);
 
     return ret;
 }
@@ -378,9 +378,9 @@ int mca_io_romio_File_set_errhandler(MPI_File fh, MPI_Errhandler eh){
     mca_romio_fh = (mca_io_romio_file_t *) fh;
     romio_fh = mca_romio_fh->romio_fh;
 
-    THREAD_LOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_LOCK(&mca_io_romio_mutex);
     ret=mca_io_romio_MPI_File_set_errhandler(romio_fh,eh);
-    THREAD_UNLOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_UNLOCK(&mca_io_romio_mutex);
 
     return ret;
 }
@@ -393,9 +393,9 @@ int mca_io_romio_File_get_errhandler(MPI_File fh, MPI_Errhandler *eh ){
     mca_romio_fh = (mca_io_romio_file_t *) fh;
     romio_fh = mca_romio_fh->romio_fh;
 
-    THREAD_LOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_LOCK(&mca_io_romio_mutex);
     ret=mca_io_romio_MPI_File_get_errhandler(romio_fh,eh );
-    THREAD_UNLOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_UNLOCK(&mca_io_romio_mutex);
 
     return ret;
 }
@@ -16,9 +16,9 @@ int mca_io_romio_File_read_at(MPI_File fh, MPI_Offset offset, void *buf,int coun
     mca_romio_fh = (mca_io_romio_file_t *) fh;
     romio_fh = mca_romio_fh->romio_fh;
 
-    THREAD_LOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_LOCK(&mca_io_romio_mutex);
     ret=mca_io_romio_MPI_File_read_at(romio_fh,offset,buf,count,datatype,status);
-    THREAD_UNLOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_UNLOCK(&mca_io_romio_mutex);
 
     return ret;
 
@@ -33,9 +33,9 @@ int mca_io_romio_File_read_at_all(MPI_File fh, MPI_Offset offset, void *buf,int
     mca_romio_fh = (mca_io_romio_file_t *) fh;
     romio_fh = mca_romio_fh->romio_fh;
 
-    THREAD_LOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_LOCK(&mca_io_romio_mutex);
     ret=mca_io_romio_MPI_File_read_at_all(romio_fh,offset,buf,count,datatype,status);
-    THREAD_UNLOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_UNLOCK(&mca_io_romio_mutex);
 
     return ret;
 }
@@ -61,10 +61,10 @@ int mca_io_romio_File_iread_at(MPI_File fh, MPI_Offset offset, void *buf, int co
     romio_fh = mca_romio_fh->romio_fh;
 
 
-    THREAD_LOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_LOCK(&mca_io_romio_mutex);
     ret=mca_io_romio_MPI_File_iread_at(romio_fh,offset,buf,count,datatype,
                                        &romio_rq);
-    THREAD_UNLOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_UNLOCK(&mca_io_romio_mutex);
 
     return ret;
 }
@@ -82,9 +82,9 @@ int mca_io_romio_File_read(MPI_File fh, void *buf, int count, MPI_Datatype datat
     mca_romio_fh = (mca_io_romio_file_t *) fh;
     romio_fh = mca_romio_fh->romio_fh;
 
-    THREAD_LOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_LOCK(&mca_io_romio_mutex);
     ret=mca_io_romio_MPI_File_read(romio_fh,buf,count,datatype,status);
-    THREAD_UNLOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_UNLOCK(&mca_io_romio_mutex);
 
     return ret;
 }
@@ -97,9 +97,9 @@ int mca_io_romio_File_read_all(MPI_File fh, void *buf, int count, MPI_Datatype d
     mca_romio_fh = (mca_io_romio_file_t *) fh;
     romio_fh = mca_romio_fh->romio_fh;
 
-    THREAD_LOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_LOCK(&mca_io_romio_mutex);
     ret=mca_io_romio_MPI_File_read_all(romio_fh,buf,count,datatype,status);
-    THREAD_UNLOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_UNLOCK(&mca_io_romio_mutex);
 
     return ret;
 }
@@ -123,9 +123,9 @@ int mca_io_romio_File_iread(MPI_File fh, void *buf, int count, MPI_Datatype data
     romio_fh = mca_romio_fh->romio_fh;
 
 
-    THREAD_LOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_LOCK(&mca_io_romio_mutex);
     ret=mca_io_romio_MPI_File_iread(romio_fh,buf,count,datatype,&romio_rq);
-    THREAD_UNLOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_UNLOCK(&mca_io_romio_mutex);
 
     return ret;
 }
@@ -138,9 +138,9 @@ int mca_io_romio_File_read_shared(MPI_File fh, void *buf, int count, MPI_Datatyp
     mca_romio_fh = (mca_io_romio_file_t *) fh;
     romio_fh = mca_romio_fh->romio_fh;
 
-    THREAD_LOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_LOCK(&mca_io_romio_mutex);
     ret=mca_io_romio_MPI_File_read_shared(romio_fh, buf, count, datatype,status);
-    THREAD_UNLOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_UNLOCK(&mca_io_romio_mutex);
 
     return ret;
 }
@@ -164,9 +164,9 @@ int mca_io_romio_File_iread_shared(MPI_File fh, void *buf, int count, MPI_Dataty
     romio_fh = mca_romio_fh->romio_fh;
 
 
-    THREAD_LOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_LOCK(&mca_io_romio_mutex);
     ret=mca_io_romio_MPI_File_iread_shared(romio_fh,buf,count,datatype,&romio_rq);
-    THREAD_UNLOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_UNLOCK(&mca_io_romio_mutex);
 
     return ret;
 }
@@ -179,9 +179,9 @@ int mca_io_romio_File_read_ordered(MPI_File fh, void *buf, int count, MPI_Dataty
     mca_romio_fh = (mca_io_romio_file_t *) fh;
     romio_fh = mca_romio_fh->romio_fh;
 
-    THREAD_LOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_LOCK(&mca_io_romio_mutex);
     ret=mca_io_romio_MPI_File_read_ordered(romio_fh,buf,count,datatype,status);
-    THREAD_UNLOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_UNLOCK(&mca_io_romio_mutex);
 
     return ret;
 }
@@ -194,9 +194,9 @@ int mca_io_romio_File_read_at_all_begin(MPI_File fh, MPI_Offset offset, void *bu
     mca_romio_fh = (mca_io_romio_file_t *) fh;
     romio_fh = mca_romio_fh->romio_fh;
 
-    THREAD_LOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_LOCK(&mca_io_romio_mutex);
     ret=mca_io_romio_MPI_File_read_at_all_begin(romio_fh,offset,buf,count,datatype);
-    THREAD_UNLOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_UNLOCK(&mca_io_romio_mutex);
 
    return ret;
 }
@@ -209,9 +209,9 @@ int mca_io_romio_File_read_at_all_end(MPI_File fh, void *buf, MPI_Status *status
     mca_romio_fh = (mca_io_romio_file_t *) fh;
     romio_fh = mca_romio_fh->romio_fh;
 
-    THREAD_LOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_LOCK(&mca_io_romio_mutex);
     ret=mca_io_romio_MPI_File_read_at_all_end(romio_fh, buf, status);
-    THREAD_UNLOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_UNLOCK(&mca_io_romio_mutex);
 
     return ret;
 }
@@ -224,9 +224,9 @@ int mca_io_romio_File_read_all_begin(MPI_File fh, void *buf, int count, MPI_Data
     mca_romio_fh = (mca_io_romio_file_t *) fh;
     romio_fh = mca_romio_fh->romio_fh;
 
-    THREAD_LOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_LOCK(&mca_io_romio_mutex);
     ret=mca_io_romio_MPI_File_read_all_begin(romio_fh,buf,count,datatype);
-    THREAD_UNLOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_UNLOCK(&mca_io_romio_mutex);
 
     return ret;
 }
@@ -239,9 +239,9 @@ int mca_io_romio_File_read_all_end(MPI_File fh, void *buf, MPI_Status *status){
     mca_romio_fh = (mca_io_romio_file_t *) fh;
     romio_fh = mca_romio_fh->romio_fh;
 
-    THREAD_LOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_LOCK(&mca_io_romio_mutex);
     ret=mca_io_romio_MPI_File_read_all_end(romio_fh,buf,status);
-    THREAD_UNLOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_UNLOCK(&mca_io_romio_mutex);
 
     return ret;
 }
@@ -254,9 +254,9 @@ int mca_io_romio_File_read_ordered_begin(MPI_File fh, void *buf, int count, MPI_
     mca_romio_fh = (mca_io_romio_file_t *) fh;
     romio_fh = mca_romio_fh->romio_fh;
 
-    THREAD_LOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_LOCK(&mca_io_romio_mutex);
     ret=mca_io_romio_MPI_File_read_ordered_begin(romio_fh,buf,count,datatype);
-    THREAD_UNLOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_UNLOCK(&mca_io_romio_mutex);
 
    return ret;
 }
@@ -269,9 +269,9 @@ int mca_io_romio_File_read_ordered_end(MPI_File fh, void *buf, MPI_Status *statu
     mca_romio_fh = (mca_io_romio_file_t *) fh;
     romio_fh = mca_romio_fh->romio_fh;
 
-    THREAD_LOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_LOCK(&mca_io_romio_mutex);
     ret=mca_io_romio_MPI_File_read_ordered_end(romio_fh, buf,status);
-    THREAD_UNLOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_UNLOCK(&mca_io_romio_mutex);
 
    return ret;
 }
@@ -19,13 +19,13 @@ int mca_io_romio_Test(MPI_Request *request, int *flag, MPI_Status *status){
     romio_rq = rq->romio_rq;
 
 
-    THREAD_LOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_LOCK(&mca_io_romio_mutex);
     ret=mca_io_romio_MPIO_Test(&romio_rq, flag,status);
     if (*flag) {
         free(*request);
         *request = MPI_REQUEST_NULL;
     }
-    THREAD_UNLOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_UNLOCK(&mca_io_romio_mutex);
 
     return ret;
 }
@@ -41,9 +41,9 @@ int mca_io_romio_Wait(MPI_Request *request, MPI_Status *status){
     romio_rq = rq->romio_rq;
 
 
-    THREAD_LOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_LOCK(&mca_io_romio_mutex);
     ret=mca_io_romio_MPIO_Wait(&romio_rq, status);
-    THREAD_UNLOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_UNLOCK(&mca_io_romio_mutex);
 
     free(*request);
     *request = MPI_REQUEST_NULL;
@@ -17,9 +17,9 @@ int mca_io_romio_File_write_at(MPI_File fh, MPI_Offset offset, void *buf,int cou
     mca_romio_fh = (mca_io_romio_file_t *) fh;
     romio_fh = mca_romio_fh->romio_fh;
 
-    THREAD_LOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_LOCK(&mca_io_romio_mutex);
     ret=mca_io_romio_MPI_File_write_at(romio_fh,offset,buf,count,datatype,status);
-    THREAD_UNLOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_UNLOCK(&mca_io_romio_mutex);
 
     return ret;
 }
@@ -35,9 +35,9 @@ int mca_io_romio_File_write_at_all(MPI_File fh, MPI_Offset offset, void *buf, in
     mca_romio_fh = (mca_io_romio_file_t *) fh;
     romio_fh = mca_romio_fh->romio_fh;
 
-    THREAD_LOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_LOCK(&mca_io_romio_mutex);
     ret=mca_io_romio_MPI_File_write_at_all(romio_fh,offset,buf,count,datatype,status);
-    THREAD_UNLOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_UNLOCK(&mca_io_romio_mutex);
 
     return ret;
 }
@@ -64,10 +64,10 @@ int mca_io_romio_File_iwrite_at(MPI_File fh, MPI_Offset offset, void *buf,int co
     romio_fh = mca_romio_fh->romio_fh;
 
 
-    THREAD_LOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_LOCK(&mca_io_romio_mutex);
     ret=mca_io_romio_MPI_File_iwrite_at(romio_fh,offset,buf,count,datatype,
                                         &romio_rq);
-    THREAD_UNLOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_UNLOCK(&mca_io_romio_mutex);
 
     return ret;
 }
@@ -85,9 +85,9 @@ int mca_io_romio_File_write(MPI_File fh, void *buf, int count, MPI_Datatype data
     mca_romio_fh = (mca_io_romio_file_t *) fh;
     romio_fh = mca_romio_fh->romio_fh;
 
-    THREAD_LOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_LOCK(&mca_io_romio_mutex);
     ret=mca_io_romio_MPI_File_write(romio_fh,buf,count,datatype,status);
-    THREAD_UNLOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_UNLOCK(&mca_io_romio_mutex);
 
     return ret;
 }
@@ -101,9 +101,9 @@ int mca_io_romio_File_write_all(MPI_File fh, void *buf, int count, MPI_Datatype
     mca_romio_fh = (mca_io_romio_file_t *) fh;
     romio_fh = mca_romio_fh->romio_fh;
 
-    THREAD_LOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_LOCK(&mca_io_romio_mutex);
     ret=mca_io_romio_MPI_File_write_all(romio_fh,buf,count,datatype,status);
-    THREAD_UNLOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_UNLOCK(&mca_io_romio_mutex);
 
     return ret;
 }
@@ -127,9 +127,9 @@ int mca_io_romio_File_iwrite(MPI_File fh, void *buf, int count,
 
     romio_fh = ((mca_io_romio_file_t *) fh)->romio_fh;
 
-    THREAD_LOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_LOCK(&mca_io_romio_mutex);
     ret=mca_io_romio_MPI_File_iwrite(romio_fh, buf, count, datatype, &romio_rq);
-    THREAD_UNLOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_UNLOCK(&mca_io_romio_mutex);
     return ret;
 }
 
@@ -143,9 +143,9 @@ int mca_io_romio_File_write_shared(MPI_File fh, void *buf, int count, MPI_Dataty
     mca_romio_fh = (mca_io_romio_file_t *) fh;
     romio_fh = mca_romio_fh->romio_fh;
 
-    THREAD_LOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_LOCK(&mca_io_romio_mutex);
     ret=mca_io_romio_MPI_File_write_shared(romio_fh,buf,count,datatype,status);
-    THREAD_UNLOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_UNLOCK(&mca_io_romio_mutex);
 
    return ret;
 }
@@ -171,9 +171,9 @@ int mca_io_romio_File_iwrite_shared(MPI_File fh, void *buf, int count,
     romio_fh = mca_romio_fh->romio_fh;
 
 
-    THREAD_LOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_LOCK(&mca_io_romio_mutex);
     ret= mca_io_romio_MPI_File_iwrite_shared(romio_fh,buf,count,datatype,&romio_rq);
-    THREAD_UNLOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_UNLOCK(&mca_io_romio_mutex);
 
    return ret;
 }
@@ -187,9 +187,9 @@ int mca_io_romio_File_write_ordered(MPI_File fh, void *buf, int count,
     mca_romio_fh = (mca_io_romio_file_t *) fh;
     romio_fh = mca_romio_fh->romio_fh;
 
-    THREAD_LOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_LOCK(&mca_io_romio_mutex);
     ret= mca_io_romio_MPI_File_write_ordered(romio_fh,buf,count,datatype,status);
-    THREAD_UNLOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_UNLOCK(&mca_io_romio_mutex);
 
    return ret;
 }
@@ -203,10 +203,10 @@ int mca_io_romio_File_write_at_all_begin(MPI_File fh, MPI_Offset offset, void *b
     mca_romio_fh = (mca_io_romio_file_t *) fh;
     romio_fh = mca_romio_fh->romio_fh;
 
-    THREAD_LOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_LOCK(&mca_io_romio_mutex);
     ret= mca_io_romio_MPI_File_write_at_all_begin(romio_fh,offset, buf,
                                                   count, datatype);
-    THREAD_UNLOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_UNLOCK(&mca_io_romio_mutex);
 
   return ret;
 }
@@ -219,9 +219,9 @@ int mca_io_romio_File_write_at_all_end(MPI_File fh, void *buf, MPI_Status *statu
     mca_romio_fh = (mca_io_romio_file_t *) fh;
     romio_fh = mca_romio_fh->romio_fh;
 
-    THREAD_LOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_LOCK(&mca_io_romio_mutex);
     ret=mca_io_romio_MPI_File_write_at_all_end(romio_fh,buf,status);
-    THREAD_UNLOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_UNLOCK(&mca_io_romio_mutex);
 
   return ret;
 }
@@ -235,10 +235,10 @@ int mca_io_romio_File_write_all_begin(MPI_File fh, void *buf, int count,
     mca_romio_fh = (mca_io_romio_file_t *) fh;
     romio_fh = mca_romio_fh->romio_fh;
 
-    THREAD_LOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_LOCK(&mca_io_romio_mutex);
     ret= mca_io_romio_MPI_File_write_all_begin(romio_fh, buf, count,
                                                datatype);
-    THREAD_UNLOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_UNLOCK(&mca_io_romio_mutex);
 
   return ret;
 }
@@ -251,9 +251,9 @@ int mca_io_romio_File_write_all_end(MPI_File fh, void *buf, MPI_Status *status){
     mca_romio_fh = (mca_io_romio_file_t *) fh;
     romio_fh = mca_romio_fh->romio_fh;
 
-    THREAD_LOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_LOCK(&mca_io_romio_mutex);
     ret= mca_io_romio_MPI_File_write_all_end(romio_fh,buf,status);
-    THREAD_UNLOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_UNLOCK(&mca_io_romio_mutex);
 
  return ret;
 }
@@ -266,9 +266,9 @@ int mca_io_romio_File_write_ordered_begin(MPI_File fh, void *buf, int count,MPI_
     mca_romio_fh = (mca_io_romio_file_t *) fh;
     romio_fh = mca_romio_fh->romio_fh;
 
-    THREAD_LOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_LOCK(&mca_io_romio_mutex);
     ret=mca_io_romio_MPI_File_write_ordered_begin(romio_fh,buf,count,datatype);
-    THREAD_UNLOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_UNLOCK(&mca_io_romio_mutex);
 
 return ret;
 }
@@ -281,9 +281,9 @@ int mca_io_romio_File_write_ordered_end(MPI_File fh, void *buf, MPI_Status *stat
     mca_romio_fh = (mca_io_romio_file_t *) fh;
     romio_fh = mca_romio_fh->romio_fh;
 
-    THREAD_LOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_LOCK(&mca_io_romio_mutex);
     ret=mca_io_romio_MPI_File_write_ordered_end(romio_fh,buf,status);
-    THREAD_UNLOCK(&mca_io_romio_mutex);
+    OMPI_THREAD_UNLOCK(&mca_io_romio_mutex);
 
 return ret;
 }
@@ -71,9 +71,9 @@ int
 mca_oob_cofs_recv(ompi_job_handle_t job_handle, int vpid, int* tag,
                   void** data, size_t* data_len)
 {
-  int ret = OMPI_ERR_WOULD_BLOCK;
+  int ret = OMPI_ERR_WOULD_BOMPI_LOCK;
   blocking_recv_posted = 1;
-  while (ret == OMPI_ERR_WOULD_BLOCK) {
+  while (ret == OMPI_ERR_WOULD_BOMPI_LOCK) {
     ret = do_recv(job_handle, vpid, tag, data, data_len);
     sleep(1);
   }
@@ -87,7 +87,7 @@ mca_oob_cofs_recv_nb(ompi_job_handle_t job_handle, int vpid, int* tag,
                      void** data, size_t* data_len)
 {
   if (blocking_recv_posted != 0) {
-    return OMPI_ERR_WOULD_BLOCK;
+    return OMPI_ERR_WOULD_BOMPI_LOCK;
  }
 
   return do_recv(job_handle, vpid, tag, data, data_len);
@@ -173,7 +173,7 @@ do_recv(ompi_job_handle_t job_handle, int vpid, int* tag,
 
  fname = find_match(job_handle, vpid, tag);
   if (fname == NULL) {
-    return OMPI_ERR_WOULD_BLOCK;
+    return OMPI_ERR_WOULD_BOMPI_LOCK;
   }
   snprintf(full_fname, OMPI_PATH_MAX, "%s/%s", mca_oob_cofs_comm_loc, fname);
   free(fname);
@ -40,7 +40,7 @@ static void* mca_pml_bsend_alloc_segment(size_t* size_inout)
if( mca_pml_bsend_addr + *size_inout <= mca_pml_bsend_base + mca_pml_bsend_size ) {
size = *size_inout;
} else {
THREAD_UNLOCK(&mca_pml_bsend_mutex);
OMPI_THREAD_UNLOCK(&mca_pml_bsend_mutex);
return NULL;
}
}
@ -117,16 +117,16 @@ int mca_pml_base_bsend_attach(void* addr, int size)
}

/* check for buffer already attached */
THREAD_LOCK(&mca_pml_bsend_mutex);
OMPI_THREAD_LOCK(&mca_pml_bsend_mutex);
if(NULL != mca_pml_bsend_allocator) {
THREAD_UNLOCK(&mca_pml_bsend_mutex);
OMPI_THREAD_UNLOCK(&mca_pml_bsend_mutex);
return OMPI_ERR_BUFFER;
}

/* try to create an instance of the allocator - to determine thread safety level */
mca_pml_bsend_allocator = mca_pml_bsend_allocator_component->allocator_init(&thread_safe, mca_pml_bsend_alloc_segment, NULL);
if(NULL == mca_pml_bsend_allocator) {
THREAD_UNLOCK(&mca_pml_bsend_mutex);
OMPI_THREAD_UNLOCK(&mca_pml_bsend_mutex);
return OMPI_ERR_BUFFER;
}

@ -135,7 +135,7 @@ int mca_pml_base_bsend_attach(void* addr, int size)
mca_pml_bsend_addr = addr;
mca_pml_bsend_size = size;
mca_pml_bsend_count = 0;
THREAD_UNLOCK(&mca_pml_bsend_mutex);
OMPI_THREAD_UNLOCK(&mca_pml_bsend_mutex);
return OMPI_SUCCESS;
}

@ -144,11 +144,11 @@ int mca_pml_base_bsend_attach(void* addr, int size)
*/
int mca_pml_base_bsend_detach(void* addr, int* size)
{
THREAD_LOCK(&mca_pml_bsend_mutex);
OMPI_THREAD_LOCK(&mca_pml_bsend_mutex);

/* is buffer attached */
if(NULL == mca_pml_bsend_allocator) {
THREAD_UNLOCK(&mca_pml_bsend_mutex);
OMPI_THREAD_UNLOCK(&mca_pml_bsend_mutex);
return OMPI_ERR_BUFFER;
}

@ -171,7 +171,7 @@ int mca_pml_base_bsend_detach(void* addr, int* size)
mca_pml_bsend_addr = NULL;
mca_pml_bsend_size = 0;
mca_pml_bsend_count = 0;
THREAD_UNLOCK(&mca_pml_bsend_mutex);
OMPI_THREAD_UNLOCK(&mca_pml_bsend_mutex);
return OMPI_SUCCESS;
}

@ -186,16 +186,16 @@ int mca_pml_base_bsend_request_init(ompi_request_t* request, bool persistent)
void* buf;
int rc;

THREAD_LOCK(&mca_pml_bsend_mutex);
OMPI_THREAD_LOCK(&mca_pml_bsend_mutex);
if(NULL == mca_pml_bsend_addr) {
THREAD_UNLOCK(&mca_pml_bsend_mutex);
OMPI_THREAD_UNLOCK(&mca_pml_bsend_mutex);
return OMPI_ERR_BUFFER;
}

/* allocate a buffer to hold packed message */
buf = mca_pml_bsend_allocator->alc_alloc(mca_pml_bsend_allocator, sendreq->req_bytes_packed, 0);
if(NULL == buf) {
THREAD_UNLOCK(&mca_pml_bsend_mutex);
OMPI_THREAD_UNLOCK(&mca_pml_bsend_mutex);
return OMPI_ERR_BUFFER;
}

@ -204,14 +204,14 @@ int mca_pml_base_bsend_request_init(ompi_request_t* request, bool persistent)
iov.iov_len = sendreq->req_bytes_packed;
if((rc = ompi_convertor_pack(&sendreq->req_convertor, &iov, 1)) < 0) {
mca_pml_bsend_allocator->alc_free(mca_pml_bsend_allocator, buf);
THREAD_UNLOCK(&mca_pml_bsend_mutex);
OMPI_THREAD_UNLOCK(&mca_pml_bsend_mutex);
return OMPI_ERROR;
}

/* setup convertor to reflect contiguous buffer */
if((rc = ompi_convertor_init_for_send(&sendreq->req_convertor, 0, MPI_BYTE, iov.iov_len, iov.iov_base, 0)) != OMPI_SUCCESS) {
mca_pml_bsend_allocator->alc_free(mca_pml_bsend_allocator, buf);
THREAD_UNLOCK(&mca_pml_bsend_mutex);
OMPI_THREAD_UNLOCK(&mca_pml_bsend_mutex);
return rc;
}

@ -221,7 +221,7 @@ int mca_pml_base_bsend_request_init(ompi_request_t* request, bool persistent)
/* set flag indicating mpi layer is done */
sendreq->super.req_persistent = persistent;
sendreq->super.req_mpi_done = true;
THREAD_UNLOCK(&mca_pml_bsend_mutex);
OMPI_THREAD_UNLOCK(&mca_pml_bsend_mutex);
return OMPI_SUCCESS;
}

@ -234,7 +234,7 @@ int mca_pml_base_bsend_request_fini(ompi_request_t* request)
mca_pml_base_send_request_t* sendreq = (mca_pml_base_send_request_t*)request;

/* remove from list of pending requests */
THREAD_LOCK(&mca_pml_bsend_mutex);
OMPI_THREAD_LOCK(&mca_pml_bsend_mutex);

/* free buffer */
mca_pml_bsend_allocator->alc_free(mca_pml_bsend_allocator, sendreq->req_convertor.pBaseBuf);
@ -243,7 +243,7 @@ int mca_pml_base_bsend_request_fini(ompi_request_t* request)
if(--mca_pml_bsend_count == 0)
ompi_condition_signal(&mca_pml_bsend_condition);

THREAD_UNLOCK(&mca_pml_bsend_mutex);
OMPI_THREAD_UNLOCK(&mca_pml_bsend_mutex);
return OMPI_SUCCESS;
}
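The attach/detach/init functions above all follow the same discipline: take the buffer mutex, validate state, and release the mutex on every early-error return. A minimal sketch of that discipline with a single unlock point covering all paths (standalone C with pthreads; the buffer-state variables are hypothetical, not the mca_pml_bsend globals):

#include <pthread.h>
#include <stddef.h>

static pthread_mutex_t buf_mutex = PTHREAD_MUTEX_INITIALIZER;
static void *buf_addr = NULL;   /* stands in for mca_pml_bsend_addr */
static size_t buf_size = 0;

int buffer_attach(void *addr, size_t size)
{
    int rc = 0;
    pthread_mutex_lock(&buf_mutex);
    if (buf_addr != NULL) {
        rc = -1;                /* already attached: fail, but still unlock */
    } else {
        buf_addr = addr;
        buf_size = size;
    }
    pthread_mutex_unlock(&buf_mutex);   /* the one unlock covers every path */
    return rc;
}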
@ -16,17 +16,17 @@ static void mca_pml_teg_proc_construct(mca_pml_proc_t* proc)
OBJ_CONSTRUCT(&proc->proc_ptl_first, mca_pml_teg_ptl_array_t);
OBJ_CONSTRUCT(&proc->proc_ptl_next, mca_pml_teg_ptl_array_t);

THREAD_LOCK(&mca_pml_teg.teg_lock);
OMPI_THREAD_LOCK(&mca_pml_teg.teg_lock);
ompi_list_append(&mca_pml_teg.teg_procs, (ompi_list_item_t*)proc);
THREAD_UNLOCK(&mca_pml_teg.teg_lock);
OMPI_THREAD_UNLOCK(&mca_pml_teg.teg_lock);
}


static void mca_pml_teg_proc_destruct(mca_pml_proc_t* proc)
{
THREAD_LOCK(&mca_pml_teg.teg_lock);
OMPI_THREAD_LOCK(&mca_pml_teg.teg_lock);
ompi_list_remove_item(&mca_pml_teg.teg_procs, (ompi_list_item_t*)proc);
THREAD_UNLOCK(&mca_pml_teg.teg_lock);
OMPI_THREAD_UNLOCK(&mca_pml_teg.teg_lock);

OBJ_DESTRUCT(&proc->proc_lock);
OBJ_DESTRUCT(&proc->proc_ptl_first);
@ -16,7 +16,7 @@ void mca_pml_teg_recv_request_progress(
mca_pml_base_recv_request_t* req,
mca_ptl_base_recv_frag_t* frag)
{
THREAD_LOCK(&mca_pml_teg.teg_request_lock);
OMPI_THREAD_LOCK(&mca_pml_teg.teg_request_lock);
req->req_bytes_delivered += frag->super.frag_size;
req->req_bytes_received += frag->super.frag_header.hdr_frag.hdr_frag_length;
if (req->req_bytes_received >= req->req_bytes_packed) {
@ -34,7 +34,7 @@ void mca_pml_teg_recv_request_progress(
ompi_condition_broadcast(&mca_pml_teg.teg_request_cond);
}
}
THREAD_UNLOCK(&mca_pml_teg.teg_request_lock);
OMPI_THREAD_UNLOCK(&mca_pml_teg.teg_request_lock);
}
@ -52,7 +52,7 @@ void mca_pml_teg_recv_request_match_specific(mca_pml_base_recv_request_t* reques
mca_ptl_base_recv_frag_t* frag;

/* check for a specific match */
THREAD_LOCK(&pml_comm->c_matching_lock);
OMPI_THREAD_LOCK(&pml_comm->c_matching_lock);

/* assign sequence number */
request->super.req_sequence = pml_comm->c_recv_seq++;
@ -60,7 +60,7 @@ void mca_pml_teg_recv_request_match_specific(mca_pml_base_recv_request_t* reques
if (ompi_list_get_size(&pml_comm->c_unexpected_frags[req_peer]) > 0 &&
(frag = mca_pml_teg_recv_request_match_specific_proc(request, req_peer)) != NULL) {
mca_ptl_t* ptl = frag->super.frag_owner;
THREAD_UNLOCK(&pml_comm->c_matching_lock);
OMPI_THREAD_UNLOCK(&pml_comm->c_matching_lock);
ptl->ptl_matched(ptl, frag);
return; /* match found */
}
@ -70,7 +70,7 @@ void mca_pml_teg_recv_request_match_specific(mca_pml_base_recv_request_t* reques
*/
if(request->super.req_type != MCA_PML_REQUEST_IPROBE)
ompi_list_append(pml_comm->c_specific_receives+req_peer, (ompi_list_item_t*)request);
THREAD_UNLOCK(&pml_comm->c_matching_lock);
OMPI_THREAD_UNLOCK(&pml_comm->c_matching_lock);
}


@ -92,7 +92,7 @@ void mca_pml_teg_recv_request_match_wild(mca_pml_base_recv_request_t* request)
* process, then an inner loop over the messages from the
* process.
*/
THREAD_LOCK(&pml_comm->c_matching_lock);
OMPI_THREAD_LOCK(&pml_comm->c_matching_lock);

/* assign sequence number */
request->super.req_sequence = pml_comm->c_recv_seq++;
@ -107,7 +107,7 @@ void mca_pml_teg_recv_request_match_wild(mca_pml_base_recv_request_t* request)
/* loop over messages from the current proc */
if ((frag = mca_pml_teg_recv_request_match_specific_proc(request, proc)) != NULL) {
mca_ptl_t* ptl = frag->super.frag_owner;
THREAD_UNLOCK(&pml_comm->c_matching_lock);
OMPI_THREAD_UNLOCK(&pml_comm->c_matching_lock);
ptl->ptl_matched(ptl, frag);
return; /* match found */
}
@ -119,7 +119,7 @@ void mca_pml_teg_recv_request_match_wild(mca_pml_base_recv_request_t* request)

if(request->super.req_type != MCA_PML_REQUEST_IPROBE)
ompi_list_append(&pml_comm->c_wild_receives, (ompi_list_item_t*)request);
THREAD_UNLOCK(&pml_comm->c_matching_lock);
OMPI_THREAD_UNLOCK(&pml_comm->c_matching_lock);
}
@ -59,12 +59,12 @@ void mca_pml_teg_send_request_schedule(mca_pml_base_send_request_t* req)

/* unable to complete send - signal request failed */
if(bytes_remaining > 0) {
THREAD_LOCK(&mca_pml_teg.teg_request_lock);
OMPI_THREAD_LOCK(&mca_pml_teg.teg_request_lock);
req->super.req_mpi_done = true;
/* FIX - set status correctly */
if(mca_pml_teg.teg_request_waiting)
ompi_condition_broadcast(&mca_pml_teg.teg_request_cond);
THREAD_UNLOCK(&mca_pml_teg.teg_request_lock);
OMPI_THREAD_UNLOCK(&mca_pml_teg.teg_request_lock);
}
}

@ -85,7 +85,7 @@ void mca_pml_teg_send_request_progress(
mca_ptl_base_send_frag_t* frag)
{
bool first_frag;
THREAD_LOCK(&mca_pml_teg.teg_request_lock);
OMPI_THREAD_LOCK(&mca_pml_teg.teg_request_lock);
first_frag = (req->req_bytes_sent == 0 && req->req_bytes_packed > 0);
req->req_bytes_sent += frag->super.frag_size;
if (req->req_bytes_sent >= req->req_bytes_packed) {
@ -102,10 +102,10 @@ void mca_pml_teg_send_request_progress(
} else if (req->super.req_free_called) {
MCA_PML_TEG_FREE((ompi_request_t**)&req);
}
THREAD_UNLOCK(&mca_pml_teg.teg_request_lock);
OMPI_THREAD_UNLOCK(&mca_pml_teg.teg_request_lock);
return;
}
THREAD_UNLOCK(&mca_pml_teg.teg_request_lock);
OMPI_THREAD_UNLOCK(&mca_pml_teg.teg_request_lock);

/* if first fragment - schedule remaining fragments */
if(first_frag == true) {
@ -24,7 +24,7 @@
mca_ptl_proc_t* ptl_proc; \
mca_ptl_t* ptl; \
\
THREAD_SCOPED_LOCK(&proc->proc_lock, \
OMPI_THREAD_SCOPED_LOCK(&proc->proc_lock, \
(ptl_proc = mca_ptl_array_get_next(&proc->proc_ptl_first))); \
ptl = ptl_proc->ptl; \
rc = ptl->ptl_request_alloc(ptl,&sendreq); \
@ -26,16 +26,16 @@ int mca_pml_teg_start(size_t count, ompi_request_t** requests)
case OMPI_REQUEST_ACTIVE: {

ompi_request_t *request;
THREAD_LOCK(&mca_pml_teg.teg_request_lock);
OMPI_THREAD_LOCK(&mca_pml_teg.teg_request_lock);
if (pml_request->req_pml_done == false) {
/* free request after it completes */
pml_request->req_free_called = true;
} else {
/* can reuse the existing request */
THREAD_UNLOCK(&mca_pml_teg.teg_request_lock);
OMPI_THREAD_UNLOCK(&mca_pml_teg.teg_request_lock);
break;
}
THREAD_UNLOCK(&mca_pml_teg.teg_request_lock);
OMPI_THREAD_UNLOCK(&mca_pml_teg.teg_request_lock);

/* allocate a new request */
switch(pml_request->req_type) {
@ -40,7 +40,7 @@ int mca_pml_teg_wait(

if(completed < 0) {
/* give up and sleep until completion */
THREAD_LOCK(&mca_pml_teg.teg_request_lock);
OMPI_THREAD_LOCK(&mca_pml_teg.teg_request_lock);
mca_pml_teg.teg_request_waiting++;
do {
for(i=0; i<count; i++) {
@ -58,7 +58,7 @@ int mca_pml_teg_wait(
}
} while(completed < 0);
mca_pml_teg.teg_request_waiting--;
THREAD_UNLOCK(&mca_pml_teg.teg_request_lock);
OMPI_THREAD_UNLOCK(&mca_pml_teg.teg_request_lock);
}

/* return status */
@ -92,7 +92,7 @@ int mca_pml_teg_wait_all(
* acquire lock and test for completion - if all requests are not completed
* pend on condition variable until a request completes
*/
THREAD_LOCK(&mca_pml_teg.teg_request_lock);
OMPI_THREAD_LOCK(&mca_pml_teg.teg_request_lock);
mca_pml_teg.teg_request_waiting++;
do {
completed = 0;
@ -107,7 +107,7 @@ int mca_pml_teg_wait_all(
ompi_condition_wait(&mca_pml_teg.teg_request_cond, &mca_pml_teg.teg_request_lock);
} while (completed != count);
mca_pml_teg.teg_request_waiting--;
THREAD_UNLOCK(&mca_pml_teg.teg_request_lock);
OMPI_THREAD_UNLOCK(&mca_pml_teg.teg_request_lock);
}

if(NULL != statuses) {
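mca_pml_teg_wait_all above pends on a condition variable and re-tests completion after each wakeup. That is the standard condition-variable pattern: the predicate is always re-checked while holding the mutex, because wakeups can be spurious or stale. A standalone sketch under those assumptions ('completed' and 'count' are placeholders for the request bookkeeping):

#include <pthread.h>

static pthread_mutex_t req_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  req_cond = PTHREAD_COND_INITIALIZER;
static int completed = 0;

void wait_all(int count)
{
    pthread_mutex_lock(&req_lock);
    while (completed != count)                    /* predicate re-checked under the lock */
        pthread_cond_wait(&req_cond, &req_lock);  /* atomically unlocks while sleeping */
    pthread_mutex_unlock(&req_lock);
}

void one_completed(void)
{
    pthread_mutex_lock(&req_lock);
    completed++;
    pthread_cond_broadcast(&req_cond);            /* wake waiters so they re-test */
    pthread_mutex_unlock(&req_lock);
}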
@ -53,7 +53,7 @@ extern int mca_pml_ptl_comm_init_size(mca_pml_ptl_comm_t* comm, size_t size);
static inline mca_ptl_sequence_t mca_pml_ptl_comm_send_sequence(mca_pml_ptl_comm_t* comm, int dst)
{
mca_ptl_sequence_t sequence;
THREAD_SCOPED_LOCK(&comm->c_matching_lock, sequence = comm->c_msg_seq[dst]++);
OMPI_THREAD_SCOPED_LOCK(&comm->c_matching_lock, sequence = comm->c_msg_seq[dst]++);
return sequence;
}
@ -103,7 +103,7 @@ bool mca_ptl_base_match(
* end points) from being processed, and potentially "losing"
* the fragment.
*/
THREAD_LOCK(&pml_comm->c_matching_lock);
OMPI_THREAD_LOCK(&pml_comm->c_matching_lock);

/* get sequence number of next message that can be processed */
next_msg_seq_expected = *((pml_comm->c_next_msg_seq)+frag_src);
@ -164,7 +164,7 @@ bool mca_ptl_base_match(
(ompi_list_item_t *)frag_desc);
}

THREAD_UNLOCK(&pml_comm->c_matching_lock);
OMPI_THREAD_UNLOCK(&pml_comm->c_matching_lock);
return match_made;
}
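The comment in mca_ptl_base_match explains why c_matching_lock must be held: fragments from one peer carry sequence numbers, and matching anything out of order could lose a fragment. A standalone sketch of that in-order gate, with the per-peer state simplified to a single counter (the fragment type is hypothetical):

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>

static pthread_mutex_t matching_lock = PTHREAD_MUTEX_INITIALIZER;
static uint16_t next_seq_expected = 0;      /* per-peer in the real code */

bool try_match(uint16_t frag_seq)
{
    bool matched = false;
    pthread_mutex_lock(&matching_lock);
    if (frag_seq == next_seq_expected) {    /* in-order: safe to match now */
        next_seq_expected++;
        matched = true;
    }                                       /* out-of-order: caller queues it */
    pthread_mutex_unlock(&matching_lock);
    return matched;
}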
@ -65,9 +65,9 @@ int mca_ptl_tcp_add_procs(
* exported as we are trying to use. If not, then don't bind this PTL instance
* to the proc.
*/
THREAD_LOCK(&ptl_proc->proc_lock);
OMPI_THREAD_LOCK(&ptl_proc->proc_lock);
if(ptl_proc->proc_addr_count == ptl_proc->proc_peer_count) {
THREAD_UNLOCK(&ptl_proc->proc_lock);
OMPI_THREAD_UNLOCK(&ptl_proc->proc_lock);
return OMPI_ERR_UNREACH;
}

@ -76,18 +76,18 @@ int mca_ptl_tcp_add_procs(
*/
ptl_peer = OBJ_NEW(mca_ptl_tcp_peer_t);
if(NULL == ptl_peer) {
THREAD_UNLOCK(&ptl_proc->proc_lock);
OMPI_THREAD_UNLOCK(&ptl_proc->proc_lock);
return OMPI_ERR_OUT_OF_RESOURCE;
}
ptl_peer->peer_ptl = (mca_ptl_tcp_t*)ptl;
rc = mca_ptl_tcp_proc_insert(ptl_proc, ptl_peer);
if(rc != OMPI_SUCCESS) {
OBJ_RELEASE(ptl_peer);
THREAD_UNLOCK(&ptl_proc->proc_lock);
OMPI_THREAD_UNLOCK(&ptl_proc->proc_lock);
return rc;
}
ompi_bitmap_set_bit(reachable, i);
THREAD_UNLOCK(&ptl_proc->proc_lock);
OMPI_THREAD_UNLOCK(&ptl_proc->proc_lock);
peers[i] = ptl_peer;
}
return OMPI_SUCCESS;
@ -141,14 +141,14 @@ void mca_ptl_tcp_send_frag_return(struct mca_ptl_t* ptl, struct mca_ptl_tcp_send
{
if(ompi_list_get_size(&mca_ptl_tcp_module.tcp_pending_acks)) {
mca_ptl_tcp_recv_frag_t* pending;
THREAD_LOCK(&mca_ptl_tcp_module.tcp_lock);
OMPI_THREAD_LOCK(&mca_ptl_tcp_module.tcp_lock);
pending = (mca_ptl_tcp_recv_frag_t*)ompi_list_remove_first(&mca_ptl_tcp_module.tcp_pending_acks);
if(NULL == pending) {
THREAD_UNLOCK(&mca_ptl_tcp_module.tcp_lock);
OMPI_THREAD_UNLOCK(&mca_ptl_tcp_module.tcp_lock);
OMPI_FREE_LIST_RETURN(&mca_ptl_tcp_module.tcp_send_frags, (ompi_list_item_t*)frag);
return;
}
THREAD_UNLOCK(&mca_ptl_tcp_module.tcp_lock);
OMPI_THREAD_UNLOCK(&mca_ptl_tcp_module.tcp_lock);
mca_ptl_tcp_send_frag_init_ack(frag, ptl, pending->super.super.frag_peer, pending);
mca_ptl_tcp_peer_send(pending->super.super.frag_peer, frag);
mca_ptl_tcp_recv_frag_return(ptl, pending);
@ -213,10 +213,10 @@ void mca_ptl_tcp_matched(
ack = (mca_ptl_tcp_send_frag_t*)item;

if(NULL == ack) {
THREAD_LOCK(&mca_ptl_tcp_module.tcp_lock);
OMPI_THREAD_LOCK(&mca_ptl_tcp_module.tcp_lock);
recv_frag->frag_ack_pending = true;
ompi_list_append(&mca_ptl_tcp_module.tcp_pending_acks, (ompi_list_item_t*)frag);
THREAD_UNLOCK(&mca_ptl_tcp_module.tcp_lock);
OMPI_THREAD_UNLOCK(&mca_ptl_tcp_module.tcp_lock);
} else {
mca_ptl_tcp_send_frag_init_ack(ack, ptl, recv_frag->super.super.frag_peer, recv_frag);
mca_ptl_tcp_peer_send(ack->super.super.frag_peer, ack);

@ -149,7 +149,7 @@ static void mca_ptl_tcp_peer_destruct(mca_ptl_base_peer_t* ptl_peer)
int mca_ptl_tcp_peer_send(mca_ptl_base_peer_t* ptl_peer, mca_ptl_tcp_send_frag_t* frag)
{
int rc = OMPI_SUCCESS;
THREAD_LOCK(&ptl_peer->peer_send_lock);
OMPI_THREAD_LOCK(&ptl_peer->peer_send_lock);
switch(ptl_peer->peer_state) {
case MCA_PTL_TCP_CONNECTING:
case MCA_PTL_TCP_CONNECT_ACK:
@ -167,7 +167,7 @@ int mca_ptl_tcp_peer_send(mca_ptl_base_peer_t* ptl_peer, mca_ptl_tcp_send_frag_t
} else {
#if 0
if(mca_ptl_tcp_send_frag_handler(frag, ptl_peer->peer_sd)) {
THREAD_UNLOCK(&ptl_peer->peer_send_lock);
OMPI_THREAD_UNLOCK(&ptl_peer->peer_send_lock);
mca_ptl_tcp_send_frag_progress(frag);
return rc;
} else
@ -179,7 +179,7 @@ int mca_ptl_tcp_peer_send(mca_ptl_base_peer_t* ptl_peer, mca_ptl_tcp_send_frag_t
}
break;
}
THREAD_UNLOCK(&ptl_peer->peer_send_lock);
OMPI_THREAD_UNLOCK(&ptl_peer->peer_send_lock);
return rc;
}

@ -239,8 +239,8 @@ bool mca_ptl_tcp_peer_accept(mca_ptl_base_peer_t* ptl_peer, struct sockaddr_in*
{
mca_ptl_tcp_addr_t* ptl_addr;
mca_ptl_tcp_proc_t* this_proc = mca_ptl_tcp_proc_local();
THREAD_LOCK(&ptl_peer->peer_recv_lock);
THREAD_LOCK(&ptl_peer->peer_send_lock);
OMPI_THREAD_LOCK(&ptl_peer->peer_recv_lock);
OMPI_THREAD_LOCK(&ptl_peer->peer_send_lock);
if((ptl_addr = ptl_peer->peer_addr) != NULL &&
ptl_addr->addr_inet.s_addr == addr->sin_addr.s_addr) {
mca_ptl_tcp_proc_t *peer_proc = ptl_peer->peer_proc;
@ -251,8 +251,8 @@ bool mca_ptl_tcp_peer_accept(mca_ptl_base_peer_t* ptl_peer, struct sockaddr_in*
ptl_peer->peer_sd = sd;
if(mca_ptl_tcp_peer_send_connect_ack(ptl_peer) != OMPI_SUCCESS) {
mca_ptl_tcp_peer_close(ptl_peer);
THREAD_UNLOCK(&ptl_peer->peer_send_lock);
THREAD_UNLOCK(&ptl_peer->peer_recv_lock);
OMPI_THREAD_UNLOCK(&ptl_peer->peer_send_lock);
OMPI_THREAD_UNLOCK(&ptl_peer->peer_recv_lock);
return false;
}
mca_ptl_tcp_peer_event_init(ptl_peer, sd);
@ -261,13 +261,13 @@ bool mca_ptl_tcp_peer_accept(mca_ptl_base_peer_t* ptl_peer, struct sockaddr_in*
#if OMPI_ENABLE_DEBUG
mca_ptl_tcp_peer_dump(ptl_peer, "accepted");
#endif
THREAD_UNLOCK(&ptl_peer->peer_send_lock);
THREAD_UNLOCK(&ptl_peer->peer_recv_lock);
OMPI_THREAD_UNLOCK(&ptl_peer->peer_send_lock);
OMPI_THREAD_UNLOCK(&ptl_peer->peer_recv_lock);
return true;
}
}
THREAD_UNLOCK(&ptl_peer->peer_send_lock);
THREAD_UNLOCK(&ptl_peer->peer_recv_lock);
OMPI_THREAD_UNLOCK(&ptl_peer->peer_send_lock);
OMPI_THREAD_UNLOCK(&ptl_peer->peer_recv_lock);
return false;
}
@ -526,7 +526,7 @@ static void mca_ptl_tcp_peer_complete_connect(mca_ptl_base_peer_t* ptl_peer)
static void mca_ptl_tcp_peer_recv_handler(int sd, short flags, void* user)
{
mca_ptl_base_peer_t* ptl_peer = user;
THREAD_LOCK(&ptl_peer->peer_recv_lock);
OMPI_THREAD_LOCK(&ptl_peer->peer_recv_lock);
switch(ptl_peer->peer_state) {
case MCA_PTL_TCP_CONNECT_ACK:
{
@ -540,7 +540,7 @@ static void mca_ptl_tcp_peer_recv_handler(int sd, short flags, void* user)
int rc;
MCA_PTL_TCP_RECV_FRAG_ALLOC(recv_frag, rc);
if(NULL == recv_frag) {
THREAD_UNLOCK(&ptl_peer->peer_recv_lock);
OMPI_THREAD_UNLOCK(&ptl_peer->peer_recv_lock);
return;
}
mca_ptl_tcp_recv_frag_init(recv_frag, ptl_peer);
@ -560,7 +560,7 @@ static void mca_ptl_tcp_peer_recv_handler(int sd, short flags, void* user)
break;
}
}
THREAD_UNLOCK(&ptl_peer->peer_recv_lock);
OMPI_THREAD_UNLOCK(&ptl_peer->peer_recv_lock);
}


@ -572,7 +572,7 @@ static void mca_ptl_tcp_peer_recv_handler(int sd, short flags, void* user)
static void mca_ptl_tcp_peer_send_handler(int sd, short flags, void* user)
{
mca_ptl_tcp_peer_t* ptl_peer = user;
THREAD_LOCK(&ptl_peer->peer_send_lock);
OMPI_THREAD_LOCK(&ptl_peer->peer_send_lock);
switch(ptl_peer->peer_state) {
case MCA_PTL_TCP_CONNECTING:
mca_ptl_tcp_peer_complete_connect(ptl_peer);
@ -587,9 +587,9 @@ static void mca_ptl_tcp_peer_send_handler(int sd, short flags, void* user)
}

/* if required - update request status and release fragment */
THREAD_UNLOCK(&ptl_peer->peer_send_lock);
OMPI_THREAD_UNLOCK(&ptl_peer->peer_send_lock);
mca_ptl_tcp_send_frag_progress(frag);
THREAD_LOCK(&ptl_peer->peer_send_lock);
OMPI_THREAD_LOCK(&ptl_peer->peer_send_lock);

/* progress any pending sends */
ptl_peer->peer_send_frag = (mca_ptl_tcp_send_frag_t*)
@ -608,7 +608,7 @@ static void mca_ptl_tcp_peer_send_handler(int sd, short flags, void* user)
ompi_event_del(&ptl_peer->peer_send_event);
break;
}
THREAD_UNLOCK(&ptl_peer->peer_send_lock);
OMPI_THREAD_UNLOCK(&ptl_peer->peer_send_lock);
}
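Note how mca_ptl_tcp_peer_send_handler drops peer_send_lock around mca_ptl_tcp_send_frag_progress() and re-acquires it afterwards, so a completion callback that re-enters the peer cannot deadlock. A minimal sketch of that shape (standalone C; progress_callback is a hypothetical stand-in):

#include <pthread.h>

static pthread_mutex_t send_lock = PTHREAD_MUTEX_INITIALIZER;

extern void progress_callback(void *frag);  /* may re-enter and lock again */

void send_handler(void *frag)
{
    pthread_mutex_lock(&send_lock);
    /* ... update peer state under the lock ... */
    pthread_mutex_unlock(&send_lock);   /* never hold the lock across a callback */
    progress_callback(frag);
    pthread_mutex_lock(&send_lock);
    /* ... pick up the next pending fragment ... */
    pthread_mutex_unlock(&send_lock);
}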
@ -37,9 +37,9 @@ void mca_ptl_tcp_proc_construct(mca_ptl_tcp_proc_t* proc)
OBJ_CONSTRUCT(&proc->proc_lock, ompi_mutex_t);

/* add to list of all proc instance */
THREAD_LOCK(&mca_ptl_tcp_module.tcp_lock);
OMPI_THREAD_LOCK(&mca_ptl_tcp_module.tcp_lock);
ompi_list_append(&mca_ptl_tcp_module.tcp_procs, &proc->super);
THREAD_UNLOCK(&mca_ptl_tcp_module.tcp_lock);
OMPI_THREAD_UNLOCK(&mca_ptl_tcp_module.tcp_lock);
}


@ -50,9 +50,9 @@ void mca_ptl_tcp_proc_construct(mca_ptl_tcp_proc_t* proc)
void mca_ptl_tcp_proc_destruct(mca_ptl_tcp_proc_t* proc)
{
/* remove from list of all proc instances */
THREAD_LOCK(&mca_ptl_tcp_module.tcp_lock);
OMPI_THREAD_LOCK(&mca_ptl_tcp_module.tcp_lock);
ompi_list_remove_item(&mca_ptl_tcp_module.tcp_procs, &proc->super);
THREAD_UNLOCK(&mca_ptl_tcp_module.tcp_lock);
OMPI_THREAD_UNLOCK(&mca_ptl_tcp_module.tcp_lock);

/* release resources */
if(NULL != proc->proc_peers)
@ -128,16 +128,16 @@ mca_ptl_tcp_proc_t* mca_ptl_tcp_proc_create(ompi_proc_t* ompi_proc)
static mca_ptl_tcp_proc_t* mca_ptl_tcp_proc_lookup_ompi(ompi_proc_t* ompi_proc)
{
mca_ptl_tcp_proc_t* tcp_proc;
THREAD_LOCK(&mca_ptl_tcp_module.tcp_lock);
OMPI_THREAD_LOCK(&mca_ptl_tcp_module.tcp_lock);
for(tcp_proc = (mca_ptl_tcp_proc_t*)ompi_list_get_first(&mca_ptl_tcp_module.tcp_procs);
tcp_proc != (mca_ptl_tcp_proc_t*)ompi_list_get_end(&mca_ptl_tcp_module.tcp_procs);
tcp_proc = (mca_ptl_tcp_proc_t*)ompi_list_get_next(tcp_proc)) {
if(tcp_proc->proc_ompi == ompi_proc) {
THREAD_UNLOCK(&mca_ptl_tcp_module.tcp_lock);
OMPI_THREAD_UNLOCK(&mca_ptl_tcp_module.tcp_lock);
return tcp_proc;
}
}
THREAD_UNLOCK(&mca_ptl_tcp_module.tcp_lock);
OMPI_THREAD_UNLOCK(&mca_ptl_tcp_module.tcp_lock);
return NULL;
}

@ -149,16 +149,16 @@ static mca_ptl_tcp_proc_t* mca_ptl_tcp_proc_lookup_ompi(ompi_proc_t* ompi_proc)
mca_ptl_tcp_proc_t* mca_ptl_tcp_proc_lookup(void *guid, size_t size)
{
mca_ptl_tcp_proc_t* tcp_proc;
THREAD_LOCK(&mca_ptl_tcp_module.tcp_lock);
OMPI_THREAD_LOCK(&mca_ptl_tcp_module.tcp_lock);
for(tcp_proc = (mca_ptl_tcp_proc_t*)ompi_list_get_first(&mca_ptl_tcp_module.tcp_procs);
tcp_proc != (mca_ptl_tcp_proc_t*)ompi_list_get_end(&mca_ptl_tcp_module.tcp_procs);
tcp_proc = (mca_ptl_tcp_proc_t*)ompi_list_get_next(tcp_proc)) {
if(tcp_proc->proc_guid_size == size && memcmp(tcp_proc->proc_guid, guid, size) == 0) {
THREAD_UNLOCK(&mca_ptl_tcp_module.tcp_lock);
OMPI_THREAD_UNLOCK(&mca_ptl_tcp_module.tcp_lock);
return tcp_proc;
}
}
THREAD_UNLOCK(&mca_ptl_tcp_module.tcp_lock);
OMPI_THREAD_UNLOCK(&mca_ptl_tcp_module.tcp_lock);
return NULL;
}

@ -205,7 +205,7 @@ int mca_ptl_tcp_proc_insert(mca_ptl_tcp_proc_t* ptl_proc, mca_ptl_base_peer_t* p
int mca_ptl_tcp_proc_remove(mca_ptl_tcp_proc_t* ptl_proc, mca_ptl_base_peer_t* ptl_peer)
{
size_t i;
THREAD_LOCK(&ptl_proc->proc_lock);
OMPI_THREAD_LOCK(&ptl_proc->proc_lock);
for(i=0; i<ptl_proc->proc_peer_count; i++) {
if(ptl_proc->proc_peers[i] == ptl_peer) {
memmove(&ptl_proc->proc_peers+i,ptl_proc->proc_peers+i+1,
@ -214,7 +214,7 @@ int mca_ptl_tcp_proc_remove(mca_ptl_tcp_proc_t* ptl_proc, mca_ptl_base_peer_t* p
}
ptl_proc->proc_peer_count--;
ptl_peer->peer_addr->addr_inuse--;
THREAD_UNLOCK(&ptl_proc->proc_lock);
OMPI_THREAD_UNLOCK(&ptl_proc->proc_lock);
return OMPI_SUCCESS;
}

@ -226,15 +226,15 @@ int mca_ptl_tcp_proc_remove(mca_ptl_tcp_proc_t* ptl_proc, mca_ptl_base_peer_t* p
bool mca_ptl_tcp_proc_accept(mca_ptl_tcp_proc_t* ptl_proc, struct sockaddr_in* addr, int sd)
{
size_t i;
THREAD_LOCK(&ptl_proc->proc_lock);
OMPI_THREAD_LOCK(&ptl_proc->proc_lock);
for(i=0; i<ptl_proc->proc_peer_count; i++) {
mca_ptl_base_peer_t* ptl_peer = ptl_proc->proc_peers[i];
if(mca_ptl_tcp_peer_accept(ptl_peer, addr, sd)) {
THREAD_UNLOCK(&ptl_proc->proc_lock);
OMPI_THREAD_UNLOCK(&ptl_proc->proc_lock);
return true;
}
}
THREAD_UNLOCK(&ptl_proc->proc_lock);
OMPI_THREAD_UNLOCK(&ptl_proc->proc_lock);
return false;
}
@ -55,7 +55,7 @@ MPI_Type_create_indexed_block(int count,
a_i[0] = &count;
a_i[1] = &blocklength;
a_i[2] = array_of_displacements;
ompi_ddt_set_args( *newtype, 2 + count, a_i, 0, NULL, 1, &oldtype, MPI_COMBINER_INDEXED_BLOCK );
}
return MPI_SUCCESS;
}
@ -1393,7 +1393,7 @@ void PMPI_TYPE_CREATE_F90_REAL(MPI_Fint *p, MPI_Fint *r, MPI_Fint *newtype, MPI_
void PMPI_TYPE_CREATE_HINDEXED(MPI_Fint *count, MPI_Fint *array_of_blocklengths, MPI_Fint *array_of_displacements, MPI_Fint *oldtype, MPI_Fint *newtype, MPI_Fint *ierr);
void PMPI_TYPE_CREATE_HVECTOR(MPI_Fint *count, MPI_Fint *blocklength, MPI_Fint *stride, MPI_Fint *oldtype, MPI_Fint *newtype, MPI_Fint *ierr);
void PMPI_TYPE_CREATE_KEYVAL(MPI_Fint *type_copy_attr_fn, MPI_Fint *type_delete_attr_fn, MPI_Fint *type_keyval, char *extra_state, MPI_Fint *ierr);
void PMPI_TYPE_CREATE_INDEXED_BLOCK(MPI_Fint *count, MPI_Fint *blocklength, MPI_Fint *array_of_displacements, MPI_Fint *oldtype, MPI_Fint *newtype, MPI_Fint *ierr);
void PMPI_TYPE_CREATE_STRUCT(MPI_Fint *count, MPI_Fint *array_of_block_lengths, MPI_Fint *array_of_displacements, MPI_Fint *array_of_types, MPI_Fint *newtype, MPI_Fint *ierr);
void PMPI_TYPE_CREATE_SUBARRAY(MPI_Fint *ndims, MPI_Fint *size_array, MPI_Fint *subsize_array, MPI_Fint *start_array, MPI_Fint *order, MPI_Fint *oldtype, MPI_Fint *newtype, MPI_Fint *ierr);
void PMPI_TYPE_CREATE_RESIZED(MPI_Fint *oldtype, MPI_Fint *lb, MPI_Fint *extent, MPI_Fint *newtype, MPI_Fint *ierr);
@ -1439,14 +1439,14 @@ void PMPI_WIN_GET_ATTR(MPI_Fint *win, MPI_Fint *win_keyval, char *attribute_val,
void PMPI_WIN_GET_ERRHANDLER(MPI_Fint *win, MPI_Fint *errhandler, MPI_Fint *ierr);
void PMPI_WIN_GET_GROUP(MPI_Fint *win, MPI_Fint *group, MPI_Fint *ierr);
void PMPI_WIN_GET_NAME(MPI_Fint *win, char *win_name, MPI_Fint *resultlen, MPI_Fint *ierr);
void PMPI_WIN_LOCK(MPI_Fint *lock_type, MPI_Fint *rank, MPI_Fint *assert, MPI_Fint *win, MPI_Fint *ierr);
void PMPI_WIN_POST(MPI_Fint *group, MPI_Fint *assert, MPI_Fint *win, MPI_Fint *ierr);
void PMPI_WIN_SET_ATTR(MPI_Fint *win, MPI_Fint *win_keyval, char *attribute_val, MPI_Fint *ierr);
void PMPI_WIN_SET_ERRHANDLER(MPI_Fint *win, MPI_Fint *errhandler, MPI_Fint *ierr);
void PMPI_WIN_SET_NAME(MPI_Fint *win, char *win_name, MPI_Fint *ierr);
void PMPI_WIN_START(MPI_Fint *group, MPI_Fint *assert, MPI_Fint *win, MPI_Fint *ierr);
void PMPI_WIN_TEST(MPI_Fint *win, MPI_Fint *flag, MPI_Fint *ierr);
void PMPI_WIN_UNLOCK(MPI_Fint *rank, MPI_Fint *win, MPI_Fint *ierr);
void PMPI_WIN_WAIT(MPI_Fint *win, MPI_Fint *ierr);
double *PMPI_WTICK(void);
double *PMPI_WTIME(void);
@ -1396,7 +1396,7 @@ void MPI_TYPE_CREATE_F90_REAL(MPI_Fint *p, MPI_Fint *r, MPI_Fint *newtype, MPI_F
void MPI_TYPE_CREATE_HINDEXED(MPI_Fint *count, MPI_Fint *array_of_blocklengths, MPI_Fint *array_of_displacements, MPI_Fint *oldtype, MPI_Fint *newtype, MPI_Fint *ierr);
void MPI_TYPE_CREATE_HVECTOR(MPI_Fint *count, MPI_Fint *blocklength, MPI_Fint *stride, MPI_Fint *oldtype, MPI_Fint *newtype, MPI_Fint *ierr);
void MPI_TYPE_CREATE_KEYVAL(MPI_Fint *type_copy_attr_fn, MPI_Fint *type_delete_attr_fn, MPI_Fint *type_keyval, char *extra_state, MPI_Fint *ierr);
void MPI_TYPE_CREATE_INDEXED_BLOCK(MPI_Fint *count, MPI_Fint *blocklength, MPI_Fint *array_of_displacements, MPI_Fint *oldtype, MPI_Fint *newtype, MPI_Fint *ierr);
void MPI_TYPE_CREATE_STRUCT(MPI_Fint *count, MPI_Fint *array_of_block_lengths, MPI_Fint *array_of_displacements, MPI_Fint *array_of_types, MPI_Fint *newtype, MPI_Fint *ierr);
void MPI_TYPE_CREATE_SUBARRAY(MPI_Fint *ndims, MPI_Fint *size_array, MPI_Fint *subsize_array, MPI_Fint *start_array, MPI_Fint *order, MPI_Fint *oldtype, MPI_Fint *newtype, MPI_Fint *ierr);
void MPI_TYPE_CREATE_RESIZED(MPI_Fint *oldtype, MPI_Fint *lb, MPI_Fint *extent, MPI_Fint *newtype, MPI_Fint *ierr);
@ -1442,14 +1442,14 @@ void MPI_WIN_GET_ATTR(MPI_Fint *win, MPI_Fint *win_keyval, char *attribute_val,
void MPI_WIN_GET_ERRHANDLER(MPI_Fint *win, MPI_Fint *errhandler, MPI_Fint *ierr);
void MPI_WIN_GET_GROUP(MPI_Fint *win, MPI_Fint *group, MPI_Fint *ierr);
void MPI_WIN_GET_NAME(MPI_Fint *win, char *win_name, MPI_Fint *resultlen, MPI_Fint *ierr);
void MPI_WIN_LOCK(MPI_Fint *lock_type, MPI_Fint *rank, MPI_Fint *assert, MPI_Fint *win, MPI_Fint *ierr);
void MPI_WIN_POST(MPI_Fint *group, MPI_Fint *assert, MPI_Fint *win, MPI_Fint *ierr);
void MPI_WIN_SET_ATTR(MPI_Fint *win, MPI_Fint *win_keyval, char *attribute_val, MPI_Fint *ierr);
void MPI_WIN_SET_ERRHANDLER(MPI_Fint *win, MPI_Fint *errhandler, MPI_Fint *ierr);
void MPI_WIN_SET_NAME(MPI_Fint *win, char *win_name, MPI_Fint *ierr);
void MPI_WIN_START(MPI_Fint *group, MPI_Fint *assert, MPI_Fint *win, MPI_Fint *ierr);
void MPI_WIN_TEST(MPI_Fint *win, MPI_Fint *flag, MPI_Fint *ierr);
void MPI_WIN_UNLOCK(MPI_Fint *rank, MPI_Fint *win, MPI_Fint *ierr);
void MPI_WIN_WAIT(MPI_Fint *win, MPI_Fint *ierr);
double MPI_WTICK(void);
double MPI_WTIME(void);
@ -10,12 +10,12 @@
#include "mpi/f77/bindings.h"

#if OMPI_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER
#pragma weak PMPI_TYPE_CREATE_INDEXED_BLOCK = mpi_type_create_indexed_block_f
#pragma weak pmpi_type_create_indexed_block = mpi_type_create_indexed_block_f
#pragma weak pmpi_type_create_indexed_block_ = mpi_type_create_indexed_block_f
#pragma weak pmpi_type_create_indexed_block__ = mpi_type_create_indexed_block_f
#elif OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (PMPI_TYPE_CREATE_INDEXED_BLOCK,
pmpi_type_create_indexed_block,
pmpi_type_create_indexed_block_,
pmpi_type_create_indexed_block__,
@ -25,14 +25,14 @@ OMPI_GENERATE_F77_BINDINGS (PMPI_TYPE_CREATE_INDEXED_BLOCK,
#endif

#if OMPI_HAVE_WEAK_SYMBOLS
#pragma weak MPI_TYPE_CREATE_INDEXED_BLOCK = mpi_type_create_indexed_block_f
#pragma weak mpi_type_create_indexed_block = mpi_type_create_indexed_block_f
#pragma weak mpi_type_create_indexed_block_ = mpi_type_create_indexed_block_f
#pragma weak mpi_type_create_indexed_block__ = mpi_type_create_indexed_block_f
#endif

#if ! OMPI_HAVE_WEAK_SYMBOLS && ! OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (MPI_TYPE_CREATE_INDEXED_BLOCK,
mpi_type_create_indexed_block,
mpi_type_create_indexed_block_,
mpi_type_create_indexed_block__,
@ -10,12 +10,12 @@
#include "mpi/f77/bindings.h"

#if OMPI_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER
#pragma weak PMPI_WIN_LOCK = mpi_win_lock_f
#pragma weak pmpi_win_lock = mpi_win_lock_f
#pragma weak pmpi_win_lock_ = mpi_win_lock_f
#pragma weak pmpi_win_lock__ = mpi_win_lock_f
#elif OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (PMPI_WIN_LOCK,
pmpi_win_lock,
pmpi_win_lock_,
pmpi_win_lock__,
@ -25,14 +25,14 @@ OMPI_GENERATE_F77_BINDINGS (PMPI_WIN_LOCK,
#endif

#if OMPI_HAVE_WEAK_SYMBOLS
#pragma weak MPI_WIN_LOCK = mpi_win_lock_f
#pragma weak mpi_win_lock = mpi_win_lock_f
#pragma weak mpi_win_lock_ = mpi_win_lock_f
#pragma weak mpi_win_lock__ = mpi_win_lock_f
#endif

#if ! OMPI_HAVE_WEAK_SYMBOLS && ! OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (MPI_WIN_LOCK,
mpi_win_lock,
mpi_win_lock_,
mpi_win_lock__,
@ -10,12 +10,12 @@
#include "mpi/f77/bindings.h"

#if OMPI_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER
#pragma weak PMPI_WIN_UNLOCK = mpi_win_unlock_f
#pragma weak pmpi_win_unlock = mpi_win_unlock_f
#pragma weak pmpi_win_unlock_ = mpi_win_unlock_f
#pragma weak pmpi_win_unlock__ = mpi_win_unlock_f
#elif OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (PMPI_WIN_UNLOCK,
pmpi_win_unlock,
pmpi_win_unlock_,
pmpi_win_unlock__,
@ -25,14 +25,14 @@ OMPI_GENERATE_F77_BINDINGS (PMPI_WIN_UNLOCK,
#endif

#if OMPI_HAVE_WEAK_SYMBOLS
#pragma weak MPI_WIN_UNLOCK = mpi_win_unlock_f
#pragma weak mpi_win_unlock = mpi_win_unlock_f
#pragma weak mpi_win_unlock_ = mpi_win_unlock_f
#pragma weak mpi_win_unlock__ = mpi_win_unlock_f
#endif

#if ! OMPI_HAVE_WEAK_SYMBOLS && ! OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (MPI_WIN_UNLOCK,
mpi_win_unlock,
mpi_win_unlock_,
mpi_win_unlock__,
@ -65,9 +65,9 @@
* macros
*/

#define ATOMIC_LOCK_INIT(LOCKPTR) spinunlock(LOCKPTR)
#define ATOMIC_LOCK(LOCKPTR) spinlock(LOCKPTR)
#define ATOMIC_UNLOCK(LOCKPTR) spinunlock(LOCKPTR)
#define ATOMIC_TRYLOCK(LOCKPTR) spintrylock(LOCKPTR)

#endif /* OMPI_ATOMIC_H_INCLUDED */
@ -9,7 +9,7 @@
* Lock structure
*/

enum { LOCK_UNLOCKED = 1 };

typedef struct {
union {
@ -185,7 +185,7 @@ static inline void setBigAtomicUnsignedInt(bigAtomicUnsignedInt *addr,
unsigned long long value)
{
addr->data = value;
addr->lock.data.lockData_m = LOCK_UNLOCKED;
}

#endif /* CYGWIN_I686_ATOMIC_H_INCLUDED */
@ -19,7 +19,7 @@
* Lock structure
*/

enum { LOCK_UNLOCKED = 0 };

typedef struct {
union {
@ -92,7 +92,7 @@ static inline int spintrylock(ompi_lock_data_t *lockData)
*/
static inline void spinunlock(ompi_lock_data_t *lockData)
{
lockData->data.lockData_m = LOCK_UNLOCKED;
}


@ -180,7 +180,7 @@ static inline void setBigAtomicUnsignedInt(bigAtomicUnsignedInt *addr,
unsigned long long value)
{
addr->data = value;
addr->lock.data.lockData_m = LOCK_UNLOCKED;
}

#endif /* DARWIN_POWERPC_ATOMIC_H_INCLUDED */
@ -23,7 +23,7 @@ extern "C" {
|
||||
* Lock structure
|
||||
*/
|
||||
|
||||
enum { LOCK_UNLOCKED = 0 };
|
||||
enum { OMPI_LOCK_OMPI_UNLOCKED = 0 };
|
||||
|
||||
typedef struct {
|
||||
union {
|
||||
@ -93,7 +93,7 @@ static inline int spintrylock(ompi_lock_data_t *lockData)
|
||||
*/
|
||||
static inline void spinunlock(ompi_lock_data_t *lockData)
|
||||
{
|
||||
lockData->data.lockData_m = LOCK_UNLOCKED;
|
||||
lockData->data.lockData_m = OMPI_LOCK_OMPI_UNLOCKED;
|
||||
}
|
||||
|
||||
|
||||
|
@ -15,7 +15,7 @@ typedef volatile unsigned long long bigAtomicUnsignedInt;
/*
* Lock structure
*/
enum { LOCK_UNLOCKED = 0 };

typedef struct {
union {
@ -28,7 +28,7 @@ CDECL_BEGIN

static inline void spinunlock(ompi_lock_data_t *ctlData_m)
{
ctlData_m->data.lockData_m = LOCK_UNLOCKED;
}

void spinlock(ompi_lock_data_t *);
@ -18,7 +18,7 @@ CDECL_BEGIN
/*
* Lock structure
*/
enum { LOCK_UNLOCKED = 0 };

typedef struct {
union {
@ -9,7 +9,7 @@
* Lock structure
*/

enum { LOCK_UNLOCKED = 1 };

typedef struct {
union {
@ -185,7 +185,7 @@ static inline void setBigAtomicUnsignedInt(bigAtomicUnsignedInt *addr,
unsigned long long value)
{
addr->data = value;
addr->lock.data.lockData_m = LOCK_UNLOCKED;
}

#endif /* LINUX_I686_ATOMIC_H_INCLUDED */
@ -9,7 +9,7 @@
* Lock structure
*/

enum { LOCK_UNLOCKED = 1 };

typedef struct {
union {
@ -121,7 +121,7 @@ inline int fetchNset(volatile int *addr, int setValue)
*/
inline void spinunlock(ompi_lock_data_t *lockData)
{
lockData->data.lockData_m = LOCK_UNLOCKED;
}

inline unsigned long long fetchNaddLong(bigAtomicUnsignedInt *addr,
@ -167,7 +167,7 @@ inline void setBigAtomicUnsignedInt(bigAtomicUnsignedInt *addr,
unsigned long long value)
{
addr->data = value;
addr->lock.data.lockData_m = LOCK_UNLOCKED;
}

#endif /* LINUX_IA64_ATOMIC_H_INCLUDED */
@ -9,7 +9,7 @@
* Lock structure
*/

enum { LOCK_UNLOCKED = 1 };

typedef struct {
union {
@ -184,7 +184,7 @@ static inline void setBigAtomicUnsignedInt(bigAtomicUnsignedInt *addr,
unsigned long long value)
{
addr->data = value;
addr->lock.data.lockData_m = LOCK_UNLOCKED;
}

#endif /* LINUX_X86_64_ATOMIC_H_INCLUDED */
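Each per-architecture header above implements the same contract with hand-written code: spinunlock() stores that platform's LOCK_UNLOCKED value (0 or 1, depending on the architecture), and spinlock() busy-waits until it atomically claims the lock word. A portable sketch of that contract using C11 atomics, for illustration only (the real headers predate C11):

#include <stdatomic.h>

enum { LOCK_UNLOCKED = 0, LOCK_LOCKED = 1 };

typedef struct { atomic_int lockData_m; } lock_data_t;

static inline void spinlock_sketch(lock_data_t *l)
{
    /* spin until the previous value was UNLOCKED, i.e. we took the lock */
    while (atomic_exchange(&l->lockData_m, LOCK_LOCKED) != LOCK_UNLOCKED)
        ;                                        /* busy wait */
}

static inline void spinunlock_sketch(lock_data_t *l)
{
    atomic_store(&l->lockData_m, LOCK_UNLOCKED); /* release, as above */
}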
@ -17,7 +17,7 @@
/*
* Lock structure
*/
enum { LOCK_UNLOCKED = 0 };

typedef struct {
union {
@ -88,7 +88,7 @@ static inline int spintrylock(ompi_lock_data_t *lock)
static inline void spinunlock(ompi_lock_data_t *lock)
{
asm("mb");
lock->data.lockData_m = LOCK_UNLOCKED;
}

static inline int fetchNadd(volatile int *addr, int inc)
@ -38,17 +38,17 @@ void ompi_proc_construct(ompi_proc_t* proc)
/* FIX - need to determine remote process architecture */
proc->proc_convertor = ompi_convertor_create(0, 0);

THREAD_LOCK(&ompi_proc_lock);
OMPI_THREAD_LOCK(&ompi_proc_lock);
ompi_list_append(&ompi_proc_list, (ompi_list_item_t*)proc);
THREAD_UNLOCK(&ompi_proc_lock);
OMPI_THREAD_UNLOCK(&ompi_proc_lock);
}


void ompi_proc_destruct(ompi_proc_t* proc)
{
THREAD_LOCK(&ompi_proc_lock);
OMPI_THREAD_LOCK(&ompi_proc_lock);
ompi_list_remove_item(&ompi_proc_list, (ompi_list_item_t*)proc);
THREAD_UNLOCK(&ompi_proc_lock);
OMPI_THREAD_UNLOCK(&ompi_proc_lock);
OBJ_DESTRUCT(&proc->proc_lock);
}

@ -104,7 +104,7 @@ ompi_proc_t** ompi_proc_world(size_t *size)
return NULL;

/* return only the procs that match this jobid */
THREAD_LOCK(&ompi_proc_lock);
OMPI_THREAD_LOCK(&ompi_proc_lock);
for(proc = (ompi_proc_t*)ompi_list_get_first(&ompi_proc_list);
proc != (ompi_proc_t*)ompi_list_get_end(&ompi_proc_list);
proc = (ompi_proc_t*)ompi_list_get_next(proc)) {
@ -113,7 +113,7 @@ ompi_proc_t** ompi_proc_world(size_t *size)
procs[count++] = proc;
}
}
THREAD_UNLOCK(&ompi_proc_lock);
OMPI_THREAD_UNLOCK(&ompi_proc_lock);
*size = count;
return procs;
}
@ -128,14 +128,14 @@ ompi_proc_t** ompi_proc_all(size_t* size)
if(NULL == procs)
return NULL;

THREAD_LOCK(&ompi_proc_lock);
OMPI_THREAD_LOCK(&ompi_proc_lock);
for(proc = (ompi_proc_t*)ompi_list_get_first(&ompi_proc_list);
proc != (ompi_proc_t*)ompi_list_get_end(&ompi_proc_list);
proc = (ompi_proc_t*)ompi_list_get_next(proc)) {
OBJ_RETAIN(proc);
procs[count++] = proc;
}
THREAD_UNLOCK(&ompi_proc_lock);
OMPI_THREAD_UNLOCK(&ompi_proc_lock);
*size = count;
return procs;
}
@ -157,7 +157,7 @@ ompi_proc_t * ompi_proc_find ( ompi_job_handle_t jobid, uint32_t vpid )
ompi_proc_t *proc;

/* return the proc-struct which matches this jobid+process id */
THREAD_LOCK(&ompi_proc_lock);
OMPI_THREAD_LOCK(&ompi_proc_lock);
for(proc = (ompi_proc_t*)ompi_list_get_first(&ompi_proc_list);
proc != (ompi_proc_t*)ompi_list_get_end(&ompi_proc_list);
proc = (ompi_proc_t*)ompi_list_get_next(proc)) {
@ -167,6 +167,6 @@ ompi_proc_t * ompi_proc_find ( ompi_job_handle_t jobid, uint32_t vpid )
break;
}
}
THREAD_UNLOCK(&ompi_proc_lock);
OMPI_THREAD_UNLOCK(&ompi_proc_lock);
return proc;
}
@ -1,8 +1,8 @@
/*
* $HEADER$
*/
#ifndef OMPI_CONDITION_SPINLOCK_H
#define OMPI_CONDITION_SPINLOCK_H

#include "threads/condition.h"
#include "threads/mutex.h"
@ -72,7 +72,7 @@ static inline bool ompi_using_threads(void)
* If there is no possibility that multiple threads are running in the
* process, invoke the action without acquiring the lock.
*/
#define THREAD_SCOPED_LOCK(mutex,action) \
#define OMPI_THREAD_SCOPED_LOCK(mutex,action) \
if(ompi_using_threads()) { \
ompi_mutex_lock(mutex); \
(action); \
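A hypothetical use of the macro defined above: the action runs under the mutex only when ompi_using_threads() reports a multi-threaded process (list_lock, pending and item are made-up names, not real Open MPI globals):

OMPI_THREAD_SCOPED_LOCK(&list_lock,
    ompi_list_append(&pending, (ompi_list_item_t *)item));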
@ -121,7 +121,7 @@ static inline bool ompi_set_using_threads(bool have)
* If there is no possibility that multiple threads are running in the
* process, return immediately.
*/
#define THREAD_LOCK(a) if (ompi_using_threads()) \
#define OMPI_THREAD_LOCK(a) if (ompi_using_threads()) \
ompi_mutex_lock((a));

/*
@ -137,7 +137,7 @@ static inline bool ompi_set_using_threads(bool have)
* If there is no possibility that multiple threads are running in the
* process, return immediately without modifying the mutex.
*/
#define THREAD_UNLOCK(a) if (ompi_using_threads()) \
#define OMPI_THREAD_UNLOCK(a) if (ompi_using_threads()) \
ompi_mutex_unlock((a));

/**
@ -150,7 +150,7 @@ static inline bool ompi_set_using_threads(bool have)
* multiple threads or not. This is useful, for example, with shared
* memory.
*/
#define LOCK(a) ompi_mutex_lock((a))
#define OMPI_LOCK(a) ompi_mutex_lock((a))

/**
* Always unlocks a mutex (never compile- or run-time removed)
@ -162,7 +162,7 @@ static inline bool ompi_set_using_threads(bool have)
* process has multiple threads or not. This is useful, for example,
* with shared memory.
*/
#define UNLOCK(a) ompi_mutex_unlock((a));
#define OMPI_UNLOCK(a) ompi_mutex_unlock((a));

#endif
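Taken together, mutex.h now defines two families: OMPI_THREAD_LOCK/OMPI_THREAD_UNLOCK compile down to nothing in single-threaded runs, while OMPI_LOCK/OMPI_UNLOCK always operate on the mutex, which the comments recommend for state that other processes can reach through shared memory. A hypothetical fragment contrasting the two (the helper functions are made-up names):

OMPI_THREAD_LOCK(&proc_lock);   /* skipped when ompi_using_threads() is false */
update_local_state();           /* hypothetical helper */
OMPI_THREAD_UNLOCK(&proc_lock);

OMPI_LOCK(&shmem_lock);         /* always locks: other processes may race */
update_shared_segment();        /* hypothetical helper */
OMPI_UNLOCK(&shmem_lock);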
@ -2,8 +2,8 @@
* $HEADER$
*/

#ifndef OMPI_MUTEX_SPINLOCK_
#define OMPI_MUTEX_SPINLOCK_

#include "class/ompi_object.h"
#include "os/atomic.h"
@ -215,7 +215,7 @@ void ompi_output_close(int output_id)

/* If no one has the syslog open, we should close it */

THREAD_LOCK(&mutex);
OMPI_THREAD_LOCK(&mutex);
for (i = 0; i < OMPI_OUTPUT_MAX_STREAMS; ++i)
if (info[i].ldi_used && info[i].ldi_syslog)
break;
@ -229,7 +229,7 @@ void ompi_output_close(int output_id)
temp_str = NULL;
temp_str_len = 0;
}
THREAD_UNLOCK(&mutex);
OMPI_THREAD_UNLOCK(&mutex);
}


@ -310,12 +310,12 @@ static int do_open(int output_id, ompi_output_stream_t *lds)
OMPI_ERROR */

if (-1 == output_id) {
THREAD_LOCK(&mutex);
OMPI_THREAD_LOCK(&mutex);
for (i = 0; i < OMPI_OUTPUT_MAX_STREAMS; ++i)
if (!info[i].ldi_used)
break;
if (i >= OMPI_OUTPUT_MAX_STREAMS) {
THREAD_UNLOCK(&mutex);
OMPI_THREAD_UNLOCK(&mutex);
return OMPI_ERR_OUT_OF_RESOURCE;
}
}
@ -338,7 +338,7 @@ static int do_open(int output_id, ompi_output_stream_t *lds)
/* Got a stream -- now initialize it and open relevant outputs */

info[i].ldi_used = true;
THREAD_UNLOCK(&mutex);
OMPI_THREAD_UNLOCK(&mutex);
info[i].ldi_enabled = lds->lds_is_debugging ? (bool) OMPI_ENABLE_DEBUG : true;
info[i].ldi_verbose_level = 0;

@ -469,7 +469,7 @@ static void output(int output_id, char *format, va_list arglist)

/* Make the formatted string */

THREAD_LOCK(&mutex);
OMPI_THREAD_LOCK(&mutex);
str = ompi_vsnprintf(format, arglist);
total_len = len = strlen(str);
if ('\n' != str[len - 1]) {
@ -519,7 +519,7 @@ static void output(int output_id, char *format, va_list arglist)

if (ldi->ldi_fd != -1)
write(ldi->ldi_fd, temp_str, total_len);
THREAD_UNLOCK(&mutex);
OMPI_THREAD_UNLOCK(&mutex);

free(str);
}