
The standard C (ISO C) does not allow macros returning values. Switch to an inline function.

This commit was SVN r4023.
George Bosilca 2005-01-17 21:26:17 +00:00
parent 653b259d75
commit 20ddf644e2
2 changed files with 140 additions and 157 deletions
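
For context, here is a minimal sketch of the portability problem this commit fixes (the toy_fifo_t type and toy_* names are hypothetical, not Open MPI code). The removed macros rely on the GCC "statement expression" extension, in which the value of a ({ ... }) block is its last expression; strict ISO C compilers reject that construct, while a static inline function expresses the same logic portably.

/* GCC extension: the whole ({ ... }) block "yields" idx_.  Not ISO C. */
#define toy_fifo_advance_gnu(f)                   \
    ({ int idx_ = (f)->fifo_index;                \
       (f)->fifo_index = (idx_ + 1) & (f)->mask;  \
       idx_; })

/* Portable ISO C replacement: same behavior, written as an inline function. */
typedef struct { int fifo_index; int mask; } toy_fifo_t;   /* hypothetical */

static inline int toy_fifo_advance(toy_fifo_t *f)
{
    int idx = f->fifo_index;
    f->fifo_index = (idx + 1) & f->mask;
    return idx;
}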

View file

@@ -676,66 +676,59 @@ static inline int ompi_cb_fifo_get_slot_same_base_addr(ompi_cb_fifo_t *fifo)
 * @returncode Slot index to which data is written
 *
 */
/*
static inline void *ompi_cb_fifo_read_from_tail_same_base_addr(
        ompi_cb_fifo_t *fifo,
        bool flush_entries_read, bool *queue_empty)
*/
#define ompi_cb_fifo_read_from_tail_same_base_addr(fifo, \
        flush_entries_read, queue_empty) \
({ \
    int index = 0,clearIndex, i; \
    volatile void **q_ptr; \
    ompi_cb_fifo_ctl_t *h_ptr, *t_ptr; \
    void *read_from_tail = (void *)OMPI_CB_ERROR; \
\
    *queue_empty=false; \
\
    h_ptr=fifo->head; \
    t_ptr=fifo->tail; \
    q_ptr=fifo->queue; \
\
    /* check to see that the data is valid */ \
    if ((q_ptr[t_ptr->fifo_index] == OMPI_CB_FREE) || \
        (q_ptr[t_ptr->fifo_index] == OMPI_CB_RESERVED)) \
    { \
        read_from_tail=(void *)OMPI_CB_FREE; \
        goto CLEANUP; \
    } \
\
    /* set return data */ \
    index = t_ptr->fifo_index; \
    read_from_tail = (void *)q_ptr[index]; \
    t_ptr->num_to_clear++; \
\
    /* increment counter for later lazy free */ \
    (t_ptr->fifo_index)++; \
    (t_ptr->fifo_index) &= fifo->mask; \
\
    /* check to see if time to do a lazy free of queue slots */ \
    if ( (t_ptr->num_to_clear == fifo->lazy_free_frequency) || \
         flush_entries_read ) { \
        clearIndex = index - t_ptr->num_to_clear + 1; \
        clearIndex &= fifo->mask; \
\
        for (i = 0; i < t_ptr->num_to_clear; i++) { \
            q_ptr[clearIndex] = OMPI_CB_FREE; \
            clearIndex++; \
            clearIndex &= fifo->mask; \
        } \
        t_ptr->num_to_clear = 0; \
\
        /* check to see if queue is empty */ \
        if( flush_entries_read && \
            (t_ptr->fifo_index == h_ptr->fifo_index) ) { \
            *queue_empty=true; \
        } \
    } \
\
\
CLEANUP: \
    /* return */ \
    read_from_tail; \
})
static inline void *ompi_cb_fifo_read_from_tail_same_base_addr(
        ompi_cb_fifo_t *fifo,
        bool flush_entries_read, bool *queue_empty)
{
    int index = 0,clearIndex, i;
    volatile void **q_ptr;
    ompi_cb_fifo_ctl_t *h_ptr, *t_ptr;
    void *read_from_tail = (void *)OMPI_CB_ERROR;

    *queue_empty=false;
    h_ptr=fifo->head;
    t_ptr=fifo->tail;
    q_ptr=fifo->queue;

    /* check to see that the data is valid */
    if ((q_ptr[t_ptr->fifo_index] == OMPI_CB_FREE) ||
        (q_ptr[t_ptr->fifo_index] == OMPI_CB_RESERVED)) {
        read_from_tail=(void *)OMPI_CB_FREE;
        goto CLEANUP;
    }

    /* set return data */
    index = t_ptr->fifo_index;
    read_from_tail = (void *)q_ptr[index];
    t_ptr->num_to_clear++;

    /* increment counter for later lazy free */
    (t_ptr->fifo_index)++;
    (t_ptr->fifo_index) &= fifo->mask;

    /* check to see if time to do a lazy free of queue slots */
    if ( (t_ptr->num_to_clear == fifo->lazy_free_frequency) ||
         flush_entries_read ) {
        clearIndex = index - t_ptr->num_to_clear + 1;
        clearIndex &= fifo->mask;

        for (i = 0; i < t_ptr->num_to_clear; i++) {
            q_ptr[clearIndex] = OMPI_CB_FREE;
            clearIndex++;
            clearIndex &= fifo->mask;
        }
        t_ptr->num_to_clear = 0;

        /* check to see if queue is empty */
        if( flush_entries_read &&
            (t_ptr->fifo_index == h_ptr->fifo_index) ) {
            *queue_empty=true;
        }
    }

 CLEANUP:
    return read_from_tail;
}
#endif /* !_OMPI_CIRCULAR_BUFFER_FIFO */
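
As a hedged usage sketch (not part of this commit), the new inline reader can be called like any ordinary function. The hypothetical helper below relies only on what the diff shows: OMPI_CB_FREE comes back when the current slot holds no data, and *queue_empty is set when a flushing read catches the tail up with the head. The ompi_cb_fifo_t type, the constants, and bool are assumed to come from the surrounding header context.

/* Hypothetical helper: drain one circular buffer using the new reader. */
static inline void drain_cb_fifo_sketch(ompi_cb_fifo_t *cb_fifo)
{
    bool queue_empty = false;   /* set by the reader on a flushing read */
    void *item;

    do {
        item = ompi_cb_fifo_read_from_tail_same_base_addr(cb_fifo,
                                                          true, /* flush entries read */
                                                          &queue_empty);
        if (OMPI_CB_FREE == item) {
            break;              /* current slot is empty - nothing to consume */
        }
        /* ... consume item here ... */
    } while (!queue_empty);
}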

View file

@@ -505,80 +505,73 @@ static inline int ompi_fifo_write_to_slot_same_base_addr(cb_slot_t *slot,
 * @returncode Slot index to which data is written
 *
 */
/*
static inline int ompi_fifo_write_to_head_same_base_addr(void *data,
        ompi_fifo_t *fifo, mca_mpool_base_module_t *fifo_allocator)
*/
#define ompi_fifo_write_to_head_same_base_addr(data, \
        fifo, fifo_allocator) \
({ \
    int error_code=OMPI_SUCCESS; \
    size_t len_to_allocate; \
    ompi_cb_fifo_wrapper_t *next_ff; \
    bool available; \
\
    /* attempt to write data to head ompi_fifo_cb_fifo_t */ \
    error_code=ompi_cb_fifo_write_to_head_same_base_addr(data, \
            (ompi_cb_fifo_t *)&(fifo->head->cb_fifo)); \
    if( OMPI_CB_ERROR == error_code ) { \
        /* \
         * queue is full \
         */ \
\
        /* mark queue as overflown */ \
        fifo->head->cb_overflow=true; \
\
        /* see if next queue is available - while the next queue \
         * has not been emptied, it will be marked as overflowen*/ \
        next_ff=(ompi_cb_fifo_wrapper_t *)fifo->head->next_fifo_wrapper; \
        available=!(next_ff->cb_overflow); \
\
        /* if next queue not available, allocate new queue */ \
        if( !available ) { \
\
            /* allocate head ompi_cb_fifo_t structure */ \
            len_to_allocate=sizeof(ompi_cb_fifo_wrapper_t); \
            next_ff=fifo_allocator->mpool_alloc \
                (len_to_allocate,CACHE_LINE_SIZE); \
            if ( NULL == next_ff) { \
                return OMPI_ERR_OUT_OF_RESOURCE; \
            } \
\
            /* initialize the circular buffer fifo head structure */ \
            error_code=ompi_cb_fifo_init_same_base_addr( \
                    fifo->head->cb_fifo.size, \
                    fifo->head->cb_fifo.lazy_free_frequency, \
                    fifo->head->cb_fifo.fifo_memory_locality_index, \
                    fifo->head->cb_fifo.head_memory_locality_index, \
                    fifo->head->cb_fifo.tail_memory_locality_index, \
                    &(next_ff->cb_fifo), \
                    fifo_allocator); \
            if ( OMPI_SUCCESS != error_code ) { \
                return error_code; \
            } \
\
            /* finish new element initialization */ \
            next_ff->next_fifo_wrapper=fifo->head->next_fifo_wrapper; /* only one \
                                                                         element in the \
                                                                         link list */ \
            next_ff->cb_overflow=false; /* no attempt to overflow the queue */ \
        } \
\
        /* reset head pointer */ \
        fifo->head->next_fifo_wrapper=next_ff; \
        fifo->head=next_ff; \
\
        /* write data to new head structure */ \
        error_code=ompi_cb_fifo_write_to_head_same_base_addr(data, \
                (ompi_cb_fifo_t *)&(fifo->head->cb_fifo)); \
        if( OMPI_CB_ERROR == error_code ) { \
            return error_code; \
        } \
    } \
\
    /* return */ \
    error_code; \
}) \
static inline int ompi_fifo_write_to_head_same_base_addr(void *data,
        ompi_fifo_t *fifo, mca_mpool_base_module_t *fifo_allocator)
{
    int error_code=OMPI_SUCCESS;
    size_t len_to_allocate;
    ompi_cb_fifo_wrapper_t *next_ff;
    bool available;

    /* attempt to write data to head ompi_fifo_cb_fifo_t */
    error_code=ompi_cb_fifo_write_to_head_same_base_addr(data,
            (ompi_cb_fifo_t *)&(fifo->head->cb_fifo));
    if( OMPI_CB_ERROR == error_code ) {
        /*
         * queue is full
         */

        /* mark queue as overflown */
        fifo->head->cb_overflow=true;

        /* see if next queue is available - while the next queue
         * has not been emptied, it will be marked as overflowen*/
        next_ff=(ompi_cb_fifo_wrapper_t *)fifo->head->next_fifo_wrapper;
        available=!(next_ff->cb_overflow);

        /* if next queue not available, allocate new queue */
        if( !available ) {
            /* allocate head ompi_cb_fifo_t structure */
            len_to_allocate=sizeof(ompi_cb_fifo_wrapper_t);
            next_ff=fifo_allocator->mpool_alloc
                (len_to_allocate,CACHE_LINE_SIZE);
            if ( NULL == next_ff) {
                return OMPI_ERR_OUT_OF_RESOURCE;
            }

            /* initialize the circular buffer fifo head structure */
            error_code=ompi_cb_fifo_init_same_base_addr(
                    fifo->head->cb_fifo.size,
                    fifo->head->cb_fifo.lazy_free_frequency,
                    fifo->head->cb_fifo.fifo_memory_locality_index,
                    fifo->head->cb_fifo.head_memory_locality_index,
                    fifo->head->cb_fifo.tail_memory_locality_index,
                    &(next_ff->cb_fifo),
                    fifo_allocator);
            if ( OMPI_SUCCESS != error_code ) {
                return error_code;
            }

            /* finish new element initialization */
            next_ff->next_fifo_wrapper=fifo->head->next_fifo_wrapper; /* only one element in the link list */
            next_ff->cb_overflow=false; /* no attempt to overflow the queue */
        }

        /* reset head pointer */
        fifo->head->next_fifo_wrapper=next_ff;
        fifo->head=next_ff;

        /* write data to new head structure */
        error_code=ompi_cb_fifo_write_to_head_same_base_addr(data,
                (ompi_cb_fifo_t *)&(fifo->head->cb_fifo));
        if( OMPI_CB_ERROR == error_code ) {
            return error_code;
        }
    }

    return error_code;
}
/**
@@ -674,32 +674,29 @@ static inline cb_slot_t ompi_fifo_get_slot_same_base_addr(ompi_fifo_t *fifo,
 * @returncode Pointer - OMPI_CB_FREE indicates no data to read
 *
 */
/*
static inline void *ompi_fifo_read_from_tail_same_base_addr(
        ompi_fifo_t *fifo)
*/
#define ompi_fifo_read_from_tail_same_base_addr( fifo ) \
({ \
    /* local parameters */ \
    void *return_value; \
    bool queue_empty,flush_entries_read; \
    ompi_cb_fifo_t *cb_fifo; \
\
    /* get next element */ \
    cb_fifo=(ompi_cb_fifo_t *)&(fifo->tail->cb_fifo); \
    flush_entries_read=fifo->tail->cb_overflow; \
    return_value=ompi_cb_fifo_read_from_tail_same_base_addr(cb_fifo, \
            flush_entries_read,&queue_empty); \
\
    /* check to see if need to move on to next cb_fifo in the link list */ \
    if( queue_empty ) { \
        /* queue_emptied - move on to next element in fifo */ \
        fifo->tail->cb_overflow=false; \
        fifo->tail=fifo->tail->next_fifo_wrapper; \
    } \
\
    /* return */ \
    return_value; \
})
static inline
void *ompi_fifo_read_from_tail_same_base_addr( ompi_fifo_t *fifo)
{
    /* local parameters */
    void *return_value;
    bool queue_empty, flush_entries_read;
    ompi_cb_fifo_t *cb_fifo;

    /* get next element */
    cb_fifo=(ompi_cb_fifo_t *)&(fifo->tail->cb_fifo);
    flush_entries_read=fifo->tail->cb_overflow;
    return_value = ompi_cb_fifo_read_from_tail_same_base_addr( cb_fifo,
                                                               flush_entries_read,
                                                               &queue_empty);

    /* check to see if need to move on to next cb_fifo in the link list */
    if( queue_empty ) {
        /* queue_emptied - move on to next element in fifo */
        fifo->tail->cb_overflow=false;
        fifo->tail=fifo->tail->next_fifo_wrapper;
    }

    return return_value;
}
#endif /* !_OMPI_FIFO */
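
A hedged end-to-end sketch (illustrative only, not taken from the commit) of how the two converted ompi_fifo-level functions pair up, based on the signatures and documented return values shown above. The produce_one_sketch/consume_all_sketch helpers and the idea of an already-initialized fifo and allocator are hypothetical; the types and constants are assumed to come from the surrounding header context.

/* producer side: write one item; on overflow the write path above grows
 * the linked list of circular buffers internally */
static inline int produce_one_sketch(ompi_fifo_t *my_fifo,
                                     mca_mpool_base_module_t *my_allocator,
                                     void *item)
{
    int rc = ompi_fifo_write_to_head_same_base_addr(item, my_fifo, my_allocator);
    if (OMPI_CB_ERROR == rc) {
        /* write failed even after the overflow handling; other OMPI error
         * codes (e.g. OMPI_ERR_OUT_OF_RESOURCE) can also be returned */
    }
    return rc;
}

/* consumer side: read until the documented "no data" marker is returned */
static inline void consume_all_sketch(ompi_fifo_t *my_fifo)
{
    void *msg;
    while (OMPI_CB_FREE != (msg = ompi_fifo_read_from_tail_same_base_addr(my_fifo))) {
        /* ... process msg ... */
    }
}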