
Remove the compilation warnings. Change my_smp_rank to uint32_t and its default value from -1 to 0xffffffff. Remove the volatile from the lock definitions, as it is useless there, i.e. only the internal integer should be volatile (and that's already the case).
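
To make both points concrete, here is a minimal sketch (hypothetical names, not the actual Open MPI declarations):

#include <stdint.h>

/* Only the field that a spinning thread keeps re-reading needs the
 * volatile qualifier; marking the whole lock object volatile adds
 * nothing on top of that. */
typedef struct {
    volatile int locked;               /* re-read on every spin iteration */
} sketch_atomic_lock_t;

static sketch_atomic_lock_t head_lock; /* no volatile on the lock itself */

/* A uint32_t cannot hold -1: the old -1 initializer wrapped to
 * 0xFFFFFFFF implicitly, so the sentinel is now written out explicitly. */
static uint32_t my_smp_rank = 0xFFFFFFFF; /* "not defined" sentinel */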

Left a couple of notes for the SM maintainer:
- one warning in ptl_sm.c. To me it looks like a bug (line 856). The PTL does not have to signal that the request is completed; that's the PML's job. Anyway, ompi_request_complete is called with a mca_pml_base_send_request_t, and that's not exactly how it should work.
- in ptl_sm.c line 122 there is a definition (volatile char** tmp_ptr). As far as I understand the code, it's not the char** that should be volatile, but the pointer to char (see the sketch after these notes).
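
For the second note, a quick illustration of where the volatile qualifier binds (made-up declarations, illustration only):

/* volatile_placement.c -- which part of the type is volatile? */
volatile char **p1;    /* p1 points to a pointer to volatile char         */
char * volatile *p2;   /* p2 points to a volatile pointer to char         */
char ** volatile p3;   /* p3 itself is volatile; what it points to is not */

The declaration at line 122 is the first form; if I read the note right, the intent is closer to the second.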

This commit was SVN r4024.
George Bosilca 2005-01-17 21:37:56 +00:00
parent 20ddf644e2
commit 72a28e7796
3 changed files with 14 additions and 13 deletions

ptl_sm.c

@@ -119,7 +119,7 @@ int mca_ptl_sm_add_procs_same_base_addr(
     ompi_fifo_t * volatile *fifo_tmp;
     bool same_sm_base;
     ssize_t diff;
-    char **tmp_ptr;
+    volatile char **tmp_ptr;
 
     /* initializion */
     for(i=0 ; i < nprocs ; i++ ) {
@@ -214,7 +214,7 @@ int mca_ptl_sm_add_procs_same_base_addr(
     }
 
     /* make sure that my_smp_rank has been defined */
-    if(-1 == mca_ptl_sm_component.my_smp_rank){
+    if( 0xFFFFFFFF == mca_ptl_sm_component.my_smp_rank ) {
        return_code=OMPI_ERROR;
        goto CLEANUP;
    }
@@ -600,7 +600,8 @@ int mca_ptl_sm_add_procs(
     struct mca_ptl_base_peer_t **peers,
     ompi_bitmap_t* reachability)
 {
-    int return_code=OMPI_SUCCESS,proc,n_local_procs,tmp_cnt;
+    int return_code = OMPI_SUCCESS, tmp_cnt;
+    uint32_t proc, n_local_procs;
 
     /* initializion */
     for(proc=0 ; proc < nprocs ; proc++ ) {
@@ -845,7 +846,7 @@ int mca_ptl_sm_send(
         return_status=OMPI_SUCCESS;
     }
 
-    /* release threa lock */
+    /* release thread lock */
     if( ompi_using_threads() ) {
         ompi_atomic_unlock(&(send_fifo->head_lock));
     }
@@ -877,7 +878,7 @@ int mca_ptl_sm_send_continue(
     mca_ptl_sm_send_request_t *sm_request;
     int my_local_smp_rank, peer_local_smp_rank, return_code;
     int return_status=OMPI_SUCCESS, free_after=0;
-    volatile ompi_fifo_t *send_fifo;
+    ompi_fifo_t *send_fifo;
     mca_ptl_base_header_t* hdr;
     void *sm_data_ptr ;
     ompi_list_item_t* item;

ptl_sm.h

@@ -111,8 +111,8 @@ struct mca_ptl_sm_component_t {
     int *list_smp_procs_different_base_addr;  /* number of procs with different
                                                  base shared memory virtual
                                                  address as this process */
-    int my_smp_rank;       /**< My SMP process rank.  Used for accessing
-                            *   SMP specfic data structures. */
+    uint32_t my_smp_rank;  /**< My SMP process rank.  Used for accessing
+                            *   SMP specfic data structures. */
     ompi_free_list_t sm_first_frags;   /**< free list of sm first
                                             fragments */
     ompi_free_list_t sm_second_frags;  /**< free list of sm second

ptl_sm_component.c

@@ -305,8 +305,8 @@ mca_ptl_base_module_t** mca_ptl_sm_component_init(
 
     /* initialize some PTL data */
     /* start with no SM procs */
-    mca_ptl_sm_component.num_smp_procs=0;
-    mca_ptl_sm_component.my_smp_rank=-1;
+    mca_ptl_sm_component.num_smp_procs = 0;
+    mca_ptl_sm_component.my_smp_rank = 0xFFFFFFFF;  /* not defined */
 
     /* set flag indicating ptl not inited */
     mca_ptl_sm[0].ptl_inited=false;
@@ -360,7 +360,7 @@ int mca_ptl_sm_component_progress(mca_ptl_tstamp_t tstamp)
     int my_local_smp_rank, proc;
     unsigned int peer_local_smp_rank ;
     mca_ptl_sm_frag_t *header_ptr;
-    volatile ompi_fifo_t *send_fifo;
+    ompi_fifo_t *send_fifo;
     bool frag_matched;
     mca_ptl_base_match_header_t *matching_header;
     mca_pml_base_send_request_t *base_send_req;
@@ -395,13 +395,13 @@ int mca_ptl_sm_component_progress(mca_ptl_tstamp_t tstamp)
         /* aquire thread lock */
         if( ompi_using_threads() ) {
-            ompi_atomic_lock(&(send_fifo->tail_lock));
+            ompi_atomic_lock( &(send_fifo->tail_lock) );
         }
 
         /* get pointer - pass in offset to change queue pointer
          * addressing from that of the sender */
-        header_ptr=(mca_ptl_sm_frag_t *)
-            ompi_fifo_read_from_tail_same_base_addr( send_fifo);
+        header_ptr = (mca_ptl_sm_frag_t *)
+            ompi_fifo_read_from_tail_same_base_addr( send_fifo );
 
         if( OMPI_CB_FREE == header_ptr ) {
             /* release thread lock */
             if( ompi_using_threads() ) {