Complete the SM BTL changes. All displacements are now ptrdiff_t, and there are no more warnings about signed/unsigned mismatches.

This commit was SVN r12234.
parent 08a9b6458c
commit d7268557a8
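For context, a minimal sketch (not part of the commit) of why ptrdiff_t is the natural type for these displacements: pointer subtraction yields a ptrdiff_t directly and can legitimately be negative, so no signed/unsigned conversion is involved the way it is when size_t or a ssize_t round-trip is mixed into the arithmetic. All names below are illustrative only.

#include <stddef.h>
#include <stdio.h>

int main(void)
{
    char segment[64];               /* stand-in for a shared memory segment */
    char *my_base   = segment + 16; /* where this process "mapped" it       */
    char *peer_base = segment;      /* pretend the peer mapped it lower     */

    /* Pointer subtraction produces a ptrdiff_t, which may be negative,
     * so a signed displacement type is required. */
    ptrdiff_t offset = my_base - peer_base;

    printf("displacement = %td\n", offset);  /* %td prints a ptrdiff_t */
    return 0;
}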
@@ -112,7 +112,7 @@ typedef struct ompi_cb_fifo_t ompi_cb_fifo_t;
  *
  */
 static inline void *ompi_cb_fifo_read_from_tail(ompi_cb_fifo_t *fifo,
-        bool flush_entries_read, bool *queue_empty, ssize_t offset)
+        bool flush_entries_read, bool *queue_empty, ptrdiff_t offset)
 {
     int index = 0,clearIndex, i;
     void **q_ptr;
@@ -429,7 +429,7 @@ void *ompi_fifo_read_from_tail_same_base_addr( ompi_fifo_t *fifo)
  *
  */
 static inline void *ompi_fifo_read_from_tail(ompi_fifo_t *fifo,
-        ssize_t offset)
+        ptrdiff_t offset)
 {
     /* local parameters */
     void *return_value;
@@ -153,8 +153,8 @@ int mca_btl_sm_add_procs_same_base_addr(
     ompi_bitmap_t* reachability)
 {
     int return_code = OMPI_SUCCESS, cnt, len;
-    size_t i,j,proc,size,n_to_allocate,length;
-    int32_t n_local_procs;
+    size_t i, j, size, n_to_allocate, length;
+    int32_t n_local_procs, proc;
     ompi_proc_t* my_proc; /* pointer to caller's proc structure */
     mca_btl_sm_t *btl_sm;
     ompi_fifo_t *my_fifos;
@@ -179,7 +179,7 @@ int mca_btl_sm_add_procs_same_base_addr(
     }

     /* initialize and sm_proc_connect*/
-    for(proc=0 ; proc < nprocs ; proc++ ) {
+    for( proc = 0 ; proc < (int32_t)nprocs ; proc++ ) {
         mca_btl_sm_component.sm_proc_connect[proc] = 0;
     }

@@ -195,7 +195,7 @@ int mca_btl_sm_add_procs_same_base_addr(
      * host to shared memory reachbility list. Also, get number
      * of local procs in the prcs list. */
     n_local_procs=0;
-    for( proc=0 ; proc < nprocs; proc++ ) {
+    for( proc=0 ; proc < (int32_t)nprocs; proc++ ) {
 #if OMPI_ENABLE_PROGRESS_THREADS == 1
         char path[PATH_MAX];
 #endif
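The (int32_t)nprocs casts in the loops above and below exist because proc is now signed while nprocs is an unsigned parameter; comparing the two directly promotes the signed side to unsigned and draws a signed/unsigned comparison warning. A small, hypothetical sketch of the pattern (it assumes nprocs fits in an int32_t, as the surrounding code does):

#include <stdint.h>
#include <stddef.h>

static void reset_connectivity(size_t nprocs, int *sm_proc_connect)
{
    int32_t proc;
    /* for (proc = 0; proc < nprocs; proc++)   -- would warn with -Wsign-compare */
    for (proc = 0; proc < (int32_t)nprocs; proc++) {  /* explicit cast, no warning */
        sm_proc_connect[proc] = 0;
    }
}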
@@ -285,8 +285,8 @@ int mca_btl_sm_add_procs_same_base_addr(

     if ( !mca_btl_sm[0].btl_inited ) {
         /* set the shared memory offset */
-        mca_btl_sm_component.sm_offset=(ssize_t *)
-            malloc(n_to_allocate*sizeof(ssize_t));
+        mca_btl_sm_component.sm_offset=(ptrdiff_t*)
+            malloc(n_to_allocate*sizeof(ptrdiff_t));
         if(NULL == mca_btl_sm_component.sm_offset ) {
             return_code=OMPI_ERR_OUT_OF_RESOURCE;
             goto CLEANUP;
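A hypothetical sketch of what the table allocated above holds: one signed displacement per local process, derived from the difference between this process's mapping of the segment and the peer's. The helper name and bookkeeping here are illustrative, not the BTL's actual code.

#include <stddef.h>
#include <stdlib.h>

/* Allocate and fill a per-peer displacement table: adding sm_offset[p] to a
 * peer-relative address yields an address valid in this process. */
static ptrdiff_t *build_offset_table(char *my_base, char **peer_base, size_t nprocs)
{
    ptrdiff_t *sm_offset = (ptrdiff_t *)malloc(nprocs * sizeof(ptrdiff_t));
    if (NULL == sm_offset) {
        return NULL;   /* caller turns this into OMPI_ERR_OUT_OF_RESOURCE */
    }
    for (size_t p = 0; p < nprocs; p++) {
        sm_offset[p] = my_base - peer_base[p];
    }
    return sm_offset;
}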
@@ -303,7 +303,7 @@ int mca_btl_sm_add_procs_same_base_addr(

     /* set local proc's smp rank in the peers structure for
      * rapid access */
-    for( proc=0 ; proc < nprocs; proc++ ) {
+    for( proc=0 ; proc < (int32_t)nprocs; proc++ ) {
         struct mca_btl_base_endpoint_t* peer = peers[proc];
         if(NULL != peer) {
             mca_btl_sm_component.sm_peers[peer->peer_smp_rank] = peer;
@@ -575,7 +575,7 @@ int mca_btl_sm_add_procs_same_base_addr(

     /* set connectivity */
     cnt=0;
-    for(proc = 0 ; proc < nprocs ; proc++ ) {
+    for(proc = 0 ; proc < (int32_t)nprocs ; proc++ ) {

         struct mca_btl_base_endpoint_t* peer = peers[proc];
         if(peer == NULL)
@@ -106,7 +106,7 @@ struct mca_btl_sm_component_t {
                                  a real virtual address */
     size_t size_of_cb_queue; /**< size of each circular buffer queue array */
     size_t cb_lazy_free_freq; /**< frequency of lazy free */
-    ssize_t *sm_offset; /**< offset to be applied to shared memory
+    ptrdiff_t *sm_offset; /**< offset to be applied to shared memory
                           addresses, per local process value */
     int *sm_proc_connect; /* scratch array used by the 0'th btl to
                            * set indicate sm connectivty. Used by
@@ -474,9 +474,9 @@ int mca_btl_sm_component_progress(void)
             {
                 /* completion callback */
                 frag->base.des_src =
-                    ( mca_btl_base_segment_t* )((unsigned char*)frag->base.des_dst + mca_btl_sm_component.sm_offset[peer_smp_rank]);
-                frag->base.des_src->seg_addr.pval =
-                    ((unsigned char*)frag->base.des_src->seg_addr.pval +
+                    ( mca_btl_base_segment_t* )((ptrdiff_t)frag->base.des_dst + mca_btl_sm_component.sm_offset[peer_smp_rank]);
+                frag->base.des_src->seg_addr.pval = (void*)
+                    ((ptrdiff_t)frag->base.des_src->seg_addr.pval +
                      mca_btl_sm_component.sm_offset[peer_smp_rank]);
                 frag->base.des_dst = frag->base.des_src;
                 frag->base.des_cbfunc(&mca_btl_sm[1].super, frag->endpoint, &frag->base, frag->rc);
@@ -487,9 +487,9 @@ int mca_btl_sm_component_progress(void)
                 /* recv upcall */
                 mca_btl_sm_recv_reg_t* reg = mca_btl_sm[1].sm_reg + frag->tag;
                 frag->base.des_dst = (mca_btl_base_segment_t*)
-                    ((unsigned char*)frag->base.des_src + mca_btl_sm_component.sm_offset[peer_smp_rank]);
-                frag->base.des_dst->seg_addr.pval =
-                    ((unsigned char*)frag->base.des_dst->seg_addr.pval +
+                    ((ptrdiff_t)frag->base.des_src + mca_btl_sm_component.sm_offset[peer_smp_rank]);
+                frag->base.des_dst->seg_addr.pval = (void*)
+                    ((ptrdiff_t)frag->base.des_dst->seg_addr.pval +
                      mca_btl_sm_component.sm_offset[peer_smp_rank]);
                 frag->base.des_src = frag->base.des_dst;
                 reg->cbfunc(&mca_btl_sm[1].super,frag->tag,&frag->base,reg->cbdata);
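The two hunks above swap (unsigned char*) arithmetic for integer arithmetic: the sender's descriptor pointers are only meaningful in the sender's address space, so the receiver adds its per-peer displacement before touching them. A minimal, hypothetical sketch of that translation step (the casts assume, as the diff does, that ptrdiff_t is wide enough to hold a pointer on the supported platforms; intptr_t would be the strictly portable choice):

#include <stddef.h>

/* Translate a pointer taken from a peer's address space into one that is
 * valid locally by applying the per-peer displacement. */
static void *translate_peer_ptr(void *peer_ptr, ptrdiff_t offset_for_peer)
{
    return (void *)((ptrdiff_t)peer_ptr + offset_for_peer);
}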
@@ -15,8 +15,8 @@ do { \
     if(OMPI_CB_FREE == fifo->head) { \
         /* no queues have been allocated - allocate now */ \
         rc=ompi_fifo_init_same_base_addr( \
-            mca_btl_sm_component.size_of_cb_queue, \
-            mca_btl_sm_component.cb_lazy_free_freq, \
+            (int)mca_btl_sm_component.size_of_cb_queue, \
+            (int)mca_btl_sm_component.cb_lazy_free_freq, \
             /* at this stage we are not doing anything with memory \
              * locality */ \
             0,0,0, \
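The (int) casts in the last hunk follow the same theme: size_of_cb_queue and cb_lazy_free_freq are size_t fields, while the fifo initializer evidently takes plain int sizes (which is why the casts are to int), so the narrowing is made explicit to keep the build warning-free. A hypothetical stand-in showing the shape of the call, not the real initializer:

#include <stddef.h>

/* Stand-in for the fifo initializer; only the int parameters matter here. */
static int fifo_init_stub(int size_of_cb_queue, int lazy_free_freq)
{
    (void)size_of_cb_queue;
    (void)lazy_free_freq;
    return 0;
}

static int init_from_config(size_t size_of_cb_queue, size_t lazy_free_freq)
{
    /* explicit casts document the intentional size_t -> int narrowing */
    return fifo_init_stub((int)size_of_cb_queue, (int)lazy_free_freq);
}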