Remove the rc parameter from the MCA_BTL_SM_FIFO_WRITE() macro. It cannot
fail in the current implementation.

This commit was SVN r16015.
This commit is contained in:
parent 690fb95bda
commit 435e7d80e9
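Why the macro cannot fail is visible in the fifo header change below: the write path simply spins, calling opal_progress(), until ompi_fifo_write_to_head() succeeds, so no error code ever reaches the caller. A minimal sketch of the resulting call pattern (simplified from the diff; the function name and the endpoint type spelled out here are illustrative assumptions, not the verbatim Open MPI source):

    /* Sketch only -- condensed from the diff below.  Because the macro
     * blocks until the fragment header is queued, the send path can drop
     * its local rc and unconditionally report success. */
    static int mca_btl_sm_send_sketch(struct mca_btl_base_endpoint_t* endpoint,
                                      mca_btl_sm_frag_t* frag)
    {
        /* Spins inside the macro (via opal_progress()) until the FIFO
         * write succeeds. */
        MCA_BTL_SM_FIFO_WRITE(endpoint, endpoint->my_smp_rank,
                              endpoint->peer_smp_rank, frag->hdr);
        return OMPI_SUCCESS;
    }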
@@ -799,7 +799,6 @@ int mca_btl_sm_send(
     mca_btl_base_tag_t tag)
 {
     mca_btl_sm_frag_t* frag = (mca_btl_sm_frag_t*)descriptor;
-    int rc;
 
     /* availble header space */
     frag->hdr->len = frag->segment.seg_len;
@@ -810,8 +809,8 @@ int mca_btl_sm_send(
      * post the descriptor in the queue - post with the relative
      * address
      */
-    MCA_BTL_SM_FIFO_WRITE(endpoint, endpoint->my_smp_rank, endpoint->peer_smp_rank, frag->hdr, rc);
-    return rc;
+    MCA_BTL_SM_FIFO_WRITE(endpoint, endpoint->my_smp_rank, endpoint->peer_smp_rank, frag->hdr);
+    return OMPI_SUCCESS;
 }
 
 int mca_btl_sm_ft_event(int state) {
@@ -397,9 +397,7 @@ int mca_btl_sm_component_progress(void)
                         reg->cbdata);
                 MCA_BTL_SM_FIFO_WRITE(
                         mca_btl_sm_component.sm_peers[peer_smp_rank],
-                        my_smp_rank, peer_smp_rank, hdr->frag, rc);
-                if(OMPI_SUCCESS != rc)
-                    goto err;
+                        my_smp_rank, peer_smp_rank, hdr->frag);
                 break;
             }
             default:
@@ -408,16 +406,10 @@ int mca_btl_sm_component_progress(void)
                         MCA_BTL_SM_FRAG_STATUS_MASK);
                 MCA_BTL_SM_FIFO_WRITE(
                         mca_btl_sm_component.sm_peers[peer_smp_rank],
-                        my_smp_rank, peer_smp_rank, hdr, rc);
-                if(OMPI_SUCCESS != rc)
-                    goto err;
+                        my_smp_rank, peer_smp_rank, hdr);
                 break;
         }
         rc++;
     }
     return rc;
-err:
-    BTL_ERROR(("SM faild to send message due to shortage of shared memory.\n"));
-    mca_btl_sm.error_cb(&mca_btl_sm.super, MCA_BTL_ERROR_FLAGS_FATAL);
-    return rc;
 }
@@ -4,24 +4,19 @@
 #include "btl_sm.h"
 #include "btl_sm_endpoint.h"
 
-#define MCA_BTL_SM_FIFO_WRITE(endpoint_peer, my_smp_rank,peer_smp_rank,hdr,rc) \
-do { \
-    ompi_fifo_t* fifo; \
-    fifo=&(mca_btl_sm_component.fifo[peer_smp_rank][my_smp_rank]); \
- \
-    /* thread lock */ \
-    if(opal_using_threads()) \
-        opal_atomic_lock(fifo->head_lock); \
-    /* post fragment */ \
-    while(ompi_fifo_write_to_head(hdr, fifo, \
-                mca_btl_sm_component.sm_mpool) != OMPI_SUCCESS) \
-        opal_progress(); \
-    MCA_BTL_SM_SIGNAL_PEER(endpoint_peer); \
-    rc=OMPI_SUCCESS; \
-    if(opal_using_threads()) \
-        opal_atomic_unlock(fifo->head_lock); \
+#define MCA_BTL_SM_FIFO_WRITE(endpoint_peer, my_smp_rank,peer_smp_rank,hdr) \
+do { \
+    ompi_fifo_t* fifo; \
+    fifo=&(mca_btl_sm_component.fifo[peer_smp_rank][my_smp_rank]); \
+ \
+    /* thread lock */ \
+    OPAL_THREAD_LOCK(&fifo->head_lock); \
+    /* post fragment */ \
+    while(ompi_fifo_write_to_head(hdr, fifo, \
+                mca_btl_sm_component.sm_mpool) != OMPI_SUCCESS) \
+        opal_progress(); \
+    MCA_BTL_SM_SIGNAL_PEER(endpoint_peer); \
+    OPAL_THREAD_UNLOCK(&fifo->head_lock); \
 } while(0)
 
 
 #endif