btl/vader: change the way fast boxes are used
There were multiple paths that could lead to a fast-box allocation. One of them made little sense (the in-place send), so it has been removed to allow a rework of the fast-box send function. This should fix a number of issues with hangs/crashes when using the vader btl.

References #4260

Signed-off-by: Nathan Hjelm <hjelmn@lanl.gov>
This commit is contained in:
parent 3e12cfff03
commit a82f761a4a
@@ -22,12 +22,12 @@ typedef union mca_btl_vader_fbox_hdr_t {
          * in multiple instructions. To ensure that seq is never loaded before tag
          * and the tag is never read before seq put them in the same 32-bits of the
          * header. */
-        /** message size */
-        uint32_t size;
         /** message tag */
         uint16_t tag;
         /** sequence number */
         uint16_t seq;
+        /** message size */
+        uint32_t size;
     } data;
     uint64_t ival;
 } mca_btl_vader_fbox_hdr_t;
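The point of the reordering is visible in the hunk above: with tag and seq as the first two uint16_t members they share the low 32 bits of ival, so even a 32-bit target that splits the 64-bit header store into two instructions publishes tag and seq together. A minimal standalone sketch of the idea (fbox_hdr_t and fbox_set_header here are illustrative stand-ins, not the vader API):

#include <stdint.h>

typedef union fbox_hdr {
    struct {
        uint16_t tag;   /* message tag */
        uint16_t seq;   /* sequence number */
        uint32_t size;  /* message size */
    } data;
    uint64_t ival;      /* the whole header viewed as one word */
} fbox_hdr_t;

/* Publish a complete header with one store of the union. Because tag and
 * seq occupy the same 32-bit half of ival, a reader can never observe a
 * new tag paired with a stale seq (or vice versa), even if a 32-bit CPU
 * performs the 64-bit store as two 32-bit writes. */
static inline void fbox_set_header (volatile fbox_hdr_t *hdr, uint16_t tag,
                                    uint16_t seq, uint32_t size)
{
    fbox_hdr_t tmp = { .data = { .tag = tag, .seq = seq, .size = size } };
    hdr->ival = tmp.ival;
    /* the real code follows this store with a write barrier (opal_atomic_wmb) */
}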
@@ -52,20 +52,24 @@ static inline void mca_btl_vader_fbox_set_header (mca_btl_vader_fbox_hdr_t *hdr,
 {
     mca_btl_vader_fbox_hdr_t tmp = {.data = {.tag = tag, .seq = seq, .size = size}};
     hdr->ival = tmp.ival;
     opal_atomic_wmb ();
 }
 
 /* attempt to reserve a contiguous segment from the remote ep */
-static inline unsigned char *mca_btl_vader_reserve_fbox (mca_btl_base_endpoint_t *ep, size_t size)
+static inline bool mca_btl_vader_fbox_sendi (mca_btl_base_endpoint_t *ep, unsigned char tag,
+                                             void * restrict header, const size_t header_size,
+                                             void * restrict payload, const size_t payload_size)
 {
     const unsigned int fbox_size = mca_btl_vader_component.fbox_size;
+    size_t size = header_size + payload_size;
     unsigned int start, end, buffer_free;
     size_t data_size = size;
-    unsigned char *dst;
+    unsigned char *dst, *data;
     bool hbs, hbm;
 
     /* don't try to use the per-peer buffer for messages that will fill up more than 25% of the buffer */
     if (OPAL_UNLIKELY(NULL == ep->fbox_out.buffer || size > (fbox_size >> 2))) {
-        return NULL;
+        return false;
     }
 
     OPAL_THREAD_LOCK(&ep->lock);
@@ -119,15 +123,23 @@ static inline unsigned char *mca_btl_vader_reserve_fbox (mca_btl_base_endpoint_t
             ep->fbox_out.end = (hbs << 31) | end;
             opal_atomic_wmb ();
             OPAL_THREAD_UNLOCK(&ep->lock);
-            return NULL;
+            return false;
         }
     }
 
     BTL_VERBOSE(("writing fragment of size %u to offset %u {start: 0x%x, end: 0x%x (hbs: %d)} of peer's buffer. free = %u",
                  (unsigned int) size, end, start, end, hbs, buffer_free));
 
+    data = dst + sizeof (mca_btl_vader_fbox_hdr_t);
+
+    memcpy (data, header, header_size);
+    if (payload) {
+        /* inline sends are typically just pml headers (due to MCA_BTL_FLAGS_SEND_INPLACE) */
+        memcpy (data + header_size, payload, payload_size);
+    }
+
     /* write out part of the header now. the tag will be written when the data is available */
-    mca_btl_vader_fbox_set_header (MCA_BTL_VADER_FBOX_HDR(dst), 0, ep->fbox_out.seq++, data_size);
+    mca_btl_vader_fbox_set_header (MCA_BTL_VADER_FBOX_HDR(dst), tag, ep->fbox_out.seq++, data_size);
 
     end += size;
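The (hbs << 31) | end bookkeeping above keeps the 31-bit write offset and a wrap flag in one word. How free space falls out of such a scheme is easiest to see in isolation; the following is an assumed reconstruction based only on the names in the diff (hbs/hbm read as the high bits of the reader's and writer's positions), not code taken from vader:

#include <stdbool.h>
#include <stdint.h>

/* Free space in a ring whose read and write offsets each carry a wrap
 * flag in bit 31; the flag toggles every time an offset wraps to 0. */
static unsigned ring_free (uint32_t start_word, uint32_t end_word,
                           unsigned fbox_size)
{
    unsigned start = start_word & 0x7fffffffu;  /* reader position */
    unsigned end   = end_word   & 0x7fffffffu;  /* writer position */
    bool hbs = start_word >> 31;                /* reader wrap flag */
    bool hbm = end_word   >> 31;                /* writer wrap flag */

    if (hbs == hbm) {
        /* same lap: bytes in [start, end) are still in flight */
        return fbox_size - (end - start);
    }
    /* writer is one lap ahead: it may only fill up to the reader */
    return start - end;
}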
@@ -145,40 +157,6 @@ static inline unsigned char *mca_btl_vader_reserve_fbox (mca_btl_base_endpoint_t
     opal_atomic_wmb ();
     OPAL_THREAD_UNLOCK(&ep->lock);
 
-    return dst + sizeof (mca_btl_vader_fbox_hdr_t);
-}
-
-static inline void mca_btl_vader_fbox_send (unsigned char * restrict fbox, unsigned char tag)
-{
-    /* ensure data writes have completed before we mark the data as available */
-    opal_atomic_wmb ();
-
-    /* the header proceeds the fbox buffer */
-    MCA_BTL_VADER_FBOX_HDR ((intptr_t) fbox)[-1].data.tag = tag;
-}
-
-static inline bool mca_btl_vader_fbox_sendi (mca_btl_base_endpoint_t *ep, unsigned char tag,
-                                             void * restrict header, const size_t header_size,
-                                             void * restrict payload, const size_t payload_size)
-{
-    const size_t total_size = header_size + payload_size;
-    unsigned char * restrict fbox;
-
-    fbox = mca_btl_vader_reserve_fbox(ep, total_size);
-    if (OPAL_UNLIKELY(NULL == fbox)) {
-        return false;
-    }
-
-    memcpy (fbox, header, header_size);
-    if (payload) {
-        /* inline sends are typically just pml headers (due to MCA_BTL_FLAGS_SEND_INPLACE) */
-        memcpy (fbox + header_size, payload, payload_size);
-    }
-
-    /* mark the fbox as sent */
-    mca_btl_vader_fbox_send (fbox, tag);
-
-    /* send complete */
-    return true;
-}
+    return true;
+}
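What the hunk above deletes is the old two-phase protocol: mca_btl_vader_reserve_fbox published size and seq with a placeholder tag of 0, the caller then copied its data into the box, and mca_btl_vader_fbox_send finally stored the real tag to mark the box ready. The reworked mca_btl_vader_fbox_sendi collapses this into one path that copies the data first and only then publishes the complete header. A sketch of the receiver behavior this enables (an assumed illustration reusing the fbox_hdr union layout from the first hunk; fbox_poll is not a vader function):

#include <stdbool.h>
#include <stdint.h>

typedef union fbox_hdr {
    struct { uint16_t tag; uint16_t seq; uint32_t size; } data;
    uint64_t ival;
} fbox_hdr_t;

/* Poll a fast box: snapshot the whole header in one load and key on seq.
 * Since the sender copies the payload, issues a write barrier, and only
 * then stores the header, a matching seq implies that tag, size and the
 * data behind the header are all valid -- there is no longer a window in
 * which seq and size are visible but the tag is still the placeholder. */
static bool fbox_poll (const volatile fbox_hdr_t *hdr, uint16_t expected_seq,
                       uint16_t *tag_out, uint32_t *size_out)
{
    fbox_hdr_t snap;

    snap.ival = hdr->ival;            /* single load of the header word */
    if (snap.data.seq != expected_seq) {
        return false;                 /* box not (re)filled yet */
    }
    /* a read barrier would belong here, before touching the payload */
    *tag_out  = snap.data.tag;
    *size_out = snap.data.size;
    return true;
}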
@@ -36,7 +36,6 @@ static inline void mca_btl_vader_frag_constructor (mca_btl_vader_frag_t *frag)
 
     frag->base.des_segments = frag->segments;
     frag->base.des_segment_count = 1;
-    frag->fbox = NULL;
 }
 
 int mca_btl_vader_frag_init (opal_free_list_item_t *item, void *ctx)
@@ -67,8 +67,6 @@ struct mca_btl_vader_frag_t {
     mca_btl_base_segment_t segments[2];
     /** endpoint this fragment is active on */
     struct mca_btl_base_endpoint_t *endpoint;
-    /** fast box in use (or NULL) */
-    unsigned char * restrict fbox;
     /** fragment header (in the shared memory region) */
     mca_btl_vader_hdr_t *hdr;
     /** free list this fragment was allocated within */
@@ -95,7 +93,6 @@ static inline void mca_btl_vader_frag_return (mca_btl_vader_frag_t *frag)
 
     frag->segments[0].seg_addr.pval = (char *)(frag->hdr + 1);
     frag->base.des_segment_count = 1;
-    frag->fbox = NULL;
 
     opal_free_list_return (frag->my_list, (opal_free_list_item_t *)frag);
 }
@@ -440,7 +440,6 @@ static struct mca_btl_base_descriptor_t *vader_prepare_src (struct mca_btl_base_
 {
     const size_t total_size = reserve + *size;
     mca_btl_vader_frag_t *frag;
-    unsigned char *fbox;
     void *data_ptr;
     int rc;
 
@@ -506,19 +505,6 @@ static struct mca_btl_base_descriptor_t *vader_prepare_src (struct mca_btl_base_
         frag->base.des_segment_count = 2;
     } else {
 #endif
-
-        /* inline send */
-        if (OPAL_LIKELY(MCA_BTL_DES_FLAGS_BTL_OWNERSHIP & flags)) {
-            /* try to reserve a fast box for this transfer only if the
-             * fragment does not belong to the caller */
-            fbox = mca_btl_vader_reserve_fbox (endpoint, total_size);
-            if (OPAL_LIKELY(fbox)) {
-                frag->segments[0].seg_addr.pval = fbox;
-            }
-
-            frag->fbox = fbox;
-        }
-
         /* NTH: the covertor adds some latency so we bypass it here */
         memcpy ((void *)((uintptr_t)frag->segments[0].seg_addr.pval + reserve), data_ptr, *size);
         frag->segments[0].seg_len = total_size;
@@ -42,13 +42,6 @@ int mca_btl_vader_send (struct mca_btl_base_module_t *btl,
     mca_btl_vader_frag_t *frag = (mca_btl_vader_frag_t *) descriptor;
     const size_t total_size = frag->segments[0].seg_len;
 
-    if (OPAL_LIKELY(frag->fbox)) {
-        mca_btl_vader_fbox_send (frag->fbox, tag);
-        mca_btl_vader_frag_complete (frag);
-
-        return 1;
-    }
-
     /* in order to work around a long standing ob1 bug (see #3845) we have to always
      * make the callback. once this is fixed in ob1 we can restore the code below. */
     frag->base.des_flags |= MCA_BTL_DES_SEND_ALWAYS_CALLBACK;