
Merge pull request #688 from jsquyres/pr/usnic-libfabric-msg-prefix-fix

usnic fixes for differences between libfabric v1.0.0 and v1.1.0
This commit is contained in:
Jeff Squyres 2015-07-21 10:18:36 -04:00
parent 46a87cabf0 633da6641e
commit ec3a38384f
10 changed files with 180 additions and 75 deletions

View file

@ -93,7 +93,7 @@ extern opal_rng_buff_t opal_btl_usnic_rand_buff;
/* Set to >0 to randomly drop received frags. The higher the number,
the more frequent the drops. */
#define WANT_RECV_FRAG_DROPS 0
#define WANT_RECV_DROPS 0
/* Set to >0 to randomly fail to send an ACK, mimicking a lost ACK.
The higher the number, the more frequent the failed-to-send-ACK. */
#define WANT_FAIL_TO_SEND_ACK 0
@ -102,10 +102,10 @@ extern opal_rng_buff_t opal_btl_usnic_rand_buff;
the failed-to-resend-frag. */
#define WANT_FAIL_TO_RESEND_FRAG 0
#if WANT_RECV_FRAG_DROPS > 0
#define FAKE_RECV_FRAG_DROP (opal_rand(&opal_btl_usnic_rand_buff) < WANT_RECV_FRAG_DROPS)
#if WANT_RECV_DROPS > 0
#define FAKE_RECV_DROP (opal_rand(&opal_btl_usnic_rand_buff) < WANT_RECV_DROPS)
#else
#define FAKE_RECV_FRAG_DROP 0
#define FAKE_RECV_DROP 0
#endif
#if WANT_FAIL_TO_SEND_ACK > 0
@ -213,6 +213,19 @@ typedef struct opal_btl_usnic_component_t {
/* Prefix for the connectivity map filename (map will be output if
the prefix is non-NULL) */
char *connectivity_map_prefix;
/** Expected return value from fi_cq_readerr() upon success. In
libfabric v1.0.0 / API v1.0, the usnic provider returned
sizeof(fi_cq_err_entry) upon success. In libfabric >=v1.1 /
API >=v1.1, the usnic provider returned 1 upon success. */
ssize_t cq_readerr_success_value;
ssize_t cq_readerr_try_again_value;
/** Offset into the send buffer where the payload will go. For
libfabric v1.0.0 / API v1.0, this is 0. For libfabric >=v1.1
/ API >=v1.1, this is the endpoint.msg_prefix_size (i.e.,
component.transport_header_len). */
uint32_t prefix_send_offset;
} opal_btl_usnic_component_t;
OPAL_MODULE_DECLSPEC extern opal_btl_usnic_component_t mca_btl_usnic_component;

View file

@ -201,8 +201,7 @@ opal_btl_usnic_ack_send(
/* Get an ACK frag. If we don't get one, just discard this ACK. */
ack = opal_btl_usnic_ack_segment_alloc(module);
if (OPAL_UNLIKELY(NULL == ack)) {
opal_output(0, "====================== No frag for sending the ACK -- skipped");
abort();
return;
}
/* send the seq of the lowest item in the window that
@ -252,6 +251,7 @@ opal_btl_usnic_ack_complete(opal_btl_usnic_module_t *module,
opal_btl_usnic_ack_segment_t *ack)
{
opal_btl_usnic_ack_segment_return(module, ack);
++module->mod_channels[ack->ss_channel].credits;
}
/*****************************************************************************/

View file

@ -715,6 +715,25 @@ opal_btl_usnic_put(struct mca_btl_base_module_t *base_module,
sfrag->sf_size = size;
sfrag->sf_ack_bytes_left = size;
/* JMS NOTE: This is currently broken, and is deactivated by
removing the MCA_BTL_FLAGS_PUT from .btl_flags in btl_module.c.
Overwriting the uf_local_seg values is not a good idea, and
doesn't do anything to actually send the data in the
progression past finish_put_or_send().
The proper fix is to change the plumbing here to eventually
call fi_sendv() with an iov[0] = the internal buffer that's
already allocated, and iov[1] = the user's buffer. The usnic
provider in fi_sendv() will be smart enough to figure out which
is more performant: memcpy'ing the 2 buffers together and
doing a single xfer down to the hardware, or actually doing a
SG list down to the hardware. */
opal_btl_usnic_frag_t *frag;
frag = &sfrag->sf_base;
frag->uf_local_seg[0].seg_len = size;
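As an illustration of the fix suggested in the note above, a minimal fi_sendv() sketch might look like the following. This is a hypothetical sketch, not part of the commit: user_buffer and user_len stand in for the user's payload (which the real plumbing would obtain from the descriptor), struct iovec comes from <sys/uio.h>, and error handling is omitted.

/* Hypothetical sketch of the fi_sendv() approach from the note above:
   iov[0] is the already-allocated internal buffer holding the BTL
   header, iov[1] is the user's buffer.  The usnic provider can then
   decide whether to memcpy the two buffers into a single transfer or
   hand an SG list down to the hardware. */
struct iovec iov[2];
iov[0].iov_base = sseg->ss_ptr;          /* internal header buffer */
iov[0].iov_len  = sizeof(opal_btl_usnic_btl_header_t);
iov[1].iov_base = user_buffer;           /* user payload (hypothetical) */
iov[1].iov_len  = user_len;              /* payload length (hypothetical) */

ret = fi_sendv(channel->ep, iov, NULL, 2,
               endpoint->endpoint_remote_addrs[channel_id],
               sseg);                    /* completion context */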

View file

@ -163,6 +163,7 @@ static int usnic_component_open(void)
mca_btl_usnic_component.usnic_all_modules = NULL;
mca_btl_usnic_component.usnic_active_modules = NULL;
mca_btl_usnic_component.transport_header_len = -1;
mca_btl_usnic_component.prefix_send_offset = 0;
/* initialize objects */
OBJ_CONSTRUCT(&mca_btl_usnic_component.usnic_procs, opal_list_t);
@ -630,7 +631,29 @@ static mca_btl_base_module_t** usnic_component_init(int* num_btl_modules,
hints.ep_attr = &ep_attr;
hints.fabric_attr = &fabric_attr;
ret = fi_getinfo(FI_VERSION(1, 0), NULL, 0, 0, &hints, &info_list);
/* This code understands libfabric API v1.0 and v1.1. Even if we
were compiled with libfabric API v1.0, we still want to request
v1.1 -- here's why:
- In libfabric v1.0.0 (i.e., API v1.0), the usnic provider did
not check the value of the "version" parameter passed into
fi_getinfo()
- If you pass FI_VERSION(1,0) to libfabric v1.1.0 (i.e., API
v1.1), the usnic provider will disable FI_MSG_PREFIX support
(on the assumption that the application will not handle
FI_MSG_PREFIX properly). This can happen if you compile OMPI
against libfabric v1.0.0 (i.e., API v1.0) and run OMPI
against libfabric v1.1.0 (i.e., API v1.1).
So never request API v1.0 -- always request a minimum of
v1.1. */
uint32_t libfabric_api;
libfabric_api = FI_VERSION(FI_MAJOR_VERSION, FI_MINOR_VERSION);
if (libfabric_api == FI_VERSION(1, 0)) {
libfabric_api = FI_VERSION(1, 1);
}
ret = fi_getinfo(libfabric_api, NULL, 0, 0, &hints, &info_list);
if (0 != ret) {
opal_output_verbose(5, USNIC_OUT,
"btl:usnic: disqualifiying myself due to fi_getinfo failure: %s (%d)", strerror(-ret), ret);
@ -664,6 +687,29 @@ static mca_btl_base_module_t** usnic_component_init(int* num_btl_modules,
opal_output_verbose(5, USNIC_OUT,
"btl:usnic: usNIC fabrics found");
/* Due to ambiguities in documentation, in libfabric v1.0.0 (i.e.,
API v1.0) the usnic provider returned sizeof(struct
fi_cq_err_entry) from fi_cq_readerr() upon success.
The ambiguities were clarified in libfabric v1.1.0 (i.e., API
v1.1); the usnic provider returned 1 from fi_cq_readerr() upon
success.
So query to see what version of the libfabric API we are
running with, and adapt accordingly. */
libfabric_api = fi_version();
if (1 == FI_MAJOR(libfabric_api) &&
0 == FI_MINOR(libfabric_api)) {
// Old fi_cq_readerr() behavior: success=sizeof(...), try again=0
mca_btl_usnic_component.cq_readerr_success_value =
sizeof(struct fi_cq_err_entry);
mca_btl_usnic_component.cq_readerr_try_again_value = 0;
} else {
// New fi_cq_readerr() behavior: success=1, try again=-FI_EAGAIN
mca_btl_usnic_component.cq_readerr_success_value = 1;
mca_btl_usnic_component.cq_readerr_try_again_value = -FI_EAGAIN;
}
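For reference, the version checks above rely on libfabric's version-packing macros, roughly as follows (an illustrative sketch; the installed <rdma/fabric.h> is authoritative):

/* Sketch of the macros from libfabric's <rdma/fabric.h>: the API
   version is packed into one 32-bit value, major in the high 16 bits,
   minor in the low 16 bits. */
#define FI_VERSION(major, minor)  (((major) << 16) | (minor))
#define FI_MAJOR(version)         ((version) >> 16)
#define FI_MINOR(version)         ((version) & 0xFFFF)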
/* libnl initialization */
opal_proc_t *me = opal_proc_local_get();
opal_process_name_t *name = &(me->proc_name);
@ -1087,6 +1133,9 @@ static int usnic_handle_completion(
seg = (opal_btl_usnic_segment_t*)completion->op_context;
rseg = (opal_btl_usnic_recv_segment_t*)seg;
/* Make the completion be Valgrind-defined */
opal_memchecker_base_mem_defined(seg, sizeof(*seg));
/* Handle work completions */
switch(seg->us_type) {
@ -1094,27 +1143,18 @@ static int usnic_handle_completion(
case OPAL_BTL_USNIC_SEG_ACK:
opal_btl_usnic_ack_complete(module,
(opal_btl_usnic_ack_segment_t *)seg);
{ opal_btl_usnic_send_segment_t *sseg = (opal_btl_usnic_send_segment_t *)seg;
++module->mod_channels[sseg->ss_channel].credits;
}
break;
/**** Send of frag segment completion ****/
case OPAL_BTL_USNIC_SEG_FRAG:
opal_btl_usnic_frag_send_complete(module,
(opal_btl_usnic_frag_segment_t*)seg);
{ opal_btl_usnic_send_segment_t *sseg = (opal_btl_usnic_send_segment_t *)seg;
++module->mod_channels[sseg->ss_channel].credits;
}
break;
/**** Send of chunk segment completion ****/
case OPAL_BTL_USNIC_SEG_CHUNK:
opal_btl_usnic_chunk_send_complete(module,
(opal_btl_usnic_chunk_segment_t*)seg);
{ opal_btl_usnic_send_segment_t *sseg = (opal_btl_usnic_send_segment_t *)seg;
++module->mod_channels[sseg->ss_channel].credits;
}
break;
/**** Receive completions ****/
@ -1145,17 +1185,27 @@ usnic_handle_cq_error(opal_btl_usnic_module_t* module,
}
rc = fi_cq_readerr(channel->cq, &err_entry, 0);
if (rc != sizeof(err_entry)) {
BTL_ERROR(("%s: cq_readerr ret = %d",
module->fabric_info->fabric_attr->name, rc));
if (rc == mca_btl_usnic_component.cq_readerr_try_again_value) {
return;
} else if (rc != mca_btl_usnic_component.cq_readerr_success_value) {
BTL_ERROR(("%s: cq_readerr ret = %d (expected %d)",
module->fabric_info->fabric_attr->name, rc,
(int) mca_btl_usnic_component.cq_readerr_success_value));
channel->chan_error = true;
} else if (err_entry.prov_errno == 1) {
}
/* Silently count CRC errors. Truncation errors are usually a
different symptom of a CRC error. */
else if (FI_ECRC == err_entry.prov_errno ||
FI_ETRUNC == err_entry.prov_errno) {
#if MSGDEBUG1
static int once = 0;
if (once++ == 0) {
BTL_ERROR(("%s: Channel %d, CRC error",
module->fabric_info->fabric_attr->name,
channel->chan_index));
BTL_ERROR(("%s: Channel %d, %s",
module->fabric_info->fabric_attr->name,
channel->chan_index,
FI_ECRC == err_entry.prov_errno ?
"CRC error" : "message truncation"));
}
#endif
@ -1171,23 +1221,10 @@ usnic_handle_cq_error(opal_btl_usnic_module_t* module,
rseg->rs_next = channel->repost_recv_head;
channel->repost_recv_head = rseg;
}
} else if (FI_ETRUNC == err_entry.prov_errno) {
/* This error is usually a different symptom of a CRC error */
#if MSGDEBUG1
static int once = 0;
if (once++ == 0) {
BTL_ERROR(("%s: Channel %d, message truncation",
module->fabric_info->fabric_attr->name,
channel->chan_index));
}
#endif
/* silently count CRC errors */
++module->stats.num_crc_errors;
} else {
BTL_ERROR(("%s: CQ[%d] prov_err = %d",
module->fabric_info->fabric_attr->name, channel->chan_index,
err_entry.prov_errno));
channel->chan_error = true;
}
}

View file

@ -30,23 +30,22 @@
#include "btl_usnic_ack.h"
static void
common_send_seg_helper(
opal_btl_usnic_send_segment_t *seg,
int offset)
common_send_seg_helper(opal_btl_usnic_send_segment_t *seg)
{
opal_btl_usnic_segment_t *bseg;
bseg = &seg->ss_base;
bseg->us_btl_header = (opal_btl_usnic_btl_header_t *)
(((char*) bseg->us_list.ptr) + offset);
bseg->us_btl_header->sender = mca_btl_usnic_component.my_hashed_rte_name;
/* send ptr for fi_send(). ss_len will be filled in right before
the actual send. */
seg->ss_ptr = (uint8_t *) seg->ss_base.us_list.ptr;
seg->ss_send_posted = 0;
seg->ss_ack_pending = false;
/* send ptr, len will be filled in just before send */
seg->ss_ptr = (uint8_t *)bseg->us_btl_header;
/* Offset the BTL header by (prefix_send_offset) bytes into the
raw buffer */
bseg = &seg->ss_base;
bseg->us_btl_header = (opal_btl_usnic_btl_header_t *)
(seg->ss_ptr + mca_btl_usnic_component.prefix_send_offset);
bseg->us_btl_header->sender = mca_btl_usnic_component.my_hashed_rte_name;
}
static void
@ -59,7 +58,7 @@ chunk_seg_constructor(
bseg->us_type = OPAL_BTL_USNIC_SEG_CHUNK;
/* some more common initialization */
common_send_seg_helper(seg, mca_btl_usnic_component.transport_header_len);
common_send_seg_helper(seg);
/* payload starts next byte beyond BTL chunk header */
bseg->us_payload.raw = (uint8_t *)(bseg->us_btl_chunk_header + 1);
@ -77,7 +76,7 @@ frag_seg_constructor(
bseg->us_type = OPAL_BTL_USNIC_SEG_FRAG;
/* some more common initialization */
common_send_seg_helper(seg, mca_btl_usnic_component.transport_header_len);
common_send_seg_helper(seg);
/* payload starts next byte beyond BTL header */
bseg->us_payload.raw = (uint8_t *)(bseg->us_btl_header + 1);
@ -95,7 +94,7 @@ ack_seg_constructor(
bseg->us_type = OPAL_BTL_USNIC_SEG_ACK;
/* some more common initialization */
common_send_seg_helper(ack, mca_btl_usnic_component.transport_header_len);
common_send_seg_helper(ack);
/* ACK value embedded in BTL header */
bseg->us_btl_header->payload_type = OPAL_BTL_USNIC_PAYLOAD_TYPE_ACK;
@ -176,12 +175,13 @@ send_frag_constructor(opal_btl_usnic_send_frag_t *frag)
static void
send_frag_destructor(opal_btl_usnic_send_frag_t *frag)
{
mca_btl_base_descriptor_t *desc;
#if OPAL_ENABLE_DEBUG
/* make sure nobody twiddled these values after the constructor */
mca_btl_base_descriptor_t *desc;
desc = &frag->sf_base.uf_base;
assert(desc->USNIC_SEND_LOCAL == frag->sf_base.uf_local_seg);
assert(0 == frag->sf_base.uf_local_seg[0].seg_len);
#endif
/* PML may change desc->des_remote to point elsewhere, cannot assert that it
* still points to our embedded segment */

View file

@ -1421,7 +1421,7 @@ static int create_ep(opal_btl_usnic_module_t* module,
opal_process_info.my_local_rank);
}
rc = fi_getinfo(FI_VERSION(1, 0), NULL, 0, 0, hint, &channel->info);
rc = fi_getinfo(FI_VERSION(1, 1), NULL, 0, 0, hint, &channel->info);
fi_freeinfo(hint);
if (0 != rc) {
opal_show_help("help-mpi-btl-usnic.txt",
@ -1450,12 +1450,13 @@ static int create_ep(opal_btl_usnic_module_t* module,
sa = (struct sockaddr *)channel->info->src_addr;
assert(AF_INET == sa->sa_family);
}
#endif
sin = (struct sockaddr_in *)channel->info->src_addr;
assert(sizeof(struct sockaddr_in) == channel->info->src_addrlen);
/* no matter the version of libfabric, this should hold */
assert(0 == sin->sin_port);
#endif
rc = fi_endpoint(module->domain, channel->info, &channel->ep, NULL);
if (0 != rc || NULL == channel->ep) {
@ -1634,6 +1635,9 @@ static int init_one_channel(opal_btl_usnic_module_t *module,
goto error;
}
assert(channel->info->ep_attr->msg_prefix_size ==
(uint32_t) mca_btl_usnic_component.transport_header_len);
/*
* Initialize pool of receive segments. Round MTU up to cache
* line size so that each segment is guaranteed to start on a
@ -1777,6 +1781,33 @@ static void init_find_transport_header_len(opal_btl_usnic_module_t *module)
module->fabric_info->ep_attr->msg_prefix_size;
mca_btl_usnic_component.transport_protocol =
module->fabric_info->ep_attr->protocol;
/* The usnic provider in libfabric v1.0.0 (i.e., API v1.0) treated
FI_MSG_PREFIX inconsistently between senders and receivers. This
was corrected in libfabric v1.1.0 (i.e., API v1.1), which treats
FI_MSG_PREFIX consistently between senders and receivers.
So check which version of the libfabric API we have, and set up
to use either the "old" (inconsistent) MSG_PREFIX behavior or the
"new" (consistent) MSG_PREFIX behavior.
NOTE: This is a little redundant; we're setting a
component-level attribute during each module's setup. We do
this here (and not earlier, when we check fi_version() during
the component setup) because we can't obtain the value of the
endpoint msg_prefix_size until we set up the first module.
It is safe because each module will set the component
attribute to the same value. */
uint32_t libfabric_api;
libfabric_api = fi_version();
if (1 == FI_MAJOR(libfabric_api) &&
0 == FI_MINOR(libfabric_api)) {
mca_btl_usnic_component.prefix_send_offset = 0;
} else {
mca_btl_usnic_component.prefix_send_offset =
module->fabric_info->ep_attr->msg_prefix_size;
}
}
/*
@ -1835,13 +1866,15 @@ static void init_payload_lengths(opal_btl_usnic_module_t *module)
/* Find the max payload this port can handle */
module->max_frag_payload =
module->local_modex.max_msg_size - /* start with the MTU */
sizeof(opal_btl_usnic_btl_header_t); /* subtract size of
the BTL header */
sizeof(opal_btl_usnic_btl_header_t) - /* subtract size of
the BTL header */
mca_btl_usnic_component.prefix_send_offset;
/* same, but use chunk header */
module->max_chunk_payload =
module->local_modex.max_msg_size -
sizeof(opal_btl_usnic_btl_chunk_header_t);
sizeof(opal_btl_usnic_btl_chunk_header_t) -
mca_btl_usnic_component.prefix_send_offset;
/* Priority queue MTU and max size */
if (0 == module->max_tiny_msg_size) {
@ -2093,11 +2126,10 @@ static void init_random_objects(opal_btl_usnic_module_t *module)
static void init_freelists(opal_btl_usnic_module_t *module)
{
int rc;
int rc __opal_attribute_unused__;
uint32_t segsize;
segsize = (module->local_modex.max_msg_size +
module->fabric_info->ep_attr->msg_prefix_size +
opal_cache_line_size - 1) &
~(opal_cache_line_size - 1);
@ -2105,7 +2137,7 @@ static void init_freelists(opal_btl_usnic_module_t *module)
OBJ_CONSTRUCT(&module->small_send_frags, opal_free_list_t);
rc = usnic_compat_free_list_init(&module->small_send_frags,
sizeof(opal_btl_usnic_small_send_frag_t) +
mca_btl_usnic_component.transport_header_len,
mca_btl_usnic_component.prefix_send_offset,
opal_cache_line_size,
OBJ_CLASS(opal_btl_usnic_small_send_frag_t),
segsize,
@ -2123,7 +2155,7 @@ static void init_freelists(opal_btl_usnic_module_t *module)
OBJ_CONSTRUCT(&module->large_send_frags, opal_free_list_t);
rc = usnic_compat_free_list_init(&module->large_send_frags,
sizeof(opal_btl_usnic_large_send_frag_t) +
mca_btl_usnic_component.transport_header_len,
mca_btl_usnic_component.prefix_send_offset,
opal_cache_line_size,
OBJ_CLASS(opal_btl_usnic_large_send_frag_t),
0, /* payload size */
@ -2141,7 +2173,7 @@ static void init_freelists(opal_btl_usnic_module_t *module)
OBJ_CONSTRUCT(&module->put_dest_frags, opal_free_list_t);
rc = usnic_compat_free_list_init(&module->put_dest_frags,
sizeof(opal_btl_usnic_put_dest_frag_t) +
mca_btl_usnic_component.transport_header_len,
mca_btl_usnic_component.prefix_send_offset,
opal_cache_line_size,
OBJ_CLASS(opal_btl_usnic_put_dest_frag_t),
0, /* payload size */
@ -2160,7 +2192,7 @@ static void init_freelists(opal_btl_usnic_module_t *module)
OBJ_CONSTRUCT(&module->chunk_segs, opal_free_list_t);
rc = usnic_compat_free_list_init(&module->chunk_segs,
sizeof(opal_btl_usnic_chunk_segment_t) +
mca_btl_usnic_component.transport_header_len,
mca_btl_usnic_component.prefix_send_offset,
opal_cache_line_size,
OBJ_CLASS(opal_btl_usnic_chunk_segment_t),
segsize,
@ -2178,12 +2210,11 @@ static void init_freelists(opal_btl_usnic_module_t *module)
/* ACK segments freelist */
uint32_t ack_segment_len;
ack_segment_len = (sizeof(opal_btl_usnic_btl_header_t) +
module->fabric_info->ep_attr->msg_prefix_size +
opal_cache_line_size - 1) & ~(opal_cache_line_size - 1);
OBJ_CONSTRUCT(&module->ack_segs, opal_free_list_t);
rc = usnic_compat_free_list_init(&module->ack_segs,
sizeof(opal_btl_usnic_ack_segment_t) +
mca_btl_usnic_component.transport_header_len,
mca_btl_usnic_component.prefix_send_offset,
opal_cache_line_size,
OBJ_CLASS(opal_btl_usnic_ack_segment_t),
ack_segment_len,
@ -2308,7 +2339,6 @@ opal_btl_usnic_module_t opal_btl_usnic_module_template = {
.btl_exclusivity = MCA_BTL_EXCLUSIVITY_DEFAULT,
.btl_flags =
MCA_BTL_FLAGS_SEND |
MCA_BTL_FLAGS_PUT |
MCA_BTL_FLAGS_SEND_INPLACE,
.btl_add_procs = usnic_add_procs,

View file

@ -77,7 +77,7 @@ void opal_btl_usnic_recv_call(opal_btl_usnic_module_t *module,
/* Find out who sent this segment */
endpoint = seg->rs_endpoint;
if (FAKE_RECV_FRAG_DROP || OPAL_UNLIKELY(NULL == endpoint)) {
if (FAKE_RECV_DROP || OPAL_UNLIKELY(NULL == endpoint)) {
/* No idea who this was from, so drop it */
#if MSGDEBUG1
opal_output(0, "=== Unknown sender; dropped: seq %" UDSEQ,

View file

@ -267,6 +267,9 @@ opal_btl_usnic_recv_fast(opal_btl_usnic_module_t *module,
int delta;
int i;
/* Make the whole payload Valgrind defined */
opal_memchecker_base_mem_defined(seg->rs_protocol_header, seg->rs_len);
bseg = &seg->rs_base;
/* Find out who sent this segment */
@ -286,10 +289,6 @@ opal_btl_usnic_dump_hex(bseg->us_btl_header, bseg->us_btl_header->payload_len +
bseg->us_btl_header->payload_type) &&
seg->rs_base.us_btl_header->put_addr == NULL) {
/* Valgrind help */
opal_memchecker_base_mem_defined(
(void*)(seg->rs_protocol_header), seg->rs_len);
seq = seg->rs_base.us_btl_header->pkt_seq;
delta = SEQ_DIFF(seq, endpoint->endpoint_next_contig_seq_to_recv);
if (delta < 0 || delta >= WINDOW_SIZE) {
@ -382,6 +381,9 @@ opal_btl_usnic_recv(opal_btl_usnic_module_t *module,
opal_btl_usnic_endpoint_t *endpoint;
int rc;
/* Make the whole payload Valgrind defined */
opal_memchecker_base_mem_defined(seg->rs_protocol_header, seg->rs_len);
bseg = &seg->rs_base;
/* Find out who sent this segment */

View file

@ -66,6 +66,8 @@ opal_btl_usnic_frag_send_complete(opal_btl_usnic_module_t *module,
/* see if this endpoint needs to be made ready-to-send */
opal_btl_usnic_check_rts(frag->sf_endpoint);
++module->mod_channels[sseg->ss_channel].credits;
}
/*
@ -97,6 +99,8 @@ opal_btl_usnic_chunk_send_complete(opal_btl_usnic_module_t *module,
/* see if this endpoint needs to be made ready-to-send */
opal_btl_usnic_check_rts(frag->sf_endpoint);
++module->mod_channels[sseg->ss_channel].credits;
}
/* Responsible for completing non-fastpath parts of a put or send operation,

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2014 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2013-2015 Cisco Systems, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -79,7 +79,7 @@ opal_btl_usnic_post_segment(
/* Send the segment */
ret = fi_send(channel->ep,
sseg->ss_ptr,
sseg->ss_len,
sseg->ss_len + mca_btl_usnic_component.prefix_send_offset,
NULL,
endpoint->endpoint_remote_addrs[channel_id],
sseg);
@ -128,7 +128,7 @@ opal_btl_usnic_post_ack(
ret = fi_send(channel->ep,
sseg->ss_ptr,
sseg->ss_len,
sseg->ss_len + mca_btl_usnic_component.prefix_send_offset,
NULL,
endpoint->endpoint_remote_addrs[channel_id],
sseg);