1
1

Fix a collection of bugs found by QA and Coverity, and make some minor

improvements:

* Fix minor memory leaks during component_init
* Ensure that an initialization loop does not underflow an unsigned int
* Improve mlock limit checking
* Fix set of BTL modules created during component_init when failing to
  get QP resources or otherwise excluding some (but not all) usnic
  verbs devices
* Fix/improve error messages to be consistent with other Cisco
  documentation
* Randomize the initial sliding window sequence number so that we
  silently drop incoming frames from previous jobs that still have
  extant processes in the middle of dying (and are still
  transmitting)
* Ensure we don't break out of add_procs too soon and create an
  asymmetrical view of what interfaces are available

This commit was SVN r28975.
Этот коммит содержится в:
Jeff Squyres 2013-08-01 16:56:15 +00:00
родитель 37db1727a2
Коммит 87910daf51
8 изменённых файлов: 174 добавлений и 126 удалений

Просмотреть файл

@ -115,8 +115,10 @@ typedef struct ompi_btl_usnic_component_t {
protocol headers) */
uint64_t my_hashed_rte_name;
/** array of available BTLs */
struct ompi_btl_usnic_module_t* usnic_modules;
/** array of possible BTLs (>= num_modules elements) */
struct ompi_btl_usnic_module_t* usnic_all_modules;
/** array of pointers to active BTLs (num_modules elements) */
struct ompi_btl_usnic_module_t** usnic_active_modules;
/** list of usnic proc structures */
opal_list_t usnic_procs;

Просмотреть файл

@ -38,6 +38,7 @@
#include <errno.h>
#include <infiniband/verbs.h>
#include <unistd.h>
#include <stdlib.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/types.h>
@ -152,7 +153,8 @@ static int usnic_component_open(void)
{
/* initialize state */
mca_btl_usnic_component.num_modules = 0;
mca_btl_usnic_component.usnic_modules = NULL;
mca_btl_usnic_component.usnic_all_modules = NULL;
mca_btl_usnic_component.usnic_active_modules = NULL;
/* In this version, the USNIC stack does not support having more
* than one GID. So just hard-wire this value to 0. */
@ -202,6 +204,9 @@ static int usnic_component_close(void)
usnic_clock_timer_event_set = false;
}
free(mca_btl_usnic_component.usnic_all_modules);
free(mca_btl_usnic_component.usnic_active_modules);
return OMPI_SUCCESS;
}
@ -227,7 +232,7 @@ static int usnic_modex_send(void)
for (i = 0; i < mca_btl_usnic_component.num_modules; i++) {
ompi_btl_usnic_module_t* module =
&mca_btl_usnic_component.usnic_modules[i];
mca_btl_usnic_component.usnic_active_modules[i];
addrs[i] = module->local_addr;
opal_output_verbose(5, USNIC_OUT,
"btl:usnic: modex_send DQP:%d, CQP:%d, subnet = 0x%016" PRIx64 " interface =0x%016" PRIx64,
@ -345,7 +350,7 @@ static int check_usnic_config(struct ibv_device_attr *device_attr,
}
if (num_vfs * qp_per_vf < num_local_procs * USNIC_NUM_CHANNELS) {
snprintf(str, sizeof(str), "Not enough SQ/RQ (found %d, need %d)",
snprintf(str, sizeof(str), "Not enough WQ/RQ (found %d, need %d)",
num_vfs * qp_per_vf,
num_local_procs * USNIC_NUM_CHANNELS);
goto error;
@ -441,6 +446,8 @@ static mca_btl_base_module_t** usnic_component_init(int* num_btl_modules,
seed_prng();
srandom((unsigned int)getpid());
/* Find the ports that we want to use. We do our own interface name
* filtering below, so don't let the verbs code see our
* if_include/if_exclude strings */
@ -459,18 +466,19 @@ static mca_btl_base_module_t** usnic_component_init(int* num_btl_modules,
malloc(mca_btl_usnic_component.num_modules *
sizeof(ompi_btl_usnic_module_t*));
if (NULL == btls) {
btls = NULL;
goto free_include_list;
}
/* Allocate space for btl module instances */
mca_btl_usnic_component.usnic_modules = (ompi_btl_usnic_module_t*)
mca_btl_usnic_component.usnic_all_modules =
calloc(mca_btl_usnic_component.num_modules,
sizeof(ompi_btl_usnic_module_t));
if (NULL == mca_btl_usnic_component.usnic_modules) {
free(btls);
btls = NULL;
goto free_include_list;
sizeof(*mca_btl_usnic_component.usnic_all_modules));
mca_btl_usnic_component.usnic_active_modules =
calloc(mca_btl_usnic_component.num_modules,
sizeof(*mca_btl_usnic_component.usnic_active_modules));
if (NULL == mca_btl_usnic_component.usnic_all_modules ||
NULL == mca_btl_usnic_component.usnic_active_modules) {
goto error;
}
/* If we have include or exclude list, parse and set up now
@ -506,7 +514,7 @@ static mca_btl_base_module_t** usnic_component_init(int* num_btl_modules,
item != opal_list_get_end(port_list) &&
(0 == mca_btl_usnic_component.max_modules ||
i < mca_btl_usnic_component.max_modules);
++i, item = opal_list_get_next(item)) {
item = opal_list_get_next(item)) {
port = (ompi_common_verbs_port_item_t*) item;
opal_output_verbose(5, USNIC_OUT,
@ -520,15 +528,13 @@ static mca_btl_base_module_t** usnic_component_init(int* num_btl_modules,
opal_output_verbose(5, USNIC_OUT,
"btl:usnic: this is not a usnic-capable device");
--mca_btl_usnic_component.num_modules;
--i;
continue; /* next port */
}
/* Fill in a bunch of the module struct */
module = &(mca_btl_usnic_component.usnic_modules[i]);
module = &(mca_btl_usnic_component.usnic_all_modules[i]);
if (OMPI_SUCCESS != init_module_from_port(module, port)) {
--mca_btl_usnic_component.num_modules;
--i;
continue; /* next port */
}
@ -542,7 +548,6 @@ static mca_btl_base_module_t** usnic_component_init(int* num_btl_modules,
(filter_incl ? "if_include" : "if_exclude"));
if (!keep_module) {
--mca_btl_usnic_component.num_modules;
--i;
continue; /* next port */
}
}
@ -557,7 +562,6 @@ static mca_btl_base_module_t** usnic_component_init(int* num_btl_modules,
"ibv_query_device", __FILE__, __LINE__,
"Failed to query usNIC device; is the usnic_verbs Linux kernel module loaded?");
--mca_btl_usnic_component.num_modules;
--i;
continue;
}
opal_memchecker_base_mem_defined(&device_attr, sizeof(device_attr));
@ -566,7 +570,6 @@ static mca_btl_base_module_t** usnic_component_init(int* num_btl_modules,
if (check_usnic_config(&device_attr, module,
num_local_procs) != OMPI_SUCCESS) {
--mca_btl_usnic_component.num_modules;
--i;
continue;
}
@ -595,8 +598,8 @@ static mca_btl_base_module_t** usnic_component_init(int* num_btl_modules,
* override.
*/
if (-1 == mca_btl_usnic_component.prio_sd_num) {
module->prio_sd_num =
max(128, 32 * ompi_process_info.num_procs) - 1;
module->prio_sd_num =
max(128, 32 * orte_process_info.num_procs) - 1;
} else {
module->prio_sd_num = mca_btl_usnic_component.prio_sd_num;
}
@ -604,8 +607,8 @@ static mca_btl_base_module_t** usnic_component_init(int* num_btl_modules,
module->prio_sd_num = device_attr.max_qp_wr;
}
if (-1 == mca_btl_usnic_component.prio_rd_num) {
module->prio_rd_num =
max(128, 32 * ompi_process_info.num_procs) - 1;
module->prio_rd_num =
max(128, 32 * orte_process_info.num_procs) - 1;
} else {
module->prio_rd_num = mca_btl_usnic_component.prio_rd_num;
}
@ -651,7 +654,7 @@ static mca_btl_base_module_t** usnic_component_init(int* num_btl_modules,
opal_hash_table_init(&module->senders, 4096);
/* Let this module advance to the next round! */
btls[i] = &(module->super);
btls[i++] = &(module->super);
}
/* free filter if created */
@ -747,6 +750,10 @@ static mca_btl_base_module_t** usnic_component_init(int* num_btl_modules,
module->super.btl_exclusivity);
}
/* We may have skipped some modules, so reset
component.num_modules */
mca_btl_usnic_component.num_modules = num_final_modules;
/* We've packed all the modules and pointers to those modules in
the lower ends of their respective arrays. If not all the
modules initialized successfully, we're wasting a little space.
@ -756,23 +763,15 @@ static mca_btl_base_module_t** usnic_component_init(int* num_btl_modules,
That being said, if we ended up with zero acceptable ports,
then free everything. */
if (0 == num_final_modules) {
if (NULL != mca_btl_usnic_component.usnic_modules) {
free(mca_btl_usnic_component.usnic_modules);
mca_btl_usnic_component.usnic_modules = NULL;
}
if (NULL != btls) {
free(btls);
btls = NULL;
}
opal_output_verbose(5, USNIC_OUT,
"btl:usnic: returning 0 modules");
goto modex_send;
goto error;
}
/* We may have skipped some modules, so reset
component.num_modules */
mca_btl_usnic_component.num_modules = num_final_modules;
/* we have a nonzero number of modules, so save a copy of the btls array
* for later use */
memcpy(mca_btl_usnic_component.usnic_active_modules, btls,
num_final_modules*sizeof(*btls));
/* Loop over the modules and find the minimum value for
module->numa_distance. For every module that has a
@ -822,6 +821,16 @@ static mca_btl_base_module_t** usnic_component_init(int* num_btl_modules,
modex_send:
usnic_modex_send();
return btls;
error:
/* clean up as much allocated memory as possible */
free(btls);
btls = NULL;
free(mca_btl_usnic_component.usnic_all_modules);
mca_btl_usnic_component.usnic_all_modules = NULL;
free(mca_btl_usnic_component.usnic_active_modules);
mca_btl_usnic_component.usnic_active_modules = NULL;
goto free_include_list;
}
/*
@ -845,7 +854,7 @@ static int usnic_component_progress(void)
/* Poll for completions */
for (i = 0; i < mca_btl_usnic_component.num_modules; i++) {
module = &mca_btl_usnic_component.usnic_modules[i];
module = mca_btl_usnic_component.usnic_active_modules[i];
/* poll each channel */
for (c=0; c<USNIC_NUM_CHANNELS; ++c) {
@ -855,7 +864,8 @@ static int usnic_component_progress(void)
opal_memchecker_base_mem_defined(&num_events, sizeof(num_events));
opal_memchecker_base_mem_defined(wc, sizeof(wc[0]) * num_events);
if (OPAL_UNLIKELY(num_events < 0)) {
BTL_ERROR(("error polling CQ with %d: %s",
BTL_ERROR(("%s: error polling CQ[%d] with %d: %s",
ibv_get_device_name(module->device), c,
num_events, strerror(errno)));
return OMPI_ERROR;
}
@ -874,11 +884,11 @@ static int usnic_component_progress(void)
if (cwc->byte_len <
(sizeof(ompi_btl_usnic_protocol_header_t)+
sizeof(ompi_btl_usnic_btl_header_t))) {
BTL_ERROR(("RX error polling CQ[%d] with status %d for wr_id %" PRIx64 " vend_err %d, byte_len %d (%d of %d)",
BTL_ERROR(("%s: RX error polling CQ[%d] with status %d for wr_id %" PRIx64 " vend_err %d, byte_len %d (%d of %d)",
ibv_get_device_name(module->device),
c, cwc->status, cwc->wr_id,
cwc->vendor_err,
j, num_events, cwc->byte_len));
abort();
cwc->vendor_err, cwc->byte_len,
j, num_events));
} else {
/* silently count CRC errors */
++module->num_crc_errors;
@ -887,7 +897,8 @@ abort();
repost_recv_head = &rseg->rs_recv_desc;
continue;
} else {
BTL_ERROR(("error polling CQ with status %d for wr_id %" PRIx64 " opcode %d, vend_err %d (%d of %d)",
BTL_ERROR(("%s: error polling CQ[%d] with status %d for wr_id %" PRIx64 " opcode %d, vend_err %d (%d of %d)",
ibv_get_device_name(module->device), c,
cwc->status, cwc->wr_id, cwc->opcode,
cwc->vendor_err,
j, num_events));
@ -1144,6 +1155,7 @@ static usnic_if_filter_t *parse_ifex_str(const char *orig_str,
argv = opal_argv_split(orig_str, ',');
if (NULL == argv || 0 == (n_argv = opal_argv_count(argv))) {
free(filter);
opal_argv_free(argv);
return NULL;
}

Просмотреть файл

@ -67,10 +67,6 @@ static void endpoint_construct(mca_btl_base_endpoint_t* endpoint)
/* list of fragments queued to be sent */
OBJ_CONSTRUCT(&endpoint->endpoint_frag_send_queue, opal_list_t);
endpoint->endpoint_next_seq_to_send = 10;
endpoint->endpoint_ack_seq_rcvd = 9;
endpoint->endpoint_next_contig_seq_to_recv = 10;
endpoint->endpoint_highest_seq_rcvd = 9;
endpoint->endpoint_next_frag_id = 1;
endpoint->endpoint_acktime = 0;

Просмотреть файл

@ -65,6 +65,7 @@ typedef enum ompi_btl_usnic_channel_id_t {
} ompi_btl_usnic_channel_id_t;
typedef struct ompi_btl_usnic_addr_t {
ompi_btl_usnic_seq_t isn;
uint32_t qp_num[USNIC_NUM_CHANNELS];
union ibv_gid gid;
uint32_t ipv4_addr;

Просмотреть файл

@ -24,6 +24,7 @@
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <stdlib.h>
#include "opal/class/opal_bitmap.h"
#include "opal/prefetch.h"
@ -96,18 +97,19 @@ static int usnic_add_procs(struct mca_btl_base_module_t* base_module,
the proc is shared by all usnic modules that are trying
to reach this destination. */
usnic_proc = NULL;
if (OMPI_SUCCESS !=
ompi_btl_usnic_proc_match(ompi_proc, module, &usnic_proc)) {
rc = ompi_btl_usnic_proc_match(ompi_proc, module, &usnic_proc);
if (OMPI_SUCCESS != rc) {
return OMPI_ERR_OUT_OF_RESOURCE;
}
/* Create the endpoint for this proc/module combination */
/* Create the endpoint for this proc/module combination. If we cannot
* reach this proc via this module, move on to the next proc. */
usnic_endpoint = NULL;
rc = ompi_btl_usnic_create_endpoint(module, usnic_proc,
&usnic_endpoint);
if (OMPI_SUCCESS != rc) {
OBJ_RELEASE(usnic_proc);
return rc;
continue;
}
/* Add to array of all procs */
@ -1551,16 +1553,19 @@ ompi_btl_usnic_channel_init(
/* Initialize pool of receive segments */
OBJ_CONSTRUCT(&channel->recv_segs, ompi_free_list_t);
channel->recv_segs.ctx = module;
ompi_free_list_init_new(&channel->recv_segs,
sizeof(ompi_btl_usnic_recv_segment_t),
opal_cache_line_size,
OBJ_CLASS(ompi_btl_usnic_recv_segment_t),
mtu,
opal_cache_line_size,
rd_num,
rd_num,
rd_num,
module->super.btl_mpool);
rc = ompi_free_list_init_new(&channel->recv_segs,
sizeof(ompi_btl_usnic_recv_segment_t),
opal_cache_line_size,
OBJ_CLASS(ompi_btl_usnic_recv_segment_t),
mtu,
opal_cache_line_size,
rd_num,
rd_num,
rd_num,
module->super.btl_mpool);
if (OMPI_SUCCESS != rc) {
goto error;
}
/* Post receive descriptors */
for (i = 0; i < rd_num; i++) {
@ -1611,6 +1616,21 @@ error:
return OMPI_ERROR;
}
/*
* generate initial send sequence number
*/
static ompi_btl_usnic_seq_t
get_initial_seq_no(void)
{
ompi_btl_usnic_seq_t isn;
/* only utilize the bottom 62 bits to avoid hitting seq # overflow */
isn = (((uint64_t)random() & ((1LL<<30)-1)) << 32) |
((uint64_t)random() & ((1LL<<32)-1));
isn += 2; /* guarantee > 1 */
return isn;
}
/*
* Initialize the btl module by allocating a protection domain,
* memory pool, priority and data channels, and free lists
@ -1691,6 +1711,8 @@ int ompi_btl_usnic_module_init(ompi_btl_usnic_module_t *module)
goto chan_destroy;
}
module->local_addr.isn = get_initial_seq_no();
/* Place QP number in our local address information */
module->local_addr.qp_num[USNIC_PRIORITY_CHANNEL] =
module->mod_channels[USNIC_PRIORITY_CHANNEL].qp->qp_num;
@ -1720,71 +1742,76 @@ int ompi_btl_usnic_module_init(ompi_btl_usnic_module_t *module)
/* Send frags freelists */
module->small_send_frags.ctx = module;
OBJ_CONSTRUCT(&module->small_send_frags, ompi_free_list_t);
ompi_free_list_init_new(&module->small_send_frags,
sizeof(ompi_btl_usnic_small_send_frag_t),
opal_cache_line_size,
OBJ_CLASS(ompi_btl_usnic_small_send_frag_t),
module->if_mtu,
opal_cache_line_size,
module->sd_num * 4,
-1,
module->sd_num / 2,
module->super.btl_mpool);
rc = ompi_free_list_init_new(&module->small_send_frags,
sizeof(ompi_btl_usnic_small_send_frag_t),
opal_cache_line_size,
OBJ_CLASS(ompi_btl_usnic_small_send_frag_t),
module->if_mtu,
opal_cache_line_size,
module->sd_num * 4,
-1,
module->sd_num / 2,
module->super.btl_mpool);
assert(OMPI_SUCCESS == rc);
module->large_send_frags.ctx = module;
OBJ_CONSTRUCT(&module->large_send_frags, ompi_free_list_t);
ompi_free_list_init_new(&module->large_send_frags,
sizeof(ompi_btl_usnic_large_send_frag_t),
opal_cache_line_size,
OBJ_CLASS(ompi_btl_usnic_large_send_frag_t),
0, /* payload size */
0, /* payload align */
module->sd_num / 8,
-1,
module->sd_num / 8,
NULL);
rc = ompi_free_list_init_new(&module->large_send_frags,
sizeof(ompi_btl_usnic_large_send_frag_t),
opal_cache_line_size,
OBJ_CLASS(ompi_btl_usnic_large_send_frag_t),
0, /* payload size */
0, /* payload align */
module->sd_num / 8,
-1,
module->sd_num / 8,
NULL);
assert(OMPI_SUCCESS == rc);
module->put_dest_frags.ctx = module;
OBJ_CONSTRUCT(&module->put_dest_frags, ompi_free_list_t);
ompi_free_list_init_new(&module->put_dest_frags,
sizeof(ompi_btl_usnic_put_dest_frag_t),
opal_cache_line_size,
OBJ_CLASS(ompi_btl_usnic_put_dest_frag_t),
0, /* payload size */
0, /* payload align */
module->sd_num / 8,
-1,
module->sd_num / 8,
NULL);
rc = ompi_free_list_init_new(&module->put_dest_frags,
sizeof(ompi_btl_usnic_put_dest_frag_t),
opal_cache_line_size,
OBJ_CLASS(ompi_btl_usnic_put_dest_frag_t),
0, /* payload size */
0, /* payload align */
module->sd_num / 8,
-1,
module->sd_num / 8,
NULL);
assert(OMPI_SUCCESS == rc);
/* list of segments to use for sending */
module->chunk_segs.ctx = module;
OBJ_CONSTRUCT(&module->chunk_segs, ompi_free_list_t);
ompi_free_list_init_new(&module->chunk_segs,
sizeof(ompi_btl_usnic_chunk_segment_t),
opal_cache_line_size,
OBJ_CLASS(ompi_btl_usnic_chunk_segment_t),
module->if_mtu,
opal_cache_line_size,
module->sd_num * 4,
-1,
module->sd_num / 2,
module->super.btl_mpool);
rc = ompi_free_list_init_new(&module->chunk_segs,
sizeof(ompi_btl_usnic_chunk_segment_t),
opal_cache_line_size,
OBJ_CLASS(ompi_btl_usnic_chunk_segment_t),
module->if_mtu,
opal_cache_line_size,
module->sd_num * 4,
-1,
module->sd_num / 2,
module->super.btl_mpool);
assert(OMPI_SUCCESS == rc);
/* ACK segments freelist */
module->ack_segs.ctx = module;
ack_segment_len = sizeof(ompi_btl_usnic_btl_header_t);
OBJ_CONSTRUCT(&module->ack_segs, ompi_free_list_t);
ompi_free_list_init_new(&module->ack_segs,
sizeof(ompi_btl_usnic_ack_segment_t),
opal_cache_line_size,
OBJ_CLASS(ompi_btl_usnic_ack_segment_t),
ack_segment_len,
opal_cache_line_size,
module->sd_num * 4,
-1,
module->sd_num / 2,
module->super.btl_mpool);
rc = ompi_free_list_init_new(&module->ack_segs,
sizeof(ompi_btl_usnic_ack_segment_t),
opal_cache_line_size,
OBJ_CLASS(ompi_btl_usnic_ack_segment_t),
ack_segment_len,
opal_cache_line_size,
module->sd_num * 4,
-1,
module->sd_num / 2,
module->super.btl_mpool);
assert(OMPI_SUCCESS == rc);
/*
* Initialize pools of large recv buffers
@ -1797,16 +1824,17 @@ int ompi_btl_usnic_module_init(ompi_btl_usnic_module_t *module)
for (i=module->first_pool; i<=module->last_pool; ++i) {
module->module_recv_buffers[i].ctx = module;
OBJ_CONSTRUCT(&module->module_recv_buffers[i], ompi_free_list_t);
ompi_free_list_init_new(&module->module_recv_buffers[i],
1 << i,
opal_cache_line_size,
OBJ_CLASS(ompi_btl_usnic_large_send_frag_t),
0, /* payload size */
0, /* payload align */
8,
128,
8,
NULL);
rc = ompi_free_list_init_new(&module->module_recv_buffers[i],
1 << i,
opal_cache_line_size,
OBJ_CLASS(ompi_btl_usnic_large_send_frag_t),
0, /* payload size */
0, /* payload align */
8,
128,
8,
NULL);
assert(OMPI_SUCCESS == rc);
}
if (mca_btl_usnic_component.stats_enabled) {

Просмотреть файл

@ -191,6 +191,7 @@ static ompi_btl_usnic_proc_t *create_proc(ompi_proc_t *ompi_proc)
"<none>", 0,
"ompi_modex_recv() failed", __FILE__, __LINE__,
opal_strerror(rc));
OBJ_RELEASE(proc);
return NULL;
}
@ -345,6 +346,14 @@ ompi_btl_usnic_create_endpoint(ompi_btl_usnic_module_t *module,
endpoint->endpoint_module = module;
endpoint->endpoint_remote_addr = proc->proc_modex[modex_index];
/* Initialize endpoint sequence number info */
endpoint->endpoint_next_seq_to_send = module->local_addr.isn;
endpoint->endpoint_ack_seq_rcvd = endpoint->endpoint_next_seq_to_send - 1;
endpoint->endpoint_next_contig_seq_to_recv =
endpoint->endpoint_remote_addr.isn;
endpoint->endpoint_highest_seq_rcvd =
endpoint->endpoint_next_contig_seq_to_recv - 1;
/* Create the address handle on this endpoint from the modex info.
memset to both silence valgrind warnings (since the attr struct
ends up getting written down an fd to the kernel) and actually

Просмотреть файл

@ -72,7 +72,7 @@ ompi_btl_usnic_post_segment(
sseg->ss_base.us_type,
(void*) sseg->ss_send_desc.sg_list->addr,
sseg->ss_send_desc.sg_list->length);
ompi_btl_usnic_dump_hex((void *)(sseg->ss_send_desc.sg_list->addr + sizeof(ompi_btl_usnic_btl_header_t)), 16);
/*ompi_btl_usnic_dump_hex((void *)(sseg->ss_send_desc.sg_list->addr + sizeof(ompi_btl_usnic_btl_header_t)), 16); */
#endif
/* set target address */

Просмотреть файл

@ -1,6 +1,6 @@
# -*- text -*-
#
# Copyright (c) 2012 Cisco Systems, Inc. All rights reserved.
# Copyright (c) 2012-2013 Cisco Systems, Inc. All rights reserved.
#
# $COPYRIGHT$
#
@ -55,7 +55,7 @@ usually means one of two things:
1. You are running something other than this MPI job on this server
that is consuming usNIC resources.
2. You have run our of locked Linux memory. You should probably set
2. You have run out of locked Linux memory. You should probably set
the Linux "memlock" limits to "unlimited". See this FAQ entry for
details:
@ -148,7 +148,7 @@ in either lower performance or your job aborting.
The usNIC BTL failed to initialize while trying to register some
memory. This typically can indicate that the "memlock" limits are set
too low. For most HPC installations, the memlock limits should be set
to "unlimited". The failure occured here:
to "unlimited". The failure occurred here:
Local host: %s
Memlock limit: %s