1
1
openmpi/opal/mca/btl/usnic/btl_usnic_module.h
Ralph Castain 552c9ca5a0 George did the work and deserves all the credit for it. Ralph did the merge, and deserves whatever blame results from errors in it :-)
WHAT:    Open our low-level communication infrastructure by moving all necessary components (btl/rcache/allocator/mpool) down in OPAL

All the components required for inter-process communications are currently deeply integrated in the OMPI layer. Several groups/institutions have expressed interest in having a more generic communication infrastructure, without all the OMPI layer dependencies.  This communication layer should be made available at a different software level, available to all layers in the Open MPI software stack. As an example, our ORTE layer could replace the current OOB and instead use the BTL directly, gaining access to more reactive network interfaces than TCP.  Similarly, external software libraries could take advantage of our highly optimized AM (active message) communication layer for their own purposes.  UTK, with support from Sandia, developed a version of Open MPI where the entire communication infrastructure has been moved down to OPAL (btl/rcache/allocator/mpool). Most of the moved components have been updated to match the new schema, with a few exceptions (mainly BTLs where I have no way of compiling/testing them). Thus, the completion of this RFC is tied to being able to complete this move for all BTLs. For this we need help from the rest of the Open MPI community, especially those supporting some of the BTLs.  A non-exhaustive list of BTLs that qualify here is: mx, portals4, scif, udapl, ugni, usnic.

This commit was SVN r32317.
2014-07-26 00:47:28 +00:00

273 lines
7.8 KiB
C

/*
* Copyright (c) 2004-2008 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2011 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2006 Sandia National Laboratories. All rights
* reserved.
* Copyright (c) 2011-2014 Cisco Systems, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
/**
* @file
*/
#ifndef OPAL_BTL_USNIC_MODULE_H
#define OPAL_BTL_USNIC_MODULE_H
#include "opal/class/opal_pointer_array.h"
#include "opal/mca/common/verbs/common_verbs.h"
#include "btl_usnic_endpoint.h"
#include "btl_usnic_stats.h"
/*
* Default limits.
*
* These values obtained from empirical testing on Intel E5-2690
* machines with Sereno/Lexington cards through an N3546 switch.
*/
/* Eager-send size limit when only one usNIC device is in use */
#define USNIC_DFLT_EAGER_LIMIT_1DEVICE (150 * 1024)
/* Eager-send size limit when multiple usNIC devices are in use */
#define USNIC_DFLT_EAGER_LIMIT_NDEVICES (25 * 1024)
/* Default rendezvous-protocol eager limit, in bytes */
#define USNIC_DFLT_RNDV_EAGER_LIMIT 500
/* Messages above this size use lazy/deferred packing
   (NOTE(review): exact semantics defined where this is consumed) */
#define USNIC_DFLT_PACK_LAZY_THRESHOLD (16 * 1024)
BEGIN_C_DECLS
/*
* Forward declarations to avoid include loops
*/
struct opal_btl_usnic_send_segment_t;
struct opal_btl_usnic_recv_segment_t;
/*
* Abstraction of a set of IB queues
*/
typedef struct opal_btl_usnic_channel_t {
/** index of this channel within the module's mod_channels[] array --
    TODO confirm against the initialization code */
int chan_index;
/** verbs completion queue for this channel */
struct ibv_cq *cq;
/** MTU used for traffic on this channel */
int chan_mtu;
/** number of receive descriptors for this channel */
int chan_rd_num;
/** number of send descriptors for this channel */
int chan_sd_num;
/** available send WQ entries */
int32_t sd_wqe;
/* fastsend enabled if sd_wqe >= fastsend_wqe_thresh */
int fastsend_wqe_thresh;
/* pointer to receive segment whose bookkeeping has been deferred */
struct opal_btl_usnic_recv_segment_t *chan_deferred_recv;
/** queue pair */
struct ibv_qp* qp;
/** head of a chain of receive work requests to be re-posted to the QP */
struct ibv_recv_wr *repost_recv_head;
/** receive segments & buffers */
ompi_free_list_t recv_segs;
bool chan_error; /* set when error detected on channel */
/* statistics */
uint32_t num_channel_sends;
} opal_btl_usnic_channel_t;
/**
* usNIC verbs BTL interface
*/
typedef struct opal_btl_usnic_module_t {
/** base class: generic BTL module interface (must be first) */
mca_btl_base_module_t super;
/* Cache for use during component_init to associate a module with
the opal_common_verbs_port_item_t that it came from. */
opal_common_verbs_port_item_t *port;
/** callback invoked to report errors up to the PML */
mca_btl_base_module_error_cb_fn_t pml_error_callback;
/* Information about the usNIC verbs device */
uint8_t port_num;
struct ibv_device *device;
struct ibv_context *device_context;
/** libevent event used for device async event notifications --
    TODO confirm which event loop it is registered with */
struct event device_async_event;
bool device_async_event_active;
/** verbs protection domain for this device */
struct ibv_pd *pd;
int numa_distance; /* hwloc NUMA distance from this process */
/* Information about the IP interface corresponding to this USNIC
interface */
char if_name[64];
uint32_t if_ipv4_addr; /* in network byte order */
uint32_t if_cidrmask; /* X in "/X" CIDR addr fmt, host byte order */
uint8_t if_mac[6];
int if_mtu;
/** desired send, receive, and completion queue entries (from MCA
params; cached here on the component because the MCA param
might == 0, which means "max supported on that device") */
int sd_num;
int rd_num;
int cq_num;
/* send/receive queue depths for the priority channel */
int prio_sd_num;
int prio_rd_num;
/*
* Fragments larger than max_frag_payload will be broken up into
* multiple chunks. The amount that can be held in a single chunk
* segment is slightly less than what can be held in frag segment due
* to fragment reassembly info.
*/
size_t tiny_mtu;
size_t max_frag_payload; /* most that fits in a frag segment */
size_t max_chunk_payload; /* most that can fit in chunk segment */
size_t max_tiny_payload; /* threshold for using inline send */
/** Hash table to keep track of senders */
opal_hash_table_t senders;
/** local address information */
struct opal_btl_usnic_addr_t local_addr;
/** list of all endpoints */
opal_list_t all_endpoints;
/** array of procs used by this module (can't use a list because a
proc can be used by multiple modules) */
opal_pointer_array_t all_procs;
/** send fragments & buffers */
ompi_free_list_t small_send_frags;
ompi_free_list_t large_send_frags;
ompi_free_list_t put_dest_frags;
ompi_free_list_t chunk_segs;
/** receive buffer pools */
int first_pool;
int last_pool;
ompi_free_list_t *module_recv_buffers;
/** list of endpoints with data to send */
/* this list uses base endpoint ptr */
opal_list_t endpoints_with_sends;
/** list of send frags that are waiting to be resent (they
previously deferred because of lack of resources) */
opal_list_t pending_resend_segs;
/** ack segments */
ompi_free_list_t ack_segs;
/** list of endpoints to which we need to send ACKs */
/* this list uses endpoint->endpoint_ack_li */
opal_list_t endpoints_that_need_acks;
/* abstract queue-pairs into channels */
opal_btl_usnic_channel_t mod_channels[USNIC_NUM_CHANNELS];
/** max inline-send size supported by the QPs */
uint32_t qp_max_inline;
/* Performance / debugging statistics */
opal_btl_usnic_module_stats_t stats;
} opal_btl_usnic_module_t;
struct opal_btl_usnic_frag_t;
extern opal_btl_usnic_module_t opal_btl_usnic_module_template;
/*
* Manipulate the "endpoints_that_need_acks" list
*/
/* get first endpoint needing ACK */
static inline opal_btl_usnic_endpoint_t *
opal_btl_usnic_get_first_endpoint_needing_ack(
    opal_btl_usnic_module_t *module)
{
    /* Return the first endpoint on the module's "needs ACK" list,
       or NULL if that list is empty. */
    opal_list_t *acks = &module->endpoints_that_need_acks;
    opal_list_item_t *first = opal_list_get_first(acks);

    if (first == opal_list_get_end(acks)) {
        return NULL;
    }
    return container_of(first, mca_btl_base_endpoint_t, endpoint_ack_li);
}
/* get next item in chain */
static inline opal_btl_usnic_endpoint_t *
opal_btl_usnic_get_next_endpoint_needing_ack(
    opal_btl_usnic_endpoint_t *endpoint)
{
    /* Walk to the next endpoint on the owning module's "needs ACK"
       list, or return NULL when the end of the list is reached. */
    opal_btl_usnic_module_t *owner = endpoint->endpoint_module;
    opal_list_item_t *next = opal_list_get_next(&(endpoint->endpoint_ack_li));

    if (next == opal_list_get_end(&owner->endpoints_that_need_acks)) {
        return NULL;
    }
    return container_of(next, mca_btl_base_endpoint_t, endpoint_ack_li);
}
static inline void
opal_btl_usnic_remove_from_endpoints_needing_ack(
    opal_btl_usnic_endpoint_t *endpoint)
{
    /* Unlink the endpoint from its module's "needs ACK" list and
       clear the associated ACK bookkeeping on the endpoint. */
    opal_btl_usnic_module_t *owner = endpoint->endpoint_module;

    opal_list_remove_item(&owner->endpoints_that_need_acks,
                          &endpoint->endpoint_ack_li);
    endpoint->endpoint_ack_needed = false;
    endpoint->endpoint_acktime = 0;
#if MSGDEBUG1
    opal_output(0, "clear ack_needed on %p\n", (void*)endpoint);
#endif
}
static inline void
opal_btl_usnic_add_to_endpoints_needing_ack(
    opal_btl_usnic_endpoint_t *endpoint)
{
    /* Append the endpoint to its module's "needs ACK" list (via the
       endpoint_ack_li link) and mark it as needing an ACK. */
    opal_btl_usnic_module_t *owner = endpoint->endpoint_module;

    opal_list_append(&owner->endpoints_that_need_acks,
                     &endpoint->endpoint_ack_li);
    endpoint->endpoint_ack_needed = true;
#if MSGDEBUG1
    opal_output(0, "set ack_needed on %p\n", (void*)endpoint);
#endif
}
/*
* Initialize a module
*/
int opal_btl_usnic_module_init(opal_btl_usnic_module_t* module);
/*
* Progress pending sends on a module
*/
void opal_btl_usnic_module_progress_sends(opal_btl_usnic_module_t *module);
/* opal_output statistics that are useful for debugging */
void opal_btl_usnic_print_stats(
opal_btl_usnic_module_t *module,
const char *prefix,
bool reset_stats);
END_C_DECLS
#endif