1
1
- Move endpoint code back up to BTL
 - Use opal_pointer_array_t for bounce buffer to identify local smsg completions.
 - Update and reenable sendi
 - Create a new endpoint for FMA/BTE transactions (keep local smsg/fma transactions separate)
 - Move reverse get code into btl_ugni_put.c
 - Move eager get code into btl_ugni_get.c
 - Handle remote SMSG overruns correctly
 - Added support for inplace sends
 - etc

This commit was SVN r26307.
Этот коммит содержится в:
Nathan Hjelm 2012-04-19 21:51:55 +00:00
родитель 2b9827f45c
Коммит 1340f9c65a
20 изменённых файлов: 851 добавлений и 761 удалений

Просмотреть файл

@ -33,10 +33,12 @@ ugni_SOURCES = \
btl_ugni_frag.h \
btl_ugni_rdma.h \
btl_ugni_send.c \
btl_ugni_sendi.c \
btl_ugni_put.c \
btl_ugni_get.c \
btl_ugni.h \
btl_ugni_smsg.h
btl_ugni_smsg.h \
btl_ugni_smsg.c
mcacomponentdir = $(pkglibdir)
mcacomponent_LTLIBRARIES = $(component_install)

Просмотреть файл

@ -27,7 +27,6 @@
#include "ompi/runtime/ompi_module_exchange.h"
#include "opal/util/output.h"
#include "opal_stdint.h"
#include "opal/class/opal_hash_table.h"
#include "ompi/mca/btl/btl.h"
#include "ompi/mca/btl/base/base.h"
@ -64,8 +63,9 @@ typedef struct mca_btl_ugni_module_t {
gni_ep_handle_t wildcard_ep;
gni_smsg_attr_t wc_remote_attr, wc_local_attr;
gni_cq_handle_t bte_local_cq;
gni_cq_handle_t rdma_local_cq;
gni_cq_handle_t smsg_remote_cq;
gni_cq_handle_t smsg_local_cq;
/* eager (registered) fragment list */
ompi_free_list_t eager_frags_send;
@ -78,8 +78,8 @@ typedef struct mca_btl_ugni_module_t {
ompi_free_list_t rdma_frags;
ompi_free_list_t rdma_int_frags;
/* fragment buffer (for message if lookup) */
opal_hash_table_t pending_smsg_frags;
/* fragment id bounce buffer (smsg msg ids are only 32 bits) */
opal_pointer_array_t pending_smsg_frags_bb;
int32_t next_frag_id;
uint32_t reg_max;

Просмотреть файл

@ -175,7 +175,8 @@ mca_btl_ugni_setup_mpools (mca_btl_ugni_module_t *ugni_module)
int mbox_increment, rc;
size_t nprocs;
rc = opal_hash_table_init (&ugni_module->pending_smsg_frags, 1024);
opal_pointer_array_init (&ugni_module->pending_smsg_frags_bb, 0,
1 << 31, 32768);
if (OPAL_SUCCESS != rc) {
return rc;
}

Просмотреть файл

@ -132,7 +132,7 @@ btl_ugni_component_register(void)
mca_btl_ugni_module.super.btl_min_rdma_pipeline_size = 8 * 1024;
mca_btl_ugni_module.super.btl_flags = MCA_BTL_FLAGS_SEND |
MCA_BTL_FLAGS_RDMA;
MCA_BTL_FLAGS_RDMA | MCA_BTL_FLAGS_SEND_INPLACE;
mca_btl_ugni_module.super.btl_bandwidth = 40000; /* Mbs */
mca_btl_ugni_module.super.btl_latency = 2; /* Microsecs */
@ -318,255 +318,6 @@ mca_btl_ugni_component_init (int *num_btl_modules,
return base_modules;
}
/* Completion callback for a local RDMA (put) operation: notify the remote
 * peer that the transfer finished by sending its rdma header back over SMSG
 * with the RDMA_COMPLETE tag. On SMSG send failure the fragment is queued on
 * the module's failed_frags list with this callback re-armed so the
 * notification is retried later. */
static void mca_btl_ugni_callback_rdma_complete (ompi_common_ugni_post_desc_t *desc, int rc)
{
    mca_btl_ugni_base_frag_t *frag = MCA_BTL_UGNI_DESC_TO_FRAG(desc);
    BTL_VERBOSE(("rdma operation for rem_ctx %p complete", frag->hdr.rdma.ctx));
    /* tell peer the put is complete */
    rc = ompi_mca_btl_ugni_smsg_send (frag, false, &frag->hdr.rdma, sizeof (frag->hdr.rdma),
                                      NULL, 0, MCA_BTL_UGNI_TAG_RDMA_COMPLETE);
    if (OPAL_UNLIKELY(OMPI_SUCCESS != rc)) {
        /* call this callback again later */
        frag->post_desc.cbfunc = mca_btl_ugni_callback_rdma_complete;
        opal_list_append (&frag->endpoint->btl->failed_frags, (opal_list_item_t *) frag);
    }
}
/* Completion callback for an eager get: the payload has landed in
 * segments[0], so deliver it to the PML via the active-message callback
 * registered for the fragment's tag, then reuse the fragment to tell the
 * sender the transfer is complete (RDMA_COMPLETE notification). */
static void mca_btl_ugni_callback_eager_get (ompi_common_ugni_post_desc_t *desc, int rc)
{
    mca_btl_ugni_base_frag_t *frag = MCA_BTL_UGNI_DESC_TO_FRAG(desc);
    mca_btl_active_message_callback_t *reg;
    BTL_VERBOSE(("eager get for rem_ctx %p complete", frag->hdr.eager.ctx));
    /* the frag is already set up for the send callback */
    /* trim the (4-byte-rounded) get length back to the real message length */
    frag->segments[0].seg_len = frag->hdr.eager.len;
    reg = mca_btl_base_active_message_trigger + frag->hdr.eager.tag;
    reg->cbfunc(&frag->endpoint->btl->super, frag->hdr.eager.tag, &(frag->base), reg->cbdata);
    /* the completion notification uses the rdma header; copy the remote ctx over */
    frag->hdr.rdma.ctx = frag->hdr.eager.ctx;
    /* tell the remote peer the operation is complete */
    mca_btl_ugni_callback_rdma_complete (desc, rc);
}
static inline int mca_btl_ugni_start_reverse_get (mca_btl_base_endpoint_t *ep,
mca_btl_ugni_rdma_frag_hdr_t hdr,
mca_btl_ugni_base_frag_t *frag);
/* Retry callback installed when a reverse get could not be started; simply
 * re-attempts the reverse get with the already-prepared fragment. The return
 * code is ignored here — on failure the fragment is requeued by
 * mca_btl_ugni_start_reverse_get itself. */
static void mca_btl_ugni_callback_reverse_get_retry (ompi_common_ugni_post_desc_t *desc, int rc)
{
    mca_btl_ugni_base_frag_t *frag = MCA_BTL_UGNI_DESC_TO_FRAG(desc);
    (void) mca_btl_ugni_start_reverse_get(frag->endpoint, frag->hdr.rdma, frag);
}
/* Service a PUT_INIT request from a peer: the peer asked us to perform its
 * get as a put in the reverse direction (used when the get is misaligned or
 * oversized). Allocates an internal rdma fragment if the caller did not pass
 * one, fills in the src/dst segments from the received header, and posts the
 * put. On failure the fragment is parked on failed_frags with a retry
 * callback; on success the rdma-complete callback is armed so the peer is
 * notified when the put finishes.
 *
 * ep   - endpoint the request arrived on
 * hdr  - rdma header received from the peer (by value)
 * frag - fragment to reuse on retry, or NULL to allocate a fresh one
 * returns OMPI_SUCCESS or an OMPI error code */
static inline int mca_btl_ugni_start_reverse_get (mca_btl_base_endpoint_t *ep,
                                                  mca_btl_ugni_rdma_frag_hdr_t hdr,
                                                  mca_btl_ugni_base_frag_t *frag)
{
    int rc;
    BTL_VERBOSE(("starting reverse get (put) for remote ctx: %p", hdr.ctx));
    if (NULL == frag) {
        rc = MCA_BTL_UGNI_FRAG_ALLOC_RDMA_INT(ep, frag);
        if (OPAL_UNLIKELY(NULL == frag)) {
            BTL_ERROR(("error allocating rdma frag for reverse get. rc = %d. fl_num_allocated = %d", rc,
                       ep->btl->rdma_int_frags.fl_num_allocated));
            return rc;
        }
    }
    frag->hdr.rdma = hdr;
    frag->base.des_cbfunc = NULL;
    frag->base.des_flags = MCA_BTL_DES_FLAGS_BTL_OWNERSHIP;
    /* local source is the peer's src segment; remote destination is its dst segment */
    frag->segments[0] = hdr.src_seg;
    frag->base.des_src = frag->segments;
    frag->base.des_src_cnt = 1;
    frag->segments[1] = hdr.dst_seg;
    frag->base.des_dst = frag->segments + 1;
    frag->base.des_dst_cnt = 1;
    rc = mca_btl_ugni_put (&ep->btl->super, ep, &frag->base);
    if (OPAL_UNLIKELY(OMPI_SUCCESS != rc)) {
        /* could not post the put now; queue for retry on the next progress pass */
        frag->post_desc.cbfunc = mca_btl_ugni_callback_reverse_get_retry;
        opal_list_append (&ep->btl->failed_frags, (opal_list_item_t *) frag);
        return rc;
    }
    frag->post_desc.cbfunc = mca_btl_ugni_callback_rdma_complete;
    return OMPI_SUCCESS;
}
static inline int mca_btl_ugni_start_eager_get (mca_btl_base_endpoint_t *ep,
mca_btl_ugni_eager_frag_hdr_t hdr,
mca_btl_ugni_base_frag_t *frag);
/* Retry callback installed when an eager get could not be started; simply
 * re-attempts the eager get with the saved header. Failure handling (requeue)
 * is done inside mca_btl_ugni_start_eager_get, so the result is ignored. */
static void mca_btl_ugni_callback_eager_get_retry (ompi_common_ugni_post_desc_t *desc, int rc)
{
    mca_btl_ugni_base_frag_t *frag = MCA_BTL_UGNI_DESC_TO_FRAG(desc);
    (void) mca_btl_ugni_start_eager_get(frag->endpoint, frag->hdr.eager, frag);
}
/* Service a GET_INIT request: pull an eager-sized message from the peer with
 * an FMA get into a registered eager-receive fragment. If the caller passed
 * in an internal rdma fragment (from a previous failed attempt) it is
 * returned first so a proper eager fragment can be allocated. If no eager
 * fragment is available, a small internal fragment is used just to remember
 * the header and the request is queued for retry.
 *
 * ep   - endpoint the request arrived on
 * hdr  - eager header received from the peer (by value)
 * frag - fragment from a prior retry, or NULL
 * returns OMPI_SUCCESS, or an error code after queueing the retry */
static inline int mca_btl_ugni_start_eager_get (mca_btl_base_endpoint_t *ep,
                                                mca_btl_ugni_eager_frag_hdr_t hdr,
                                                mca_btl_ugni_base_frag_t *frag)
{
    int rc;
    /* an rdma_int frag can only carry the header; swap it for an eager frag */
    if (OPAL_UNLIKELY(frag && frag->my_list == &ep->btl->rdma_int_frags)) {
        mca_btl_ugni_frag_return (frag);
        frag = NULL;
    }
    BTL_VERBOSE(("starting eager get for remote ctx: %p", hdr.ctx));
    do {
        if (NULL == frag) {
            rc = MCA_BTL_UGNI_FRAG_ALLOC_EAGER_RECV(ep, frag);
            if (OPAL_UNLIKELY(NULL == frag)) {
                /* no registered buffer available: stash the header in an
                 * internal frag and fall through to the retry path */
                (void) MCA_BTL_UGNI_FRAG_ALLOC_RDMA_INT(ep, frag);
                assert (NULL != frag);
                frag->hdr.eager = hdr;
                break;
            }
        }
        frag->hdr.eager = hdr;
        frag->base.des_cbfunc = NULL;
        frag->base.des_flags = MCA_BTL_DES_FLAGS_BTL_OWNERSHIP;
        /* local destination is segments[0]; remote source comes from the header */
        frag->base.des_dst = frag->segments;
        frag->base.des_dst_cnt = 1;
        frag->segments[1] = hdr.src_seg;
        frag->base.des_src = frag->segments + 1;
        frag->base.des_src_cnt = 1;
        /* increase size to a multiple of 4 bytes (required for get) */
        frag->segments[0].seg_len = (hdr.len + 3) & ~3;
        frag->segments[1].seg_len = (hdr.len + 3) & ~3;
        rc = mca_btl_ugni_post_fma (frag, GNI_POST_FMA_GET, frag->base.des_dst, frag->base.des_src);
        if (OPAL_UNLIKELY(OMPI_SUCCESS != rc)) {
            break;
        }
        frag->post_desc.cbfunc = mca_btl_ugni_callback_eager_get;
        return OMPI_SUCCESS;
    } while (0);
    /* alloc or post failed: arm the retry callback and park the fragment */
    frag->post_desc.cbfunc = mca_btl_ugni_callback_eager_get_retry;
    opal_list_append (&ep->btl->failed_frags, (opal_list_item_t *) frag);
    return rc;
}
/* Drain an endpoint's SMSG mailbox and dispatch each message by tag:
 *   SEND          - deliver payload to the PML active-message callback
 *   PUT_INIT      - start a reverse get (peer-initiated put)
 *   GET_INIT      - start an eager get
 *   RDMA_COMPLETE - complete the local fragment identified by the header ctx
 *   DISCONNECT    - peer closed; tear down the endpoint after the loop
 * Returns the number of messages processed, or OMPI_ERROR on an unrecoverable
 * SMSG error. Note the dispatched headers are copied into a stack-local frag
 * before GNI_SmsgRelease invalidates the mailbox slot. */
static inline int
mca_btl_ugni_smsg_process (mca_btl_base_endpoint_t *ep)
{
    mca_btl_active_message_callback_t *reg;
    mca_btl_ugni_base_frag_t frag;
    bool disconnect = false;
    uintptr_t data_ptr;
    gni_return_t rc;
    int count = 0;
    /* per uGNI documentation we loop until the mailbox is empty */
    do {
        uint8_t tag = GNI_SMSG_ANY_TAG;
        rc = GNI_SmsgGetNextWTag (ep->common->ep_handle, (void **) &data_ptr, &tag);
        if (GNI_RC_NOT_DONE == rc) {
            /* mailbox drained: normal exit */
            BTL_VERBOSE(("no smsg message waiting. rc = %d", rc));
            return count;
        }
        if (OPAL_UNLIKELY(GNI_RC_SUCCESS != rc)) {
            fprintf (stderr, "Unhandled Smsg error: %d\n", rc);
            assert (0);
            return OMPI_ERROR;
        }
        if (OPAL_UNLIKELY(0 == data_ptr)) {
            BTL_ERROR(("null data ptr!"));
            assert (0);
            return OMPI_ERROR;
        }
        count++;
        BTL_VERBOSE(("got smsg fragment. tag = %d\n", tag));
        switch (tag) {
        case MCA_BTL_UGNI_TAG_SEND:
            /* header is at data_ptr; payload immediately follows it */
            frag.hdr.send = ((mca_btl_ugni_send_frag_hdr_t *) data_ptr)[0];
            BTL_VERBOSE(("received smsg fragment. hdr = {len = %u, tag = %d}",
                         (unsigned int) frag.hdr.send.len, frag.hdr.send.tag));
            reg = mca_btl_base_active_message_trigger + frag.hdr.send.tag;
            frag.base.des_dst = frag.segments;
            frag.base.des_dst_cnt = 1;
            frag.segments[0].seg_addr.pval = (void *)((uintptr_t)data_ptr + sizeof (mca_btl_ugni_send_frag_hdr_t));
            frag.segments[0].seg_len = frag.hdr.send.len;
            assert (NULL != reg->cbfunc);
            reg->cbfunc(&ep->btl->super, frag.hdr.send.tag, &(frag.base), reg->cbdata);
            break;
        case MCA_BTL_UGNI_TAG_PUT_INIT:
            frag.hdr.rdma = ((mca_btl_ugni_rdma_frag_hdr_t *) data_ptr)[0];
            mca_btl_ugni_start_reverse_get (ep, frag.hdr.rdma, NULL);
            break;
        case MCA_BTL_UGNI_TAG_GET_INIT:
            frag.hdr.eager = ((mca_btl_ugni_eager_frag_hdr_t *) data_ptr)[0];
            mca_btl_ugni_start_eager_get (ep, frag.hdr.eager, NULL);
            break;
        case MCA_BTL_UGNI_TAG_RDMA_COMPLETE:
            frag.hdr.rdma = ((mca_btl_ugni_rdma_frag_hdr_t *) data_ptr)[0];
            mca_btl_ugni_post_frag_complete (frag.hdr.rdma.ctx, OMPI_SUCCESS);
            break;
        case MCA_BTL_UGNI_TAG_DISCONNECT:
            /* remote endpoint has disconnected */
            disconnect = true;
            break;
        default:
            BTL_ERROR(("unknown tag %d\n", tag));
            break;
        }
        rc = GNI_SmsgRelease (ep->common->ep_handle);
        if (OPAL_UNLIKELY(GNI_RC_SUCCESS != rc)) {
            BTL_ERROR(("Smsg release failed!"));
            return OMPI_ERROR;
        }
    } while (!disconnect);
    /* disconnect if we get here */
    mca_btl_ugni_ep_disconnect (ep, false);
    return count;
}
static inline int
mca_btl_ugni_progress_datagram (mca_btl_ugni_module_t *btl)
{
@ -589,12 +340,12 @@ mca_btl_ugni_progress_datagram (mca_btl_ugni_module_t *btl)
handle = btl->wildcard_ep;
} else {
handle =
btl->endpoints[(uint32_t)(datagram_id & 0xffffffffull)]->common->ep_handle;
btl->endpoints[(uint32_t)(datagram_id & 0xffffffffull)]->smsg_ep_handle;
}
/* wait for the incoming datagram to complete (in case it isn't) */
grc = GNI_EpPostDataWaitById (handle, datagram_id, -1, &post_state,
&remote_addr, &remote_id);
&remote_addr, &remote_id);
if (GNI_RC_SUCCESS != grc) {
BTL_ERROR(("GNI_EpPostDataWaitById failed with rc = %d", grc));
return ompi_common_rc_ugni_to_ompi (grc);
@ -610,7 +361,7 @@ mca_btl_ugni_progress_datagram (mca_btl_ugni_module_t *btl)
/* NTH: TODO -- error handling */
(void) mca_btl_ugni_ep_connect_progress (ep);
if (OMPI_COMMON_UGNI_CONNECTED == MCA_BTL_UGNI_EP_STATE(ep)) {
if (MCA_BTL_UGNI_EP_STATE_CONNECTED == ep->state) {
/* process messages waiting in the endpoint's smsg mailbox */
count = mca_btl_ugni_smsg_process (ep);
}
@ -626,92 +377,61 @@ mca_btl_ugni_progress_datagram (mca_btl_ugni_module_t *btl)
}
static inline int
mca_btl_ugni_handle_smsg_overrun (mca_btl_ugni_module_t *btl)
mca_btl_ugni_progress_rdma (mca_btl_ugni_module_t *btl)
{
gni_cq_entry_t event_data;
unsigned int ep_index;
int count, rc;
ompi_common_ugni_post_desc_t *desc;
gni_return_t rc = GNI_RC_NOT_DONE;
mca_btl_ugni_base_frag_t *frag;
gni_cq_entry_t event_data = 0;
uint32_t recoverable = 1;
BTL_VERBOSE(("btl/ugni_component detect SMSG CQ overrun. "
"processing message backlog..."));
/* we don't know which endpoint lost an smsg completion. clear the
smsg remote cq and check all mailboxes */
/* clear out remote cq */
do {
rc = GNI_CqGetEvent (btl->smsg_remote_cq, &event_data);
} while (GNI_RC_SUCCESS == rc);
count = 0;
for (ep_index = 0 ; ep_index < btl->endpoint_count ; ++ep_index) {
mca_btl_base_endpoint_t *ep = btl->endpoints[ep_index];
if (NULL == ep || OMPI_COMMON_UGNI_CONNECTED != MCA_BTL_UGNI_EP_STATE(ep)) {
continue;
}
do {
/* clear out smsg mailbox */
rc = mca_btl_ugni_smsg_process (ep);
if (rc > 0)
count += rc;
} while (rc > 0);
}
return count;
}
static inline int
mca_btl_ugni_progress_smsg (mca_btl_ugni_module_t *btl)
{
mca_btl_base_endpoint_t *ep;
gni_cq_entry_t event_data;
int rc;
rc = GNI_CqGetEvent (btl->smsg_remote_cq, &event_data);
rc = GNI_CqGetEvent (btl->rdma_local_cq, &event_data);
if (GNI_RC_NOT_DONE == rc) {
return 0;
}
if (OPAL_UNLIKELY(GNI_RC_SUCCESS != rc || !GNI_CQ_STATUS_OK(event_data) ||
GNI_CQ_OVERRUN(event_data))) {
if (GNI_RC_ERROR_RESOURCE == rc ||
(GNI_RC_SUCCESS == rc && GNI_CQ_OVERRUN(event_data))) {
/* recover from smsg cq overrun */
return mca_btl_ugni_handle_smsg_overrun (btl);
if (OPAL_UNLIKELY((GNI_RC_SUCCESS != rc && !event_data) || GNI_CQ_OVERRUN(event_data))) {
/* TODO -- need to handle overrun -- how do we do this without an event?
will the event eventually come back? Ask Cray */
BTL_ERROR(("post error! cq overrun = %d", (int)GNI_CQ_OVERRUN(event_data)));
assert (0);
return ompi_common_rc_ugni_to_ompi (rc);
}
rc = GNI_GetCompleted (btl->rdma_local_cq, event_data, (gni_post_descriptor_t **) &desc);
if (OPAL_UNLIKELY(GNI_RC_SUCCESS != rc && GNI_RC_TRANSACTION_ERROR != rc)) {
BTL_ERROR(("Error in GNI_GetComplete %s", gni_err_str[rc]));
return ompi_common_rc_ugni_to_ompi (rc);
}
frag = MCA_BTL_UGNI_DESC_TO_FRAG(desc);
if (OPAL_UNLIKELY(!GNI_CQ_STATUS_OK(event_data))) {
(void) GNI_CqErrorRecoverable (event_data, &recoverable);
if (OPAL_UNLIKELY(++desc->tries >= mca_btl_ugni_component.rdma_max_retries ||
!recoverable)) {
/* give up */
BTL_ERROR(("giving up on frag %p", (void *) frag))
frag->cbfunc (frag, OMPI_ERROR);
return OMPI_ERROR;
}
BTL_ERROR(("unhandled error in GNI_CqGetEvent"));
/* repost transaction */
if (GNI_POST_RDMA_PUT == desc->base.type ||
GNI_POST_RDMA_GET == desc->base.type) {
rc = GNI_PostRdma (frag->endpoint->rdma_ep_handle, &desc->base);
} else {
rc = GNI_PostFma (frag->endpoint->rdma_ep_handle, &desc->base);
}
/* unhandled error: crash */
assert (0);
return OMPI_ERROR;
return ompi_common_rc_ugni_to_ompi (rc);
}
BTL_VERBOSE(("REMOTE CQ: Got event 0x%" PRIx64 ". msg id = %" PRIu64
". ok = %d, type = %" PRIu64 "\n", (uint64_t) event_data,
GNI_CQ_GET_MSG_ID(event_data), GNI_CQ_STATUS_OK(event_data),
GNI_CQ_GET_TYPE(event_data)));
frag->cbfunc (frag, OMPI_SUCCESS);
/* we could check the message type here but it seems to always be a POST */
ep = btl->endpoints[GNI_CQ_GET_MSG_ID(event_data)];
if (OPAL_UNLIKELY(OMPI_COMMON_UGNI_CONNECTED != MCA_BTL_UGNI_EP_STATE(ep))) {
/* due to the nature of datagrams we may get a smsg completion before
we get mailbox info from the peer */
BTL_VERBOSE(("event occurred on an unconnected endpoint! ep state = %d", MCA_BTL_UGNI_EP_STATE(ep)));
return 0;
}
return mca_btl_ugni_smsg_process (ep);
}
static inline int
mca_btl_ugni_progress_bte (mca_btl_ugni_module_t *btl)
{
return ompi_common_ugni_process_completed_post (btl->device, btl->bte_local_cq);
return 1;
}
static int
@ -723,20 +443,17 @@ mca_btl_ugni_retry_failed (mca_btl_ugni_module_t *btl)
while (count-- && NULL != (item = opal_list_remove_first (&btl->failed_frags))) {
mca_btl_ugni_base_frag_t *frag = (mca_btl_ugni_base_frag_t *) item;
frag->post_desc.cbfunc (&frag->post_desc, OMPI_SUCCESS);
frag->cbfunc (frag, OMPI_SUCCESS);
}
return 0;
}
static int
mca_btl_ugni_component_progress (void)
static int mca_btl_ugni_component_progress (void)
{
mca_btl_ugni_module_t *btl;
unsigned int i;
int count;
count = ompi_common_ugni_progress ();
int count = 0;
for (i = 0 ; i < mca_btl_ugni_component.ugni_num_btls ; ++i) {
btl = mca_btl_ugni_component.modules + i;
@ -744,8 +461,9 @@ mca_btl_ugni_component_progress (void)
mca_btl_ugni_retry_failed (btl);
count += mca_btl_ugni_progress_datagram (btl);
count += mca_btl_ugni_progress_smsg (btl);
count += mca_btl_ugni_progress_bte (btl);
count += mca_btl_ugni_progress_local_smsg (btl);
count += mca_btl_ugni_progress_remote_smsg (btl);
count += mca_btl_ugni_progress_rdma (btl);
}
return count;

Просмотреть файл

@ -23,14 +23,17 @@ OBJ_CLASS_INSTANCE(mca_btl_base_endpoint_t, opal_object_t,
static void mca_btl_ugni_ep_construct (mca_btl_base_endpoint_t *ep)
{
memset ((char *) ep + sizeof(ep->super), 0, sizeof (*ep) - sizeof (ep->super));
OBJ_CONSTRUCT(&ep->pending_list, opal_list_t);
OBJ_CONSTRUCT(&ep->pending_smsg_sends, opal_list_t);
OBJ_CONSTRUCT(&ep->lock, opal_mutex_t);
}
static void mca_btl_ugni_ep_destruct (mca_btl_base_endpoint_t *ep)
{
OBJ_DESTRUCT(&ep->pending_list);
OBJ_DESTRUCT(&ep->pending_smsg_sends);
OBJ_DESTRUCT(&ep->lock);
}
static void mca_btl_ugni_smsg_mbox_construct (mca_btl_ugni_smsg_mbox_t *mbox) {
@ -74,13 +77,13 @@ int mca_btl_ugni_ep_disconnect (mca_btl_base_endpoint_t *ep, bool send_disconnec
OPAL_THREAD_LOCK(&ep->common->lock);
do {
if (OMPI_COMMON_UGNI_INIT == MCA_BTL_UGNI_EP_STATE(ep)) {
if (MCA_BTL_UGNI_EP_STATE_INIT == ep->state) {
/* nothing to do */
break;
}
if (OMPI_COMMON_UGNI_CONNECTED == MCA_BTL_UGNI_EP_STATE(ep) && send_disconnect) {
rc = GNI_SmsgSendWTag (ep->common->ep_handle, NULL, 0, NULL, 0, -1,
if (MCA_BTL_UGNI_EP_STATE_CONNECTED == ep->state && send_disconnect) {
rc = GNI_SmsgSendWTag (ep->smsg_ep_handle, NULL, 0, NULL, 0, -1,
MCA_BTL_UGNI_TAG_DISCONNECT);
if (GNI_RC_SUCCESS != rc) {
BTL_VERBOSE(("btl/ugni could not send close message"));
@ -89,15 +92,10 @@ int mca_btl_ugni_ep_disconnect (mca_btl_base_endpoint_t *ep, bool send_disconnec
/* we might want to wait for local completion here (do we even care) */
}
ep->common->state = OMPI_COMMON_UGNI_BOUND;
(void) ompi_common_ugni_ep_destroy (&ep->smsg_ep_handle);
(void) ompi_common_ugni_ep_destroy (&ep->rdma_ep_handle);
/* drop the lock before we unbind */
OPAL_THREAD_UNLOCK(&ep->common->lock);
rc = ompi_common_ugni_endpoint_unbind (ep->common);
OPAL_THREAD_LOCK(&ep->common->lock);
if (OMPI_SUCCESS != rc) {
BTL_VERBOSE(("btl/ugni error unbinding ugni endpoint"));
}
ep->state = MCA_BTL_UGNI_EP_STATE_INIT;
OMPI_FREE_LIST_RETURN(&ep->btl->smsg_mboxes, ((ompi_free_list_item_t *) ep->mailbox));
ep->mailbox = NULL;
@ -115,14 +113,15 @@ static inline int mca_btl_ugni_ep_connect_start (mca_btl_base_endpoint_t *ep) {
ep->common->ep_rem_addr, ep->common->ep_rem_id));
/* bind endpoint to remote address */
OPAL_THREAD_UNLOCK(&ep->common->lock);
rc = ompi_common_ugni_endpoint_bind (ep->common);
OPAL_THREAD_LOCK(&ep->common->lock);
rc = ompi_common_ugni_ep_create (ep->common, ep->btl->smsg_local_cq, &ep->smsg_ep_handle);
if (OPAL_UNLIKELY(OMPI_SUCCESS != rc)) {
return rc;
}
MCA_BTL_UGNI_EP_STATE(ep) = OMPI_COMMON_UGNI_CONNECTING;
rc = ompi_common_ugni_ep_create (ep->common, ep->btl->rdma_local_cq, &ep->rdma_ep_handle);
if (OPAL_UNLIKELY(OMPI_SUCCESS != rc)) {
return rc;
}
/* build connection data */
rc = mca_btl_ugni_ep_smsg_get_mbox (ep);
@ -130,6 +129,8 @@ static inline int mca_btl_ugni_ep_connect_start (mca_btl_base_endpoint_t *ep) {
return rc;
}
ep->state = MCA_BTL_UGNI_EP_STATE_CONNECTING;
memset (&ep->remote_smsg_attrib, 0, sizeof (ep->remote_smsg_attrib));
BTL_VERBOSE(("btl/ugni connection to remote peer initiated"));
@ -137,11 +138,9 @@ static inline int mca_btl_ugni_ep_connect_start (mca_btl_base_endpoint_t *ep) {
return OMPI_SUCCESS;
}
static void mca_btl_ugni_retry_send (ompi_common_ugni_post_desc_t *desc, int rc)
static void mca_btl_ugni_retry_send (mca_btl_ugni_base_frag_t *frag, int rc)
{
mca_btl_ugni_base_frag_t *frag = MCA_BTL_UGNI_DESC_TO_FRAG(desc);
rc = mca_btl_ugni_send (&frag->endpoint->btl->super, frag->endpoint, &frag->base, frag->hdr.send.tag);
rc = mca_btl_ugni_send (&frag->endpoint->btl->super, frag->endpoint, &frag->base, frag->hdr.send.lag >> 24);
if (OPAL_UNLIKELY(0 > rc)) {
opal_list_append (&frag->endpoint->btl->failed_frags, (opal_list_item_t *) frag);
}
@ -165,22 +164,23 @@ static inline int mca_btl_ugni_ep_connect_finish (mca_btl_base_endpoint_t *ep) {
ep->mailbox->smsg_attrib.mem_hndl.qword2, ep->mailbox->smsg_attrib.mbox_offset,
ep->mailbox->smsg_attrib.mbox_maxcredit, ep->mailbox->smsg_attrib.msg_maxsize));
rc = GNI_SmsgInit (ep->common->ep_handle, &ep->mailbox->smsg_attrib, &ep->remote_smsg_attrib);
if (OPAL_UNLIKELY(GNI_RC_SUCCESS != rc)) {
rc = GNI_SmsgInit (ep->smsg_ep_handle, &ep->mailbox->smsg_attrib, &ep->remote_smsg_attrib);
if (GNI_RC_SUCCESS != rc) {
BTL_ERROR(("error initializing SMSG protocol. rc = %d", rc));
return ompi_common_rc_ugni_to_ompi (rc);
}
BTL_VERBOSE(("endpoint connected. posting %u sends", (unsigned int) opal_list_get_size (&ep->pending_list)));
MCA_BTL_UGNI_EP_STATE(ep) = OMPI_COMMON_UGNI_CONNECTED;
ep->state = MCA_BTL_UGNI_EP_STATE_CONNECTED;
/* post pending sends */
while (NULL != (item = opal_list_remove_first (&ep->pending_list))) {
mca_btl_ugni_base_frag_t *frag = (mca_btl_ugni_base_frag_t *) item;
rc = mca_btl_ugni_send (&ep->btl->super, ep, &frag->base, frag->hdr.send.tag);
rc = mca_btl_ugni_send (&ep->btl->super, ep, &frag->base, frag->hdr.send.lag >> 24);
if (OPAL_UNLIKELY(0 > rc)) {
frag->post_desc.cbfunc = mca_btl_ugni_retry_send;
frag->cbfunc = mca_btl_ugni_retry_send;
opal_list_append (&ep->btl->failed_frags, (opal_list_item_t *) frag);
}
}
@ -191,11 +191,11 @@ static inline int mca_btl_ugni_ep_connect_finish (mca_btl_base_endpoint_t *ep) {
int mca_btl_ugni_ep_connect_progress (mca_btl_base_endpoint_t *ep) {
int rc;
if (OMPI_COMMON_UGNI_CONNECTED == MCA_BTL_UGNI_EP_STATE(ep)) {
if (MCA_BTL_UGNI_EP_STATE_CONNECTED == ep->state) {
return OMPI_SUCCESS;
}
if (OMPI_COMMON_UGNI_CONNECTING > ep->common->state) {
if (MCA_BTL_UGNI_EP_STATE_INIT == ep->state) {
rc = mca_btl_ugni_ep_connect_start (ep);
if (OMPI_SUCCESS != rc) {
return rc;
@ -203,8 +203,11 @@ int mca_btl_ugni_ep_connect_progress (mca_btl_base_endpoint_t *ep) {
}
if (GNI_SMSG_TYPE_INVALID == ep->remote_smsg_attrib.msg_type) {
(void) mca_btl_ugni_directed_ep_post (ep);
return OMPI_ERR_RESOURCE_BUSY;
rc = mca_btl_ugni_directed_ep_post (ep);
if (OMPI_SUCCESS == rc) {
rc = OMPI_ERR_RESOURCE_BUSY;
}
return rc;
}
return mca_btl_ugni_ep_connect_finish (ep);

Просмотреть файл

@ -15,6 +15,13 @@
#include "btl_ugni.h"
enum mca_btl_ugni_endpoint_state_t {
MCA_BTL_UGNI_EP_STATE_INIT = 0,
MCA_BTL_UGNI_EP_STATE_CONNECTING,
MCA_BTL_UGNI_EP_STATE_CONNECTED
};
typedef enum mca_btl_ugni_endpoint_state_t mca_btl_ugni_endpoint_state_t;
typedef struct mca_btl_ugni_smsg_mbox_t {
ompi_free_list_item_t super;
gni_smsg_attr_t smsg_attrib;
@ -25,23 +32,28 @@ OBJ_CLASS_DECLARATION(mca_btl_ugni_smsg_mbox_t);
typedef struct mca_btl_base_endpoint_t {
opal_object_t super;
opal_mutex_t lock;
mca_btl_ugni_endpoint_state_t state;
ompi_common_ugni_endpoint_t *common;
mca_btl_ugni_module_t *btl;
gni_ep_handle_t smsg_ep_handle;
gni_ep_handle_t rdma_ep_handle;
gni_smsg_attr_t remote_smsg_attrib;
mca_btl_ugni_smsg_mbox_t *mailbox;
opal_list_t pending_list;
opal_list_t pending_smsg_sends;
uint32_t smsg_progressing;
} mca_btl_base_endpoint_t;
OBJ_CLASS_DECLARATION(mca_btl_base_endpoint_t);
#define MCA_BTL_UGNI_EP_STATE(ep) ((ep)->common->state)
int mca_btl_ugni_ep_connect_progress (mca_btl_base_endpoint_t *ep);
int mca_btl_ugni_ep_disconnect (mca_btl_base_endpoint_t *ep, bool send_disconnect);
@ -54,6 +66,9 @@ static inline int mca_btl_ugni_init_ep (mca_btl_base_endpoint_t **ep,
endpoint = OBJ_NEW(mca_btl_base_endpoint_t);
assert (endpoint != NULL);
endpoint->smsg_progressing = 0;
endpoint->state = MCA_BTL_UGNI_EP_STATE_INIT;
rc = ompi_common_ugni_endpoint_for_proc (btl->device, peer_proc, &endpoint->common);
if (OMPI_SUCCESS != rc) {
assert (0);
@ -61,11 +76,10 @@ static inline int mca_btl_ugni_init_ep (mca_btl_base_endpoint_t **ep,
}
endpoint->btl = btl;
endpoint->common->btl_ctx = (void *) endpoint;
*ep = endpoint;
return OMPI_SUCCESS;
return OMPI_SUCCESS;
}
static inline void mca_btl_ugni_release_ep (mca_btl_base_endpoint_t *ep) {
@ -84,32 +98,32 @@ static inline void mca_btl_ugni_release_ep (mca_btl_base_endpoint_t *ep) {
static inline int mca_btl_ugni_check_endpoint_state (mca_btl_base_endpoint_t *ep) {
int rc;
if (OPAL_LIKELY(OMPI_COMMON_UGNI_CONNECTED == ep->common->state)) {
if (OPAL_LIKELY(MCA_BTL_UGNI_EP_STATE_CONNECTED == ep->state)) {
return OMPI_SUCCESS;
}
OPAL_THREAD_LOCK(&ep->common->lock);
OPAL_THREAD_LOCK(&ep->lock);
switch (ep->common->state) {
case OMPI_COMMON_UGNI_INIT:
switch (ep->state) {
case MCA_BTL_UGNI_EP_STATE_INIT:
rc = mca_btl_ugni_ep_connect_progress (ep);
if (OMPI_SUCCESS != rc) {
break;
}
case OMPI_COMMON_UGNI_CONNECTING:
case MCA_BTL_UGNI_EP_STATE_CONNECTING:
rc = OMPI_ERR_RESOURCE_BUSY;
break;
default:
rc = OMPI_SUCCESS;
}
OPAL_THREAD_UNLOCK(&ep->common->lock);
OPAL_THREAD_UNLOCK(&ep->lock);
return rc;
}
static inline int mca_btl_ugni_wildcard_ep_post (mca_btl_ugni_module_t *ugni_module) {
int rc;
gni_return_t rc;
memset (&ugni_module->wc_local_attr, 0, sizeof (ugni_module->wc_local_attr));
rc = GNI_EpPostDataWId (ugni_module->wildcard_ep, &ugni_module->wc_local_attr, sizeof (ugni_module->wc_local_attr),
@ -120,8 +134,9 @@ static inline int mca_btl_ugni_wildcard_ep_post (mca_btl_ugni_module_t *ugni_mod
}
static inline int mca_btl_ugni_directed_ep_post (mca_btl_base_endpoint_t *ep) {
int rc;
rc = GNI_EpPostDataWId (ep->common->ep_handle, &ep->mailbox->smsg_attrib, sizeof (ep->mailbox->smsg_attrib),
gni_return_t rc;
rc = GNI_EpPostDataWId (ep->smsg_ep_handle, &ep->mailbox->smsg_attrib, sizeof (ep->mailbox->smsg_attrib),
&ep->remote_smsg_attrib, sizeof (ep->remote_smsg_attrib),
MCA_BTL_UGNI_CONNECT_DIRECTED_ID | ep->common->ep_rem_id);

Просмотреть файл

@ -52,4 +52,6 @@ OBJ_CLASS_INSTANCE(mca_btl_ugni_eager_frag_t, mca_btl_base_descriptor_t,
/* One-time init for a fragment: assign it the next 32-bit message id and
 * record it in the module's bounce-buffer pointer array, indexed by that id,
 * so the fragment can be looked up when its SMSG local completion arrives
 * (GNI cq msg ids are only 32 bits, too small to carry a pointer). */
void mca_btl_ugni_frag_init (mca_btl_ugni_base_frag_t *frag, mca_btl_ugni_module_t *ugni_module)
{
    frag->msg_id = opal_atomic_add_32 (&ugni_module->next_frag_id, 1);
    opal_pointer_array_set_item (&ugni_module->pending_smsg_frags_bb, frag->msg_id, (void *) frag);
}

Просмотреть файл

@ -17,10 +17,14 @@
#include "btl_ugni_endpoint.h"
typedef struct mca_btl_ugni_send_frag_hdr_t {
size_t len;
mca_btl_base_tag_t tag;
uint32_t lag;
} mca_btl_ugni_send_frag_hdr_t;
typedef struct mca_btl_ugni_send_ex_frag_hdr_t {
mca_btl_ugni_send_frag_hdr_t send;
uint8_t pml_header[128];
} mca_btl_ugni_send_ex_frag_hdr_t;
typedef struct mca_btl_ugni_rdma_frag_hdr_t {
mca_btl_base_segment_t src_seg;
mca_btl_base_segment_t dst_seg;
@ -28,27 +32,36 @@ typedef struct mca_btl_ugni_rdma_frag_hdr_t {
} mca_btl_ugni_rdma_frag_hdr_t;
typedef struct mca_btl_ugni_eager_frag_hdr_t {
size_t len;
mca_btl_base_tag_t tag;
mca_btl_ugni_send_frag_hdr_t send;
mca_btl_base_segment_t src_seg;
void *ctx;
} mca_btl_ugni_eager_frag_hdr_t;
typedef union mca_btl_ugni_frag_hdr_t {
mca_btl_ugni_send_frag_hdr_t send;
mca_btl_ugni_rdma_frag_hdr_t rdma;
typedef struct mca_btl_ugni_eager_ex_frag_hdr_t {
mca_btl_ugni_eager_frag_hdr_t eager;
uint8_t pml_header[128];
} mca_btl_ugni_eager_ex_frag_hdr_t;
typedef union mca_btl_ugni_frag_hdr_t {
mca_btl_ugni_send_frag_hdr_t send;
mca_btl_ugni_send_ex_frag_hdr_t send_ex;
mca_btl_ugni_rdma_frag_hdr_t rdma;
mca_btl_ugni_eager_frag_hdr_t eager;
mca_btl_ugni_eager_ex_frag_hdr_t eager_ex;
} mca_btl_ugni_frag_hdr_t;
typedef struct mca_btl_ugni_base_frag_t {
mca_btl_base_descriptor_t base;
mca_btl_base_segment_t segments[2];
mca_btl_ugni_frag_hdr_t hdr;
size_t hdr_size;
ompi_common_ugni_post_desc_t post_desc;
mca_btl_base_endpoint_t *endpoint;
mca_btl_ugni_reg_t *registration;
ompi_free_list_t *my_list;
uint32_t msg_id;
void (*cbfunc) (struct mca_btl_ugni_base_frag_t*, int);
} mca_btl_ugni_base_frag_t;
typedef struct mca_btl_ugni_base_frag_t mca_btl_ugni_smsg_frag_t;
@ -81,7 +94,7 @@ static inline int mca_btl_ugni_frag_alloc (mca_btl_base_endpoint_t *ep,
return rc;
}
static inline void mca_btl_ugni_frag_return (mca_btl_ugni_base_frag_t *frag)
static inline int mca_btl_ugni_frag_return (mca_btl_ugni_base_frag_t *frag)
{
if (frag->registration) {
frag->endpoint->btl->super.btl_mpool->mpool_deregister(frag->endpoint->btl->super.btl_mpool,
@ -90,6 +103,19 @@ static inline void mca_btl_ugni_frag_return (mca_btl_ugni_base_frag_t *frag)
}
OMPI_FREE_LIST_RETURN(frag->my_list, (ompi_free_list_item_t *) frag);
return OMPI_SUCCESS;
}
/* Finish a fragment: invoke the user's descriptor callback if it was
 * requested, then return the fragment to its free list if the BTL owns it. */
static inline void mca_btl_ugni_frag_complete (mca_btl_ugni_base_frag_t *frag, int rc) {
    /* call callback if specified */
    if (frag->base.des_flags & MCA_BTL_DES_SEND_ALWAYS_CALLBACK) {
        frag->base.des_cbfunc(&frag->endpoint->btl->super, frag->endpoint, &frag->base, rc);
    }
    if (OPAL_LIKELY(frag->base.des_flags & MCA_BTL_DES_FLAGS_BTL_OWNERSHIP)) {
        mca_btl_ugni_frag_return (frag);
    }
}
#define MCA_BTL_UGNI_FRAG_ALLOC_SMSG(ep, frag) \

Просмотреть файл

@ -13,12 +13,12 @@
#include "btl_ugni_rdma.h"
#include "btl_ugni_smsg.h"
static inline int mca_btl_ugni_init_reverse_get (struct mca_btl_base_module_t *btl,
mca_btl_ugni_base_frag_t *frag) {
static int mca_btl_ugni_init_put (struct mca_btl_base_module_t *btl,
mca_btl_ugni_base_frag_t *frag) {
/* off alignment/off size. switch to put */
frag->hdr.rdma.src_seg = frag->base.des_src[0];
frag->hdr.rdma.dst_seg = frag->base.des_dst[0];
frag->hdr.rdma.ctx = (void *) &frag->post_desc;
frag->hdr.rdma.ctx = (void *) frag;
/* send the fragment header using smsg. ignore local completion */
return ompi_mca_btl_ugni_smsg_send (frag, true, &frag->hdr.rdma,
@ -54,7 +54,11 @@ int mca_btl_ugni_get (struct mca_btl_base_module_t *btl,
if (OPAL_UNLIKELY(check || size > mca_btl_ugni_component.ugni_get_limit)) {
/* switch to put */
return mca_btl_ugni_init_reverse_get (btl, frag);
return mca_btl_ugni_init_put (btl, frag);
}
if (NULL != frag->base.des_cbfunc) {
des->des_flags |= MCA_BTL_DES_SEND_ALWAYS_CALLBACK;
}
if (size <= mca_btl_ugni_component.ugni_fma_limit) {
@ -63,3 +67,114 @@ int mca_btl_ugni_get (struct mca_btl_base_module_t *btl,
return mca_btl_ugni_post_bte (frag, GNI_POST_RDMA_GET, des->des_dst, des->des_src);
}
/* Completion callback for a local RDMA operation: send the fragment's rdma
 * header back to the peer (RDMA_COMPLETE tag) so it can finish its side. If
 * the SMSG send fails the fragment is parked on failed_frags with this
 * callback re-armed for a later retry. */
void mca_btl_ugni_callback_rdma_complete (mca_btl_ugni_base_frag_t *frag, int rc)
{
    BTL_VERBOSE(("rdma operation for rem_ctx %p complete", frag->hdr.rdma.ctx));
    /* tell peer the put is complete */
    rc = ompi_mca_btl_ugni_smsg_send (frag, false, &frag->hdr.rdma, sizeof (frag->hdr.rdma),
                                      NULL, 0, MCA_BTL_UGNI_TAG_RDMA_COMPLETE);
    if (OPAL_UNLIKELY(0 > rc)) {
        /* call this callback again later */
        frag->cbfunc = mca_btl_ugni_callback_rdma_complete;
        opal_list_append (&frag->endpoint->btl->failed_frags, (opal_list_item_t *) frag);
    }
}
/* eager get */
/* Retry callback for an eager get that previously failed to start; result is
 * ignored because mca_btl_ugni_start_eager_get requeues on failure itself. */
static void mca_btl_ugni_callback_eager_get_retry (mca_btl_ugni_base_frag_t *frag, int rc)
{
    (void) mca_btl_ugni_start_eager_get(frag->endpoint, frag->hdr.eager_ex, frag);
}
/* Completion callback for an eager get: reassemble the message for the PML.
 * The 32-bit "lag" field packs length (low 24 bits) and tag (high 8 bits).
 * The PML header travelled inline in the SMSG header (pml_header) while the
 * payload was fetched by the get into segments[0]; present both as a
 * two-segment descriptor, deliver it, then notify the peer via the
 * RDMA_COMPLETE path. */
static void mca_btl_ugni_callback_eager_get (mca_btl_ugni_base_frag_t *frag, int rc)
{
    uint32_t len = frag->hdr.eager.send.lag & 0x00ffffff;
    uint8_t tag = frag->hdr.eager.send.lag >> 24;
    size_t payload_len = frag->hdr.eager.src_seg.seg_len;
    size_t hdr_len = len - payload_len;
    mca_btl_active_message_callback_t *reg;
    mca_btl_ugni_base_frag_t tmp;
    BTL_VERBOSE(("eager get for rem_ctx %p complete", frag->hdr.eager.ctx));
    /* segment 0 = PML header carried in the smsg header, segment 1 = gotten payload */
    tmp.base.des_dst = tmp.segments;
    tmp.base.des_dst_cnt = 2;
    tmp.segments[0].seg_addr.pval = frag->hdr.eager_ex.pml_header;
    tmp.segments[0].seg_len = hdr_len;
    tmp.segments[1].seg_addr.pval = frag->segments[0].seg_addr.pval;
    tmp.segments[1].seg_len = payload_len;
    reg = mca_btl_base_active_message_trigger + tag;
    reg->cbfunc(&frag->endpoint->btl->super, tag, &(tmp.base), reg->cbdata);
    /* the completion notification uses the rdma header; copy the remote ctx over */
    frag->hdr.rdma.ctx = frag->hdr.eager.ctx;
    /* tell the remote peer the operation is complete */
    mca_btl_ugni_callback_rdma_complete (frag, rc);
}
/* Service a GET_INIT request: fetch an eager message from the peer into a
 * registered eager-receive fragment, via FMA for small transfers or BTE RDMA
 * above the FMA limit. An internal rdma fragment passed in from a prior
 * retry is returned first; if no eager fragment is available, an internal
 * fragment stores the header and the request is queued for retry.
 *
 * ep   - endpoint the request arrived on
 * hdr  - extended eager header (includes inline PML header) by value
 * frag - fragment from a prior retry, or NULL
 * returns OMPI_SUCCESS, or an error code after queueing the retry */
int mca_btl_ugni_start_eager_get (mca_btl_base_endpoint_t *ep,
                                  mca_btl_ugni_eager_ex_frag_hdr_t hdr,
                                  mca_btl_ugni_base_frag_t *frag)
{
    int rc;
    /* an rdma_int frag can only hold the header; trade it for an eager frag */
    if (OPAL_UNLIKELY(frag && frag->my_list == &ep->btl->rdma_int_frags)) {
        mca_btl_ugni_frag_return (frag);
        frag = NULL;
    }
    /* NOTE(review): hdr is the _ex_ struct, so this looks like it should be
     * hdr.eager.ctx — verify (harmless if BTL_VERBOSE compiles out) */
    BTL_VERBOSE(("starting eager get for remote ctx: %p", hdr.ctx));
    do {
        if (NULL == frag) {
            rc = MCA_BTL_UGNI_FRAG_ALLOC_EAGER_RECV(ep, frag);
            if (OPAL_UNLIKELY(NULL == frag)) {
                /* no registered buffer available: remember the header in an
                 * internal frag and fall through to the retry path */
                (void) MCA_BTL_UGNI_FRAG_ALLOC_RDMA_INT(ep, frag);
                assert (NULL != frag);
                frag->hdr.eager_ex = hdr;
                break;
            }
        }
        frag->hdr.eager_ex = hdr;
        frag->base.des_cbfunc = NULL;
        frag->base.des_flags = MCA_BTL_DES_FLAGS_BTL_OWNERSHIP;
        /* local destination is segments[0]; remote source comes from the header */
        frag->base.des_dst = frag->segments;
        frag->base.des_dst_cnt = 1;
        frag->segments[1] = hdr.eager.src_seg;
        frag->base.des_src = frag->segments + 1;
        frag->base.des_src_cnt = 1;
        /* increase size to a multiple of 4 bytes (required for get) */
        frag->segments[0].seg_len = frag->segments[1].seg_len =
            (hdr.eager.src_seg.seg_len + 3) & ~3;
        /* FMA for small transfers, BTE RDMA for large ones */
        if (frag->segments[0].seg_len <= mca_btl_ugni_component.ugni_fma_limit) {
            rc = mca_btl_ugni_post_fma (frag, GNI_POST_FMA_GET, frag->base.des_dst, frag->base.des_src);
        } else {
            rc = mca_btl_ugni_post_bte (frag, GNI_POST_RDMA_GET, frag->base.des_dst, frag->base.des_src);
        }
        if (OPAL_UNLIKELY(OMPI_SUCCESS != rc)) {
            break;
        }
        frag->cbfunc = mca_btl_ugni_callback_eager_get;
        return OMPI_SUCCESS;
    } while (0);
    /* alloc or post failed: arm the retry callback and park the fragment */
    frag->cbfunc = mca_btl_ugni_callback_eager_get_retry;
    opal_list_append (&ep->btl->failed_frags, (opal_list_item_t *) frag);
    return rc;
}

Просмотреть файл

@ -40,6 +40,13 @@ mca_btl_ugni_free (struct mca_btl_base_module_t *btl,
static int
mca_btl_ugni_module_finalize (struct mca_btl_base_module_t* btl);
static mca_btl_base_descriptor_t *
mca_btl_ugni_prepare_dst (mca_btl_base_module_t *btl,
mca_btl_base_endpoint_t *endpoint,
mca_mpool_base_registration_t *registration,
opal_convertor_t *convertor, uint8_t order,
size_t reserve, size_t *size, uint32_t flags);
static struct mca_btl_base_descriptor_t *
mca_btl_ugni_prepare_src (struct mca_btl_base_module_t *btl,
struct mca_btl_base_endpoint_t *endpoint,
@ -48,13 +55,6 @@ mca_btl_ugni_prepare_src (struct mca_btl_base_module_t *btl,
uint8_t order, size_t reserve, size_t *size,
uint32_t flags);
static mca_btl_base_descriptor_t *
mca_btl_ugni_prepare_dst (mca_btl_base_module_t *btl,
mca_btl_base_endpoint_t *endpoint,
mca_mpool_base_registration_t *registration,
opal_convertor_t *convertor, uint8_t order,
size_t reserve, size_t *size, uint32_t flags);
mca_btl_ugni_module_t mca_btl_ugni_module = {
{
/* .btl_component = */ &mca_btl_ugni_component.super,
@ -81,7 +81,7 @@ mca_btl_ugni_module_t mca_btl_ugni_module = {
mca_btl_ugni_prepare_src,
mca_btl_ugni_prepare_dst,
mca_btl_ugni_send,
NULL, /* sendi */
mca_btl_ugni_sendi,
mca_btl_ugni_put,
mca_btl_ugni_get,
NULL, /* mca_btl_base_dump, */
@ -109,7 +109,7 @@ mca_btl_ugni_module_init (mca_btl_ugni_module_t *ugni_module,
OBJ_CONSTRUCT(&ugni_module->smsg_frags, ompi_free_list_t);
OBJ_CONSTRUCT(&ugni_module->rdma_frags, ompi_free_list_t);
OBJ_CONSTRUCT(&ugni_module->rdma_int_frags, ompi_free_list_t);
OBJ_CONSTRUCT(&ugni_module->pending_smsg_frags, opal_hash_table_t);
OBJ_CONSTRUCT(&ugni_module->pending_smsg_frags_bb, opal_pointer_array_t);
ugni_module->device = dev;
ugni_module->endpoints = NULL;
@ -132,9 +132,16 @@ mca_btl_ugni_module_init (mca_btl_ugni_module_t *ugni_module,
}
rc = GNI_CqCreate (ugni_module->device->dev_handle, mca_btl_ugni_component.cq_size,
0, GNI_CQ_NOBLOCK, NULL, NULL, &ugni_module->bte_local_cq);
0, GNI_CQ_NOBLOCK, NULL, NULL, &ugni_module->rdma_local_cq);
if (GNI_RC_SUCCESS != rc) {
BTL_ERROR(("error creating local BTE CQ"));
BTL_ERROR(("error creating local BTE/FMA CQ"));
return ompi_common_rc_ugni_to_ompi (rc);
}
rc = GNI_CqCreate (ugni_module->device->dev_handle, mca_btl_ugni_component.cq_size,
0, GNI_CQ_NOBLOCK, NULL, NULL, &ugni_module->smsg_local_cq);
if (GNI_RC_SUCCESS != rc) {
BTL_ERROR(("error creating local SMSG CQ"));
return ompi_common_rc_ugni_to_ompi (rc);
}
@ -154,21 +161,17 @@ static int
mca_btl_ugni_module_finalize (struct mca_btl_base_module_t *btl)
{
mca_btl_ugni_module_t *ugni_module = (mca_btl_ugni_module_t *)btl;
size_t ntotal_procs, i;
int rc;
int rc, i;
OBJ_DESTRUCT(&ugni_module->eager_frags_send);
OBJ_DESTRUCT(&ugni_module->eager_frags_recv);
OBJ_DESTRUCT(&ugni_module->smsg_frags);
OBJ_DESTRUCT(&ugni_module->rdma_frags);
OBJ_DESTRUCT(&ugni_module->rdma_int_frags);
OBJ_DESTRUCT(&ugni_module->pending_smsg_frags);
/* close all open connections and release endpoints */
if (NULL != ugni_module->endpoints) {
(void) ompi_proc_world (&ntotal_procs);
for (i = 0 ; i < ntotal_procs ; ++i) {
for (i = 0 ; i < ugni_module->endpoint_count ; ++i) {
if (ugni_module->endpoints[i]) {
mca_btl_ugni_release_ep (ugni_module->endpoints[i]);
}
@ -176,14 +179,21 @@ mca_btl_ugni_module_finalize (struct mca_btl_base_module_t *btl)
ugni_module->endpoints[i] = NULL;
}
free (ugni_module->endpoints);
ugni_module->endpoint_count = 0;
ugni_module->endpoints = NULL;
}
/* destroy all cqs */
rc = GNI_CqDestroy (ugni_module->bte_local_cq);
rc = GNI_CqDestroy (ugni_module->rdma_local_cq);
if (GNI_RC_SUCCESS != rc) {
BTL_ERROR(("error tearing down local BTE CQ"));
BTL_ERROR(("error tearing down local BTE/FMA CQ"));
}
rc = GNI_CqDestroy (ugni_module->smsg_local_cq);
if (GNI_RC_SUCCESS != rc) {
BTL_ERROR(("error tearing down local SMSG CQ"));
}
rc = GNI_CqDestroy (ugni_module->smsg_remote_cq);
@ -211,6 +221,8 @@ mca_btl_ugni_module_finalize (struct mca_btl_base_module_t *btl)
(void) mca_mpool_base_module_destroy (ugni_module->super.btl_mpool);
ugni_module->super.btl_mpool = NULL;
OBJ_DESTRUCT(&ugni_module->pending_smsg_frags_bb);
OBJ_DESTRUCT(&ugni_module->failed_frags);
return OMPI_SUCCESS;
@ -230,30 +242,131 @@ mca_btl_ugni_alloc(struct mca_btl_base_module_t *btl,
(void) MCA_BTL_UGNI_FRAG_ALLOC_EAGER_SEND(endpoint, frag);
}
if (OPAL_UNLIKELY(NULL == frag)) {
return NULL;
}
BTL_VERBOSE(("btl/ugni_module allocated frag of size: %u, flags: %x. frag = %p",
(unsigned int)size, flags, (void *) frag));
if (OPAL_LIKELY(NULL != frag)) {
frag->base.des_flags = flags;
frag->base.order = order;
frag->base.des_src = frag->segments;
frag->base.des_src_cnt = 1;
frag->base.des_dst = frag->segments;
frag->base.des_dst_cnt = 1;
frag->base.des_flags = flags;
frag->base.order = order;
frag->base.des_src = frag->segments + 1;
frag->base.des_src_cnt = 1;
frag->base.des_dst = frag->segments + 1;
frag->base.des_dst_cnt = 1;
frag->segments[0].seg_len = size;
}
frag->hdr_size = (size <= mca_btl_ugni_component.smsg_max_data) ? sizeof (frag->hdr.send) :
sizeof (frag->hdr.eager);
return (mca_btl_base_descriptor_t *) frag;
frag->segments[0].seg_addr.pval = NULL;
frag->segments[0].seg_len = 0;
frag->segments[1].seg_addr.pval = frag->base.super.ptr;
frag->segments[1].seg_len = size;
return &frag->base;
}
static int
mca_btl_ugni_free (struct mca_btl_base_module_t *btl,
mca_btl_base_descriptor_t *des)
{
mca_btl_ugni_frag_return ((mca_btl_ugni_base_frag_t *) des);
return mca_btl_ugni_frag_return ((mca_btl_ugni_base_frag_t *) des);
}
return OMPI_SUCCESS;
static inline struct mca_btl_base_descriptor_t *
mca_btl_ugni_prepare_src_send (struct mca_btl_base_module_t *btl,
mca_btl_base_endpoint_t *endpoint,
struct opal_convertor_t *convertor,
uint8_t order, size_t reserve, size_t *size,
uint32_t flags)
{
bool use_eager_get = (*size + reserve) > mca_btl_ugni_component.smsg_max_data;
mca_mpool_base_registration_t *registration = NULL;
mca_btl_ugni_base_frag_t *frag = NULL;
bool send_in_place;
void *data_ptr;
int rc;
opal_convertor_get_current_pointer (convertor, &data_ptr);
send_in_place = !(opal_convertor_need_buffers(convertor) ||
(use_eager_get && ((uintptr_t)data_ptr & 3)));
if (OPAL_UNLIKELY(*size > btl->btl_eager_limit)) {
*size = btl->btl_eager_limit;
}
if (OPAL_LIKELY(send_in_place)) {
(void) MCA_BTL_UGNI_FRAG_ALLOC_RDMA(endpoint, frag);
if (OPAL_UNLIKELY(NULL == frag)) {
return NULL;
}
BTL_VERBOSE(("preparing src for send fragment. size = %u",
(unsigned int)(*size + reserve)));
if (OPAL_UNLIKELY(true == use_eager_get)) {
rc = btl->btl_mpool->mpool_register(btl->btl_mpool, data_ptr,
*size, 0, &registration);
if (OPAL_UNLIKELY(OMPI_SUCCESS != rc)) {
mca_btl_ugni_frag_return (frag);
return NULL;
}
frag->registration = (mca_btl_ugni_reg_t *) registration;
memcpy ((void *) frag->segments[1].seg_key.key64,
(void *)&((mca_btl_ugni_reg_t *)registration)->memory_hdl,
sizeof (((mca_btl_ugni_reg_t *)registration)->memory_hdl));
}
} else {
uint32_t iov_count = 1;
struct iovec iov;
/* buffer the user's data */
if (OPAL_LIKELY(!use_eager_get)) {
(void) MCA_BTL_UGNI_FRAG_ALLOC_SMSG(endpoint, frag);
} else {
(void) MCA_BTL_UGNI_FRAG_ALLOC_EAGER_SEND(endpoint, frag);
}
if (OPAL_UNLIKELY(NULL == frag)) {
return NULL;
}
data_ptr = frag->base.super.ptr;
iov.iov_len = *size;
iov.iov_base = (IOVBASE_TYPE *) data_ptr;
rc = opal_convertor_pack (convertor, &iov, &iov_count, size);
if (OPAL_UNLIKELY(rc < 0)) {
mca_btl_ugni_frag_return (frag);
return NULL;
}
if (true == use_eager_get) {
registration = frag->base.super.registration;
memcpy ((void *) frag->segments[1].seg_key.key64,
(void *)&((mca_btl_ugni_reg_t *)registration)->memory_hdl,
sizeof (((mca_btl_ugni_reg_t *)registration)->memory_hdl));
}
}
frag->hdr_size = reserve + (use_eager_get ? sizeof (frag->hdr.eager) : sizeof (frag->hdr.send));
frag->segments[0].seg_addr.pval = use_eager_get ? frag->hdr.eager_ex.pml_header : frag->hdr.send_ex.pml_header;
frag->segments[0].seg_len = reserve;
frag->segments[1].seg_addr.pval = data_ptr;
frag->segments[1].seg_len = *size;
frag->base.des_src = frag->segments;
frag->base.des_src_cnt = 2;
frag->base.order = order;
frag->base.des_flags = flags;
return &frag->base;
}
static struct mca_btl_base_descriptor_t *
@ -268,80 +381,45 @@ mca_btl_ugni_prepare_src (struct mca_btl_base_module_t *btl,
void *data_ptr;
int rc;
if (OPAL_LIKELY(reserve)) {
return mca_btl_ugni_prepare_src_send (btl, endpoint, convertor,
order, reserve, size, flags);
}
opal_convertor_get_current_pointer (convertor, &data_ptr);
if (OPAL_LIKELY(reserve)) {
if ((*size + reserve) <= mca_btl_ugni_component.smsg_max_data) {
(void) MCA_BTL_UGNI_FRAG_ALLOC_SMSG(endpoint, frag);
} else {
(void) MCA_BTL_UGNI_FRAG_ALLOC_EAGER_SEND(endpoint, frag);
}
(void) MCA_BTL_UGNI_FRAG_ALLOC_RDMA(endpoint, frag);
if (OPAL_UNLIKELY(NULL == frag)) {
return NULL;
}
if (OPAL_UNLIKELY(NULL == frag)) {
return NULL;
}
if ((*size + reserve) > btl->btl_eager_limit) {
*size = btl->btl_eager_limit - reserve;
}
BTL_VERBOSE(("preparing src for send fragment. size = %u",
(unsigned int)(*size + reserve)));
if (OPAL_UNLIKELY(opal_convertor_need_buffers(convertor))) {
/* non-contiguous data requires using the convertor */
uint32_t iov_count = 1;
struct iovec iov;
iov.iov_len = *size;
iov.iov_base =
(IOVBASE_TYPE *)(((uintptr_t)(frag->segments[0].seg_addr.pval)) +
reserve);
rc = opal_convertor_pack (convertor, &iov, &iov_count, size);
if (OPAL_UNLIKELY(rc < 0)) {
/*
* For medium message use FMA protocols and for large message
* use BTE protocols
*/
/* No need to register while using FMA Put (registration is
* non-null in get-- is this always true?) */
if (*size >= mca_btl_ugni_component.ugni_fma_limit || (flags & MCA_BTL_DES_FLAGS_GET)) {
if (NULL == registration) {
rc = btl->btl_mpool->mpool_register(btl->btl_mpool, data_ptr,
*size, 0, &registration);
if (OPAL_UNLIKELY(OMPI_SUCCESS != rc)) {
mca_btl_ugni_frag_return (frag);
return NULL;
}
} else {
memmove ((void *)((uintptr_t)frag->segments[0].seg_addr.pval + reserve),
data_ptr, *size);
frag->registration = (mca_btl_ugni_reg_t *) registration;
}
memcpy ((void *) frag->segments[0].seg_key.key64,
(void *)&((mca_btl_ugni_reg_t *)registration)->memory_hdl,
sizeof (((mca_btl_ugni_reg_t *)registration)->memory_hdl));
} else {
(void) MCA_BTL_UGNI_FRAG_ALLOC_RDMA(endpoint, frag);
if (OPAL_UNLIKELY(NULL == frag)) {
return NULL;
}
/*
* For medium message use FMA protocols and for large message
* use BTE protocols
*/
/* No need to register while using FMA Put (registration is
* non-null in get-- is this always true?) */
if (*size >= mca_btl_ugni_component.ugni_fma_limit || (flags & MCA_BTL_DES_FLAGS_GET)) {
if (NULL == registration) {
rc = btl->btl_mpool->mpool_register(btl->btl_mpool, data_ptr,
*size, 0, &registration);
if (OPAL_UNLIKELY(OMPI_SUCCESS != rc)) {
mca_btl_ugni_frag_return (frag);
return NULL;
}
frag->registration = (mca_btl_ugni_reg_t *) registration;
}
memcpy ((void *) frag->segments[0].seg_key.key64,
(void *)&((mca_btl_ugni_reg_t *)registration)->memory_hdl,
sizeof (((mca_btl_ugni_reg_t *)registration)->memory_hdl));
} else {
memset ((void *) frag->segments[0].seg_key.key64, 0,
sizeof (frag->segments[0].seg_key.key64));
}
frag->segments[0].seg_addr.pval = data_ptr;
memset ((void *) frag->segments[0].seg_key.key64, 0,
sizeof (frag->segments[0].seg_key.key64));
}
frag->segments[0].seg_addr.pval = data_ptr;
frag->segments[0].seg_len = reserve + *size;
frag->base.des_src = frag->segments;

Просмотреть файл

@ -36,9 +36,61 @@ int mca_btl_ugni_put (struct mca_btl_base_module_t *btl,
return rc;
}
if (NULL != frag->base.des_cbfunc) {
des->des_flags |= MCA_BTL_DES_SEND_ALWAYS_CALLBACK;
}
if (frag->base.des_src->seg_len <= mca_btl_ugni_component.ugni_fma_limit) {
return mca_btl_ugni_post_fma (frag, GNI_POST_FMA_PUT, des->des_src, des->des_dst);
}
return mca_btl_ugni_post_bte (frag, GNI_POST_RDMA_PUT, des->des_src, des->des_dst);
}
/* reversed get */
static void mca_btl_ugni_callback_put_retry (mca_btl_ugni_base_frag_t *frag, int rc)
{
(void) mca_btl_ugni_start_put(frag->endpoint, frag->hdr.rdma, frag);
}
int mca_btl_ugni_start_put (mca_btl_base_endpoint_t *ep,
mca_btl_ugni_rdma_frag_hdr_t hdr,
mca_btl_ugni_base_frag_t *frag)
{
int rc;
BTL_VERBOSE(("starting reverse get (put) for remote ctx: %p", hdr.ctx));
if (NULL == frag) {
rc = MCA_BTL_UGNI_FRAG_ALLOC_RDMA_INT(ep, frag);
if (OPAL_UNLIKELY(NULL == frag)) {
BTL_ERROR(("error allocating rdma frag for reverse get. rc = %d. fl_num_allocated = %d", rc,
ep->btl->rdma_int_frags.fl_num_allocated));
return rc;
}
}
frag->hdr.rdma = hdr;
frag->base.des_cbfunc = NULL;
frag->base.des_flags = MCA_BTL_DES_FLAGS_BTL_OWNERSHIP;
frag->segments[0] = hdr.src_seg;
frag->base.des_src = frag->segments;
frag->base.des_src_cnt = 1;
frag->segments[1] = hdr.dst_seg;
frag->base.des_dst = frag->segments + 1;
frag->base.des_dst_cnt = 1;
rc = mca_btl_ugni_put (&ep->btl->super, ep, &frag->base);
if (OPAL_UNLIKELY(OMPI_SUCCESS != rc)) {
frag->cbfunc = mca_btl_ugni_callback_put_retry;
opal_list_append (&ep->btl->failed_frags, (opal_list_item_t *) frag);
return rc;
}
frag->cbfunc = mca_btl_ugni_callback_rdma_complete;
return OMPI_SUCCESS;
}

Просмотреть файл

@ -16,19 +16,16 @@
#include "btl_ugni.h"
#include "btl_ugni_frag.h"
static inline void
mca_btl_ugni_post_frag_complete (ompi_common_ugni_post_desc_t *desc, int rc) {
mca_btl_ugni_base_frag_t *frag = MCA_BTL_UGNI_DESC_TO_FRAG(desc);
/* mca_btl_ugni_start_put: get operation could not be completed. start put instead */
int mca_btl_ugni_start_put (mca_btl_base_endpoint_t *ep,
mca_btl_ugni_rdma_frag_hdr_t hdr,
mca_btl_ugni_base_frag_t *frag);
/* always call put/get callback (if one is set) */
if (NULL != frag->base.des_cbfunc) {
frag->base.des_cbfunc(&frag->endpoint->btl->super, frag->endpoint, &frag->base, rc);
}
int mca_btl_ugni_start_eager_get (mca_btl_base_endpoint_t *ep,
mca_btl_ugni_eager_ex_frag_hdr_t hdr,
mca_btl_ugni_base_frag_t *frag);
if (OPAL_LIKELY(frag->base.des_flags & MCA_BTL_DES_FLAGS_BTL_OWNERSHIP)) {
mca_btl_ugni_frag_return (frag);
}
}
void mca_btl_ugni_callback_rdma_complete (mca_btl_ugni_base_frag_t *frag, int rc);
static inline int init_gni_post_desc(mca_btl_ugni_base_frag_t *frag,
gni_post_type_t op_type,
@ -49,7 +46,7 @@ static inline int init_gni_post_desc(mca_btl_ugni_base_frag_t *frag,
frag->post_desc.base.rdma_mode = 0;
frag->post_desc.base.src_cq_hndl = cq_hndl;
frag->post_desc.cbfunc = mca_btl_ugni_post_frag_complete;
frag->cbfunc = mca_btl_ugni_frag_complete;
frag->post_desc.endpoint = frag->endpoint->common;
frag->post_desc.tries = 0;
@ -65,11 +62,10 @@ static inline int mca_btl_ugni_post_fma (mca_btl_ugni_base_frag_t *frag, gni_pos
init_gni_post_desc (frag, op_type, lcl_seg->seg_addr.lval,
(gni_mem_handle_t *)&lcl_seg->seg_key.key64,
rem_seg->seg_addr.lval, (gni_mem_handle_t *)&rem_seg->seg_key.key64,
lcl_seg->seg_len, 0);
lcl_seg->seg_len, 0); /* CQ is ignored for FMA transactions */
rc = GNI_PostFma (frag->endpoint->common->ep_handle, &frag->post_desc.base);
rc = GNI_PostFma (frag->endpoint->rdma_ep_handle, &frag->post_desc.base);
if (GNI_RC_SUCCESS != rc) {
/* BTL_ERROR(("GNI_PostFma failed with rc = %d", rc)); */
assert(rc < 4);
rc = OMPI_ERR_OUT_OF_RESOURCE;
}
@ -86,9 +82,9 @@ static inline int mca_btl_ugni_post_bte (mca_btl_ugni_base_frag_t *frag, gni_pos
init_gni_post_desc (frag, op_type, lcl_seg->seg_addr.lval,
(gni_mem_handle_t *)&lcl_seg->seg_key.key64,
rem_seg->seg_addr.lval, (gni_mem_handle_t *)&rem_seg->seg_key.key64,
lcl_seg->seg_len, frag->endpoint->btl->bte_local_cq);
lcl_seg->seg_len, frag->endpoint->btl->rdma_local_cq);
rc = GNI_PostRdma (frag->endpoint->common->ep_handle, &frag->post_desc.base);
rc = GNI_PostRdma (frag->endpoint->rdma_ep_handle, &frag->post_desc.base);
if (GNI_RC_SUCCESS != rc) {
rc = ompi_common_rc_ugni_to_ompi (rc);
BTL_ERROR(("GNI_PostRdma failed with rc = %d", rc));

Просмотреть файл

@ -14,68 +14,44 @@
#include "btl_ugni_frag.h"
#include "btl_ugni_smsg.h"
void mca_btl_ugni_local_smsg_complete (void *btl_ctx, uint32_t msg_id, int rc)
{
mca_btl_ugni_module_t *btl = (mca_btl_ugni_module_t *) btl_ctx;
mca_btl_ugni_base_frag_t *frag;
int lrc;
lrc = opal_hash_table_get_value_uint32 (&btl->pending_smsg_frags,
msg_id, (void **) &frag);
if (OPAL_UNLIKELY(OPAL_SUCCESS != lrc)) {
return;
}
opal_hash_table_remove_value_uint32 (&btl->pending_smsg_frags,
msg_id);
assert (NULL != frag);
/* completion callback */
if (NULL != frag->base.des_cbfunc) {
frag->base.des_cbfunc(&btl->super, frag->endpoint, &frag->base, rc);
}
if (frag->base.des_flags & MCA_BTL_DES_FLAGS_BTL_OWNERSHIP) {
mca_btl_ugni_frag_return (frag);
}
}
int mca_btl_ugni_send (struct mca_btl_base_module_t *btl,
struct mca_btl_base_endpoint_t *btl_peer,
struct mca_btl_base_descriptor_t *descriptor,
mca_btl_base_tag_t tag)
{
mca_btl_ugni_base_frag_t *frag = (mca_btl_ugni_base_frag_t *) descriptor;
size_t size = frag->segments[0].seg_len + frag->segments[1].seg_len;
bool use_eager_get = size > mca_btl_ugni_component.smsg_max_data;
int rc;
BTL_VERBOSE(("btl/ugni sending descriptor %p from %d -> %d. length = %d", (void *)descriptor,
ORTE_PROC_MY_NAME->vpid, btl_peer->common->ep_rem_id, frag->segments[0].seg_len));
/* tag and len are at the same location in eager and smsg frag hdrs */
frag->hdr.send.tag = tag;
frag->hdr.send.len = frag->segments[0].seg_len;
frag->hdr.send.lag = (tag << 24) | size;
if (OPAL_UNLIKELY(use_eager_get)) {
frag->hdr.eager.src_seg = frag->segments[1];
frag->hdr.eager.ctx = (void *) frag;
}
frag->base.des_flags |= MCA_BTL_DES_SEND_ALWAYS_CALLBACK;
frag->endpoint = btl_peer;
rc = mca_btl_ugni_check_endpoint_state (btl_peer);
if (OPAL_UNLIKELY(OMPI_SUCCESS != rc)) {
descriptor->des_flags |= MCA_BTL_DES_SEND_ALWAYS_CALLBACK;
opal_list_append (&btl_peer->pending_list, (opal_list_item_t *) frag);
/* connection started and request queued or something bad happened */
return OMPI_SUCCESS;
}
if (frag->segments[0].seg_len <= mca_btl_ugni_component.smsg_max_data) {
return ompi_mca_btl_ugni_smsg_send (frag, false, &frag->hdr.send, sizeof (frag->hdr.send),
descriptor->des_src->seg_addr.pval, descriptor->des_src->seg_len,
MCA_BTL_UGNI_TAG_SEND);
} else {
frag->hdr.eager.src_seg = frag->segments[0];
frag->hdr.eager.ctx = (void *) &frag->post_desc;
return ompi_mca_btl_ugni_smsg_send (frag, true, &frag->hdr.eager, sizeof (frag->hdr.eager),
NULL, 0, MCA_BTL_UGNI_TAG_GET_INIT);
rc = ompi_mca_btl_ugni_smsg_send (frag, use_eager_get, &frag->hdr.send, frag->hdr_size,
frag->segments[1].seg_addr.pval, use_eager_get ? 0 : frag->segments[1].seg_len,
use_eager_get ? MCA_BTL_UGNI_TAG_GET_INIT : MCA_BTL_UGNI_TAG_SEND);
if (OPAL_UNLIKELY(OMPI_SUCCESS != rc)) {
return rc;
}
(void) mca_btl_ugni_progress_local_smsg ((mca_btl_ugni_module_t *) btl);
return OMPI_SUCCESS;
}

Просмотреть файл

@ -22,9 +22,10 @@ int mca_btl_ugni_sendi (struct mca_btl_base_module_t *btl,
uint32_t flags, mca_btl_base_tag_t tag,
mca_btl_base_descriptor_t **descriptor)
{
size_t length = header_size + payload_size;
const size_t length = header_size + payload_size;
mca_btl_ugni_base_frag_t *frag;
void *data_ptr = NULL;
uint32_t iov_count = 1;
struct iovec iov;
size_t max_data;
int rc;
@ -38,10 +39,11 @@ int mca_btl_ugni_sendi (struct mca_btl_base_module_t *btl,
return OMPI_ERR_RESOURCE_BUSY;
}
if (length <= mca_btl_ugni_component.smsg_max_data) {
rc = MCA_BTL_UGNI_FRAG_ALLOC_SMSG(endpoint, frag);
if (OPAL_LIKELY(length <= mca_btl_ugni_component.smsg_max_data)) {
(void) MCA_BTL_UGNI_FRAG_ALLOC_SMSG(endpoint, frag);
frag->segments[0].seg_addr.pval = frag->base.super.ptr;
} else {
rc = MCA_BTL_UGNI_FRAG_ALLOC_EAGER_SEND(endpoint, frag);
(void) MCA_BTL_UGNI_FRAG_ALLOC_EAGER_SEND(endpoint, frag);
}
if (OPAL_UNLIKELY(NULL == frag)) {
@ -55,40 +57,31 @@ int mca_btl_ugni_sendi (struct mca_btl_base_module_t *btl,
frag->base.des_cbfunc = NULL;
frag->base.des_flags = flags | MCA_BTL_DES_FLAGS_BTL_OWNERSHIP;
frag->segments[0].seg_len = length;
frag->hdr.send.tag = tag;
frag->hdr.send.len = length;
frag->hdr.send.lag = (tag << 24) | length;
/* write match header (with MPI comm/tag/etc. info) */
memcpy (frag->segments[0].seg_addr.pval, header, header_size);
memmove (frag->segments[0].seg_addr.pval, header, header_size);
/*
We can add MEMCHECKER calls before and after the packing.
*/
if (payload_size) {
if (OPAL_UNLIKELY(opal_convertor_need_buffers (convertor))) {
uint32_t iov_count = 1;
struct iovec iov;
/* pack the data into the supplied buffer */
iov.iov_base = (IOVBASE_TYPE *)((uintptr_t)frag->segments[0].seg_addr.pval + header_size);
iov.iov_len = max_data = payload_size;
/* pack the data into the supplied buffer */
iov.iov_base = (IOVBASE_TYPE *)((uintptr_t)frag->segments[0].seg_addr.pval + header_size);
iov.iov_len = max_data = payload_size;
(void) opal_convertor_pack (convertor, &iov, &iov_count, &max_data);
(void) opal_convertor_pack (convertor, &iov, &iov_count, &max_data);
assert (max_data == payload_size);
} else {
opal_convertor_get_current_pointer (convertor, &data_ptr);
memmove ((void *)((uintptr_t)frag->segments[0].seg_addr.pval + header_size), data_ptr, payload_size);
}
assert (max_data == payload_size);
}
/* send message */
if (length <= mca_btl_ugni_component.smsg_max_data) {
if (OPAL_LIKELY(length <= mca_btl_ugni_component.smsg_max_data)) {
rc = ompi_mca_btl_ugni_smsg_send (frag, false, &frag->hdr.send, sizeof (frag->hdr.send),
frag->segments[0].seg_addr.pval, length, MCA_BTL_UGNI_TAG_SEND);
} else {
frag->segments[0].seg_len = length;
frag->hdr.eager.src_seg = frag->segments[0];
frag->hdr.eager.ctx = (void *) &frag->post_desc;
@ -96,7 +89,7 @@ int mca_btl_ugni_sendi (struct mca_btl_base_module_t *btl,
NULL, 0, MCA_BTL_UGNI_TAG_GET_INIT);
}
if (OPAL_UNLIKELY(OMPI_SUCCESS != rc)) {
if (OPAL_UNLIKELY(0 > rc)) {
/* return this frag */
mca_btl_ugni_frag_return (frag);
}

196
ompi/mca/btl/ugni/btl_ugni_smsg.c Обычный файл
Просмотреть файл

@ -0,0 +1,196 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2011-2012 Los Alamos National Security, LLC. All rights
* reserved.
* Copyright (c) 2011 UT-Battelle, LLC. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "btl_ugni_smsg.h"
#include "btl_ugni_rdma.h"
/* progress */
int mca_btl_ugni_smsg_process (mca_btl_base_endpoint_t *ep)
{
mca_btl_active_message_callback_t *reg;
mca_btl_ugni_base_frag_t frag;
bool disconnect = false;
uintptr_t data_ptr;
gni_return_t rc;
uint32_t len;
int count = 0;
if (!opal_atomic_cmpset_32 (&ep->smsg_progressing, 0, 1)) {
/* already progressing (we can't support reentry here) */
return 0;
}
/* per uGNI documentation we loop until the mailbox is empty */
do {
uint8_t tag = GNI_SMSG_ANY_TAG;
rc = GNI_SmsgGetNextWTag (ep->smsg_ep_handle, (void **) &data_ptr, &tag);
if (GNI_RC_NOT_DONE == rc) {
BTL_VERBOSE(("no smsg message waiting. rc = %d", rc));
ep->smsg_progressing = 0;
return count;
}
if (OPAL_UNLIKELY(GNI_RC_SUCCESS != rc)) {
fprintf (stderr, "Unhandled Smsg error: %d\n", rc);
assert (0);
return OMPI_ERROR;
}
if (OPAL_UNLIKELY(0 == data_ptr)) {
BTL_ERROR(("null data ptr!"));
assert (0);
return OMPI_ERROR;
}
count++;
BTL_VERBOSE(("got smsg fragment. tag = %d\n", tag));
switch (tag) {
case MCA_BTL_UGNI_TAG_SEND:
frag.hdr.send = ((mca_btl_ugni_send_frag_hdr_t *) data_ptr)[0];
tag = frag.hdr.send.lag >> 24;
len = frag.hdr.send.lag & 0x00ffffff;
BTL_VERBOSE(("received smsg fragment. hdr = {len = %u, tag = %d}", len, tag));
reg = mca_btl_base_active_message_trigger + tag;
frag.base.des_dst = frag.segments;
frag.base.des_dst_cnt = 1;
frag.segments[0].seg_addr.pval = (void *)((uintptr_t)data_ptr + sizeof (mca_btl_ugni_send_frag_hdr_t));
frag.segments[0].seg_len = len;
assert (NULL != reg->cbfunc);
reg->cbfunc(&ep->btl->super, tag, &(frag.base), reg->cbdata);
break;
case MCA_BTL_UGNI_TAG_PUT_INIT:
frag.hdr.rdma = ((mca_btl_ugni_rdma_frag_hdr_t *) data_ptr)[0];
mca_btl_ugni_start_put (ep, frag.hdr.rdma, NULL);
break;
case MCA_BTL_UGNI_TAG_GET_INIT:
frag.hdr.eager_ex = ((mca_btl_ugni_eager_ex_frag_hdr_t *) data_ptr)[0];
mca_btl_ugni_start_eager_get (ep, frag.hdr.eager_ex, NULL);
break;
case MCA_BTL_UGNI_TAG_RDMA_COMPLETE:
frag.hdr.rdma = ((mca_btl_ugni_rdma_frag_hdr_t *) data_ptr)[0];
mca_btl_ugni_frag_complete (frag.hdr.rdma.ctx, OMPI_SUCCESS);
break;
case MCA_BTL_UGNI_TAG_DISCONNECT:
/* remote endpoint has disconnected */
disconnect = true;
break;
default:
BTL_ERROR(("unknown tag %d\n", tag));
break;
}
rc = GNI_SmsgRelease (ep->smsg_ep_handle);
if (OPAL_UNLIKELY(GNI_RC_SUCCESS != rc)) {
BTL_ERROR(("Smsg release failed! rc = %d", rc));
return OMPI_ERROR;
}
} while (!disconnect);
ep->smsg_progressing = false;
/* disconnect if we get here */
mca_btl_ugni_ep_disconnect (ep, false);
return count;
}
static inline int
mca_btl_ugni_handle_remote_smsg_overrun (mca_btl_ugni_module_t *btl)
{
gni_cq_entry_t event_data;
unsigned int ep_index;
int count, rc;
BTL_VERBOSE(("btl/ugni_component detected SMSG CQ overrun. "
"processing message backlog..."));
/* we don't know which endpoint lost an smsg completion. clear the
smsg remote cq and check all mailboxes */
/* clear out remote cq */
do {
rc = GNI_CqGetEvent (btl->smsg_remote_cq, &event_data);
} while (GNI_RC_NOT_DONE != rc);
for (ep_index = 0, count = 0 ; ep_index < btl->endpoint_count ; ++ep_index) {
mca_btl_base_endpoint_t *ep = btl->endpoints[ep_index];
if (NULL == ep || MCA_BTL_UGNI_EP_STATE_CONNECTED != ep->state) {
continue;
}
/* clear out smsg mailbox */
rc = mca_btl_ugni_smsg_process (ep);
if (OPAL_LIKELY(rc >= 0)) {
count += rc;
}
}
return count;
}
int mca_btl_ugni_progress_remote_smsg (mca_btl_ugni_module_t *btl)
{
mca_btl_base_endpoint_t *ep;
gni_cq_entry_t event_data;
gni_return_t rc;
rc = GNI_CqGetEvent (btl->smsg_remote_cq, &event_data);
if (GNI_RC_NOT_DONE == rc) {
return 0;
}
if (OPAL_UNLIKELY(GNI_RC_SUCCESS != rc || !GNI_CQ_STATUS_OK(event_data) ||
GNI_CQ_OVERRUN(event_data))) {
if (GNI_RC_ERROR_RESOURCE == rc ||
(GNI_RC_SUCCESS == rc && GNI_CQ_OVERRUN(event_data))) {
/* recover from smsg cq overrun */
return mca_btl_ugni_handle_remote_smsg_overrun (btl);
}
BTL_ERROR(("unhandled error in GNI_CqGetEvent"));
/* unhandled error: crash */
assert (0);
return ompi_common_rc_ugni_to_ompi (rc);
}
BTL_VERBOSE(("REMOTE CQ: Got event 0x%" PRIx64 ". msg id = %" PRIu64
". ok = %d, type = %" PRIu64 "\n", (uint64_t) event_data,
GNI_CQ_GET_MSG_ID(event_data), GNI_CQ_STATUS_OK(event_data),
GNI_CQ_GET_TYPE(event_data)));
ep = btl->endpoints[GNI_CQ_GET_MSG_ID(event_data)];
if (OPAL_UNLIKELY(MCA_BTL_UGNI_EP_STATE_CONNECTED != ep->state)) {
/* due to the nature of datagrams we may get a smsg completion before
we get mailbox info from the peer */
BTL_VERBOSE(("event occurred on an unconnected endpoint! ep state = %d", ep->state));
return 0;
}
return mca_btl_ugni_smsg_process (ep);
}

Просмотреть файл

@ -15,6 +15,8 @@
#include "btl_ugni.h"
#include "btl_ugni_endpoint.h"
#include "btl_ugni_frag.h"
#include "btl_ugni_rdma.h"
typedef enum {
MCA_BTL_UGNI_TAG_SEND,
@ -24,6 +26,57 @@ typedef enum {
MCA_BTL_UGNI_TAG_RDMA_COMPLETE
} mca_btl_ugni_smsg_tag_t;
int mca_btl_ugni_smsg_process (mca_btl_base_endpoint_t *ep);
int mca_btl_ugni_progress_remote_smsg (mca_btl_ugni_module_t *btl);
static inline int mca_btl_ugni_smsg_next_local_completion (mca_btl_ugni_module_t *ugni_module, mca_btl_ugni_base_frag_t **frag)
{
gni_cq_entry_t event_data;
gni_return_t rc;
uint32_t msg_id;
*frag = NULL;
rc = GNI_CqGetEvent (ugni_module->smsg_local_cq, &event_data);
if (GNI_RC_NOT_DONE == rc) {
return OMPI_SUCCESS;
}
if (OPAL_UNLIKELY((GNI_RC_SUCCESS != rc && !event_data) || GNI_CQ_OVERRUN(event_data))) {
/* TODO -- need to handle overrun -- how do we do this without an event?
will the event eventually come back? Ask Cray */
BTL_ERROR(("post error! cq overrun = %d", (int)GNI_CQ_OVERRUN(event_data)));
assert (0);
return ompi_common_rc_ugni_to_ompi (rc);
}
assert (GNI_CQ_GET_TYPE(event_data) == GNI_CQ_EVENT_TYPE_SMSG);
msg_id = GNI_CQ_GET_MSG_ID(event_data);
if ((uint32_t) -1 == msg_id) {
/* nothing to do */
return OMPI_SUCCESS;
}
*frag = (mca_btl_ugni_base_frag_t *) opal_pointer_array_get_item (&ugni_module->pending_smsg_frags_bb, msg_id);
assert (NULL != *frag);
return GNI_CQ_STATUS_OK(event_data) ? OMPI_SUCCESS : OMPI_ERROR;
}
static inline int mca_btl_ugni_progress_local_smsg (mca_btl_ugni_module_t *ugni_module)
{
mca_btl_ugni_base_frag_t *frag;
int rc;
rc = mca_btl_ugni_smsg_next_local_completion (ugni_module, &frag);
if (NULL != frag) {
mca_btl_ugni_frag_complete (frag, rc);
}
return 1;
}
static inline int ompi_mca_btl_ugni_smsg_send (mca_btl_ugni_base_frag_t *frag,
const bool ignore_local_comp,
void *hdr, size_t hdr_len,
@ -32,21 +85,11 @@ static inline int ompi_mca_btl_ugni_smsg_send (mca_btl_ugni_base_frag_t *frag,
gni_return_t grc;
int rc;
if (!ignore_local_comp) {
rc = opal_hash_table_set_value_uint32 (&frag->endpoint->btl->pending_smsg_frags,
frag->msg_id, (void *) frag);
if (OPAL_UNLIKELY(OPAL_SUCCESS != rc)) {
return rc;
}
}
grc = GNI_SmsgSendWTag (frag->endpoint->common->ep_handle, hdr, hdr_len, payload, payload_len,
ignore_local_comp ? (uint32_t)-1 : frag->msg_id, tag);
grc = GNI_SmsgSendWTag (frag->endpoint->smsg_ep_handle, hdr, hdr_len, payload, payload_len,
ignore_local_comp ? (uint32_t) -1 : frag->msg_id, tag);
if (OPAL_UNLIKELY(GNI_RC_SUCCESS != grc)) {
BTL_VERBOSE(("GNI_SmsgSendWTag failed with rc = %d", rc));
opal_hash_table_remove_value_uint32 (&frag->endpoint->btl->pending_smsg_frags,
frag->msg_id);
/* see if we can free up some credits */
mca_btl_ugni_progress_remote_smsg (frag->endpoint->btl);
if (OPAL_LIKELY(GNI_RC_NOT_DONE == grc)) {
BTL_VERBOSE(("out of credits"));
@ -54,6 +97,9 @@ static inline int ompi_mca_btl_ugni_smsg_send (mca_btl_ugni_base_frag_t *frag,
return OMPI_ERR_OUT_OF_RESOURCE;
}
BTL_ERROR(("GNI_SmsgSendWTag failed with rc = %d. handle = %d, hdr_len = %d, payload_len = %d",
grc, frag->endpoint->smsg_ep_handle, hdr_len, payload_len));
return OMPI_ERROR;
}

Просмотреть файл

@ -274,8 +274,7 @@ int ompi_common_ugni_init (void)
/* Create a communication domain */
modes = GNI_CDM_MODE_FORK_FULLCOPY | GNI_CDM_MODE_CACHED_AMO_ENABLED |
GNI_CDM_MODE_DUAL_EVENTS | GNI_CDM_MODE_ERR_NO_KILL |
GNI_CDM_MODE_FAST_DATAGRAM_POLL;
GNI_CDM_MODE_ERR_NO_KILL | GNI_CDM_MODE_FAST_DATAGRAM_POLL;
/* collect uGNI information */
rc = get_ptag(&ompi_common_ugni_module.ptag);

Просмотреть файл

@ -83,9 +83,6 @@ struct ompi_common_ugni_post_desc_t {
ompi_common_ugni_endpoint_t *endpoint;
int tries;
/* NTH: callback function for this post. this may change in the future */
void (*cbfunc) (struct ompi_common_ugni_post_desc_t *, int);
};
typedef struct ompi_common_ugni_post_desc_t ompi_common_ugni_post_desc_t;
@ -122,92 +119,4 @@ int ompi_common_ugni_init (void);
*/
int ompi_common_ugni_fini (void);
extern void mca_btl_ugni_local_smsg_complete (void *, uint32_t, int);
static inline int
ompi_common_ugni_process_completed_post (ompi_common_ugni_device_t *dev,
gni_cq_handle_t cq_handle) {
ompi_common_ugni_post_desc_t *desc;
gni_return_t rc = GNI_RC_NOT_DONE;
gni_cq_entry_t event_data = 0;
uint32_t recoverable = 1;
rc = GNI_CqGetEvent (cq_handle, &event_data);
if (GNI_RC_NOT_DONE == rc) {
return 0;
}
if (OPAL_UNLIKELY((GNI_RC_SUCCESS != rc && !event_data) || GNI_CQ_OVERRUN(event_data))) {
/* TODO -- need to handle overrun -- how do we do this without an event?
will the event eventually come back? Ask Cray */
OPAL_OUTPUT((-1, "post error! cq overrun = %d", (int)GNI_CQ_OVERRUN(event_data)));
assert (0);
return ompi_common_rc_ugni_to_ompi (rc);
}
/* local SMS completion */
if (GNI_CQ_GET_TYPE(event_data) == GNI_CQ_EVENT_TYPE_SMSG) {
uint32_t msg_id = GNI_CQ_GET_MSG_ID(event_data);
assert (GNI_CQ_STATUS_OK(event_data));
if ((uint32_t)-1 == msg_id) {
/* nothing to do */
return 1;
}
/* inform the btl of local smsg completion */
mca_btl_ugni_local_smsg_complete (dev->btl_ctx, msg_id,
GNI_CQ_STATUS_OK(event_data) ? OMPI_SUCCESS : OMPI_ERROR);
return 1;
}
rc = GNI_GetCompleted (cq_handle, event_data, (gni_post_descriptor_t **) &desc);
if (OPAL_UNLIKELY(GNI_RC_SUCCESS != rc)) {
OPAL_OUTPUT((-1, "Error in GNI_GetComplete %s", gni_err_str[rc]));
return ompi_common_rc_ugni_to_ompi (rc);
}
if (OPAL_UNLIKELY(!GNI_CQ_STATUS_OK(event_data))) {
(void) GNI_CqErrorRecoverable (event_data, &recoverable);
if (OPAL_UNLIKELY(!recoverable ||
++desc->tries >= ompi_common_ugni_module.rdma_max_retries)) {
OPAL_OUTPUT((-1, "giving up on descriptor %p", (void *) desc));
/* give up */
desc->cbfunc (desc, OMPI_ERROR);
return OMPI_ERROR;
}
/* repost transaction */
if (GNI_POST_RDMA_PUT == desc->base.type ||
GNI_POST_RDMA_GET == desc->base.type) {
rc = GNI_PostRdma (desc->endpoint->ep_handle, &desc->base);
} else {
rc = GNI_PostFma (desc->endpoint->ep_handle, &desc->base);
}
return ompi_common_rc_ugni_to_ompi (rc);
}
desc->cbfunc (desc, OMPI_SUCCESS);
return 1;
}
/**
 * Progress function: poll the local CQ of every opened uGNI device
 * for FMA/local SMSG completions.
 *
 * @return number of completions processed across all devices
 */
static inline int ompi_common_ugni_progress (void) {
    ompi_common_ugni_device_t *device;
    int dev_index, events = 0;

    for (dev_index = 0 ; dev_index < ompi_common_ugni_module.device_count ; ++dev_index) {
        device = ompi_common_ugni_module.devices + dev_index;

        /* progress fma/local smsg completions */
        events += ompi_common_ugni_process_completed_post (device, device->dev_local_cq);
    }

    return events;
}
#endif /* MPI_COMMON_UGNI_H */

Просмотреть файл

@ -12,22 +12,13 @@
#include "common_ugni.h"
/* OBJ_CLASS constructor: put a common endpoint into its initial
 * (unbound) state with no outstanding bind references. */
static void ompi_common_ugni_ep_construct (ompi_common_ugni_endpoint_t *ep)
{
    OBJ_CONSTRUCT(&ep->lock, opal_mutex_t);
    ep->state = OMPI_COMMON_UGNI_INIT;
    ep->bind_count = 0;
}
/* OBJ_CLASS destructor: release endpoint resources and clear this
 * endpoint's slot in the device's endpoint table. */
static void ompi_common_ugni_ep_destruct (ompi_common_ugni_endpoint_t *ep)
{
    OBJ_DESTRUCT(&ep->lock);
    ompi_common_ugni_endpoint_unbind (ep);
    ep->dev->dev_eps[ep->ep_rem_id] = NULL;
}
OBJ_CLASS_INSTANCE(ompi_common_ugni_endpoint_t, opal_object_t,
ompi_common_ugni_ep_construct, ompi_common_ugni_ep_destruct);
NULL, ompi_common_ugni_ep_destruct);
int ompi_common_ugni_endpoint_for_proc (ompi_common_ugni_device_t *dev, ompi_proc_t *peer_proc,
ompi_common_ugni_endpoint_t **ep)
@ -82,66 +73,49 @@ void ompi_common_ugni_endpoint_return (ompi_common_ugni_endpoint_t *ep)
OBJ_RELEASE(ep);
}
int ompi_common_ugni_endpoint_bind (ompi_common_ugni_endpoint_t *ep)
int ompi_common_ugni_ep_create (ompi_common_ugni_endpoint_t *cep, gni_cq_handle_t cq, gni_ep_handle_t *ep_handle)
{
int rc;
gni_return_t grc;
assert (NULL != ep);
if (OPAL_UNLIKELY(NULL == ep)) {
if (OPAL_UNLIKELY(NULL == cep)) {
assert (0);
return OPAL_ERR_BAD_PARAM;
}
do {
if (OPAL_LIKELY(OMPI_COMMON_UGNI_BOUND <= ep->state)) {
return OMPI_SUCCESS;
}
OPAL_THREAD_LOCK(&ep->lock);
/* create a uGNI endpoint handle and bind it to the remote peer */
rc = GNI_EpCreate (ep->dev->dev_handle, ep->dev->dev_local_cq,
&ep->ep_handle);
if (GNI_RC_SUCCESS != rc) {
rc = ompi_common_rc_ugni_to_ompi (rc);
break;
}
rc = GNI_EpBind (ep->ep_handle, ep->ep_rem_addr, ep->ep_rem_id);
if (GNI_RC_SUCCESS != rc) {
rc = ompi_common_rc_ugni_to_ompi (rc);
break;
}
ep->state = OMPI_COMMON_UGNI_BOUND;
} while (0);
OPAL_THREAD_UNLOCK(&ep->lock);
return rc;
}
int ompi_common_ugni_endpoint_unbind (ompi_common_ugni_endpoint_t *ep)
{
int rc;
if (0 == ep->bind_count) {
return OMPI_SUCCESS;
/* create a uGNI endpoint handle and bind it to the remote peer */
grc = GNI_EpCreate (cep->dev->dev_handle, cq, ep_handle);
if (OPAL_UNLIKELY(GNI_RC_SUCCESS != grc)) {
return ompi_common_rc_ugni_to_ompi (grc);
}
assert (OMPI_COMMON_UGNI_BOUND == ep->state);
rc = GNI_EpUnbind (ep->ep_handle);
if (OPAL_UNLIKELY(GNI_RC_SUCCESS != rc)) {
/* should warn */
grc = GNI_EpBind (*ep_handle, cep->ep_rem_addr, cep->ep_rem_id);
if (GNI_RC_SUCCESS != grc) {
return ompi_common_rc_ugni_to_ompi (grc);
}
GNI_EpDestroy (ep->ep_handle);
if (OPAL_UNLIKELY(GNI_RC_SUCCESS != rc)) {
/* should warn */
}
ep->state = OMPI_COMMON_UGNI_INIT;
ep->bind_count--;
return OMPI_SUCCESS;
}
/**
 * Unbind and destroy a uGNI endpoint handle.
 *
 * @param[IN,OUT] ep  pointer to the endpoint handle; zeroed on return
 *
 * @return OMPI_SUCCESS (uGNI errors are currently only noted, not returned)
 */
int ompi_common_ugni_ep_destroy (gni_ep_handle_t *ep)
{
    int rc;

    if (NULL == ep || 0 == *ep) {
        /* nothing to tear down */
        return OMPI_SUCCESS;
    }

    rc = GNI_EpUnbind (*ep);
    if (OPAL_UNLIKELY(GNI_RC_SUCCESS != rc)) {
        /* should warn */
    }

    /* bug fix: capture the destroy return code -- previously the stale
     * GNI_EpUnbind status was re-checked here and destroy failures were
     * never detected */
    rc = GNI_EpDestroy (*ep);
    if (OPAL_UNLIKELY(GNI_RC_SUCCESS != rc)) {
        /* should warn */
    }

    *ep = 0;

    return OMPI_SUCCESS;
}

Просмотреть файл

@ -13,25 +13,12 @@
#if !defined(MPI_COMMON_UGNI_EP_H)
#define MPI_COMMON_UGNI_EP_H
enum ompi_common_ugni_endpoint_state_t {
OMPI_COMMON_UGNI_INIT = 0,
OMPI_COMMON_UGNI_BOUND,
OMPI_COMMON_UGNI_CONNECTING,
OMPI_COMMON_UGNI_CONNECTED
};
typedef enum ompi_common_ugni_endpoint_state_t ompi_common_ugni_endpoint_state_t;
struct ompi_common_ugni_device_t;
struct ompi_common_ugni_endpoint_t {
opal_object_t super;
gni_ep_handle_t ep_handle; /**< uGNI handle for this endpoint */
ompi_common_ugni_endpoint_state_t state; /**< bind/connection state */
uint32_t ep_rem_addr, ep_rem_id; /**< remote information */
struct ompi_common_ugni_device_t *dev; /**< device this endpoint is using */
opal_mutex_t lock;
int bind_count; /**< bind reference count */
void *btl_ctx; /**< btl context for this endpoint */
};
typedef struct ompi_common_ugni_endpoint_t ompi_common_ugni_endpoint_t;
@ -51,19 +38,21 @@ int ompi_common_ugni_endpoint_for_proc (struct ompi_common_ugni_device_t *dev, o
/*
* Allocate and bind a uGNI endpoint handle to the remote peer.
*
 * @param[IN]  cep       common endpoint to create the handle for
 * @param[IN]  cq        completion queue to attach to the new handle
 * @param[OUT] ep_handle uGNI endpoint handle created and bound by this call
*/
int ompi_common_ugni_endpoint_bind (ompi_common_ugni_endpoint_t *ep);
int ompi_common_ugni_ep_create (ompi_common_ugni_endpoint_t *cep, gni_cq_handle_t cq, gni_ep_handle_t *ep_handle);
/*
 * Unbind and free a uGNI endpoint handle.
 *
 * @param[IN] ep_handle uGNI endpoint handle to unbind and release
*/
int ompi_common_ugni_endpoint_unbind (ompi_common_ugni_endpoint_t *ep);
int ompi_common_ugni_ep_destroy (gni_ep_handle_t *ep_handle);
/*
 * Return (and possibly free) a common endpoint. The endpoint may not be used
* once it is returned.
*
* @param[IN] ep uGNI endpoint to return