552c9ca5a0
WHAT: Open our low-level communication infrastructure by moving all necessary components (btl/rcache/allocator/mpool) down into OPAL.

All the components required for inter-process communication are currently deeply integrated into the OMPI layer. Several groups/institutions have expressed interest in a more generic communication infrastructure, without all the OMPI layer dependencies. This communication layer should be made available at a lower software level, accessible to all layers in the Open MPI software stack. As an example, our ORTE layer could replace the current OOB and instead use the BTLs directly, gaining access to more reactive network interfaces than TCP. Similarly, external software libraries could take advantage of our highly optimized AM (active message) communication layer for their own purposes.

UTK, with support from Sandia, developed a version of Open MPI where the entire communication infrastructure has been moved down to OPAL (btl/rcache/allocator/mpool). Most of the moved components have been updated to match the new schema, with a few exceptions (mainly BTLs that I have no way of compiling/testing). Thus, the completion of this RFC is tied to completing this move for all BTLs. For this we need help from the rest of the Open MPI community, especially those supporting some of the BTLs. A non-exhaustive list of BTLs that qualify here: mx, portals4, scif, udapl, ugni, usnic.

This commit was SVN r32317.
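As a rough illustration of what "updated to match the new schema" means at the source level, the sketch below contrasts pre-move OMPI-layer names with their post-move OPAL-layer equivalents. The helper function example_ep_bind and the exact include paths are assumptions made for the example, not code from this commit.

/* Illustrative sketch only: a trivial BTL-side routine before and after the
 * move from the OMPI layer down to OPAL. */

/* Before the move (OMPI layer):
 *   #include "ompi/constants.h"    -- OMPI_SUCCESS
 *   #include "ompi/proc/proc.h"    -- ompi_proc_t
 *   static int example_ep_bind (ompi_proc_t *peer_proc) { ... return OMPI_SUCCESS; }
 */

/* After the move (OPAL layer): */
#include "opal/constants.h"   /* OPAL_SUCCESS */
#include "opal/util/proc.h"   /* opal_proc_t (assumed post-move location) */

static inline int example_ep_bind (opal_proc_t *peer_proc)
{
    (void) peer_proc;         /* placeholder; a real BTL endpoint would cache the peer */
    return OPAL_SUCCESS;
}

The same pattern (ompi_ prefixes and headers becoming opal_ ones) is what the ugni endpoint header below already reflects, e.g. its use of opal_proc_t and OPAL_SUCCESS.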
146 lines
4.0 KiB
C
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
 * Copyright (c) 2011-2013 Los Alamos National Security, LLC. All rights
 *                         reserved.
 * Copyright (c) 2011      UT-Battelle, LLC. All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */

#ifndef MCA_BTL_UGNI_ENDPOINT_H
#define MCA_BTL_UGNI_ENDPOINT_H

#include "btl_ugni.h"
enum mca_btl_ugni_endpoint_state_t {
    MCA_BTL_UGNI_EP_STATE_INIT = 0,
    MCA_BTL_UGNI_EP_STATE_CONNECTING,
    MCA_BTL_UGNI_EP_STATE_CONNECTED
};
typedef enum mca_btl_ugni_endpoint_state_t mca_btl_ugni_endpoint_state_t;

struct mca_btl_ugni_smsg_mbox_t;
typedef struct mca_btl_base_endpoint_t {
    opal_list_item_t super;

    /** opal process this endpoint is connected to */
    opal_proc_t *peer_proc;

    /** protects the endpoint state */
    opal_mutex_t lock;
    mca_btl_ugni_endpoint_state_t state;

    /** endpoint data shared with the common/ugni component */
    opal_common_ugni_endpoint_t *common;

    /** btl module this endpoint is associated with */
    mca_btl_ugni_module_t *btl;

    /** uGNI endpoint handles for the SMSG (short message) and RDMA paths */
    gni_ep_handle_t smsg_ep_handle;
    gni_ep_handle_t rdma_ep_handle;

    /** SMSG attributes received from the remote peer */
    mca_btl_ugni_endpoint_attr_t remote_attr;

    /** SMSG mailbox assigned to this endpoint */
    struct mca_btl_ugni_smsg_mbox_t *mailbox;

    /** fragments waiting to be sent on this endpoint */
    opal_list_t frag_wait_list;
    bool wait_listed;

    int32_t smsg_progressing;

    /** index of this endpoint in the module's endpoint pointer array */
    int index;
} mca_btl_base_endpoint_t;

typedef mca_btl_base_endpoint_t mca_btl_ugni_endpoint_t;
OBJ_CLASS_DECLARATION(mca_btl_ugni_endpoint_t);
int mca_btl_ugni_ep_connect_progress (mca_btl_ugni_endpoint_t *ep);
int mca_btl_ugni_ep_disconnect (mca_btl_ugni_endpoint_t *ep, bool send_disconnect);
/**
 * Allocate and initialize an endpoint for a remote process.  The endpoint is
 * created in the INIT state; the uGNI connection itself is set up later by
 * mca_btl_ugni_ep_connect_progress().
 */
static inline int mca_btl_ugni_init_ep (mca_btl_ugni_module_t *ugni_module,
                                        mca_btl_ugni_endpoint_t **ep,
                                        mca_btl_ugni_module_t *btl,
                                        opal_proc_t *peer_proc) {
    mca_btl_ugni_endpoint_t *endpoint;

    endpoint = OBJ_NEW(mca_btl_ugni_endpoint_t);
    assert (endpoint != NULL);

    endpoint->smsg_progressing = 0;
    endpoint->state = MCA_BTL_UGNI_EP_STATE_INIT;

    endpoint->btl = btl;
    endpoint->peer_proc = peer_proc;
    endpoint->common = NULL;
    endpoint->index = opal_pointer_array_add (&ugni_module->endpoints, endpoint);

    *ep = endpoint;

    return OPAL_SUCCESS;
}
/**
 * Tear down any connection state, remove the endpoint from the module's
 * endpoint array, and release it.
 */
static inline void mca_btl_ugni_release_ep (mca_btl_ugni_endpoint_t *ep) {
    int rc;

    if (ep->common) {
        opal_mutex_lock (&ep->lock);

        rc = mca_btl_ugni_ep_disconnect (ep, false);
        if (OPAL_UNLIKELY(OPAL_SUCCESS != rc)) {
            BTL_VERBOSE(("btl/ugni error disconnecting endpoint"));
        }

        /* TODO -- Clear space at the end of the endpoint array */
        opal_pointer_array_set_item (&ep->btl->endpoints, ep->index, NULL);

        opal_mutex_unlock (&ep->lock);

        opal_common_ugni_endpoint_return (ep->common);
    }

    OBJ_RELEASE(ep);
}
static inline int mca_btl_ugni_check_endpoint_state (mca_btl_ugni_endpoint_t *ep) {
    int rc;

    if (OPAL_LIKELY(MCA_BTL_UGNI_EP_STATE_CONNECTED == ep->state)) {
        return OPAL_SUCCESS;
    }

    opal_mutex_lock (&ep->lock);

    switch (ep->state) {
    case MCA_BTL_UGNI_EP_STATE_INIT:
        rc = mca_btl_ugni_ep_connect_progress (ep);
        if (OPAL_SUCCESS != rc) {
            break;
        }
        /* fall through: the connection has been started but is not yet complete */
    case MCA_BTL_UGNI_EP_STATE_CONNECTING:
        rc = OPAL_ERR_RESOURCE_BUSY;
        break;
    default:
        rc = OPAL_SUCCESS;
    }

    opal_mutex_unlock (&ep->lock);

    return rc;
}
static inline int mca_btl_ugni_wildcard_ep_post (mca_btl_ugni_module_t *ugni_module) {
    gni_return_t rc;

    BTL_VERBOSE(("posting wildcard datagram"));

    memset (&ugni_module->wc_local_attr, 0, sizeof (ugni_module->wc_local_attr));
    memset (&ugni_module->wc_remote_attr, 0, sizeof (ugni_module->wc_remote_attr));
    rc = GNI_EpPostDataWId (ugni_module->wildcard_ep, &ugni_module->wc_local_attr,
                            sizeof (ugni_module->wc_local_attr), &ugni_module->wc_remote_attr,
                            sizeof (ugni_module->wc_remote_attr), MCA_BTL_UGNI_CONNECT_WILDCARD_ID);

    return opal_common_rc_ugni_to_opal (rc);
}

#endif /* MCA_BTL_UGNI_ENDPOINT_H */