This commit was SVN r5573.
Tim Woodall 2005-05-02 16:40:25 +00:00
parent 350d7786b4
commit 8c8c3eb86e
16 changed files with 749 additions and 1372 deletions

View file

@@ -24,10 +24,10 @@ libmca_ptl_ib_la_SOURCES = \
 ptl_ib.h \
 ptl_ib_addr.h \
 ptl_ib_component.c \
-ptl_ib_priv.c \
-ptl_ib_priv.h \
 ptl_ib_proc.c \
 ptl_ib_proc.h \
+ptl_ib_priv.c \
+ptl_ib_priv.h \
 ptl_ib_peer.c \
 ptl_ib_peer.h \
 ptl_ib_recvfrag.c \

View file

@@ -57,93 +57,23 @@ mca_ptl_ib_module_t mca_ptl_ib_module = {
     }
 };
 
-/*
- * 1. RDMA local buffer to remote buffer address.
- * 2. Generate a FIN
- */
-int mca_ptl_ib_put( struct mca_ptl_base_module_t* ptl,
-    struct mca_ptl_base_peer_t* ptl_peer,
-    struct mca_pml_base_send_request_t* req, size_t offset,
-    size_t size, int flags)
-{
-    int rc;
-    mca_ptl_ib_send_frag_t *send_frag, *send_frag_fin;
-    mca_ptl_ib_state_t *ib_state;
-    mca_ptl_ib_peer_conn_t *peer_conn;
-    void *local_addr, *remote_addr;
-    VAPI_rkey_t rkey;
-
-    /* RDMA the data over to the peer */
-    send_frag = mca_ptl_ib_alloc_send_frag(ptl, req);
-    if(NULL == send_frag) {
-        ompi_output(0, "Unable to allocate send descriptor");
-        return OMPI_ERROR;
-    }
-
-    A_PRINT("IB put to %p, rkey : %d",
-        req->req_peer_addr.pval,
-        *(VAPI_rkey_t *)(((mca_ptl_ib_send_request_t *)req)->req_buf));
-
-    ib_state = ((mca_ptl_ib_module_t *)ptl)->ib_state;
-    peer_conn = ((mca_ptl_ib_peer_t *)ptl_peer)->peer_conn;
-    local_addr = (void*) ((char*) req->req_base.req_addr + offset);
-    remote_addr = (void*) req->req_peer_addr.pval;
-    rkey = *(VAPI_rkey_t *)(((mca_ptl_ib_send_request_t *)req)->req_buf);
-
-    rc = mca_ptl_ib_rdma_write(ib_state, peer_conn,
-        &send_frag->ib_buf, local_addr, size, remote_addr, rkey,
-        (void*) send_frag);
-    if(rc != OMPI_SUCCESS) {
-        return OMPI_ERROR;
-    }
-
-    /* Send FIN to receiver */
-#if 0
-    send_frag_fin = mca_ptl_ib_alloc_send_frag(ptl, req);
-    if(NULL == send_frag_fin) {
-        ompi_output(0, "Unable to allocate send descriptor");
-        return OMPI_ERROR;
-    }
-
-    rc = mca_ptl_ib_put_frag_init(send_frag_fin, ptl_peer,
-        req, offset, &size, flags);
-    if(rc != OMPI_SUCCESS) {
-        return rc;
-    }
-#endif
-
-    rc = mca_ptl_ib_put_frag_init(send_frag, ptl_peer,
-        req, offset, &size, flags);
-    if(rc != OMPI_SUCCESS) {
-        return rc;
-    }
-
-    /* Update offset */
-    req->req_offset += size;
-
-    rc = mca_ptl_ib_peer_send(ptl_peer, send_frag);
-    return rc;
-}
-
-int mca_ptl_ib_add_procs(struct mca_ptl_base_module_t* base_module,
-    size_t nprocs, struct ompi_proc_t **ompi_procs,
-    struct mca_ptl_base_peer_t** peers, ompi_bitmap_t* reachable)
+int mca_ptl_ib_add_procs(
+    struct mca_ptl_base_module_t* ptl,
+    size_t nprocs,
+    struct ompi_proc_t **ompi_procs,
+    struct mca_ptl_base_peer_t** peers,
+    ompi_bitmap_t* reachable)
 {
+    mca_ptl_ib_module_t* ib_ptl = (mca_ptl_ib_module_t*)ptl;
     int i, rc;
-    struct ompi_proc_t* ompi_proc;
-    mca_ptl_ib_proc_t* module_proc;
-    mca_ptl_base_peer_t* module_peer;
 
     for(i = 0; i < nprocs; i++) {
-        ompi_proc = ompi_procs[i];
-        module_proc = mca_ptl_ib_proc_create(ompi_proc);
-
-        if(NULL == module_proc) {
+        struct ompi_proc_t* ompi_proc = ompi_procs[i];
+        mca_ptl_ib_proc_t* ib_proc;
+        mca_ptl_base_peer_t* ib_peer;
+
+        if(NULL == (ib_proc = mca_ptl_ib_proc_create(ompi_proc))) {
             return OMPI_ERR_OUT_OF_RESOURCE;
         }
@@ -153,35 +83,29 @@ int mca_ptl_ib_add_procs(struct mca_ptl_base_module_t* base_module,
          * don't bind this PTL instance to the proc.
          */
-        OMPI_THREAD_LOCK(&module_proc->proc_lock);
-
-        if(module_proc->proc_addr_count == module_proc->proc_peer_count) {
-            OMPI_THREAD_UNLOCK(&module_proc->proc_lock);
-            return OMPI_ERR_UNREACH;
-        }
+        OMPI_THREAD_LOCK(&ib_proc->proc_lock);
 
         /* The ptl_proc datastructure is shared by all IB PTL
          * instances that are trying to reach this destination.
          * Cache the peer instance on the ptl_proc.
         */
-        module_peer = OBJ_NEW(mca_ptl_ib_peer_t);
-
-        if(NULL == module_peer) {
+        ib_peer = OBJ_NEW(mca_ptl_ib_peer_t);
+        if(NULL == ib_peer) {
             OMPI_THREAD_UNLOCK(&module_proc->proc_lock);
             return OMPI_ERR_OUT_OF_RESOURCE;
         }
 
-        module_peer->peer_module = (mca_ptl_ib_module_t*)base_module;
-
-        rc = mca_ptl_ib_proc_insert(module_proc, module_peer);
+        ib_peer->peer_ptl = ib_ptl;
+        rc = mca_ptl_ib_proc_insert(ib_proc, ib_peer);
         if(rc != OMPI_SUCCESS) {
-            OBJ_RELEASE(module_peer);
+            OBJ_RELEASE(ib_peer);
             OMPI_THREAD_UNLOCK(&module_proc->proc_lock);
-            return rc;
+            continue;
         }
         ompi_bitmap_set_bit(reachable, i);
         OMPI_THREAD_UNLOCK(&module_proc->proc_lock);
-        peers[i] = module_peer;
+        peers[i] = ib_peer;
     }
 
     return OMPI_SUCCESS;
@@ -205,36 +129,28 @@ int mca_ptl_ib_finalize(struct mca_ptl_base_module_t* ptl)
 }
 
 int mca_ptl_ib_request_init( struct mca_ptl_base_module_t* ptl,
     struct mca_pml_base_send_request_t* request)
 {
-#if 0
-    mca_ptl_ib_send_request_t *ib_send_req;
-    mca_ptl_ib_send_frag_t *ib_send_frag;
-
-    A_PRINT("");
-
-    ib_send_frag = mca_ptl_ib_alloc_send_frag(ptl,
-        request);
-
-    if(NULL == ib_send_frag) {
-        D_PRINT("Unable to allocate ib_send_frag");
-        return OMPI_ERR_OUT_OF_RESOURCE;
-    } else {
-        ib_send_req = (mca_ptl_ib_send_request_t *) request;
-        ib_send_req->req_frag = ib_send_frag;
-        memset(ib_send_req->req_buf, 7, 8);
+    mca_ptl_ib_module_t* ib_ptl = (mca_ptl_ib_module_t*)ptl;
+    mca_ptl_ib_send_frag_t* sendfrag;
+    ompi_list_item_t* item;
+    int rc;
+
+    OMPI_FREE_LIST_GET(&ib_ptl->send_free, item, rc);
+    if(NULL == (sendfrag = (mca_ptl_ib_send_frag_t*)item)) {
+        return rc;
     }
-#endif
-    return OMPI_ERROR;
+    ((mca_ptl_ib_send_request_t*) request)->req_frag = sendfrag;
+    return OMPI_SUCCESS;
 }
 
 void mca_ptl_ib_request_fini( struct mca_ptl_base_module_t* ptl,
     struct mca_pml_base_send_request_t* request)
 {
-    D_PRINT("");
-    OBJ_DESTRUCT(request+1);
+    mca_ptl_ib_module_t* ib_ptl = (mca_ptl_ib_module_t*)ptl;
+    mca_ptl_ib_send_request_t* sendreq = (mca_ptl_ib_send_request_t*)request;
+    OMPI_FREE_LIST_RETURN(&ib_ptl->send_free, (ompi_list_item_t*)sendreq->req_frag);
 }
 
 /*
@@ -251,58 +167,138 @@ int mca_ptl_ib_send( struct mca_ptl_base_module_t* ptl,
     size_t size,
     int flags)
 {
+    mca_ptl_ib_module_t* ib_ptl = (mca_ptl_ib_module_t*)ptl;
     mca_ptl_ib_send_frag_t* sendfrag;
-    mca_ptl_ib_send_request_t *ib_send_req;
+    mca_ptl_base_header_t *hdr;
+    size_t hdr_length;
     int rc = OMPI_SUCCESS;
 
-    sendfrag = mca_ptl_ib_alloc_send_frag(ptl,
-        sendreq);
-
-    if(NULL == sendfrag) {
-        D_PRINT("Unable to allocate ib_send_frag");
-        return OMPI_ERR_OUT_OF_RESOURCE;
+    if(sendreq->req_cached) {
+        sendfrag = ((mca_ptl_ib_send_request_t*)sendreq)->req_frag;
     } else {
-        ib_send_req = (mca_ptl_ib_send_request_t *) sendreq;
-        ib_send_req->req_frag = sendfrag;
-        memset(ib_send_req->req_buf, 7, 8);
-    }
-
-#if 0
-    if (0 == offset) {
-        sendfrag = (mca_ptl_ib_send_frag_t *)
-            ((mca_ptl_ib_send_request_t*)sendreq)->req_frag;
-    } else {
-        /* Implementation for messages > frag size */
-        sendfrag = mca_ptl_ib_alloc_send_frag(ptl,
-            sendreq);
-        if(NULL == sendfrag) {
-            ompi_output(0,"Unable to allocate send fragment");
+        ompi_list_item_t* item;
+        OMPI_FREE_LIST_GET(&ib_ptl->send_free, item, rc);
+        if(NULL == (sendfrag = (mca_ptl_ib_send_frag_t*)item)) {
+            return rc;
         }
     }
-#endif
 
-    rc = mca_ptl_ib_send_frag_init(sendfrag, ptl_peer,
-        sendreq, offset, &size, flags);
-    if(rc != OMPI_SUCCESS) {
-        return rc;
+    /* initialize convertor */
+    if(size > 0) {
+        ompi_convertor_t *convertor;
+        int rc, freeAfter;
+        unsigned int iov_count, max_data;
+        struct iovec iov;
+
+        /* first fragment (eager send) and first fragment of long
+         * protocol can use the convertor initialized on the request,
+         * remaining fragments must copy/reinit the convertor as the
+         * transfer could be in parallel.
+         */
+        if( offset <= mca_ptl_ib_module.super.ptl_first_frag_size ) {
+            convertor = &sendreq->req_convertor;
+        } else {
+            convertor = &sendfrag->frag_send.frag_base.frag_convertor;
+            ompi_convertor_copy(&sendreq->req_convertor, convertor);
+            ompi_convertor_init_for_send( convertor,
+                0,
+                sendreq->req_base.req_datatype,
+                sendreq->req_base.req_count,
+                sendreq->req_base.req_addr,
+                offset,
+                NULL );
+        }
+
+        /* if data is contigous, convertor will return an offset
+         * into users buffer - otherwise will return an allocated buffer
+         * that holds the packed data
+         */
+        if((flags & MCA_PTL_FLAGS_ACK) == 0) {
+            iov.iov_base = &sendfrag->ib_buf.buf[sizeof(mca_ptl_base_match_header_t)];
+        } else {
+            iov.iov_base = &sendfrag->ib_buf.buf[sizeof(mca_ptl_base_rendezvous_header_t)];
+        }
+        iov.iov_len = size;
+        iov_count = 1;
+        max_data = size;
+        if((rc = ompi_convertor_pack(convertor,&iov, &iov_count, &max_data, &freeAfter)) < 0) {
+            ompi_output(0, "Unable to pack data");
+            return rc;
+        }
+
+        /* adjust size to reflect actual number of bytes packed by convertor */
+        size = iov.iov_len;
+        sendfrag->frag_send.frag_base.frag_addr = iov.iov_base;
+        sendfrag->frag_send.frag_base.frag_size = iov.iov_len;
+    } else {
+        sendfrag->frag_send.frag_base.frag_addr = NULL;
+        sendfrag->frag_send.frag_base.frag_size = 0;
+    }
+
+    /* fragment state */
+    sendfrag->frag_send.frag_base.frag_owner = &ptl_peer->peer_ptl->super;
+    sendfrag->frag_send.frag_request = sendreq;
+    sendfrag->frag_send.frag_base.frag_peer = ptl_peer;
+    sendfrag->frag_progressed = 0;
+
+    /* Initialize header */
+    hdr = (mca_ptl_base_header_t *) &sendfrag->ib_buf.buf[0];
+    hdr->hdr_common.hdr_flags = flags;
+    hdr->hdr_match.hdr_contextid = sendreq->req_base.req_comm->c_contextid;
+    hdr->hdr_match.hdr_src = sendreq->req_base.req_comm->c_my_rank;
+    hdr->hdr_match.hdr_dst = sendreq->req_base.req_peer;
+    hdr->hdr_match.hdr_tag = sendreq->req_base.req_tag;
+    hdr->hdr_match.hdr_msg_length = sendreq->req_bytes_packed;
+    hdr->hdr_match.hdr_msg_seq = sendreq->req_base.req_sequence;
+    if((flags & MCA_PTL_FLAGS_ACK) == 0) {
+        hdr->hdr_common.hdr_type = MCA_PTL_HDR_TYPE_MATCH;
+        hdr_length = sizeof(mca_ptl_base_match_header_t);
+    } else {
+        hdr->hdr_common.hdr_type = MCA_PTL_HDR_TYPE_MATCH;
+        hdr->hdr_rndv.hdr_frag_length = sendfrag->frag_send.frag_base.frag_size;
+        hdr->hdr_rndv.hdr_src_ptr.lval = 0; /* for VALGRIND/PURIFY - REPLACE WITH MACRO */
+        hdr->hdr_rndv.hdr_src_ptr.pval = sendfrag;
+        hdr_length = sizeof(mca_ptl_base_rendezvous_header_t);
     }
 
     /* Update the offset after actual fragment size is determined,
      * and before attempting to send the fragment */
     sendreq->req_offset += size;
 
-    rc = mca_ptl_ib_peer_send(ptl_peer, sendfrag);
-
-    return rc;
+    IB_SET_SEND_DESC_LEN((&sendfrag->ib_buf), (hdr_length + size));
+    if(OMPI_SUCCESS != (rc = mca_ptl_ib_peer_send(ptl_peer, sendfrag))) {
+        return rc;
+    }
+
+    /* if this is the entire message - signal request is complete */
+    if(sendreq->req_bytes_packed == size) {
+        ompi_request_complete( &(sendreq->req_base.req_ompi) );
+    }
+    return OMPI_SUCCESS;
+}
+
+/*
+ * RDMA local buffer to remote buffer address.
+ */
+
+int mca_ptl_ib_put( struct mca_ptl_base_module_t* ptl,
+    struct mca_ptl_base_peer_t* ptl_peer,
+    struct mca_pml_base_send_request_t* req, size_t offset,
+    size_t size, int flags)
+{
+    return OMPI_ERR_NOT_IMPLEMENTED;
 }
 
-static void mca_ptl_ib_start_ack(mca_ptl_base_module_t *module,
-    mca_ptl_ib_send_frag_t *send_frag,
-    mca_ptl_ib_recv_frag_t *recv_frag)
+/*
+ * On a match send an ack to the peer.
+ */
+
+static void mca_ptl_ib_ack(
+    mca_ptl_ib_module_t *ib_ptl,
+    mca_ptl_ib_send_frag_t *send_frag,
+    mca_ptl_ib_recv_frag_t *recv_frag)
 {
     mca_ptl_base_header_t *hdr;
     mca_pml_base_recv_request_t *request;
@@ -312,8 +308,6 @@ static void mca_ptl_ib_start_ack(mca_ptl_base_module_t *module,
     int len_to_reg, len_added = 0;
     void *addr_to_reg, *ack_buf;
 
-    A_PRINT("");
-
     /* Header starts at beginning of registered
      * buffer space */
@@ -353,8 +347,7 @@ static void mca_ptl_ib_start_ack(mca_ptl_base_module_t *module,
         sizeof(mca_ptl_base_ack_header_t));
 
     /* Prepare ACK packet with IB specific stuff */
-    mca_ptl_ib_prepare_ack(((mca_ptl_ib_module_t *)module)->ib_state,
-        addr_to_reg, len_to_reg,
+    mca_ptl_ib_prepare_ack(ib_ptl, addr_to_reg, len_to_reg,
         ack_buf, &len_added);
 
     /* Send it right away! */
@@ -366,12 +359,10 @@ static void mca_ptl_ib_start_ack(mca_ptl_base_module_t *module,
     IB_SET_SEND_DESC_LEN(ib_buf,
         (sizeof(mca_ptl_base_ack_header_t) + len_added));
 
-    mca_ptl_ib_post_send(((mca_ptl_ib_module_t *)module)->ib_state,
-        ib_peer->peer_conn,
-        &send_frag->ib_buf, send_frag);
+    mca_ptl_ib_post_send(ib_ptl, ib_peer, &send_frag->ib_buf, send_frag);
 
     /* fragment state */
-    send_frag->frag_send.frag_base.frag_owner = module;
+    send_frag->frag_send.frag_base.frag_owner = &ib_ptl->super;
     send_frag->frag_send.frag_base.frag_peer = recv_frag->super.frag_base.frag_peer;
     send_frag->frag_send.frag_base.frag_addr = NULL;
     send_frag->frag_send.frag_base.frag_size = 0;
@@ -383,9 +374,11 @@ static void mca_ptl_ib_start_ack(mca_ptl_base_module_t *module,
  * data to user buffer
  */
-void mca_ptl_ib_matched(mca_ptl_base_module_t* module,
+void mca_ptl_ib_matched(
+    mca_ptl_base_module_t* ptl,
     mca_ptl_base_recv_frag_t* frag)
 {
+    mca_ptl_ib_module_t* ib_ptl = (mca_ptl_ib_module_t*)ptl;
     mca_pml_base_recv_request_t *request;
     mca_ptl_base_header_t *header;
     mca_ptl_ib_recv_frag_t *recv_frag;
@@ -398,13 +391,11 @@ void mca_ptl_ib_matched(mca_ptl_base_module_t* module,
     if (header->hdr_common.hdr_flags & MCA_PTL_FLAGS_ACK) {
         mca_ptl_ib_send_frag_t *send_frag;
-
-        send_frag = mca_ptl_ib_alloc_send_frag(module, NULL);
+        send_frag = mca_ptl_ib_alloc_send_frag(ib_ptl, NULL);
         if(NULL == send_frag) {
             ompi_output(0, "Cannot get send descriptor");
         } else {
-            mca_ptl_ib_start_ack(module, send_frag, recv_frag);
+            mca_ptl_ib_ack(ib_ptl, send_frag, recv_frag);
         }
     }
@@ -419,7 +410,7 @@ void mca_ptl_ib_matched(mca_ptl_base_module_t* module,
      * unex_buffer to application buffer */
     if ((header->hdr_common.hdr_type & MCA_PTL_HDR_TYPE_MATCH) &&
-        (header->hdr_rndv.hdr_frag_length > 0)) {
+        (header->hdr_match.hdr_msg_length > 0)) {
         struct iovec iov;
         ompi_proc_t *proc;
         unsigned int iov_count, max_data;
@@ -429,7 +420,7 @@ void mca_ptl_ib_matched(mca_ptl_base_module_t* module,
         iov.iov_len = frag->frag_base.frag_size;
 
         proc = ompi_comm_peer_lookup(request->req_base.req_comm,
-            request->req_base.req_peer);
+            request->req_base.req_ompi.req_status.MPI_SOURCE);
 
         ompi_convertor_copy(proc->proc_convertor, &frag->frag_base.frag_convertor);
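Side note on the reworked send path above: the header type chosen from MCA_PTL_FLAGS_ACK determines how many header bytes precede the packed payload in the IB buffer. A minimal, self-contained C sketch of that length arithmetic — the struct names here are simplified stand-ins, not the real OMPI headers:

#include <stddef.h>
#include <stdint.h>

/* Simplified stand-ins for the match/rendezvous headers used above. */
typedef struct { uint16_t ctx, src, dst, tag; uint64_t msg_len; } match_hdr_t;
typedef struct { match_hdr_t m; uint64_t frag_len; void *src_ptr; } rndv_hdr_t;

#define FLAG_ACK 0x1   /* stand-in for MCA_PTL_FLAGS_ACK */

/* Eager sends carry only a match header; sends requesting an ACK
 * (rendezvous) carry the larger header, exactly as mca_ptl_ib_send
 * selects hdr_length above. */
static size_t hdr_length_for(int flags)
{
    return (flags & FLAG_ACK) ? sizeof(rndv_hdr_t) : sizeof(match_hdr_t);
}

/* Total bytes handed to IB_SET_SEND_DESC_LEN: header plus packed data. */
static size_t send_desc_len(int flags, size_t packed_size)
{
    return hdr_length_for(flags) + packed_size;
}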

View file

@@ -54,17 +54,13 @@
 struct mca_ptl_ib_component_t {
     mca_ptl_base_component_1_0_0_t super;
     /**< base PTL component */
 
-    struct mca_ptl_ib_module_t **ib_ptl_modules;
+    uint32_t ib_num_ptls;
+    /**< number of hcas available to the IB component */
+
+    struct mca_ptl_ib_module_t *ib_ptls;
     /**< array of available PTLs */
 
-    uint32_t ib_num_ptl_modules;
-    /**< number of ptl modules actually used */
-
-    uint32_t ib_max_ptl_modules;
-    /**< maximum number of ptls */
-
     int ib_free_list_num;
     /**< initial size of free lists */
@@ -95,9 +91,6 @@ struct mca_ptl_ib_component_t {
     ompi_mutex_t ib_lock;
     /**< lock for accessing module state */
 
-    uint32_t ib_num_hcas;
-    /**< number of hcas available to the IB component */
-
     int ib_mem_registry_hints_log_size;
     /**< log2 size of hints hash array used by memory registry */
 };
@@ -110,18 +103,19 @@ extern mca_ptl_ib_component_t mca_ptl_ib_component;
  * IB PTL Interface
  */
 struct mca_ptl_ib_module_t {
-    mca_ptl_base_module_t super;
-    /**< base PTL interface */
-
-    mca_ptl_ib_state_t *ib_state;
-    /* IB state holds info about queue handles, HCA handles,
-     * protection domain etc. which are private to this module */
-
-    ompi_free_list_t send_free;
-    /**< free list of send buffer descriptors */
-
-    ompi_free_list_t recv_free;
-    /**< free list of recv buffer descriptors */
+    mca_ptl_base_module_t super;              /**< base PTL interface */
+    VAPI_hca_id_t hca_id;                     /**< ID of HCA */
+    VAPI_hca_port_t port;                     /**< IB port of this PTL */
+    VAPI_hca_hndl_t nic;                      /**< NIC handle */
+    VAPI_pd_hndl_t ptag;                      /**< Protection Domain tag */
+    VAPI_cq_hndl_t cq_hndl;                   /**< Completion Queue handle */
+    EVAPI_async_handler_hndl_t async_handler;
+    /**< Async event handler used to detect weird/unknown events */
+    mca_ptl_ib_mem_registry_t mem_registry;   /**< registry of memory regions */
+    ompi_free_list_t send_free;               /**< free list of send buffer descriptors */
+    ompi_list_t repost;                       /**< list of buffers to repost */
 };
 
 typedef struct mca_ptl_ib_module_t mca_ptl_ib_module_t;
@@ -164,8 +158,8 @@ extern int mca_ptl_ib_component_close(void);
  */
 extern mca_ptl_base_module_t** mca_ptl_ib_component_init(
     int *num_ptl_modules,
-    bool *allow_multi_user_threads,
-    bool *have_hidden_threads
+    bool allow_multi_user_threads,
+    bool have_hidden_threads
 );
 
 /**

View file

@@ -16,7 +16,7 @@
  * $HEADER$
  */
 
-#include <hh_common.h>
+/* #include <hh_common.h> */
 
 /* Open MPI includes */
 #include "ompi_config.h"
@@ -30,10 +30,10 @@
 #include "mca/pml/base/pml_base_sendreq.h"
 #include "mca/base/mca_base_param.h"
 #include "mca/base/mca_base_module_exchange.h"
+#include "mca/errmgr/errmgr.h"
 
 /* IB ptl includes */
 #include "ptl_ib.h"
-#include "ptl_ib_priv.h"
 
 mca_ptl_ib_component_t mca_ptl_ib_component = {
@@ -101,7 +101,7 @@ static inline int mca_ptl_ib_param_register_int(
 int mca_ptl_ib_component_open(void)
 {
-    /* register super component parameters */
+    /* register component parameters */
     mca_ptl_ib_module.super.ptl_exclusivity =
         mca_ptl_ib_param_register_int ("exclusivity", 0);
@@ -120,7 +120,7 @@ int mca_ptl_ib_component_open(void)
     /* register IB component parameters */
     mca_ptl_ib_component.ib_free_list_num =
-        mca_ptl_ib_param_register_int ("free_list_num", 64);
+        mca_ptl_ib_param_register_int ("free_list_num", 8);
     mca_ptl_ib_component.ib_free_list_max =
         mca_ptl_ib_param_register_int ("free_list_max", 1024);
     mca_ptl_ib_component.ib_free_list_inc =
@@ -128,8 +128,10 @@ int mca_ptl_ib_component_open(void)
     mca_ptl_ib_component.ib_mem_registry_hints_log_size =
         mca_ptl_ib_param_register_int ("hints_log_size", 8);
 
+    /* initialize global state */
+    mca_ptl_ib_component.ib_num_ptls=0;
+    mca_ptl_ib_component.ib_ptls=NULL;
     OBJ_CONSTRUCT(&mca_ptl_ib_component.ib_procs, ompi_list_t);
     OBJ_CONSTRUCT (&mca_ptl_ib_component.ib_recv_frags, ompi_free_list_t);
     return OMPI_SUCCESS;
@@ -146,16 +148,6 @@ int mca_ptl_ib_component_close(void)
     return OMPI_SUCCESS;
 }
 
-/*
- * Register IB component addressing information. The MCA framework
- * will make this available to all peers.
- */
-
-static int mca_ptl_ib_component_send(void)
-{
-    return OMPI_SUCCESS;
-}
-
 /*
  * IB component initialization:
  * (1) read interface list from kernel and compare against component parameters
@@ -167,16 +159,47 @@ mca_ptl_base_module_t** mca_ptl_ib_component_init(int *num_ptl_modules,
     bool enable_progress_threads,
     bool enable_mpi_threads)
 {
-    mca_ptl_base_module_t **modules;
     VAPI_ret_t vapi_ret;
+    VAPI_hca_id_t* hca_ids;
+    mca_ptl_base_module_t** ptls;
     int i, ret;
-    mca_ptl_ib_module_t* ib_modules = NULL;
 
     /* initialization */
     *num_ptl_modules = 0;
-    mca_ptl_ib_component.ib_num_hcas=0;
 
-    /* Initialize Receive fragments */
+    /* query the list of available hcas */
+    vapi_ret=EVAPI_list_hcas(0, &(mca_ptl_ib_component.ib_num_ptls), NULL);
+    if( VAPI_EAGAIN != vapi_ret || 0 == mca_ptl_ib_component.ib_num_ptls ) {
+        ompi_output(0,"Warning: no IB HCAs found\n");
+        return NULL;
+    }
+
+    hca_ids = (VAPI_hca_id_t*) malloc(mca_ptl_ib_component.ib_num_ptls * sizeof(VAPI_hca_id_t));
+    if(NULL == hca_ids) {
+        ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
+        return NULL;
+    }
+
+    vapi_ret=EVAPI_list_hcas(mca_ptl_ib_component.ib_num_ptls, &mca_ptl_ib_component.ib_num_ptls, hca_ids);
+    if( VAPI_OK != vapi_ret ) {
+        ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
+        return NULL;
+    }
+
+    /* Allocate space for ptl modules */
+    mca_ptl_ib_component.ib_ptls = (mca_ptl_ib_module_t*) malloc(sizeof(mca_ptl_ib_module_t) *
+        mca_ptl_ib_component.ib_num_ptls);
+    if(NULL == mca_ptl_ib_component.ib_ptls) {
+        ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
+        return NULL;
+    }
+    ptls = (struct mca_ptl_base_module_t**)
+        malloc(mca_ptl_ib_component.ib_num_ptls * sizeof(struct mca_ptl_ib_module_t*));
+    if(NULL == ptls) {
+        ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
+        return NULL;
+    }
+
+    /* Initialize pool of receive fragments */
     ompi_free_list_init (&(mca_ptl_ib_component.ib_recv_frags),
         sizeof (mca_ptl_ib_recv_frag_t),
         OBJ_CLASS (mca_ptl_ib_recv_frag_t),
@@ -184,76 +207,18 @@ mca_ptl_base_module_t** mca_ptl_ib_component_init(int *num_ptl_modules,
         mca_ptl_ib_component.ib_free_list_max,
         mca_ptl_ib_component.ib_free_list_inc, NULL);
 
-    /* figure out how many HCA's are available for use - don't allocate
-     * any resrouces at this stage. */
-    vapi_ret=EVAPI_list_hcas(0,&(mca_ptl_ib_component.ib_num_hcas),
-        NULL);
-    if( HH_EAGAIN != vapi_ret ) {
-        ompi_output(0, "mca_ptl_ib_component_init: "
-            "Unexpect return from EVAPI_list_hcas - %s\n",
-            VAPI_strerror(vapi_ret));
-        return NULL;
-    }
-
-    if( 0 == mca_ptl_ib_component.ib_num_hcas ) {
-        ompi_output(0,"Warniing :: mca_ptl_ib_component_init: "
-            " No IB devices found \n");
-        return NULL;
-    }
-
-    /* Number of InfiniBand PTLs is equal to
-     * number of physical HCAs. Is this always the
-     * case, or under some conditions, there can be
-     * multiple PTLs for one HCA? */
-    mca_ptl_ib_component.ib_num_ptl_modules =
-        mca_ptl_ib_component.ib_num_hcas;
-
-    /* Not sure what max_ptl_modules does */
-    mca_ptl_ib_component.ib_max_ptl_modules =
-        mca_ptl_ib_component.ib_num_hcas;
-
-    /* Allocate space for number of modules available
-     * to this component */
-    ib_modules = (mca_ptl_ib_module_t*) malloc(sizeof(mca_ptl_ib_module_t) *
-        mca_ptl_ib_component.ib_num_ptl_modules);
-    if(NULL == ib_modules) {
-        return NULL;
-    }
-
-    /* Zero out the PTL struct memory region */
-    memset((void*)ib_modules, 0, sizeof(mca_ptl_ib_module_t) *
-        mca_ptl_ib_component.ib_num_ptl_modules);
-
-    /* Copy the function pointers to the IB modules */
-    for(i = 0; i < mca_ptl_ib_component.ib_num_ptl_modules; i++) {
-        memcpy((void*)&ib_modules[i],
-            &mca_ptl_ib_module,
-            sizeof(mca_ptl_ib_module));
-    }
-
-    /* For each module, Initialize! */
-    for(i = 0; i < mca_ptl_ib_component.ib_num_ptl_modules; i++) {
-
-        /* Allocate space for the state of the IB module */
-        ib_modules[i].ib_state = malloc(sizeof(mca_ptl_ib_state_t));
-        if(NULL == ib_modules[i].ib_state) {
-            return NULL;
-        }
-
-        if(mca_ptl_ib_init_module(ib_modules[i].ib_state, i)
-            != OMPI_SUCCESS) {
-            return NULL;
-        }
-
-        /* Find a better place for this */
-        OBJ_CONSTRUCT(&(ib_modules[i].send_free), ompi_free_list_t);
-        A_PRINT("Free list addr : %p", &ib_modules[i].send_free);
-        OBJ_CONSTRUCT(&(ib_modules[i].recv_free), ompi_free_list_t);
-
-        ompi_free_list_init(&(ib_modules[i].send_free),
+    /* Initialize each module */
+    for(i = 0; i < mca_ptl_ib_component.ib_num_ptls; i++) {
+        mca_ptl_ib_module_t* ib_ptl = &mca_ptl_ib_component.ib_ptls[i];
+
+        /* Initialize the modules function pointers */
+        memcpy(ib_ptl, &mca_ptl_ib_module, sizeof(mca_ptl_ib_module));
+
+        /* Initialize module state */
+        OBJ_CONSTRUCT(&ib_ptl->send_free, ompi_free_list_t);
+        OBJ_CONSTRUCT(&ib_ptl->repost, ompi_list_t);
+
+        ompi_free_list_init(&ib_ptl->send_free,
             sizeof(mca_ptl_ib_send_frag_t),
             OBJ_CLASS(mca_ptl_ib_send_frag_t),
             mca_ptl_ib_component.ib_free_list_num,
@@ -261,51 +226,27 @@ mca_ptl_base_module_t** mca_ptl_ib_component_init(int *num_ptl_modules,
             mca_ptl_ib_component.ib_free_list_inc,
             NULL);
 
-        /* Initialize the send descriptors */
-        if(mca_ptl_ib_register_send_frags((mca_ptl_base_module_t *) &ib_modules[i])
-            != OMPI_SUCCESS) {
+        memcpy(ib_ptl->hca_id, hca_ids[i], sizeof(ib_ptl->hca_id));
+        if(mca_ptl_ib_module_init(ib_ptl) != OMPI_SUCCESS) {
+            free(hca_ids);
             return NULL;
         }
 
-        DUMP_IB_STATE(ib_modules[i].ib_state);
+        /* Initialize the send descriptors */
+        if(mca_ptl_ib_send_frag_register(ib_ptl) != OMPI_SUCCESS) {
+            free(hca_ids);
+            return NULL;
+        }
+        ptls[i] = &ib_ptl->super;
     }
 
-    /* Post OOB receives */
-    mca_ptl_ib_post_oob_recv_nb();
-
-    /* Allocate list of IB ptl pointers */
-    mca_ptl_ib_component.ib_ptl_modules = (struct mca_ptl_ib_module_t**)
-        malloc(mca_ptl_ib_component.ib_num_ptl_modules *
-            sizeof(struct mca_ptl_ib_module_t*));
-    if(NULL == mca_ptl_ib_component.ib_ptl_modules) {
-        return NULL;
-    }
-
-    /* Set the pointers for all IB ptls */
-    for(i = 0; i < mca_ptl_ib_component.ib_num_ptl_modules; i++) {
-        mca_ptl_ib_component.ib_ptl_modules[i] = &ib_modules[i];
-    }
-
-    if(mca_ptl_ib_component_send() != OMPI_SUCCESS) {
-        return NULL;
-    }
-
-    /* Allocate list of MCA ptl pointers */
-    modules = (mca_ptl_base_module_t**)
-        malloc(mca_ptl_ib_component.ib_num_ptl_modules *
-            sizeof(mca_ptl_base_module_t*));
-    if(NULL == modules) {
-        return NULL;
-    }
-
-    memcpy(modules, mca_ptl_ib_component.ib_ptl_modules,
-        mca_ptl_ib_component.ib_num_ptl_modules *
-        sizeof(mca_ptl_ib_module_t*));
-
-    *num_ptl_modules = mca_ptl_ib_component.ib_num_ptl_modules;
-    return modules;
+    /* Post OOB receive to support dynamic connection setup */
+    mca_ptl_ib_post_recv();
+
+    *num_ptl_modules = mca_ptl_ib_component.ib_num_ptls;
+    free(hca_ids);
+    return ptls;
 }
 /*
@@ -322,79 +263,89 @@ int mca_ptl_ib_component_control(int param, void* value, size_t size)
  * IB component progress.
  */
 
+#define MCA_PTL_IB_DRAIN_NETWORK(nic, cq_hndl, comp_type, comp_addr) \
+{ \
+    VAPI_ret_t ret; \
+    VAPI_wc_desc_t comp; \
+ \
+    ret = VAPI_poll_cq(nic, cq_hndl, &comp); \
+    if(VAPI_OK == ret) { \
+        if(comp.status != VAPI_SUCCESS) { \
+            ompi_output(0, "Got error : %s, Vendor code : %d Frag : %p", \
+                VAPI_wc_status_sym(comp.status), \
+                comp.vendor_err_syndrome, comp.id); \
+            *comp_type = IB_COMP_ERROR; \
+            *comp_addr = NULL; \
+        } else { \
+            if(VAPI_CQE_SQ_SEND_DATA == comp.opcode) { \
+                *comp_type = IB_COMP_SEND; \
+                *comp_addr = (void*) (unsigned long) comp.id; \
+            } else if(VAPI_CQE_RQ_SEND_DATA == comp.opcode) { \
+                *comp_type = IB_COMP_RECV; \
+                *comp_addr = (void*) (unsigned long) comp.id; \
+            } else if(VAPI_CQE_SQ_RDMA_WRITE == comp.opcode) { \
+                *comp_type = IB_COMP_RDMA_W; \
+                *comp_addr = (void*) (unsigned long) comp.id; \
+            } else { \
+                ompi_output(0, "VAPI_poll_cq: returned unknown opcode : %d\n", \
+                    comp.opcode); \
+                *comp_type = IB_COMP_ERROR; \
+                *comp_addr = NULL; \
+            } \
+        } \
+    } else { \
+        /* No completions from the network */ \
+        *comp_type = IB_COMP_NOTHING; \
+        *comp_addr = NULL; \
+    } \
+}
+
 int mca_ptl_ib_component_progress(mca_ptl_tstamp_t tstamp)
 {
-    int i, num_procs, num_modules;
-    ompi_list_item_t *item;
-    mca_ptl_ib_peer_t *peer;
-    mca_ptl_ib_proc_t *proc;
-    mca_ptl_ib_module_t *module;
-    int comp_type = IB_COMP_NOTHING;
-    void* comp_addr;
-
-    num_procs = ompi_list_get_size(&(mca_ptl_ib_component.ib_procs));
-
-    /* Traverse the list of procs associated with the
-     * IB component */
-    item = ompi_list_get_first(&(mca_ptl_ib_component.ib_procs));
-    for(i = 0; i < num_procs;
-        item = ompi_list_get_next(item), i++) {
-        proc = (mca_ptl_ib_proc_t *) item;
-
-        /* We only have one peer per proc right now */
-        peer = (mca_ptl_ib_peer_t *) proc->proc_peers[0];
-
-        if(!ompi_list_is_empty(&(peer->pending_send_frags))) {
-            mca_ptl_ib_progress_send_frags(peer);
-        }
-    }
+    int i;
+    int count = 0;
 
     /* Poll for completions */
-    num_modules = mca_ptl_ib_component.ib_num_ptl_modules;
-    for(i = 0; i < num_modules; i++) {
-        module = mca_ptl_ib_component.ib_ptl_modules[i];
-
-        mca_ptl_ib_drain_network(module->ib_state->nic,
-            module->ib_state->cq_hndl,
-            &comp_type, &comp_addr);
+    for(i = 0; i < mca_ptl_ib_component.ib_num_ptls; i++) {
+        mca_ptl_ib_module_t* ib_ptl = &mca_ptl_ib_component.ib_ptls[i];
+        int comp_type = IB_COMP_NOTHING;
+        void* comp_addr;
+
+        MCA_PTL_IB_DRAIN_NETWORK(ib_ptl->nic, ib_ptl->cq_hndl, &comp_type, &comp_addr);
 
         /* Handle n/w completions */
         switch(comp_type) {
            case IB_COMP_SEND :
-                D_PRINT("Caught a send completion");
 
                 /* Process a completed send */
-                mca_ptl_ib_process_send_comp(
-                    (mca_ptl_base_module_t *) module,
-                    comp_addr);
+                mca_ptl_ib_send_frag_send_complete(ib_ptl, (mca_ptl_ib_send_frag_t*)comp_addr);
+                count++;
                 break;
 
            case IB_COMP_RECV :
-                D_PRINT("Caught a recv completion");
 
                 /* Process incoming receives */
-                mca_ptl_ib_process_recv((mca_ptl_base_module_t *)module,
-                    comp_addr);
+                mca_ptl_ib_process_recv(ib_ptl, comp_addr);
 
                 /* Re post recv buffers */
-                mca_ptl_ib_buffer_repost(module->ib_state->nic,
-                    comp_addr);
+                if(ompi_list_get_size(&ib_ptl->repost) <= 1) {
+                    ompi_list_append(&ib_ptl->repost, (ompi_list_item_t*)comp_addr);
+                } else {
+                    ompi_list_item_t* item;
+                    while(NULL != (item = ompi_list_remove_first(&ib_ptl->repost))) {
+                        mca_ptl_ib_buffer_repost(ib_ptl->nic, item);
+                    }
+                    mca_ptl_ib_buffer_repost(ib_ptl->nic, comp_addr);
+                }
+                count++;
                 break;
 
            case IB_COMP_RDMA_W :
-                mca_ptl_ib_process_rdma_w_comp(
-                    (mca_ptl_base_module_t *) module,
-                    comp_addr);
+                ompi_output(0, "%s:%d RDMA not implemented\n", __FILE__,__LINE__);
+                count++;
                 break;
 
            case IB_COMP_NOTHING:
                 break;
 
           default:
@@ -402,6 +353,6 @@ int mca_ptl_ib_component_progress(mca_ptl_tstamp_t tstamp)
                 break;
         }
     }
-    return OMPI_SUCCESS;
+    return count;
 }
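For readers unfamiliar with the VAPI idiom used in component_init above: EVAPI_list_hcas is called twice, once with a zero-length buffer (which returns VAPI_EAGAIN but reports the HCA count) and once with a buffer of that size. A hedged sketch of the pattern, assuming the Mellanox VAPI/EVAPI headers; error reporting is trimmed:

#include <stdlib.h>
#include <vapi.h>
#include <evapi.h>

/* Probe-then-fetch enumeration of HCA ids; returns a malloc'd array
 * (caller frees) and stores the count in *n, or NULL on failure. */
static VAPI_hca_id_t *list_hcas(u_int32_t *n)
{
    VAPI_hca_id_t *ids;

    /* Pass 1: length 0 only reports how many HCAs exist. */
    if (EVAPI_list_hcas(0, n, NULL) != VAPI_EAGAIN || 0 == *n)
        return NULL;
    if (NULL == (ids = malloc(*n * sizeof(*ids))))
        return NULL;
    /* Pass 2: fetch the ids for real. */
    if (EVAPI_list_hcas(*n, n, ids) != VAPI_OK) {
        free(ids);
        return NULL;
    }
    return ids;
}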

View file

@@ -53,7 +53,7 @@ static void mca_ptl_ib_mem_registry_construct(ompi_object_t *object)
         registry->hints[i].pval = (void *)NULL;
     }
 
-    registry->ib_state = NULL;
+    registry->ib_ptl = NULL;
     registry->evictable = NULL;
 
     return;
@@ -177,7 +177,7 @@ mca_ptl_ib_mem_registry_info_t *mca_ptl_ib_mem_registry_register(
     memcpy(&(info->request),mr,sizeof(VAPI_mr_t));
     info->ref_cnt = 1;
     do {
-        vapi_result = VAPI_register_mr(registry->ib_state->nic, mr,
+        vapi_result = VAPI_register_mr(registry->ib_ptl->nic, mr,
             &(info->hndl), &(info->reply));
         if (VAPI_OK != vapi_result) {
             if (VAPI_EAGAIN == vapi_result) {
@@ -214,7 +214,7 @@ mca_ptl_ib_mem_registry_info_t *mca_ptl_ib_mem_registry_register(
 }
 
 mca_ptl_ib_mem_registry_info_t *mca_ptl_ib_register_mem_with_registry(
-    mca_ptl_ib_state_t *ib_state,
+    mca_ptl_ib_module_t *ib_module,
     void *addr, size_t len)
 {
     mca_ptl_ib_mem_registry_info_t *info;
@@ -223,17 +223,17 @@ mca_ptl_ib_mem_registry_info_t *mca_ptl_ib_register_mem_with_registry(
     mr.acl = VAPI_EN_LOCAL_WRITE | VAPI_EN_REMOTE_WRITE;
     mr.l_key = 0;
     mr.r_key = 0;
-    mr.pd_hndl = ib_state->ptag;
+    mr.pd_hndl = ib_module->ptag;
     mr.size = len;
     mr.start = (VAPI_virt_addr_t) (MT_virt_addr_t) addr;
     mr.type = VAPI_MR;
 
-    info = mca_ptl_ib_mem_registry_register(&(ib_state->mem_registry),&mr);
+    info = mca_ptl_ib_mem_registry_register(&(ib_module->mem_registry),&mr);
     return info;
 }
 
 int mca_ptl_ib_deregister_mem_with_registry(
-    mca_ptl_ib_state_t *ib_state,
+    mca_ptl_ib_module_t *ib_module,
     void *addr, size_t len)
 {
     VAPI_mr_t mr;
@@ -242,12 +242,12 @@ int mca_ptl_ib_deregister_mem_with_registry(
     mr.acl = VAPI_EN_LOCAL_WRITE | VAPI_EN_REMOTE_WRITE;
     mr.l_key = 0;
     mr.r_key = 0;
-    mr.pd_hndl = ib_state->ptag;
+    mr.pd_hndl = ib_module->ptag;
     mr.size = len;
     mr.start = (VAPI_virt_addr_t) (MT_virt_addr_t) addr;
     mr.type = VAPI_MR;
 
-    rc = mca_ptl_ib_mem_registry_deregister(&(ib_state->mem_registry),&mr);
+    rc = mca_ptl_ib_mem_registry_deregister(&(ib_module->mem_registry),&mr);
     return rc;
 }
@@ -268,7 +268,7 @@ static int mca_ptl_ib_mem_registry_real_deregister(
     /* delete the info object from the red/black tree */
     ompi_rb_tree_delete(&(registry->rb_tree), &(info->reply));
     /* do the real deregistration */
-    vapi_result = VAPI_deregister_mr(registry->ib_state->nic, info->hndl);
+    vapi_result = VAPI_deregister_mr(registry->ib_ptl->nic, info->hndl);
     /* return the info object to the free list */
     item = (ompi_list_item_t *)info;
     OMPI_FREE_LIST_RETURN(&(registry->info_free_list), item);
@@ -300,10 +300,12 @@
     return OMPI_SUCCESS;
 }
 
-void mca_ptl_ib_mem_registry_init(
-    mca_ptl_ib_mem_registry_t *registry,
-    mca_ptl_ib_state_t *ib_state)
+int mca_ptl_ib_mem_registry_init(
+    mca_ptl_ib_mem_registry_t *registry,
+    struct mca_ptl_ib_module_t *ib_ptl)
 {
-    registry->ib_state = ib_state;
-    return;
+    registry->ib_ptl = ib_ptl;
+    return OMPI_SUCCESS;
 }
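A hypothetical call site for the renamed registry API above — a sketch only, assuming the declarations from this file and the header that follows; pin_and_unpin is an illustrative name, not part of the commit:

/* Pin a buffer against the module's protection domain, use its keys,
 * then release it (hypothetical usage, not code from this commit). */
static int pin_and_unpin(mca_ptl_ib_module_t *ib_ptl, void *buf, size_t len)
{
    mca_ptl_ib_mem_registry_info_t *info =
        mca_ptl_ib_register_mem_with_registry(ib_ptl, buf, len);
    if (NULL == info)
        return OMPI_ERROR;
    /* ... reference info->reply.l_key / r_key in work requests ... */
    return mca_ptl_ib_deregister_mem_with_registry(ib_ptl, buf, len);
}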

View file

@@ -40,6 +40,7 @@
 #include <vapi.h>
 #include <vapi_common.h>
 
+struct mca_ptl_ib_module_t;
 typedef struct mca_ptl_ib_mem_registry_info_t mca_ptl_ib_mem_registry_info_t;
 
 struct mca_ptl_ib_mem_registry_info_t {
@@ -60,7 +61,7 @@ struct mca_ptl_ib_mem_registry_t {
     ompi_free_list_t info_free_list;
     ompi_ptr_t *hints;
     mca_ptl_ib_mem_registry_info_t *evictable;
-    struct mca_ptl_ib_state_t *ib_state;
+    struct mca_ptl_ib_module_t *ib_ptl;
     int hints_log_size;
     int hints_size;
 };
@@ -135,20 +136,20 @@ mca_ptl_ib_mem_registry_info_t *mca_ptl_ib_mem_registry_register(
     VAPI_mr_t *mr);
 
 mca_ptl_ib_mem_registry_info_t *mca_ptl_ib_register_mem_with_registry(
-    struct mca_ptl_ib_state_t *ib_state,
+    struct mca_ptl_ib_module_t *ib_ptl,
     void *addr, size_t len);
 
 int mca_ptl_ib_deregister_mem_with_registry(
-    struct mca_ptl_ib_state_t *ib_state,
+    struct mca_ptl_ib_module_t *ib_ptl,
     void *addr, size_t len);
 
 int mca_ptl_ib_mem_registry_deregister(
     mca_ptl_ib_mem_registry_t *registry,
     VAPI_mr_t *mr);
 
-void mca_ptl_ib_mem_registry_init(
-    mca_ptl_ib_mem_registry_t *registry,
-    struct mca_ptl_ib_state_t *ib_state);
+int mca_ptl_ib_mem_registry_init(
+    mca_ptl_ib_mem_registry_t* registry,
+    struct mca_ptl_ib_module_t *ib_ptl);
 
 #if defined(c_plusplus) || defined(__cplusplus)
 }

View file

@@ -24,80 +24,38 @@
 #include "mca/pml/base/pml_base_sendreq.h"
 #include "mca/ns/base/base.h"
 #include "mca/oob/base/base.h"
+#include "mca/rml/rml.h"
+#include "mca/errmgr/errmgr.h"
+#include "dps/dps.h"
 #include "ptl_ib.h"
 #include "ptl_ib_addr.h"
 #include "ptl_ib_peer.h"
 #include "ptl_ib_proc.h"
-#include "ptl_ib_priv.h"
 #include "ptl_ib_sendfrag.h"
 
-static void mca_ptl_ib_peer_construct(mca_ptl_base_peer_t* module_peer);
-static void mca_ptl_ib_peer_destruct(mca_ptl_base_peer_t* module_peer);
+static void mca_ptl_ib_peer_construct(mca_ptl_base_peer_t* peer);
+static void mca_ptl_ib_peer_destruct(mca_ptl_base_peer_t* peer);
 
 OBJ_CLASS_INSTANCE(mca_ptl_ib_peer_t,
     ompi_list_item_t, mca_ptl_ib_peer_construct,
     mca_ptl_ib_peer_destruct);
 
-/*
- * Callback function for OOB send completion.
- * Not much to do over here right now ...
- *
- */
-static void mca_ptl_ib_peer_connect_send_callback(int status,
-    ompi_process_name_t* peer, ompi_buffer_t buffer,
-    int tag, void* cbdata)
-{
-    D_PRINT("OOB Send to %d complete", peer->vpid);
-}
-
-/*
- * Wrapper around mca_oob_send_packed_nb
- *
- * Post a non-blocking OOB send request to peer with
- * pre-allocated user buffer
- *
- */
-static int mca_ptl_ib_post_oob_send_nb(ompi_process_name_t *name,
-    void* user_buf, int len)
-{
-    int rc;
-    ompi_buffer_t buffer;
-
-    rc = ompi_buffer_init_preallocated(&buffer, user_buf,
-        len);
-    if(rc != OMPI_SUCCESS) {
-        return rc;
-    }
-
-    rc = mca_oob_send_packed_nb(name, buffer,
-        131313, 0,
-        (mca_oob_callback_packed_fn_t)mca_ptl_ib_peer_connect_send_callback,
-        NULL);
-    if(rc != OMPI_SUCCESS) {
-        return rc;
-    }
-    return rc;
-}
-
 /*
  * Initialize state of the peer instance.
  *
 */
-static void mca_ptl_ib_peer_construct(mca_ptl_base_peer_t* module_peer)
+static void mca_ptl_ib_peer_construct(mca_ptl_base_peer_t* peer)
 {
-    module_peer->peer_module = 0;
-    module_peer->peer_proc = 0;
-    module_peer->peer_ts = 0.0;
-    module_peer->peer_state = MCA_PTL_IB_CLOSED;
-    module_peer->peer_retries = 0;
-    OBJ_CONSTRUCT(&module_peer->peer_send_lock, ompi_mutex_t);
-    OBJ_CONSTRUCT(&module_peer->peer_recv_lock, ompi_mutex_t);
-    OBJ_CONSTRUCT(&module_peer->pending_send_frags, ompi_list_t);
+    peer->peer_ptl = 0;
+    peer->peer_proc = 0;
+    peer->peer_tstamp = 0.0;
+    peer->peer_state = MCA_PTL_IB_CLOSED;
+    peer->peer_retries = 0;
+    OBJ_CONSTRUCT(&peer->peer_send_lock, ompi_mutex_t);
+    OBJ_CONSTRUCT(&peer->peer_recv_lock, ompi_mutex_t);
+    OBJ_CONSTRUCT(&peer->pending_send_frags, ompi_list_t);
 }
 
 /*
@@ -105,73 +63,50 @@ static void mca_ptl_ib_peer_construct(mca_ptl_base_peer_t* module_peer)
  *
 */
-static void mca_ptl_ib_peer_destruct(mca_ptl_base_peer_t* module_peer)
+static void mca_ptl_ib_peer_destruct(mca_ptl_base_peer_t* peer)
 {
 }
 
-/*
- * Allocate peer connection structures
- *
- */
-static int mca_ptl_ib_alloc_peer_conn(mca_ptl_base_peer_t* peer)
-{
-    /* Allocate space for peer connection */
-    peer->peer_conn = (mca_ptl_ib_peer_conn_t *)
-        malloc(sizeof(mca_ptl_ib_peer_conn_t));
-    if(NULL == peer->peer_conn) {
-        return OMPI_ERR_OUT_OF_RESOURCE;
-    }
-    return OMPI_SUCCESS;
-}
-
 /*
  * Send connection information to remote peer using OOB
  *
 */
-static int mca_ptl_ib_peer_send_conn_info(mca_ptl_base_peer_t* peer)
+static void mca_ptl_ib_peer_send_cb(
+    int status,
+    orte_process_name_t* peer,
+    orte_buffer_t* buffer,
+    orte_rml_tag_t tag,
+    void* cbdata)
 {
-    int rc;
-    ompi_process_name_t *name;
-    char* sendbuf;
-
-    name = &peer->peer_proc->proc_guid;
-    sendbuf = (char*) malloc(sizeof(char)*50);
-    if(NULL == sendbuf) {
-        return OMPI_ERR_OUT_OF_RESOURCE;
-    }
-
-    /* Zero out the send buffer */
-    memset(sendbuf, 0, 50);
-
-    /* Copy the info in the send buffer */
-    /* Format:
-     *
-     * <QP> <LID>
-     * Ofcourse without the <'s and >'s moron!
-     * Size of each field is limited to maximum
-     * 8 characters. This should be enough for all
-     * platforms, and is internal information
-     */
-    sprintf(sendbuf, "%08d %08d",
-        peer->peer_conn->lres->qp_prop.qp_num,
-        peer->peer_module->ib_state->port.lid);
-
-    /* Send it off */
-    rc = mca_ptl_ib_post_oob_send_nb(name,
-        (void*)sendbuf, 50);
-    if(rc != OMPI_SUCCESS) {
-        return rc;
-    }
-
-    D_PRINT("Sent buffer : %s", sendbuf);
-
+    OBJ_RELEASE(buffer);
+}
+
+static int mca_ptl_ib_peer_send_connect_req(mca_ptl_base_peer_t* peer)
+{
+    orte_buffer_t* buffer = OBJ_NEW(orte_buffer_t);
+    int rc;
+
+    if(NULL == buffer) {
+        ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
+        return ORTE_ERR_OUT_OF_RESOURCE;
+    }
+
+    /* pack the info in the send buffer */
+    rc = orte_dps.pack(buffer, &peer->lcl_qp_prop.qp_num, 1, ORTE_UINT32);
+    if(rc != ORTE_SUCCESS) {
+        ORTE_ERROR_LOG(rc);
+        return rc;
+    }
+    rc = orte_dps.pack(buffer, &peer->peer_ptl->port.lid, 1, ORTE_UINT32);
+
+    /* send to peer */
+    rc = orte_rml.send_buffer_nb(&peer->peer_proc->proc_guid, buffer, ORTE_RML_TAG_DYNAMIC-1, 0,
+        mca_ptl_ib_peer_send_cb, NULL);
+    if(rc < 0) {
+        ORTE_ERROR_LOG(rc);
+        return rc;
+    }
     return OMPI_SUCCESS;
 }
@@ -180,45 +115,29 @@ static int mca_ptl_ib_peer_send_conn_info(mca_ptl_base_peer_t* peer)
  *
 */
-static void mca_ptl_ib_peer_send_connect_ack(mca_ptl_base_peer_t* peer)
+static int mca_ptl_ib_peer_send_connect_ack(mca_ptl_base_peer_t* peer)
 {
+    orte_buffer_t* buffer = OBJ_NEW(orte_buffer_t);
     int rc;
-    ompi_process_name_t *name;
-    char* sendbuf;
-    int zero = 0;
-
-    name = &peer->peer_proc->proc_guid;
-
-    sendbuf = (char*) malloc(sizeof(char)*50);
-    if(NULL == sendbuf) {
-        ompi_output(0, "Out of resources");
-    }
-
-    /* Zero out the send buffer */
-    memset(sendbuf, 0, 50);
-
-    /* Copy the info in the send buffer */
-    /* Format:
-     *
-     * <QP> <LID>
-     * Ofcourse without the <'s and >'s moron!
-     * Size of each field is limited to maximum
-     * 8 characters. This should be enough for all
-     * platforms, and is internal information
-     */
-    sprintf(sendbuf, "%08d %08d", zero, zero);
-
-    /* Send it off */
-    rc = mca_ptl_ib_post_oob_send_nb(name,
-        (void*)sendbuf, 50);
-    if(rc != OMPI_SUCCESS) {
-        ompi_output(0, "Error in sending connect ack!");
-    }
-
-    D_PRINT("Sent buffer : %s", sendbuf);
+    uint32_t zero = 0;
+
+    /* pack the info in the send buffer */
+    if(ORTE_SUCCESS != (rc = orte_dps.pack(buffer, &zero, 1, ORTE_UINT32))) {
+        ORTE_ERROR_LOG(rc);
+        return rc;
+    }
+    if(ORTE_SUCCESS != (rc = orte_dps.pack(buffer, &zero, 1, ORTE_UINT32))) {
+        ORTE_ERROR_LOG(rc);
+        return rc;
+    }
+
+    /* send to peer */
+    rc = orte_rml.send_buffer_nb(&peer->peer_proc->proc_guid, buffer, ORTE_RML_TAG_DYNAMIC-1, 0,
+        mca_ptl_ib_peer_send_cb, NULL);
+    if(rc < 0) {
+        ORTE_ERROR_LOG(rc);
+        return rc;
+    }
 }
 
 /*
@@ -229,24 +148,31 @@ static void mca_ptl_ib_peer_send_connect_ack(mca_ptl_base_peer_t* peer)
  * setup.
  *
 */
-static void mca_ptl_ib_peer_set_remote_info(mca_ptl_base_peer_t* peer,
-    void* baseptr, size_t size)
+static int mca_ptl_ib_peer_set_remote_info(mca_ptl_base_peer_t* peer, orte_buffer_t* buffer)
 {
-    char tempbuf[9];
-
-    memset(tempbuf, 0, 9);
-    strncpy(tempbuf, (char*)baseptr, 8);
-
-    peer->peer_conn->rres->qp_num = atoi(tempbuf);
-
-    memset(tempbuf, 0, 9);
-    strncpy(tempbuf, (char*)baseptr + 9*sizeof(char), 8);
-
-    peer->peer_conn->rres->lid = atoi(tempbuf);
+    int rc;
+    size_t cnt = 1;
+
+    rc = orte_dps.unpack(buffer, &peer->rem_qp_num, &cnt, ORTE_UINT32);
+    if(ORTE_SUCCESS != rc) {
+        ORTE_ERROR_LOG(rc);
+        return rc;
+    }
+    rc = orte_dps.unpack(buffer, &peer->rem_lid, &cnt, ORTE_UINT32);
+    if(ORTE_SUCCESS != rc) {
+        ORTE_ERROR_LOG(rc);
+        return rc;
+    }
 
     D_PRINT("Received QP num = %d, LID = %d",
-        peer->peer_conn->rres->qp_num,
-        peer->peer_conn->rres->lid);
+        peer->rem_qp_num,
+        peer->rem_lid);
+    return ORTE_SUCCESS;
+}
+
+static int mca_ptl_ib_peer_init(
+    mca_ptl_ib_peer_t *peer)
+{
+    return OMPI_SUCCESS;
 }
 
 /*
@@ -259,72 +185,72 @@ static void mca_ptl_ib_peer_set_remote_info(mca_ptl_base_peer_t* peer,
 static int mca_ptl_ib_peer_start_connect(mca_ptl_base_peer_t* peer)
 {
+    mca_ptl_ib_module_t* ib_ptl = peer->peer_ptl;
     int rc;
 
-    /* Allocate peer connection structures */
-    rc = mca_ptl_ib_alloc_peer_conn(peer);
-    if(rc != OMPI_SUCCESS) {
-        return rc;
-    }
-
-    /* Initialize the peer */
-    rc = mca_ptl_ib_init_peer(peer->peer_module->ib_state,
-        peer->peer_conn);
-    if(rc != OMPI_SUCCESS) {
+    /* Create the Queue Pair */
+    if(OMPI_SUCCESS != (rc = mca_ptl_ib_create_qp(ib_ptl->nic,
+        ib_ptl->ptag,
+        ib_ptl->cq_hndl,
+        ib_ptl->cq_hndl,
+        &peer->lcl_qp_hndl,
+        &peer->lcl_qp_prop,
+        VAPI_TS_RC))) {
+        ompi_output(0, "[%d,%d,%d] %s:%d errcode %d\n",
+            ORTE_NAME_ARGS(orte_process_info.my_name), __FILE__,__LINE__,rc);
         return rc;
     }
 
     /* Send connection info over to remote peer */
-    rc = mca_ptl_ib_peer_send_conn_info(peer);
-    if(rc != OMPI_SUCCESS) {
+    peer->peer_state = MCA_PTL_IB_CONNECTING;
+    if(OMPI_SUCCESS != (rc = mca_ptl_ib_peer_send_connect_req(peer))) {
+        ompi_output(0, "[%d,%d,%d] %s:%d errcode %d\n",
+            ORTE_NAME_ARGS(orte_process_info.my_name), __FILE__,__LINE__,rc);
         return rc;
     }
-
-    /* Update status of peer to as connecting */
-    peer->peer_state = MCA_PTL_IB_CONNECTING;
-
-    return rc;
+    return OMPI_SUCCESS;
 }
 
 /*
 * Reply to a `start - connect' message
  *
 */
-static int mca_ptl_ib_peer_reply_start_connect(mca_ptl_ib_peer_t *peer,
-    void* baseptr, size_t size)
+static int mca_ptl_ib_peer_reply_start_connect(mca_ptl_ib_peer_t *peer, orte_buffer_t* buffer)
 {
+    mca_ptl_ib_module_t* ib_ptl = peer->peer_ptl;
     int rc;
 
-    /* Allocate peer connection structures */
-    rc = mca_ptl_ib_alloc_peer_conn(peer);
-    if(rc != OMPI_SUCCESS) {
-        return rc;
-    }
-
-    /* Initialize the peer */
-    rc = mca_ptl_ib_init_peer(peer->peer_module->ib_state,
-        peer->peer_conn);
-    if(rc != OMPI_SUCCESS) {
+    /* Create the Queue Pair */
+    if(OMPI_SUCCESS != (rc = mca_ptl_ib_create_qp(ib_ptl->nic,
+        ib_ptl->ptag,
+        ib_ptl->cq_hndl,
+        ib_ptl->cq_hndl,
+        &peer->lcl_qp_hndl,
+        &peer->lcl_qp_prop,
+        VAPI_TS_RC))) {
+        ompi_output(0, "[%d,%d,%d] %s:%d errcode %d\n",
+            ORTE_NAME_ARGS(orte_process_info.my_name), __FILE__,__LINE__,rc);
         return rc;
     }
 
     /* Set the remote side info */
-    mca_ptl_ib_peer_set_remote_info(peer, baseptr, size);
+    mca_ptl_ib_peer_set_remote_info(peer, buffer);
 
     /* Connect to peer */
-    rc = mca_ptl_ib_peer_connect(peer->peer_module->ib_state,
-        peer->peer_conn);
+    rc = mca_ptl_ib_peer_connect(peer);
     if(rc != OMPI_SUCCESS) {
+        ompi_output(0, "[%d,%d,%d] %s:%d errcode %d\n",
+            ORTE_NAME_ARGS(orte_process_info.my_name), __FILE__,__LINE__,rc);
         return rc;
     }
 
     /* Send connection info over to remote peer */
-    rc = mca_ptl_ib_peer_send_conn_info(peer);
-    if(rc != OMPI_SUCCESS) {
+    if(OMPI_SUCCESS != (rc = mca_ptl_ib_peer_send_connect_req(peer))) {
+        ompi_output(0, "[%d,%d,%d] %s:%d errcode %d\n",
+            ORTE_NAME_ARGS(orte_process_info.my_name), __FILE__,__LINE__,rc);
         return rc;
     }
-
-    return rc;
+    return OMPI_SUCCESS;
 }
 
 /*
@@ -334,8 +260,7 @@ static int mca_ptl_ib_peer_reply_start_connect(mca_ptl_ib_peer_t *peer,
 static void mca_ptl_ib_peer_connected(mca_ptl_ib_peer_t *peer)
 {
     peer->peer_state = MCA_PTL_IB_CONNECTED;
-
-    DUMP_PEER(peer);
+    mca_ptl_ib_progress_send_frags(peer);
 }
 
 /*
@@ -347,22 +272,17 @@ static void mca_ptl_ib_peer_connected(mca_ptl_ib_peer_t *peer)
  *
 */
-static void mca_ptl_ib_peer_connect_recv_callback(int status,
-    ompi_process_name_t* peer, ompi_buffer_t buffer,
-    int tag, void* cbdata)
+static void mca_ptl_ib_peer_recv(
+    int status,
+    orte_process_name_t* peer,
+    orte_buffer_t* buffer,
+    orte_rml_tag_t tag,
+    void* cbdata)
 {
-    size_t size;
-    void *baseptr, *dataptr, *fromptr;
     mca_ptl_ib_proc_t *ib_proc;
     mca_ptl_ib_peer_t *ib_peer;
     int peer_state;
     int rc;
 
-    ompi_buffer_size(buffer, &size);
-    ompi_buffer_get_ptrs(buffer, &baseptr,
-        &dataptr, &fromptr);
-
-    D_PRINT("Size recv: %d, Data: %s", size, baseptr);
-
     for(ib_proc = (mca_ptl_ib_proc_t*)
         ompi_list_get_first(&mca_ptl_ib_component.ib_procs);
@@ -389,44 +309,23 @@ static void mca_ptl_ib_peer_connect_recv_callback(int status,
                  * status of this connection to CONNECTING,
                  * and then reply with our QP information */
-#if 0
-                D_PRINT("Start Connect %d",
-                    ib_proc->proc_guid.vpid);
-#endif
-                ompi_output(0, "Start Connect %d",
-                    ib_proc->proc_guid.vpid);
-
-                if(mca_ptl_ib_peer_reply_start_connect(ib_peer,
-                    baseptr, size)
-                    != OMPI_SUCCESS) {
-                    D_PRINT("Connect Error");
+                if(OMPI_SUCCESS != (rc = mca_ptl_ib_peer_reply_start_connect(ib_peer, buffer))) {
+                    ompi_output(0, "[%d,%d,%d] %s:%d errcode %d\n",
+                        ORTE_NAME_ARGS(orte_process_info.my_name), __FILE__,__LINE__,rc);
+                    break;
                 }
 
                 /* Setup state as connected */
                 ib_peer->peer_state = MCA_PTL_IB_CONNECT_ACK;
                 break;
 
            case MCA_PTL_IB_CONNECTING :
-                /* We are already connecting with this peer,
-                 * this means that we have initiated OOB sends
-                 * with this peer, and the peer is replying.
-                 * No need to send him any more stuff */
-#if 0
-                D_PRINT("Connect reply %d",
-                    ib_proc->proc_guid.vpid);
-#endif
-                ompi_output(0, "Connect reply %d",
-                    ib_proc->proc_guid.vpid);
-
-                mca_ptl_ib_peer_set_remote_info(ib_peer,
-                    baseptr, size);
-
-                if(mca_ptl_ib_peer_connect(ib_peer->peer_module->ib_state,
-                    ib_peer->peer_conn)
-                    != OMPI_SUCCESS) {
-                    D_PRINT("Connect Error");
+                mca_ptl_ib_peer_set_remote_info(ib_peer, buffer);
+                if(OMPI_SUCCESS != (rc = mca_ptl_ib_peer_connect(ib_peer))) {
+                    ompi_output(0, "[%d,%d,%d] %s:%d errcode %d\n",
+                        ORTE_NAME_ARGS(orte_process_info.my_name), __FILE__,__LINE__,rc);
+                    break;
                 }
 
                 /* Setup state as connected */
@@ -434,7 +333,6 @@ static void mca_ptl_ib_peer_connect_recv_callback(int status,
                 /* Send him an ack */
                 mca_ptl_ib_peer_send_connect_ack(ib_peer);
-
                 break;
 
            case MCA_PTL_IB_CONNECT_ACK:
@@ -451,22 +349,23 @@ static void mca_ptl_ib_peer_connect_recv_callback(int status,
                 break;
         }
     }
 
     /* Okay, now that we are done receiving,
      * re-post the buffer */
-    mca_ptl_ib_post_oob_recv_nb();
+    mca_ptl_ib_post_recv();
 }
 
-void mca_ptl_ib_post_oob_recv_nb()
+void mca_ptl_ib_post_recv()
 {
     D_PRINT("");
 
-    mca_oob_recv_packed_nb(MCA_OOB_NAME_ANY,
-        131313, 0,
-        (mca_oob_callback_packed_fn_t)mca_ptl_ib_peer_connect_recv_callback,
-        NULL);
+    orte_rml.recv_buffer_nb(
+        ORTE_RML_NAME_ANY,
+        ORTE_RML_TAG_DYNAMIC-1,
+        0,
+        mca_ptl_ib_peer_recv,
+        NULL);
 }
@@ -521,20 +420,22 @@ int mca_ptl_ib_peer_send(mca_ptl_base_peer_t* peer,
             break;
 
        case MCA_PTL_IB_CONNECTED:
-            /* Send the frag off */
+        {
+            mca_ptl_ib_module_t* ib_ptl = peer->peer_ptl;
+            ompi_list_item_t* item;
             A_PRINT("Send to : %d, len : %d, frag : %p",
                 peer->peer_proc->proc_guid.vpid,
                 frag->ib_buf.desc.sg_entry.len,
                 frag);
-            rc = mca_ptl_ib_post_send(peer->peer_module->ib_state,
-                peer->peer_conn,
+            rc = mca_ptl_ib_post_send(peer->peer_ptl, peer,
                 &frag->ib_buf, (void*) frag);
+            while(NULL != (item = ompi_list_remove_first(&ib_ptl->repost))) {
+                mca_ptl_ib_buffer_repost(ib_ptl->nic, item);
+            }
             break;
+        }
 
        default:
             rc = OMPI_ERR_UNREACH;
     }
@@ -561,15 +462,74 @@ void mca_ptl_ib_progress_send_frags(mca_ptl_ib_peer_t* peer)
     while(!ompi_list_is_empty(&(peer->pending_send_frags))) {
         frag_item = ompi_list_remove_first(&(peer->pending_send_frags));
         sendfrag = (mca_ptl_ib_send_frag_t *) frag_item;
 
         /* We need to post this one */
-        if(mca_ptl_ib_post_send(peer->peer_module->ib_state,
-            peer->peer_conn, &sendfrag->ib_buf,
+        if(mca_ptl_ib_post_send(peer->peer_ptl, peer, &sendfrag->ib_buf,
             (void*) sendfrag)
             != OMPI_SUCCESS) {
             ompi_output(0, "Error in posting send");
         }
     }
 }
+
+/*
+ * Complete connection to peer.
+ */
+
+int mca_ptl_ib_peer_connect(
+    mca_ptl_ib_peer_t *peer)
+{
+    int rc, i;
+    VAPI_ret_t ret;
+    ib_buffer_t *ib_buf_ptr;
+    mca_ptl_ib_module_t *ib_ptl = peer->peer_ptl;
+
+    /* Establish Reliable Connection */
+    rc = mca_ptl_ib_qp_init(ib_ptl->nic,
+        peer->lcl_qp_hndl,
+        peer->rem_qp_num,
+        peer->rem_lid);
+    if(rc != OMPI_SUCCESS) {
+        return rc;
+    }
+
+    /* Allocate resources to this connection */
+    peer->lcl_recv = (ib_buffer_t*)
+        malloc(sizeof(ib_buffer_t) * NUM_IB_RECV_BUF);
+    if(NULL == peer->lcl_recv) {
+        return OMPI_ERR_OUT_OF_RESOURCE;
+    }
+
+    /* Register the buffers */
+    for(i = 0; i < NUM_IB_RECV_BUF; i++) {
+        rc = mca_ptl_ib_register_mem(ib_ptl->nic, ib_ptl->ptag,
+            (void*) peer->lcl_recv[i].buf,
+            MCA_PTL_IB_FIRST_FRAG_SIZE,
+            &peer->lcl_recv[i].hndl);
+        if(rc != OMPI_SUCCESS) {
+            return OMPI_ERROR;
+        }
+        ib_buf_ptr = &peer->lcl_recv[i];
+        ib_buf_ptr->qp_hndl = peer->lcl_qp_hndl;
+
+        IB_PREPARE_RECV_DESC(ib_buf_ptr);
+    }
+
+    /* Post receives */
+    for(i = 0; i < NUM_IB_RECV_BUF; i++) {
+        ret = VAPI_post_rr(ib_ptl->nic,
+            peer->lcl_qp_hndl,
+            &peer->lcl_recv[i].desc.rr);
+        if(VAPI_OK != ret) {
+            MCA_PTL_IB_VAPI_RET(ret, "VAPI_post_rr");
+        }
+    }
+    return OMPI_SUCCESS;
+}
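After this change the connection wire protocol is just two packed uint32 values — QP number, then port LID — exchanged over the RML. The sketch below restates the pack/unpack symmetry of mca_ptl_ib_peer_send_connect_req and mca_ptl_ib_peer_set_remote_info above (same calls as the diff; function names are illustrative and error logging is trimmed):

/* Sender side: pack local QP number, then LID. */
static int pack_conn_info(orte_buffer_t *buf, mca_ptl_ib_peer_t *peer)
{
    int rc = orte_dps.pack(buf, &peer->lcl_qp_prop.qp_num, 1, ORTE_UINT32);
    if (ORTE_SUCCESS != rc)
        return rc;
    return orte_dps.pack(buf, &peer->peer_ptl->port.lid, 1, ORTE_UINT32);
}

/* Receiver side: unpack in the same order into the peer state. */
static int unpack_conn_info(orte_buffer_t *buf, mca_ptl_ib_peer_t *peer)
{
    size_t cnt = 1;
    int rc = orte_dps.unpack(buf, &peer->rem_qp_num, &cnt, ORTE_UINT32);
    if (ORTE_SUCCESS != rc)
        return rc;
    cnt = 1;
    return orte_dps.unpack(buf, &peer->rem_lid, &cnt, ORTE_UINT32);
}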

View file

@@ -67,7 +67,7 @@ typedef enum {
 struct mca_ptl_base_peer_t {
     ompi_list_item_t super;
 
-    struct mca_ptl_ib_module_t* peer_module;
+    struct mca_ptl_ib_module_t* peer_ptl;
     /**< PTL instance that created this connection */
 
     struct mca_ptl_ib_proc_t* peer_proc;
@@ -76,13 +76,10 @@ struct mca_ptl_base_peer_t {
     mca_ptl_ib_peer_state_t peer_state;
     /**< current state of the connection */
 
-    mca_ptl_ib_peer_conn_t* peer_conn;
-    /**< IB specific private information about peer */
-
     size_t peer_retries;
     /**< number of connection retries attempted */
 
-    double peer_ts;
+    double peer_tstamp;
     /**< timestamp of when the first connection was attempted */
 
     ompi_mutex_t peer_send_lock;
@ -93,13 +90,29 @@ struct mca_ptl_base_peer_t {
ompi_list_t pending_send_frags; ompi_list_t pending_send_frags;
/**< list of pending send frags for this peer */ /**< list of pending send frags for this peer */
VAPI_qp_num_t rem_qp_num;
/* Remote side QP number */
IB_lid_t rem_lid;
/* Local identifier of the remote process */
VAPI_qp_hndl_t lcl_qp_hndl;
/* Local QP handle */
VAPI_qp_prop_t lcl_qp_prop;
/* Local QP properties */
ib_buffer_t *lcl_recv;
/* Remote resources associated with this connection */
}; };
typedef struct mca_ptl_base_peer_t mca_ptl_base_peer_t; typedef struct mca_ptl_base_peer_t mca_ptl_base_peer_t;
typedef struct mca_ptl_base_peer_t mca_ptl_ib_peer_t; typedef struct mca_ptl_base_peer_t mca_ptl_ib_peer_t;
int mca_ptl_ib_peer_send(mca_ptl_base_peer_t*, mca_ptl_ib_send_frag_t*); int mca_ptl_ib_peer_send(mca_ptl_base_peer_t*, mca_ptl_ib_send_frag_t*);
void mca_ptl_ib_post_oob_recv_nb(void); int mca_ptl_ib_peer_connect(mca_ptl_base_peer_t*);
void mca_ptl_ib_post_recv(void);
void mca_ptl_ib_progress_send_frags(mca_ptl_ib_peer_t*); void mca_ptl_ib_progress_send_frags(mca_ptl_ib_peer_t*);
View file
@@ -77,40 +77,6 @@ static void async_event_handler(VAPI_hca_hndl_t hca_hndl,
  *
  */

-static int mca_ptl_ib_get_hca_id(int num, VAPI_hca_id_t* hca_id)
-{
-    uint32_t num_hcas;
-    VAPI_ret_t ret;
-    VAPI_hca_id_t* hca_ids = NULL;
-
-    hca_ids = (VAPI_hca_id_t*) malloc(mca_ptl_ib_component.ib_num_hcas *
-        sizeof(VAPI_hca_id_t));
-    if(NULL == hca_ids) {
-        return OMPI_ERR_OUT_OF_RESOURCE;
-    }
-
-    /* Now get the hca_id from underlying VAPI layer */
-    ret = EVAPI_list_hcas(mca_ptl_ib_component.ib_num_hcas,
-        &num_hcas, hca_ids);
-
-    /* HACK: right now, I have put VAPI_EAGAIN as
-     * acceptable condition since we are trying to have
-     * only 1 ptl support */
-    if((VAPI_OK != ret) && (VAPI_EAGAIN != ret)) {
-        MCA_PTL_IB_VAPI_RET(ret, "EVAPI_list_hcas");
-        return OMPI_ERROR;
-    } else {
-        num = num % num_hcas;
-        memcpy(hca_id, hca_ids[num], sizeof(VAPI_hca_id_t));
-    }
-    free(hca_ids);
-
-    return OMPI_SUCCESS;
-}
-
 static int mca_ptl_ib_get_hca_hndl(VAPI_hca_id_t hca_id,
     VAPI_hca_hndl_t* hca_hndl)
@@ -194,7 +160,7 @@ static int mca_ptl_ib_set_async_handler(VAPI_hca_hndl_t nic,
     return OMPI_SUCCESS;
 }

-static int mca_ptl_ib_create_qp(VAPI_hca_hndl_t nic,
+int mca_ptl_ib_create_qp(VAPI_hca_hndl_t nic,
     VAPI_pd_hndl_t ptag,
     VAPI_cq_hndl_t recv_cq,
     VAPI_cq_hndl_t send_cq,
@@ -242,29 +208,23 @@ static int mca_ptl_ib_create_qp(VAPI_hca_hndl_t nic,
     return OMPI_SUCCESS;
 }

-int mca_ptl_ib_init_module(mca_ptl_ib_state_t *ib_state, int module_num)
+int mca_ptl_ib_module_init(mca_ptl_ib_module_t *ib_ptl)
 {
-    /* Get the HCA id ... InfiniHost0, 1 etc */
-    if(mca_ptl_ib_get_hca_id(module_num, &ib_state->hca_id)
-        != OMPI_SUCCESS) {
-        return OMPI_ERROR;
-    }
-
     /* Get HCA handle */
-    if(mca_ptl_ib_get_hca_hndl(ib_state->hca_id, &ib_state->nic)
+    if(mca_ptl_ib_get_hca_hndl(ib_ptl->hca_id, &ib_ptl->nic)
         != OMPI_SUCCESS) {
         return OMPI_ERROR;
     }

     /* Allocate a protection domain for this NIC */
-    if(mca_ptl_ib_alloc_pd(ib_state->nic, &ib_state->ptag)
+    if(mca_ptl_ib_alloc_pd(ib_ptl->nic, &ib_ptl->ptag)
         != OMPI_SUCCESS) {
         return OMPI_ERROR;
     }

     /* Get the properties of the HCA,
      * LID etc. are part of the properties */
-    if(mca_ptl_ib_query_hca_prop(ib_state->nic, &ib_state->port)
+    if(mca_ptl_ib_query_hca_prop(ib_ptl->nic, &ib_ptl->port)
         != OMPI_SUCCESS) {
         return OMPI_ERROR;
     }
@@ -272,27 +232,26 @@ int mca_ptl_ib_init_module(mca_ptl_ib_state_t *ib_state, int module_num)
     /* Create Completion Q */
     /* We use a single completion Q for sends & recvs
      * This saves us overhead of polling 2 separate Qs */
-    if(mca_ptl_ib_create_cq(ib_state->nic, &ib_state->cq_hndl)
+    if(mca_ptl_ib_create_cq(ib_ptl->nic, &ib_ptl->cq_hndl)
         != OMPI_SUCCESS) {
         return OMPI_ERROR;
     }

     /* Attach asynchronous handler */
-    if(mca_ptl_ib_set_async_handler(ib_state->nic,
-        &ib_state->async_handler)
+    if(mca_ptl_ib_set_async_handler(ib_ptl->nic,
+        &ib_ptl->async_handler)
         != OMPI_SUCCESS) {
         return OMPI_ERROR;
     }

     /* initialize memory region registry */
-    OBJ_CONSTRUCT(&(ib_state->mem_registry), mca_ptl_ib_mem_registry_t);
-    mca_ptl_ib_mem_registry_init(&(ib_state->mem_registry), ib_state);
+    OBJ_CONSTRUCT(&ib_ptl->mem_registry, mca_ptl_ib_mem_registry_t);
+    mca_ptl_ib_mem_registry_init(&ib_ptl->mem_registry, ib_ptl);

     return OMPI_SUCCESS;
 }
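Because the module owns a single CQ, one poll loop services both send and receive completions by dispatching on the work-completion opcode. A minimal sketch of that dispatch, mirroring the mca_ptl_ib_drain_network() routine removed further down in this file (the function name here is illustrative):

    static void example_poll_cq_once(VAPI_hca_hndl_t nic, VAPI_cq_hndl_t cq_hndl)
    {
        VAPI_wc_desc_t comp;
        if(VAPI_OK == VAPI_poll_cq(nic, cq_hndl, &comp)) {
            switch(comp.opcode) {
            case VAPI_CQE_SQ_SEND_DATA:  /* local send finished */
            case VAPI_CQE_SQ_RDMA_WRITE: /* local RDMA write finished */
            case VAPI_CQE_RQ_SEND_DATA:  /* message arrived in a preposted buffer */
                /* comp.id carries the pointer posted with the descriptor */
                break;
            default:
                break;               /* unexpected opcode: treat as an error */
            }
        }
    }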
-static int mca_ptl_ib_rc_qp_init(VAPI_hca_hndl_t nic,
+int mca_ptl_ib_qp_init(VAPI_hca_hndl_t nic,
     VAPI_qp_hndl_t qp_hndl,
     VAPI_qp_num_t remote_qp,
     IB_lid_t remote_lid)
@@ -419,195 +378,40 @@ int mca_ptl_ib_register_mem(VAPI_hca_hndl_t nic, VAPI_pd_hndl_t ptag,
     return OMPI_SUCCESS;
 }
-int mca_ptl_ib_init_peer(mca_ptl_ib_state_t *ib_state,
-    mca_ptl_ib_peer_conn_t *peer_conn)
-{
-    /* Local resources */
-    peer_conn->lres = (mca_ptl_ib_peer_local_res_t *)
-        malloc(sizeof(mca_ptl_ib_peer_local_res_t));
-    if(NULL == peer_conn->lres) {
-        return OMPI_ERR_OUT_OF_RESOURCE;
-    }
-
-    /* Remote resources */
-    peer_conn->rres = (mca_ptl_ib_peer_remote_res_t *)
-        malloc(sizeof(mca_ptl_ib_peer_remote_res_t));
-    if(NULL == peer_conn->rres) {
-        return OMPI_ERR_OUT_OF_RESOURCE;
-    }
-
-    /* Create the Queue Pair */
-    if(mca_ptl_ib_create_qp(ib_state->nic,
-        ib_state->ptag,
-        ib_state->cq_hndl,
-        ib_state->cq_hndl,
-        &peer_conn->lres->qp_hndl,
-        &peer_conn->lres->qp_prop,
-        VAPI_TS_RC)
-        != OMPI_SUCCESS) {
-        return OMPI_ERR_OUT_OF_RESOURCE;
-    }
-    return OMPI_SUCCESS;
-}
-
-/*
- * 1. Establish Reliable Connection with peer
- * 2. Allocate resources to this connection
- * 3. Post receives for this connection
- *
- */
-int mca_ptl_ib_peer_connect(mca_ptl_ib_state_t *ib_state,
-    mca_ptl_ib_peer_conn_t *peer_conn)
-{
-    int rc, i;
-    VAPI_ret_t ret;
-    ib_buffer_t *ib_buf_ptr;
-
-    /* Establish Reliable Connection */
-    rc = mca_ptl_ib_rc_qp_init(ib_state->nic,
-        peer_conn->lres->qp_hndl,
-        peer_conn->rres->qp_num,
-        peer_conn->rres->lid);
-    if(rc != OMPI_SUCCESS) {
-        return rc;
-    }
-
-    /* Allocate resources to this connection */
-    peer_conn->lres->recv = (ib_buffer_t*)
-        malloc(sizeof(ib_buffer_t) * NUM_IB_RECV_BUF);
-    if(NULL == peer_conn->lres->recv) {
-        return OMPI_ERR_OUT_OF_RESOURCE;
-    }
-
-    /* Register the buffers */
-    for(i = 0; i < NUM_IB_RECV_BUF; i++) {
-        rc = mca_ptl_ib_register_mem(ib_state->nic, ib_state->ptag,
-            (void*) peer_conn->lres->recv[i].buf,
-            MCA_PTL_IB_FIRST_FRAG_SIZE,
-            &peer_conn->lres->recv[i].hndl);
-        if(rc != OMPI_SUCCESS) {
-            return OMPI_ERROR;
-        }
-        ib_buf_ptr = &peer_conn->lres->recv[i];
-        ib_buf_ptr->qp_hndl = peer_conn->lres->qp_hndl;
-        IB_PREPARE_RECV_DESC(ib_buf_ptr);
-    }
-
-    /* Post receives */
-    for(i = 0; i < NUM_IB_RECV_BUF; i++) {
-        ret = VAPI_post_rr(ib_state->nic,
-            peer_conn->lres->qp_hndl,
-            &peer_conn->lres->recv[i].desc.rr);
-        if(VAPI_OK != ret) {
-            MCA_PTL_IB_VAPI_RET(ret, "VAPI_post_rr");
-        }
-    }
-    D_PRINT("Done posting recvs");
-    return OMPI_SUCCESS;
-}
-
-int mca_ptl_ib_post_send(mca_ptl_ib_state_t *ib_state,
-    mca_ptl_ib_peer_conn_t *peer_conn,
+int mca_ptl_ib_post_send(mca_ptl_ib_module_t *ib_ptl,
+    mca_ptl_ib_peer_t *peer,
     ib_buffer_t *ib_buf, void* addr)
 {
     VAPI_ret_t ret;
     int msg_len = ib_buf->desc.sg_entry.len;

-    //IB_SET_REMOTE_QP_NUM(ib_buf, (peer_conn->rres->qp_num));
-    //IB_SET_SEND_DESC_ID(ib_buf, addr);
-    IB_PREPARE_SEND_DESC(ib_buf, (peer_conn->rres->qp_num),
+    IB_PREPARE_SEND_DESC(ib_buf, (peer->rem_qp_num),
         msg_len, addr);

-    D_PRINT("length : %d, qp_num = %d",
-        ib_buf->desc.sg_entry.len,
-        (peer_conn->rres->qp_num));
-
-    ret = VAPI_post_sr(ib_state->nic,
-        peer_conn->lres->qp_hndl,
-        &ib_buf->desc.sr);
+    /* TODO - get this from NIC properties */
+    if(msg_len < 128) {
+        ret = EVAPI_post_inline_sr(ib_ptl->nic,
+            peer->lcl_qp_hndl,
+            &ib_buf->desc.sr);
+    } else {
+        ret = VAPI_post_sr(ib_ptl->nic,
+            peer->lcl_qp_hndl,
+            &ib_buf->desc.sr);
+    }
     if(VAPI_OK != ret) {
         MCA_PTL_IB_VAPI_RET(ret, "VAPI_post_sr");
         return OMPI_ERROR;
     }
     return OMPI_SUCCESS;
 }
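A sketch of what the TODO above might become: the inline-data ceiling is a property of the QP rather than a constant, so the threshold could be read from the QP properties saved at creation time. The cap.max_inline_data_sq field name is an assumption about this VAPI generation and should be checked against vapi.h before relying on it:

    /* Illustrative only: prefer the QP's advertised inline limit over
     * the hard-coded 128-byte threshold. */
    static int example_can_post_inline(mca_ptl_ib_peer_t *peer, int msg_len)
    {
        return msg_len <= (int) peer->lcl_qp_prop.cap.max_inline_data_sq;
    }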
-void mca_ptl_ib_drain_network(VAPI_hca_hndl_t nic,
-    VAPI_cq_hndl_t cq_hndl, int* comp_type, void** comp_addr)
-{
-    VAPI_ret_t ret;
-    VAPI_wc_desc_t comp;
-
-    ret = VAPI_poll_cq(nic, cq_hndl, &comp);
-    if(VAPI_OK == ret) {
-        if(comp.status != VAPI_SUCCESS) {
-            ompi_output(0, "Got error : %s, Vendor code : %d Frag : %p",
-                VAPI_wc_status_sym(comp.status),
-                comp.vendor_err_syndrome, comp.id);
-            *comp_type = IB_COMP_ERROR;
-            *comp_addr = NULL;
-        } else {
-            if(VAPI_CQE_SQ_SEND_DATA == comp.opcode) {
-                D_PRINT("Send completion, id:%d\n", comp.id);
-                *comp_type = IB_COMP_SEND;
-                *comp_addr = (void*) (unsigned long) comp.id;
-            } else if(VAPI_CQE_RQ_SEND_DATA == comp.opcode) {
-                A_PRINT("Received message completion len = %d, id : %d\n",
-                    comp.byte_len, comp.id);
-                *comp_type = IB_COMP_RECV;
-                *comp_addr = (void*) (unsigned long) comp.id;
-            } else if(VAPI_CQE_SQ_RDMA_WRITE == comp.opcode) {
-                A_PRINT("RDMA Write completion");
-                *comp_type = IB_COMP_RDMA_W;
-                *comp_addr = (void*) (unsigned long) comp.id;
-            } else {
-                ompi_output(0, "Got Unknown completion! Opcode : %d\n",
-                    comp.opcode);
-                *comp_type = IB_COMP_ERROR;
-                *comp_addr = NULL;
-            }
-        }
-    } else {
-        /* No completions from the network */
-        *comp_type = IB_COMP_NOTHING;
-        *comp_addr = NULL;
-    }
-}
-
-void mca_ptl_ib_buffer_repost(VAPI_hca_hndl_t nic,
-    void* addr)
+void mca_ptl_ib_buffer_repost(VAPI_hca_hndl_t nic, void* addr)
 {
     VAPI_ret_t ret;
-    ib_buffer_t *ib_buf;
-
-    D_PRINT("");
-
-    ib_buf = (ib_buffer_t *) (unsigned long) addr;
+    ib_buffer_t *ib_buf = (ib_buffer_t*)addr;

     IB_PREPARE_RECV_DESC(ib_buf);
@@ -619,12 +423,12 @@ void mca_ptl_ib_buffer_repost(VAPI_hca_hndl_t nic,
     }
 }

-void mca_ptl_ib_prepare_ack(mca_ptl_ib_state_t *ib_state,
+void mca_ptl_ib_prepare_ack(mca_ptl_ib_module_t *ib_ptl,
     void* addr_to_reg, int len_to_reg,
     void* ack_buf, int* len_added)
 {
     mca_ptl_ib_mem_registry_info_t *info =
-        mca_ptl_ib_register_mem_with_registry(ib_state,
+        mca_ptl_ib_register_mem_with_registry(ib_ptl,
             addr_to_reg, (size_t)len_to_reg);

     if(NULL == info) {
@@ -638,15 +442,15 @@ void mca_ptl_ib_prepare_ack(mca_ptl_ib_state_t *ib_state,
     *len_added = sizeof(VAPI_rkey_t);
 }

-int mca_ptl_ib_rdma_write(mca_ptl_ib_state_t *ib_state,
-    mca_ptl_ib_peer_conn_t *peer_conn, ib_buffer_t *ib_buf,
+int mca_ptl_ib_rdma_write(mca_ptl_ib_module_t *ib_ptl,
+    mca_ptl_ib_peer_t *peer, ib_buffer_t *ib_buf,
     void* send_buf, size_t send_len, void* remote_buf,
     VAPI_rkey_t remote_key, void* id_buf)
 {
     VAPI_ret_t ret;
     mca_ptl_ib_mem_registry_info_t *info =
-        mca_ptl_ib_register_mem_with_registry(ib_state,
+        mca_ptl_ib_register_mem_with_registry(ib_ptl,
             send_buf, send_len);

     if (NULL == info) {
@@ -654,12 +458,12 @@ int mca_ptl_ib_rdma_write(mca_ptl_ib_state_t *ib_state,
     }

     /* Prepare descriptor */
-    IB_PREPARE_RDMA_W_DESC(ib_buf, (peer_conn->rres->qp_num),
+    IB_PREPARE_RDMA_W_DESC(ib_buf, (peer->rem_qp_num),
         send_len, send_buf, (info->reply.l_key), remote_key,
         id_buf, remote_buf);

-    ret = VAPI_post_sr(ib_state->nic,
-        peer_conn->lres->qp_hndl,
+    ret = VAPI_post_sr(ib_ptl->nic,
+        peer->lcl_qp_hndl,
         &ib_buf->desc.sr);
     if(ret != VAPI_OK) {
         MCA_PTL_IB_VAPI_RET(ret, "VAPI_post_sr");
View file
@@ -24,37 +24,11 @@
 #include "ptl_ib_vapi.h"
 #include "ptl_ib_memory.h"

-#define NUM_IB_SEND_BUF (10)
-#define NUM_IB_RECV_BUF (1000)
+#define NUM_IB_SEND_BUF (1)
+#define NUM_IB_RECV_BUF (4)

 #define MCA_PTL_IB_FIRST_FRAG_SIZE (65536)
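The arithmetic behind the new defaults: each preposted receive is a registered buffer of MCA_PTL_IB_FIRST_FRAG_SIZE = 64 KB, so the previous NUM_IB_RECV_BUF of 1000 pinned about 1000 x 64 KB = 62.5 MB of memory per connection, while the new value of 4 pins only 4 x 64 KB = 256 KB.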
-struct mca_ptl_ib_state_t {
-    VAPI_hca_id_t hca_id;
-    /* ID of HCA */
-
-    VAPI_hca_port_t port;
-    /* IB port of this PTL */
-
-    VAPI_hca_hndl_t nic;
-    /* NIC handle */
-
-    VAPI_pd_hndl_t ptag;
-    /* Protection Domain tag */
-
-    VAPI_cq_hndl_t cq_hndl;
-    /* Completion Queue handle */
-    /* At present Send & Recv are tied to the same completion queue */
-
-    EVAPI_async_handler_hndl_t async_handler;
-    /* Async event handler used to detect weird/unknown events */
-
-    mca_ptl_ib_mem_registry_t mem_registry;
-    /* registry of memory regions */
-};
-typedef struct mca_ptl_ib_state_t mca_ptl_ib_state_t;
-
 typedef enum {
     IB_RECV,
     IB_SEND
@@ -99,6 +73,7 @@ struct vapi_descriptor_t {
 typedef struct vapi_descriptor_t vapi_descriptor_t;

 struct ib_buffer_t {
+    ompi_list_item_t super;
     vapi_descriptor_t desc;
     /* Descriptor of the buffer */
@@ -114,61 +89,16 @@ struct ib_buffer_t {
 typedef struct ib_buffer_t ib_buffer_t;

-/* mca_ptl_ib_peer_local_res_t contains information
- * regarding local resources dedicated to this
- * connection */
-struct mca_ptl_ib_peer_local_res_t {
-
-    VAPI_qp_hndl_t qp_hndl;
-    /* Local QP handle */
-
-    VAPI_qp_prop_t qp_prop;
-    /* Local QP properties */
-
-    ib_buffer_t *recv;
-    /* Pointer to recv buffers */
-};
-typedef struct mca_ptl_ib_peer_local_res_t mca_ptl_ib_peer_local_res_t;
-
-/* mca_ptl_ib_peer_remote_res_t contains information
- * regarding remote resources dedicated to this
- * connection */
-struct mca_ptl_ib_peer_remote_res_t {
-
-    VAPI_qp_num_t qp_num;
-    /* Remote side QP number */
-
-    IB_lid_t lid;
-    /* Local identifier of the remote process */
-};
-typedef struct mca_ptl_ib_peer_remote_res_t mca_ptl_ib_peer_remote_res_t;
-
-/* mca_ptl_ib_peer_conn_t contains private information
- * about the peer. This information is used to describe
- * the connection oriented information about this peer
- * and local resources associated with it. */
-struct mca_ptl_ib_peer_conn_t {
-
-    mca_ptl_ib_peer_local_res_t* lres;
-    /* Local resources associated with this connection */
-
-    mca_ptl_ib_peer_remote_res_t* rres;
-    /* Remote resources associated with this connection */
-};
-typedef struct mca_ptl_ib_peer_conn_t mca_ptl_ib_peer_conn_t;
-
-#define DUMP_IB_STATE(ib_state_ptr) { \
-    ompi_output(0, "[%s:%d] ", __FILE__, __LINE__); \
-    ompi_output(0, "Dumping IB state"); \
-    ompi_output(0, "HCA ID : %s", ib_state_ptr->hca_id); \
-    ompi_output(0, "LID : %d", ib_state_ptr->port.lid); \
-    ompi_output(0, "HCA handle : %d", ib_state_ptr->nic); \
-    ompi_output(0, "Protection Domain: %d", ib_state_ptr->ptag); \
-    ompi_output(0, "Comp Q handle : %d", ib_state_ptr->cq_hndl); \
-    ompi_output(0, "Async hndl : %d", ib_state_ptr->async_handler); \
+#define DUMP_IB_STATE(ib_ptl) { \
+    ompi_output(0, "[%s:%d] ", __FILE__, __LINE__); \
+    ompi_output(0, "Dumping IB state"); \
+    ompi_output(0, "HCA ID : %s", ib_ptl->hca_id); \
+    ompi_output(0, "LID : %d", ib_ptl->port.lid); \
+    ompi_output(0, "HCA handle : %d", ib_ptl->nic); \
+    ompi_output(0, "Protection Domain: %d", ib_ptl->ptag); \
+    ompi_output(0, "Comp Q handle : %d", ib_ptl->cq_hndl); \
+    ompi_output(0, "Async hndl : %d", ib_ptl->async_handler); \
 }
 #define IB_PREPARE_RECV_DESC(ib_buf_ptr) { \
@@ -234,25 +164,54 @@ typedef struct mca_ptl_ib_peer_conn_t mca_ptl_ib_peer_conn_t;
 }

-int mca_ptl_ib_init_module(mca_ptl_ib_state_t*, int);
-int mca_ptl_ib_get_num_hcas(uint32_t*);
-int mca_ptl_ib_init_peer(mca_ptl_ib_state_t*, mca_ptl_ib_peer_conn_t*);
-int mca_ptl_ib_peer_connect(mca_ptl_ib_state_t*,
-    mca_ptl_ib_peer_conn_t*);
-int mca_ptl_ib_register_mem(VAPI_hca_hndl_t nic, VAPI_pd_hndl_t ptag,
-    void* buf, int len, vapi_memhandle_t* memhandle);
-int mca_ptl_ib_post_send(mca_ptl_ib_state_t *ib_state,
-    mca_ptl_ib_peer_conn_t *peer_conn,
-    ib_buffer_t *ib_buf, void*);
-void mca_ptl_ib_drain_network(VAPI_hca_hndl_t nic,
-    VAPI_cq_hndl_t cq_hndl, int* comp_type, void** comp_addr);
-void mca_ptl_ib_buffer_repost(VAPI_hca_hndl_t nic,
-    void* addr);
-void mca_ptl_ib_prepare_ack(mca_ptl_ib_state_t *ib_state,
-    void* addr_to_reg, int len_to_reg,
-    void* ack_buf, int* len_added);
-int mca_ptl_ib_rdma_write(mca_ptl_ib_state_t *ib_state,
-    mca_ptl_ib_peer_conn_t *peer_conn, ib_buffer_t *ib_buf,
-    void* send_buf, size_t send_len, void* remote_buf,
-    VAPI_rkey_t remote_key, void*);
+struct mca_ptl_ib_module_t;
+struct mca_ptl_base_peer_t;
+
+int mca_ptl_ib_module_init(struct mca_ptl_ib_module_t*);
+
+int mca_ptl_ib_register_mem(
+    VAPI_hca_hndl_t nic,
+    VAPI_pd_hndl_t ptag,
+    void* buf,
+    int len,
+    vapi_memhandle_t* memhandle);
+
+int mca_ptl_ib_post_send(
+    struct mca_ptl_ib_module_t *ib_module,
+    struct mca_ptl_base_peer_t *peer,
+    ib_buffer_t *ib_buf, void*);
+
+void mca_ptl_ib_buffer_repost(
+    VAPI_hca_hndl_t nic,
+    void* addr);
+
+void mca_ptl_ib_prepare_ack(
+    struct mca_ptl_ib_module_t *ib_module,
+    void* addr_to_reg, int len_to_reg,
+    void* ack_buf, int* len_added);
+
+int mca_ptl_ib_rdma_write(
+    struct mca_ptl_ib_module_t *ib_module,
+    struct mca_ptl_base_peer_t *peer,
+    ib_buffer_t *ib_buf,
+    void* send_buf,
+    size_t send_len,
+    void* remote_buf,
+    VAPI_rkey_t remote_key, void*);
+
+int mca_ptl_ib_create_qp(VAPI_hca_hndl_t nic,
+    VAPI_pd_hndl_t ptag,
+    VAPI_cq_hndl_t recv_cq,
+    VAPI_cq_hndl_t send_cq,
+    VAPI_qp_hndl_t* qp_hndl,
+    VAPI_qp_prop_t* qp_prop,
+    int transport_type);
+
+int mca_ptl_ib_qp_init(
+    VAPI_hca_hndl_t nic,
+    VAPI_qp_hndl_t qp_hndl,
+    VAPI_qp_num_t remote_qp,
+    IB_lid_t remote_lid);

 #endif /* MCA_PTL_IB_PRIV_H */
View file
@@ -45,7 +45,7 @@ struct mca_ptl_ib_proc_t {
     ompi_proc_t *proc_ompi;
     /**< pointer to corresponding ompi_proc_t */

-    ompi_process_name_t proc_guid;
+    orte_process_name_t proc_guid;
     /**< globally unique identifier for the process */

     size_t proc_addr_count;
View file
@@ -50,9 +50,10 @@ static void mca_ptl_ib_recv_frag_destruct(mca_ptl_ib_recv_frag_t* frag)
 }

 void
-mca_ptl_ib_recv_frag_done (mca_ptl_base_header_t *header,
-    mca_ptl_base_recv_frag_t* frag,
-    mca_pml_base_recv_request_t *request)
+mca_ptl_ib_recv_frag_done (
+    mca_ptl_base_header_t *header,
+    mca_ptl_base_recv_frag_t* frag,
+    mca_pml_base_recv_request_t *request)
 {
     D_PRINT("");
     frag->frag_base.frag_owner->ptl_recv_progress (
@@ -66,66 +67,57 @@ mca_ptl_ib_recv_frag_done (mca_ptl_base_header_t *header,
         (ompi_list_item_t*)frag);
 }
-static void mca_ptl_ib_data_frag(mca_ptl_base_module_t *module,
-    mca_ptl_base_header_t *header)
+static void mca_ptl_ib_data_frag(
+    mca_ptl_ib_module_t *ib_ptl,
+    mca_ptl_base_header_t *hdr)
 {
     bool matched;
     int rc;
     ompi_list_item_t *item;
     mca_ptl_ib_recv_frag_t *recv_frag;
-    mca_ptl_base_rendezvous_header_t *rendezvous_hdr = (mca_ptl_base_rendezvous_header_t *)header;
+    size_t hdr_length;

-    OMPI_FREE_LIST_GET(&mca_ptl_ib_component.ib_recv_frags, item, rc);
-    while (OMPI_SUCCESS != rc) {
-        /* TODO: progress the recv state machine */
-        D_PRINT("Retry to allocate a recv fragment\n");
-        OMPI_FREE_LIST_GET (&mca_ptl_ib_component.ib_recv_frags, item, rc);
-    }
+    OMPI_FREE_LIST_WAIT (&mca_ptl_ib_component.ib_recv_frags, item, rc);

     recv_frag = (mca_ptl_ib_recv_frag_t *) item;
-    recv_frag->super.frag_base.frag_owner = module;
+    recv_frag->super.frag_base.frag_owner = &ib_ptl->super;
     recv_frag->super.frag_base.frag_peer = NULL;
     recv_frag->super.frag_request = NULL;
     recv_frag->super.frag_is_buffered = false;

-    /* Copy the header, mca_ptl_base_match()
-     * does not do what it claims */
-    recv_frag->super.frag_base.frag_header = *header;
+    /* Copy the header, mca_ptl_base_match() */
+    recv_frag->super.frag_base.frag_header = *hdr;

-    /* Taking the data starting point be
-     * default */
-    recv_frag->super.frag_base.frag_addr =
-        (char *) header + sizeof (mca_ptl_base_header_t);
-    recv_frag->super.frag_base.frag_size = header->hdr_rndv.hdr_frag_length;
+    switch(hdr->hdr_common.hdr_type) {
+        case MCA_PTL_HDR_TYPE_MATCH:
+            hdr_length = sizeof(mca_ptl_base_match_header_t);
+            recv_frag->super.frag_base.frag_size = hdr->hdr_match.hdr_msg_length;
+            break;
+        case MCA_PTL_HDR_TYPE_RNDV:
+            hdr_length = sizeof(mca_ptl_base_rendezvous_header_t);
+            recv_frag->super.frag_base.frag_size = hdr->hdr_rndv.hdr_frag_length;
+            break;
+    }
+
+    /* Taking the data starting point be default */
+    recv_frag->super.frag_base.frag_addr = (char *) hdr + hdr_length;

-    /* match with preposted
-     * requests */
-    matched = module->ptl_match(
+    /* match against preposted requests */
+    matched = ib_ptl->super.ptl_match(
         recv_frag->super.frag_base.frag_owner,
         &recv_frag->super,
         &recv_frag->super.frag_base.frag_header.hdr_match);

     if (!matched) {
-        /* Oh my GOD
-         * !!! */
-        /* ompi_output(0, "Can't match buffer. Mama is unhappy\n"); */
-        memcpy (recv_frag->unex_buf,
-            (char *) header + sizeof (mca_ptl_base_header_t),
-            header->hdr_rndv.hdr_frag_length);
+        memcpy (recv_frag->unex_buf,
+            (char *) hdr + hdr_length,
+            recv_frag->super.frag_base.frag_size);
         recv_frag->super.frag_is_buffered = true;
         recv_frag->super.frag_base.frag_addr = recv_frag->unex_buf;
-    } else {
-        D_PRINT("Message matched!");
     }
 }
-static void mca_ptl_ib_ctrl_frag(mca_ptl_base_module_t *module,
-    mca_ptl_base_header_t *header)
+static void mca_ptl_ib_ctrl_frag(
+    mca_ptl_ib_module_t *ib_ptl,
+    mca_ptl_base_header_t *header)
 {
     mca_ptl_ib_send_frag_t *send_frag;
     mca_pml_base_send_request_t *req;
@@ -145,14 +137,14 @@ static void mca_ptl_ib_ctrl_frag(mca_ptl_base_module_t *module,
         ((char*) header + sizeof(mca_ptl_base_ack_header_t));

     /* Copy over data to request buffer */
-    memcpy(((mca_ptl_ib_send_request_t *) req)->req_buf,
+    memcpy(&((mca_ptl_ib_send_request_t *) req)->req_key,
         data_ptr, sizeof(VAPI_rkey_t));

     /* Progress & release fragments */
-    mca_ptl_ib_process_send_comp(module, (void*) send_frag);
+    mca_ptl_ib_send_frag_send_complete(ib_ptl, send_frag);
 }
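The rkey read above is the payload that mca_ptl_ib_prepare_ack() appends immediately after the base ACK header on the sending side. A sketch of the wire layout this code assumes (illustrative; no such struct exists in the source):

    struct example_ib_ack_wire_layout {
        mca_ptl_base_ack_header_t hdr; /* hdr_common.hdr_type == MCA_PTL_HDR_TYPE_ACK */
        VAPI_rkey_t rkey;              /* written by mca_ptl_ib_prepare_ack() */
    };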
-static void mca_ptl_ib_last_frag(mca_ptl_base_module_t *module,
+static void mca_ptl_ib_last_frag(mca_ptl_ib_module_t *ib_ptl,
     mca_ptl_base_header_t *hdr)
 {
     mca_ptl_ib_fin_header_t *fin_hdr = (mca_ptl_ib_fin_header_t *)hdr;
@@ -162,12 +154,12 @@ static void mca_ptl_ib_last_frag(mca_ptl_base_module_t *module,
     /* deregister memory if this is the last fragment */
     if ((request->req_bytes_received + hdr->hdr_frag.hdr_frag_length) >=
         request->req_bytes_packed) {
-        mca_ptl_ib_deregister_mem_with_registry(((mca_ptl_ib_module_t *)module)->ib_state,
+        mca_ptl_ib_deregister_mem_with_registry(ib_ptl,
             fin_hdr->mr_addr.pval, (size_t)fin_hdr->mr_size);
     }
-    module->ptl_recv_progress (
-        module,
+    ib_ptl->super.ptl_recv_progress (
+        &ib_ptl->super,
         request,
         hdr->hdr_frag.hdr_frag_length,
         hdr->hdr_frag.hdr_frag_length);
@@ -179,28 +171,26 @@ static void mca_ptl_ib_last_frag(mca_ptl_base_module_t *module,
  *
  */

-void mca_ptl_ib_process_recv(mca_ptl_base_module_t *module, void* addr)
+void mca_ptl_ib_process_recv(mca_ptl_ib_module_t *ib_ptl, void* addr)
 {
     ib_buffer_t *ib_buf;
     mca_ptl_base_header_t *header;

-    D_PRINT("");
-
     ib_buf = (ib_buffer_t *) addr;
     header = (mca_ptl_base_header_t *) &ib_buf->buf[0];

     switch(header->hdr_common.hdr_type) {
         case MCA_PTL_HDR_TYPE_MATCH :
+        case MCA_PTL_HDR_TYPE_RNDV :
         case MCA_PTL_HDR_TYPE_FRAG :
-            mca_ptl_ib_data_frag(module, header);
+            mca_ptl_ib_data_frag(ib_ptl, header);
             break;
         case MCA_PTL_HDR_TYPE_ACK :
-            mca_ptl_ib_ctrl_frag(module, header);
+            mca_ptl_ib_ctrl_frag(ib_ptl, header);
             break;
         case MCA_PTL_HDR_TYPE_FIN :
             A_PRINT("Fin");
-            mca_ptl_ib_last_frag(module, header);
+            mca_ptl_ib_last_frag(ib_ptl, header);
             break;
         default :
             ompi_output(0, "Unknown fragment type");
View file
@@ -40,11 +40,13 @@ struct mca_ptl_ib_recv_frag_t {
 };
 typedef struct mca_ptl_ib_recv_frag_t mca_ptl_ib_recv_frag_t;

+struct mca_ptl_ib_module_t;
+
 void mca_ptl_ib_recv_frag_done (mca_ptl_base_header_t*,
     mca_ptl_base_recv_frag_t*, mca_pml_base_recv_request_t*);

-void mca_ptl_ib_process_recv(mca_ptl_base_module_t* , void*);
+void mca_ptl_ib_process_recv(struct mca_ptl_ib_module_t* , void*);

 #if defined(c_plusplus) || defined(__cplusplus)
 }
 #endif
View file
@@ -49,170 +49,43 @@ static void mca_ptl_ib_send_frag_destruct(mca_ptl_ib_send_frag_t* frag)
 {
 }

-int mca_ptl_ib_send_frag_init(mca_ptl_ib_send_frag_t* sendfrag,
-    struct mca_ptl_base_peer_t* ptl_peer,
-    struct mca_pml_base_send_request_t* sendreq,
-    size_t offset,
-    size_t* size,
-    int flags)
-{
-    size_t size_in = *size;
-    size_t size_out;
-    mca_ptl_base_rendezvous_header_t *hdr;
-    struct iovec iov;
-    int header_length;
-
-    /* Start of the IB buffer */
-    hdr = (mca_ptl_base_rendezvous_header_t *) &sendfrag->ib_buf.buf[0];
-
-    /* Fill up the header for PML to make a match */
-    if(offset == 0) {
-        hdr->hdr_match.hdr_common.hdr_type = MCA_PTL_HDR_TYPE_MATCH;
-        hdr->hdr_match.hdr_common.hdr_flags = flags;
-        hdr->hdr_src_ptr.lval = 0; /* for VALGRIND/PURIFY - REPLACE WITH MACRO */
-
-        /* Ptr to send frag, so incoming ACK can locate the frag */
-        hdr->hdr_src_ptr.pval = sendfrag;
-
-        hdr->hdr_match.hdr_contextid = sendreq->req_base.req_comm->c_contextid;
-        hdr->hdr_match.hdr_src = sendreq->req_base.req_comm->c_my_rank;
-        hdr->hdr_match.hdr_dst = sendreq->req_base.req_peer;
-        hdr->hdr_match.hdr_tag = sendreq->req_base.req_tag;
-        hdr->hdr_match.hdr_msg_length = sendreq->req_bytes_packed;
-        hdr->hdr_match.hdr_msg_seq = sendreq->req_base.req_sequence;
-        header_length = sizeof(mca_ptl_base_rendezvous_header_t);
-    } else {
-        hdr->hdr_match.hdr_common.hdr_type = MCA_PTL_HDR_TYPE_FRAG;
-        hdr->hdr_match.hdr_common.hdr_flags = flags;
-        hdr->hdr_src_ptr.lval = 0; /* for VALGRIND/PURIFY - REPLACE WITH MACRO */
-        hdr->hdr_src_ptr.pval = sendfrag;
-        header_length = sizeof(mca_ptl_base_rendezvous_header_t);
-    }
-
-    /* initialize convertor */
-    if(size_in > 0) {
-        ompi_convertor_t *convertor;
-        int rc, freeAfter;
-        unsigned int iov_count, max_data;
-
-        /* first fragment (eager send) and first fragment of long
-         * protocol can use the convertor initialized on the request,
-         * remaining fragments must copy/reinit the convertor as the
-         * transfer could be in parallel.
-         */
-        if( offset <= mca_ptl_ib_module.super.ptl_first_frag_size ) {
-            convertor = &sendreq->req_convertor;
-        } else {
-            convertor = &sendfrag->frag_send.frag_base.frag_convertor;
-            ompi_convertor_copy(&sendreq->req_convertor, convertor);
-            ompi_convertor_init_for_send( convertor,
-                0,
-                sendreq->req_base.req_datatype,
-                sendreq->req_base.req_count,
-                sendreq->req_base.req_addr,
-                offset,
-                NULL );
-        }
-
-        /* if data is contigous, convertor will return an offset
-         * into users buffer - otherwise will return an allocated buffer
-         * that holds the packed data
-         */
-        iov.iov_base = &sendfrag->ib_buf.buf[header_length];
-        iov.iov_len = size_in;
-        iov_count = 1;
-        max_data = size_in;
-        if((rc = ompi_convertor_pack(convertor,&iov, &iov_count, &max_data, &freeAfter)) < 0) {
-            ompi_output(0, "Unable to pack data");
-            return OMPI_ERROR;
-        }
-
-        /* adjust size and request offset to reflect actual
-         * number of bytes packed by convertor */
-        size_out = iov.iov_len;
-        IB_SET_SEND_DESC_LEN((&sendfrag->ib_buf),
-            (header_length + iov.iov_len));
-    } else {
-        size_out = size_in;
-        IB_SET_SEND_DESC_LEN((&sendfrag->ib_buf),
-            (header_length + size_in));
-    }
-
-    hdr->hdr_frag_length = size_out;
-
-    /* fragment state */
-    sendfrag->frag_send.frag_base.frag_owner =
-        &ptl_peer->peer_module->super;
-    sendfrag->frag_send.frag_request = sendreq;
-    sendfrag->frag_send.frag_base.frag_addr = iov.iov_base;
-    sendfrag->frag_send.frag_base.frag_size = size_out;
-    sendfrag->frag_send.frag_base.frag_peer = ptl_peer;
-    sendfrag->frag_progressed = 0;
-
-    *size = size_out;
-    return OMPI_SUCCESS;
-}
 /*
  * Allocate a IB send descriptor
  *
  */
 mca_ptl_ib_send_frag_t* mca_ptl_ib_alloc_send_frag(
-    mca_ptl_base_module_t* ptl,
+    mca_ptl_ib_module_t* ib_ptl,
     mca_pml_base_send_request_t* request)
 {
-    ompi_free_list_t *flist;
+    ompi_free_list_t *flist = &ib_ptl->send_free;
     ompi_list_item_t *item;
     mca_ptl_ib_send_frag_t *ib_send_frag;

-    flist = &((mca_ptl_ib_module_t *)ptl)->send_free;
-
     item = ompi_list_remove_first(&((flist)->super));
     while(NULL == item) {
         mca_ptl_tstamp_t tstamp = 0;
         D_PRINT("Gone one NULL descriptor ... trying again");
-        ptl->ptl_component->ptlm_progress (tstamp);
+        mca_ptl_ib_component_progress(0);
         item = ompi_list_remove_first (&((flist)->super));
     }

     ib_send_frag = (mca_ptl_ib_send_frag_t *)item;
-    B_PRINT("Allocated frag : %p", ib_send_frag);
     return ib_send_frag;
 }

-int mca_ptl_ib_register_send_frags(mca_ptl_base_module_t *ptl)
+int mca_ptl_ib_send_frag_register(mca_ptl_ib_module_t *ib_ptl)
 {
     int i, rc, num_send_frags;
     ompi_list_item_t *item;
-    ompi_free_list_t *flist;
+    ompi_free_list_t *flist = &ib_ptl->send_free;
     ib_buffer_t *ib_buf_ptr;
     mca_ptl_ib_send_frag_t *ib_send_frag;
-    mca_ptl_ib_state_t *ib_state;
-
-    flist = &((mca_ptl_ib_module_t *)ptl)->send_free;
-    ib_state = ((mca_ptl_ib_module_t *)ptl)->ib_state;

     num_send_frags = ompi_list_get_size(&(flist->super));
     item = ompi_list_get_first(&((flist)->super));

     /* Register the buffers */
@@ -225,7 +98,7 @@ int mca_ptl_ib_register_send_frags(mca_ptl_base_module_t *ptl)
         ib_buf_ptr = (ib_buffer_t *) &ib_send_frag->ib_buf;

-        rc = mca_ptl_ib_register_mem(ib_state->nic, ib_state->ptag,
+        rc = mca_ptl_ib_register_mem(ib_ptl->nic, ib_ptl->ptag,
             (void*) ib_buf_ptr->buf,
             MCA_PTL_IB_FIRST_FRAG_SIZE,
             &ib_buf_ptr->hndl);
@@ -240,182 +113,47 @@ int mca_ptl_ib_register_send_frags(mca_ptl_base_module_t *ptl)
     return OMPI_SUCCESS;
 }
-/*
- * Process RDMA Write completions
- *
- * Just return send fragment to free list
- */
-void mca_ptl_ib_process_rdma_w_comp(mca_ptl_base_module_t *module,
-    void* comp_addr)
-{
-    mca_ptl_ib_module_t *ib_module = (mca_ptl_ib_module_t *)module;
-    mca_ptl_ib_send_frag_t *sendfrag = (mca_ptl_ib_send_frag_t *) comp_addr;
-
-    /* deregister memory region for RDMA write */
-    mca_ptl_ib_deregister_mem_with_registry(ib_module->ib_state,
-        (void *)(unsigned long)(sendfrag->ib_buf.desc.sg_entry.addr),
-        (size_t)(sendfrag->ib_buf.desc.sg_entry.len));
-
-#if 0
-    mca_ptl_ib_send_frag_t *sendfrag;
-    ompi_free_list_t *flist;
-
-    A_PRINT("Free RDMA send descriptor : %p", comp_addr);
-    sendfrag = (mca_ptl_ib_send_frag_t *) comp_addr;
-
-    flist = &(sendfrag->
-        frag_send.frag_base.frag_peer->
-        peer_module->send_free);
-
-    OMPI_FREE_LIST_RETURN(flist,
-        ((ompi_list_item_t *) sendfrag));
-#endif
-}
-
 /*
  * Process send completions
  *
  */
-void mca_ptl_ib_process_send_comp(mca_ptl_base_module_t *module,
-    void* addr)
-{
-    mca_ptl_ib_send_frag_t *sendfrag;
-    mca_ptl_base_header_t *header;
-    mca_pml_base_send_request_t *req;
-    ompi_free_list_t *flist;
-
-    sendfrag = (mca_ptl_ib_send_frag_t *) addr;
-    header = (mca_ptl_base_header_t *) sendfrag->ib_buf.buf;
-
-    req = (mca_pml_base_send_request_t *)
-        sendfrag->frag_send.frag_request;
-
-    flist = &(sendfrag->
-        frag_send.frag_base.frag_peer->
-        peer_module->send_free);
-
-    if(header->hdr_common.hdr_type == MCA_PTL_HDR_TYPE_ACK) {
-        /* Is this an ack descriptor ? */
-        A_PRINT("Completion of send_ack");
-        OMPI_FREE_LIST_RETURN(flist,
-            ((ompi_list_item_t *) sendfrag));
-    } else if(header->hdr_common.hdr_type == MCA_PTL_HDR_TYPE_FIN) {
-        A_PRINT("Completion of fin");
-        module->ptl_send_progress(module,
-            sendfrag->frag_send.frag_request,
-            header->hdr_frag.hdr_frag_length);
-        OMPI_FREE_LIST_RETURN(flist,
-            ((ompi_list_item_t *) sendfrag));
-    } else if(NULL == req) {
-        /* An ack descriptor ? Don't know what to do! */
-        OMPI_FREE_LIST_RETURN(flist,
-            ((ompi_list_item_t *) sendfrag));
-    } else if (0 == (header->hdr_common.hdr_flags & MCA_PTL_FLAGS_ACK)
-        || mca_pml_base_send_request_matched(req)) {
-        module->ptl_send_progress(module,
-            sendfrag->frag_send.frag_request,
-            header->hdr_rndv.hdr_frag_length);
-        /* Return sendfrag to free list */
-        B_PRINT("Return frag : %p", sendfrag);
-        OMPI_FREE_LIST_RETURN(flist,
-            ((ompi_list_item_t *) sendfrag));
-    } else {
-        /* Not going to call progress on this send,
-         * and not free-ing descriptor */
-        A_PRINT("Why should I return sendfrag?");
-    }
-}
+void mca_ptl_ib_send_frag_send_complete(mca_ptl_ib_module_t *ib_ptl,
+    mca_ptl_ib_send_frag_t* sendfrag)
+{
+    mca_ptl_base_header_t *hdr;
+    mca_pml_base_send_request_t* req = sendfrag->frag_send.frag_request;
+    hdr = (mca_ptl_base_header_t *) sendfrag->ib_buf.buf;
+
+    switch(hdr->hdr_common.hdr_type) {
+        case MCA_PTL_HDR_TYPE_MATCH:
+            if (0 == (hdr->hdr_common.hdr_flags & MCA_PTL_FLAGS_ACK)
+                || mca_pml_base_send_request_matched(req)) {
+                ib_ptl->super.ptl_send_progress(&ib_ptl->super,
+                    sendfrag->frag_send.frag_request,
+                    hdr->hdr_rndv.hdr_frag_length);
+                if(req->req_cached == false) {
+                    OMPI_FREE_LIST_RETURN(&ib_ptl->send_free,
+                        ((ompi_list_item_t *) sendfrag));
+                }
+            }
+            break;
+        case MCA_PTL_HDR_TYPE_ACK:
+            OMPI_FREE_LIST_RETURN(&ib_ptl->send_free,
+                ((ompi_list_item_t *) sendfrag));
+            break;
+        case MCA_PTL_HDR_TYPE_FIN:
+            ib_ptl->super.ptl_send_progress(&ib_ptl->super,
+                sendfrag->frag_send.frag_request,
+                hdr->hdr_frag.hdr_frag_length);
+            OMPI_FREE_LIST_RETURN(&ib_ptl->send_free,
+                ((ompi_list_item_t *) sendfrag));
+            break;
+    }
+}
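A sketch of how a completion handler hands a finished send back to this routine; the descriptor id posted in mca_ptl_ib_post_send() round-trips through the completion entry (the handler name is illustrative, not from the source):

    static void example_handle_send_completion(mca_ptl_ib_module_t *ib_ptl,
                                               VAPI_wc_desc_t *comp)
    {
        /* comp->id is the (void*) frag pointer supplied when posting */
        mca_ptl_ib_send_frag_t *sendfrag =
            (mca_ptl_ib_send_frag_t *) (unsigned long) comp->id;
        mca_ptl_ib_send_frag_send_complete(ib_ptl, sendfrag);
    }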
-int mca_ptl_ib_put_frag_init(mca_ptl_ib_send_frag_t *sendfrag,
-    mca_ptl_base_peer_t *ptl_peer,
-    mca_pml_base_send_request_t *req,
-    size_t offset, size_t *size, int flags)
-{
-    int rc;
-    int size_in, size_out;
-    mca_ptl_ib_fin_header_t *hdr;
-
-    size_in = *size;
-
-    hdr = (mca_ptl_ib_fin_header_t *) &sendfrag->ib_buf.buf[0];
-    hdr->frag_hdr.hdr_common.hdr_type = MCA_PTL_HDR_TYPE_FIN;
-    hdr->frag_hdr.hdr_common.hdr_flags = flags;
-    hdr->frag_hdr.hdr_frag_offset = offset;
-    hdr->frag_hdr.hdr_src_ptr.lval = 0;
-    hdr->frag_hdr.hdr_src_ptr.pval = sendfrag;
-    hdr->frag_hdr.hdr_dst_ptr = req->req_peer_match;
-    hdr->frag_hdr.hdr_frag_length = size_in;
-    hdr->mr_addr.lval = req->req_peer_addr.lval;
-    hdr->mr_size = req->req_peer_size;
-
-    if(size_in > 0 && 0) {
-        struct iovec iov;
-        ompi_convertor_t *convertor;
-        unsigned int iov_count, max_data;
-        int freeAfter;
-
-        if( offset <= mca_ptl_ib_module.super.ptl_first_frag_size) {
-            convertor = &req->req_convertor;
-        } else {
-            convertor = &sendfrag->frag_send.frag_base.frag_convertor;
-            ompi_convertor_copy(&req->req_convertor, convertor);
-            ompi_convertor_init_for_send( convertor,
-                0,
-                req->req_base.req_datatype,
-                req->req_base.req_count,
-                req->req_base.req_addr,
-                offset,
-                NULL );
-        }
-
-        iov.iov_base = &sendfrag->ib_buf.buf[sizeof(mca_ptl_ib_fin_header_t)];
-        iov.iov_len = size_in;
-        iov_count = 1;
-        max_data = size_in;
-
-        rc = ompi_convertor_pack(convertor, &iov, &iov_count, &max_data, &freeAfter);
-        if (rc < 0) {
-            ompi_output (0, "[%s:%d] Unable to pack data\n",
-                __FILE__, __LINE__);
-            return rc;
-        }
-        size_out = iov.iov_len;
-    } else {
-        size_out = size_in;
-    }
-
-    *size = size_out;
-    hdr->frag_hdr.hdr_frag_length = size_out;
-    IB_SET_SEND_DESC_LEN((&sendfrag->ib_buf),
-        (sizeof(mca_ptl_ib_fin_header_t)));
-
-    /* fragment state */
-    sendfrag->frag_send.frag_base.frag_owner =
-        &ptl_peer->peer_module->super;
-    sendfrag->frag_send.frag_request = req;
-    sendfrag->frag_send.frag_base.frag_addr =
-        &sendfrag->ib_buf.buf[sizeof(mca_ptl_ib_fin_header_t)];
-    sendfrag->frag_send.frag_base.frag_size = size_out;
-    sendfrag->frag_send.frag_base.frag_peer = ptl_peer;
-    sendfrag->frag_progressed = 0;
-
-    return OMPI_SUCCESS;
-}
View file
@@ -45,27 +45,10 @@ struct mca_ptl_ib_send_frag_t {
 };
 typedef struct mca_ptl_ib_send_frag_t mca_ptl_ib_send_frag_t;

-/**
- * Initialize a fragment descriptor.
- *
- * frag (IN)     Fragment
- * peer (IN)     PTL peer addressing information
- * request (IN)  Send request
- * offset (IN)   Current offset into packed buffer
- * size (IN/OUT) Requested size / actual size returned
- * flags (IN)
- */
-int mca_ptl_ib_send_frag_init(
-    mca_ptl_ib_send_frag_t*,
-    struct mca_ptl_base_peer_t*,
-    struct mca_pml_base_send_request_t*,
-    size_t offset,
-    size_t* size,
-    int flags);
+struct mca_ptl_ib_module_t;

 /**
- * Initialize a fragment descriptor.
+ * Allocate a fragment descriptor.
  *
  * request (IN)  PML base send request
  * ptl (IN)      PTL module
@@ -74,20 +57,12 @@ int mca_ptl_ib_send_frag_init(
  */
 mca_ptl_ib_send_frag_t* mca_ptl_ib_alloc_send_frag(
-    mca_ptl_base_module_t* ptl,
+    struct mca_ptl_ib_module_t* ib_ptl,
     mca_pml_base_send_request_t* request);

-int mca_ptl_ib_register_send_frags(mca_ptl_base_module_t *ptl);
-
-void mca_ptl_ib_process_send_comp(mca_ptl_base_module_t *,
-    void*);
-
-void mca_ptl_ib_process_rdma_w_comp(mca_ptl_base_module_t *module,
-    void* comp_addr);
-
-int mca_ptl_ib_put_frag_init(mca_ptl_ib_send_frag_t *sendfrag,
-    struct mca_ptl_base_peer_t *ptl_peer,
-    struct mca_pml_base_send_request_t *req,
-    size_t offset, size_t *size, int flags);
+int mca_ptl_ib_send_frag_register(struct mca_ptl_ib_module_t *ptl);
+
+void mca_ptl_ib_send_frag_send_complete(struct mca_ptl_ib_module_t *ptl,
+    mca_ptl_ib_send_frag_t*);

 #if defined(c_plusplus) || defined(__cplusplus)
 }
View file
@@ -38,11 +38,8 @@ OBJ_CLASS_DECLARATION(mca_ptl_ib_send_request_t);
  */
 struct mca_ptl_ib_send_request_t {
     mca_pml_base_send_request_t super;
     mca_ptl_ib_send_frag_t *req_frag;
-    /* first fragment */
-    char req_buf[8];
-    /* temporary buffer to hold VAPI_rkey_t */
+    VAPI_rkey_t req_key;
 };
 typedef struct mca_ptl_ib_send_request_t mca_ptl_ib_send_request_t;