This commit was SVN r5977.
This commit is contained in:
Galen Shipman 2005-06-07 21:30:14 +00:00
parent 7e2fc97eab
commit 94a53b8f11
9 changed files with 21 additions and 783 deletions

View file

@@ -31,11 +31,7 @@ libmca_bmi_ib_la_SOURCES = \
     bmi_ib_priv.h \
     bmi_ib_proc.c \
     bmi_ib_proc.h \
-    bmi_ib_vapi.h \
-    bmi_ib_sendfrag.c \
-    bmi_ib_sendfrag.h \
-    bmi_ib_recvfrag.c \
-    bmi_ib_recvfrag.h
+    bmi_ib_vapi.h
 
 # Make the output library in this directory, and name it either
 # mca_<type>_<name>.la (for DSO builds) or libmca_<type>_<name>.la

View file

@@ -180,7 +180,8 @@ mca_bmi_base_descriptor_t* mca_bmi_ib_prepare_src(
     size_t* size
 )
 {
+    return OMPI_SUCCESS;
 }
 
 /**

View file

@ -31,8 +31,6 @@
#include "mca/mpool/base/base.h"
#include "bmi_ib.h"
#include "bmi_ib_frag.h"
#include "bmi_ib_sendfrag.h"
#include "bmi_ib_recvfrag.h"
#include "bmi_ib_endpoint.h"
mca_bmi_ib_component_t mca_bmi_ib_component = {
@@ -343,73 +341,50 @@ int mca_bmi_ib_component_progress()
             ompi_output(0, "Got error : %s, Vendor code : %d Frag : %p",
                         VAPI_wc_status_sym(comp.status),
                         comp.vendor_err_syndrome, comp.id);
-            comp_type = IB_COMP_ERROR;
-            comp_addr = NULL;
-        } else {
-            if(VAPI_CQE_SQ_SEND_DATA == comp.opcode) {
-                comp_type = IB_COMP_SEND;
-                comp_addr = (void*) (unsigned long) comp.id;
-            } else if(VAPI_CQE_RQ_SEND_DATA == comp.opcode) {
-                comp_type = IB_COMP_RECV;
-                comp_addr = (void*) (unsigned long) comp.id;
-            } else if(VAPI_CQE_SQ_RDMA_WRITE == comp.opcode) {
-                comp_type = IB_COMP_RDMA_W;
-                comp_addr = (void*) (unsigned long) comp.id;
-            } else {
-                ompi_output(0, "VAPI_poll_cq: returned unknown opcode : %d\n",
-                            comp.opcode);
-                comp_type = IB_COMP_ERROR;
-                comp_addr = NULL;
-            }
-        }
-    } else {
-        /* No completions from the network */
-        comp_type = IB_COMP_NOTHING;
-        comp_addr = NULL;
-    }
-    /* Handle n/w completions */
-    switch(comp_type) {
-    case IB_COMP_SEND :
+            switch(comp.opcode) {
+            case VAPI_CQE_SQ_SEND_DATA :
                 /* Process a completed send */
+                frag = (mca_bmi_ib_frag_t*) comp.id;
+                frag->base.des_cbfunc(&ib_bmi->super,(mca_bmi_base_endpoint_t*) &frag->endpoint, &frag->base, frag->rc);
-                mca_bmi_ib_sendfrag_complete(ib_bmi, (mca_bmi_ib_frag_t*)comp_addr);
                 count++;
                 break;
-    case IB_COMP_RECV :
+            case VAPI_CQE_RQ_SEND_DATA:
                 D_PRINT(0, "%s:%d ib recv under redesign\n", __FILE__, __LINE__);
-                frag = (mca_bmi_ib_frag_t*) comp_addr;
+                frag = (mca_bmi_ib_frag_t*) comp.id;
                 frag->segment.seg_len = comp.byte_len-sizeof(mca_bmi_ib_header_t);
                 /* advance the segment address past the header and subtract from the length..*/
                 ib_bmi->ib_reg[frag->hdr->tag].cbfunc(&ib_bmi->super, frag->hdr->tag, &frag->base, ib_bmi->ib_reg[frag->hdr->tag].cbdata);
-                OMPI_FREE_LIST_RETURN(&ib_bmi->recv_free, (ompi_free_list_item_t*)comp_addr);
+                OMPI_FREE_LIST_RETURN(&ib_bmi->recv_free, (ompi_free_list_item_t*)comp.id);
                 if(OMPI_THREAD_ADD32(&ib_bmi->rr_posted, -1) <= mca_bmi_ib_component.ib_rr_buf_min)
                     mca_bmi_ib_endpoint_post_rr(mca_bmi_ib_component.ib_rr_buf_max - ib_bmi->rr_posted,
-                        ((mca_bmi_ib_recv_frag_t*)comp_addr)->endpoint);
+                        ((mca_bmi_ib_recv_frag_t*)comp.id)->endpoint);
                 count++;
                 break;
-    case IB_COMP_RDMA_W :
+            case VAPI_CQE_SQ_RDMA_WRITE:
                 ompi_output(0, "%s:%d RDMA not implemented\n", __FILE__,__LINE__);
                 count++;
                 break;
-    case IB_COMP_NOTHING:
-        break;
             default:
                 ompi_output(0, "Errorneous network completion");
                 break;
             }
         }
     }
     return count;
 }

View file

@@ -1,534 +0,0 @@
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University.
* All rights reserved.
* Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
* All rights reserved.
* Copyright (c) 2004 The Ohio State University.
* All rights reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include <sys/time.h>
#include <time.h>
#include "include/types.h"
#include "mca/pml/base/pml_base_sendreq.h"
#include "mca/ns/base/base.h"
#include "mca/oob/base/base.h"
#include "mca/rml/rml.h"
#include "mca/errmgr/errmgr.h"
#include "dps/dps.h"
#include "bmi_ib.h"
#include "bmi_ib_addr.h"
#include "bmi_ib_peer.h"
#include "bmi_ib_proc.h"
#include "bmi_ib_priv.h"
#include "bmi_ib_sendfrag.h"
static void mca_bmi_ib_peer_construct(mca_bmi_base_endpoint_t* peer);
static void mca_bmi_ib_peer_destruct(mca_bmi_base_endpoint_t* peer);
OBJ_CLASS_INSTANCE(mca_bmi_ib_endpoint_t,
ompi_list_item_t, mca_bmi_ib_peer_construct,
mca_bmi_ib_peer_destruct);
/*
* Initialize state of the peer instance.
*
*/
static void mca_bmi_ib_peer_construct(mca_bmi_base_endpoint_t* peer)
{
peer->peer_bmi = 0;
peer->peer_proc = 0;
peer->peer_tstamp = 0.0;
peer->peer_state = MCA_PTL_IB_CLOSED;
peer->peer_retries = 0;
OBJ_CONSTRUCT(&peer->peer_send_lock, ompi_mutex_t);
OBJ_CONSTRUCT(&peer->peer_recv_lock, ompi_mutex_t);
OBJ_CONSTRUCT(&peer->pending_send_frags, ompi_list_t);
}
/*
* Destroy a peer
*
*/
static void mca_bmi_ib_peer_destruct(mca_bmi_base_endpoint_t* peer)
{
}
/*
* Send connection information to remote peer using OOB
*
*/
static void mca_bmi_ib_peer_send_cb(
int status,
orte_process_name_t* peer,
orte_buffer_t* buffer,
orte_rml_tag_t tag,
void* cbdata)
{
OBJ_RELEASE(buffer);
}
static int mca_bmi_ib_peer_send_connect_req(mca_bmi_base_endpoint_t* peer)
{
orte_buffer_t* buffer = OBJ_NEW(orte_buffer_t);
int rc;
if(NULL == buffer) {
ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
return ORTE_ERR_OUT_OF_RESOURCE;
}
/* pack the info in the send buffer */
rc = orte_dps.pack(buffer, &peer->lcl_qp_prop.qp_num, 1, ORTE_UINT32);
if(rc != ORTE_SUCCESS) {
ORTE_ERROR_LOG(rc);
return rc;
}
rc = orte_dps.pack(buffer, &peer->peer_bmi->port.lid, 1, ORTE_UINT32);
/* send to peer */
rc = orte_rml.send_buffer_nb(&peer->peer_proc->proc_guid, buffer, ORTE_RML_TAG_DYNAMIC-1, 0,
mca_bmi_ib_peer_send_cb, NULL);
if(rc < 0) {
ORTE_ERROR_LOG(rc);
return rc;
}
return OMPI_SUCCESS;
}
/*
* Send connect ACK to remote peer
*
*/
static int mca_bmi_ib_peer_send_connect_ack(mca_bmi_base_endpoint_t* peer)
{
orte_buffer_t* buffer = OBJ_NEW(orte_buffer_t);
int rc;
uint32_t zero = 0;
/* pack the info in the send buffer */
if(ORTE_SUCCESS != (rc = orte_dps.pack(buffer, &zero, 1, ORTE_UINT32))) {
ORTE_ERROR_LOG(rc);
return rc;
}
if(ORTE_SUCCESS != (rc = orte_dps.pack(buffer, &zero, 1, ORTE_UINT32))) {
ORTE_ERROR_LOG(rc);
return rc;
}
/* send to peer */
rc = orte_rml.send_buffer_nb(&peer->peer_proc->proc_guid, buffer, ORTE_RML_TAG_DYNAMIC-1, 0,
mca_bmi_ib_peer_send_cb, NULL);
if(rc < 0) {
ORTE_ERROR_LOG(rc);
return rc;
}
}
/*
* Set remote connection info
*
* XXX: Currently size is unutilized, this shall change
* as soon as we add more info to be exchanged at connection
* setup.
*
*/
static int mca_bmi_ib_peer_set_remote_info(mca_bmi_base_endpoint_t* peer, orte_buffer_t* buffer)
{
int rc;
size_t cnt = 1;
rc = orte_dps.unpack(buffer, &peer->rem_qp_num, &cnt, ORTE_UINT32);
if(ORTE_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
return rc;
}
rc = orte_dps.unpack(buffer, &peer->rem_lid, &cnt, ORTE_UINT32);
if(ORTE_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
return rc;
}
D_PRINT("Received QP num = %d, LID = %d",
peer->rem_qp_num,
peer->rem_lid);
return ORTE_SUCCESS;
}
static int mca_bmi_ib_peer_init(
mca_bmi_ib_endpoint_t *peer)
{
return OMPI_SUCCESS;
}
/*
* Start to connect to the peer. We send our Queue Pair
* information over the TCP OOB communication mechanism.
* On completion of our send, a send completion handler
* is called.
*
*/
static int mca_bmi_ib_peer_start_connect(mca_bmi_base_endpoint_t* peer)
{
mca_bmi_ib_module_t* ib_bmi = peer->peer_bmi;
int rc;
/* Create the Queue Pair */
if(OMPI_SUCCESS != (rc = mca_bmi_ib_create_qp(ib_bmi->nic,
ib_bmi->ptag,
ib_bmi->cq_hndl,
ib_bmi->cq_hndl,
&peer->lcl_qp_hndl,
&peer->lcl_qp_prop,
VAPI_TS_RC))) {
ompi_output(0, "[%lu,%lu,%lu] %s:%d errcode %d\n",
ORTE_NAME_ARGS(orte_process_info.my_name), __FILE__,__LINE__,rc);
return rc;
}
/* Send connection info over to remote peer */
peer->peer_state = MCA_BMI_IB_CONNECTING;
if(OMPI_SUCCESS != (rc = mca_bmi_ib_peer_send_connect_req(peer))) {
ompi_output(0, "[%lu,%lu,%lu] %s:%d errcode %d\n",
ORTE_NAME_ARGS(orte_process_info.my_name), __FILE__,__LINE__,rc);
return rc;
}
return OMPI_SUCCESS;
}
/*
* Reply to a `start - connect' message
*
*/
static int mca_bmi_ib_peer_reply_start_connect(mca_bmi_ib_endpoint_t *peer, orte_buffer_t* buffer)
{
mca_bmi_ib_module_t* ib_bmi = peer->peer_bmi;
int rc;
/* Create the Queue Pair */
if(OMPI_SUCCESS != (rc = mca_bmi_ib_create_qp(ib_bmi->nic,
ib_bmi->ptag,
ib_bmi->cq_hndl,
ib_bmi->cq_hndl,
&peer->lcl_qp_hndl,
&peer->lcl_qp_prop,
VAPI_TS_RC))) {
ompi_output(0, "[%lu,%lu,%lu] %s:%d errcode %d\n",
ORTE_NAME_ARGS(orte_process_info.my_name), __FILE__,__LINE__,rc);
return rc;
}
/* Set the remote side info */
mca_bmi_ib_peer_set_remote_info(peer, buffer);
/* Connect to peer */
rc = mca_bmi_ib_peer_connect(peer);
if(rc != OMPI_SUCCESS) {
ompi_output(0, "[%lu,%lu,%lu] %s:%d errcode %d\n",
ORTE_NAME_ARGS(orte_process_info.my_name), __FILE__,__LINE__,rc);
return rc;
}
/* Send connection info over to remote peer */
if(OMPI_SUCCESS != (rc = mca_bmi_ib_peer_send_connect_req(peer))) {
ompi_output(0, "[%lu,%lu,%lu] %s:%d errcode %d\n",
ORTE_NAME_ARGS(orte_process_info.my_name), __FILE__,__LINE__,rc);
return rc;
}
return OMPI_SUCCESS;
}
/*
*
*/
static void mca_bmi_ib_peer_connected(mca_bmi_ib_endpoint_t *peer)
{
peer->peer_state = MCA_BMI_IB_CONNECTED;
mca_bmi_ib_progress_send_frags(peer);
}
/*
* Non blocking OOB recv callback.
* Read incoming QP and other info, and if this peer
* is trying to connect, reply with our QP info,
* otherwise try to modify QP's and establish
* reliable connection
*
*/
static void mca_bmi_ib_peer_recv(
int status,
orte_process_name_t* peer,
orte_buffer_t* buffer,
orte_rml_tag_t tag,
void* cbdata)
{
mca_bmi_ib_proc_t *ib_proc;
mca_bmi_ib_endpoint_t *ib_peer;
int peer_state;
int rc;
for(ib_proc = (mca_bmi_ib_proc_t*)
ompi_list_get_first(&mca_bmi_ib_component.ib_procs);
ib_proc != (mca_bmi_ib_proc_t*)
ompi_list_get_end(&mca_bmi_ib_component.ib_procs);
ib_proc = (mca_bmi_ib_proc_t*)ompi_list_get_next(ib_proc)) {
if(ib_proc->proc_guid.vpid == peer->vpid) {
/* Try to get the peer instance of this proc */
/* Limitation: Right now, we have only 1 peer
* for every process. Need several changes, some
* in PML/BMI interface to set this right */
ib_peer = ib_proc->proc_peers[0];
peer_state = ib_peer->peer_state;
/* Update status */
switch(peer_state) {
case MCA_BMI_IB_CLOSED :
/* We had this connection closed before.
* The peer is trying to connect. Move the
* status of this connection to CONNECTING,
* and then reply with our QP information */
if(OMPI_SUCCESS != (rc = mca_bmi_ib_peer_reply_start_connect(ib_peer, buffer))) {
ompi_output(0, "[%lu,%lu,%lu] %s:%d errcode %d\n",
ORTE_NAME_ARGS(orte_process_info.my_name), __FILE__,__LINE__,rc);
break;
}
/* Setup state as connected */
ib_peer->peer_state = MCA_BMI_IB_CONNECT_ACK;
break;
case MCA_BMI_IB_CONNECTING :
mca_bmi_ib_peer_set_remote_info(ib_peer, buffer);
if(OMPI_SUCCESS != (rc = mca_bmi_ib_peer_connect(ib_peer))) {
ompi_output(0, "[%lu,%lu,%lu] %s:%d errcode %d\n",
ORTE_NAME_ARGS(orte_process_info.my_name), __FILE__,__LINE__,rc);
break;
}
/* Setup state as connected */
mca_bmi_ib_peer_connected(ib_peer);
/* Send him an ack */
mca_bmi_ib_peer_send_connect_ack(ib_peer);
break;
case MCA_BMI_IB_CONNECT_ACK:
mca_bmi_ib_peer_connected(ib_peer);
break;
case MCA_BMI_IB_CONNECTED :
break;
default :
ompi_output(0, "Connected -> Connecting not possible.\n");
}
break;
}
}
/* Okay, now that we are done receiving,
* re-post the buffer */
mca_bmi_ib_post_recv();
}
void mca_bmi_ib_post_recv()
{
D_PRINT("");
orte_rml.recv_buffer_nb(
ORTE_RML_NAME_ANY,
ORTE_RML_TAG_DYNAMIC-1,
0,
mca_bmi_ib_peer_recv,
NULL);
}
/*
* Attempt to send a fragment using a given peer. If the peer is not
* connected, queue the fragment and start the connection as required.
*/
int mca_bmi_ib_peer_send(mca_bmi_base_endpoint_t* peer,
mca_bmi_ib_send_frag_t* frag)
{
int rc;
OMPI_THREAD_LOCK(&peer->peer_send_lock);
switch(peer->peer_state) {
case MCA_BMI_IB_CONNECTING:
D_PRINT("Queing because state is connecting");
ompi_list_append(&peer->pending_send_frags,
(ompi_list_item_t *)frag);
rc = OMPI_SUCCESS;
break;
case MCA_BMI_IB_CONNECT_ACK:
D_PRINT("Queuing because waiting for ack");
ompi_list_append(&peer->pending_send_frags,
(ompi_list_item_t *)frag);
rc = OMPI_SUCCESS;
break;
case MCA_BMI_IB_CLOSED:
D_PRINT("Connection to peer closed ... connecting ...");
ompi_list_append(&peer->pending_send_frags,
(ompi_list_item_t *)frag);
rc = mca_bmi_ib_peer_start_connect(peer);
break;
case MCA_BMI_IB_FAILED:
rc = OMPI_ERR_UNREACH;
break;
case MCA_BMI_IB_CONNECTED:
{
mca_bmi_ib_module_t* ib_bmi = peer->peer_bmi;
ompi_list_item_t* item;
A_PRINT("Send to : %d, len : %d, frag : %p",
peer->peer_proc->proc_guid.vpid,
frag->ib_buf.desc.sg_entry.len,
frag);
rc = mca_bmi_ib_post_send(peer->peer_bmi, peer,
&frag->ib_buf, (void*) frag);
while(NULL != (item = ompi_list_remove_first(&ib_bmi->repost))) {
mca_bmi_ib_buffer_repost(ib_bmi->nic, item);
}
break;
}
default:
rc = OMPI_ERR_UNREACH;
}
OMPI_THREAD_UNLOCK(&peer->peer_send_lock);
return rc;
}
void mca_bmi_ib_progress_send_frags(mca_bmi_ib_endpoint_t* peer)
{
ompi_list_item_t *frag_item;
mca_bmi_ib_send_frag_t *sendfrag;
/*Check if peer is connected */
if(peer->peer_state != MCA_BMI_IB_CONNECTED) {
return;
}
/* While there are frags in the list,
* process them */
while(!ompi_list_is_empty(&(peer->pending_send_frags))) {
frag_item = ompi_list_remove_first(&(peer->pending_send_frags));
sendfrag = (mca_bmi_ib_send_frag_t *) frag_item;
/* We need to post this one */
if(mca_bmi_ib_post_send(peer->peer_bmi, peer, &sendfrag->ib_buf,
(void*) sendfrag)
!= OMPI_SUCCESS) {
ompi_output(0, "Error in posting send");
}
}
}
/*
* Complete connection to peer.
*/
int mca_bmi_ib_peer_connect(
mca_bmi_ib_endpoint_t *peer)
{
int rc, i;
VAPI_ret_t ret;
ib_buffer_t *ib_buf_ptr;
mca_bmi_ib_module_t *ib_bmi = peer->peer_bmi;
/* Establish Reliable Connection */
rc = mca_bmi_ib_qp_init(ib_bmi->nic,
peer->lcl_qp_hndl,
peer->rem_qp_num,
peer->rem_lid);
if(rc != OMPI_SUCCESS) {
return rc;
}
/* Allocate resources to this connection */
peer->lcl_recv = (ib_buffer_t*)
malloc(sizeof(ib_buffer_t) * NUM_IB_RECV_BUF);
if(NULL == peer->lcl_recv) {
return OMPI_ERR_OUT_OF_RESOURCE;
}
/* Register the buffers */
for(i = 0; i < NUM_IB_RECV_BUF; i++) {
rc = mca_bmi_ib_register_mem(ib_bmi->nic, ib_bmi->ptag,
(void*) peer->lcl_recv[i].buf,
MCA_BMI_IB_FIRST_FRAG_SIZE,
&peer->lcl_recv[i].hndl);
if(rc != OMPI_SUCCESS) {
return OMPI_ERROR;
}
ib_buf_ptr = &peer->lcl_recv[i];
ib_buf_ptr->qp_hndl = peer->lcl_qp_hndl;
IB_PREPARE_RECV_DESC(ib_buf_ptr);
}
/* Post receives */
for(i = 0; i < NUM_IB_RECV_BUF; i++) {
ret = VAPI_post_rr(ib_bmi->nic,
peer->lcl_qp_hndl,
&peer->lcl_recv[i].desc.rr);
if(VAPI_OK != ret) {
MCA_BMI_IB_VAPI_RET(ret, "VAPI_post_rr");
}
}
return OMPI_SUCCESS;
}

View file

@@ -1,84 +0,0 @@
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University.
* All rights reserved.
* Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
* All rights reserved.
* Copyright (c) 2004 The Ohio State University.
* All rights reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#ifndef MCA_BMI_IB_PEER_H
#define MCA_BMI_IB_PEER_H
#include "class/ompi_list.h"
#include "event/event.h"
#include "mca/pml/pml.h"
#include "mca/bmi/bmi.h"
#include "bmi_ib_recvfrag.h"
#include "bmi_ib_sendfrag.h"
#include "bmi_ib_priv.h"
#if defined(c_plusplus) || defined(__cplusplus)
extern "C" {
#endif
OBJ_CLASS_DECLARATION(mca_bmi_ib_endpoint_t);
/**
* State of IB peer connection.
*/
typedef enum {
/* Defines the state in which this BMI instance
* has started the process of connection */
MCA_BMI_IB_CONNECTING,
/* Waiting for ack from peer */
MCA_BMI_IB_CONNECT_ACK,
/* Connected ... both sender & receiver have
* buffers associated with this connection */
MCA_BMI_IB_CONNECTED,
/* Connection is closed, there are no resources
* associated with this */
MCA_BMI_IB_CLOSED,
/* Maximum number of retries have been used.
* Report failure on send to upper layer */
MCA_BMI_IB_FAILED
} mca_bmi_ib_peer_state_t;
int mca_bmi_ib_peer_send(mca_bmi_base_endpoint_t*, mca_bmi_ib_send_frag_t*);
int mca_bmi_ib_peer_connect(mca_bmi_base_endpoint_t*);
void mca_bmi_ib_post_recv(void);
void mca_bmi_ib_progress_send_frags(mca_bmi_ib_endpoint_t*);
#define DUMP_PEER(peer_ptr) { \
ompi_output(0, "[%s:%d] ", __FILE__, __LINE__); \
ompi_output(0, "Dumping peer %d state", \
peer->peer_proc->proc_guid.vpid); \
ompi_output(0, "Local QP hndl : %d", \
peer_ptr->peer_conn->lres->qp_hndl); \
ompi_output(0, "Local QP num : %d", \
peer_ptr->peer_conn->lres->qp_prop.qp_num); \
ompi_output(0, "Remote QP num : %d", \
peer_ptr->peer_conn->rres->qp_num); \
ompi_output(0, "Remote LID : %d", \
peer_ptr->peer_conn->rres->lid); \
}
#if defined(c_plusplus) || defined(__cplusplus)
}
#endif
#endif

View file

@@ -1,31 +0,0 @@
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University.
* All rights reserved.
* Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
* All rights reserved.
* Copyright (c) 2004 The Ohio State University.
* All rights reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "bmi_ib.h"
#include "bmi_ib_frag.h"
#include "bmi_ib_recvfrag.h"
void mca_bmi_ib_process_recv(mca_bmi_ib_module_t *ib_bmi, mca_bmi_ib_recv_frag_t* frag)
{
ompi_output(0, "%s:%d ib mca_bmi_ib_process_recv got back tag %d", __FILE__, __LINE__, frag->hdr->tag);
}

View file

@@ -1,26 +0,0 @@
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University.
* All rights reserved.
* Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
* All rights reserved.
* Copyright (c) 2004 The Ohio State University.
* All rights reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#ifndef MCA_BMI_IB_RECV_FRAG_H
#define MCA_BMI_IB_RECV_FRAG_H
#include "mca/bmi/bmi.h"
void mca_bmi_ib_process_recv(struct mca_bmi_ib_module_t*, struct mca_bmi_ib_recv_frag_t*);
#endif

View file

@@ -1,30 +0,0 @@
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University.
* All rights reserved.
* Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
* All rights reserved.
* Copyright (c) 2004 The Ohio State University.
* All rights reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "include/types.h"
#include "bmi_ib_sendfrag.h"
#include "bmi_ib_frag.h"
#include "mca/bmi/bmi.h"
#include "bmi_ib_endpoint.h"
void mca_bmi_ib_sendfrag_complete( mca_bmi_ib_module_t * ib_bmi, mca_bmi_ib_send_frag_t* frag)
{
frag->base.des_cbfunc(&ib_bmi->super,(mca_bmi_base_endpoint_t*) &frag->endpoint, &frag->base, frag->rc);
}

View file

@@ -1,29 +0,0 @@
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University.
* All rights reserved.
* Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
* All rights reserved.
* Copyright (c) 2004 The Ohio State University.
* All rights reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#ifndef MCA_BMI_IB_SEND_FRAG_H
#define MCA_BMI_IB_SEND_FRAG_H
#include "ompi_config.h"
#include "bmi_ib_frag.h"
#include "bmi_ib.h"
void mca_bmi_ib_sendfrag_complete( mca_bmi_ib_module_t* ib_bmi, mca_bmi_ib_send_frag_t* frag);
#endif