1
1

Organized some IB initialization code.

This commit was SVN r1845.
Этот коммит содержится в:
Sayantan Sur 2004-08-03 02:08:17 +00:00
родитель 71b9c0d362
Коммит d8e33483f6
8 изменённых файлов: 264 добавлений и 148 удалений

Просмотреть файл

@ -59,14 +59,17 @@ int mca_ptl_ib_add_procs(
struct mca_ptl_base_peer_t** peers,
ompi_bitmap_t* reachable)
{
int i, rc;
fprintf(stderr,"[%s:%d] %s\n",
__FILE__, __LINE__, __func__);
int i;
struct ompi_proc_t* ompi_proc;
mca_ptl_ib_proc_t* ptl_proc;
mca_ptl_base_peer_t* ptl_peer;
D_PRINT("Adding %d procs\n", nprocs);
for(i = 0; i < nprocs; i++) {
struct ompi_proc_t* ompi_proc = ompi_procs[i];
mca_ptl_ib_proc_t* ptl_proc = mca_ptl_ib_proc_create(ompi_proc);
mca_ptl_base_peer_t* ptl_peer;
ompi_proc = ompi_procs[i];
ptl_proc = mca_ptl_ib_proc_create(ompi_proc);
if(NULL == ptl_proc) {
return OMPI_ERR_OUT_OF_RESOURCE;

Просмотреть файл

@ -65,8 +65,10 @@ struct mca_ptl_ib_t {
VAPI_pd_hndl_t ptag; /* Protection Domain tag */
VAPI_cq_hndl_t cq_hndl; /* Completion Queue handle */
VAPI_qp_hndl_t *qp_hndl; /* Array of Queue Pair handles */
VAPI_qp_hndl_t ud_scq_hndl;/* UD send completion queue handle */
VAPI_qp_hndl_t ud_rcq_hndl;/* US recv completion queue handle */
VAPI_qp_hndl_t ud_rcq_hndl;/* UD recv completion queue handle */
VAPI_qp_hndl_t ud_qp_hndl; /* UD queue pair handle */
VAPI_qp_prop_t ud_qp_prop; /* UD queue pair properties */
VAPI_rr_desc_t* ud_rr_hndl; /* UD receive descriptor pool */
@ -155,7 +157,6 @@ extern int mca_ptl_ib_add_procs(
ompi_bitmap_t* reachable
);
/**
* PML->PTL notification of change in the process list.
*
@ -242,7 +243,6 @@ extern void mca_ptl_ib_recv_frag_return(
);
/**
* Return a send fragment to the modules free list.
*

Просмотреть файл

@ -91,9 +91,7 @@ static inline int mca_ptl_ib_param_register_int(
int mca_ptl_ib_module_open(void)
{
fprintf(stderr,"[%s:%d] %s\n",
__FILE__, __LINE__, __func__);
fflush(stderr);
D_PRINT("Opening InfiniBand module ...\n");
/* register super module parameters */
mca_ptl_ib.super.ptl_exclusivity =
mca_ptl_ib_param_register_int ("exclusivity", 0);
@ -152,6 +150,9 @@ static int mca_ptl_ib_module_send(void)
ud_qp_addr[i].lid = ptl->port.lid;
}
D_PRINT("ud_qp_addr[0].ud_qp = %d\n", ud_qp_addr[0].ud_qp);
D_PRINT("ud_qp_addr[0].lid = %d\n", ud_qp_addr[0].lid);
rc = mca_base_modex_send(&mca_ptl_ib_module.super.ptlm_version,
ud_qp_addr, size);
@ -172,22 +173,16 @@ mca_ptl_t** mca_ptl_ib_module_init(int *num_ptls,
bool *have_hidden_threads)
{
mca_ptl_t **ptls;
int i, rc;
int i, ret;
uint32_t num_hcas;
VAPI_ret_t ret;
VAPI_hca_id_t* hca_id = NULL;
mca_ptl_ib_t* ptl_ib = NULL;
*num_ptls = 0;
VAPI_cqe_num_t act_num_cqe;
act_num_cqe = 0;
*allow_multi_user_threads = true;
*have_hidden_threads = OMPI_HAVE_THREADS;
fprintf(stderr,"[%s:%d] %s\n",
__FILE__, __LINE__, __func__);
D_PRINT("IB Module Init\n");
/* need to set ompi_using_threads() as ompi_event_init()
* will spawn a thread if supported */
@ -195,44 +190,17 @@ mca_ptl_t** mca_ptl_ib_module_init(int *num_ptls,
ompi_set_using_threads(true);
}
if((rc = ompi_event_init()) != OMPI_SUCCESS) {
if((ret = ompi_event_init()) != OMPI_SUCCESS) {
ompi_output(0, "mca_ptl_ib_module_init: "
"unable to initialize event dispatch thread: %d\n", rc);
"unable to initialize event dispatch thread: %d\n", ret);
return NULL;
}
#if 0
/* initialize free lists */
ompi_free_list_init(&mca_ptl_ib_module.ib_send_requests,
sizeof(mca_ptl_ib_send_request_t),
OBJ_CLASS(mca_ptl_ib_send_request_t),
mca_ptl_ib_module.ib_free_list_num,
mca_ptl_ib_module.ib_free_list_max,
mca_ptl_ib_module.ib_free_list_inc,
NULL); /* use default allocator */
ret = mca_ptl_ib_get_num_hcas(&num_hcas);
ompi_free_list_init(&mca_ptl_ib_module.ib_recv_frags,
sizeof(mca_ptl_ib_recv_frag_t),
OBJ_CLASS(mca_ptl_ib_recv_frag_t),
mca_ptl_ib_module.ib_free_list_num,
mca_ptl_ib_module.ib_free_list_max,
mca_ptl_ib_module.ib_free_list_inc,
NULL); /* use default allocator */
#endif
D_PRINT("Number of HCAs found: %d\n", num_hcas);
/* List all HCAs */
ret = EVAPI_list_hcas(0, &num_hcas, NULL);
/* Don't check for return status, it will be
* VAPI_EAGAIN, we are just trying to get the
* number of HCAs */
if (0 == num_hcas) {
return NULL;
}
hca_id = (VAPI_hca_id_t*) malloc(sizeof(VAPI_hca_id_t) * num_hcas);
if(NULL == hca_id) {
if ((0 == num_hcas) || (OMPI_SUCCESS != ret)) {
return NULL;
}
@ -241,24 +209,33 @@ mca_ptl_t** mca_ptl_ib_module_init(int *num_ptls,
/*mca_ptl_ib_module.ib_num_hcas = num_hcas;*/
mca_ptl_ib_module.ib_num_hcas = 1;
mca_ptl_ib_module.ib_num_ptls = 1;
mca_ptl_ib_module.ib_max_ptls = 1;
/* Now get the hca_id from underlying VAPI layer */
ret = EVAPI_list_hcas(mca_ptl_ib_module.ib_num_hcas,
&num_hcas, hca_id);
/* Number of InfiniBand PTLs is equal to
* number of physical HCAs. Is this always the
* case, or under some conditions, there can be
* multiple PTLs for one HCA? */
mca_ptl_ib_module.ib_num_ptls =
mca_ptl_ib_module.ib_num_hcas;
/* HACK : Don't check return status now,
* just opening one ptl ... */
/*MCA_PTL_IB_VAPI_RET(NULL, ret, "EVAPI_list_hcas"); */
/* Not sure what max_ptls does */
mca_ptl_ib_module.ib_max_ptls =
mca_ptl_ib_module.ib_num_hcas;
D_PRINT("num_hcas: %d, num_ptls: %d, max_ptls: %d\n",
mca_ptl_ib_module.ib_num_hcas,
mca_ptl_ib_module.ib_num_ptls,
mca_ptl_ib_module.ib_max_ptls);
/* Number of PTLs are equal to number of HCAs */
ptl_ib = (mca_ptl_ib_t*) malloc(sizeof(mca_ptl_ib_t) *
mca_ptl_ib_module.ib_num_ptls);
if(NULL == ptl_ib) {
return NULL;
}
/* Zero out the PTL struct memory region */
memset((void*)ptl_ib, 0, sizeof(mca_ptl_ib_t) *
mca_ptl_ib_module.ib_num_ptls);
/* Copy the function pointers to the IB ptls */
for(i = 0; i< mca_ptl_ib_module.ib_num_ptls; i++) {
memcpy((void*)&ptl_ib[i],
@ -266,6 +243,60 @@ mca_ptl_t** mca_ptl_ib_module_init(int *num_ptls,
sizeof(mca_ptl_ib));
}
D_PRINT("About to initialize IB ptls ...\n");
/* For each ptl, do this */
for(i = 0; i < mca_ptl_ib_module.ib_num_ptls; i++) {
if(mca_ptl_ib_get_hca_id(i, &ptl_ib[i].hca_id)
!= OMPI_SUCCESS) {
return NULL;
}
D_PRINT("hca_id: %s\n", ptl_ib[i].hca_id);
if(mca_ptl_ib_get_hca_hndl(ptl_ib[i].hca_id, &ptl_ib[i].nic)
!= OMPI_SUCCESS) {
return NULL;
}
D_PRINT("hca_hndl: %d\n", ptl_ib[i].nic);
/* Each HCA uses only port 1. Need to change
* this so that each ptl can choose different
* ports */
if(mca_ptl_ib_query_hca_prop(ptl_ib[i].nic, &ptl_ib[i].port)
!= OMPI_SUCCESS) {
return NULL;
}
D_PRINT("LID: %d\n", ptl_ib[i].port.lid);
if(mca_ptl_ib_alloc_pd(ptl_ib[i].nic, &ptl_ib[i].ptag)
!= OMPI_SUCCESS) {
return NULL;
}
D_PRINT("Protection Domain: %d\n", ptl_ib[i].ptag);
if(mca_ptl_ib_create_cq(ptl_ib[i].nic, &ptl_ib[i].cq_hndl)
!= OMPI_SUCCESS) {
return NULL;
}
D_PRINT("CQ handle: %d\n", ptl_ib[i].cq_hndl);
if(mca_ptl_ib_ud_cq_init(&ptl_ib[i]) != OMPI_SUCCESS) {
return NULL;
}
if(mca_ptl_ib_ud_qp_init(&ptl_ib[i]) != OMPI_SUCCESS) {
return NULL;
}
}
/* Allocate list of IB ptl pointers */
mca_ptl_ib_module.ib_ptls = (struct mca_ptl_ib_t**)
malloc(mca_ptl_ib_module.ib_num_ptls *
@ -279,54 +310,6 @@ mca_ptl_t** mca_ptl_ib_module_init(int *num_ptls,
mca_ptl_ib_module.ib_ptls[i] = &ptl_ib[i];
}
/* Open the HCAs associated with ptls */
for(i = 0; i < mca_ptl_ib_module.ib_num_ptls; i++) {
strncpy(ptl_ib[i].hca_id, hca_id[i],
sizeof(VAPI_hca_id_t));
/* Open the HCA */
ret = EVAPI_get_hca_hndl(ptl_ib[i].hca_id,
&ptl_ib[i].nic);
MCA_PTL_IB_VAPI_RET(NULL, ret, "EVAPI_get_hca_hndl");
/* Querying for port properties */
ret = VAPI_query_hca_port_prop(ptl_ib[i].nic,
(IB_port_t)DEFAULT_PORT,
(VAPI_hca_port_t *)&(ptl_ib[i].port));
MCA_PTL_IB_VAPI_RET(NULL, ret, "VAPI_query_hca_port_prop");
}
/* Create the Completion Queue & Protection handles
* before creating the UD Queue Pair */
for(i = 0; i < mca_ptl_ib_module.ib_num_ptls; i++) {
ret = VAPI_alloc_pd(ptl_ib[i].nic, &ptl_ib[i].ptag);
MCA_PTL_IB_VAPI_RET(NULL, ret, "VAPI_alloc_pd");
ret = VAPI_create_cq(ptl_ib[i].nic, DEFAULT_CQ_SIZE,
&ptl_ib[i].cq_hndl, &act_num_cqe);
MCA_PTL_IB_VAPI_RET(NULL, ret, "VAPI_create_cq");
/* If we didn't get any CQ entries, then return
* failure */
if(act_num_cqe == 0) {
return NULL;
}
ret = mca_ptl_ib_ud_cq_init(&ptl_ib[i]);
if(ret != VAPI_OK) {
return NULL;
}
ret = mca_ptl_ib_ud_qp_init(&ptl_ib[i]);
if(ret != VAPI_OK) {
return NULL;
}
}
if(mca_ptl_ib_module_send() != OMPI_SUCCESS) {
return NULL;
}
@ -344,10 +327,6 @@ mca_ptl_t** mca_ptl_ib_module_init(int *num_ptls,
*num_ptls = mca_ptl_ib_module.ib_num_ptls;
fprintf(stderr,"ptls = %p, num_ptls = %d\n",
ptls, *num_ptls);
free(hca_id);
return ptls;
}

Просмотреть файл

@ -2,19 +2,19 @@
#include "ptl_ib.h"
#include "ptl_ib_priv.h"
VAPI_ret_t mca_ptl_ib_ud_cq_init(mca_ptl_ib_t* ptl_ib)
int mca_ptl_ib_ud_cq_init(mca_ptl_ib_t* ptl_ib)
{
VAPI_ret_t ret;
VAPI_cqe_num_t act_num_cqe = 0;
ret = VAPI_create_cq(ptl_ib->nic, DEFAULT_CQ_SIZE,
&(ptl_ib->ud_scq_hndl), &act_num_cqe);
MCA_PTL_IB_VAPI_RET(ret, ret, "VAPI_create_cq");
if(act_num_cqe == 0) {
/* Couldn't give any CQ entries, not
* enough resources */
return VAPI_EAGAIN;
D_PRINT("UD Send CQ handle :%d\n", ptl_ib->ud_scq_hndl);
if((VAPI_OK != ret) || (0 == act_num_cqe)) {
MCA_PTL_IB_VAPI_RET(ret, "VAPI_create_cq");
return OMPI_ERROR;
}
/* Send completion queue was allocated successfully,
@ -24,20 +24,20 @@ VAPI_ret_t mca_ptl_ib_ud_cq_init(mca_ptl_ib_t* ptl_ib)
ret = VAPI_create_cq(ptl_ib->nic, DEFAULT_CQ_SIZE,
&(ptl_ib->ud_rcq_hndl), &act_num_cqe);
MCA_PTL_IB_VAPI_RET(ret, ret, "VAPI_create_cq");
if(act_num_cqe == 0) {
/* Couldn't give any CQ entries, not
* enough resources */
return VAPI_EAGAIN;
D_PRINT("UD Recv CQ handle :%d\n", ptl_ib->ud_rcq_hndl);
if((VAPI_OK != ret) || (act_num_cqe == 0)) {
MCA_PTL_IB_VAPI_RET(ret, "VAPI_create_cq");
return OMPI_ERROR;
}
return VAPI_OK;
return OMPI_SUCCESS;
}
/* Set up UD Completion Queue and Queue pair */
VAPI_ret_t mca_ptl_ib_ud_qp_init(mca_ptl_ib_t* ptl_ib)
int mca_ptl_ib_ud_qp_init(mca_ptl_ib_t* ptl_ib)
{
VAPI_qp_init_attr_t qp_init_attr;
VAPI_qp_attr_t qp_attr;
@ -68,7 +68,10 @@ VAPI_ret_t mca_ptl_ib_ud_qp_init(mca_ptl_ib_t* ptl_ib)
ret = VAPI_create_qp(ptl_ib->nic, &qp_init_attr,
&(ptl_ib->ud_qp_hndl), &(ptl_ib->ud_qp_prop));
MCA_PTL_IB_VAPI_RET(ret, ret, "VAPI_create_qp");
if(VAPI_OK != ret) {
MCA_PTL_IB_VAPI_RET(ret, "VAPI_create_qp");
return OMPI_ERROR;
}
D_PRINT("UD QP[%d] created ..hndl=%d\n",
ptl_ib->ud_qp_prop.qp_num,
@ -89,7 +92,10 @@ VAPI_ret_t mca_ptl_ib_ud_qp_init(mca_ptl_ib_t* ptl_ib)
ptl_ib->ud_qp_hndl, &qp_attr,
&qp_attr_mask, &qp_cap);
MCA_PTL_IB_VAPI_RET(ret, ret, "VAPI_modify_qp");
if(VAPI_OK != ret) {
MCA_PTL_IB_VAPI_RET(ret, "VAPI_modify_qp");
return OMPI_ERROR;
}
D_PRINT("Modified UD to init..Qp\n");
@ -103,7 +109,11 @@ VAPI_ret_t mca_ptl_ib_ud_qp_init(mca_ptl_ib_t* ptl_ib)
ret = VAPI_modify_qp(ptl_ib->nic,
ptl_ib->ud_qp_hndl, &qp_attr,
&qp_attr_mask, &qp_cap);
MCA_PTL_IB_VAPI_RET(ret, ret, "VAPI_modify_qp");
if(VAPI_OK != ret) {
MCA_PTL_IB_VAPI_RET(ret, "VAPI_modify_qp");
return OMPI_ERROR;
}
D_PRINT("Modified UD to RTR..Qp\n");
@ -120,10 +130,132 @@ VAPI_ret_t mca_ptl_ib_ud_qp_init(mca_ptl_ib_t* ptl_ib)
ptl_ib->ud_qp_hndl, &qp_attr,
&qp_attr_mask, &qp_cap);
MCA_PTL_IB_VAPI_RET(ret, ret, "VAPI_modify_qp");
if(VAPI_OK != ret) {
MCA_PTL_IB_VAPI_RET(ret, "VAPI_modify_qp");
return OMPI_ERROR;
}
D_PRINT("Modified UD to RTS..Qp\n");
/* Everything was fine ... return success! */
return VAPI_OK;
return OMPI_SUCCESS;
}
/* Query the VAPI layer for the number of HCAs on this node.
 *
 * num_hcas : [out] count of HCAs reported by EVAPI_list_hcas
 *
 * Returns OMPI_SUCCESS on success, OMPI_ERROR on a VAPI failure.
 */
int mca_ptl_ib_get_num_hcas(uint32_t* num_hcas)
{
    VAPI_ret_t status;

    /* Passing a zero-length NULL list makes EVAPI_list_hcas report
     * only the count; in that mode VAPI_EAGAIN is the expected
     * "buffer too small" status, so it is not treated as a failure */
    status = EVAPI_list_hcas(0, num_hcas, NULL);

    if((VAPI_OK == status) || (VAPI_EAGAIN == status)) {
        return OMPI_SUCCESS;
    }

    MCA_PTL_IB_VAPI_RET(status, "EVAPI_list_hcas");
    return OMPI_ERROR;
}
/* This function returns the hca_id for each PTL
* in a round robin manner. Each PTL gets a different
* HCA id ...
*
* If num PTLs > num HCAs, then those ptls will be
* assigned HCA ids beginning from 0 again.
*/
/* This function returns the hca_id for each PTL
 * in a round robin manner. Each PTL gets a different
 * HCA id ...
 *
 * If num PTLs > num HCAs, then those ptls will be
 * assigned HCA ids beginning from 0 again.
 *
 * num    : index of the requesting ptl
 * hca_id : [out] the HCA id assigned to that ptl
 *
 * Returns OMPI_SUCCESS, OMPI_ERR_OUT_OF_RESOURCE on allocation
 * failure, or OMPI_ERROR on a VAPI failure / no HCAs found.
 */
int mca_ptl_ib_get_hca_id(int num, VAPI_hca_id_t* hca_id)
{
    /* NOTE(review): EVAPI_list_hcas likely expects u_int32_t* for
     * the count — int matches on common ABIs but verify against
     * the VAPI headers */
    int num_hcas;
    VAPI_ret_t ret;
    VAPI_hca_id_t* hca_ids = NULL;

    hca_ids = (VAPI_hca_id_t*) malloc(mca_ptl_ib_module.ib_num_hcas *
            sizeof(VAPI_hca_id_t));
    if(NULL == hca_ids) {
        /* Was previously dereferenced unchecked */
        return OMPI_ERR_OUT_OF_RESOURCE;
    }

    /* Now get the hca_id from underlying VAPI layer */
    ret = EVAPI_list_hcas(mca_ptl_ib_module.ib_num_hcas,
            &num_hcas, hca_ids);

    /* HACK: right now, I have put VAPI_EAGAIN as
     * acceptable condition since we are trying to have
     * only 1 ptl support */
    if((VAPI_OK != ret) && (VAPI_EAGAIN != ret)) {
        MCA_PTL_IB_VAPI_RET(ret, "EVAPI_list_hcas");
        free(hca_ids);  /* was leaked on this error path */
        return OMPI_ERROR;
    }

    if(0 == num_hcas) {
        /* Guard the modulo below against division by zero,
         * possible since VAPI_EAGAIN is accepted above */
        free(hca_ids);
        return OMPI_ERROR;
    }

    /* Round-robin: wrap the ptl index onto the available HCAs */
    num = num % num_hcas;
    memcpy(hca_id, hca_ids[num], sizeof(VAPI_hca_id_t));

    free(hca_ids);
    return OMPI_SUCCESS;
}
/* Open the HCA named by hca_id and return its handle.
 *
 * hca_id   : VAPI identifier of the HCA to open
 * hca_hndl : [out] handle of the opened HCA
 *
 * Returns OMPI_SUCCESS on success, OMPI_ERROR on a VAPI failure.
 */
int mca_ptl_ib_get_hca_hndl(VAPI_hca_id_t hca_id,
        VAPI_hca_hndl_t* hca_hndl)
{
    VAPI_ret_t vapi_status;

    /* Open the HCA */
    vapi_status = EVAPI_get_hca_hndl(hca_id, hca_hndl);
    if(VAPI_OK == vapi_status) {
        return OMPI_SUCCESS;
    }

    MCA_PTL_IB_VAPI_RET(vapi_status, "EVAPI_get_hca_hndl");
    return OMPI_ERROR;
}
/* Query the port properties of an opened HCA.
 *
 * nic  : handle of the opened HCA
 * port : [out] properties of DEFAULT_PORT on that HCA
 *
 * Note: always queries DEFAULT_PORT; per-ptl port selection is
 * a known TODO (see the caller in module_init).
 *
 * Returns OMPI_SUCCESS on success, OMPI_ERROR on a VAPI failure.
 */
int mca_ptl_ib_query_hca_prop(VAPI_hca_hndl_t nic,
        VAPI_hca_port_t* port)
{
    VAPI_ret_t vapi_status;

    /* Querying for port properties */
    vapi_status = VAPI_query_hca_port_prop(nic,
            (IB_port_t)DEFAULT_PORT,
            port);
    if(VAPI_OK == vapi_status) {
        return OMPI_SUCCESS;
    }

    MCA_PTL_IB_VAPI_RET(vapi_status, "VAPI_query_hca_port_prop");
    return OMPI_ERROR;
}
/* Allocate a protection domain on the given HCA.
 *
 * nic  : handle of the opened HCA
 * ptag : [out] tag of the allocated protection domain
 *
 * Returns OMPI_SUCCESS on success, OMPI_ERROR on a VAPI failure.
 */
int mca_ptl_ib_alloc_pd(VAPI_hca_hndl_t nic,
        VAPI_pd_hndl_t* ptag)
{
    VAPI_ret_t status = VAPI_alloc_pd(nic, ptag);

    if(VAPI_OK != status) {
        MCA_PTL_IB_VAPI_RET(status, "VAPI_alloc_pd");
        return OMPI_ERROR;
    }
    return OMPI_SUCCESS;
}
/* Create a completion queue on the given HCA.
 *
 * nic     : handle of the opened HCA
 * cq_hndl : [out] handle of the created completion queue
 *
 * Returns OMPI_SUCCESS, or OMPI_ERROR if VAPI fails or the queue
 * was created with zero entries (out of resources).
 */
int mca_ptl_ib_create_cq(VAPI_hca_hndl_t nic,
        VAPI_cq_hndl_t* cq_hndl)
{
    /* Use VAPI_cqe_num_t (not int) for the out-parameter, matching
     * the type VAPI_create_cq writes and the usage in
     * mca_ptl_ib_ud_cq_init */
    VAPI_cqe_num_t act_num_cqe = 0;
    VAPI_ret_t ret;

    ret = VAPI_create_cq(nic, DEFAULT_CQ_SIZE,
            cq_hndl, &act_num_cqe);

    /* A CQ with zero entries is unusable even if the call
     * nominally succeeded */
    if((VAPI_OK != ret) || (0 == act_num_cqe)) {
        MCA_PTL_IB_VAPI_RET(ret, "VAPI_create_cq");
        return OMPI_ERROR;
    }

    return OMPI_SUCCESS;
}

Просмотреть файл

@ -4,7 +4,13 @@
#include "ptl_ib_vapi.h"
#include "ptl_ib.h"
VAPI_ret_t mca_ptl_ib_ud_cq_init(mca_ptl_ib_t*);
VAPI_ret_t mca_ptl_ib_ud_qp_init(mca_ptl_ib_t*);
int mca_ptl_ib_ud_cq_init(mca_ptl_ib_t*);
int mca_ptl_ib_ud_qp_init(mca_ptl_ib_t*);
int mca_ptl_ib_get_num_hcas(uint32_t*);
int mca_ptl_ib_get_hca_id(int, VAPI_hca_id_t*);
int mca_ptl_ib_get_hca_hndl(VAPI_hca_id_t, VAPI_hca_hndl_t*);
int mca_ptl_ib_query_hca_prop(VAPI_hca_hndl_t, VAPI_hca_port_t*);
int mca_ptl_ib_alloc_pd(VAPI_hca_hndl_t, VAPI_pd_hndl_t*);
int mca_ptl_ib_create_cq(VAPI_hca_hndl_t, VAPI_cq_hndl_t*);
#endif

Просмотреть файл

@ -45,9 +45,6 @@ mca_ptl_ib_proc_t* mca_ptl_ib_proc_create(ompi_proc_t* ompi_proc)
mca_ptl_ib_proc_t* ptl_proc = NULL;
fprintf(stderr,"[%s:%d] %s\n",
__FILE__, __LINE__, __func__);
/*
mca_ptl_ib_proc_t* ptl_proc =
mca_ptl_ib_proc_lookup_ompi(ompi_proc);
@ -58,12 +55,15 @@ mca_ptl_ib_proc_t* mca_ptl_ib_proc_create(ompi_proc_t* ompi_proc)
*/
ptl_proc = OBJ_NEW(mca_ptl_ib_proc_t);
ptl_proc->proc_ompi = ompi_proc;
/* build a unique identifier (of arbitrary
* size) to represent the proc */
ptl_proc->proc_guid = ompi_proc->proc_name;
D_PRINT("Creating proc for %d\n", ompi_proc->proc_name.vpid);
/* lookup ib parameters exported by
* this proc */
rc = mca_base_modex_recv(
@ -97,6 +97,8 @@ mca_ptl_ib_proc_t* mca_ptl_ib_proc_create(ompi_proc_t* ompi_proc)
return NULL;
}
D_PRINT("returning from proc_create\n");
return ptl_proc;
}

Просмотреть файл

@ -9,9 +9,6 @@
#include "ptl_ib_addr.h"
#include "ptl_ib_peer.h"
/*
extern ompi_class_t mca_ptl_ib_proc_t_class;
*/
OBJ_CLASS_DECLARATION(mca_ptl_ib_proc_t);
/**
@ -30,7 +27,7 @@ struct mca_ptl_ib_proc_t {
ompi_process_name_t proc_guid;
/**< globally unique identifier for the process */
struct mca_ptl_ib_ud_addr_t *proc_addrs;
struct mca_ptl_ib_ud_addr_t* proc_addrs;
/**< array of addresses published by peer */
size_t proc_addr_count;

Просмотреть файл

@ -23,18 +23,15 @@
* vapi_ret : The value which was returned from the last VAPI call
* func_name : The VAPI function which was called
*/
#define MCA_PTL_IB_VAPI_RET(ret, vapi_ret, func_name) { \
if(vapi_ret != VAPI_OK) { \
ompi_output(0,"[%s:%d]", __FILE__, __LINE__); \
ompi_output(0,"%s : %s",func_name,VAPI_strerror(vapi_ret)); \
return ret; \
} \
/* Log the location and stringified status of a failed VAPI call.
 * Wrapped in do { } while (0) so the macro expands to a single
 * statement and is safe inside unbraced if/else bodies. */
#define MCA_PTL_IB_VAPI_RET(vapi_ret, func_name) do { \
    ompi_output(0,"[%s:%d] ", __FILE__, __LINE__); \
    ompi_output(0,"%s : %s",func_name,VAPI_strerror(vapi_ret)); \
} while (0)
/* Debug Print */
#if 1
#define D_PRINT(fmt, args...) { \
fprintf(stderr, "[%s:%d]", __FILE__, __LINE__); \
fprintf(stderr, "[%s:%d:%s] ", __FILE__, __LINE__, __func__); \
fprintf(stderr, fmt, ## args); \
fflush(stderr); \
}