1
1

Disable shared receive queue support at compile time if the MVAPI implementation does not support shared receive queues (such as the implementation shipped by SilverStorm / Infinicon for OS X). Reviewed by Galen.

This commit was SVN r8389.
Этот коммит содержится в:
Brian Barrett 2005-12-06 15:46:30 +00:00
родитель 372f9b0f29
Коммит 38391e3406
4 изменённых файлов: 73 добавлений и 17 удалений

Просмотреть файл

@ -615,10 +615,13 @@ int mca_btl_mvapi_put( mca_btl_base_module_t* btl,
} else {
rc = OMPI_SUCCESS;
}
#ifdef VAPI_FEATURE_SRQ
if(mca_btl_mvapi_component.use_srq) {
MCA_BTL_MVAPI_POST_SRR_HIGH(mvapi_btl, 1);
MCA_BTL_MVAPI_POST_SRR_LOW(mvapi_btl, 1);
} else {
} else
#endif
{
MCA_BTL_MVAPI_ENDPOINT_POST_RR_HIGH(endpoint, 1);
MCA_BTL_MVAPI_ENDPOINT_POST_RR_LOW(endpoint, 1);
}
@ -687,10 +690,13 @@ int mca_btl_mvapi_get( mca_btl_base_module_t* btl,
} else {
rc = OMPI_SUCCESS;
}
#ifdef VAPI_FEATURE_SRQ
if(mca_btl_mvapi_component.use_srq) {
MCA_BTL_MVAPI_POST_SRR_HIGH(mvapi_btl, 1);
MCA_BTL_MVAPI_POST_SRR_LOW(mvapi_btl, 1);
} else {
} else
#endif
{
MCA_BTL_MVAPI_ENDPOINT_POST_RR_HIGH(endpoint, 1);
MCA_BTL_MVAPI_ENDPOINT_POST_RR_LOW(endpoint, 1);
}
@ -737,7 +743,7 @@ static void async_event_handler(VAPI_hca_hndl_t hca_hndl,
VAPI_event_syndrome_sym(event_p->syndrome)));
break;
}
#ifdef VAPI_FEATURE_SRQ
case VAPI_SRQ_LIMIT_REACHED:
{
size_t i;
@ -749,11 +755,16 @@ static void async_event_handler(VAPI_hca_hndl_t hca_hndl,
MCA_BTL_MVAPI_POST_SRR_LOW(mvapi_btl, 1);
}
}
#endif
/* BWB - is this right? */
#ifdef VAPI_FEATURE_SRQ
case VAPI_RECEIVE_QUEUE_DRAINED: {
fprintf(stderr, "VAPI_RECEIVE_QUEUE_DRAINEDD\n");
}
#endif
default:
BTL_ERROR(("Warning!! Got an undefined "
"asynchronous event %s", VAPI_event_record_sym(event_p->type)));
@ -772,16 +783,19 @@ int mca_btl_mvapi_module_init(mca_btl_mvapi_module_t *mvapi_btl)
/* Allocate Protection Domain */
VAPI_ret_t ret;
uint32_t cqe_cnt = 0;
#ifdef VAPI_FEATURE_SRQ
VAPI_srq_attr_t srq_attr, srq_attr_out, srq_attr_mod;
VAPI_srq_attr_mask_t srq_attr_mask;
uint32_t max_outs_wr;
#endif
ret = VAPI_alloc_pd(mvapi_btl->nic, &mvapi_btl->ptag);
if(ret != VAPI_OK) {
BTL_ERROR(("error in VAPI_alloc_pd: %s", VAPI_strerror(ret)));
return OMPI_ERROR;
}
#ifdef VAPI_FEATURE_SRQ
if(mca_btl_mvapi_component.use_srq) {
mvapi_btl->srd_posted_hp = 0;
mvapi_btl->srd_posted_lp = 0;
@ -843,10 +857,11 @@ int mca_btl_mvapi_module_init(mca_btl_mvapi_module_t *mvapi_btl)
}
} else {
} else {
mvapi_btl->srq_hndl_hp = VAPI_INVAL_SRQ_HNDL;
mvapi_btl->srq_hndl_lp = VAPI_INVAL_SRQ_HNDL;
}
#endif /* VAPI_FEATURE_SRQ */
ret = VAPI_create_cq(mvapi_btl->nic, mca_btl_mvapi_component.ib_cq_size,
&mvapi_btl->cq_hndl_lp, &cqe_cnt);

Просмотреть файл

@ -161,11 +161,12 @@ struct mca_btl_mvapi_module_t {
opal_mutex_t ib_lock; /**< module level lock */
VAPI_rr_desc_t* rr_desc_post; /**< an array to allow posting of rr in one swoop */
#ifdef VAPI_FEATURE_SRQ
VAPI_srq_hndl_t srq_hndl_hp; /**< A high priority shared receive queue
runtime optional, can also use a receive queue
per queue pair.. */
VAPI_srq_hndl_t srq_hndl_lp; /**< A low priority shared receive queue */
#endif
size_t ib_inline_max; /**< max size of inline send*/
int32_t num_peers;

Просмотреть файл

@ -18,7 +18,14 @@
#include "ompi_config.h"
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifdef HAVE_MALLOC_H
#include <malloc.h>
#endif
#include "ompi/include/constants.h"
#include "opal/event/event.h"
#include "opal/util/if.h"
@ -40,8 +47,6 @@
#include "mca/mpool/mvapi/mpool_mvapi.h"
#include "btl_mvapi_endpoint.h"
#include "mca/pml/base/pml_base_module_exchange.h"
#include <malloc.h>
mca_btl_mvapi_component_t mca_btl_mvapi_component = {
@ -118,6 +123,8 @@ static inline void mca_btl_mvapi_param_register_int(
int mca_btl_mvapi_component_open(void)
{
int tmp_int;
/* initialize state */
mca_btl_mvapi_component.ib_num_btls=0;
mca_btl_mvapi_component.mvapi_btls=NULL;
@ -127,7 +134,8 @@ int mca_btl_mvapi_component_open(void)
/* register IB component parameters */
mca_btl_mvapi_param_register_int ("max_btls", "maximum number of HCAs/ports to use",
4, &mca_btl_mvapi_component.ib_max_btls);
4, &tmp_int);
mca_btl_mvapi_component.ib_max_btls = tmp_int;
mca_btl_mvapi_param_register_int ("free_list_num", "intial size of free lists",
8, &mca_btl_mvapi_component.ib_free_list_num);
mca_btl_mvapi_param_register_int ("free_list_max", "maximum size of free lists",
@ -138,8 +146,12 @@ int mca_btl_mvapi_component_open(void)
"mvapi", &mca_btl_mvapi_component.ib_mpool_name);
mca_btl_mvapi_param_register_int("reg_mru_len", "length of the registration cache most recently used list",
16, (int*) &mca_btl_mvapi_component.reg_mru_len);
#ifdef VAPI_FEATURE_SRQ
mca_btl_mvapi_param_register_int("use_srq", "if 1 use the IB shared receive queue to post receive descriptors",
0, (int*) &mca_btl_mvapi_component.use_srq);
#else
mca_btl_mvapi_component.use_srq = 0;
#endif
mca_btl_mvapi_param_register_int("ib_cq_size", "size of the IB completion queue",
10000, (int*) &mca_btl_mvapi_component.ib_cq_size);
mca_btl_mvapi_param_register_int("ib_sg_list_size", "size of IB segment list",
@ -617,10 +629,13 @@ int mca_btl_mvapi_component_progress( void )
OMPI_FREE_LIST_RETURN(&(mvapi_btl->recv_free_eager), (opal_list_item_t*) frag);
/* repost receive descriptors */
#ifdef VAPI_FEATURE_SRQ
if(mca_btl_mvapi_component.use_srq) {
OPAL_THREAD_ADD32(&mvapi_btl->srd_posted_hp, -1);
MCA_BTL_MVAPI_POST_SRR_HIGH(mvapi_btl, 0);
} else {
} else
#endif
{
OPAL_THREAD_ADD32(&endpoint->rd_posted_hp, -1);
MCA_BTL_MVAPI_ENDPOINT_POST_RR_HIGH(endpoint, 0);
}
@ -745,10 +760,13 @@ int mca_btl_mvapi_component_progress( void )
OMPI_FREE_LIST_RETURN(&(mvapi_btl->recv_free_max), (opal_list_item_t*) frag);
/* post descriptors */
#ifdef VAPI_FEATURE_SRQ
if(mca_btl_mvapi_component.use_srq) {
OPAL_THREAD_ADD32(&mvapi_btl->srd_posted_lp, -1);
MCA_BTL_MVAPI_POST_SRR_LOW(mvapi_btl, 0);
} else {
} else
#endif
{
OPAL_THREAD_ADD32(&endpoint->rd_posted_lp, -1);
MCA_BTL_MVAPI_ENDPOINT_POST_RR_LOW(endpoint, 0);
}

Просмотреть файл

@ -41,7 +41,9 @@ int mca_btl_mvapi_endpoint_create_qp(
VAPI_hca_hndl_t nic,
VAPI_pd_hndl_t ptag,
VAPI_cq_hndl_t cq_hndl,
#ifdef VAPI_FEATURE_SRQ
VAPI_srq_hndl_t srq_hndl,
#endif
VAPI_qp_hndl_t* qp_hndl,
VAPI_qp_prop_t* qp_prop,
int transport_type
@ -131,10 +133,13 @@ static inline int mca_btl_mvapi_endpoint_post_send(
BTL_ERROR(("VAPI_post_sr: %s\n", VAPI_strerror(frag->ret)));
return OMPI_ERROR;
}
#ifdef VAPI_FEATURE_SRQ
if(mca_btl_mvapi_component.use_srq) {
MCA_BTL_MVAPI_POST_SRR_HIGH(mvapi_btl, 1);
MCA_BTL_MVAPI_POST_SRR_LOW(mvapi_btl, 1);
} else {
} else
#endif
{
MCA_BTL_MVAPI_ENDPOINT_POST_RR_HIGH(endpoint, 1);
MCA_BTL_MVAPI_ENDPOINT_POST_RR_LOW(endpoint, 1);
}
@ -333,7 +338,9 @@ static int mca_btl_mvapi_endpoint_start_connect(mca_btl_base_endpoint_t* endpoin
endpoint->endpoint_btl->nic,
endpoint->endpoint_btl->ptag,
endpoint->endpoint_btl->cq_hndl_hp,
#ifdef VAPI_FEATURE_SRQ
endpoint->endpoint_btl->srq_hndl_hp,
#endif
&endpoint->lcl_qp_hndl_hp,
&endpoint->lcl_qp_prop_hp,
VAPI_TS_RC))) {
@ -347,7 +354,9 @@ static int mca_btl_mvapi_endpoint_start_connect(mca_btl_base_endpoint_t* endpoin
endpoint->endpoint_btl->nic,
endpoint->endpoint_btl->ptag,
endpoint->endpoint_btl->cq_hndl_lp,
#ifdef VAPI_FEATURE_SRQ
endpoint->endpoint_btl->srq_hndl_lp,
#endif
&endpoint->lcl_qp_hndl_lp,
&endpoint->lcl_qp_prop_lp,
VAPI_TS_RC))) {
@ -395,7 +404,9 @@ static int mca_btl_mvapi_endpoint_reply_start_connect(mca_btl_mvapi_endpoint_t *
endpoint->endpoint_btl->nic,
endpoint->endpoint_btl->ptag,
endpoint->endpoint_btl->cq_hndl_hp,
#ifdef VAPI_FEATURE_SRQ
endpoint->endpoint_btl->srq_hndl_hp,
#endif
&endpoint->lcl_qp_hndl_hp,
&endpoint->lcl_qp_prop_hp,
VAPI_TS_RC))) {
@ -409,7 +420,9 @@ static int mca_btl_mvapi_endpoint_reply_start_connect(mca_btl_mvapi_endpoint_t *
endpoint->endpoint_btl->nic,
endpoint->endpoint_btl->ptag,
endpoint->endpoint_btl->cq_hndl_lp,
#ifdef VAPI_FEATURE_SRQ
endpoint->endpoint_btl->srq_hndl_lp,
#endif
&endpoint->lcl_qp_hndl_lp,
&endpoint->lcl_qp_prop_lp,
VAPI_TS_RC))) {
@ -786,10 +799,13 @@ int mca_btl_mvapi_endpoint_connect(
return rc;
}
#ifdef VAPI_FEATURE_SRQ
if(mca_btl_mvapi_component.use_srq) {
MCA_BTL_MVAPI_POST_SRR_HIGH(endpoint->endpoint_btl, 0);
MCA_BTL_MVAPI_POST_SRR_LOW(endpoint->endpoint_btl, 0);
} else {
} else
#endif
{
MCA_BTL_MVAPI_ENDPOINT_POST_RR_HIGH(endpoint, 0);
MCA_BTL_MVAPI_ENDPOINT_POST_RR_LOW(endpoint, 0);
}
@ -841,7 +857,9 @@ int mca_btl_mvapi_endpoint_create_qp(
VAPI_hca_hndl_t nic,
VAPI_pd_hndl_t ptag,
VAPI_cq_hndl_t cq_hndl,
#ifdef VAPI_FEATURE_SRQ
VAPI_srq_hndl_t srq_hndl,
#endif
VAPI_qp_hndl_t* qp_hndl,
VAPI_qp_prop_t* qp_prop,
int transport_type)
@ -849,7 +867,9 @@ int mca_btl_mvapi_endpoint_create_qp(
VAPI_ret_t ret;
VAPI_qp_init_attr_t qp_init_attr;
#ifdef VAPI_FEATURE_SRQ
VAPI_qp_init_attr_ext_t qp_init_attr_ext;
#endif
/* worst case number of credit messages could be queued */
switch(transport_type) {
@ -881,7 +901,8 @@ int mca_btl_mvapi_endpoint_create_qp(
default:
return OMPI_ERR_NOT_IMPLEMENTED;
}
#ifdef VAPI_FEATURE_SRQ
if(mca_btl_mvapi_component.use_srq) {
qp_init_attr_ext.srq_hndl = srq_hndl;
@ -890,8 +911,9 @@ int mca_btl_mvapi_endpoint_create_qp(
&qp_init_attr_ext,
qp_hndl,
qp_prop);
}
else {
} else
#endif
{
ret = VAPI_create_qp(nic,
&qp_init_attr,
qp_hndl,