1
1

This component wasn't even in 1.5.0; no one has had a GM network in

forever.  There is no point in carrying this component forward.

This commit was SVN r26563.
This commit is contained in:
Jeff Squyres 2012-06-06 21:43:54 +00:00
parent 2abf783fa0
commit 56a537a5f5
11 changed files: 0 additions and 2839 deletions

View file

@ -1,54 +0,0 @@
#
# Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
# University Research and Technology
# Corporation. All rights reserved.
# Copyright (c) 2004-2005 The University of Tennessee and The University
# of Tennessee Research Foundation. All rights
# reserved.
# Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
# University of Stuttgart. All rights reserved.
# Copyright (c) 2004-2005 The Regents of the University of California.
# All rights reserved.
# Copyright (c) 2010 Cisco Systems, Inc. All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
# btl_gm_CPPFLAGS / btl_gm_LIBS / btl_gm_LDFLAGS are set by this
# component's configure logic (paths to the GM headers/libraries).
AM_CPPFLAGS = $(btl_gm_CPPFLAGS)
# Sources shared by both the DSO and the static builds of this component.
gm_sources = \
btl_gm.c \
btl_gm.h \
btl_gm_component.c \
btl_gm_endpoint.c \
btl_gm_endpoint.h \
btl_gm_frag.c \
btl_gm_frag.h \
btl_gm_proc.c \
btl_gm_proc.h
# Make the output library in this directory, and name it either
# mca_<type>_<name>.la (for DSO builds) or libmca_<type>_<name>.la
# (for static builds).
if MCA_BUILD_ompi_btl_gm_DSO
component_noinst =
component_install = mca_btl_gm.la
else
component_noinst = libmca_btl_gm.la
component_install =
endif
# Installed (DSO) flavor: a loadable module placed under $(pkglibdir).
mcacomponentdir = $(pkglibdir)
mcacomponent_LTLIBRARIES = $(component_install)
mca_btl_gm_la_SOURCES = $(gm_sources)
mca_btl_gm_la_LIBADD = $(btl_gm_LIBS)
mca_btl_gm_la_LDFLAGS = -module -avoid-version $(btl_gm_LDFLAGS)
# Static flavor: a noinst convenience library linked into the main library.
noinst_LTLIBRARIES = $(component_noinst)
libmca_btl_gm_la_SOURCES = $(gm_sources)
libmca_btl_gm_la_LIBADD = $(btl_gm_LIBS)
libmca_btl_gm_la_LDFLAGS = -module -avoid-version $(btl_gm_LDFLAGS)

View file

@ -1,985 +0,0 @@
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2008 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2006-2009 Myricom, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include <string.h>
#include "opal/class/opal_bitmap.h"
#include "opal/util/output.h"
#include "ompi/mca/btl/btl.h"
#include "btl_gm.h"
#include "btl_gm_frag.h"
#include "btl_gm_proc.h"
#include "btl_gm_endpoint.h"
#include "opal/datatype/opal_convertor.h"
#include "ompi/mca/mpool/base/base.h"
#include "ompi/mca/mpool/mpool.h"
#include "ompi/proc/proc.h"
/**
 * Non-locking versions of public interfaces.
 * These are used directly when the build is single threaded, and from the
 * GM completion callbacks (which already run with
 * mca_btl_gm_component.gm_lock held) to retry failed/dropped fragments.
 */
static int mca_btl_gm_send_nl(
struct mca_btl_base_module_t* btl,
struct mca_btl_base_endpoint_t* endpoint,
struct mca_btl_base_descriptor_t* des,
mca_btl_base_tag_t tag);
static int mca_btl_gm_get_nl(
mca_btl_base_module_t* btl,
mca_btl_base_endpoint_t* endpoint,
mca_btl_base_descriptor_t* des);
static int mca_btl_gm_put_nl(
mca_btl_base_module_t* btl,
mca_btl_base_endpoint_t* endpoint,
mca_btl_base_descriptor_t* des);
/*
 * Template GM BTL module.  The numeric limits are all 0 here; they are
 * presumably filled in from MCA parameters at component init -- confirm in
 * btl_gm_component.c.  When thread support is compiled in, the locking
 * send/put/get entry points are installed; otherwise the non-locking
 * variants are used directly.
 */
mca_btl_gm_module_t mca_btl_gm_module = {
{
&mca_btl_gm_component.super,
0, /* max size of first fragment */
0, /* min send fragment size */
0, /* max send fragment size */
0, /* btl_rdma_pipeline_send_length */
0, /* btl_rdma_pipeline_frag_size */
0, /* btl_min_rdma_pipeline_size */
0, /* exclusivity */
0, /* latency */
0, /* bandwidth */
0, /* flags */
mca_btl_gm_add_procs,
mca_btl_gm_del_procs,
NULL,
mca_btl_gm_finalize,
mca_btl_gm_alloc,
mca_btl_gm_free,
mca_btl_gm_prepare_src,
mca_btl_gm_prepare_dst,
#if OMPI_ENABLE_THREAD_MULTIPLE || OMPI_ENABLE_PROGRESS_THREADS
mca_btl_gm_send,
NULL, /* send immediate */
mca_btl_gm_put,
mca_btl_gm_get,
#else
mca_btl_gm_send_nl,
NULL, /* send immediate */
mca_btl_gm_put_nl,
mca_btl_gm_get_nl,
#endif
mca_btl_base_dump,
NULL, /* mpool */
mca_btl_gm_register_error_cb,
mca_btl_gm_ft_event
}
};
/**
 * PML->BTL notification of new processes.  For each remote proc: create or
 * look up the shared per-proc bookkeeping, allocate an endpoint bound to
 * this module, insert it into the proc's endpoint table, and mark the proc
 * reachable.  The local proc and procs whose insertion fails are silently
 * skipped (not treated as errors).
 *
 * @return OMPI_SUCCESS, or OMPI_ERR_OUT_OF_RESOURCE if an endpoint
 *         cannot be allocated.
 */
int mca_btl_gm_add_procs(
struct mca_btl_base_module_t* btl,
size_t nprocs,
struct ompi_proc_t **ompi_procs,
struct mca_btl_base_endpoint_t** peers,
opal_bitmap_t* reachable)
{
mca_btl_gm_module_t* gm_btl = (mca_btl_gm_module_t*)btl;
int i, rc;
for(i = 0; i < (int) nprocs; i++) {
struct ompi_proc_t* ompi_proc = ompi_procs[i];
mca_btl_gm_proc_t* gm_proc;
mca_btl_base_endpoint_t* gm_endpoint;
/* GM is not used for loopback; skip ourselves */
if(ompi_proc == ompi_proc_local())
continue;
if(NULL == (gm_proc = mca_btl_gm_proc_create(ompi_proc))) {
continue;
}
/*
 * Check to make sure that the peer has at least as many interface
 * addresses exported as we are trying to use. If not, then
 * don't bind this PTL instance to the proc.
 * NOTE(review): no such check is visible in this function;
 * presumably mca_btl_gm_proc_insert() performs it -- confirm.
 */
OPAL_THREAD_LOCK(&gm_proc->proc_lock);
/* The btl_proc datastructure is shared by all GM PTL
 * instances that are trying to reach this destination.
 * Cache the peer instance on the btl_proc.
 */
gm_endpoint = OBJ_NEW(mca_btl_gm_endpoint_t);
if(NULL == gm_endpoint) {
OPAL_THREAD_UNLOCK(&gm_proc->proc_lock);
return OMPI_ERR_OUT_OF_RESOURCE;
}
gm_endpoint->endpoint_btl = gm_btl;
rc = mca_btl_gm_proc_insert(gm_proc, gm_endpoint);
if(rc != OMPI_SUCCESS) {
OBJ_RELEASE(gm_endpoint);
OPAL_THREAD_UNLOCK(&gm_proc->proc_lock);
continue;
}
opal_bitmap_set_bit(reachable, i);
OPAL_THREAD_UNLOCK(&gm_proc->proc_lock);
peers[i] = gm_endpoint;
}
return OMPI_SUCCESS;
}
/**
 * PML->BTL notification that processes have departed.  Endpoint teardown
 * was never implemented for GM, so this is a successful no-op.
 */
int mca_btl_gm_del_procs(struct mca_btl_base_module_t* btl,
                         size_t nprocs,
                         struct ompi_proc_t **procs,
                         struct mca_btl_base_endpoint_t ** peers)
{
    /* Nothing to clean up yet (TODO); silence unused-parameter warnings. */
    (void)btl;
    (void)nprocs;
    (void)procs;
    (void)peers;
    return OMPI_SUCCESS;
}
/**
 * Register a callback used to report asynchronous errors upward.
 *
 * @param btl    (IN) BTL module
 * @param cbfunc (IN) error callback, stored on the module for later use
 * @return OMPI_SUCCESS always
 */
int mca_btl_gm_register_error_cb(
struct mca_btl_base_module_t* btl,
mca_btl_base_module_error_cb_fn_t cbfunc)
{
    /* stash for later */
    ((mca_btl_gm_module_t*)btl)->error_cb = cbfunc;
    return OMPI_SUCCESS;
}
/**
 * Allocate a descriptor with a segment of the requested size: an eager
 * fragment if it fits under btl_eager_limit, a max-send fragment if it fits
 * under btl_max_send_size, otherwise NULL.
 *
 * NOTE(review): `rc` is set by the allocation macros but never inspected --
 * failure is detected via the NULL frag check instead.  The `order`
 * parameter is ignored; the descriptor is always MCA_BTL_NO_ORDER.
 *
 * @param btl (IN) BTL module
 * @param size (IN) Request segment size.
 */
mca_btl_base_descriptor_t* mca_btl_gm_alloc(
struct mca_btl_base_module_t* btl,
struct mca_btl_base_endpoint_t* endpoint,
uint8_t order,
size_t size,
uint32_t flags)
{
mca_btl_gm_module_t* gm_btl = (mca_btl_gm_module_t*) btl;
mca_btl_gm_frag_t* frag;
int rc;
if(size <= btl->btl_eager_limit) {
MCA_BTL_GM_FRAG_ALLOC_EAGER(gm_btl, frag, rc);
if(NULL == frag) {
return NULL;
}
frag->type=MCA_BTL_GM_EAGER;
frag->segment.seg_len = size;
} else if(size <= btl->btl_max_send_size) {
MCA_BTL_GM_FRAG_ALLOC_MAX(gm_btl, frag, rc);
if(NULL == frag) {
return NULL;
}
frag->type=MCA_BTL_GM_SEND;
frag->segment.seg_len = size;
} else {
/* request too large for any send fragment class */
return NULL;
}
/* a freshly allocated fragment is always a source-only descriptor */
frag->base.des_src = &frag->segment;
frag->base.des_src_cnt = 1;
frag->base.des_dst = NULL;
frag->base.des_dst_cnt = 0;
frag->base.des_flags = flags;
frag->base.order = MCA_BTL_NO_ORDER;
return &frag->base;
}
/**
 * Return a descriptor previously handed out by this BTL.  If the fragment
 * still owns a memory registration created in prepare_src/prepare_dst,
 * release that registration first, then put the fragment back on its
 * free list.
 */
int mca_btl_gm_free( struct mca_btl_base_module_t* btl,
mca_btl_base_descriptor_t* des )
{
    mca_btl_gm_frag_t* frag = (mca_btl_gm_frag_t*)des;
    mca_mpool_base_registration_t* reg =
        (mca_mpool_base_registration_t*)frag->registration;

    if (reg != NULL) {
        btl->btl_mpool->mpool_deregister(btl->btl_mpool, reg);
        frag->registration = NULL;
    }
    MCA_BTL_GM_FRAG_RETURN(btl, frag);
    return OMPI_SUCCESS;
}
/**
 * Pack data and return a descriptor that can be
 * used for send/put.
 *
 * Fast path (only when RDMA is available): contiguous data with no reserve
 * that is either pre-registered or larger than the max send size is sent
 * zero-copy from the user buffer, registering it on the fly if needed.
 * Otherwise the data is packed into an eager or max-send fragment.
 *
 * @param btl (IN) BTL module
 * @param peer (IN) BTL peer addressing
 */
mca_btl_base_descriptor_t* mca_btl_gm_prepare_src(
struct mca_btl_base_module_t* btl,
struct mca_btl_base_endpoint_t* endpoint,
struct mca_mpool_base_registration_t* registration,
struct opal_convertor_t* convertor,
uint8_t order,
size_t reserve,
size_t* size,
uint32_t flags
)
{
mca_btl_gm_frag_t *frag = NULL;
struct iovec iov;
uint32_t iov_count = 1;
size_t max_data = *size;
int rc;
#if (OMPI_MCA_BTL_GM_HAVE_RDMA_GET || OMPI_MCA_BTL_GM_HAVE_RDMA_PUT)
if(opal_convertor_need_buffers(convertor) == false && 0 == reserve) {
if(registration != NULL || max_data > btl->btl_max_send_size) {
MCA_BTL_GM_FRAG_ALLOC_USER(btl, frag, rc);
if(NULL == frag) {
return NULL;
}
/*
 * just assign it something..
 * we will assign the real value in put/get
 */
frag->type = MCA_BTL_GM_PUT;
/* contiguous case: "pack" only resolves the user pointer/length */
iov.iov_len = max_data;
iov.iov_base = NULL;
opal_convertor_pack(convertor, &iov, &iov_count, &max_data);
*size = max_data;
if(NULL == registration) {
rc = btl->btl_mpool->mpool_register(btl->btl_mpool,
iov.iov_base, max_data, 0, &registration);
if(OMPI_SUCCESS != rc || NULL == registration) {
MCA_BTL_GM_FRAG_RETURN(btl, frag);
return NULL;
}
/* keep track of the registration we did, so free() releases it */
frag->registration = registration;
}
frag->segment.seg_len = max_data;
frag->segment.seg_addr.pval = iov.iov_base;
frag->base.des_src = &frag->segment;
frag->base.des_src_cnt = 1;
frag->base.des_dst = NULL;
frag->base.des_dst_cnt = 0;
frag->base.des_flags = flags;
frag->base.order = MCA_BTL_NO_ORDER;
return &frag->base;
}
}
#endif
if (max_data + reserve <= btl->btl_eager_limit) {
/* the data is small enough to fit in the eager frag and
 * memory is not prepinned */
MCA_BTL_GM_FRAG_ALLOC_EAGER(btl, frag, rc);
if(frag != NULL) {
frag->type = MCA_BTL_GM_EAGER;
}
}
if(NULL == frag) {
/* the data doesn't fit into eager frag or eager frag is
 * not available */
MCA_BTL_GM_FRAG_ALLOC_MAX(btl, frag, rc);
if(NULL == frag) {
return NULL;
}
frag->type = MCA_BTL_GM_SEND;
/* clamp to what fits in the fragment after the PML header */
if(max_data + reserve > btl->btl_max_send_size) {
max_data = btl->btl_max_send_size - reserve;
}
}
/* pack the (possibly clamped) user data after the reserved header space */
iov.iov_len = max_data;
iov.iov_base = (unsigned char*) frag->segment.seg_addr.pval + reserve;
rc = opal_convertor_pack(convertor, &iov, &iov_count, &max_data);
if(rc < 0) {
MCA_BTL_GM_FRAG_RETURN(btl, frag);
return NULL;
}
*size = max_data;
frag->segment.seg_len = max_data + reserve;
frag->base.des_src = &frag->segment;
frag->base.des_src_cnt = 1;
frag->base.des_dst = NULL;
frag->base.des_dst_cnt = 0;
frag->base.des_flags = flags;
frag->base.order = MCA_BTL_NO_ORDER;
return &frag->base;
}
/**
 * Prepare a descriptor for an incoming RDMA (the destination side).
 * The descriptor points directly at the user buffer; if no registration
 * was supplied, the buffer is registered here and the registration is
 * owned by the fragment (released in mca_btl_gm_free).  Compiled away to
 * NULL when the GM build supports neither RDMA get nor put.
 *
 * @param btl (IN) BTL module
 * @param endpoint (IN) BTL peer addressing
 * @param convertor (IN) Data type convertor
 * @param reserve (IN) Additional bytes requested by upper layer to precede user data
 * @param size (IN/OUT) Number of bytes to prepare (IN), number of bytes actually prepared (OUT)
 */
mca_btl_base_descriptor_t* mca_btl_gm_prepare_dst(
struct mca_btl_base_module_t* btl,
struct mca_btl_base_endpoint_t* endpoint,
struct mca_mpool_base_registration_t* registration,
struct opal_convertor_t* convertor,
uint8_t order,
size_t reserve,
size_t* size,
uint32_t flags)
{
#if (OMPI_MCA_BTL_GM_HAVE_RDMA_GET || OMPI_MCA_BTL_GM_HAVE_RDMA_PUT)
mca_btl_gm_frag_t* frag;
mca_mpool_base_module_t* mpool = btl->btl_mpool;
int rc;
MCA_BTL_GM_FRAG_ALLOC_USER(btl, frag, rc);
if(NULL == frag) {
return NULL;
}
/*
 * we don't know that this is for a PUT,
 * but it doesn't matter.. they belong
 * on the same list eventually anyway
 */
frag->type = MCA_BTL_GM_PUT;
frag->segment.seg_len = *size;
/* point the segment at the current position of the user buffer */
opal_convertor_get_current_pointer( convertor, (void**)&(frag->segment.seg_addr.pval) );
frag->base.des_src = NULL;
frag->base.des_src_cnt = 0;
frag->base.des_dst = &frag->segment;
frag->base.des_dst_cnt = 1;
frag->base.des_flags = flags;
frag->base.order = MCA_BTL_NO_ORDER;
if(NULL == registration) {
rc = mpool->mpool_register( mpool,
frag->segment.seg_addr.pval,
frag->segment.seg_len,
0,
&registration );
if(rc != OMPI_SUCCESS) {
MCA_BTL_GM_FRAG_RETURN(btl,frag);
return NULL;
}
/* fragment owns this registration; released in mca_btl_gm_free */
frag->registration = registration;
}
return &frag->base;
#else
return NULL;
#endif
}
/**
 * Completion hook passed to gm_drop_sends(): the context is the BTL module;
 * replenish the send token consumed by the dropped-sends request.
 */
static void mca_btl_gm_drop_callback( struct gm_port* port, void* context, gm_status_t status )
{
    OPAL_THREAD_ADD32( &((mca_btl_gm_module_t*)context)->gm_num_send_tokens, 1 );
}
/**
 * Callback on send completion and/or error.
 * Called with mca_btl_gm_component.gm_lock held.
 *
 * Timeouts trigger a gm_drop_sends() flush of the destination port followed
 * by a retry; dropped sends are retried directly.  On success or an
 * unrecoverable error the PML completion callback is invoked with the lock
 * temporarily released (the callback may re-enter the BTL).
 */
static void mca_btl_gm_send_callback( struct gm_port* port, void* context, gm_status_t status )
{
mca_btl_gm_frag_t* frag = (mca_btl_gm_frag_t*)context;
mca_btl_gm_module_t* btl = frag->btl;
/* capture ownership before the callback may free/recycle the frag */
int btl_ownership = frag->base.des_flags & MCA_BTL_DES_FLAGS_BTL_OWNERSHIP;
switch(status) {
case GM_TRY_AGAIN:
case GM_SEND_TIMED_OUT:
#if GM_API_VERSION > 0x200
case GM_TIMED_OUT:
#endif
/* drop all sends to this destination port */
gm_drop_sends(
btl->port,
(frag->base.des_flags & MCA_BTL_DES_FLAGS_PRIORITY) ? GM_HIGH_PRIORITY : GM_LOW_PRIORITY,
frag->endpoint->endpoint_addr.node_id,
frag->endpoint->endpoint_addr.port_id,
mca_btl_gm_drop_callback,
btl
);
/* retry the failed fragment (lock already held -> _nl variant) */
mca_btl_gm_send_nl(&btl->super, frag->endpoint, &frag->base, frag->hdr->tag);
break;
case GM_SEND_DROPPED:
/* release the send token */
OPAL_THREAD_ADD32(&btl->gm_num_send_tokens, 1);
/* retry the dropped fragment */
mca_btl_gm_send_nl(&btl->super, frag->endpoint, &frag->base, frag->hdr->tag);
break;
case GM_SUCCESS:
/* call the completion callback with the GM lock dropped */
OPAL_THREAD_UNLOCK(&mca_btl_gm_component.gm_lock);
frag->base.des_cbfunc(&btl->super, frag->endpoint, &frag->base, OMPI_SUCCESS);
OPAL_THREAD_LOCK(&mca_btl_gm_component.gm_lock);
if( btl_ownership ) {
mca_btl_gm_free(&btl->super, &frag->base);
}
/* return the send token and deque pending fragments */
MCA_BTL_GM_RETURN_TOKEN(btl);
break;
default:
/* error condition can't deal with */
opal_output(0, "[%s:%d] send completed with unhandled gm error %d\n", __FILE__,__LINE__,status);
/* release the send token */
OPAL_THREAD_ADD32( &btl->gm_num_send_tokens, 1 );
/* call the completion callback with the GM lock dropped */
OPAL_THREAD_UNLOCK(&mca_btl_gm_component.gm_lock);
frag->base.des_cbfunc(&btl->super, frag->endpoint, &frag->base, OMPI_ERROR);
OPAL_THREAD_LOCK(&mca_btl_gm_component.gm_lock);
if( btl_ownership ) {
mca_btl_gm_free(&btl->super, &frag->base);
}
break;
}
}
/**
 * Initiate an asynchronous send. Do NOT acquire gm lock, must already be held,
 * or in an unthreaded environment.
 *
 * Eager-sized fragments flagged as priority are posted on the GM
 * high-priority channel; everything else uses the low-priority channel.
 * Any receive buffers queued on gm_repost are re-provided afterwards.
 *
 * @param btl (IN) BTL module
 * @param endpoint (IN) BTL addressing information
 * @param des (IN) Description of the data to be transfered
 * @param tag (IN) The tag value used to notify the peer.
 */
static int mca_btl_gm_send_nl(
struct mca_btl_base_module_t* btl,
struct mca_btl_base_endpoint_t* endpoint,
struct mca_btl_base_descriptor_t* des,
mca_btl_base_tag_t tag)
{
    mca_btl_gm_module_t* gm_btl = (mca_btl_gm_module_t*) btl;
    mca_btl_gm_frag_t* frag = (mca_btl_gm_frag_t*)des;

    frag->btl = gm_btl;
    frag->endpoint = endpoint;
    frag->hdr->tag = tag;

    /* queue the descriptor if there are no send tokens */
    MCA_BTL_GM_ACQUIRE_TOKEN_NL(gm_btl, frag);

    /* post the send descriptor */
    if (frag->base.des_flags & MCA_BTL_DES_FLAGS_PRIORITY &&
        frag->size == mca_btl_gm_component.gm_eager_frag_size) {
        gm_send_with_callback( gm_btl->port,
                               frag->hdr,
                               mca_btl_gm_component.gm_eager_frag_size,
                               frag->segment.seg_len + sizeof(mca_btl_base_header_t),
                               GM_HIGH_PRIORITY,
                               endpoint->endpoint_addr.node_id,
                               endpoint->endpoint_addr.port_id,
                               mca_btl_gm_send_callback,
                               frag );
    } else {
        gm_send_with_callback( gm_btl->port,
                               frag->hdr,
                               mca_btl_gm_component.gm_max_frag_size,
                               frag->segment.seg_len + sizeof(mca_btl_base_header_t),
                               GM_LOW_PRIORITY,
                               endpoint->endpoint_addr.node_id,
                               endpoint->endpoint_addr.port_id,
                               mca_btl_gm_send_callback,
                               frag );
    }

    /* repost any queued receive buffers; use a distinct name so we do not
     * shadow the send fragment above (the original code redeclared `frag`) */
    if (opal_list_get_size(&gm_btl->gm_repost)) {
        mca_btl_gm_frag_t* repost_frag;
        while (NULL != (repost_frag = (mca_btl_gm_frag_t*)
                            opal_list_remove_first(&gm_btl->gm_repost))) {
            gm_provide_receive_buffer(gm_btl->port, repost_frag->hdr,
                                      repost_frag->size, repost_frag->priority);
        }
    }
    return OMPI_SUCCESS;
}
/**
 * Initiate an asynchronous send (locking version).
 *
 * Takes mca_btl_gm_component.gm_lock around the token acquisition, the
 * send post, and the receive-buffer repost; otherwise identical to
 * mca_btl_gm_send_nl().
 *
 * @param btl (IN) BTL module
 * @param endpoint (IN) BTL addressing information
 * @param des (IN) Description of the data to be transfered
 * @param tag (IN) The tag value used to notify the peer.
 */
int mca_btl_gm_send(
struct mca_btl_base_module_t* btl,
struct mca_btl_base_endpoint_t* endpoint,
struct mca_btl_base_descriptor_t* des,
mca_btl_base_tag_t tag)
{
    mca_btl_gm_module_t* gm_btl = (mca_btl_gm_module_t*) btl;
    mca_btl_gm_frag_t* frag = (mca_btl_gm_frag_t*)des;

    frag->btl = gm_btl;
    frag->endpoint = endpoint;
    frag->hdr->tag = tag;

    /* queue the descriptor if there are no send tokens */
    OPAL_THREAD_LOCK(&mca_btl_gm_component.gm_lock);
    MCA_BTL_GM_ACQUIRE_TOKEN(gm_btl, frag);

    /* post the send descriptor */
    if (frag->base.des_flags & MCA_BTL_DES_FLAGS_PRIORITY &&
        frag->size == mca_btl_gm_component.gm_eager_frag_size) {
        gm_send_with_callback( gm_btl->port,
                               frag->hdr,
                               mca_btl_gm_component.gm_eager_frag_size,
                               frag->segment.seg_len + sizeof(mca_btl_base_header_t),
                               GM_HIGH_PRIORITY,
                               endpoint->endpoint_addr.node_id,
                               endpoint->endpoint_addr.port_id,
                               mca_btl_gm_send_callback,
                               frag );
    } else {
        gm_send_with_callback( gm_btl->port,
                               frag->hdr,
                               mca_btl_gm_component.gm_max_frag_size,
                               frag->segment.seg_len + sizeof(mca_btl_base_header_t),
                               GM_LOW_PRIORITY,
                               endpoint->endpoint_addr.node_id,
                               endpoint->endpoint_addr.port_id,
                               mca_btl_gm_send_callback,
                               frag );
    }

    /* repost any queued receive buffers; use a distinct name so we do not
     * shadow the send fragment above (the original code redeclared `frag`) */
    if (opal_list_get_size(&gm_btl->gm_repost)) {
        mca_btl_gm_frag_t* repost_frag;
        while (NULL != (repost_frag = (mca_btl_gm_frag_t*)
                            opal_list_remove_first(&gm_btl->gm_repost))) {
            gm_provide_receive_buffer(gm_btl->port, repost_frag->hdr,
                                      repost_frag->size, repost_frag->priority);
        }
    }
    OPAL_THREAD_UNLOCK(&mca_btl_gm_component.gm_lock);
    return OMPI_SUCCESS;
}
/**
 * Callback on put completion and/or error.
 * Called with mca_btl_gm_component.gm_lock held.
 *
 * Mirrors mca_btl_gm_send_callback: timeouts flush the destination port and
 * retry; drops retry directly; success/failure invoke the PML completion
 * callback with the lock temporarily released.
 */
static void mca_btl_gm_put_callback( struct gm_port* port, void* context, gm_status_t status )
{
mca_btl_gm_frag_t* frag = (mca_btl_gm_frag_t*)context;
mca_btl_gm_module_t* btl = frag->btl;
/* capture ownership before the callback may free/recycle the frag */
int btl_ownership = frag->base.des_flags & MCA_BTL_DES_FLAGS_BTL_OWNERSHIP;
/* call the completion callback */
switch(status) {
case GM_TRY_AGAIN:
case GM_SEND_TIMED_OUT:
#if GM_API_VERSION > 0x200
case GM_TIMED_OUT:
#endif
/* drop all sends to this destination port */
gm_drop_sends(
btl->port,
(frag->base.des_flags & MCA_BTL_DES_FLAGS_PRIORITY) ? GM_HIGH_PRIORITY : GM_LOW_PRIORITY,
frag->endpoint->endpoint_addr.node_id,
frag->endpoint->endpoint_addr.port_id,
mca_btl_gm_drop_callback,
btl
);
/* retry the failed fragment (lock already held -> _nl variant) */
mca_btl_gm_put_nl(&btl->super, frag->endpoint, &frag->base);
break;
case GM_SEND_DROPPED:
/* release the send token */
OPAL_THREAD_ADD32(&btl->gm_num_send_tokens, 1);
/* retry the dropped fragment */
mca_btl_gm_put_nl(&btl->super, frag->endpoint, &frag->base);
break;
case GM_SUCCESS:
/* call completion callback with the GM lock dropped */
OPAL_THREAD_UNLOCK(&mca_btl_gm_component.gm_lock);
frag->base.des_cbfunc(&btl->super, frag->endpoint, &frag->base, OMPI_SUCCESS);
OPAL_THREAD_LOCK(&mca_btl_gm_component.gm_lock);
if( btl_ownership ) {
mca_btl_gm_free(&btl->super, &frag->base);
}
/* return the send token and deque pending fragments */
MCA_BTL_GM_RETURN_TOKEN(btl);
break;
default:
/* error condition can't deal with */
opal_output(0, "[%s:%d] gm_put operation failed with status %d\n", __FILE__, __LINE__, status);
/* release the send token */
OPAL_THREAD_ADD32( &btl->gm_num_send_tokens, 1 );
/* call the completion callback with the GM lock dropped */
OPAL_THREAD_UNLOCK(&mca_btl_gm_component.gm_lock);
frag->base.des_cbfunc(&btl->super, frag->endpoint, &frag->base, OMPI_ERROR);
OPAL_THREAD_LOCK(&mca_btl_gm_component.gm_lock);
if( btl_ownership ) {
mca_btl_gm_free(&btl->super, &frag->base);
}
break;
}
}
/**
 * Initiate an asynchronous put. Do not acquire lock; the caller must hold
 * mca_btl_gm_component.gm_lock (retry path) or run unthreaded.
 * Compiled to OMPI_ERR_NOT_IMPLEMENTED when the GM build lacks RDMA put.
 *
 * @param btl (IN) BTL module
 * @param endpoint (IN) BTL addressing information
 * @param des (IN) Description of the data to be transferred
 */
static int mca_btl_gm_put_nl(
mca_btl_base_module_t* btl,
mca_btl_base_endpoint_t* endpoint,
mca_btl_base_descriptor_t* des)
{
#if OMPI_MCA_BTL_GM_HAVE_RDMA_PUT
mca_btl_gm_module_t* gm_btl = (mca_btl_gm_module_t*) btl;
mca_btl_gm_frag_t* frag = (mca_btl_gm_frag_t*) des;
frag->btl = gm_btl;
frag->endpoint = endpoint;
frag->type = MCA_BTL_GM_PUT;
/* queue the descriptor if there are no send tokens */
MCA_BTL_GM_ACQUIRE_TOKEN_NL(gm_btl, frag);
/* post the put descriptor: local source buffer -> remote address */
gm_put(gm_btl->port,
des->des_src->seg_addr.pval,
des->des_dst->seg_addr.lval,
des->des_src->seg_len,
GM_LOW_PRIORITY,
endpoint->endpoint_addr.node_id,
endpoint->endpoint_addr.port_id,
mca_btl_gm_put_callback,
frag);
return OMPI_SUCCESS;
#else
return OMPI_ERR_NOT_IMPLEMENTED;
#endif
}
/**
 * Initiate an asynchronous put (locking version of mca_btl_gm_put_nl).
 * Compiled to OMPI_ERR_NOT_IMPLEMENTED when the GM build lacks RDMA put.
 *
 * @param btl (IN) BTL module
 * @param endpoint (IN) BTL addressing information
 * @param des (IN) Description of the data to be transferred
 */
int mca_btl_gm_put(
mca_btl_base_module_t* btl,
mca_btl_base_endpoint_t* endpoint,
mca_btl_base_descriptor_t* des)
{
#if OMPI_MCA_BTL_GM_HAVE_RDMA_PUT
mca_btl_gm_module_t* gm_btl = (mca_btl_gm_module_t*) btl;
mca_btl_gm_frag_t* frag = (mca_btl_gm_frag_t*) des;
frag->btl = gm_btl;
frag->endpoint = endpoint;
frag->type = MCA_BTL_GM_PUT;
/* queue the descriptor if there are no send tokens */
OPAL_THREAD_LOCK(&mca_btl_gm_component.gm_lock);
MCA_BTL_GM_ACQUIRE_TOKEN(gm_btl, frag);
/* post the put descriptor: local source buffer -> remote address */
gm_put(gm_btl->port,
des->des_src->seg_addr.pval,
des->des_dst->seg_addr.lval,
des->des_src->seg_len,
GM_LOW_PRIORITY,
endpoint->endpoint_addr.node_id,
endpoint->endpoint_addr.port_id,
mca_btl_gm_put_callback,
frag);
OPAL_THREAD_UNLOCK(&mca_btl_gm_component.gm_lock);
return OMPI_SUCCESS;
#else
return OMPI_ERR_NOT_IMPLEMENTED;
#endif
}
/**
 * Callback on get completion and/or error.
 * Called with mca_btl_gm_component.gm_lock held.
 *
 * Mirrors the send/put callbacks: timeouts flush the destination port and
 * retry; drops retry directly; success/failure invoke the PML completion
 * callback with the lock temporarily released.
 */
static void mca_btl_gm_get_callback( struct gm_port* port, void* context, gm_status_t status )
{
mca_btl_gm_frag_t* frag = (mca_btl_gm_frag_t*)context;
mca_btl_gm_module_t* btl = frag->btl;
/* capture ownership before the callback may free/recycle the frag */
int btl_ownership = frag->base.des_flags & MCA_BTL_DES_FLAGS_BTL_OWNERSHIP;
/* call the completion callback */
switch(status) {
case GM_TRY_AGAIN:
case GM_SEND_TIMED_OUT:
#if GM_API_VERSION > 0x200
case GM_TIMED_OUT:
#endif
/* drop all sends to this destination port */
gm_drop_sends(
btl->port,
(frag->base.des_flags & MCA_BTL_DES_FLAGS_PRIORITY) ? GM_HIGH_PRIORITY : GM_LOW_PRIORITY,
frag->endpoint->endpoint_addr.node_id,
frag->endpoint->endpoint_addr.port_id,
mca_btl_gm_drop_callback,
btl
);
/* retry the failed fragment (lock already held -> _nl variant) */
mca_btl_gm_get_nl(&btl->super, frag->endpoint, &frag->base);
break;
case GM_SEND_DROPPED:
/* release the send token */
OPAL_THREAD_ADD32(&btl->gm_num_send_tokens, 1);
/* retry the dropped fragment */
mca_btl_gm_get_nl(&btl->super, frag->endpoint, &frag->base);
break;
case GM_SUCCESS:
/* call completion callback with the GM lock dropped */
OPAL_THREAD_UNLOCK(&mca_btl_gm_component.gm_lock);
frag->base.des_cbfunc(&btl->super, frag->endpoint, &frag->base, OMPI_SUCCESS);
OPAL_THREAD_LOCK(&mca_btl_gm_component.gm_lock);
if( btl_ownership ) {
mca_btl_gm_free(&btl->super, &frag->base);
}
/* return the send token and deque pending fragments */
MCA_BTL_GM_RETURN_TOKEN(btl);
break;
default:
/* error condition can't deal with */
opal_output(0, "[%s:%d] gm_get operation failed with status %d\n", __FILE__, __LINE__, status);
/* release the send token */
OPAL_THREAD_ADD32( &btl->gm_num_send_tokens, 1 );
/* call the completion callback with the GM lock dropped */
OPAL_THREAD_UNLOCK(&mca_btl_gm_component.gm_lock);
frag->base.des_cbfunc(&btl->super, frag->endpoint, &frag->base, OMPI_ERROR);
OPAL_THREAD_LOCK(&mca_btl_gm_component.gm_lock);
if( btl_ownership ) {
mca_btl_gm_free(&btl->super, &frag->base);
}
break;
}
}
/**
 * Initiate an asynchronous get. No locking; the caller must hold
 * mca_btl_gm_component.gm_lock (retry path) or run unthreaded.
 * Compiled to OMPI_ERR_NOT_IMPLEMENTED when the GM build lacks RDMA get.
 *
 * @param btl (IN) BTL module
 * @param endpoint (IN) BTL addressing information
 * @param des (IN) Description of the data to be transferred
 *
 */
static int mca_btl_gm_get_nl(
mca_btl_base_module_t* btl,
mca_btl_base_endpoint_t* endpoint,
mca_btl_base_descriptor_t* des)
{
#if OMPI_MCA_BTL_GM_HAVE_RDMA_GET
mca_btl_gm_module_t* gm_btl = (mca_btl_gm_module_t*) btl;
mca_btl_gm_frag_t* frag = (mca_btl_gm_frag_t*) des;
frag->btl = gm_btl;
frag->endpoint = endpoint;
frag->type = MCA_BTL_GM_GET;
/* queue the descriptor if there are no send tokens */
MCA_BTL_GM_ACQUIRE_TOKEN_NL(gm_btl, frag);
/* post the get descriptor: remote source address -> local buffer */
gm_get(gm_btl->port,
des->des_dst->seg_addr.lval,
des->des_src->seg_addr.pval,
des->des_src->seg_len,
GM_LOW_PRIORITY,
endpoint->endpoint_addr.node_id,
endpoint->endpoint_addr.port_id,
mca_btl_gm_get_callback,
frag);
return OMPI_SUCCESS;
#else
return OMPI_ERR_NOT_IMPLEMENTED;
#endif
}
/**
 * Initiate an asynchronous get (locking version of mca_btl_gm_get_nl).
 * Compiled to OMPI_ERR_NOT_IMPLEMENTED when the GM build lacks RDMA get.
 *
 * @param btl (IN) BTL module
 * @param endpoint (IN) BTL addressing information
 * @param des (IN) Description of the data to be transferred
 *
 */
int mca_btl_gm_get(
mca_btl_base_module_t* btl,
mca_btl_base_endpoint_t* endpoint,
mca_btl_base_descriptor_t* des)
{
#if OMPI_MCA_BTL_GM_HAVE_RDMA_GET
mca_btl_gm_module_t* gm_btl = (mca_btl_gm_module_t*) btl;
mca_btl_gm_frag_t* frag = (mca_btl_gm_frag_t*) des;
frag->btl = gm_btl;
frag->endpoint = endpoint;
frag->type = MCA_BTL_GM_GET;
/* queue the descriptor if there are no send tokens */
OPAL_THREAD_LOCK(&mca_btl_gm_component.gm_lock);
MCA_BTL_GM_ACQUIRE_TOKEN(gm_btl, frag);
/* post the get descriptor: remote source address -> local buffer */
gm_get(gm_btl->port,
des->des_dst->seg_addr.lval,
des->des_src->seg_addr.pval,
des->des_src->seg_len,
GM_LOW_PRIORITY,
endpoint->endpoint_addr.node_id,
endpoint->endpoint_addr.port_id,
mca_btl_gm_get_callback,
frag);
OPAL_THREAD_UNLOCK(&mca_btl_gm_component.gm_lock);
return OMPI_SUCCESS;
#else
return OMPI_ERR_NOT_IMPLEMENTED;
#endif
}
/*
 * Cleanup/release module resources.
 */
#if OMPI_ENABLE_PROGRESS_THREADS
/* no-op alarm handler: the alarm only exists to wake the progress thread */
static void mca_btl_gm_alarm(void* arg) {}
#endif
int mca_btl_gm_finalize(struct mca_btl_base_module_t* btl)
{
mca_btl_gm_module_t* gm_btl = (mca_btl_gm_module_t*) btl;
#if OMPI_ENABLE_PROGRESS_THREADS
gm_alarm_t alarm;
/* tell the progress thread to exit, then set a short alarm so it wakes
 * up and notices gm_progress == false, and wait for it to finish */
OPAL_THREAD_LOCK(&mca_btl_gm_component.gm_lock);
gm_btl->gm_progress = false;
gm_initialize_alarm(&alarm);
gm_set_alarm(gm_btl->port, &alarm, 10, mca_btl_gm_alarm, NULL);
OPAL_THREAD_UNLOCK(&mca_btl_gm_component.gm_lock);
opal_thread_join(&gm_btl->gm_thread, NULL);
#endif
/* release fragment free lists, close the GM port, free the module */
OBJ_DESTRUCT(&gm_btl->gm_frag_eager);
OBJ_DESTRUCT(&gm_btl->gm_frag_max);
OBJ_DESTRUCT(&gm_btl->gm_frag_user);
gm_close(gm_btl->port);
free(gm_btl);
return OMPI_SUCCESS;
}
/**
 * Checkpoint/restart event hook.  The GM BTL keeps no state that needs to
 * be saved or restored, so every fault-tolerance phase (checkpoint,
 * continue, restart, terminate, or anything else) is accepted as a no-op.
 */
int mca_btl_gm_ft_event(int state) {
    switch (state) {
    case OPAL_CRS_CHECKPOINT:
    case OPAL_CRS_CONTINUE:
    case OPAL_CRS_RESTART:
    case OPAL_CRS_TERM:
    default:
        /* nothing to do for any phase */
        break;
    }
    return OMPI_SUCCESS;
}

View file

@ -1,377 +0,0 @@
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2009 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
/**
* @file
*/
#ifndef MCA_BTL_GM_H
#define MCA_BTL_GM_H
#include "ompi_config.h"
#include <sys/types.h>
#include <string.h>
#include <gm.h>
/* Open MPI includes */
#include "opal/mca/event/event.h"
#include "ompi/class/ompi_free_list.h"
#include "ompi/mca/btl/btl.h"
#include "ompi/mca/btl/base/base.h"
#include "ompi/mca/mpool/mpool.h"
#include "ompi/mca/btl/btl.h"
#include "btl_gm_endpoint.h"
BEGIN_C_DECLS
/* GM receive-buffer "size code"; gm_max_length_for_size() converts the
 * code to a byte length.  NOTE(review): the meaning of size code 7 is not
 * visible here -- confirm against the GM API documentation. */
#define GM_BUFFER_SIZE 7
#define GM_BUFFER_LENGTH gm_max_length_for_size(GM_BUFFER_SIZE)
/**
 * Myrinet (GM) BTL component.
 */
struct mca_btl_gm_component_t {
mca_btl_base_component_2_0_0_t super; /**< base BTL component */
size_t gm_num_btls; /**< number of hcas available to the GM component */
size_t gm_max_btls; /**< maximum number of supported hcas */
struct mca_btl_gm_module_t **gm_btls; /**< array of available BTL modules */
size_t gm_max_ports; /**< maximum number of ports per board */
size_t gm_max_boards; /**< maximum number of boards */
size_t gm_eager_frag_size; /**< size of an eager (high-priority) fragment */
size_t gm_max_frag_size; /**< size of a max-send (low-priority) fragment */
char* gm_port_name; /**< name used when opening the GM port */
int32_t gm_num_repost; /**< receive buffers to repost in a batch -- confirm in component code */
int32_t gm_num_high_priority; /**< number of receive descriptors at high priority */
int gm_debug; /**< turn on debug output */
int gm_free_list_num; /**< initial size of free lists */
int gm_free_list_max; /**< maximum size of free lists */
int gm_free_list_inc; /**< number of elements to alloc when growing free lists */
opal_list_t gm_procs; /**< list of gm proc structures */
opal_mutex_t gm_lock; /**< lock for accessing module state */
char* gm_mpool_name; /**< name of memory pool */
};
typedef struct mca_btl_gm_component_t mca_btl_gm_component_t;
OMPI_MODULE_DECLSPEC extern mca_btl_gm_component_t mca_btl_gm_component;
/**
 * BTL Module Interface: one instance per opened GM port.
 */
struct mca_btl_gm_module_t {
mca_btl_base_module_t super; /**< base BTL interface */
/* local port handle/address */
struct gm_port *port; /**< open GM port handle */
mca_btl_gm_addr_t gm_addr; /**< address published to peers */
/* free list of fragment descriptors */
ompi_free_list_t gm_frag_eager; /**< eager (high-priority) fragments */
ompi_free_list_t gm_frag_max; /**< max-send (low-priority) fragments */
ompi_free_list_t gm_frag_user; /**< zero-copy user-buffer fragments */
/* number of send/recv tokens (flow control against the GM port) */
int32_t gm_num_send_tokens;
int32_t gm_max_send_tokens;
int32_t gm_num_recv_tokens;
int32_t gm_max_recv_tokens;
int32_t gm_num_repost;
/* state protected by mca_btl_gm_component.gm_lock */
opal_list_t gm_pending; /**< list of pending send descriptors */
opal_list_t gm_repost; /**< list of pending fragments */
#if OMPI_ENABLE_PROGRESS_THREADS
opal_thread_t gm_thread; /**< progress thread; joined in finalize */
bool gm_progress; /**< progress thread keeps running while true */
#endif
mca_btl_base_module_error_cb_fn_t error_cb; /**< set via register_error_cb */
};
typedef struct mca_btl_gm_module_t mca_btl_gm_module_t;
extern mca_btl_gm_module_t mca_btl_gm_module;
/**
 * Register GM component parameters with the MCA framework
 */
extern int mca_btl_gm_component_open(void);
/**
 * Any final cleanup before being unloaded.
 */
extern int mca_btl_gm_component_close(void);
/**
 * GM component initialization.
 *
 * @param num_btl_modules (OUT) Number of BTLs returned in BTL array.
 * @param allow_multi_user_threads (OUT) Flag indicating whether BTL supports user threads (TRUE)
 * @param have_hidden_threads (OUT) Flag indicating whether BTL uses threads (TRUE)
 *
 * NOTE(review): the doc marks the bool parameters OUT, but they are passed
 * by value here, so the caller cannot receive them -- confirm against the
 * component-init signature used by the BTL framework.
 */
extern mca_btl_base_module_t** mca_btl_gm_component_init(
int *num_btl_modules,
bool allow_multi_user_threads,
bool have_hidden_threads
);
/**
 * GM component progress.
 */
extern int mca_btl_gm_component_progress(void);
/**
 * Cleanup any resources held by the BTL.
 *
 * @param btl BTL instance.
 * @return OMPI_SUCCESS or error status on failure.
 */
extern int mca_btl_gm_finalize(
struct mca_btl_base_module_t* btl
);
/**
 * PML->BTL notification of change in the process list.
 *
 * @param btl (IN) BTL instance
 * @param nprocs (IN) Number of processes
 * @param procs (IN) Set of processes
 * @param peers (OUT) Set of (optional) peer addressing info.
 * @param reachable (IN/OUT) Bitmap of processes that are reachable via this BTL.
 * @return OMPI_SUCCESS or error status on failure.
 *
 */
extern int mca_btl_gm_add_procs(
struct mca_btl_base_module_t* btl,
size_t nprocs,
struct ompi_proc_t **procs,
struct mca_btl_base_endpoint_t** peers,
opal_bitmap_t* reachable
);
/**
 * PML->BTL notification of change in the process list.
 *
 * @param btl (IN) BTL instance
 * @param nprocs (IN) Number of processes.
 * @param procs (IN) Set of processes.
 * @param peers (IN) Set of peer data structures.
 * @return Status indicating if cleanup was successful
 *
 */
extern int mca_btl_gm_del_procs(
struct mca_btl_base_module_t* btl,
size_t nprocs,
struct ompi_proc_t **procs,
struct mca_btl_base_endpoint_t** peers
);
/**
 * Initiate an asynchronous send.
 *
 * @param btl (IN) BTL module
 * @param btl_peer (IN) BTL addressing information
 * @param descriptor (IN) Description of the data to be transfered
 * @param tag (IN) The tag value used to notify the peer.
 */
extern int mca_btl_gm_send(
struct mca_btl_base_module_t* btl,
struct mca_btl_base_endpoint_t* btl_peer,
struct mca_btl_base_descriptor_t* descriptor,
mca_btl_base_tag_t tag
);
/**
 * Initiate an asynchronous put (RDMA write).
 *
 * @param btl        (IN) BTL module
 * @param btl_peer   (IN) BTL addressing information
 * @param descriptor (IN) Description of the data to be transferred
 */
extern int mca_btl_gm_put(
    struct mca_btl_base_module_t* btl,
    struct mca_btl_base_endpoint_t* btl_peer,
    struct mca_btl_base_descriptor_t* descriptor  /* spelling fixed (was "decriptor") */
);

/**
 * Initiate an asynchronous get (RDMA read).
 *
 * @param btl        (IN) BTL module
 * @param btl_peer   (IN) BTL addressing information
 * @param descriptor (IN) Description of the data to be transferred
 */
extern int mca_btl_gm_get(
    struct mca_btl_base_module_t* btl,
    struct mca_btl_base_endpoint_t* btl_peer,
    struct mca_btl_base_descriptor_t* descriptor  /* spelling fixed (was "decriptor") */
);
/**
 * Register a callback function that is called on error.
 *
 * @param btl (IN) BTL module
 * @return Status indicating if registration was successful
 *
 */
extern int mca_btl_gm_register_error_cb(
    struct mca_btl_base_module_t* btl,
    mca_btl_base_module_error_cb_fn_t cbfunc);

/**
 * Allocate a descriptor with a segment of the requested size.
 * Note that the BTL layer may choose to return a smaller size
 * if it cannot support the request.
 *
 * @param btl  (IN) BTL module
 * @param size (IN) Request segment size.
 */
extern mca_btl_base_descriptor_t* mca_btl_gm_alloc(
    struct mca_btl_base_module_t* btl,
    struct mca_btl_base_endpoint_t* endpoint,
    uint8_t order,
    size_t size,
    uint32_t flags);

/**
 * Return a segment allocated by this BTL.
 *
 * @param btl (IN) BTL module
 * @param des (IN) Allocated descriptor.
 */
extern int mca_btl_gm_free(
    struct mca_btl_base_module_t* btl,
    mca_btl_base_descriptor_t* des);

/**
 * Prepare a descriptor for send/rdma using the supplied
 * convertor. If the convertor references data that is contiguous,
 * the descriptor may simply point to the user buffer. Otherwise,
 * this routine is responsible for allocating buffer space and
 * packing if required.
 *
 * @param btl       (IN)     BTL module
 * @param peer      (IN)     BTL peer addressing
 * @param convertor (IN)     Data type convertor
 * @param reserve   (IN)     Additional bytes requested by upper layer to precede user data
 * @param size      (IN/OUT) Number of bytes to prepare (IN), number of bytes actually prepared (OUT)
 */
mca_btl_base_descriptor_t* mca_btl_gm_prepare_src(
    struct mca_btl_base_module_t* btl,
    struct mca_btl_base_endpoint_t* peer,
    struct mca_mpool_base_registration_t*,
    struct opal_convertor_t* convertor,
    uint8_t order,
    size_t reserve,
    size_t* size,
    uint32_t flags
);

/* Destination-side counterpart of mca_btl_gm_prepare_src(). */
extern mca_btl_base_descriptor_t* mca_btl_gm_prepare_dst(
    struct mca_btl_base_module_t* btl,
    struct mca_btl_base_endpoint_t* peer,
    struct mca_mpool_base_registration_t*,
    struct opal_convertor_t* convertor,
    uint8_t order,
    size_t reserve,
    size_t* size,
    uint32_t flags);
/**
 * Fault Tolerance Event Notification Function
 * @param state Checkpoint State
 * @return OMPI_SUCCESS or failure status
 */
int mca_btl_gm_ft_event(int state);
/**
 * Acquire a send token - queue the fragment and return from the calling
 * function if none are available.  Non-locking variant: the caller does
 * NOT hold mca_btl_gm_component.gm_lock.
 *
 * Fixes vs. the original:
 *  - uses the (btl) macro parameter instead of silently relying on a
 *    variable named gm_btl existing in the caller's scope;
 *  - no trailing line-continuation after "while (0)", which would have
 *    spliced the following preprocessor line into this macro.
 */
#define MCA_BTL_GM_ACQUIRE_TOKEN_NL(btl, frag)                               \
do {                                                                         \
    /* queue the descriptor if there are no send tokens */                   \
    if(OPAL_THREAD_ADD32(&(btl)->gm_num_send_tokens, -1) < 0) {              \
        opal_list_append(&(btl)->gm_pending, (opal_list_item_t*)(frag));     \
        OPAL_THREAD_ADD32(&(btl)->gm_num_send_tokens, 1);                    \
        return OMPI_SUCCESS;                                                 \
    }                                                                        \
} while (0)
/**
 * Acquire a send token - queue the fragment and return from the calling
 * function if none are available.  Locking variant: the caller holds
 * mca_btl_gm_component.gm_lock, which is dropped before returning.
 *
 * Fixes vs. the original: uses the (btl) macro parameter rather than a
 * caller-scope variable named gm_btl, and drops the stray trailing
 * line-continuation after "while (0)".
 */
#define MCA_BTL_GM_ACQUIRE_TOKEN(btl, frag)                                  \
do {                                                                         \
    /* queue the descriptor if there are no send tokens */                   \
    if(OPAL_THREAD_ADD32(&(btl)->gm_num_send_tokens, -1) < 0) {              \
        opal_list_append(&(btl)->gm_pending, (opal_list_item_t*)(frag));     \
        OPAL_THREAD_ADD32(&(btl)->gm_num_send_tokens, 1);                    \
        OPAL_THREAD_UNLOCK(&mca_btl_gm_component.gm_lock);                   \
        return OMPI_SUCCESS;                                                 \
    }                                                                        \
} while (0)
/**
* Return send token and dequeue and pending fragments
* mca_btl_gm_component.gm_lock is already held.
*/
#define MCA_BTL_GM_RETURN_TOKEN(btl) \
do { \
OPAL_THREAD_ADD32( &btl->gm_num_send_tokens, 1 ); \
if(opal_list_get_size(&btl->gm_pending)) { \
mca_btl_gm_frag_t* frag; \
frag = (mca_btl_gm_frag_t*)opal_list_remove_first(&btl->gm_pending); \
if(NULL != frag) { \
switch(frag->type) { \
case MCA_BTL_GM_SEND: \
case MCA_BTL_GM_EAGER: \
mca_btl_gm_send_nl(&btl->super, frag->endpoint, &frag->base, frag->hdr->tag); \
break; \
case MCA_BTL_GM_PUT: \
mca_btl_gm_put_nl(&btl->super, frag->endpoint, &frag->base); \
break; \
case MCA_BTL_GM_GET: \
mca_btl_gm_get_nl(&btl->super, frag->endpoint, &frag->base); \
break; \
} \
} \
} \
} while (0)
END_C_DECLS
#endif

Просмотреть файл

@ -1,732 +0,0 @@
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2007-2008 Cisco Systems, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "ompi/constants.h"
#include "opal/mca/event/event.h"
#include "opal/util/output.h"
#include "ompi/mca/btl/btl.h"
#ifdef HAVE_INTTYPES_H
#include <inttypes.h>
#endif
#include "opal/mca/base/mca_base_param.h"
#include "ompi/mca/mpool/base/base.h"
#include "ompi/mca/mpool/rdma/mpool_rdma.h"
#include "btl_gm.h"
#include "btl_gm_frag.h"
#include "btl_gm_endpoint.h"
#include "ompi/mca/btl/base/base.h"
#include "ompi/mca/btl/base/btl_base_error.h"
#include "btl_gm_endpoint.h"
#include "orte/runtime/orte_globals.h"
#include "orte/util/name_fns.h"
#include "ompi/runtime/ompi_module_exchange.h"
#include "ompi/runtime/mpiruntime.h"
#if OMPI_ENABLE_PROGRESS_THREADS
static void* mca_btl_gm_progress_thread( opal_object_t* arg );
#endif
static int gm_reg_mr(void *reg_data, void *base, size_t size,
mca_mpool_base_registration_t *reg);
static int gm_dereg_mr(void *reg_data, mca_mpool_base_registration_t *reg);
/* Singleton instance of the GM BTL component: the MCA meta data plus the
 * component's open/close/init/progress entry points. */
mca_btl_gm_component_t mca_btl_gm_component = {
    {
        /* First, the mca_base_component_t struct containing meta information
           about the component itself */
        {
            MCA_BTL_BASE_VERSION_2_0_0,
            "gm", /* MCA component name */
            OMPI_MAJOR_VERSION, /* MCA component major version */
            OMPI_MINOR_VERSION, /* MCA component minor version */
            OMPI_RELEASE_VERSION, /* MCA component release version */
            mca_btl_gm_component_open, /* component open */
            mca_btl_gm_component_close /* component close */
        },
        {
            /* The component is not checkpoint ready */
            MCA_BASE_METADATA_PARAM_NONE
        },
        mca_btl_gm_component_init,
        mca_btl_gm_component_progress,
    }
};
/*
* utility routines for parameter registration
*/
/*
 * Register a string MCA parameter in the "btl gm" namespace and return
 * its current value.  Caller owns the returned string.
 */
static inline char* mca_btl_gm_param_register_string(
    const char* param_name,
    const char* default_value)
{
    char *value = NULL;
    const int index =
        mca_base_param_register_string("btl", "gm", param_name,
                                       NULL, default_value);
    mca_base_param_lookup_string(index, &value);
    return value;
}
/*
 * Register an integer MCA parameter in the "btl gm" namespace and return
 * its current value (the default if no override was supplied).
 */
static inline int mca_btl_gm_param_register_int(
    const char* param_name,
    int default_value)
{
    int value = default_value;
    const int index =
        mca_base_param_register_int("btl", "gm", param_name,
                                    NULL, default_value);
    mca_base_param_lookup_int(index, &value);
    return value;
}
/*
 * Called by MCA framework to open the component, registers
 * component parameters and derives the GM "size" codes for the
 * eager and max fragments.
 */
int mca_btl_gm_component_open(void)
{
    /* initialize state */
    mca_btl_gm_component.gm_num_btls=0;
    mca_btl_gm_component.gm_btls=NULL;

    /* initialize objects */
    OBJ_CONSTRUCT(&mca_btl_gm_component.gm_procs, opal_list_t);
    OBJ_CONSTRUCT(&mca_btl_gm_component.gm_lock, opal_mutex_t);

    /* register GM component parameters */
    mca_btl_gm_component.gm_free_list_num =
        mca_btl_gm_param_register_int ("free_list_num", 8);
    mca_btl_gm_component.gm_free_list_max =
        mca_btl_gm_param_register_int ("free_list_max", -1);
    mca_btl_gm_component.gm_free_list_inc =
        mca_btl_gm_param_register_int ("free_list_inc", 8);
    mca_btl_gm_component.gm_debug =
        mca_btl_gm_param_register_int("debug", 0);
    mca_btl_gm_component.gm_mpool_name =
        mca_btl_gm_param_register_string("mpool", "rdma");
    mca_btl_gm_component.gm_max_ports =
        mca_btl_gm_param_register_int("max_ports", 16);
    mca_btl_gm_component.gm_max_boards =
        mca_btl_gm_param_register_int("max_boards", 4);
    mca_btl_gm_component.gm_max_btls =
        mca_btl_gm_param_register_int("max_modules", 4);
    mca_btl_gm_component.gm_num_high_priority =
        mca_btl_gm_param_register_int("num_high_priority", 8);
    mca_btl_gm_component.gm_num_repost =
        mca_btl_gm_param_register_int("num_repost", 4);
    mca_btl_gm_component.gm_port_name=
        mca_btl_gm_param_register_string("port_name", "OMPI");

    /* register gm module parameters (defaults; overridable via MCA) */
    mca_btl_gm_module.super.btl_exclusivity = MCA_BTL_EXCLUSIVITY_DEFAULT;
    mca_btl_gm_module.super.btl_eager_limit = 32*1024;
    mca_btl_gm_module.super.btl_rndv_eager_limit = 32*1024;
    mca_btl_gm_module.super.btl_max_send_size = 64*1024;
    mca_btl_gm_module.super.btl_rdma_pipeline_send_length = 512*1024;
    mca_btl_gm_module.super.btl_rdma_pipeline_frag_size = 128*1024;
    mca_btl_gm_module.super.btl_min_rdma_pipeline_size = 128*1024;
#if OMPI_MCA_BTL_GM_HAVE_RDMA_PUT
    mca_btl_gm_module.super.btl_flags = MCA_BTL_FLAGS_PUT |
        MCA_BTL_FLAGS_NEED_ACK | MCA_BTL_FLAGS_NEED_CSUM;
#else
    mca_btl_gm_module.super.btl_flags = MCA_BTL_FLAGS_SEND;
#endif
    mca_btl_gm_module.super.btl_bandwidth = 250;
    mca_btl_gm_module.super.btl_latency = 0;
    mca_btl_base_param_register(&mca_btl_gm_component.super.btl_version,
            &mca_btl_gm_module.super);

    /* compute the eager frag size: find the GM size code large enough
       for the requested limit, then clamp the limit back to the exact
       buffer length that size code yields, minus the BTL header */
    mca_btl_gm_component.gm_eager_frag_size =
        gm_min_size_for_length(mca_btl_gm_module.super.btl_eager_limit) - 1;
    mca_btl_gm_module.super.btl_eager_limit =
        gm_max_length_for_size(mca_btl_gm_component.gm_eager_frag_size) -
        sizeof(mca_btl_base_header_t);

    /* compute the max frag size (same derivation as above) */
    mca_btl_gm_component.gm_max_frag_size =
        gm_min_size_for_length(mca_btl_gm_module.super.btl_max_send_size) - 1;
    mca_btl_gm_module.super.btl_max_send_size =
        gm_max_length_for_size(mca_btl_gm_component.gm_max_frag_size) -
        sizeof(mca_btl_base_header_t);
    return OMPI_SUCCESS;
}
/*
 * component cleanup - sanity checking of queue lengths
 * (currently there are no component-wide resources to release here;
 * per-module resources are torn down elsewhere)
 */
int mca_btl_gm_component_close(void)
{
    return OMPI_SUCCESS;
}
/*
 * mpool registration hook: pin [base, base+size) with GM so the NIC can
 * DMA it.  reg_data is the module's gm_port (see module_init); reg is
 * unused here.
 *
 * Fix: return OMPI_SUCCESS on success instead of MPI_SUCCESS -- this
 * file consistently uses the OMPI_* constant family everywhere else.
 */
static int gm_reg_mr(void *reg_data, void *base, size_t size,
        mca_mpool_base_registration_t *reg)
{
    struct gm_port *port = (struct gm_port *) reg_data;

    if (GM_SUCCESS != gm_register_memory(port, base, size)) {
        return OMPI_ERR_OUT_OF_RESOURCE;
    }
    return OMPI_SUCCESS;
}
/*
 * mpool deregistration hook: unpin the region previously pinned by
 * gm_reg_mr().  reg->bound is the address of the last byte of the
 * registration, hence the "+ 1" in the length computation.
 */
static int gm_dereg_mr(void *reg_data, mca_mpool_base_registration_t *reg)
{
    struct gm_port *port = (struct gm_port*)reg_data;
    int rc;
    rc = gm_deregister_memory(port, reg->base, reg->bound - reg->base + 1);
    if(rc != GM_SUCCESS) {
        /* NOTE(review): gm_deregister_memory() is not shown to set errno,
           so strerror(errno) may be unrelated to the actual failure; rc
           would be the more reliable value to report -- confirm. */
        opal_output(0, "%s: error unpinning gm memory errno says %s\n",
                __func__, strerror(errno));
        return OMPI_ERROR;
    }
    return OMPI_SUCCESS;
}
/**
 * Initialize one module instance: query send/receive token counts from
 * the NIC, create the memory pool and free lists, pre-post receive
 * buffers, enable RDMA and (optionally) start the progress thread.
 * The order matters: the mpool must exist before the free lists that
 * draw pinned memory from it.
 */
static int
mca_btl_gm_module_init (mca_btl_gm_module_t * btl)
{
    mca_mpool_base_resources_t resources;
    int32_t num_high_priority;
    int32_t i;
    int rc;

    /* initialize objects */
    OBJ_CONSTRUCT(&btl->gm_frag_eager, ompi_free_list_t);
    OBJ_CONSTRUCT(&btl->gm_frag_max, ompi_free_list_t);
    OBJ_CONSTRUCT(&btl->gm_frag_user, ompi_free_list_t);
    OBJ_CONSTRUCT(&btl->gm_pending, opal_list_t);
    OBJ_CONSTRUCT(&btl->gm_repost, opal_list_t);
#if OMPI_ENABLE_PROGRESS_THREADS
    OBJ_CONSTRUCT(&btl->gm_thread, opal_thread_t);
#endif
    /* no error callback until mca_btl_gm_register_error_cb() is called */
    btl->error_cb = NULL;

    /* query nic tokens */
    btl->gm_num_send_tokens = gm_num_send_tokens (btl->port);
    btl->gm_max_send_tokens = btl->gm_num_send_tokens;
    btl->gm_num_recv_tokens = gm_num_receive_tokens (btl->port);
    btl->gm_max_recv_tokens = btl->gm_num_recv_tokens;

    /* dont allow high priority to exceed 1/2 of available recv tokens */
    num_high_priority = mca_btl_gm_component.gm_num_high_priority;
    if(num_high_priority > (btl->gm_num_recv_tokens >> 1)) {
        num_high_priority = btl->gm_num_recv_tokens >> 1;
    }
    /* cap the repost batch at half the high-priority budget */
    if(mca_btl_gm_component.gm_num_repost > num_high_priority >> 1) {
        btl->gm_num_repost = (num_high_priority >> 1);
    } else {
        btl->gm_num_repost = mca_btl_gm_component.gm_num_repost;
    }

    /* initialize memory pool; pin/unpin goes through gm_reg_mr/gm_dereg_mr
       using this module's port */
    resources.reg_data = (void*)btl->port;
    resources.sizeof_reg = sizeof(mca_mpool_base_registration_t);
    resources.register_mem = gm_reg_mr;
    resources.deregister_mem = gm_dereg_mr;
    btl->super.btl_mpool = mca_mpool_base_module_create(
            mca_btl_gm_component.gm_mpool_name,
            &btl->super,
            &resources);
    if(NULL == btl->super.btl_mpool) {
        opal_output (0, "[%s:%d] unable to initialize mpool", __FILE__, __LINE__);
        return OMPI_ERROR;
    }

    /* initialize free lists; payload sizes are (1 << gm size code) plus
       room for the fragment back-pointer stored below each buffer */
    ompi_free_list_init_new( &btl->gm_frag_eager,
            sizeof (mca_btl_gm_frag_eager_t),
            opal_cache_line_size,
            OBJ_CLASS (mca_btl_gm_frag_eager_t),
            (1 << mca_btl_gm_component.gm_eager_frag_size) + sizeof (uintptr_t),
            opal_cache_line_size,
            btl->gm_max_send_tokens,
            mca_btl_gm_component.gm_free_list_max,
            mca_btl_gm_component.gm_free_list_inc,
            btl->super.btl_mpool );

    ompi_free_list_init_new( &btl->gm_frag_max,
            sizeof (mca_btl_gm_frag_max_t),
            opal_cache_line_size,
            OBJ_CLASS (mca_btl_gm_frag_max_t),
            (1 << mca_btl_gm_component.gm_max_frag_size) + sizeof (uintptr_t),
            opal_cache_line_size,
            btl->gm_max_recv_tokens,
            mca_btl_gm_component.gm_free_list_max,
            mca_btl_gm_component.gm_free_list_inc,
            btl->super.btl_mpool );

    /* user fragments carry no payload buffer, so no mpool is needed */
    ompi_free_list_init_new( &btl->gm_frag_user,
            sizeof (mca_btl_gm_frag_user_t),
            opal_cache_line_size,
            OBJ_CLASS (mca_btl_gm_frag_user_t),
            0,opal_cache_line_size,
            mca_btl_gm_component.gm_free_list_num,
            mca_btl_gm_component.gm_free_list_max,
            mca_btl_gm_component.gm_free_list_inc,
            NULL );

    /* post receive buffers: high-priority (eager) first ... */
    for(i=0; i<num_high_priority; i++) {
        mca_btl_gm_frag_t* frag;
        MCA_BTL_GM_FRAG_ALLOC_EAGER(btl, frag, rc);
        if(NULL == frag) {
            return rc;
        }
        frag->type = MCA_BTL_GM_EAGER;
        frag->base.des_src = NULL;
        frag->base.des_src_cnt = 0;
        frag->base.des_dst = &frag->segment;
        frag->base.des_dst_cnt = 1;
        frag->priority = GM_HIGH_PRIORITY;
        gm_provide_receive_buffer(btl->port, frag->hdr, frag->size, frag->priority);
    }
    /* ... then fill the rest of the recv tokens with low-priority buffers.
       NOTE(review): this loop starts at the configured (uncapped)
       gm_num_high_priority, not the clamped num_high_priority above --
       confirm that is intended. */
    for(i=mca_btl_gm_component.gm_num_high_priority; i<btl->gm_max_recv_tokens; i++) {
        mca_btl_gm_frag_t* frag;
        MCA_BTL_GM_FRAG_ALLOC_MAX(btl, frag, rc);
        if(NULL == frag) {
            return rc;
        }
        frag->type = MCA_BTL_GM_SEND;
        frag->base.des_src = NULL;
        frag->base.des_src_cnt = 0;
        frag->base.des_dst = &frag->segment;
        frag->base.des_dst_cnt = 1;
        frag->priority = GM_LOW_PRIORITY;
        gm_provide_receive_buffer(btl->port, frag->hdr, frag->size, frag->priority);
    }

    /* enable rdma */
    if( GM_SUCCESS != gm_allow_remote_memory_access (btl->port) ) {
        opal_output (0, "[%s:%d] unable to allow remote memory access", __FILE__, __LINE__);
        return OMPI_ERROR;
    }

#if OMPI_ENABLE_PROGRESS_THREADS
    /* start progress thread */
    btl->gm_progress = true;
    btl->gm_thread.t_run = mca_btl_gm_progress_thread;
    btl->gm_thread.t_arg = btl;
    if(OPAL_SUCCESS != (rc = opal_thread_start(&btl->gm_thread))) {
        opal_output (0, "[%s:%d] unable to create progress thread, retval=%d", __FILE__, __LINE__, rc);
        return rc;
    }
#endif
    return OMPI_SUCCESS;
}
/*
 * Scan all ports on the boards. As it's difficult to find the total number of boards
 * so we use a predefined maximum.  One BTL module is created per board for
 * which a GM port can be opened, up to gm_max_btls modules total.
 */
static int mca_btl_gm_discover( void )
{
    uint32_t board_no;
    uint32_t port_no;
    uint32_t node_id;
    struct gm_port* port;
#if GM_API_VERSION > 0x200
    uint32_t global_id;
#else
    char global_id[GM_MAX_HOST_NAME_LEN];
#endif  /* GM_API_VERSION > 0x200 */
    int rc;

    for( board_no = 0; board_no < mca_btl_gm_component.gm_max_boards; board_no++ ) {
        mca_btl_gm_module_t *btl;

        /* open the first available gm port for this board.
           NOTE(review): the loop starts at port 4, so the "3 == port_no"
           branch below is unreachable; the comment suggests the loop was
           once meant to start at 2 and skip port 3 -- confirm intent. */
        for( port_no = 4; port_no < mca_btl_gm_component.gm_max_ports; port_no++ ) {
            if (3 == port_no) {
                continue;  /* port 0,1,3 reserved */
            } else if (GM_SUCCESS == gm_open(&port, board_no, port_no,
                        mca_btl_gm_component.gm_port_name, GM_API_VERSION) ) {
                break;
            }
        }
        if( port_no == mca_btl_gm_component.gm_max_ports ) {
            continue;  /* no usable port on this board */
        }

        /* Get node local Id.
           NOTE(review): the "continue" error paths below leave the opened
           port unclosed -- a resource leak on this (removed) code path. */
        if( GM_SUCCESS != gm_get_node_id( port, &node_id) ) {
            opal_output (0, " failure to get node_id \n");
            continue;
        }

        /* Gather an unique id for the node */
#if GM_API_VERSION > 0x200
        if (GM_SUCCESS != gm_node_id_to_global_id( port, node_id, &global_id) ) {
            opal_output (0, "[%s:%d] Unable to get my GM global unique id", __FILE__, __LINE__);
            continue;
        }
#else
        if( GM_SUCCESS != gm_get_host_name( port, global_id ) ) {
            opal_output( 0, "[%s:%d] Unable to get the GM host name\n", __FILE__, __LINE__);
            continue;
        }
#endif  /* GM_API_VERSION > 0x200 */

        /* create the btl module */
        btl = (mca_btl_gm_module_t *)malloc( sizeof(mca_btl_gm_module_t) );
        if (NULL == btl) {
            opal_output( 0, "[%s:%d] out of resources", __FILE__, __LINE__);
            return OMPI_ERR_OUT_OF_RESOURCE;
        }

        /* copy the basic informations into the new BTL (template module) */
        memcpy (btl, &mca_btl_gm_module, sizeof(mca_btl_gm_module_t));

        /* setup local address */
        btl->port = port;
        btl->gm_addr.port_id = port_no;
        btl->gm_addr.node_id = node_id;
#if GM_API_VERSION > 0x200
        btl->gm_addr.global_id = global_id;
#else
        strncpy( btl->gm_addr.global_id, global_id, GM_MAX_HOST_NAME_LEN );
#endif  /* GM_API_VERSION > 0x200 */

        if(mca_btl_gm_component.gm_debug > 0) {
            opal_output(0,
                    "%s gm_port %08lX, "
                    "board %" PRIu32 ", global %" PRIu32 " "
                    "node %" PRIu32 "port %" PRIu32 "\n",
                    ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                    (unsigned long) port, board_no, global_id, node_id, port_no);
        }

        if((rc = mca_btl_gm_module_init(btl)) != OMPI_SUCCESS) {
            /* NOTE(review): btl is leaked here and the port stays open */
            opal_output(0, "[%s:%d] unable to initialze gm port", __FILE__, __LINE__);
            return rc;
        }

        /* everything is OK let's mark it as usable and go to the next one */
        mca_btl_gm_component.gm_btls[mca_btl_gm_component.gm_num_btls] = btl;
        if(++mca_btl_gm_component.gm_num_btls >= mca_btl_gm_component.gm_max_btls ) {
            break;
        }
    }
    return OMPI_SUCCESS;
}
/*
 * Publish the GM addressing information of every local BTL module via
 * the modex so that remote peers can find us.  Sending a zero-length
 * payload (when no modules exist) is deliberate: peers still learn that
 * this process has no GM connectivity.
 */
static int
mca_btl_gm_modex_send(void)
{
    mca_btl_gm_addr_t *export = NULL;
    size_t nbytes;
    size_t m;
    int status;

    nbytes = mca_btl_gm_component.gm_num_btls * sizeof (mca_btl_gm_addr_t);
    if (nbytes > 0) {
        export = (mca_btl_gm_addr_t *) malloc (nbytes);
        if (NULL == export) {
            return OMPI_ERR_OUT_OF_RESOURCE;
        }
        /* snapshot each module's address, converted to network byte order */
        for (m = 0; m < mca_btl_gm_component.gm_num_btls; m++) {
            export[m] = mca_btl_gm_component.gm_btls[m]->gm_addr;
            MCA_BTL_GM_ADDR_HTON(export[m]);
        }
    }

    status = ompi_modex_send (&mca_btl_gm_component.super.btl_version,
                              export, nbytes);
    if (NULL != export) {
        free (export);
    }
    return status;
}
/*
 * Initialize the GM component,
 * check how many boards are available and open ports on them.
 * On every early-exit path the (possibly empty) modex is still
 * published so peers do not block waiting for our address data.
 */
mca_btl_base_module_t **
mca_btl_gm_component_init (int *num_btl_modules,
        bool enable_progress_threads,
        bool enable_mpi_threads)
{
    mca_btl_base_module_t **btls;
    *num_btl_modules = 0;

    OPAL_THREAD_LOCK(&mca_btl_gm_component.gm_lock);

    /* Currently refuse to run if MPI_THREAD_MULTIPLE is enabled */
    if (ompi_mpi_thread_multiple && !mca_btl_base_thread_multiple_override) {
        mca_btl_gm_component.gm_num_btls = 0;
        mca_btl_gm_modex_send();
        OPAL_THREAD_UNLOCK(&mca_btl_gm_component.gm_lock);
        return NULL;
    }

    /* try to initialize GM */
    if( GM_SUCCESS != gm_init() ) {
        opal_output( 0, "[%s:%d] error in initializing the gm library\n", __FILE__, __LINE__ );
        mca_btl_gm_component.gm_num_btls = 0;
        mca_btl_gm_modex_send();
        OPAL_THREAD_UNLOCK(&mca_btl_gm_component.gm_lock);
        return NULL;
    }

    /* First discover all available boards. For each board we create a unique BTL */
    mca_btl_gm_component.gm_btls = malloc( mca_btl_gm_component.gm_max_btls * sizeof (mca_btl_gm_module_t *));
    if (NULL == mca_btl_gm_component.gm_btls) {
        opal_output( 0, "[%s:%d] out of resources.", __FILE__, __LINE__ );
        OPAL_THREAD_UNLOCK(&mca_btl_gm_component.gm_lock);
        return NULL;
    }

    /* initialize gm: populates gm_btls / gm_num_btls */
    if (OMPI_SUCCESS != mca_btl_gm_discover()) {
        mca_btl_base_error_no_nics("Myrinet/GM", "NIC");
        mca_btl_gm_component.gm_num_btls = 0;
        mca_btl_gm_modex_send();
        OPAL_THREAD_UNLOCK(&mca_btl_gm_component.gm_lock);
        return NULL;
    }
    if (mca_btl_gm_component.gm_num_btls == 0) {
        mca_btl_base_error_no_nics("Myrinet/GM", "NIC");
        mca_btl_gm_component.gm_num_btls = 0;
        mca_btl_gm_modex_send();
        OPAL_THREAD_UNLOCK(&mca_btl_gm_component.gm_lock);
        return NULL;
    }

    /* publish GM parameters with the MCA framework */
    if (OMPI_SUCCESS != mca_btl_gm_modex_send()) {
        OPAL_THREAD_UNLOCK(&mca_btl_gm_component.gm_lock);
        return NULL;
    }

    /* return array of BTLs (caller owns this array; the modules stay
       owned by the component) */
    btls = (mca_btl_base_module_t**) malloc (
            mca_btl_gm_component.gm_num_btls * sizeof(mca_btl_base_module_t *));
    if (NULL == btls) {
        OPAL_THREAD_UNLOCK(&mca_btl_gm_component.gm_lock);
        return NULL;
    }

    memcpy(btls, mca_btl_gm_component.gm_btls,
            mca_btl_gm_component.gm_num_btls * sizeof(mca_btl_gm_module_t *));
    *num_btl_modules = mca_btl_gm_component.gm_num_btls;
    OPAL_THREAD_UNLOCK(&mca_btl_gm_component.gm_lock);
    return btls;
}
/*
* GM component progress.
*/
int mca_btl_gm_component_progress()
{
static int32_t inprogress = 0;
int count = 0;
size_t i;
/* could get into deadlock in this case as we post recvs after callback completes */
if(OPAL_THREAD_ADD32(&inprogress, 1) > 1) {
OPAL_THREAD_ADD32(&inprogress, -1);
return OMPI_SUCCESS;
}
OPAL_THREAD_LOCK(&mca_btl_gm_component.gm_lock);
for( i = 0; i < mca_btl_gm_component.gm_num_btls; ) {
mca_btl_gm_module_t* btl = mca_btl_gm_component.gm_btls[i];
gm_recv_event_t* event = gm_receive(btl->port);
/* If there are no receive events just skip the function call */
switch(gm_ntohc(event->recv.type)) {
case GM_FAST_RECV_EVENT:
case GM_FAST_PEER_RECV_EVENT:
case GM_FAST_HIGH_RECV_EVENT:
case GM_FAST_HIGH_PEER_RECV_EVENT:
{
unsigned char* buffer = (unsigned char*)gm_ntohp(event->recv.buffer);
mca_btl_gm_frag_t* frag = (mca_btl_gm_frag_t*)*((uintptr_t*)(buffer - sizeof(uintptr_t)));
mca_btl_base_header_t* hdr = (mca_btl_base_header_t *)gm_ntohp(event->recv.message);
mca_btl_active_message_callback_t* reg;
frag->segment.seg_addr.pval = (hdr+1);
frag->segment.seg_len = gm_ntohl(event->recv.length) - sizeof(mca_btl_base_header_t);
reg = mca_btl_base_active_message_trigger + hdr->tag;
/* cbfunc may be null if interface goes down.. */
if(reg->cbfunc) {
OPAL_THREAD_UNLOCK(&mca_btl_gm_component.gm_lock);
reg->cbfunc(&btl->super, hdr->tag, &frag->base, reg->cbdata);
OPAL_THREAD_LOCK(&mca_btl_gm_component.gm_lock);
} else {
btl->error_cb(&btl->super, MCA_BTL_ERROR_FLAGS_FATAL, NULL, NULL);
return 0;
}
MCA_BTL_GM_FRAG_POST(btl,frag);
count++;
break;
}
case GM_RECV_EVENT:
case GM_PEER_RECV_EVENT:
case GM_HIGH_RECV_EVENT:
case GM_HIGH_PEER_RECV_EVENT:
{
unsigned char* buffer = (unsigned char*)gm_ntohp(event->recv.buffer);
mca_btl_gm_frag_t* frag = (mca_btl_gm_frag_t*)*((uintptr_t*)(buffer - sizeof(uintptr_t)));
mca_btl_base_header_t* hdr = (mca_btl_base_header_t*)buffer;
mca_btl_active_message_callback_t* reg;
frag->segment.seg_addr.pval = (hdr+1);
frag->segment.seg_len = gm_ntohl(event->recv.length) - sizeof(mca_btl_base_header_t);
reg = mca_btl_base_active_message_trigger + hdr->tag;
if(reg->cbfunc) {
OPAL_THREAD_UNLOCK(&mca_btl_gm_component.gm_lock);
reg->cbfunc(&btl->super, hdr->tag, &frag->base, reg->cbdata);
OPAL_THREAD_LOCK(&mca_btl_gm_component.gm_lock);
MCA_BTL_GM_FRAG_POST(btl,frag);
} else {
btl->error_cb(&btl->super, MCA_BTL_ERROR_FLAGS_FATAL, NULL, NULL);
return 0;
}
count++;
break;
}
case GM_NO_RECV_EVENT:
i++;
break;
default:
gm_unknown(btl->port, event);
break;
}
}
OPAL_THREAD_UNLOCK(&mca_btl_gm_component.gm_lock);
OPAL_THREAD_ADD32(&inprogress, -1);
return count;
}
#if OMPI_ENABLE_PROGRESS_THREADS
/*
 * Dedicated progress thread: blocks in GM waiting for receive events
 * while the application is outside the MPI library, and backs off
 * (yield/sleep) whenever the application itself is progressing.
 * Runs until btl->gm_progress is cleared.
 */
static void* mca_btl_gm_progress_thread( opal_object_t* arg )
{
    opal_thread_t* thread = (opal_thread_t*)arg;
    mca_btl_gm_module_t* btl = thread->t_arg;

    /* This thread enter in a cancel enabled state */
    pthread_setcancelstate( PTHREAD_CANCEL_ENABLE, NULL );
    pthread_setcanceltype( PTHREAD_CANCEL_ASYNCHRONOUS, NULL );

    OPAL_THREAD_LOCK(&mca_btl_gm_component.gm_lock);
    while(btl->gm_progress) {
        gm_recv_event_t* event;

        /* dont process events while the app is in the library */
        while(ompi_progress_threads()) {
            OPAL_THREAD_UNLOCK(&mca_btl_gm_component.gm_lock);
            while(ompi_progress_threads())
                sched_yield();
            usleep(100); /* give app a chance to re-enter library */
            OPAL_THREAD_LOCK(&mca_btl_gm_component.gm_lock);
        }

        /* otherwise processes any pending events */
        event = gm_blocking_receive_no_spin(btl->port);
        switch(gm_ntohc(event->recv.type)) {
            case GM_FAST_RECV_EVENT:
            case GM_FAST_PEER_RECV_EVENT:
            case GM_FAST_HIGH_RECV_EVENT:
            case GM_FAST_HIGH_PEER_RECV_EVENT:
                {
                /* payload arrives via event->recv.message; recover the
                   fragment from the pointer stashed below the buffer */
                unsigned char* buffer = (unsigned char*)gm_ntohp(event->recv.buffer);
                mca_btl_gm_frag_t* frag = (mca_btl_gm_frag_t*)*((uintptr_t*)(buffer - sizeof(uintptr_t)));
                mca_btl_base_header_t* hdr = (mca_btl_base_header_t *)gm_ntohp(event->recv.message);
                mca_btl_active_message_callback_t* reg;
                frag->segment.seg_addr.pval = (hdr+1);
                frag->segment.seg_len = gm_ntohl(event->recv.length) - sizeof(mca_btl_base_header_t);
                reg = mca_btl_base_active_message_trigger + hdr->tag;
                OPAL_THREAD_UNLOCK(&mca_btl_gm_component.gm_lock);
                reg->cbfunc(&btl->super, hdr->tag, &frag->base, reg->cbdata);
                OPAL_THREAD_LOCK(&mca_btl_gm_component.gm_lock);
                MCA_BTL_GM_FRAG_POST(btl,frag);
                break;
                }
            case GM_RECV_EVENT:
            case GM_PEER_RECV_EVENT:
            case GM_HIGH_RECV_EVENT:
            case GM_HIGH_PEER_RECV_EVENT:
                {
                /* payload sits in the posted buffer, header first */
                unsigned char* buffer = (unsigned char*)gm_ntohp(event->recv.buffer);
                mca_btl_gm_frag_t* frag = (mca_btl_gm_frag_t*)*((uintptr_t*)(buffer - sizeof(uintptr_t)));
                mca_btl_base_header_t* hdr = (mca_btl_base_header_t*)buffer;
                mca_btl_active_message_callback_t* reg;
                frag->segment.seg_addr.pval = (hdr+1);
                frag->segment.seg_len = gm_ntohl(event->recv.length) - sizeof(mca_btl_base_header_t);
                reg = mca_btl_base_active_message_trigger + hdr->tag;
                OPAL_THREAD_UNLOCK(&mca_btl_gm_component.gm_lock);
                reg->cbfunc(&btl->super, hdr->tag, &frag->base, reg->cbdata);
                OPAL_THREAD_LOCK(&mca_btl_gm_component.gm_lock);
                MCA_BTL_GM_FRAG_POST(btl,frag);
                break;
                }
            case _GM_SLEEP_EVENT:
                /* drop the lock while GM parks the thread */
                OPAL_THREAD_UNLOCK(&mca_btl_gm_component.gm_lock);
                gm_unknown(btl->port, event);
                OPAL_THREAD_LOCK(&mca_btl_gm_component.gm_lock);
                break;
            default:
                gm_unknown(btl->port, event);
                break;
        }
    }
    OPAL_THREAD_UNLOCK(&mca_btl_gm_component.gm_lock);
    return PTHREAD_CANCELED;
}
#endif

Просмотреть файл

@ -1,56 +0,0 @@
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include <sys/time.h>
#include <time.h>
#include "ompi/types.h"
#include "btl_gm.h"
#include "btl_gm_endpoint.h"
#include "btl_gm_proc.h"
#include "btl_gm_frag.h"
/*
 * Initialize state of the endpoint instance: no owning BTL or proc
 * until mca_btl_gm_add_procs() fills them in.
 */
static void mca_btl_gm_endpoint_construct(mca_btl_base_endpoint_t* endpoint)
{
    endpoint->endpoint_btl = 0;
    endpoint->endpoint_proc = 0;
}

/*
 * Destroy an endpoint.  Nothing to release: the endpoint owns no
 * dynamically allocated state of its own.
 */
static void mca_btl_gm_endpoint_destruct(mca_btl_base_endpoint_t* endpoint)
{
}

/* Endpoints live on opal lists (see mca_btl_gm_proc_t), hence the
 * opal_list_item_t parent class. */
OBJ_CLASS_INSTANCE(
        mca_btl_gm_endpoint_t,
        opal_list_item_t,
        mca_btl_gm_endpoint_construct,
        mca_btl_gm_endpoint_destruct);

Просмотреть файл

@ -1,93 +0,0 @@
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#ifndef MCA_BTL_GM_ENDPOINT_H
#define MCA_BTL_GM_ENDPOINT_H
#include "opal/class/opal_list.h"
#include "opal/mca/event/event.h"
#include "btl_gm_frag.h"
#include "btl_gm.h"
#ifdef HAVE_ARPA_INET_H
#include <arpa/inet.h>
#endif
BEGIN_C_DECLS
/**
 * Structure used to publish GM id information to peers (sent through
 * the modex in network byte order; see MCA_BTL_GM_ADDR_HTON/NTOH).
 */
struct mca_btl_gm_addr_t {
#if GM_API_VERSION > 0x200
    unsigned int global_id;              /* GM >= 2.x: numeric global id */
#else
    char global_id[GM_MAX_HOST_NAME_LEN];  /* GM 1.x: host name string */
#endif  /* GM_API_VERSION > 0x200 */
    unsigned int node_id;   /* board-local GM node id */
    unsigned int port_id;   /* port opened on that node */
};
typedef struct mca_btl_gm_addr_t mca_btl_gm_addr_t;
/*
 * Convert an mca_btl_gm_addr_t between host and network byte order
 * in place.  The string-valued global_id of GM 1.x needs no swapping.
 *
 * Fix vs. the original: the multi-statement bodies are wrapped in
 * do { } while (0) so the macros behave as a single statement inside
 * unbraced if/else, and the argument is parenthesized.
 */
#if GM_API_VERSION > 0x200
#define MCA_BTL_GM_ADDR_HTON(addr)                        \
    do {                                                  \
        (addr).global_id = htonl((addr).global_id);       \
        (addr).node_id = htonl((addr).node_id);           \
        (addr).port_id = htonl((addr).port_id);           \
    } while (0)

#define MCA_BTL_GM_ADDR_NTOH(addr)                        \
    do {                                                  \
        (addr).global_id = ntohl((addr).global_id);       \
        (addr).node_id = ntohl((addr).node_id);           \
        (addr).port_id = ntohl((addr).port_id);           \
    } while (0)
#else
#define MCA_BTL_GM_ADDR_HTON(addr)                        \
    do {                                                  \
        (addr).node_id = htonl((addr).node_id);           \
        (addr).port_id = htonl((addr).port_id);           \
    } while (0)

#define MCA_BTL_GM_ADDR_NTOH(addr)                        \
    do {                                                  \
        (addr).node_id = ntohl((addr).node_id);           \
        (addr).port_id = ntohl((addr).port_id);           \
    } while (0)
#endif
/**
 * An abstraction that represents a connection to a endpoint process.
 * An instance of mca_btl_base_endpoint_t is associated w/ each process
 * and BTL pair at startup. However, connections to the endpoint
 * are established dynamically on an as-needed basis:
 */
struct mca_btl_base_endpoint_t {
    opal_list_item_t super;   /* allows endpoints to live on opal lists */

    struct mca_btl_gm_module_t* endpoint_btl;
    /**< BTL instance that created this connection */

    struct mca_btl_gm_proc_t* endpoint_proc;
    /**< proc structure corresponding to endpoint */

    mca_btl_gm_addr_t endpoint_addr;  /* peer's GM address (host byte order) */
};
typedef struct mca_btl_base_endpoint_t mca_btl_base_endpoint_t;
typedef mca_btl_base_endpoint_t  mca_btl_gm_endpoint_t;
OBJ_CLASS_DECLARATION(mca_btl_gm_endpoint_t);
END_C_DECLS
#endif

Просмотреть файл

@ -1,84 +0,0 @@
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "btl_gm_frag.h"
/* Shared tail of every fragment constructor: clear the descriptor's
 * source/destination segment lists and the mpool registration. */
#define MCA_BTL_GM_FRAG_COMMON_CONSTRUCTOR(frag)  \
do {                                              \
    frag->base.des_src = NULL;                    \
    frag->base.des_src_cnt = 0;                   \
    frag->base.des_dst = NULL;                    \
    frag->base.des_dst_cnt = 0;                   \
    frag->registration = NULL;                    \
} while(0)
/* Eager-fragment constructor.  Free-list buffer layout:
 * [fragment back-pointer][BTL header][payload...]; the back-pointer is
 * how the progress code recovers the fragment from a GM recv buffer. */
static void mca_btl_gm_frag_eager_constructor(mca_btl_gm_frag_t* frag)
{
    uintptr_t *ctx = (uintptr_t*)frag->base.super.ptr;
    *ctx = (uintptr_t)frag;
    frag->hdr = (mca_btl_base_header_t*)(ctx + 1);
    frag->segment.seg_addr.pval = (unsigned char*)(frag->hdr + 1);
    frag->segment.seg_len = mca_btl_gm_module.super.btl_eager_limit - sizeof(mca_btl_base_header_t);
    /* GM "size" code for the eager buffer class */
    frag->size = mca_btl_gm_component.gm_eager_frag_size;
    MCA_BTL_GM_FRAG_COMMON_CONSTRUCTOR(frag);
}
/* Max-fragment constructor: same buffer layout as the eager variant
 * but sized for btl_max_send_size. */
static void mca_btl_gm_frag_max_constructor(mca_btl_gm_frag_t* frag)
{
    uintptr_t *ctx = (uintptr_t*)frag->base.super.ptr;
    *ctx = (uintptr_t)frag;
    frag->hdr = (mca_btl_base_header_t*)(ctx + 1);
    frag->segment.seg_addr.pval = (unsigned char*)(frag->hdr + 1);
    frag->segment.seg_len = mca_btl_gm_module.super.btl_max_send_size - sizeof(mca_btl_base_header_t);
    /* GM "size" code for the max buffer class */
    frag->size = mca_btl_gm_component.gm_max_frag_size;
    MCA_BTL_GM_FRAG_COMMON_CONSTRUCTOR(frag);
}
/* User-fragment constructor: these fragments describe caller-owned
 * (e.g. RDMA) memory, so no pre-allocated buffer or header exists. */
static void mca_btl_gm_frag_user_constructor(mca_btl_gm_frag_t* frag)
{
    frag->size = 0;
    frag->hdr = NULL;
    frag->registration = NULL;
    MCA_BTL_GM_FRAG_COMMON_CONSTRUCTOR(frag);
}
/* OBJ class registrations: the generic fragment class plus one class
 * per free list (eager/max/user), each with its own constructor. */
OBJ_CLASS_INSTANCE(
        mca_btl_gm_frag_t,
        mca_btl_base_descriptor_t,
        NULL,
        NULL);

OBJ_CLASS_INSTANCE(
        mca_btl_gm_frag_eager_t,
        mca_btl_base_descriptor_t,
        mca_btl_gm_frag_eager_constructor,
        NULL);

OBJ_CLASS_INSTANCE(
        mca_btl_gm_frag_max_t,
        mca_btl_base_descriptor_t,
        mca_btl_gm_frag_max_constructor,
        NULL);

OBJ_CLASS_INSTANCE(
        mca_btl_gm_frag_user_t,
        mca_btl_base_descriptor_t,
        mca_btl_gm_frag_user_constructor,
        NULL);

Просмотреть файл

@ -1,139 +0,0 @@
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2006 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#ifndef MCA_BTL_GM_FRAG_H
#define MCA_BTL_GM_FRAG_H
#define MCA_BTL_GM_FRAG_ALIGN (8)
#include "ompi_config.h"
#include "btl_gm.h"
#include "ompi/mca/btl/base/btl_base_error.h"
BEGIN_C_DECLS
/** Fragment flavor tag; used by MCA_BTL_GM_FRAG_RETURN to route a
 *  fragment back to the free list it was allocated from. */
typedef enum {
    MCA_BTL_GM_EAGER,   /**< small eagerly-sent fragment */
    MCA_BTL_GM_SEND,    /**< max-size send fragment */
    MCA_BTL_GM_PUT,     /**< RDMA put, user-owned buffer */
    MCA_BTL_GM_GET      /**< RDMA get, user-owned buffer */
} mca_btl_gm_frag_type_t;
/**
 * GM send fragment derived type.
 *
 * A fragment doubles as the generic BTL descriptor handed to the
 * upper layers and as the bookkeeping for the GM buffer it wraps.
 */
struct mca_btl_gm_frag_t {
    mca_btl_base_descriptor_t base;       /**< base descriptor; must be first */
    mca_btl_base_segment_t segment;       /**< payload segment within the buffer */
    struct mca_btl_gm_module_t* btl;      /**< owning module */
    struct mca_btl_base_endpoint_t *endpoint; /**< destination endpoint */
    struct mca_mpool_base_registration_t* registration; /**< mpool registration, if any */
    mca_btl_base_header_t *hdr;           /**< BTL header inside the buffer */
    size_t size;                          /**< GM buffer size for (re)posting */
    enum gm_priority priority;            /**< GM receive priority for reposts */
    mca_btl_gm_frag_type_t type;          /**< flavor tag; selects the free list */
};
typedef struct mca_btl_gm_frag_t mca_btl_gm_frag_t;
OBJ_CLASS_DECLARATION(mca_btl_gm_frag_t);

/* The eager/max/user flavors share the same layout; the distinct
 * typedefs exist only so each can register its own constructor. */
typedef struct mca_btl_gm_frag_t mca_btl_gm_frag_eager_t;
OBJ_CLASS_DECLARATION(mca_btl_gm_frag_eager_t);

typedef struct mca_btl_gm_frag_t mca_btl_gm_frag_max_t;
OBJ_CLASS_DECLARATION(mca_btl_gm_frag_max_t);

typedef struct mca_btl_gm_frag_t mca_btl_gm_frag_user_t;
OBJ_CLASS_DECLARATION(mca_btl_gm_frag_user_t);
/*
 * Macros to allocate descriptors from the module-specific free
 * list(s).
 *
 * Fix: all three allocators are now wrapped in do { } while(0) so each
 * expands to exactly one statement and is safe inside an unbraced
 * if/else (the original bare { } blocks were a dangling-else hazard,
 * CERT PRE10-C).  On failure "frag" is the cast of a NULL item and
 * "rc" carries the error code from OMPI_FREE_LIST_GET.
 */
#define MCA_BTL_GM_FRAG_ALLOC_EAGER(btl, frag, rc)                             \
do {                                                                           \
    ompi_free_list_item_t *item;                                               \
    OMPI_FREE_LIST_GET(&((mca_btl_gm_module_t*)btl)->gm_frag_eager, item, rc); \
    frag = (mca_btl_gm_frag_t*) item;                                          \
} while (0)

#define MCA_BTL_GM_FRAG_ALLOC_MAX(btl, frag, rc)                               \
do {                                                                           \
    ompi_free_list_item_t *item;                                               \
    OMPI_FREE_LIST_GET(&((mca_btl_gm_module_t*)btl)->gm_frag_max, item, rc);   \
    frag = (mca_btl_gm_frag_t*) item;                                          \
} while (0)

#define MCA_BTL_GM_FRAG_ALLOC_USER(btl, frag, rc)                              \
do {                                                                           \
    ompi_free_list_item_t *item;                                               \
    OMPI_FREE_LIST_GET(&((mca_btl_gm_module_t*)btl)->gm_frag_user, item, rc);  \
    frag = (mca_btl_gm_frag_t*) item;                                          \
} while (0)
/*
 * Return a fragment to the free list it was allocated from, selected
 * by the fragment's type tag.
 *
 * Fixes vs. original:
 *  - removed the stray ';' after while(0): it made the macro expand to
 *    two statements, defeating the do/while(0) wrapper and breaking
 *    use in an unbraced if/else.
 *  - an unknown frag type is now reported and the fragment leaked
 *    rather than passing a NULL list to OMPI_FREE_LIST_RETURN.
 */
#define MCA_BTL_GM_FRAG_RETURN(btl, frag)                               \
do {                                                                    \
    ompi_free_list_t* mylist = NULL;                                    \
    mca_btl_gm_module_t* btl_gm = (mca_btl_gm_module_t*) btl;           \
    mca_btl_gm_frag_t* frag_gm = (mca_btl_gm_frag_t*) frag;             \
    switch(frag_gm->type) {                                             \
        case MCA_BTL_GM_EAGER:                                          \
            mylist = &btl_gm->gm_frag_eager;                            \
            break;                                                      \
        case MCA_BTL_GM_SEND:                                           \
            mylist = &btl_gm->gm_frag_max;                              \
            break;                                                      \
        case MCA_BTL_GM_PUT:                                            \
        case MCA_BTL_GM_GET:                                            \
            mylist = &btl_gm->gm_frag_user;                             \
            break;                                                      \
        default:                                                        \
            BTL_ERROR(("Unknown frag type\n"));                         \
            break;                                                      \
    }                                                                   \
    if (NULL != mylist) {                                               \
        OMPI_FREE_LIST_RETURN(mylist,                                   \
                              (ompi_free_list_item_t*)(frag));          \
    }                                                                   \
} while (0)
/* called with mca_btl_gm_component.gm_lock held */
/*
 * (Re)post a receive buffer to GM.
 *
 * Small batches are deferred: fragments accumulate on btl->gm_repost
 * until gm_num_repost are queued, then "frag" and the whole backlog
 * are handed to gm_provide_receive_buffer() in one pass (the loop
 * posts "frag", then drains the repost list one entry at a time).
 * NOTE(review): gm_provide_receive_buffer() per-priority semantics
 * come from the GM API -- confirm against the GM documentation.
 */
#define MCA_BTL_GM_FRAG_POST(btl,frag) \
do { \
if(opal_list_get_size(&btl->gm_repost) < (size_t)btl->gm_num_repost) { \
opal_list_append(&btl->gm_repost, (opal_list_item_t*)frag); \
} else { \
do { \
gm_provide_receive_buffer(btl->port, frag->hdr, frag->size, frag->priority); \
} while (NULL != (frag = (mca_btl_gm_frag_t*)opal_list_remove_first(&btl->gm_repost))); \
} \
} while(0)
END_C_DECLS
#endif

Просмотреть файл

@ -1,212 +0,0 @@
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2011 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "ompi/runtime/ompi_module_exchange.h"
#ifdef HAVE_INTTYPES_H
#include <inttypes.h>
#endif
#include "orte/util/name_fns.h"
#include "orte/runtime/orte_globals.h"
#include "btl_gm.h"
#include "btl_gm_proc.h"
/* Constructor/destructor registered with the OPAL class system below;
 * the class derives from opal_list_item_t so procs can live on the
 * component's global gm_procs list. */
static void mca_btl_gm_proc_construct(mca_btl_gm_proc_t* proc);
static void mca_btl_gm_proc_destruct(mca_btl_gm_proc_t* proc);

OBJ_CLASS_INSTANCE(mca_btl_gm_proc_t,
        opal_list_item_t, mca_btl_gm_proc_construct,
        mca_btl_gm_proc_destruct);
/**
 * Construct a GM proc instance: zero the cached peer state, build the
 * per-proc lock, and append the proc to the component's global list
 * of known procs (under gm_lock).
 *
 * Fix: pointer members are initialized with NULL rather than the
 * integer literal 0.
 */
void mca_btl_gm_proc_construct(mca_btl_gm_proc_t* gm_proc)
{
    gm_proc->proc_ompi = NULL;
    gm_proc->proc_addr_count = 0;
    gm_proc->proc_endpoints = NULL;
    gm_proc->proc_endpoint_count = 0;
    OBJ_CONSTRUCT(&gm_proc->proc_lock, opal_mutex_t);

    /* add to list of all proc instances */
    OPAL_THREAD_LOCK(&mca_btl_gm_component.gm_lock);
    opal_list_append(&mca_btl_gm_component.gm_procs, &gm_proc->super);
    OPAL_THREAD_UNLOCK(&mca_btl_gm_component.gm_lock);
}
/*
 * Cleanup a GM proc instance: unlink it from the component's global
 * proc list (under gm_lock) and release the cached endpoint array and
 * the per-proc lock.
 *
 * Fix: dropped the redundant NULL guard around free() -- free(NULL)
 * is a no-op by definition.
 */
void mca_btl_gm_proc_destruct(mca_btl_gm_proc_t* gm_proc)
{
    /* remove from list of all proc instances */
    OPAL_THREAD_LOCK(&mca_btl_gm_component.gm_lock);
    opal_list_remove_item(&mca_btl_gm_component.gm_procs, &gm_proc->super);
    OPAL_THREAD_UNLOCK(&mca_btl_gm_component.gm_lock);

    /* release resources */
    free(gm_proc->proc_endpoints);
    OBJ_DESTRUCT(&gm_proc->proc_lock);
}
/*
 * Look up an existing GM proc instance keyed by its associated
 * ompi_proc_t.  Walks the component's global proc list under gm_lock;
 * returns NULL when the peer has not been seen before.
 */
static mca_btl_gm_proc_t* mca_btl_gm_proc_lookup_ompi(ompi_proc_t* ompi_proc)
{
    mca_btl_gm_proc_t* found = NULL;
    mca_btl_gm_proc_t* candidate;

    OPAL_THREAD_LOCK(&mca_btl_gm_component.gm_lock);
    for (candidate = (mca_btl_gm_proc_t*)opal_list_get_first(&mca_btl_gm_component.gm_procs);
         candidate != (mca_btl_gm_proc_t*)opal_list_get_end(&mca_btl_gm_component.gm_procs);
         candidate = (mca_btl_gm_proc_t*)opal_list_get_next(candidate)) {
        if (candidate->proc_ompi == ompi_proc) {
            found = candidate;
            break;  /* single unlock point below */
        }
    }
    OPAL_THREAD_UNLOCK(&mca_btl_gm_component.gm_lock);

    return found;
}
/*
 * Create a GM process structure.  There is a one-to-one correspondence
 * between an ompi_proc_t and a mca_btl_gm_proc_t instance.  We cache
 * additional data (specifically the list of mca_btl_gm_endpoint_t
 * instances, and published addresses) associated w/ a given
 * destination on this datastructure.
 *
 * Returns the (possibly pre-existing) proc, or NULL on allocation or
 * modex failure.
 *
 * Fix: the OBJ_NEW result is checked before being dereferenced.
 */
mca_btl_gm_proc_t* mca_btl_gm_proc_create(ompi_proc_t* ompi_proc)
{
    mca_btl_gm_proc_t* gm_proc = NULL;
    size_t i, size;
    int rc;

    /* Check if we have already created a GM proc
     * structure for this ompi process */
    gm_proc = mca_btl_gm_proc_lookup_ompi(ompi_proc);
    if(gm_proc != NULL) {
        return gm_proc;
    }

    /* create a new gm proc out of the ompi_proc ... */
    gm_proc = OBJ_NEW(mca_btl_gm_proc_t);
    if (NULL == gm_proc) {
        /* out of memory */
        return NULL;
    }
    gm_proc->proc_endpoint_count = 0;
    gm_proc->proc_ompi = ompi_proc;

    /* query for the peer address info published by the remote side */
    rc = ompi_modex_recv(
        &mca_btl_gm_component.super.btl_version,
        ompi_proc,
        (void*)&gm_proc->proc_addrs,
        &size);
    if(OMPI_SUCCESS != rc) {
        opal_output(0, "[%s:%d] ompi_modex_recv failed for peer %s",
                    __FILE__,__LINE__,ORTE_NAME_PRINT(&ompi_proc->proc_name));
        OBJ_RELEASE(gm_proc);
        return NULL;
    }

    /* the blob must be a whole number of address records */
    if((size % sizeof(mca_btl_gm_addr_t)) != 0) {
        opal_output(0, "[%s:%d] invalid gm address for peer %s",
                    __FILE__,__LINE__,ORTE_NAME_PRINT(&ompi_proc->proc_name));
        OBJ_RELEASE(gm_proc);
        return NULL;
    }
    gm_proc->proc_addr_count = size/sizeof(mca_btl_gm_addr_t);

    if (0 == gm_proc->proc_addr_count) {
        gm_proc->proc_endpoints = NULL;
    } else {
        gm_proc->proc_endpoints = (mca_btl_base_endpoint_t**)
            malloc(gm_proc->proc_addr_count * sizeof(mca_btl_base_endpoint_t*));
    }
    if(NULL == gm_proc->proc_endpoints) {
        OBJ_RELEASE(gm_proc);
        return NULL;
    }

    /* addresses arrive in network byte order; convert in place */
    for (i = 0 ; i < gm_proc->proc_addr_count; ++i) {
        MCA_BTL_GM_ADDR_NTOH(gm_proc->proc_addrs[i]);
    }
    return gm_proc;
}
/*
 * Note that this routine must be called with the lock on the process
 * already held.  Insert a btl instance into the proc array and assign
 * it an address.
 *
 * Fix: both error paths previously returned OMPI_ERROR while still
 * holding mca_btl_gm_component.gm_lock, leaking the lock; the unlock
 * is now performed on every exit path.
 */
int mca_btl_gm_proc_insert(
    mca_btl_gm_proc_t* gm_proc,
    mca_btl_base_endpoint_t* gm_endpoint)
{
    mca_btl_gm_module_t* gm_btl = gm_endpoint->endpoint_btl;

    /* insert into endpoint array */
    if(gm_proc->proc_addr_count <= gm_proc->proc_endpoint_count)
        return OMPI_ERR_OUT_OF_RESOURCE;
    gm_endpoint->endpoint_proc = gm_proc;
    gm_endpoint->endpoint_addr = gm_proc->proc_addrs[gm_proc->proc_endpoint_count];

    OPAL_THREAD_LOCK(&mca_btl_gm_component.gm_lock);
#if GM_API_VERSION > 0x200
    if (GM_SUCCESS != gm_global_id_to_node_id(
        gm_btl->port,
        gm_endpoint->endpoint_addr.global_id,
        &gm_endpoint->endpoint_addr.node_id)) {
        opal_output( 0, "[%s:%d] error in converting global to local id \n",
                     __FILE__, __LINE__ );
        OPAL_THREAD_UNLOCK(&mca_btl_gm_component.gm_lock);
        return OMPI_ERROR;
    }
    if(mca_btl_gm_component.gm_debug > 0) {
        opal_output(0, "%s mapped global id %" PRIu32
                    " to node id %" PRIu32 "\n",
                    ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                    gm_endpoint->endpoint_addr.global_id,
                    gm_endpoint->endpoint_addr.node_id);
    }
#else
    gm_endpoint->endpoint_addr.node_id = gm_host_name_to_node_id( gm_btl->port,
                                                                  gm_endpoint->endpoint_addr.global_id);
    if( GM_NO_SUCH_NODE_ID == gm_endpoint->endpoint_addr.node_id ) {
        opal_output( 0, "[%s:%d] unable to convert the remote host name (%s) to a host id",
                     __FILE__, __LINE__, gm_endpoint->endpoint_addr.global_id);
        OPAL_THREAD_UNLOCK(&mca_btl_gm_component.gm_lock);
        return OMPI_ERROR;
    }
#endif  /* GM_API_VERSION > 0x200 */
    OPAL_THREAD_UNLOCK(&mca_btl_gm_component.gm_lock);

    gm_proc->proc_endpoints[gm_proc->proc_endpoint_count] = gm_endpoint;
    gm_proc->proc_endpoint_count++;
    return OMPI_SUCCESS;
}

Просмотреть файл

@ -1,64 +0,0 @@
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2011 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#ifndef MCA_BTL_GM_PROC_H
#define MCA_BTL_GM_PROC_H
#include "opal/class/opal_object.h"
#include "ompi/proc/proc.h"
#include "btl_gm.h"
#include "btl_gm_endpoint.h"
BEGIN_C_DECLS
OBJ_CLASS_DECLARATION(mca_btl_gm_proc_t);
/**
* Represents the state of a remote process and the set of addresses
* that it exports. Also cache an instance of mca_btl_base_endpoint_t for
* each
* BTL instance that attempts to open a connection to the process.
*/
struct mca_btl_gm_proc_t {
opal_list_item_t super;
/**< allow proc to be placed on a list */
ompi_proc_t *proc_ompi;
/**< pointer to corresponding ompi_proc_t */
struct mca_btl_gm_addr_t* proc_addrs;
/**< array of addresses exported by peer */
size_t proc_addr_count;
/**< number of addresses published by peer */
struct mca_btl_base_endpoint_t **proc_endpoints;
/**< array of endpoints that have been created to access this proc */
size_t proc_endpoint_count;
/**< number of endpoints */
opal_mutex_t proc_lock;
/**< lock to protect against concurrent access to proc state */
};
typedef struct mca_btl_gm_proc_t mca_btl_gm_proc_t;
mca_btl_gm_proc_t* mca_btl_gm_proc_create(ompi_proc_t* ompi_proc);
int mca_btl_gm_proc_insert(mca_btl_gm_proc_t*, mca_btl_base_endpoint_t*);
END_C_DECLS
#endif

Просмотреть файл

@ -1,43 +0,0 @@
# -*- shell-script -*-
#
# Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
# University Research and Technology
# Corporation. All rights reserved.
# Copyright (c) 2004-2005 The University of Tennessee and The University
# of Tennessee Research Foundation. All rights
# reserved.
# Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
# University of Stuttgart. All rights reserved.
# Copyright (c) 2004-2005 The Regents of the University of California.
# All rights reserved.
# Copyright (c) 2010 Cisco Systems, Inc. All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
# MCA_btl_gm_CONFIG([action-if-can-compile],
#                   [action-if-cant-compile])
# ------------------------------------------------
# Probe for a usable GM installation via OMPI_CHECK_GM; expand $1 when
# the btl:gm component can build and $2 otherwise.  On success the GM
# link flags are also exported to the wrapper compilers.
AC_DEFUN([MCA_ompi_btl_gm_CONFIG],[
AC_CONFIG_FILES([ompi/mca/btl/gm/Makefile])
OMPI_CHECK_GM([btl_gm],
[btl_gm_happy="yes"],
[btl_gm_happy="no"])
AS_IF([test "$btl_gm_happy" = "yes"],
[btl_gm_WRAPPER_EXTRA_LDFLAGS="$btl_gm_LDFLAGS"
btl_gm_WRAPPER_EXTRA_LIBS="$btl_gm_LIBS"
$1],
[$2])
# substitute in the things needed to build gm (referenced from
# Makefile.am via AM_CPPFLAGS / _LIBADD / _LDFLAGS)
AC_SUBST([btl_gm_CFLAGS])
AC_SUBST([btl_gm_CPPFLAGS])
AC_SUBST([btl_gm_LDFLAGS])
AC_SUBST([btl_gm_LIBS])
])dnl