1
1

The first patch related to the Active Message stuff. So far, here is what we have:

- the registration array is now global instead of one by BTL.
- each framework has to declare the entries in the registration array it reserves. Then
  it has to define the internal way of sharing (or not) these entries between all
  components. As an example, the PML will not share as there is only one active PML
  at any moment, while the BTLs will have to. The tag is 8 bits long: the first 3
  are reserved for the framework while the remaining 5 are used internally by each
  framework.
- The registration function is optional. If a BTL does not provide such a function,
  nothing happens. However, in the case where such a function is provided in the BTL
  structure, it will be called by the BML when a tag is registered.

Now, it's time for the second step... Converting OB1 from a switch based PML to an
active message one.

This commit was SVN r17140.
Этот коммит содержится в:
George Bosilca 2008-01-15 05:32:53 +00:00
родитель 98f79f2ea0
Коммит 6310ce955c
50 изменённых файлов: 982 добавлений и 1411 удалений

Просмотреть файл

@ -532,19 +532,6 @@ typedef int (*mca_bml_base_module_del_proc_btl_fn_t)(
struct ompi_proc_t*,
struct mca_btl_base_module_t* );
/**
* Callback function that is called asynchronously on receipt
* of data by the transport layer.
*/
typedef void (*mca_bml_base_module_recv_cb_fn_t)(
mca_btl_base_module_t* bml_btl,
mca_btl_base_tag_t tag,
mca_btl_base_descriptor_t* descriptor,
void* cbdata
);
/**
* Register a callback function that is called on receipt
* of a fragment.
@ -558,7 +545,7 @@ typedef void (*mca_bml_base_module_recv_cb_fn_t)(
*/
typedef int (*mca_bml_base_module_register_fn_t)(
mca_btl_base_tag_t tag,
mca_bml_base_module_recv_cb_fn_t cbfunc,
mca_btl_base_module_recv_cb_fn_t cbfunc,
void* cbdata
);

Просмотреть файл

@ -83,22 +83,6 @@ static int btl_exclusivity_compare(const void* arg1, const void* arg2)
}
}
void mca_bml_r2_recv_callback(
mca_btl_base_module_t* btl,
mca_btl_base_tag_t tag,
mca_btl_base_descriptor_t* desc,
void* cbdata
){
/* just pass it up the stack.. */
mca_bml_r2.r2_reg[tag](btl,
tag,
desc,
cbdata);
}
int mca_bml_r2_progress( void )
{
int i, count = 0;
@ -758,24 +742,30 @@ int mca_bml_r2_add_btl(mca_btl_base_module_t* btl)
/*
* Register callback w/ all active btls
*/
int mca_bml_r2_register(
mca_btl_base_tag_t tag,
mca_bml_base_module_recv_cb_fn_t cbfunc,
void* data
)
int mca_bml_r2_register( mca_btl_base_tag_t tag,
mca_btl_base_module_recv_cb_fn_t cbfunc,
void* data )
{
uint32_t i;
int rc;
mca_btl_base_module_t *btl;
mca_btl_base_active_message_trigger[tag].cbfunc = cbfunc;
mca_btl_base_active_message_trigger[tag].cbdata = data;
/* Give an oportunity to the BTLs to do something special
* for each registration.
*/
{
int i, rc;
mca_btl_base_module_t *btl;
for(i = 0; i < mca_bml_r2.num_btl_modules; i++) {
btl = mca_bml_r2.btl_modules[i];
rc = btl->btl_register(btl, tag, cbfunc, data);
if(OMPI_SUCCESS != rc) {
return rc;
for(i = 0; i < (int)mca_bml_r2.num_btl_modules; i++) {
btl = mca_bml_r2.btl_modules[i];
if( NULL == btl->btl_register )
continue;
rc = btl->btl_register(btl, tag, cbfunc, data);
if(OMPI_SUCCESS != rc) {
return rc;
}
}
}
return OMPI_SUCCESS;
}

Просмотреть файл

@ -36,14 +36,6 @@
extern "C" {
#endif
typedef mca_bml_base_module_recv_cb_fn_t mca_bml_r2_recv_reg_t;
void mca_bml_r2_recv_callback(
mca_btl_base_module_t* btl,
mca_btl_base_tag_t tag,
mca_btl_base_descriptor_t *des,
void* cbdata);
/**
* BML module interface functions and attributes.
*/
@ -53,7 +45,6 @@ struct mca_bml_r2_module_t {
mca_btl_base_module_t** btl_modules;
size_t num_btl_progress;
mca_btl_base_component_progress_fn_t * btl_progress;
mca_bml_r2_recv_reg_t r2_reg[256];
bool btls_added;
bool show_unreach_errors;
};
@ -87,7 +78,7 @@ int mca_bml_r2_del_btl( mca_btl_base_module_t* btl );
int mca_bml_r2_del_proc_btl( struct ompi_proc_t* proc, mca_btl_base_module_t* btl );
int mca_bml_r2_register( mca_btl_base_tag_t tag,
mca_bml_base_module_recv_cb_fn_t cbfunc,
mca_btl_base_module_recv_cb_fn_t cbfunc,
void* data );
int mca_bml_r2_register_error( mca_btl_base_module_error_cb_fn_t cbfunc );

Просмотреть файл

@ -29,6 +29,8 @@
#include "ompi/mca/btl/btl.h"
#include "ompi/mca/btl/base/base.h"
mca_btl_active_message_callback_t mca_btl_base_active_message_trigger[MCA_BTL_TAG_MAX];
/*
* mca_btl_base_descriptor_t
*/

Просмотреть файл

@ -138,12 +138,33 @@ typedef uint8_t mca_btl_base_tag_t;
#define MCA_BTL_NO_ORDER 255
/* reserved tag values */
#define MCA_BTL_TAG_BTL 0
#define MCA_BTL_TAG_PML 1
#define MCA_BTL_TAG_OSC_RDMA 2
#define MCA_BTL_TAG_USR 3
#define MCA_BTL_TAG_MAX 255 /* 1 + highest allowed tag num */
/*
* Communication specific defines. There are a number of active message IDs
* that can be shared between all frameworks that need to communicate (i.e.
* use the PML or the BTL directly). These IDs are exchanged between the
* processes, therefore they need to be identical everywhere. The simplest
* approach is to have them defined as constants, and give each framework a
* small number. Here is the rule that defines these ID (they are 8 bits):
* - the first 3 bits are used to code the framework (i.e. PML, OSC, COLL)
* - the remaining 5 bits are used internally by the framework, and divided
* based on the components requirements. Therefore, the way the PML and
* the OSC frameworks use these defines will be different. For more
* information about how these framework ID are defined, take a look in the
* header file associated with the framework.
*/
#define MCA_BTL_AM_FRAMEWORK_MASK 0xD0
#define MCA_BTL_TAG_BTL 0x20
#define MCA_BTL_TAG_PML 0x40
#define MCA_BTL_TAG_OSC_RDMA 0x60
#define MCA_BTL_TAG_USR 0x80
#define MCA_BTL_TAG_MAX 255 /* 1 + highest allowed tag num */
/*
* Reserved tags for specific BTLs. As multiple BTLs can be active
* simultaneously, their tags should not collide.
*/
#define MCA_BTL_TAG_IB (MCA_BTL_TAG_BTL + 0)
#define MCA_BTL_TAG_UDAPL (MCA_BTL_TAG_BTL + 1)
/* prefered protocol */
#define MCA_BTL_FLAGS_SEND 0x0001
@ -194,7 +215,6 @@ typedef void (*mca_btl_base_completion_fn_t)(
struct mca_btl_base_descriptor_t* descriptor,
int status);
/**
* Describes a region/segment of memory that is addressable
* by an BTL.
@ -323,6 +343,35 @@ typedef struct mca_btl_base_module_t** (*mca_btl_base_component_init_fn_t)(
typedef int (*mca_btl_base_component_progress_fn_t)(void);
/**
* Callback function that is called asynchronously on receipt
* of data by the transport layer.
* Note that the the mca_btl_base_descriptor_t is only valid within the
* completion function, this implies that all data payload in the
* mca_btl_base_descriptor_t must be copied out within this callback or
* forfeited back to the BTL.
*
* @param[IN] btl BTL module
* @param[IN] tag The active message receive callback tag value
* @param[IN] descriptor The BTL descriptor (contains the receive payload)
* @param[IN] cbdata Opaque callback data
*/
typedef void (*mca_btl_base_module_recv_cb_fn_t)(
struct mca_btl_base_module_t* btl,
mca_btl_base_tag_t tag,
mca_btl_base_descriptor_t* descriptor,
void* cbdata
);
typedef struct mca_btl_active_message_callback_t {
mca_btl_base_module_recv_cb_fn_t cbfunc;
void* cbdata;
} mca_btl_active_message_callback_t;
OMPI_DECLSPEC extern
mca_btl_active_message_callback_t mca_btl_base_active_message_trigger[MCA_BTL_TAG_MAX];
/**
* BTL component descriptor. Contains component version information
* and component open/close/init functions.
@ -422,28 +471,6 @@ typedef int (*mca_btl_base_module_del_procs_fn_t)(
struct mca_btl_base_endpoint_t** peer
);
/**
* Callback function that is called asynchronously on receipt
* of data by the transport layer.
* Note that the the mca_btl_base_descriptor_t is only valid within the
* completion function, this implies that all data payload in the
* mca_btl_base_descriptor_t must be copied out within this callback or
* forfeited back to the BTL.
*
* @param[IN] btl BTL module
* @param[IN] tag The active message receive callback tag value
* @param[IN] descriptor The BTL descriptor (contains the receive payload)
* @param[IN] cbdata Opaque callback data
*/
typedef void (*mca_btl_base_module_recv_cb_fn_t)(
struct mca_btl_base_module_t* btl,
mca_btl_base_tag_t tag,
mca_btl_base_descriptor_t* descriptor,
void* cbdata
);
/**
* Register a callback function that is called on receipt
* of a fragment.

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -12,8 +12,8 @@
/**
* @file
*/
#ifndef MCA_PTL_ELAN_H
#define MCA_PTL_ELAN_H
#ifndef MCA_BTL_ELAN_H
#define MCA_BTL_ELAN_H
#include "ompi_config.h"
@ -41,13 +41,8 @@
#include "elan3/elan3.h"
#include "elan/elan.h"
#if defined(c_plusplus) || defined(__cplusplus)
extern "C" {
#endif
BEGIN_C_DECLS
#define MCA_BTL_HAS_MPOOL 1
#define BTL_ELAN_RECV_MASK 0xffffffffULL
#define BTL_ELAN_PUT_MASK 0xffffffffULL
/**
* ELAN BTL component.
*/
@ -73,12 +68,13 @@ struct mca_btl_elan_component_t {
int elan_free_list_inc;
/**< number of elements to alloc when growing free lists */
int elan_max_posted_recv;
/**< number of pre-posted receives */
/* free list of fragment descriptors */
ompi_free_list_t elan_frag_eager;
ompi_free_list_t elan_frag_max;
ompi_free_list_t elan_frag_user;
ompi_free_list_t elan_frag_eager;
ompi_free_list_t elan_frag_max;
ompi_free_list_t elan_frag_user;
opal_list_t elan_procs;
/**< list of elan proc structures */
@ -96,9 +92,9 @@ struct mca_btl_elan_component_t {
/**< pin memory on first use and leave pinned */
};
typedef struct mca_btl_elan_component_t mca_btl_elan_component_t;
typedef struct mca_btl_elan_component_t mca_btl_elan_component_t;
OMPI_MODULE_DECLSPEC extern mca_btl_elan_component_t mca_btl_elan_component;
OMPI_MODULE_DECLSPEC extern mca_btl_elan_component_t mca_btl_elan_component;
/**
@ -107,15 +103,17 @@ OMPI_MODULE_DECLSPEC extern mca_btl_elan_component_t mca_btl_elan_component;
struct mca_btl_elan_module_t {
mca_btl_base_module_t super; /**< base BTL interface */
mca_btl_base_recv_reg_t elan_reg[MCA_BTL_TAG_MAX];
ELAN_STATE *state;
ELAN_BASE *base;
ELAN_TPORT *tport; /* What we actually use for moving messages */
ELAN_QUEUE *queue;
ELAN_GROUP *group; /* The group with everyone in */
unsigned int elan_vp; /**< elan vpid, not ompi vpid */
unsigned int elan_nvp; /**< total # of elan vpid */
opal_mutex_t elan_lock;
unsigned int elan_vp; /**< elan vpid, not ompi vpid */
unsigned int elan_nvp; /**< total # of elan vpid */
opal_mutex_t elan_lock;
opal_list_t recv_list; /* list of pending receives. */
opal_list_t send_list; /* list of posted sends */
opal_list_t rdma_list; /* list of posted receives */
struct bufdesc_t * tportFIFOHead;
struct bufdesc_t * tportFIFOTail;
struct mca_mpool_base_module_t* elan_mpool;
@ -130,7 +128,6 @@ struct bufdesc_t {
};
typedef struct bufdesc_t bufdesc_t;
/**
* Register ELAN component parameters with the MCA framework
*/
@ -148,35 +145,25 @@ extern int mca_btl_elan_component_close(void);
* @param allow_multi_user_threads (OUT) Flag indicating wether BTL supports user threads (TRUE)
* @param have_hidden_threads (OUT) Flag indicating wether BTL uses threads (TRUE)
*/
extern mca_btl_base_module_t** mca_btl_elan_component_init(
int *num_btl_modules,
bool allow_multi_user_threads,
bool have_hidden_threads
);
extern mca_btl_base_module_t**
mca_btl_elan_component_init( int* num_btl_modules,
bool allow_multi_user_threads,
bool have_hidden_threads );
/**
* ELAN component progress.
*/
extern int mca_btl_elan_component_progress(void);
/**
* Cleanup any resources held by the BTL.
*
* @param btl BTL instance.
* @return OMPI_SUCCESS or error status on failure.
*/
extern void cancel_elanRx( mca_btl_elan_module_t* elan_btl );
extern void cancel_elanRx(
mca_btl_elan_module_t* elan_btl
);
extern int mca_btl_elan_finalize(
struct mca_btl_base_module_t* btl
);
extern int mca_btl_elan_finalize( struct mca_btl_base_module_t* btl );
extern int mca_btl_elan_ft_event(int state);
@ -192,13 +179,11 @@ extern int mca_btl_elan_ft_event(int state);
*
*/
extern int mca_btl_elan_add_procs(
struct mca_btl_base_module_t* btl,
size_t nprocs,
struct ompi_proc_t **procs,
struct mca_btl_base_endpoint_t** peers,
ompi_bitmap_t* reachable
);
extern int mca_btl_elan_add_procs( struct mca_btl_base_module_t* btl,
size_t nprocs,
struct ompi_proc_t **procs,
struct mca_btl_base_endpoint_t** peers,
ompi_bitmap_t* reachable );
/**
* PML->BTL notification of change in the process list.
@ -211,13 +196,10 @@ extern int mca_btl_elan_add_procs(
*
*/
extern int mca_btl_elan_del_procs(
struct mca_btl_base_module_t* btl,
size_t nprocs,
struct ompi_proc_t **procs,
struct mca_btl_base_endpoint_t** peers
);
extern int mca_btl_elan_del_procs( struct mca_btl_base_module_t* btl,
size_t nprocs,
struct ompi_proc_t **procs,
struct mca_btl_base_endpoint_t** peers );
/**
* Initiate an asynchronous send.
@ -228,13 +210,10 @@ extern int mca_btl_elan_del_procs(
* @param tag (IN) The tag value used to notify the peer.
*/
extern int mca_btl_elan_send(
struct mca_btl_base_module_t* btl,
struct mca_btl_base_endpoint_t* btl_peer,
struct mca_btl_base_descriptor_t* descriptor,
mca_btl_base_tag_t tag
);
extern int mca_btl_elan_send( struct mca_btl_base_module_t* btl,
struct mca_btl_base_endpoint_t* btl_peer,
struct mca_btl_base_descriptor_t* descriptor,
mca_btl_base_tag_t tag );
/**
* Initiate an asynchronous put.
@ -243,13 +222,10 @@ extern int mca_btl_elan_send(
* @param endpoint (IN) BTL addressing information
* @param descriptor (IN) Description of the data to be transferred
*/
extern int mca_btl_elan_put(
struct mca_btl_base_module_t* btl,
struct mca_btl_base_endpoint_t* btl_peer,
struct mca_btl_base_descriptor_t* decriptor
);
extern int mca_btl_elan_put( struct mca_btl_base_module_t* btl,
struct mca_btl_base_endpoint_t* btl_peer,
struct mca_btl_base_descriptor_t* decriptor );
/**
* Initiate an asynchronous get.
@ -258,28 +234,11 @@ extern int mca_btl_elan_put(
* @param endpoint (IN) BTL addressing information
* @param descriptor (IN) Description of the data to be transferred
*/
extern int mca_btl_elan_get(
struct mca_btl_base_module_t* btl,
struct mca_btl_base_endpoint_t* btl_peer,
struct mca_btl_base_descriptor_t* decriptor
);
/**
* Register a callback function that is called on receipt
* of a fragment.
*
* @param btl (IN) BTL module
* @return Status indicating if registration was successful
*
*/
extern int mca_btl_elan_get( struct mca_btl_base_module_t* btl,
struct mca_btl_base_endpoint_t* btl_peer,
struct mca_btl_base_descriptor_t* decriptor );
extern int mca_btl_elan_register(
struct mca_btl_base_module_t* btl,
mca_btl_base_tag_t tag,
mca_btl_base_module_recv_cb_fn_t cbfunc,
void* cbdata);
/**
* Allocate a descriptor with a segment of the requested size.
* Note that the BTL layer may choose to return a smaller size
@ -289,13 +248,12 @@ extern int mca_btl_elan_register(
* @param size (IN) Request segment size.
*/
extern mca_btl_base_descriptor_t* mca_btl_elan_alloc(
struct mca_btl_base_module_t* btl,
struct mca_btl_base_endpoint_t* peer,
uint8_t order,
size_t size,
uint32_t flags);
extern mca_btl_base_descriptor_t*
mca_btl_elan_alloc( struct mca_btl_base_module_t* btl,
struct mca_btl_base_endpoint_t* peer,
uint8_t order,
size_t size,
uint32_t flags );
/**
* Return a segment allocated by this BTL.
@ -304,10 +262,8 @@ extern mca_btl_base_descriptor_t* mca_btl_elan_alloc(
* @param descriptor (IN) Allocated descriptor.
*/
extern int mca_btl_elan_free(
struct mca_btl_base_module_t* btl,
mca_btl_base_descriptor_t* des);
extern int mca_btl_elan_free( struct mca_btl_base_module_t* btl,
mca_btl_base_descriptor_t* des );
/**
* Prepare a descriptor for send/rdma using the supplied
@ -321,47 +277,29 @@ extern int mca_btl_elan_free(
* @param convertor (IN) Data type convertor
* @param reserve (IN) Additional bytes requested by upper layer to precede user data
* @param size (IN/OUT) Number of bytes to prepare (IN), number of bytes actually prepared (OUT)
*/
*/
mca_btl_base_descriptor_t* mca_btl_elan_prepare_src(
struct mca_btl_base_module_t* btl,
struct mca_btl_base_endpoint_t* peer,
struct mca_mpool_base_registration_t*,
struct ompi_convertor_t* convertor,
uint8_t order,
size_t reserve,
size_t* size,
uint32_t flags
);
mca_btl_base_descriptor_t*
mca_btl_elan_prepare_src( struct mca_btl_base_module_t* btl,
struct mca_btl_base_endpoint_t* peer,
struct mca_mpool_base_registration_t*,
struct ompi_convertor_t* convertor,
uint8_t order,
size_t reserve,
size_t* size,
uint32_t flags );
extern mca_btl_base_descriptor_t* mca_btl_elan_prepare_dst(
struct mca_btl_base_module_t* btl,
struct mca_btl_base_endpoint_t* peer,
struct mca_mpool_base_registration_t*,
struct ompi_convertor_t* convertor,
uint8_t order,
size_t reserve,
size_t* size,
uint32_t flags);
extern mca_btl_base_descriptor_t*
mca_btl_elan_prepare_dst( struct mca_btl_base_module_t* btl,
struct mca_btl_base_endpoint_t* peer,
struct mca_mpool_base_registration_t*,
struct ompi_convertor_t* convertor,
uint8_t order,
size_t reserve,
size_t* size,
uint32_t flags );
END_C_DECLS
extern bufdesc_t * elan_ipeek(mca_btl_elan_module_t* elan_btl);
#endif /* MCA_BTL_ELAN_H */
#if defined(c_plusplus) || defined(__cplusplus)
}
#endif
#define BTL_ELAN_ADD_TO_FIFO(BTL, DESC) \
do { \
OPAL_THREAD_LOCK(&((BTL)->elan_lock)); \
if( (BTL)->tportFIFOTail ) { \
(BTL)->tportFIFOTail->next = (DESC); \
(BTL)->tportFIFOTail = (DESC); \
} else { \
(BTL)->tportFIFOHead = (DESC); \
(BTL)->tportFIFOTail = (DESC); \
} \
OPAL_THREAD_UNLOCK(&((BTL)->elan_lock)); \
} while(0)
#endif

Просмотреть файл

@ -70,9 +70,9 @@ mca_btl_elan_component_t mca_btl_elan_component = {
* utility routines for parameter registration
*/
static inline char* mca_btl_elan_param_register_string(
const char* param_name,
const char* default_value)
static inline char*
mca_btl_elan_param_register_string( const char* param_name,
const char* default_value )
{
char *param_value;
int id = mca_base_param_register_string("btl","elan",param_name,NULL,default_value);
@ -80,9 +80,9 @@ static inline char* mca_btl_elan_param_register_string(
return param_value;
}
static inline int mca_btl_elan_param_register_int(
const char* param_name,
int default_value)
static inline int
mca_btl_elan_param_register_int( const char* param_name,
int default_value )
{
int id = mca_base_param_register_int("btl","elan",param_name,NULL,default_value);
int param_value = default_value;
@ -124,8 +124,14 @@ int mca_btl_elan_component_open(void)
mca_btl_base_param_register(&mca_btl_elan_component.super.btl_version,
&mca_btl_elan_module.super);
mca_btl_elan_component.elanidmap_file =
mca_btl_elan_param_register_string( "elanidmap", "/etc/elanidmap" );
mca_base_param_reg_string( (mca_base_component_t*)&mca_btl_elan_component, "elanidmap",
"System-wide configuration file for the Quadrics network (elanidmap)",
false, false, "/etc/elanidmap", &mca_btl_elan_component.elanidmap_file );
mca_base_param_reg_int( (mca_base_component_t*)&mca_btl_elan_component, "max_posted_recv",
"Number of received posted in advance. Increasing this number for"
" communication bound application can lead to visible improvement"
" in performances",
false, false, 16, &mca_btl_elan_component.elan_max_posted_recv );
return OMPI_SUCCESS;
}
@ -158,9 +164,10 @@ int mca_btl_elan_component_close(void)
* (2) setup Elan4 listen socket for incoming connection attempts
* (3) register BTL parameters with the MCA
*/
mca_btl_base_module_t** mca_btl_elan_component_init( int *num_btl_modules,
bool enable_progress_threads,
bool enable_mpi_threads )
mca_btl_base_module_t**
mca_btl_elan_component_init( int *num_btl_modules,
bool enable_progress_threads,
bool enable_mpi_threads )
{
mca_btl_base_module_t** btls;
@ -177,34 +184,34 @@ mca_btl_base_module_t** mca_btl_elan_component_init( int *num_btl_modules,
OBJ_CONSTRUCT (&mca_btl_elan_component.elan_frag_user, ompi_free_list_t);
OBJ_CONSTRUCT(&mca_btl_elan_component.elan_procs, opal_list_t);
ompi_free_list_init_new( &mca_btl_elan_component.elan_frag_eager,
sizeof(mca_btl_elan_frag_t) + mca_btl_elan_module.super.btl_eager_limit,
CACHE_LINE_SIZE,
OBJ_CLASS(mca_btl_elan_frag_t),
0,CACHE_LINE_SIZE,
mca_btl_elan_component.elan_free_list_num,
mca_btl_elan_component.elan_free_list_max,
mca_btl_elan_component.elan_free_list_inc,
NULL ); /* use default allocator */
sizeof(mca_btl_elan_frag_t) + mca_btl_elan_module.super.btl_eager_limit,
CACHE_LINE_SIZE,
OBJ_CLASS(mca_btl_elan_frag_t),
0,CACHE_LINE_SIZE,
mca_btl_elan_component.elan_free_list_num,
mca_btl_elan_component.elan_free_list_max,
mca_btl_elan_component.elan_free_list_inc,
NULL ); /* use default allocator */
ompi_free_list_init_new( &mca_btl_elan_component.elan_frag_user,
sizeof(mca_btl_elan_frag_t),
CACHE_LINE_SIZE,
OBJ_CLASS(mca_btl_elan_frag_t),
0,CACHE_LINE_SIZE,
mca_btl_elan_component.elan_free_list_num,
mca_btl_elan_component.elan_free_list_max,
mca_btl_elan_component.elan_free_list_inc,
NULL ); /* use default allocator */
sizeof(mca_btl_elan_frag_t),
CACHE_LINE_SIZE,
OBJ_CLASS(mca_btl_elan_frag_t),
0,CACHE_LINE_SIZE,
mca_btl_elan_component.elan_free_list_num,
mca_btl_elan_component.elan_free_list_max,
mca_btl_elan_component.elan_free_list_inc,
NULL ); /* use default allocator */
ompi_free_list_init_new( &mca_btl_elan_component.elan_frag_max,
sizeof(mca_btl_elan_frag_t)+mca_btl_elan_module.super.btl_max_send_size,
CACHE_LINE_SIZE,
OBJ_CLASS(mca_btl_elan_frag_t),
0,CACHE_LINE_SIZE,
mca_btl_elan_component.elan_free_list_num,
mca_btl_elan_component.elan_free_list_max,
mca_btl_elan_component.elan_free_list_inc,
NULL ); /* use default allocator */
sizeof(mca_btl_elan_frag_t)+mca_btl_elan_module.super.btl_max_send_size,
CACHE_LINE_SIZE,
OBJ_CLASS(mca_btl_elan_frag_t),
0,CACHE_LINE_SIZE,
mca_btl_elan_component.elan_free_list_num,
mca_btl_elan_component.elan_free_list_max,
mca_btl_elan_component.elan_free_list_inc,
NULL ); /* use default allocator */
vpid = orte_process_info.my_name->vpid;
@ -220,8 +227,9 @@ mca_btl_base_module_t** mca_btl_elan_component_init( int *num_btl_modules,
continue;
memcpy( btl, &mca_btl_elan_module, sizeof(mca_btl_elan_module_t) );
OBJ_CONSTRUCT (&btl->elan_lock, opal_mutex_t);
btl->tportFIFOHead = NULL;
btl->tportFIFOTail = NULL;
OBJ_CONSTRUCT( &btl->recv_list, opal_list_t );
OBJ_CONSTRUCT( &btl->send_list, opal_list_t );
OBJ_CONSTRUCT( &btl->rdma_list, opal_list_t );
mca_btl_elan_component.elan_btls[count++] = btl;
}
mca_btl_elan_component.elan_num_btls = count ;
@ -237,68 +245,89 @@ mca_btl_base_module_t** mca_btl_elan_component_init( int *num_btl_modules,
return btls;
}
static mca_btl_elan_frag_t* mca_btl_elan_ipeek(mca_btl_elan_module_t* elan_btl)
{
mca_btl_elan_frag_t* frag;
/* The receive list will always contain at least one element. There
* is no need to perform any checks.
*/
frag = (mca_btl_elan_frag_t*)opal_list_get_first( &(elan_btl->recv_list) );
if( elan_tportRxDone(frag->elan_event) ) {
int tag; /* we need it for the cast */
size_t length;
elan_tportRxWait( frag->elan_event, NULL, &tag, &length );
frag->base.des_dst->seg_len = length;
frag->tag = (mca_btl_base_tag_t)tag;
opal_list_remove_first( &(elan_btl->recv_list) );
return frag;
}
/* If there are any pending sends check their completion */
if( !opal_list_is_empty( &(elan_btl->send_list) ) ) {
frag = (mca_btl_elan_frag_t*)opal_list_get_first( &(elan_btl->send_list) );
if( elan_tportTxDone(frag->elan_event) ) {
opal_list_remove_first( &(elan_btl->send_list) );
/* call the completion callback */
elan_tportTxWait(frag->elan_event);
return frag;
}
}
/* If any RDMA have been posted, check their status */
if( !opal_list_is_empty( &(elan_btl->rdma_list) ) ) {
frag = (mca_btl_elan_frag_t*)opal_list_get_first( &(elan_btl->rdma_list) );
if( elan_done(frag->elan_event,0) ) {
opal_list_remove_first( &(elan_btl->rdma_list) );
elan_wait( frag->elan_event, ELAN_WAIT_EVENT );
return frag;
}
}
return NULL;
}
/*
* Elan4 component progress.
*/
int mca_btl_elan_component_progress( void )
{
size_t num_progressed = 0, i, no_btls, size;
size_t num_progressed = 0, i, no_btls;
mca_btl_elan_frag_t* frag;
bufdesc_t* desc;
no_btls = mca_btl_elan_component.elan_num_btls;
for (i = 0; i < no_btls; i++) {
mca_btl_elan_module_t* elan_btl = mca_btl_elan_component.elan_btls[i];
OPAL_THREAD_LOCK(&elan_btl->elan_lock);
desc = elan_ipeek(elan_btl);
OPAL_THREAD_UNLOCK(&elan_btl->elan_lock);
if(desc ==NULL)
continue;
frag = (mca_btl_elan_frag_t*) desc->frag;
if( NULL != frag ) {
if(frag->type== MCA_BTL_ELAN_HDR_TYPE_SEND ) {
/* it's a send */
/* call the completion callback */
elan_tportTxWait(desc->eve);
frag->base.des_cbfunc( &(elan_btl->super), frag->endpoint, &(frag->base), OMPI_SUCCESS );
free(desc);
} else if( (frag->type == MCA_BTL_ELAN_HDR_TYPE_PUT) ||
(frag->type== MCA_BTL_ELAN_HDR_TYPE_GET) ) {
/* it's a put*/
/* call the completion callback */
elan_wait(desc->eve,ELAN_WAIT_EVENT);
frag->base.des_cbfunc( &(elan_btl->super), frag->endpoint, &(frag->base), OMPI_SUCCESS );
free(desc);
} else {
mca_btl_elan_module_t* elan_btl = mca_btl_elan_component.elan_btls[i];
/**
* As long as there are event on the network, keep looping here. Only go
* to the next BTL once this one is emptied.
*/
do {
OPAL_THREAD_LOCK(&elan_btl->elan_lock);
frag = mca_btl_elan_ipeek(elan_btl);
OPAL_THREAD_UNLOCK(&elan_btl->elan_lock);
if( NULL == frag)
break;
if(frag->type == MCA_BTL_ELAN_HDR_TYPE_RECV ) {
/* and this one is a receive */
mca_btl_base_recv_reg_t* reg;
reg = &(elan_btl->elan_reg[frag->tag]);
elan_tportRxWait(desc->eve, NULL, NULL, &size);
frag->base.des_dst->seg_len = size;
reg->cbfunc( &(elan_btl->super), frag->tag, &(frag->base),reg->cbdata );
mca_btl_active_message_callback_t* reg;
reg = mca_btl_base_active_message_trigger + frag->tag;
reg->cbfunc( &(elan_btl->super), frag->tag, &(frag->base), reg->cbdata );
/**
* The upper level extract the data from the fragment.
* Now we can register the fragment
* again with the elan BTL.
* Now we can register the fragment again with the network.
*/
desc->eve = elan_tportRxStart (elan_btl->tport, 0 , 0, 0, 0xffffffff, frag->tag, frag->base.des_dst->seg_addr.pval, mca_btl_elan_module.super.btl_eager_limit) ;
/*desc->eve = elan_tportRxStart (elan_btl->tport, ELAN_TPORT_RXANY , 0, 0, 0, 0, frag->base.des_dst->seg_addr.pval, mca_btl_elan_module.super.btl_eager_limit) ;*/
desc->frag = frag;
desc->next = NULL;
frag->elan_event = elan_tportRxStart( elan_btl->tport, 0, 0, 0, 0, 0,
frag->base.des_dst->seg_addr.pval,
mca_btl_elan_module.super.btl_eager_limit );
OPAL_THREAD_LOCK(&elan_btl->elan_lock);
if( elan_btl->tportFIFOTail ) {
elan_btl->tportFIFOTail->next = desc;
elan_btl->tportFIFOTail=desc;
} else {
elan_btl->tportFIFOHead = desc;
elan_btl->tportFIFOTail = desc;
}
opal_list_append( &(elan_btl->recv_list), (opal_list_item_t*)frag );
OPAL_THREAD_UNLOCK(&elan_btl->elan_lock);
} else {
/* it's either a send or a put/get */
frag->base.des_cbfunc( &(elan_btl->super), frag->endpoint,
&(frag->base), OMPI_SUCCESS );
}
} else {
opal_output( 0, "Something bad happened the frag == NULL\n" );
}
num_progressed++;
num_progressed++;
} while(0)
}
return num_progressed;

Просмотреть файл

@ -46,9 +46,8 @@ static void mca_btl_elan_endpoint_destruct(mca_btl_base_endpoint_t* endpoint)
}
OBJ_CLASS_INSTANCE(
mca_btl_elan_endpoint_t,
opal_list_item_t,
mca_btl_elan_endpoint_construct,
mca_btl_elan_endpoint_destruct);
OBJ_CLASS_INSTANCE(mca_btl_elan_endpoint_t,
opal_list_item_t,
mca_btl_elan_endpoint_construct,
mca_btl_elan_endpoint_destruct);

Просмотреть файл

@ -9,8 +9,8 @@
* $HEADER$
*/
#ifndef MCA_BTL_TEMPLATE_ENDPOINT_H
#define MCA_BTL_TEMPLATE_ENDPOINT_H
#ifndef MCA_BTL_ELAN_ENDPOINT_H
#define MCA_BTL_ELAN_ENDPOINT_H
#include "opal/class/opal_list.h"
#include "opal/event/event.h"
@ -18,10 +18,8 @@
#include "ompi/mca/btl/btl.h"
#include "btl_elan_frag.h"
#include "btl_elan.h"
#if defined(c_plusplus) || defined(__cplusplus)
extern "C" {
#endif
BEGIN_C_DECLS
/**
* State of ELAN endpoint connection.
@ -51,7 +49,6 @@ typedef struct mca_btl_base_endpoint_t mca_btl_base_endpoint_t;
typedef mca_btl_base_endpoint_t mca_btl_elan_endpoint_t;
OBJ_CLASS_DECLARATION(mca_btl_elan_endpoint_t);
#if defined(c_plusplus) || defined(__cplusplus)
}
#endif
#endif
END_C_DECLS
#endif /* MCA_BTL_ELAN_ENDPOINT_H */

Просмотреть файл

@ -17,6 +17,7 @@ static void mca_btl_elan_frag_common_constructor(mca_btl_elan_frag_t* frag)
frag->base.des_src_cnt = 0;
frag->base.des_dst = NULL;
frag->base.des_dst_cnt = 0;
frag->elan_event = NULL;
}
static void mca_btl_elan_frag_eager_constructor(mca_btl_elan_frag_t* frag)
@ -40,27 +41,22 @@ static void mca_btl_elan_frag_user_constructor(mca_btl_elan_frag_t* frag)
}
OBJ_CLASS_INSTANCE(
mca_btl_elan_frag_t,
mca_btl_base_descriptor_t,
NULL,
NULL);
OBJ_CLASS_INSTANCE( mca_btl_elan_frag_t,
mca_btl_base_descriptor_t,
NULL,
NULL );
OBJ_CLASS_INSTANCE(
mca_btl_elan_frag_eager_t,
mca_btl_base_descriptor_t,
mca_btl_elan_frag_eager_constructor,
NULL);
OBJ_CLASS_INSTANCE( mca_btl_elan_frag_eager_t,
mca_btl_base_descriptor_t,
mca_btl_elan_frag_eager_constructor,
NULL );
OBJ_CLASS_INSTANCE(
mca_btl_elan_frag_max_t,
mca_btl_base_descriptor_t,
mca_btl_elan_frag_max_constructor,
NULL);
OBJ_CLASS_INSTANCE(
mca_btl_elan_frag_user_t,
mca_btl_base_descriptor_t,
mca_btl_elan_frag_user_constructor,
NULL);
OBJ_CLASS_INSTANCE( mca_btl_elan_frag_max_t,
mca_btl_base_descriptor_t,
mca_btl_elan_frag_max_constructor,
NULL );
OBJ_CLASS_INSTANCE( mca_btl_elan_frag_user_t,
mca_btl_base_descriptor_t,
mca_btl_elan_frag_user_constructor,
NULL );

Просмотреть файл

@ -9,32 +9,22 @@
* $HEADER$
*/
#ifndef MCA_BTL_TEMPLATE_FRAG_H
#define MCA_BTL_TEMPLATE_FRAG_H
#ifndef MCA_BTL_ELAN_FRAG_H
#define MCA_BTL_ELAN_FRAG_H
#define MCA_BTL_TEMPLATE_FRAG_ALIGN (8)
#include "ompi_config.h"
#include "btl_elan.h"
#if defined(c_plusplus) || defined(__cplusplus)
extern "C" {
#endif
BEGIN_C_DECLS
#define MCA_BTL_ELAN_HDR_TYPE_SEND 1
#define MCA_BTL_ELAN_HDR_TYPE_PUT 2
#define MCA_BTL_ELAN_HDR_TYPE_GET 3
#define MCA_BTL_ELAN_HDR_TYPE_MATCH 4
#define MCA_BTL_ELAN_HDR_TYPE_FRAG 5
#define MCA_BTL_ELAN_HDR_TYPE_ACK 6
#define MCA_BTL_ELAN_HDR_TYPE_NACK 7
#define MCA_BTL_ELAN_HDR_TYPE_FIN 8
#define MCA_BTL_ELAN_HDR_TYPE_FIN_ACK 9
#define MCA_BTL_ELAN_HDR_TYPE_RECV 10
#define MCA_BTL_ELAN_HDR_TYPE_RECV 4
/**
* TEMPLATE send fraelanent derived type.
* Elan send fragment derived type.
*/
struct mca_btl_elan_frag_t {
mca_btl_base_descriptor_t base;
@ -44,7 +34,8 @@ struct mca_btl_elan_frag_t {
int type;
ompi_free_list_t* my_list;
mca_btl_base_tag_t tag;
size_t size;
struct ELAN_EVENT* elan_event;
size_t size;
struct mca_mpool_base_registration_t* registration;
};
typedef struct mca_btl_elan_frag_t mca_btl_elan_frag_t;
@ -68,42 +59,41 @@ OBJ_CLASS_DECLARATION(mca_btl_elan_frag_user_t);
* free list(s).
*/
#define MCA_BTL_TEMPLATE_FRAG_ALLOC_EAGER(frag, rc) \
{ \
ompi_free_list_item_t *item; \
OMPI_FREE_LIST_WAIT(&mca_btl_elan_component.elan_frag_eager, item, rc); \
frag = (mca_btl_elan_frag_t*) item; \
frag->segment.seg_addr.pval = (void*)(frag+1); \
frag->my_list = &mca_btl_elan_component.elan_frag_eager; \
}
#define MCA_BTL_ELAN_FRAG_ALLOC_EAGER(frag, rc) \
{ \
ompi_free_list_item_t *item; \
OMPI_FREE_LIST_WAIT(&mca_btl_elan_component.elan_frag_eager, item, rc); \
frag = (mca_btl_elan_frag_t*) item; \
frag->segment.seg_addr.pval = (void*)(frag+1); \
frag->my_list = &mca_btl_elan_component.elan_frag_eager; \
}
#define MCA_BTL_TEMPLATE_FRAG_ALLOC_MAX(frag, rc) \
{ \
ompi_free_list_item_t *item; \
OMPI_FREE_LIST_WAIT(&mca_btl_elan_component.elan_frag_max, item, rc); \
frag = (mca_btl_elan_frag_t*) item; \
frag->segment.seg_addr.pval = (void*)(frag+1); \
frag->my_list = &mca_btl_elan_component.elan_frag_max; \
}
#define MCA_BTL_ELAN_FRAG_ALLOC_MAX(frag, rc) \
{ \
ompi_free_list_item_t *item; \
OMPI_FREE_LIST_WAIT(&mca_btl_elan_component.elan_frag_max, item, rc); \
frag = (mca_btl_elan_frag_t*) item; \
frag->segment.seg_addr.pval = (void*)(frag+1); \
frag->my_list = &mca_btl_elan_component.elan_frag_max; \
}
#define MCA_BTL_TEMPLATE_FRAG_ALLOC_USER(frag, rc) \
{ \
ompi_free_list_item_t *item; \
OMPI_FREE_LIST_WAIT(&mca_btl_elan_component.elan_frag_user, item, rc); \
frag = (mca_btl_elan_frag_t*) item; \
frag->my_list = &mca_btl_elan_component.elan_frag_user; \
}
#define MCA_BTL_ELAN_FRAG_ALLOC_USER(frag, rc) \
{ \
ompi_free_list_item_t *item; \
OMPI_FREE_LIST_WAIT(&mca_btl_elan_component.elan_frag_user, item, rc); \
frag = (mca_btl_elan_frag_t*) item; \
frag->my_list = &mca_btl_elan_component.elan_frag_user; \
}
#define MCA_BTL_TEMPLATE_FRAG_RETURN(frag) \
{ \
OMPI_FREE_LIST_RETURN(frag->my_list, \
(ompi_free_list_item_t*)(frag)); \
}
#define MCA_BTL_ELAN_FRAG_RETURN(frag) \
{ \
OMPI_FREE_LIST_RETURN(frag->my_list, \
(ompi_free_list_item_t*)(frag)); \
}
#if defined(c_plusplus) || defined(__cplusplus)
}
#endif
#endif
END_C_DECLS
#endif /* MCA_BTL_ELAN_FRAG_H */

Просмотреть файл

@ -65,9 +65,9 @@ static mca_btl_elan_proc_t* mca_btl_elan_proc_lookup_ompi(ompi_proc_t* ompi_proc
mca_btl_elan_proc_t* elan_proc;
OPAL_THREAD_LOCK(&mca_btl_elan_component.elan_lock);
for(elan_proc = (mca_btl_elan_proc_t*)
opal_list_get_first(&mca_btl_elan_component.elan_procs);
opal_list_get_first(&mca_btl_elan_component.elan_procs);
elan_proc != (mca_btl_elan_proc_t*)
opal_list_get_end(&mca_btl_elan_component.elan_procs);
opal_list_get_end(&mca_btl_elan_component.elan_procs);
elan_proc = (mca_btl_elan_proc_t*)opal_list_get_next(elan_proc)) {
if(elan_proc->proc_ompi == ompi_proc) {
OPAL_THREAD_UNLOCK(&mca_btl_elan_component.elan_lock);
@ -79,7 +79,7 @@ static mca_btl_elan_proc_t* mca_btl_elan_proc_lookup_ompi(ompi_proc_t* ompi_proc
}
/*
* Create a TEMPLATE process structure. There is a one-to-one correspondence
* Create a ELAN process structure. There is a one-to-one correspondence
* between a ompi_proc_t and a mca_btl_elan_proc_t instance. We cache
* additional data (specifically the list of mca_btl_elan_endpoint_t instances,
* and published addresses) associated w/ a given destination on this
@ -101,8 +101,8 @@ mca_btl_elan_proc_t* mca_btl_elan_proc_create(ompi_proc_t* ompi_proc)
/* Oops! First time, gotta create a new Elan proc
* out of the ompi_proc ... */
module_proc = OBJ_NEW(mca_btl_elan_proc_t);
if(NULL == module_proc)
return NULL;
if(NULL == module_proc)
return NULL;
/* Initialize number of peer */
module_proc->proc_endpoint_count = 0;
module_proc->proc_ompi = ompi_proc;
@ -115,19 +115,19 @@ mca_btl_elan_proc_t* mca_btl_elan_proc_create(ompi_proc_t* ompi_proc)
(void**)&module_proc->elan_vp_array,
&size );
if(rc != OMPI_SUCCESS) {
BTL_ERROR(("mca_base_modex_recv: failed with return value=%d", rc));
OBJ_RELEASE(module_proc);
return NULL;
BTL_ERROR(("mca_base_modex_recv: failed with return value=%d", rc));
OBJ_RELEASE(module_proc);
return NULL;
}
module_proc->proc_addr_count = size / sizeof(unsigned int);;
/* XXX: Right now, there can be only 1 peer associated
* with a proc. Needs a little bit change in
* mca_btl_elan_proc_t to allow on demand increasing of
* number of endpoints for this proc
*/
*/
module_proc->proc_endpoints = (mca_btl_base_endpoint_t**)
malloc((1+module_proc->proc_addr_count )* sizeof(mca_btl_base_endpoint_t*));
malloc((1+module_proc->proc_addr_count )* sizeof(mca_btl_base_endpoint_t*));
if(NULL == module_proc->proc_endpoints) {
OBJ_RELEASE(module_proc);
return NULL;
@ -142,7 +142,7 @@ mca_btl_elan_proc_t* mca_btl_elan_proc_create(ompi_proc_t* ompi_proc)
* it an address.
*/
int mca_btl_elan_proc_insert( mca_btl_elan_proc_t* module_proc,
mca_btl_base_endpoint_t* module_endpoint )
mca_btl_base_endpoint_t* module_endpoint )
{
/* insert into endpoint array */
size_t i;

Просмотреть файл

@ -9,8 +9,8 @@
* $HEADER$
*/
#ifndef MCA_BTL_TEMPLATE_PROC_H
#define MCA_BTL_TEMPLATE_PROC_H
#ifndef MCA_BTL_ELAN_PROC_H
#define MCA_BTL_ELAN_PROC_H
#include "orte/mca/ns/ns.h"
#include "opal/class/opal_object.h"
@ -18,9 +18,7 @@
#include "btl_elan.h"
#include "btl_elan_endpoint.h"
#if defined(c_plusplus) || defined(__cplusplus)
extern "C" {
#endif
BEGIN_C_DECLS
/**
* Represents the state of a remote process and the set of addresses
@ -58,7 +56,6 @@ OBJ_CLASS_DECLARATION(mca_btl_elan_proc_t);
mca_btl_elan_proc_t* mca_btl_elan_proc_create(ompi_proc_t* ompi_proc);
int mca_btl_elan_proc_insert(mca_btl_elan_proc_t*, mca_btl_base_endpoint_t*);
#if defined(c_plusplus) || defined(__cplusplus)
}
#endif
#endif
END_C_DECLS
#endif /* MCA_BTL_ELAN_PROC_H */

Просмотреть файл

@ -71,7 +71,7 @@ mca_btl_gm_module_t mca_btl_gm_module = {
0, /* flags */
mca_btl_gm_add_procs,
mca_btl_gm_del_procs,
mca_btl_gm_register,
NULL,
mca_btl_gm_finalize,
mca_btl_gm_alloc,
mca_btl_gm_free,
@ -162,24 +162,6 @@ int mca_btl_gm_del_procs(struct mca_btl_base_module_t* btl,
return OMPI_SUCCESS;
}
/**
* Register callback function to support send/recv semantics
*/
int mca_btl_gm_register(
struct mca_btl_base_module_t* btl,
mca_btl_base_tag_t tag,
mca_btl_base_module_recv_cb_fn_t cbfunc,
void* cbdata)
{
mca_btl_gm_module_t* gm_btl = (mca_btl_gm_module_t*) btl;
gm_btl->gm_reg[tag].cbfunc = cbfunc;
gm_btl->gm_reg[tag].cbdata = cbdata;
return OMPI_SUCCESS;
}
/*
*Register callback function for error handling..
*/

Просмотреть файл

@ -81,7 +81,6 @@ OMPI_MODULE_DECLSPEC extern mca_btl_gm_component_t mca_btl_gm_component;
*/
struct mca_btl_gm_module_t {
mca_btl_base_module_t super; /**< base BTL interface */
mca_btl_base_recv_reg_t gm_reg[256];
/* local port handle/address */
struct gm_port *port;
@ -241,34 +240,6 @@ extern int mca_btl_gm_get(
struct mca_btl_base_descriptor_t* decriptor
);
/**
* Register a callback function that is called on receipt
* of a fragment.
*
* @param btl (IN) BTL module
* @return Status indicating if registration was successful
*
*/
extern int mca_btl_gm_register(
struct mca_btl_base_module_t* btl,
mca_btl_base_tag_t tag,
mca_btl_base_module_recv_cb_fn_t cbfunc,
void* cbdata);
/**
* Register a callback function that is called on error..
*
* @param btl (IN) BTL module
* @return Status indicating if cleanup was successful
*/
int mca_btl_gm_register_error_cb(
struct mca_btl_base_module_t* btl,
mca_btl_base_module_error_cb_fn_t cbfunc
);
/**
* Register a callback function that is called on error.
*

Просмотреть файл

@ -594,10 +594,10 @@ int mca_btl_gm_component_progress()
unsigned char* buffer = (unsigned char*)gm_ntohp(event->recv.buffer);
mca_btl_gm_frag_t* frag = (mca_btl_gm_frag_t*)*((uintptr_t*)(buffer - sizeof(uintptr_t)));
mca_btl_base_header_t* hdr = (mca_btl_base_header_t *)gm_ntohp(event->recv.message);
mca_btl_base_recv_reg_t* reg;
mca_btl_active_message_callback_t* reg;
frag->segment.seg_addr.pval = (hdr+1);
frag->segment.seg_len = gm_ntohl(event->recv.length) - sizeof(mca_btl_base_header_t);
reg = &btl->gm_reg[hdr->tag];
reg = mca_btl_base_active_message_trigger + hdr->tag;
/* cbfunc may be null if interface goes down.. */
if(reg->cbfunc) {
@ -621,10 +621,10 @@ int mca_btl_gm_component_progress()
unsigned char* buffer = (unsigned char*)gm_ntohp(event->recv.buffer);
mca_btl_gm_frag_t* frag = (mca_btl_gm_frag_t*)*((uintptr_t*)(buffer - sizeof(uintptr_t)));
mca_btl_base_header_t* hdr = (mca_btl_base_header_t*)buffer;
mca_btl_base_recv_reg_t* reg;
mca_btl_active_message_callback_t* reg;
frag->segment.seg_addr.pval = (hdr+1);
frag->segment.seg_len = gm_ntohl(event->recv.length) - sizeof(mca_btl_base_header_t);
reg = &btl->gm_reg[hdr->tag];
reg = mca_btl_base_active_message_trigger + hdr->tag;
if(reg->cbfunc) {
OPAL_THREAD_UNLOCK(&mca_btl_gm_component.gm_lock);
reg->cbfunc(&btl->super, hdr->tag, &frag->base, reg->cbdata);
@ -687,10 +687,10 @@ static void* mca_btl_gm_progress_thread( opal_object_t* arg )
unsigned char* buffer = (unsigned char*)gm_ntohp(event->recv.buffer);
mca_btl_gm_frag_t* frag = (mca_btl_gm_frag_t*)*((uintptr_t*)(buffer - sizeof(uintptr_t)));
mca_btl_base_header_t* hdr = (mca_btl_base_header_t *)gm_ntohp(event->recv.message);
mca_btl_base_recv_reg_t* reg;
mca_btl_active_message_callback_t* reg;
frag->segment.seg_addr.pval = (hdr+1);
frag->segment.seg_len = gm_ntohl(event->recv.length) - sizeof(mca_btl_base_header_t);
reg = &btl->gm_reg[hdr->tag];
reg = mca_btl_base_active_message_trigger + hdr->tag;
OPAL_THREAD_UNLOCK(&mca_btl_gm_component.gm_lock);
reg->cbfunc(&btl->super, hdr->tag, &frag->base, reg->cbdata);
@ -707,10 +707,10 @@ static void* mca_btl_gm_progress_thread( opal_object_t* arg )
unsigned char* buffer = (unsigned char*)gm_ntohp(event->recv.buffer);
mca_btl_gm_frag_t* frag = (mca_btl_gm_frag_t*)*((uintptr_t*)(buffer - sizeof(uintptr_t)));
mca_btl_base_header_t* hdr = (mca_btl_base_header_t*)buffer;
mca_btl_base_recv_reg_t* reg;
mca_btl_active_message_callback_t* reg;
frag->segment.seg_addr.pval = (hdr+1);
frag->segment.seg_len = gm_ntohl(event->recv.length) - sizeof(mca_btl_base_header_t);
reg = &btl->gm_reg[hdr->tag];
reg = mca_btl_base_active_message_trigger + hdr->tag;
OPAL_THREAD_UNLOCK(&mca_btl_gm_component.gm_lock);
reg->cbfunc(&btl->super, hdr->tag, &frag->base, reg->cbdata);

Просмотреть файл

@ -112,9 +112,6 @@ int mca_btl_mx_register( struct mca_btl_base_module_t* btl,
{
mca_btl_mx_module_t* mx_btl = (mca_btl_mx_module_t*) btl;
mx_btl->mx_reg[tag].cbfunc = cbfunc;
mx_btl->mx_reg[tag].cbdata = cbdata;
if( (NULL != cbfunc) && ( 0 == mca_btl_mx_component.mx_use_unexpected) ) {
mca_btl_mx_frag_t* frag;
mx_return_t mx_return;
@ -135,12 +132,11 @@ int mca_btl_mx_register( struct mca_btl_base_module_t* btl,
frag->base.des_src = NULL;
frag->base.des_src_cnt = 0;
frag->mx_frag_list = NULL;
frag->tag = tag;
frag->type = MCA_BTL_MX_RECV;
mx_segment.segment_ptr = (void*)(frag+1);
mx_segment.segment_length = mx_btl->super.btl_eager_limit;
mx_return = mx_irecv( mx_btl->mx_endpoint, &mx_segment, 1, (uint64_t)tag,
BTL_MX_RECV_MASK,
mx_return = mx_irecv( mx_btl->mx_endpoint, &mx_segment, 1, 0x0ULL, 0x0ULL,
frag, &(frag->mx_request) );
if( MX_SUCCESS != mx_return ) {
opal_output( 0, "mca_btl_mx_register: mx_irecv failed with status %d (%s)\n",
@ -197,7 +193,7 @@ int mca_btl_mx_free( struct mca_btl_base_module_t* btl,
{
mca_btl_mx_frag_t* frag = (mca_btl_mx_frag_t*)des;
assert( 0xff == frag->tag );
assert( MCA_BTL_MX_SEND == frag->type );
MCA_BTL_MX_FRAG_RETURN(btl, frag);
return OMPI_SUCCESS;
@ -330,7 +326,7 @@ mca_btl_base_descriptor_t* mca_btl_mx_prepare_dst( struct mca_btl_base_module_t*
}
/* Allow the fragment to be recycled using the mca_btl_mx_free function */
frag->tag = 0xff;
frag->type = MCA_BTL_MX_SEND;
frag->base.des_dst = frag->segment;
frag->base.des_dst_cnt = 1;
@ -367,7 +363,7 @@ static int mca_btl_mx_put( struct mca_btl_base_module_t* btl,
}
frag->endpoint = endpoint;
frag->tag = 0xff;
frag->type = MCA_BTL_MX_SEND;
do {
mx_segment[i].segment_ptr = descriptor->des_src[i].seg_addr.pval;
@ -417,7 +413,7 @@ int mca_btl_mx_send( struct mca_btl_base_module_t* btl,
}
frag->endpoint = endpoint;
frag->tag = 0xff;
frag->type = MCA_BTL_MX_SEND;
do {
mx_segment[i].segment_ptr = descriptor->des_src[i].seg_addr.pval;

Просмотреть файл

@ -115,7 +115,6 @@ OMPI_MODULE_DECLSPEC extern mca_btl_mx_component_t mca_btl_mx_component;
*/
struct mca_btl_mx_module_t {
mca_btl_base_module_t super; /**< base BTL interface */
mca_btl_base_recv_reg_t mx_reg[MCA_BTL_TAG_MAX]; /**< the PML registered callbacks */
mx_endpoint_t mx_endpoint; /**< local MX endpoint */
mx_endpoint_addr_t mx_endpoint_addr; /**< local MX endpoint address */
uint32_t mx_unique_network_id; /**< unique identifier for this BTL,

Просмотреть файл

@ -207,7 +207,7 @@ mca_btl_mx_unexpected_handler( void *context, mx_endpoint_addr_t source,
void * data_if_available )
{
mca_btl_mx_module_t* mx_btl = (mca_btl_mx_module_t*)context;
mca_btl_base_recv_reg_t* reg;
mca_btl_active_message_callback_t* reg;
mca_btl_base_tag_t tag;
mca_btl_base_descriptor_t descriptor;
mca_btl_base_segment_t segment;
@ -220,7 +220,7 @@ mca_btl_mx_unexpected_handler( void *context, mx_endpoint_addr_t source,
tag = match_value & 0xff;
assert( tag < 16 );
reg = &(mx_btl->mx_reg[tag]);
reg = mca_btl_base_active_message_trigger + tag;
segment.seg_addr.pval = data_if_available;
segment.seg_len = length;
@ -612,24 +612,24 @@ int mca_btl_mx_component_progress(void)
continue;
}
/* on the mx_status we have now the pointer attached to the request.
* This pointer indicate which fragment we are working on. On the
* status we have the status of the operation, so we know what we
* are supposed to do next.
* This pointer indicate which fragment we are working on. On the
* status we have the status of the operation, so we know what we
* are supposed to do next.
*/
frag = mx_status.context;
if( NULL != frag ) {
if( 0xff == frag->tag ) { /* it's a send */
if( 0xff == frag->type ) { /* it's a send */
/* call the completion callback */
frag->base.des_cbfunc( &(mx_btl->super), frag->endpoint,
&(frag->base), OMPI_SUCCESS );
} else if( !mca_btl_mx_component.mx_use_unexpected ) { /* and this one is a receive */
mca_btl_base_recv_reg_t* reg;
mca_btl_active_message_callback_t* reg;
mx_segment_t mx_segment;
uint8_t tag = mx_status.match_info & 0xff;
reg = &(mx_btl->mx_reg[frag->tag]);
reg = mca_btl_base_active_message_trigger + tag;
frag->base.des_dst->seg_len = mx_status.msg_length;
reg->cbfunc( &(mx_btl->super), frag->tag, &(frag->base),
reg->cbdata );
reg->cbfunc( &(mx_btl->super), tag, &(frag->base), reg->cbdata );
/**
* The upper level extract the data from the fragment.
* Now we can register the fragment
@ -638,7 +638,7 @@ int mca_btl_mx_component_progress(void)
mx_segment.segment_ptr = frag->base.des_dst->seg_addr.pval;
mx_segment.segment_length = mca_btl_mx_module.super.btl_eager_limit;
mx_return = mx_irecv( mx_btl->mx_endpoint, &mx_segment, 1,
(uint64_t)frag->tag, BTL_MX_RECV_MASK,
0x0ULL, 0x0ULL,
frag, &(frag->mx_request) );
if( MX_SUCCESS != mx_return ) {
opal_output( 0, "Fail to re-register a fragment with the MX NIC ... (%s)\n",

Просмотреть файл

@ -31,6 +31,9 @@
extern "C" {
#endif
#define MCA_BTL_MX_SEND 0x01
#define MCA_BTL_MX_RECV 0x02
/**
* MX send framxent derived type.
*/
@ -38,7 +41,7 @@ extern "C" {
mca_btl_base_descriptor_t base;
mca_btl_base_segment_t segment[2];
struct mca_btl_base_endpoint_t* endpoint;
mca_btl_base_tag_t tag;
uint8_t type;
mx_request_t mx_request;
size_t size;
ompi_free_list_t* mx_frag_list;

Просмотреть файл

@ -53,7 +53,7 @@ mca_btl_ud_module_t mca_btl_ofud_module = {
MCA_BTL_FLAGS_SEND,
mca_btl_ud_add_procs,
mca_btl_ud_del_procs,
mca_btl_ud_register,
NULL,
mca_btl_ud_finalize,
mca_btl_ud_alloc,
mca_btl_ud_free,
@ -187,26 +187,6 @@ int mca_btl_ud_del_procs(struct mca_btl_base_module_t* btl,
}
/*
* Register callback function to support send/recv semantics
*/
int mca_btl_ud_register(struct mca_btl_base_module_t* btl,
mca_btl_base_tag_t tag,
mca_btl_base_module_recv_cb_fn_t cbfunc,
void* cbdata)
{
mca_btl_ud_module_t* ud_btl = (mca_btl_ud_module_t*)btl;
OPAL_THREAD_LOCK(&ud_btl->ud_lock);
ud_btl->ib_reg[tag].cbfunc = cbfunc;
ud_btl->ib_reg[tag].cbdata = cbdata;
OPAL_THREAD_UNLOCK(&ud_btl->ud_lock);
return OMPI_SUCCESS;
}
/**
* Allocate a segment.
*

Просмотреть файл

@ -123,7 +123,6 @@ extern mca_btl_ud_profile_t mca_btl_ud_profile;
struct mca_btl_ud_module_t {
mca_btl_base_module_t super;
mca_btl_ud_recv_reg_t ib_reg[256]; /* protected by ib_lock */
uint8_t ib_port_num;
struct ibv_device* ib_dev;
@ -139,7 +138,7 @@ struct mca_btl_ud_module_t {
opal_list_t pending_frags; /**< list of pending send frags */
opal_mutex_t ud_lock; /**< lock for ib_reg and pending_frags */
opal_mutex_t ud_lock; /**< lock for pending_frags */
size_t ib_inline_max; /**< max size of IB inline send */
@ -203,20 +202,6 @@ extern mca_btl_base_module_t** mca_btl_ud_component_init(
extern int mca_btl_ud_component_progress(void);
/**
* Register a callback function that is called on receipt
* of a fragment.
*
* @param btl (IN) BTL module
* @return Status indicating if cleanup was successful
*/
int mca_btl_ud_register(struct mca_btl_base_module_t* btl,
mca_btl_base_tag_t tag,
mca_btl_base_module_recv_cb_fn_t cbfunc,
void* cbdata);
/**
* Cleanup any resources held by the BTL.
*

Просмотреть файл

@ -425,7 +425,7 @@ int mca_btl_ud_component_progress(void)
struct ibv_recv_wr* bad_wr;
struct ibv_recv_wr* head_wr;
mca_btl_ud_module_t* ud_btl;
mca_btl_base_recv_reg_t* reg;
mca_btl_active_message_callback_t* reg;
struct ibv_wc* cwc;
struct ibv_wc wc[MCA_BTL_UD_NUM_WC];
@ -480,7 +480,7 @@ int mca_btl_ud_component_progress(void)
}
case MCA_BTL_UD_FRAG_RECV:
assert(cwc->opcode == IBV_WC_RECV);
reg = &ud_btl->ib_reg[frag->hdr->tag];
reg = mca_btl_base_active_message_trigger + frag->hdr->tag;
frag->segment.seg_addr.pval = frag->hdr + 1;
frag->segment.seg_len = cwc->byte_len -

Просмотреть файл

@ -73,7 +73,7 @@ mca_btl_openib_module_t mca_btl_openib_module = {
0, /* TODO this should be PUT btl flags */
mca_btl_openib_add_procs,
mca_btl_openib_del_procs,
mca_btl_openib_register,
NULL,
mca_btl_openib_finalize,
/* we need alloc free, pack */
mca_btl_openib_alloc,
@ -460,25 +460,6 @@ int mca_btl_openib_del_procs(struct mca_btl_base_module_t* btl,
return OMPI_SUCCESS;
}
/*
*Register callback function to support send/recv semantics
*/
int mca_btl_openib_register(
struct mca_btl_base_module_t* btl,
mca_btl_base_tag_t tag,
mca_btl_base_module_recv_cb_fn_t cbfunc,
void* cbdata)
{
mca_btl_openib_module_t* openib_btl = (mca_btl_openib_module_t*) btl;
OPAL_THREAD_LOCK(&openib_btl->ib_lock);
openib_btl->ib_reg[tag].cbfunc = cbfunc;
openib_btl->ib_reg[tag].cbdata = cbdata;
OPAL_THREAD_UNLOCK(&openib_btl->ib_lock);
return OMPI_SUCCESS;
}
/*
*Register callback function for error handling..
*/

Просмотреть файл

@ -328,7 +328,6 @@ struct mca_btl_openib_module_qp_t {
struct mca_btl_openib_module_t {
mca_btl_base_module_t super; /**< base BTL interface */
bool btl_inited;
mca_btl_openib_recv_reg_t ib_reg[256];
mca_btl_openib_port_info_t port_info; /* contains only the subnet id right now */
mca_btl_openib_hca_t *hca;
uint8_t port_num; /**< ID of the PORT */
@ -364,25 +363,6 @@ typedef struct mca_btl_openib_reg_t mca_btl_openib_reg_t;
extern void* mca_btl_openib_progress_thread(opal_object_t*);
#endif
/**
* Register a callback function that is called on receipt
* of a fragment.
*
* @param btl (IN) BTL module
* @return Status indicating if cleanup was successful
*
* When the process list changes, the PML notifies the BTL of the
* change, to provide the opportunity to cleanup or release any
* resources associated with the peer.
*/
int mca_btl_openib_register(
struct mca_btl_base_module_t* btl,
mca_btl_base_tag_t tag,
mca_btl_base_module_recv_cb_fn_t cbfunc,
void* cbdata
);
/**
* Register a callback function that is called on error..

Просмотреть файл

@ -238,6 +238,7 @@ static void btl_openib_control(mca_btl_base_module_t* btl,
mca_btl_openib_eager_rdma_header_t *rdma_hdr;
mca_btl_openib_header_coalesced_t *clsc_hdr =
(mca_btl_openib_header_coalesced_t*)(ctl_hdr + 1);
mca_btl_active_message_callback_t* reg;
size_t len = des->des_dst->seg_len - sizeof(*ctl_hdr);
switch (ctl_hdr->type) {
@ -290,8 +291,8 @@ static void btl_openib_control(mca_btl_base_module_t* btl,
tmp_seg.seg_len = clsc_hdr->size;
/* call registered callback */
obtl->ib_reg[clsc_hdr->tag].cbfunc(&obtl->super, clsc_hdr->tag,
&tmp_des, obtl->ib_reg[clsc_hdr->tag].cbdata);
reg = mca_btl_base_active_message_trigger + clsc_hdr->tag;
reg->cbfunc( &obtl->super, clsc_hdr->tag, &tmp_des, reg->cbdata );
len -= skip;
clsc_hdr = (mca_btl_openib_header_coalesced_t*)
(((unsigned char*)clsc_hdr) + skip);
@ -430,8 +431,8 @@ static int init_one_port(opal_list_t *btl_list, mca_btl_openib_hca_t *hca,
continue;
}
openib_btl->ib_reg[MCA_BTL_TAG_BTL].cbfunc = btl_openib_control;
openib_btl->ib_reg[MCA_BTL_TAG_BTL].cbdata = NULL;
mca_btl_base_active_message_trigger[MCA_BTL_TAG_IB].cbfunc = btl_openib_control;
mca_btl_base_active_message_trigger[MCA_BTL_TAG_IB].cbdata = NULL;
/* Check bandwidth configured for this HCA */
sprintf(param, "bandwidth_%s", ibv_get_device_name(hca->ib_dev));
@ -1470,8 +1471,9 @@ static int btl_openib_handle_incoming(mca_btl_openib_module_t *openib_btl,
if(OPAL_LIKELY(!(is_credit_msg = is_credit_message(frag)))) {
/* call registered callback */
openib_btl->ib_reg[hdr->tag].cbfunc(&openib_btl->super, hdr->tag, des,
openib_btl->ib_reg[hdr->tag].cbdata);
mca_btl_active_message_callback_t* reg;
reg = mca_btl_base_active_message_trigger + hdr->tag;
reg->cbfunc( &openib_btl->super, hdr->tag, des, reg->cbdata );
if(MCA_BTL_OPENIB_RDMA_FRAG(frag)) {
cqp = (hdr->credits >> 11) & 0x0f;
hdr->credits &= 0x87ff;

Просмотреть файл

@ -55,7 +55,7 @@ mca_btl_portals_module_t mca_btl_portals_module = {
mca_btl_portals_add_procs,
mca_btl_portals_del_procs,
mca_btl_portals_register,
NULL,
mca_btl_portals_finalize,
mca_btl_portals_alloc,
@ -224,21 +224,6 @@ mca_btl_portals_del_procs(struct mca_btl_base_module_t *btl_base,
}
int
mca_btl_portals_register(struct mca_btl_base_module_t* btl_base,
mca_btl_base_tag_t tag,
mca_btl_base_module_recv_cb_fn_t cbfunc,
void* cbdata)
{
assert(&mca_btl_portals_module == (mca_btl_portals_module_t*) btl_base);
mca_btl_portals_module.portals_reg[tag].cbfunc = cbfunc;
mca_btl_portals_module.portals_reg[tag].cbdata = cbdata;
return OMPI_SUCCESS;
}
mca_btl_base_descriptor_t*
mca_btl_portals_alloc(struct mca_btl_base_module_t* btl_base,
struct mca_btl_base_endpoint_t* endpoint,

Просмотреть файл

@ -73,9 +73,6 @@ struct mca_btl_portals_module_t {
/* base BTL module interface */
mca_btl_base_module_t super;
/* registered callbacks */
mca_btl_base_recv_reg_t portals_reg[MCA_BTL_TAG_MAX];
/* number of processes we're actively connected to. Needed to
know when to do activation / shutdown */
int32_t portals_num_procs;
@ -165,11 +162,6 @@ int mca_btl_portals_del_procs(struct mca_btl_base_module_t* btl_base,
struct ompi_proc_t **procs,
struct mca_btl_base_endpoint_t** peers);
int mca_btl_portals_register(struct mca_btl_base_module_t* btl_base,
mca_btl_base_tag_t tag,
mca_btl_base_module_recv_cb_fn_t cbfunc,
void* cbdata);
mca_btl_base_descriptor_t*
mca_btl_portals_alloc(struct mca_btl_base_module_t* btl_base,
struct mca_btl_base_endpoint_t* endpoint,

Просмотреть файл

@ -148,8 +148,6 @@ mca_btl_portals_component_open(void)
copy and send. So don't use it for now. */
mca_btl_portals_module.portals_num_procs = 0;
bzero(&(mca_btl_portals_module.portals_reg),
sizeof(mca_btl_portals_module.portals_reg));
for (i = 0 ; i < OMPI_BTL_PORTALS_EQ_SIZE ; ++i) {
mca_btl_portals_module.portals_eq_sizes[i] = 0;
@ -403,13 +401,11 @@ mca_btl_portals_component_progress(void)
block->full = true;
}
assert(NULL != mca_btl_portals_module.portals_reg[tag].cbfunc);
mca_btl_portals_module.portals_reg[tag].cbfunc(
mca_btl_base_active_message_trigger[tag].cbfunc(
&mca_btl_portals_module.super,
tag,
&frag->base,
mca_btl_portals_module.portals_reg[tag].cbdata);
mca_btl_base_active_message_trigger[tag].cbdata);
mca_btl_portals_return_block_part(&mca_btl_portals_module, block);
}
break;

Просмотреть файл

@ -2,7 +2,7 @@
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2006 The University of Tennessee and The University
* Copyright (c) 2004-2008 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
@ -48,7 +48,7 @@ mca_btl_sctp_module_t mca_btl_sctp_module = {
0, /* flags */
mca_btl_sctp_add_procs,
mca_btl_sctp_del_procs,
mca_btl_sctp_register,
NULL,
mca_btl_sctp_finalize,
mca_btl_sctp_alloc,
mca_btl_sctp_free,
@ -158,23 +158,6 @@ int mca_btl_sctp_del_procs(struct mca_btl_base_module_t* btl,
}
/**
* Register callback function to support send/recv semantics
*/
int mca_btl_sctp_register(
struct mca_btl_base_module_t* btl,
mca_btl_base_tag_t tag,
mca_btl_base_module_recv_cb_fn_t cbfunc,
void* cbdata)
{
mca_btl_sctp_module_t* sctp_btl = (mca_btl_sctp_module_t*) btl;
sctp_btl->sctp_reg[tag].cbfunc = cbfunc;
sctp_btl->sctp_reg[tag].cbdata = cbdata;
return OMPI_SUCCESS;
}
/**
* Allocate a segment.
*

Просмотреть файл

@ -2,7 +2,7 @@
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2006 The University of Tennessee and The University
* Copyright (c) 2004-2008 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
@ -107,7 +107,6 @@ OMPI_MODULE_DECLSPEC extern mca_btl_sctp_component_t mca_btl_sctp_component;
*/
struct mca_btl_sctp_module_t {
mca_btl_base_module_t super; /**< base BTL interface */
mca_btl_base_recv_reg_t sctp_reg[256];
int sctp_ifindex; /**< PTL interface index */
struct sockaddr_in sctp_ifaddr; /**< PTL interface address */
struct sockaddr_in sctp_ifmask; /**< PTL interface netmask */
@ -266,21 +265,6 @@ extern int mca_btl_sctp_get(
struct mca_btl_base_descriptor_t* decriptor
);
/**
* Register a callback function that is called on receipt
* of a fragment.
*
* @param btl (IN) BTL module
* @return Status indicating if registration was successful
*
*/
extern int mca_btl_sctp_register(
struct mca_btl_base_module_t* btl,
mca_btl_base_tag_t tag,
mca_btl_base_module_recv_cb_fn_t cbfunc,
void* cbdata);
/**
* Allocate a descriptor with a segment of the requested size.
* Note that the BTL layer may choose to return a smaller size

Просмотреть файл

@ -1094,7 +1094,8 @@ static void mca_btl_sctp_endpoint_recv_handler(int sd, short flags, void* user)
switch(frag->hdr.type) {
case MCA_BTL_SCTP_HDR_TYPE_SEND:
{
mca_btl_base_recv_reg_t* reg = frag->btl->sctp_reg + frag->hdr.base.tag;
mca_btl_active_message_callback_t* reg;
reg = mca_btl_base_active_message_trigger + frag->hdr.base.tag;
reg->cbfunc(&frag->btl->super, frag->hdr.base.tag, &frag->base, reg->cbdata);
break;
}

Просмотреть файл

@ -188,7 +188,8 @@ data_still_pending_on_endpoint:
switch(frag->hdr.type) {
case MCA_BTL_SCTP_HDR_TYPE_SEND:
{
mca_btl_base_recv_reg_t* reg = frag->btl->sctp_reg + frag->hdr.base.tag;
mca_btl_active_message_callback_t* reg;
reg = mca_btl_base_active_message_trigger + frag->hdr.base.tag;
reg->cbfunc(&frag->btl->super, frag->hdr.base.tag, &frag->base, reg->cbdata);
break;
}

Просмотреть файл

@ -57,7 +57,7 @@ mca_btl_base_module_t mca_btl_self = {
0, /* btl flags */
mca_btl_self_add_procs,
mca_btl_self_del_procs,
mca_btl_self_register,
NULL,
mca_btl_self_finalize,
mca_btl_self_alloc,
mca_btl_self_free,
@ -119,29 +119,6 @@ int mca_btl_self_finalize(struct mca_btl_base_module_t* btl)
}
/**
* Register a callback function that is called on receipt
* of a fragment.
*
* @param btl (IN) BTL module
* @return Status indicating if cleanup was successful
*
* When the process list changes, the PML notifies the BTL of the
* change, to provide the opportunity to cleanup or release any
* resources associated with the peer.
*/
int mca_btl_self_register( struct mca_btl_base_module_t* btl,
mca_btl_base_tag_t tag,
mca_btl_base_module_recv_cb_fn_t cbfunc,
void* cbdata )
{
mca_btl_self_component.self_reg[tag].cbfunc = cbfunc;
mca_btl_self_component.self_reg[tag].cbdata = cbdata;
return OMPI_SUCCESS;
}
/**
* Allocate a segment.
*
@ -315,6 +292,8 @@ int mca_btl_self_send( struct mca_btl_base_module_t* btl,
struct mca_btl_base_descriptor_t* des,
mca_btl_base_tag_t tag )
{
mca_btl_active_message_callback_t* reg;
/**
* We have to set the dst before the call to the function and reset them
* after.
@ -322,7 +301,8 @@ int mca_btl_self_send( struct mca_btl_base_module_t* btl,
des->des_dst = des->des_src;
des->des_dst_cnt = des->des_src_cnt;
/* upcall */
mca_btl_self_component.self_reg[tag].cbfunc( btl, tag, des, (void*)OMPI_SUCCESS );
reg = mca_btl_base_active_message_trigger + tag;
reg->cbfunc( btl, tag, des, reg->cbdata );
des->des_dst = NULL;
des->des_dst_cnt = 0;
/* send completion */

Просмотреть файл

@ -51,7 +51,6 @@ struct mca_btl_self_component_t {
ompi_free_list_t self_frags_eager; /**< free list of self first */
ompi_free_list_t self_frags_send; /**< free list of self second */
ompi_free_list_t self_frags_rdma; /**< free list of self second */
mca_btl_base_recv_reg_t self_reg[256];
};
typedef struct mca_btl_self_component_t mca_btl_self_component_t;
OMPI_MODULE_DECLSPEC extern mca_btl_self_component_t mca_btl_self_component;
@ -135,26 +134,6 @@ int mca_btl_self_del_procs(
);
/**
* Register a callback function that is called on receipt
* of a fragment.
*
* @param btl (IN) BTL module
* @return Status indicating if cleanup was successful
*
* When the process list changes, the PML notifies the BTL of the
* change, to provide the opportunity to cleanup or release any
* resources associated with the peer.
*/
int mca_btl_self_register(
struct mca_btl_base_module_t* btl,
mca_btl_base_tag_t tag,
mca_btl_base_module_recv_cb_fn_t cbfunc,
void* cbdata
);
/**
* Allocate a segment.
*

Просмотреть файл

@ -98,7 +98,7 @@ mca_btl_sm_t mca_btl_sm = {
0, /* btl flags */
mca_btl_sm_add_procs,
mca_btl_sm_del_procs,
mca_btl_sm_register,
NULL,
mca_btl_sm_finalize,
mca_btl_sm_alloc,
mca_btl_sm_free,
@ -675,30 +675,6 @@ int mca_btl_sm_finalize(struct mca_btl_base_module_t* btl)
}
/**
* Register a callback function that is called on receipt
* of a fragment.
*
* @param btl (IN) BTL module
* @return Status indicating if cleanup was successful
*
* When the process list changes, the PML notifies the BTL of the
* change, to provide the opportunity to cleanup or release any
* resources associated with the peer.
*/
int mca_btl_sm_register(
struct mca_btl_base_module_t* btl,
mca_btl_base_tag_t tag,
mca_btl_base_module_recv_cb_fn_t cbfunc,
void* cbdata)
{
mca_btl_sm_t* sm_btl = (mca_btl_sm_t*)btl;
sm_btl->sm_reg[tag].cbfunc = cbfunc;
sm_btl->sm_reg[tag].cbdata = cbdata;
return OMPI_SUCCESS;
}
/*
* Register callback function for error handling..
*/

Просмотреть файл

@ -75,9 +75,6 @@ extern mca_btl_sm_module_resource_t mca_btl_sm_module_resource;
#define DONE (char)1
#endif
typedef mca_btl_base_recv_reg_t mca_btl_sm_recv_reg_t;
/**
* Shared Memory (SM) BTL module.
*/
@ -179,7 +176,6 @@ extern int mca_btl_sm_component_progress(void);
struct mca_btl_sm_t {
mca_btl_base_module_t super; /**< base BTL interface */
bool btl_inited; /**< flag indicating if btl has been inited */
mca_btl_sm_recv_reg_t sm_reg[256];
mca_btl_base_module_error_cb_fn_t error_cb;
};
typedef struct mca_btl_sm_t mca_btl_sm_t;
@ -250,26 +246,6 @@ extern int mca_btl_sm_del_procs(
);
/**
* Register a callback function that is called on receipt
* of a fragment.
*
* @param btl (IN) BTL module
* @return Status indicating if cleanup was successful
*
* When the process list changes, the PML notifies the BTL of the
* change, to provide the opportunity to cleanup or release any
* resources associated with the peer.
*/
extern int mca_btl_sm_register(
struct mca_btl_base_module_t* btl,
mca_btl_base_tag_t tag,
mca_btl_base_module_recv_cb_fn_t cbfunc,
void* cbdata
);
/**
* Allocate a segment.
*

Просмотреть файл

@ -421,13 +421,13 @@ int mca_btl_sm_component_progress(void)
}
case MCA_BTL_SM_FRAG_SEND:
{
mca_btl_sm_recv_reg_t* reg;
mca_btl_active_message_callback_t* reg;
/* change the address from address relative to the shared
* memory address, to a true virtual address */
hdr = (mca_btl_sm_hdr_t *)((char *)hdr +
mca_btl_sm_component.sm_offset[peer_smp_rank]);
/* recv upcall */
reg = mca_btl_sm.sm_reg + hdr->tag;
reg = mca_btl_base_active_message_trigger + hdr->tag;
Frag.segment.seg_addr.pval = ((char*)hdr) +
sizeof(mca_btl_sm_hdr_t);
Frag.segment.seg_len = hdr->len;

Просмотреть файл

@ -51,7 +51,7 @@ mca_btl_tcp_module_t mca_btl_tcp_module = {
0, /* flags */
mca_btl_tcp_add_procs,
mca_btl_tcp_del_procs,
mca_btl_tcp_register,
NULL,
mca_btl_tcp_finalize,
mca_btl_tcp_alloc,
mca_btl_tcp_free,
@ -160,23 +160,6 @@ int mca_btl_tcp_del_procs(struct mca_btl_base_module_t* btl,
}
/**
* Register callback function to support send/recv semantics
*/
int mca_btl_tcp_register(
struct mca_btl_base_module_t* btl,
mca_btl_base_tag_t tag,
mca_btl_base_module_recv_cb_fn_t cbfunc,
void* cbdata)
{
mca_btl_tcp_module_t* tcp_btl = (mca_btl_tcp_module_t*) btl;
tcp_btl->tcp_reg[tag].cbfunc = cbfunc;
tcp_btl->tcp_reg[tag].cbdata = cbdata;
return OMPI_SUCCESS;
}
/**
* Allocate a segment.
*

Просмотреть файл

@ -83,7 +83,7 @@ struct mca_btl_tcp_component_t {
int32_t tcp6_port_min; /**< IPv4 minimum port */
int32_t tcp6_port_range; /**< IPv4 port range */
#endif
/* Port range restriction */
/* Port range restriction */
char* tcp_if_include; /**< comma seperated list of interface to include */
char* tcp_if_exclude; /**< comma seperated list of interface to exclude */
@ -108,7 +108,6 @@ OMPI_MODULE_DECLSPEC extern mca_btl_tcp_component_t mca_btl_tcp_component;
*/
struct mca_btl_tcp_module_t {
mca_btl_base_module_t super; /**< base BTL interface */
mca_btl_base_recv_reg_t tcp_reg[256];
uint16_t tcp_ifkindex; /** <BTL kernel interface index */
#if 0
int tcp_ifindex; /**< PTL interface index */
@ -268,21 +267,6 @@ extern int mca_btl_tcp_get(
struct mca_btl_base_descriptor_t* decriptor
);
/**
* Register a callback function that is called on receipt
* of a fragment.
*
* @param btl (IN) BTL module
* @return Status indicating if registration was successful
*
*/
extern int mca_btl_tcp_register(
struct mca_btl_base_module_t* btl,
mca_btl_base_tag_t tag,
mca_btl_base_module_recv_cb_fn_t cbfunc,
void* cbdata);
/**
* Allocate a descriptor with a segment of the requested size.
* Note that the BTL layer may choose to return a smaller size

Просмотреть файл

@ -263,6 +263,7 @@ int mca_btl_tcp_component_open(void)
mca_btl_tcp_component.tcp_disable_family =
mca_btl_tcp_param_register_int ("disable_family", NULL, 0);
return OMPI_SUCCESS;
}

Просмотреть файл

@ -676,15 +676,16 @@ static void mca_btl_tcp_endpoint_recv_handler(int sd, short flags, void* user)
btl_endpoint->endpoint_recv_frag = frag;
} else {
btl_endpoint->endpoint_recv_frag = NULL;
if( MCA_BTL_TCP_HDR_TYPE_SEND == frag->hdr.type ) {
mca_btl_base_recv_reg_t* reg = frag->btl->tcp_reg + frag->hdr.base.tag;
reg->cbfunc(&frag->btl->super, frag->hdr.base.tag, &frag->base, reg->cbdata);
if( MCA_BTL_TCP_HDR_TYPE_SEND == frag->hdr.type ) {
mca_btl_active_message_callback_t* reg;
reg = mca_btl_base_active_message_trigger + frag->hdr.base.tag;
reg->cbfunc(&frag->btl->super, frag->hdr.base.tag, &frag->base, reg->cbdata);
}
#if MCA_BTL_TCP_ENDPOINT_CACHE
if( 0 != btl_endpoint->endpoint_cache_length ) {
/* If the cache still contain some data we can reuse the same fragment
* until we flush it completly.
*/
/* If the cache still contain some data we can reuse the same fragment
* until we flush it completly.
*/
MCA_BTL_TCP_FRAG_INIT_DST(frag, btl_endpoint);
goto data_still_pending_on_endpoint;
}

Просмотреть файл

@ -72,7 +72,6 @@ void mca_btl_tcp_proc_destruct(mca_btl_tcp_proc_t* proc)
}
}
/*
* Create a TCP process structure. There is a one-to-one correspondence
* between a ompi_proc_t and a mca_btl_tcp_proc_t instance. We cache

Просмотреть файл

@ -149,9 +149,6 @@ int mca_btl_template_register(
mca_btl_base_module_recv_cb_fn_t cbfunc,
void* cbdata)
{
mca_btl_template_module_t* template_btl = (mca_btl_template_module_t*) btl;
template_btl->template_reg[tag].cbfunc = cbfunc;
template_btl->template_reg[tag].cbdata = cbdata;
return OMPI_SUCCESS;
}

Просмотреть файл

@ -86,7 +86,6 @@ OMPI_MODULE_DECLSPEC extern mca_btl_template_component_t mca_btl_template_compon
*/
struct mca_btl_template_module_t {
mca_btl_base_module_t super; /**< base BTL interface */
mca_btl_base_recv_reg_t template_reg[MCA_BTL_TAG_MAX];
/* free list of fragment descriptors */
ompi_free_list_t template_frag_eager;

Просмотреть файл

@ -64,7 +64,7 @@ mca_btl_udapl_module_t mca_btl_udapl_module = {
MCA_BTL_FLAGS_SEND,
mca_btl_udapl_add_procs,
mca_btl_udapl_del_procs,
mca_btl_udapl_register,
NULL,
mca_btl_udapl_finalize,
mca_btl_udapl_alloc,
mca_btl_udapl_free,
@ -713,24 +713,6 @@ int mca_btl_udapl_del_procs(struct mca_btl_base_module_t* btl,
}
/**
* Register callback function to support send/recv semantics
*/
/* NOTE(review): shown as REMOVED by this commit.  Old per-module registration
 * for the uDAPL BTL: records the receive callback for `tag` in the module's
 * private udapl_reg[] table (also deleted here).  Superseded by the global
 * mca_btl_base_active_message_trigger[] array — the component-progress hunks
 * below switch every udapl_reg[tag] lookup to that global array, and the
 * module's register slot in mca_btl_udapl_module becomes NULL. */
int mca_btl_udapl_register(
struct mca_btl_base_module_t* btl,
mca_btl_base_tag_t tag,
mca_btl_base_module_recv_cb_fn_t cbfunc,
void* cbdata)
{
/* Cast to the concrete uDAPL module to reach its per-tag callback table. */
mca_btl_udapl_module_t* udapl_btl = (mca_btl_udapl_module_t*) btl;
udapl_btl->udapl_reg[tag].cbfunc = cbfunc;
udapl_btl->udapl_reg[tag].cbdata = cbdata;
return OMPI_SUCCESS;
}
/**
* Allocate a segment.
*

Просмотреть файл

@ -105,7 +105,6 @@ OMPI_MODULE_DECLSPEC extern mca_btl_udapl_component_t mca_btl_udapl_component;
*/
struct mca_btl_udapl_module_t {
mca_btl_base_module_t super; /**< base BTL interface */
mca_btl_base_recv_reg_t udapl_reg[256];
mca_btl_udapl_addr_t udapl_addr;
/* uDAPL interface and other handles */
@ -397,21 +396,6 @@ extern int mca_btl_udapl_get(
struct mca_btl_base_descriptor_t* decriptor
);
/**
* Register a callback function that is called on receipt
* of a fragment.
*
* @param btl (IN) BTL module
* @return Status indicating if registration was successful
*
*/
extern int mca_btl_udapl_register(
struct mca_btl_base_module_t* btl,
mca_btl_base_tag_t tag,
mca_btl_base_module_recv_cb_fn_t cbfunc,
void* cbdata);
/**
* Allocate a descriptor with a segment of the requested size.
* Note that the BTL layer may choose to return a smaller size

Просмотреть файл

@ -536,8 +536,8 @@ mca_btl_udapl_component_init (int *num_btl_modules,
}
/* register internal control message callback */
btl->udapl_reg[MCA_BTL_TAG_BTL].cbfunc = mca_btl_udapl_receive_control;
btl->udapl_reg[MCA_BTL_TAG_BTL].cbdata = NULL;
mca_btl_base_active_message_trigger[MCA_BTL_TAG_UDAPL].cbfunc = mca_btl_udapl_receive_control;
mca_btl_base_active_message_trigger[MCA_BTL_TAG_UDAPL].cbdata = NULL;
/* successful btl creation */
mca_btl_udapl_component.udapl_btls[mca_btl_udapl_component.udapl_num_btls] = btl;
@ -858,7 +858,7 @@ int mca_btl_udapl_component_progress()
}
case MCA_BTL_UDAPL_RECV:
{
mca_btl_base_recv_reg_t* reg;
mca_btl_active_message_callback_t* reg;
int cntrl_msg = -1;
assert(frag->base.des_dst == &frag->segment);
@ -880,7 +880,7 @@ int mca_btl_udapl_component_progress()
cntrl_msg = frag->ftr->tag;
reg = &btl->udapl_reg[frag->ftr->tag];
reg = mca_btl_base_active_message_trigger + frag->ftr->tag;
OPAL_THREAD_UNLOCK(&mca_btl_udapl_component.udapl_lock);
reg->cbfunc(&btl->super,
@ -1087,7 +1087,7 @@ int mca_btl_udapl_component_progress()
if (local_rdma_frag->rdma_ftr->active == 1) {
int pad = 0;
mca_btl_base_recv_reg_t* reg;
mca_btl_active_message_callback_t* reg;
MCA_BTL_UDAPL_RDMA_NEXT_INDEX(endpoint->endpoint_eager_rdma_local.head);
OPAL_THREAD_UNLOCK(&endpoint->endpoint_eager_rdma_local.lock);
@ -1109,7 +1109,7 @@ int mca_btl_udapl_component_progress()
local_rdma_frag->segment.seg_len);
/* retrieve callback and callback */
reg = &btl->udapl_reg[local_rdma_frag->ftr->tag];
reg = mca_btl_base_active_message_trigger + local_rdma_frag->ftr->tag;
reg->cbfunc(&btl->super,
local_rdma_frag->ftr->tag, &local_rdma_frag->base, reg->cbdata);

Просмотреть файл

@ -378,7 +378,7 @@ static int mca_pml_ob1_recv_frag_match( mca_btl_base_module_t *btl,
* correct order (if multiple network interfaces).
*/
PERUSE_TRACE_MSG_EVENT(PERUSE_COMM_MSG_ARRIVED, comm_ptr,
hdr->hdr_src, hdr->hdr_tag, PERUSE_RECV);
hdr->hdr_src, hdr->hdr_tag, PERUSE_RECV);
/* get next expected message sequence number - if threaded
* run, lock to make sure that if another thread is processing
@ -455,7 +455,7 @@ wrong_seq:
* is ahead of sequence. Save it for later.
*/
append_frag_to_list(&proc->frags_cant_match, btl, hdr, segments,
num_segments, NULL);
num_segments, NULL);
OPAL_THREAD_UNLOCK(&comm->matching_lock);
return OMPI_SUCCESS;
}