1
1

Convert sm btl to use new OMPI_PROC_FLAG_LOCAL instead of the modex.

This commit was SVN r6522.
This commit is contained in:
Jeff Squyres 2005-07-15 15:22:41 +00:00
parent 51f8cd1442
commit 84bc5214e9
3 changed files with 37 additions and 132 deletions

View file

@ -110,7 +110,6 @@ int mca_btl_sm_add_procs_same_base_addr(
int return_code=OMPI_SUCCESS;
size_t i,j,proc,size,n_to_allocate,length;
int n_local_procs,cnt,len, my_len;
mca_btl_sm_exchange_t **sm_proc_info;
ompi_proc_t* my_proc; /* pointer to caller's proc structure */
mca_btl_sm_t *btl_sm;
ompi_fifo_t *my_fifos;
@ -127,21 +126,14 @@ int mca_btl_sm_add_procs_same_base_addr(
/* allocate array to hold setup shared memory from all
* other procs */
sm_proc_info=(mca_btl_sm_exchange_t **)
malloc(nprocs*sizeof(mca_btl_sm_exchange_t *));
if( NULL == sm_proc_info ){
return_code=OMPI_ERR_OUT_OF_RESOURCE;
goto CLEANUP;
}
mca_btl_sm_component.sm_proc_connect=(int *) malloc(nprocs*sizeof(int));
if( NULL == mca_btl_sm_component.sm_proc_connect ){
return_code=OMPI_ERR_OUT_OF_RESOURCE;
goto CLEANUP;
}
/* initialize sm_proc_info and sm_proc_connect*/
/* initialize sm_proc_connect */
for(proc=0 ; proc < nprocs ; proc++ ) {
sm_proc_info[proc]=0;
mca_btl_sm_component.sm_proc_connect[proc]=0;
}
@ -159,61 +151,46 @@ int mca_btl_sm_add_procs_same_base_addr(
* of local procs in the prcs list. */
n_local_procs=0;
for( proc=0 ; proc < nprocs; proc++ ) {
#if OMPI_ENABLE_PROGRESS_THREADS == 1
char path[PATH_MAX];
#endif
struct mca_btl_base_endpoint_t *peer;
/* check to see if this is me */
if( my_proc == procs[proc] ) {
mca_btl_sm_component.my_smp_rank=n_local_procs;
mca_btl_sm_component.my_smp_rank = n_local_procs;
}
if( procs[proc]->proc_name.jobid != my_proc->proc_name.jobid ) {
/* check to see if this proc can be reached via shmem (i.e.,
if they're on my local host and in my job) */
else if (procs[proc]->proc_name.jobid != my_proc->proc_name.jobid ||
0 == (procs[proc]->proc_flags & OMPI_PROC_FLAG_LOCAL)) {
continue;
}
return_code = mca_base_modex_recv(
&mca_btl_sm_component.super.btl_version, procs[proc],
(void**)(&(sm_proc_info[proc])), &size);
if(return_code != OMPI_SUCCESS) {
opal_output(0, "mca_btl_sm_add_procs: mca_base_modex_recv: failed with return value=%d", return_code);
/* If we got here, the proc is reachable via sm. So
initialize the peers information */
peer = peers[proc] = malloc(sizeof(struct mca_btl_base_endpoint_t));
if( NULL == peer ){
return_code=OMPI_ERR_OUT_OF_RESOURCE;
goto CLEANUP;
}
/* for zero length, just continue - comparison is meaningless*/
if( 0 >= size ) {
continue;
}
/* check to see if this proc is on my host */
len=strlen((char *)(sm_proc_info[proc]));
if( len == my_len ) {
if( 0 == strncmp(orte_system_info.nodename,
(char *)(sm_proc_info[proc]),len) ) {
struct mca_btl_base_endpoint_t *peer = peers[proc];
peer->peer_smp_rank=n_local_procs+
mca_btl_sm_component.num_smp_procs;
#if OMPI_ENABLE_PROGRESS_THREADS == 1
char path[PATH_MAX];
/* int flags; */
sprintf(path, "%s/sm_fifo.%d", orte_process_info.job_session_dir,
procs[proc]->proc_name.vpid);
peer->fifo_fd = open(path, O_WRONLY);
if(peer->fifo_fd < 0) {
opal_output(0, "mca_btl_sm_add_procs: open(%s) failed with errno=%d\n", path, errno);
goto CLEANUP;
}
#endif
/* initialize the peers information */
peer = peers[proc]=malloc(sizeof(struct mca_btl_base_endpoint_t));
if( NULL == peer ){
return_code=OMPI_ERR_OUT_OF_RESOURCE;
goto CLEANUP;
}
peer->peer_smp_rank=n_local_procs+
mca_btl_sm_component.num_smp_procs;
#if OMPI_ENABLE_PROGRESS_THREADS == 1
sprintf(path, "%s/sm_fifo.%d", orte_process_info.job_session_dir,
procs[proc]->proc_name.vpid);
peer->fifo_fd = open(path, O_WRONLY);
if(peer->fifo_fd < 0) {
opal_output(0, "mca_btl_sm_add_procs: open(%s) failed with errno=%d\n", path, errno);
goto CLEANUP;
}
#endif
n_local_procs++;
mca_btl_sm_component.sm_proc_connect[proc]=SM_CONNECTED;
}
}
n_local_procs++;
mca_btl_sm_component.sm_proc_connect[proc]=SM_CONNECTED;
}
if( n_local_procs == 0) {
return_code = OMPI_SUCCESS;
goto CLEANUP;
@ -586,19 +563,6 @@ int mca_btl_sm_add_procs_same_base_addr(
mca_btl_sm_component.num_smp_procs+=n_local_procs;
CLEANUP:
/* free local memory */
if(sm_proc_info){
/* free the memory allocated by mca_base_modex_recv */
for( proc=0 ; proc < nprocs; proc++ ) {
if(sm_proc_info[proc]){
free(sm_proc_info[proc]);
sm_proc_info[proc]=NULL;
}
}
free(sm_proc_info);
sm_proc_info=NULL;
}
return return_code;
}

View file

@ -210,7 +210,7 @@ extern int mca_btl_sm_add_procs(
struct ompi_bitmap_t* reachability
);
/**
* PML->BTL notification of change in the process list.
* PML->BTL Notification that a receive fragment has been matched.
@ -269,7 +269,7 @@ extern int mca_btl_sm_register(
mca_btl_base_module_recv_cb_fn_t cbfunc,
void* cbdata
);
/**
* Allocate a segment.
@ -292,7 +292,8 @@ extern int mca_btl_sm_free(
struct mca_btl_base_module_t* btl,
mca_btl_base_descriptor_t* segment
);
/**
* Pack data
*
@ -307,7 +308,8 @@ struct mca_btl_base_descriptor_t* mca_btl_sm_prepare_src(
size_t reserve,
size_t* size
);
/**
* Initiate a send to the peer.
*
@ -321,20 +323,11 @@ extern int mca_btl_sm_send(
mca_btl_base_tag_t tag
);
/**
* Data structure used to hold information that will be exchanged with
* all other procs at startup. !!!!! This is only temporary, until the
* registry is complete
*/
#define MCA_BTL_SM_MAX_HOSTNAME_LEN 128
typedef struct mca_btl_sm_exchange{
char host_name[MCA_BTL_SM_MAX_HOSTNAME_LEN];
}mca_btl_sm_exchange_t;
#if OMPI_ENABLE_PROGRESS_THREADS == 1
void mca_btl_sm_component_event_thread(opal_object_t*);
#endif
#if OMPI_ENABLE_PROGRESS_THREADS == 1
#define MCA_BTL_SM_SIGNAL_PEER(peer) \
{ \

View file

@ -40,11 +40,6 @@
#include "btl_sm_fifo.h"
/*
* Local utility functions.
*/
static int mca_btl_sm_component_exchange(void);
/*
* Shared Memory (SM) component instance.
@ -229,11 +224,6 @@ mca_btl_base_module_t** mca_btl_sm_component_init(
mca_btl_sm_component.sm_mpool = NULL;
mca_btl_sm_component.sm_mpool_base = NULL;
/* publish shared memory parameters with the MCA framework */
if (OMPI_SUCCESS != mca_btl_sm_component_exchange()) {
return NULL;
}
#if OMPI_ENABLE_PROGRESS_THREADS == 1
/* create a named pipe to receive events */
sprintf(mca_btl_sm_component.sm_fifo_path,
@ -499,45 +489,3 @@ int mca_btl_sm_component_progress(void)
} /* end peer_local_smp_rank loop */
return rc;
}
/*
*
*/
static int mca_btl_sm_component_exchange()
{
mca_btl_sm_exchange_t mca_btl_sm_setup_info;
size_t len,size;
char *ptr;
int rc;
/* determine length of host name */
len=strlen(orte_system_info.nodename);
/* check if string is zero length or there is an error */
if( 0 >= len) {
return OMPI_ERROR;
}
/* check if string is too long */
if( MCA_BTL_SM_MAX_HOSTNAME_LEN < (len+1) ){
return OMPI_ERROR;
}
/* copy string into structure that will be used to send data around */
ptr=NULL;
ptr=strncpy(&(mca_btl_sm_setup_info.host_name[0]),
orte_system_info.nodename, len);
if( NULL == ptr ) {
return OMPI_ERROR;
}
mca_btl_sm_setup_info.host_name[len]='\0';
/* exchange setup information */
size=sizeof(mca_btl_sm_exchange_t);
rc = mca_base_modex_send(&mca_btl_sm_component.super.btl_version,
&mca_btl_sm_setup_info, size);
return OMPI_SUCCESS;
}