/*
 * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation.  All rights reserved.
 * Copyright (c) 2004-2005 The University of Tennessee and The University
 *                         of Tennessee Research Foundation.  All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart.  All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */
#include "ompi_config.h"
|
|
|
|
#include <errno.h>
|
|
|
|
#include <unistd.h>
|
|
|
|
#include <string.h>
|
|
|
|
#include <fcntl.h>
|
|
|
|
#include <sys/types.h>
|
|
|
|
#include <sys/mman.h>
|
|
|
|
#include <sys/stat.h> /* for mkfifo */
|
|
|
|
|
2005-08-13 01:42:07 +04:00
|
|
|
#include "ompi/include/constants.h"
|
2005-05-24 02:06:50 +04:00
|
|
|
#include "include/sys/cache.h"
|
2005-07-04 03:09:55 +04:00
|
|
|
#include "opal/event/event.h"
|
2005-07-04 05:36:20 +04:00
|
|
|
#include "opal/util/if.h"
|
2005-07-04 04:13:44 +04:00
|
|
|
#include "opal/util/argv.h"
|
2005-07-04 03:31:27 +04:00
|
|
|
#include "opal/util/output.h"
|
2005-05-24 02:06:50 +04:00
|
|
|
#include "util/sys_info.h"
|
|
|
|
#include "util/proc_info.h"
|
|
|
|
#include "mca/pml/pml.h"
|
|
|
|
#include "mca/base/mca_base_param.h"
|
2005-07-03 04:52:18 +04:00
|
|
|
#include "mca/pml/base/pml_base_module_exchange.h"
|
2005-05-24 02:06:50 +04:00
|
|
|
#include "mca/mpool/base/base.h"
|
|
|
|
#include "mca/common/sm/common_sm_mmap.h"
|
2005-06-30 09:50:55 +04:00
|
|
|
#include "btl_sm.h"
|
|
|
|
#include "btl_sm_frag.h"
|
2005-07-28 20:25:09 +04:00
|
|
|
#include "btl_sm_fifo.h"
|
|
|
|
|
2005-05-24 02:06:50 +04:00
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Shared Memory (SM) component instance.
|
|
|
|
*/
|
|
|
|
|
2005-06-30 09:50:55 +04:00
|
|
|
/*
 * Shared Memory (SM) component instance.  Only the "super" (base
 * component) portion is statically initialized here; the remaining
 * fields of mca_btl_sm_component_t are filled in at open/init time.
 */
mca_btl_sm_component_t mca_btl_sm_component = {
    {  /* super is being filled in */
        /* First, the mca_base_component_t struct containing meta information
           about the component itself */
        {
            /* Indicate that we are a btl v1.0.0 component (which also implies a
               specific MCA version) */
            MCA_BTL_BASE_VERSION_1_0_0,
            "sm", /* MCA component name */
            /* Component version tracks the parent OMPI project's version */
            OMPI_MAJOR_VERSION, /* MCA component major version */
            OMPI_MINOR_VERSION, /* MCA component minor version */
            OMPI_RELEASE_VERSION, /* MCA component release version */
            mca_btl_sm_component_open, /* component open */
            mca_btl_sm_component_close /* component close */
        },
        /* Next the MCA v1.0.0 component meta data */
        {
            /* Whether the component is checkpointable or not */
            false
        },
        /* Initialization / progress entry points */
        mca_btl_sm_component_init,
        mca_btl_sm_component_progress,
    } /* end super */
};
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* utility routines for parameter registration
|
|
|
|
*/
|
|
|
|
|
2005-06-30 09:50:55 +04:00
|
|
|
static inline char* mca_btl_sm_param_register_string(
|
2005-05-24 02:06:50 +04:00
|
|
|
const char* param_name,
|
|
|
|
const char* default_value)
|
|
|
|
{
|
|
|
|
char *param_value;
|
2005-09-07 17:40:22 +04:00
|
|
|
int id = mca_base_param_register_string("btl","sm",param_name,NULL,default_value);
|
2005-05-24 02:06:50 +04:00
|
|
|
mca_base_param_lookup_string(id, ¶m_value);
|
|
|
|
return param_value;
|
|
|
|
}
|
|
|
|
|
2005-06-30 09:50:55 +04:00
|
|
|
static inline int mca_btl_sm_param_register_int(
|
2005-05-24 02:06:50 +04:00
|
|
|
const char* param_name,
|
|
|
|
int default_value)
|
|
|
|
{
|
2005-09-07 17:40:22 +04:00
|
|
|
int id = mca_base_param_register_int("btl","sm",param_name,NULL,default_value);
|
2005-05-24 02:06:50 +04:00
|
|
|
int param_value = default_value;
|
|
|
|
mca_base_param_lookup_int(id,¶m_value);
|
|
|
|
return param_value;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Called by MCA framework to open the component, registers
|
|
|
|
* component parameters.
|
|
|
|
*/
|
|
|
|
|
2005-06-30 09:50:55 +04:00
|
|
|
int mca_btl_sm_component_open(void)
|
2005-05-24 02:06:50 +04:00
|
|
|
{
|
|
|
|
/* register SM component parameters */
|
2005-06-30 09:50:55 +04:00
|
|
|
mca_btl_sm_component.sm_free_list_num =
|
2005-10-09 22:15:12 +04:00
|
|
|
mca_btl_sm_param_register_int("free_list_num", 8);
|
2005-06-30 09:50:55 +04:00
|
|
|
mca_btl_sm_component.sm_free_list_max =
|
|
|
|
mca_btl_sm_param_register_int("free_list_max", -1);
|
|
|
|
mca_btl_sm_component.sm_free_list_inc =
|
|
|
|
mca_btl_sm_param_register_int("free_list_inc", 256);
|
|
|
|
mca_btl_sm_component.sm_max_procs =
|
|
|
|
mca_btl_sm_param_register_int("max_procs", -1);
|
|
|
|
mca_btl_sm_component.sm_extra_procs =
|
|
|
|
mca_btl_sm_param_register_int("sm_extra_procs", -1);
|
|
|
|
mca_btl_sm_component.sm_mpool_name =
|
|
|
|
mca_btl_sm_param_register_string("mpool", "sm");
|
|
|
|
mca_btl_sm_component.eager_limit =
|
|
|
|
mca_btl_sm_param_register_int("eager_limit", 1024);
|
|
|
|
mca_btl_sm_component.max_frag_size =
|
|
|
|
mca_btl_sm_param_register_int("max_frag_size", 8*1024);
|
|
|
|
mca_btl_sm_component.size_of_cb_queue =
|
|
|
|
mca_btl_sm_param_register_int("size_of_cb_queue", 128);
|
|
|
|
mca_btl_sm_component.cb_lazy_free_freq =
|
|
|
|
mca_btl_sm_param_register_int("cb_lazy_free_freq", 120);
|
2005-05-24 02:06:50 +04:00
|
|
|
/* make sure that queue size and lazy free frequency are consistent -
|
|
|
|
* want to make sure that slots are freed at a rate they can be
|
|
|
|
* reused, w/o allocating extra new circular buffer fifo arrays */
|
2005-06-30 09:50:55 +04:00
|
|
|
if( (float)(mca_btl_sm_component.cb_lazy_free_freq) >=
|
|
|
|
0.95*(float)(mca_btl_sm_component.size_of_cb_queue) ) {
|
2005-05-24 02:06:50 +04:00
|
|
|
/* upper limit */
|
2005-06-30 09:50:55 +04:00
|
|
|
mca_btl_sm_component.cb_lazy_free_freq=
|
|
|
|
(int)(0.95*(float)(mca_btl_sm_component.size_of_cb_queue));
|
2005-05-24 02:06:50 +04:00
|
|
|
/* lower limit */
|
2005-06-30 09:50:55 +04:00
|
|
|
if( 0>= mca_btl_sm_component.cb_lazy_free_freq ) {
|
|
|
|
mca_btl_sm_component.cb_lazy_free_freq=1;
|
2005-05-24 02:06:50 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* default number of extra procs to allow for future growth */
|
2005-06-30 09:50:55 +04:00
|
|
|
mca_btl_sm_component.sm_extra_procs =
|
|
|
|
mca_btl_sm_param_register_int("sm_extra_procs", 2);
|
2005-05-24 02:06:50 +04:00
|
|
|
|
|
|
|
/* initialize objects */
|
2005-07-04 02:45:48 +04:00
|
|
|
OBJ_CONSTRUCT(&mca_btl_sm_component.sm_lock, opal_mutex_t);
|
2005-06-30 09:50:55 +04:00
|
|
|
OBJ_CONSTRUCT(&mca_btl_sm_component.sm_frags1, ompi_free_list_t);
|
|
|
|
OBJ_CONSTRUCT(&mca_btl_sm_component.sm_frags2, ompi_free_list_t);
|
2005-05-24 02:06:50 +04:00
|
|
|
return OMPI_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* component cleanup - sanity checking of queue lengths
|
|
|
|
*/
|
|
|
|
|
2005-06-30 09:50:55 +04:00
|
|
|
int mca_btl_sm_component_close(void)
|
2005-05-24 02:06:50 +04:00
|
|
|
{
|
|
|
|
int return_value=OMPI_SUCCESS;
|
|
|
|
|
2005-06-30 09:50:55 +04:00
|
|
|
OBJ_DESTRUCT(&mca_btl_sm_component.sm_lock);
|
|
|
|
OBJ_DESTRUCT(&mca_btl_sm_component.sm_frags1);
|
|
|
|
OBJ_DESTRUCT(&mca_btl_sm_component.sm_frags2);
|
2005-05-24 02:06:50 +04:00
|
|
|
|
|
|
|
/* unmap the shared memory control structure */
|
2005-06-30 09:50:55 +04:00
|
|
|
if(mca_btl_sm_component.mmap_file != NULL) {
|
|
|
|
return_value=munmap(mca_btl_sm_component.mmap_file->map_addr,
|
|
|
|
mca_btl_sm_component.mmap_file->map_size);
|
2005-05-24 02:06:50 +04:00
|
|
|
if(-1 == return_value) {
|
|
|
|
return_value=OMPI_ERROR;
|
2005-07-04 03:31:27 +04:00
|
|
|
opal_output(0," munmap failed :: file - %s :: errno - %d \n",
|
2005-06-30 09:50:55 +04:00
|
|
|
mca_btl_sm_component.mmap_file->map_addr,
|
2005-05-24 02:06:50 +04:00
|
|
|
errno);
|
|
|
|
goto CLEANUP;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* unlink file, so that it will be deleted when all references
|
|
|
|
* to it are gone - no error checking, since we want all procs
|
|
|
|
* to call this, so that in an abnormal termination scanario,
|
|
|
|
* this file will still get cleaned up */
|
2005-06-30 09:50:55 +04:00
|
|
|
unlink(mca_btl_sm_component.mmap_file->map_path);
|
2005-05-24 02:06:50 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
#if OMPI_ENABLE_PROGRESS_THREADS == 1
|
|
|
|
/* close/cleanup fifo create for event notification */
|
2005-06-30 09:50:55 +04:00
|
|
|
if(mca_btl_sm_component.sm_fifo_fd > 0) {
|
2005-05-24 02:06:50 +04:00
|
|
|
/* write a done message down the pipe */
|
|
|
|
unsigned char cmd = DONE;
|
2005-06-30 09:50:55 +04:00
|
|
|
if( write(mca_btl_sm_component.sm_fifo_fd,&cmd,sizeof(cmd)) !=
|
2005-05-24 02:06:50 +04:00
|
|
|
sizeof(cmd)){
|
2005-07-04 03:31:27 +04:00
|
|
|
opal_output(0, "mca_btl_sm_component_close: write fifo failed: errno=%d\n",
|
2005-05-24 02:06:50 +04:00
|
|
|
errno);
|
|
|
|
}
|
2005-07-04 02:45:48 +04:00
|
|
|
opal_thread_join(&mca_btl_sm_component.sm_fifo_thread, NULL);
|
2005-06-30 09:50:55 +04:00
|
|
|
close(mca_btl_sm_component.sm_fifo_fd);
|
|
|
|
unlink(mca_btl_sm_component.sm_fifo_path);
|
2005-05-24 02:06:50 +04:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
|
|
|
|
CLEANUP:
|
|
|
|
|
|
|
|
/* return */
|
|
|
|
return return_value;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* SM component initialization
|
|
|
|
*/
|
2005-06-30 09:50:55 +04:00
|
|
|
/*
 * SM component initialization.  Allocates and returns an array of two
 * BTL module pointers (mca_btl_sm[0] for peers sharing this process's
 * shared-memory base address, mca_btl_sm[1] for peers that do not) and
 * seeds their scheduling parameters from the registered MCA params.
 *
 * @param num_ptls [out]            set to 2 on success, 0 otherwise
 * @param enable_progress_threads   unused here (progress-thread setup is
 *                                  compile-time gated instead)
 * @param enable_mpi_threads        unused here
 * @return malloc'd array of module pointers (caller owns the array),
 *         or NULL on failure.
 */
mca_btl_base_module_t** mca_btl_sm_component_init(
    int *num_ptls,
    bool enable_progress_threads,
    bool enable_mpi_threads)
{
    mca_btl_base_module_t **ptls = NULL;
    int i;

    *num_ptls = 0;

    /* lookup/create shared memory pool only when used */
    mca_btl_sm_component.sm_mpool = NULL;
    mca_btl_sm_component.sm_mpool_base = NULL;

#if OMPI_ENABLE_PROGRESS_THREADS == 1
    /* create a named pipe to receive events */
    /* NOTE(review): unbounded sprintf - assumes sm_fifo_path is large
     * enough for the session dir plus suffix; confirm the declared size
     * of sm_fifo_path and consider snprintf. */
    sprintf( mca_btl_sm_component.sm_fifo_path,
             "%s/sm_fifo.%lu", orte_process_info.job_session_dir,
             (unsigned long)orte_process_info.my_name->vpid );
    if(mkfifo(mca_btl_sm_component.sm_fifo_path, 0660) < 0) {
        opal_output(0, "mca_btl_sm_component_init: mkfifo failed with errno=%d\n",errno);
        return NULL;
    }
    /* O_RDWR so the open does not block waiting for a writer */
    mca_btl_sm_component.sm_fifo_fd = open(mca_btl_sm_component.sm_fifo_path, O_RDWR);
    if(mca_btl_sm_component.sm_fifo_fd < 0) {
        opal_output(0, "mca_btl_sm_component_init: open(%s) failed with errno=%d\n",
                    mca_btl_sm_component.sm_fifo_path, errno);
        return NULL;
    }

    /* spawn the thread that services fifo events */
    OBJ_CONSTRUCT(&mca_btl_sm_component.sm_fifo_thread, opal_thread_t);
    mca_btl_sm_component.sm_fifo_thread.t_run = (opal_thread_fn_t) mca_btl_sm_component_event_thread;
    opal_thread_start(&mca_btl_sm_component.sm_fifo_thread);
#endif

    /* allocate the Shared Memory PTL: one module for same-base-address
     * peers, one for different-base-address peers */
    *num_ptls = 2;
    ptls = malloc((*num_ptls)*sizeof(mca_btl_base_module_t*));
    if (NULL == ptls) {
        return NULL;
    }

    /* get pointer to the ptls */
    ptls[0] = (mca_btl_base_module_t *)(&(mca_btl_sm[0]));
    ptls[1] = (mca_btl_base_module_t *)(&(mca_btl_sm[1]));

    /* set scheduling parameters on both modules */
    for( i=0 ; i < 2 ; i++ ) {
        mca_btl_sm[i].super.btl_eager_limit=mca_btl_sm_component.eager_limit;
        mca_btl_sm[i].super.btl_min_send_size=mca_btl_sm_component.max_frag_size;
        mca_btl_sm[i].super.btl_max_send_size=mca_btl_sm_component.max_frag_size;
        mca_btl_sm[i].super.btl_min_rdma_size=mca_btl_sm_component.max_frag_size;
        mca_btl_sm[i].super.btl_max_rdma_size=mca_btl_sm_component.max_frag_size;
        mca_btl_sm[i].super.btl_exclusivity=MCA_BTL_EXCLUSIVITY_HIGH-1; /* always use this ptl */
        mca_btl_sm[i].super.btl_latency=100; /* lowest latency */
        mca_btl_sm[i].super.btl_bandwidth=900; /* not really used now since exclusivity is set to 100 */
    }

    /* initialize some PTL data */
    /* start with no SM procs */
    mca_btl_sm_component.num_smp_procs = 0;
    mca_btl_sm_component.my_smp_rank = 0xFFFFFFFF; /* not defined */

    /* set flag indicating ptl not inited */
    mca_btl_sm[0].btl_inited=false;
    mca_btl_sm[1].btl_inited=false;

    return ptls;
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* SM component progress.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#if OMPI_ENABLE_PROGRESS_THREADS == 1
/*
 * Body of the fifo service thread: blocks reading one-byte commands
 * from the event fifo and drives component progress for each one,
 * returning on a DONE command or any read error/short read.
 */
void mca_btl_sm_component_event_thread(opal_object_t* thread)
{
    unsigned char cmd;

    for (;;) {
        if (sizeof(cmd) !=
            read(mca_btl_sm_component.sm_fifo_fd, &cmd, sizeof(cmd))) {
            /* error condition */
            return;
        }
        if (DONE == cmd) {
            /* return when done message received */
            return;
        }
        mca_btl_sm_component_progress();
    }
}
#endif
|
|
|
|
|
|
|
|
|
2005-06-30 09:50:55 +04:00
|
|
|
/*
 * SM component progress: polls the receive fifo from every SMP peer and
 * dispatches each fragment found (recv upcall for SEND fragments,
 * completion callback for ACK fragments).  Two passes are made: one
 * over peers that mapped the shared memory at the same base address as
 * this process (no pointer translation needed), and one over peers with
 * a different base address (every pointer read from the fifo must be
 * offset into this process's address space).
 *
 * @return number of fragments processed, or a negative/error rc if a
 *         fifo write-back of an ACK fails.
 */
int mca_btl_sm_component_progress(void)
{
    /* local variables */
    unsigned int peer_smp_rank ;
    mca_btl_sm_frag_t *frag;
    ompi_fifo_t *fifo = NULL;
    int my_smp_rank=mca_btl_sm_component.my_smp_rank;
    int proc;
    int rc = 0;

    /* send progress is made by the PML */

    /*
     * receive progress
     */

    /* poll each fifo */

    /* loop over fifo's - procs with same base shared memory
     * virtual address as this process */
    for( proc=0 ; proc < mca_btl_sm_component.num_smp_procs_same_base_addr
            ; proc++ )
    {
        peer_smp_rank= mca_btl_sm_component.list_smp_procs_same_base_addr[proc];
        fifo=&(mca_btl_sm_component.fifo[peer_smp_rank][my_smp_rank]);

        /* if fifo is not yet setup - continue - no data has been sent */
        if(OMPI_CB_FREE == fifo->tail){
            continue;
        }

        /* acquire thread lock protecting the fifo tail */
        if( opal_using_threads() ) {
            opal_atomic_lock( &(fifo->tail_lock) );
        }

        /* get pointer - pass in offset to change queue pointer
         * addressing from that of the sender. In this case, we know
         * that we have the same base address as the sender, so no
         * translation is necessary when accessing the fifo. Hence,
         * we use the _same_base_addr variant. */
        frag = (mca_btl_sm_frag_t *)
            ompi_fifo_read_from_tail_same_base_addr( fifo );
        if( OMPI_CB_FREE == frag ) {
            /* fifo empty - release thread lock and try next peer */
            if( opal_using_threads() ) {
                opal_atomic_unlock(&(fifo->tail_lock));
            }
            continue;
        }

        /* release thread lock */
        if( opal_using_threads() ) {
            opal_atomic_unlock(&(fifo->tail_lock));
        }

        /* dispatch fragment by type */
        switch(frag->type) {
        case MCA_BTL_SM_FRAG_ACK:
        {
            /* completion callback: swap descriptors back to the
             * sender's point of view before invoking it */
            frag->base.des_src = frag->base.des_dst;
            frag->base.des_src_cnt = frag->base.des_dst_cnt;
            frag->base.des_dst = NULL;
            frag->base.des_dst_cnt = 0;
            frag->base.des_cbfunc(&mca_btl_sm[0].super, frag->endpoint, &frag->base, frag->rc);
            break;
        }
        case MCA_BTL_SM_FRAG_SEND:
        {
            /* recv upcall: look up the registered receive callback for
             * this fragment's tag */
            mca_btl_sm_recv_reg_t* reg = mca_btl_sm[0].sm_reg + frag->tag;
            frag->base.des_dst = frag->base.des_src;
            frag->base.des_dst_cnt = frag->base.des_src_cnt;
            frag->base.des_src = NULL;
            frag->base.des_src_cnt = 0;
            reg->cbfunc(&mca_btl_sm[0].super,frag->tag,&frag->base,reg->cbdata);
            /* return the fragment to the sender as an ACK */
            frag->type = MCA_BTL_SM_FRAG_ACK;
            MCA_BTL_SM_FIFO_WRITE( mca_btl_sm_component.sm_peers[peer_smp_rank],
                    my_smp_rank, peer_smp_rank, frag, rc );
            if(OMPI_SUCCESS != rc)
                return rc;
            break;
        }
        default:
        {
            /* unknown fragment type - bounce it back with an error rc */
            frag->rc = OMPI_ERROR;
            frag->type = MCA_BTL_SM_FRAG_ACK;
            MCA_BTL_SM_FIFO_WRITE( mca_btl_sm_component.sm_peers[peer_smp_rank],
                    my_smp_rank, peer_smp_rank, frag, rc );
            if(OMPI_SUCCESS != rc)
                return rc;
            break;
        }
        }
        rc++;
    } /* end peer_local_smp_rank loop */

    /* loop over fifo's - procs with different base shared memory
     * virtual address as this process */
    for( proc=0 ; proc < mca_btl_sm_component.num_smp_procs_different_base_addr
            ; proc++ )
    {
        peer_smp_rank= mca_btl_sm_component.list_smp_procs_different_base_addr[proc];
        fifo=&(mca_btl_sm_component.fifo[peer_smp_rank][my_smp_rank]);

        /* if fifo is not yet setup - continue - no data has been sent */
        if(OMPI_CB_FREE == fifo->tail){
            continue;
        }

        /* acquire thread lock protecting the fifo tail */
        if( opal_using_threads() ) {
            opal_atomic_lock(&(fifo->tail_lock));
        }

        /* get pointer - pass in offset to change queue pointer
         * addressing from that of the sender. In this case, we do
         * *not* have the same base address as the sender, so we must
         * translate every access into the fifo to be relevant to our
         * memory space. Hence, we do *not* use the _same_base_addr
         * variant. */
        frag=(mca_btl_sm_frag_t *)ompi_fifo_read_from_tail( fifo,
                mca_btl_sm_component.sm_offset[peer_smp_rank]);
        if( OMPI_CB_FREE == frag ) {
            /* fifo empty - release thread lock and try next peer */
            if( opal_using_threads() ) {
                opal_atomic_unlock(&(fifo->tail_lock));
            }
            continue;
        }

        /* release thread lock */
        if( opal_using_threads() ) {
            opal_atomic_unlock(&(fifo->tail_lock));
        }

        /* change the address from address relative to the shared
         * memory address, to a true virtual address */
        frag = (mca_btl_sm_frag_t *)( (char *)frag +
                mca_btl_sm_component.sm_offset[peer_smp_rank]);

        /* dispatch fragment by type */
        switch(frag->type) {
        case MCA_BTL_SM_FRAG_ACK:
        {
            /* completion callback: translate the descriptor and its
             * segment address into this process's address space before
             * handing it back */
            frag->base.des_src =
                ( mca_btl_base_segment_t* )((unsigned char*)frag->base.des_dst + mca_btl_sm_component.sm_offset[peer_smp_rank]);
            frag->base.des_src->seg_addr.pval =
                ((unsigned char*)frag->base.des_src->seg_addr.pval +
                 mca_btl_sm_component.sm_offset[peer_smp_rank]);
            frag->base.des_src_cnt = frag->base.des_dst_cnt;
            frag->base.des_dst = NULL;
            frag->base.des_dst_cnt = 0;
            frag->base.des_cbfunc(&mca_btl_sm[1].super, frag->endpoint, &frag->base, frag->rc);
            break;
        }
        case MCA_BTL_SM_FRAG_SEND:
        {
            /* recv upcall: translate descriptor and payload pointers by
             * the peer's offset, then invoke the registered callback */
            mca_btl_sm_recv_reg_t* reg = mca_btl_sm[1].sm_reg + frag->tag;
            frag->base.des_dst = (mca_btl_base_segment_t*)
                ((unsigned char*)frag->base.des_src + mca_btl_sm_component.sm_offset[peer_smp_rank]);
            frag->base.des_dst->seg_addr.pval =
                ((unsigned char*)frag->base.des_dst->seg_addr.pval +
                 mca_btl_sm_component.sm_offset[peer_smp_rank]);
            frag->base.des_dst_cnt = frag->base.des_src_cnt;
            frag->base.des_src = NULL;
            frag->base.des_src_cnt = 0;
            reg->cbfunc(&mca_btl_sm[1].super,frag->tag,&frag->base,reg->cbdata);
            /* return the fragment to the sender as an ACK */
            frag->type = MCA_BTL_SM_FRAG_ACK;
            MCA_BTL_SM_FIFO_WRITE( mca_btl_sm_component.sm_peers[peer_smp_rank],
                    my_smp_rank, peer_smp_rank, frag, rc );
            if(OMPI_SUCCESS != rc)
                return rc;
            break;
        }
        default:
        {
            /* unknown fragment type - bounce it back with an error rc */
            frag->rc = OMPI_ERROR;
            frag->type = MCA_BTL_SM_FRAG_ACK;
            MCA_BTL_SM_FIFO_WRITE( mca_btl_sm_component.sm_peers[peer_smp_rank],
                    my_smp_rank, peer_smp_rank, frag, rc );
            if(OMPI_SUCCESS != rc)
                return rc;
            break;
        }
        }
        rc++;
    } /* end peer_local_smp_rank loop */
    return rc;
}
|