/*
 * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation. All rights reserved.
 * Copyright (c) 2004-2005 The University of Tennessee and The University
 *                         of Tennessee Research Foundation. All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart. All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */

#include "ompi_config.h"
#include "opal/sys/cache.h"
#include "opal/event/event.h"
#include "mpi.h"
#include "ompi/mca/pml/pml.h"
#include "ompi/mca/btl/btl.h"
#include "ompi/mca/btl/base/base.h"
#include "opal/mca/base/mca_base_param.h"
#include "ompi/mca/pml/base/pml_base_bsend.h"
#include "pml_ob1.h"
#include "pml_ob1_proc.h"
#include "pml_ob1_hdr.h"
#include "pml_ob1_sendreq.h"
#include "pml_ob1_recvreq.h"
#include "pml_ob1_rdmafrag.h"
#include "pml_ob1_recvfrag.h"
#include "ompi/mca/bml/base/base.h"

mca_pml_base_component_1_0_0_t mca_pml_ob1_component = {

    /* First, the mca_base_component_t struct containing meta
       information about the component itself */

    {
      /* Indicate that we are a pml v1.0.0 component (which also implies
         a specific MCA version) */

      MCA_PML_BASE_VERSION_1_0_0,

      "ob1",                       /* MCA component name */
      OMPI_MAJOR_VERSION,          /* MCA component major version */
      OMPI_MINOR_VERSION,          /* MCA component minor version */
      OMPI_RELEASE_VERSION,        /* MCA component release version */
      mca_pml_ob1_component_open,  /* component open */
      mca_pml_ob1_component_close  /* component close */
    },

    /* Next the MCA v1.0.0 component meta data */

    {
      /* Whether the component is checkpointable or not */
      false
    },

    mca_pml_ob1_component_init,  /* component init */
    mca_pml_ob1_component_fini   /* component finalize */
};
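
/* Register an integer MCA parameter for the ob1 PML and return its
 * current value (the registered default unless overridden by the user). */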
static inline int mca_pml_ob1_param_register_int(
    const char* param_name,
    int default_value)
{
    int id = mca_base_param_register_int("pml", "ob1", param_name, NULL, default_value);
    int param_value = default_value;
    mca_base_param_lookup_int(id, &param_value);
    return param_value;
}
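
/* Open the component: register the ob1 MCA parameters (free list sizing,
 * priority, eager limit, pipeline depths), construct the request/fragment
 * free lists and pending-operation lists, and open the BML framework. */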
int mca_pml_ob1_component_open(void)
{
    int param, value;

    mca_pml_ob1.free_list_num =
        mca_pml_ob1_param_register_int("free_list_num", 4);
    mca_pml_ob1.free_list_max =
        mca_pml_ob1_param_register_int("free_list_max", -1);
    mca_pml_ob1.free_list_inc =
        mca_pml_ob1_param_register_int("free_list_inc", 64);
    mca_pml_ob1.priority =
        mca_pml_ob1_param_register_int("priority", 1);
    mca_pml_ob1.eager_limit =
        mca_pml_ob1_param_register_int("eager_limit", 128 * 1024);
    mca_pml_ob1.send_pipeline_depth =
        mca_pml_ob1_param_register_int("send_pipeline_depth", 3);
    mca_pml_ob1.recv_pipeline_depth =
        mca_pml_ob1_param_register_int("recv_pipeline_depth", 4);

    OBJ_CONSTRUCT(&mca_pml_ob1.lock, opal_mutex_t);

    /* requests */
    OBJ_CONSTRUCT(&mca_pml_ob1.send_requests, ompi_free_list_t);
    ompi_free_list_init(
        &mca_pml_ob1.send_requests,
        sizeof(mca_pml_ob1_send_request_t),
        OBJ_CLASS(mca_pml_ob1_send_request_t),
        mca_pml_ob1.free_list_num,
        mca_pml_ob1.free_list_max,
        mca_pml_ob1.free_list_inc,
        NULL);

    OBJ_CONSTRUCT(&mca_pml_ob1.recv_requests, ompi_free_list_t);
    ompi_free_list_init(
        &mca_pml_ob1.recv_requests,
        sizeof(mca_pml_ob1_recv_request_t),
        OBJ_CLASS(mca_pml_ob1_recv_request_t),
        mca_pml_ob1.free_list_num,
        mca_pml_ob1.free_list_max,
        mca_pml_ob1.free_list_inc,
        NULL);

    /* fragments */
    OBJ_CONSTRUCT(&mca_pml_ob1.rdma_frags, ompi_free_list_t);
    ompi_free_list_init(
        &mca_pml_ob1.rdma_frags,
        sizeof(mca_pml_ob1_rdma_frag_t),
        OBJ_CLASS(mca_pml_ob1_rdma_frag_t),
        mca_pml_ob1.free_list_num,
        mca_pml_ob1.free_list_max,
        mca_pml_ob1.free_list_inc,
        NULL);

    OBJ_CONSTRUCT(&mca_pml_ob1.recv_frags, ompi_free_list_t);
    ompi_free_list_init(
        &mca_pml_ob1.recv_frags,
        sizeof(mca_pml_ob1_recv_frag_t),
        OBJ_CLASS(mca_pml_ob1_recv_frag_t),
        mca_pml_ob1.free_list_num,
        mca_pml_ob1.free_list_max,
        mca_pml_ob1.free_list_inc,
        NULL);

    OBJ_CONSTRUCT(&mca_pml_ob1.buffers, ompi_free_list_t);

    /* pending operations */
    OBJ_CONSTRUCT(&mca_pml_ob1.send_pending, opal_list_t);
    OBJ_CONSTRUCT(&mca_pml_ob1.recv_pending, opal_list_t);
    OBJ_CONSTRUCT(&mca_pml_ob1.acks_pending, opal_list_t);
    OBJ_CONSTRUCT(&mca_pml_ob1.rdma_pending, opal_list_t);
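
    /* Mirror the global mpi_leave_pinned and mpi_leave_pinned_pipeline
     * parameters into the ob1 module; if both are set, leave_pinned
     * takes precedence and a warning is emitted below. */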
    mca_base_param_register_int("mpi", NULL, "leave_pinned", "leave_pinned", 0);
    param = mca_base_param_find("mpi", NULL, "leave_pinned");
    mca_base_param_lookup_int(param, &value);
    mca_pml_ob1.leave_pinned = value;

    mca_base_param_register_int("mpi", NULL, "leave_pinned_pipeline", "leave_pinned_pipeline", 0);
    param = mca_base_param_find("mpi", NULL, "leave_pinned_pipeline");
    mca_base_param_lookup_int(param, &value);
    mca_pml_ob1.leave_pinned_pipeline = value;

    if(mca_pml_ob1.leave_pinned_pipeline && mca_pml_ob1.leave_pinned) {
        mca_pml_ob1.leave_pinned_pipeline = 0;
        opal_output(0, "WARNING: Cannot set both mpi_leave_pinned and mpi_leave_pinned_pipeline, defaulting to mpi_leave_pinned ONLY\n");
    }

    mca_pml_ob1.enabled = false;
    return mca_bml_base_open();
}
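
/* Close the component: if it was ever enabled, close the BML framework
 * and tear down the free lists and pending-operation lists built in
 * mca_pml_ob1_component_open(). */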
int mca_pml_ob1_component_close(void)
{
    int rc;

    if(!mca_pml_ob1.enabled)
        return OMPI_SUCCESS; /* never selected.. return success.. */

    if(OMPI_SUCCESS != (rc = mca_bml_base_close()))
        return rc;

    OBJ_DESTRUCT(&mca_pml_ob1.acks_pending);
    OBJ_DESTRUCT(&mca_pml_ob1.send_pending);
    OBJ_DESTRUCT(&mca_pml_ob1.recv_pending);
    OBJ_DESTRUCT(&mca_pml_ob1.send_requests);
    OBJ_DESTRUCT(&mca_pml_ob1.recv_requests);
    OBJ_DESTRUCT(&mca_pml_ob1.rdma_frags);
    OBJ_DESTRUCT(&mca_pml_ob1.recv_frags);
    OBJ_DESTRUCT(&mca_pml_ob1.buffers);
    OBJ_DESTRUCT(&mca_pml_ob1.lock);
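
    /* Disabled diagnostic: report send/recv requests that were allocated
     * but never returned to their free lists. */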
#if 0
    if (mca_pml_ob1.send_requests.fl_num_allocated !=
        mca_pml_ob1.send_requests.super.opal_list_length) {
        opal_output(0, "ob1 send requests: %d allocated %d returned\n",
            mca_pml_ob1.send_requests.fl_num_allocated,
            mca_pml_ob1.send_requests.super.opal_list_length);
    }
    if (mca_pml_ob1.recv_requests.fl_num_allocated !=
        mca_pml_ob1.recv_requests.super.opal_list_length) {
        opal_output(0, "ob1 recv requests: %d allocated %d returned\n",
            mca_pml_ob1.recv_requests.fl_num_allocated,
            mca_pml_ob1.recv_requests.super.opal_list_length);
    }
#endif

    return OMPI_SUCCESS;
}
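
/* Select-time initialization: publish this component's priority,
 * initialize buffered-send support and the BML, and point the PML
 * progress function directly at the BML progress function. */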
mca_pml_base_module_t* mca_pml_ob1_component_init(int* priority,
                                                  bool enable_progress_threads,
                                                  bool enable_mpi_threads)
{
    *priority = mca_pml_ob1.priority;

    /* buffered send */
    if(OMPI_SUCCESS != mca_pml_base_bsend_init(enable_mpi_threads)) {
        opal_output(0, "mca_pml_ob1_component_init: mca_pml_bsend_init failed\n");
        return NULL;
    }

    if(OMPI_SUCCESS != mca_bml_base_init( enable_progress_threads, enable_mpi_threads))
        return NULL;

    /* As our own progress function does nothing except calling the BML
     * progress, let's modify the progress function pointer in our structure
     * to avoid useless function calls. The event library will instead call
     * the BML function directly.
     */
    mca_pml_ob1.super.pml_progress = mca_bml.bml_progress;

    return &mca_pml_ob1.super;
}