1
1

Change the orientation of the thread level determination in
ompi_mpi_init(): we no longer *query* the components to see what they
can support -- instead, we *tell* the components what they need to
support (if they can't support it, they should disqualify themselves
from selection).

This involved a bunch of things:
- pass the configure-time-decided OMPI_ENABLE_PROGRESS_THREADS and
  OMPI_ENABLE_MPI_THREADS constants to each framework selection
  function
- eliminate mca_base_init_select_components()
- eliminate all "allow_multi_user_threads" and "have_hidden_threads"
  kinds of arguments and aggregation from various framework selection
  and component/module functions
- correctly determine the max MPI thread level that we can support and
  limit the final MPI thread level as appropriate during
  ompi_mpi_init()

As a side effect, while editing nearly every MPI component I also did
the following:
- found 2 places (gm and ib ptls) where ompi_set_using_threads() was
  erroneously being called (I think this is left over kruft from
  misunderstandings by the original authors).  I removed these.

This commit was SVN r5055.
Этот коммит содержится в:
Jeff Squyres 2005-03-27 13:05:23 +00:00
родитель bd1ae3e657
Коммит e9ae621323
59 изменённых файлов: 314 добавлений и 446 удалений

Просмотреть файл

@ -99,8 +99,9 @@ typedef void* (*mca_allocator_base_component_segment_free_fn_t)(void* segment);
/**
* The function used to initialize the component.
*/
typedef struct mca_allocator_base_module_t* (*mca_allocator_base_component_init_fn_t)(
bool *allow_multi_user_threads,
typedef struct mca_allocator_base_module_t*
(*mca_allocator_base_component_init_fn_t)(
bool enable_mpi_threads,
mca_allocator_base_component_segment_alloc_fn_t segment_alloc,
mca_allocator_base_component_segment_free_fn_t segment_free
);

Просмотреть файл

@ -74,16 +74,16 @@ int mca_allocator_basic_component_close(void)
*/
mca_allocator_base_module_t* mca_allocator_basic_component_init(
bool *allow_multi_user_threads,
bool enable_mpi_threads,
mca_allocator_base_component_segment_alloc_fn_t segment_alloc,
mca_allocator_base_component_segment_free_fn_t segment_free)
{
mca_allocator_basic_module_t *module = (mca_allocator_basic_module_t *)
malloc(sizeof(mca_allocator_basic_module_t));
if(module == NULL)
if (NULL == module) {
return NULL;
}
*allow_multi_user_threads &= true;
module->super.alc_alloc = mca_allocator_basic_alloc;
module->super.alc_realloc = mca_allocator_basic_realloc;
module->super.alc_free = mca_allocator_basic_free;

Просмотреть файл

@ -71,7 +71,7 @@ int mca_allocator_basic_component_close(void);
* The function used to initialize the component.
*/
mca_allocator_base_module_t* mca_allocator_basic_component_init(
bool *allow_multi_user_threads,
bool enable_mpi_threads,
mca_allocator_base_component_segment_alloc_fn_t segment_alloc,
mca_allocator_base_component_segment_free_fn_t segment_free
);

Просмотреть файл

@ -21,7 +21,7 @@
#include "mca/allocator/bucket/allocator_bucket_alloc.h"
struct mca_allocator_base_module_t* mca_allocator_bucket_module_init(
bool *allow_multi_user_threads,
bool enable_mpi_threads,
mca_allocator_base_component_segment_alloc_fn_t segment_alloc,
mca_allocator_base_component_segment_free_fn_t segment_free);
@ -43,7 +43,7 @@ int mca_allocator_bucket_finalize(struct mca_allocator_base_module_t* allocator)
}
struct mca_allocator_base_module_t* mca_allocator_bucket_module_init(
bool *allow_multi_user_threads,
bool enable_mpi_threads,
mca_allocator_base_component_segment_alloc_fn_t segment_alloc,
mca_allocator_base_component_segment_free_fn_t segment_free)
{

Просмотреть файл

@ -53,7 +53,6 @@ libmca_base_la_SOURCES = \
mca_base_component_repository.c \
mca_base_components_open.c \
mca_base_components_close.c \
mca_base_init_select_components.c \
mca_base_list.c \
mca_base_module_exchange.c \
mca_base_msgbuf.c \

Просмотреть файл

@ -52,8 +52,6 @@ struct mca_base_component_priority_list_item_t {
mca_base_component_list_item_t super;
int cpli_priority;
bool cpli_allow_multi_user_threads;
bool cpli_have_hidden_threads;
};
typedef struct mca_base_component_priority_list_item_t
mca_base_component_priority_list_item_t;
@ -146,11 +144,6 @@ OMPI_DECLSPEC int mca_base_components_open(const char *type_name, int output_id
OMPI_DECLSPEC int mca_base_components_close(int output_id, ompi_list_t *components_available,
const mca_base_component_t *skip);
/* mca_base_init_select_components.c */
OMPI_DECLSPEC int mca_base_init_select_components(int requested,
int *provided);
#if 0
/* JMS Not implemented yet */
int mca_base_init_callback(mca_base_init_cb_t func);

Просмотреть файл

@ -1,109 +0,0 @@
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University.
* All rights reserved.
* Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
* All rights reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include <stdio.h>
#include "include/constants.h"
#include "class/ompi_list.h"
#include "mca/base/base.h"
#include "mca/coll/coll.h"
#include "mca/coll/base/base.h"
#include "mca/ptl/ptl.h"
#include "mca/ptl/base/base.h"
#include "mca/pml/pml.h"
#include "mca/pml/base/base.h"
#include "mca/mpool/base/base.h"
#include "mca/io/base/base.h"
/*
* Look at available pml, ptl, and coll modules and find a set that
* works nicely. Also set the final MPI thread level. There are many
* factors involved here, and this first implementation is rather
* simplistic.
*
* The contents of this function will likely be replaced
*/
int mca_base_init_select_components(int requested, int *provided)
{
bool user_threads, hidden_threads;
bool user_cumulative, hidden_cumulative;
/* Set initial values */
user_cumulative = true;
hidden_cumulative = false;
/* Make final lists of available modules (i.e., call the query/init
functions and see if they return happiness). For pml, there will
only be one (because there's only one for the whole process), but
for ptl and coll, we'll get lists back. */
if (OMPI_SUCCESS != mca_mpool_base_init(&user_threads)) {
return OMPI_ERROR;
}
user_cumulative &= user_threads;
/* JMS: At some point, we'll need to feed it the thread level to
ensure to pick one high enough (e.g., if we need CR) */
user_threads = true;
hidden_threads = false;
if (OMPI_SUCCESS != mca_pml_base_select(&mca_pml,
&user_threads, &hidden_threads)) {
return OMPI_ERROR;
}
user_cumulative &= user_threads;
hidden_cumulative |= hidden_threads;
if (OMPI_SUCCESS != mca_ptl_base_select(&user_threads, &hidden_threads)) {
return OMPI_ERROR;
}
user_cumulative &= user_threads;
hidden_cumulative |= hidden_threads;
if (OMPI_SUCCESS != mca_coll_base_find_available(&user_threads,
&hidden_threads)) {
return OMPI_ERROR;
}
user_cumulative &= user_threads;
hidden_cumulative |= hidden_threads;
/* io and topo components are selected later, because the io framework is
opened lazily (at the first MPI_File_* function invocation). */
/* Now that we have a final list of all available modules, do the
selection. pml is already selected. */
/* JMS ...Do more here with the thread level, etc.... */
*provided = requested;
if (hidden_cumulative) {
ompi_set_using_threads(true);
}
/* Tell the selected pml module about all the selected ptl
modules */
mca_pml.pml_add_ptls(&mca_ptl_base_modules_initialized);
/* All done */
return OMPI_SUCCESS;
}

Просмотреть файл

@ -59,6 +59,4 @@ static void cpl_constructor(ompi_object_t *obj)
mca_base_component_priority_list_item_t *cpli =
(mca_base_component_priority_list_item_t *) obj;
cpli->cpli_priority = -1;
cpli->cpli_allow_multi_user_threads = false;
cpli->cpli_have_hidden_threads = false;
}

Просмотреть файл

@ -90,8 +90,8 @@ OMPI_DECLSPEC int mca_coll_base_open(void);
* functions -- it is not considered a public interface member --
* and is only mentioned here for completeness.
*/
OMPI_DECLSPEC int mca_coll_base_find_available(bool *allow_multi_user_threads,
bool *have_hidden_threads);
OMPI_DECLSPEC int mca_coll_base_find_available(bool enable_progress_threads,
bool enable_mpi_threads);
/**
* Select an available component for a new communicator.

Просмотреть файл

@ -43,9 +43,13 @@ const mca_coll_base_component_1_0_0_t *mca_coll_base_basic_component = NULL;
* Private functions
*/
static int init_query(const mca_base_component_t *ls,
mca_base_component_priority_list_item_t *entry);
mca_base_component_priority_list_item_t *entry,
bool enable_progress_threads,
bool enable_mpi_threads);
static int init_query_1_0_0(const mca_base_component_t *ls,
mca_base_component_priority_list_item_t *entry);
mca_base_component_priority_list_item_t *entry,
bool enable_progress_threads,
bool enable_mpi_threads);
/*
* Scan down the list of successfully opened components and query each of
@ -60,8 +64,8 @@ static int init_query_1_0_0(const mca_base_component_t *ls,
* it in a global variable so that we can find it easily later (e.g.,
* during scope selection).
*/
int mca_coll_base_find_available(bool *allow_multi_user_threads,
bool *have_hidden_threads)
int mca_coll_base_find_available(bool enable_progress_threads,
bool enable_mpi_threads)
{
bool found = false;
mca_base_component_priority_list_item_t *entry;
@ -88,7 +92,9 @@ int mca_coll_base_find_available(bool *allow_multi_user_threads,
entry = OBJ_NEW(mca_base_component_priority_list_item_t);
entry->super.cli_component = component;
entry->cpli_priority = 0;
if (OMPI_SUCCESS == init_query(component, entry)) {
if (OMPI_SUCCESS == init_query(component, entry,
enable_progress_threads,
enable_mpi_threads)) {
/* Is this the basic component? If so, save it, because it's
special. Keep it off the available list -- we'll use it
@ -160,7 +166,9 @@ int mca_coll_base_find_available(bool *allow_multi_user_threads,
* some information. If it doesn't, close it.
*/
static int init_query(const mca_base_component_t *m,
mca_base_component_priority_list_item_t *entry)
mca_base_component_priority_list_item_t *entry,
bool enable_progress_threads,
bool enable_mpi_threads)
{
int ret;
@ -174,7 +182,8 @@ static int init_query(const mca_base_component_t *m,
if (1 == m->mca_type_major_version &&
0 == m->mca_type_minor_version &&
0 == m->mca_type_release_version) {
ret = init_query_1_0_0(m, entry);
ret = init_query_1_0_0(m, entry, enable_progress_threads,
enable_mpi_threads);
} else {
/* Unrecognized coll API version */
@ -210,13 +219,14 @@ static int init_query(const mca_base_component_t *m,
/*
* Query a specific component, coll v1.0.0
*/
static int
init_query_1_0_0(const mca_base_component_t *component,
mca_base_component_priority_list_item_t *entry)
static int init_query_1_0_0(const mca_base_component_t *component,
mca_base_component_priority_list_item_t *entry,
bool enable_progress_threads,
bool enable_mpi_threads)
{
mca_coll_base_component_1_0_0_t *coll =
(mca_coll_base_component_1_0_0_t *) component;
return coll->collm_init_query(&(entry->cpli_allow_multi_user_threads),
&(entry->cpli_have_hidden_threads));
return coll->collm_init_query(enable_progress_threads,
enable_mpi_threads);
}

Просмотреть файл

@ -44,8 +44,8 @@ OMPI_COMP_EXPORT extern int mca_coll_basic_priority_param;
/* API functions */
int mca_coll_basic_init_query(bool *allow_multi_user_threads,
bool *have_hidden_threads);
int mca_coll_basic_init_query(bool enable_progress_threads,
bool enable_mpi_threads);
const struct mca_coll_base_module_1_0_0_t *
mca_coll_basic_comm_query(struct ompi_communicator_t *comm, int *priority,
struct mca_coll_base_comm_t **data);

Просмотреть файл

@ -126,15 +126,12 @@ static const mca_coll_base_module_1_0_0_t inter_linear = {
* Initial query function that is invoked during MPI_INIT, allowing
* this module to indicate what level of thread support it provides.
*/
int mca_coll_basic_init_query(bool *allow_multi_user_threads,
bool *have_hidden_user_threads)
int mca_coll_basic_init_query(bool enable_progress_threads,
bool enable_mpi_threads)
{
*allow_multi_user_threads = true;
*have_hidden_user_threads = false;
/* All done */
/* Nothing to do */
return OMPI_SUCCESS;
return OMPI_SUCCESS;
}

Просмотреть файл

@ -36,7 +36,7 @@ struct mca_coll_base_comm_t;
*/
typedef int (*mca_coll_base_component_init_query_fn_t)
(bool *allow_multi_user_threads, bool *have_hidden_threads);
(bool enable_progress_threads, bool enable_mpi_threads);
typedef const struct mca_coll_base_module_1_0_0_t *
(*mca_coll_base_component_comm_query_1_0_0_fn_t)
(struct ompi_communicator_t *comm, int *priority,

Просмотреть файл

@ -39,8 +39,8 @@ extern "C" {
/* Component functions */
int mca_coll_demo_init_query(bool *allow_demo_user_threads,
bool *have_hidden_threads);
int mca_coll_demo_init_query(bool enable_progress_threads,
bool enable_mpi_threads);
const struct mca_coll_base_module_1_0_0_t *
mca_coll_demo_comm_query(struct ompi_communicator_t *comm, int *priority,
struct mca_coll_base_comm_t **data);

Просмотреть файл

@ -92,13 +92,10 @@ static const mca_coll_base_module_1_0_0_t inter = {
* Initial query function that is invoked during MPI_INIT, allowing
* this module to indicate what level of thread support it provides.
*/
int mca_coll_demo_init_query(bool *allow_demo_user_threads,
bool *have_hidden_user_threads)
int mca_coll_demo_init_query(bool enable_progress_threads,
bool enable_mpi_threads)
{
*allow_demo_user_threads = true;
*have_hidden_user_threads = false;
/* All done */
/* Nothing to do */
return OMPI_SUCCESS;
}

Просмотреть файл

@ -44,8 +44,8 @@ OMPI_COMP_EXPORT extern int mca_coll_self_priority_param;
/* API functions */
int mca_coll_self_init_query(bool *allow_multi_user_threads,
bool *have_hidden_threads);
int mca_coll_self_init_query(bool enable_progress_threads,
bool enable_mpi_threads);
const struct mca_coll_base_module_1_0_0_t *
mca_coll_self_comm_query(struct ompi_communicator_t *comm, int *priority,
struct mca_coll_base_comm_t **data);

Просмотреть файл

@ -62,13 +62,10 @@ static const mca_coll_base_module_1_0_0_t module = {
* Initial query function that is invoked during MPI_INIT, allowing
* this module to indicate what level of thread support it provides.
*/
int mca_coll_self_init_query(bool *allow_multi_user_threads,
bool *have_hidden_user_threads)
int mca_coll_self_init_query(bool enable_progress_threads,
bool enable_mpi_threads)
{
*allow_multi_user_threads = true;
*have_hidden_user_threads = false;
/* All done */
/* Nothing to do */
return OMPI_SUCCESS;
}

Просмотреть файл

@ -57,15 +57,15 @@ extern "C" {
* coll module functions
*/
int mca_coll_sm_init_query(bool *allow_multi_user_threads,
bool *have_hidden_threads);
int mca_coll_sm_init_query(bool enable_progress_threads,
bool enable_mpi_threads);
const struct mca_coll_base_module_1_0_0_t *
mca_coll_sm_comm_query(struct ompi_communicator_t *comm, int *priority,
mca_coll_base_module_comm_t **data);
struct mca_coll_base_comm_t **data);
int mca_coll_sm_comm_unquery(struct ompi_communicator_t *comm,
mca_coll_base_module_comm_t *data);
struct mca_coll_base_comm_t *data);
const struct mca_coll_base_module_1_0_0_t *
mca_coll_sm_module_init(struct ompi_communicator_t *comm);

Просмотреть файл

@ -62,13 +62,10 @@ int mca_coll_sm_param_priority = -1;
* Initial query function that is invoked during MPI_INIT, allowing
* this module to indicate what level of thread support it provides.
*/
int mca_coll_sm_init_query(bool *allow_multi_user_threads,
bool *have_hidden_user_threads)
int mca_coll_sm_init_query(bool enable_progress_threads,
bool enable_mpi_threads)
{
*allow_multi_user_threads = true;
*have_hidden_user_threads = false;
/* All done */
/* Nothing to do */
return OMPI_SUCCESS;
}
@ -81,7 +78,7 @@ int mca_coll_sm_init_query(bool *allow_multi_user_threads,
*/
const mca_coll_base_module_1_0_0_t *
mca_coll_sm_comm_query(struct ompi_communicator_t *comm, int *priority,
mca_coll_base_module_comm_t **data)
struct mca_coll_base_comm_t **data)
{
/* If we're intercomm, or if there's only one process in the
communicator, we don't want to run */
@ -117,7 +114,7 @@ mca_coll_sm_comm_query(struct ompi_communicator_t *comm, int *priority,
* Unquery the coll on comm
*/
int mca_coll_sm_comm_unquery(struct ompi_communicator_t *comm,
mca_coll_base_module_comm_t *data)
struct mca_coll_base_comm_t *data)
{
/* JMS */
/* Remove mpool query, if we got one */

Просмотреть файл

@ -85,8 +85,8 @@ extern "C" {
* functions -- it is not considered a public interface member --
* and is only mentioned here for completeness.
*/
OMPI_DECLSPEC int mca_io_base_find_available(bool *allow_multi_user_threads,
bool *have_hidden_threads);
OMPI_DECLSPEC int mca_io_base_find_available(bool enable_progress_threads,
bool enable_mpi_threads);
/**
* Select an available component for a new file handle.

Просмотреть файл

@ -35,9 +35,13 @@
* Private functions
*/
static int init_query(const mca_base_component_t *ls,
mca_base_component_priority_list_item_t *entry);
mca_base_component_priority_list_item_t *entry,
bool enable_progress_threads,
bool enable_mpi_threads);
static int init_query_1_0_0(const mca_base_component_t *ls,
mca_base_component_priority_list_item_t *entry);
mca_base_component_priority_list_item_t *entry,
bool enable_progress_threads,
bool enable_mpi_threads);
/*
* Scan down the list of successfully opened components and query each of
@ -52,8 +56,8 @@ static int init_query_1_0_0(const mca_base_component_t *ls,
* Appropriate run-time MPI exceptions will be invoked during
* MPI_FILE_OPEN and MPI_FILE_DELETE.
*/
int mca_io_base_find_available(bool *allow_multi_user_threads,
bool *have_hidden_threads)
int mca_io_base_find_available(bool enable_progress_threads,
bool enable_mpi_threads)
{
int err;
mca_base_component_priority_list_item_t *entry;
@ -79,7 +83,9 @@ int mca_io_base_find_available(bool *allow_multi_user_threads,
entry = OBJ_NEW(mca_base_component_priority_list_item_t);
entry->super.cli_component = component;
entry->cpli_priority = 0;
if (OMPI_SUCCESS == init_query(component, entry)) {
if (OMPI_SUCCESS == init_query(component, entry,
enable_progress_threads,
enable_mpi_threads)) {
/* Save the results in the list. The priority isn't
relevant, because selection is decided at
@ -127,7 +133,9 @@ int mca_io_base_find_available(bool *allow_multi_user_threads,
* some information. If it doesn't, close it.
*/
static int init_query(const mca_base_component_t *m,
mca_base_component_priority_list_item_t *entry)
mca_base_component_priority_list_item_t *entry,
bool enable_progress_threads,
bool enable_mpi_threads)
{
int ret;
@ -141,7 +149,8 @@ static int init_query(const mca_base_component_t *m,
if (1 == m->mca_type_major_version &&
0 == m->mca_type_minor_version &&
0 == m->mca_type_release_version) {
ret = init_query_1_0_0(m, entry);
ret = init_query_1_0_0(m, entry, enable_progress_threads,
enable_mpi_threads);
} else {
/* Unrecognized io API version */
@ -178,13 +187,14 @@ static int init_query(const mca_base_component_t *m,
/*
* Query a specific component, io v1.0.0
*/
static int
init_query_1_0_0(const mca_base_component_t *component,
mca_base_component_priority_list_item_t *entry)
static int init_query_1_0_0(const mca_base_component_t *component,
mca_base_component_priority_list_item_t *entry,
bool enable_progress_threads,
bool enable_mpi_threads)
{
mca_io_base_component_1_0_0_t *io =
(mca_io_base_component_1_0_0_t *) component;
return io->io_init_query(&(entry->cpli_allow_multi_user_threads),
&(entry->cpli_have_hidden_threads));
return io->io_init_query(enable_progress_threads,
enable_mpi_threads);
}

Просмотреть файл

@ -79,7 +79,7 @@ typedef enum mca_io_base_version_t mca_io_base_version_t;
struct mca_io_base_module_1_0_0_t;
typedef int (*mca_io_base_component_init_query_fn_t)
(bool *allow_multi_user_threads, bool *have_hidden_threads);
(bool enable_progress_threads, bool enable_mpi_threads);
typedef const struct mca_io_base_module_1_0_0_t *
(*mca_io_base_component_file_query_1_0_0_fn_t)
(struct ompi_file_t *file, struct mca_io_base_file_t **private_data,

Просмотреть файл

@ -29,8 +29,8 @@
*/
static int open_component(void);
static int close_component(void);
static int init_query(bool *enable_multi_user_threads,
bool *have_hidden_threads);
static int init_query(bool enable_progress_threads,
bool enable_mpi_threads);
static const struct mca_io_base_module_1_0_0_t *
file_query(struct ompi_file_t *file,
struct mca_io_base_file_t **private_data,
@ -147,20 +147,14 @@ static int close_component(void)
}
static int init_query(bool *allow_multi_user_threads,
bool *have_hidden_threads)
static int init_query(bool enable_progress_threads,
bool enable_mpi_threads)
{
/* Note that we say "true" for multi user threads here because we
/* Note that it's ok if mpi_enable_threads==true here because we
self-enforce only allowing one user thread into ROMIO at a time
-- this fact will be clearly documented for users (ROMIO itself
is not thread safe). */
*allow_multi_user_threads = true;
*have_hidden_threads = ompi_using_threads();
/* Don't launch a progress thread here -- we'll launch one the
first time a ROMIO module is initialized */
return OMPI_SUCCESS;
}

Просмотреть файл

@ -43,7 +43,8 @@ OBJ_CLASS_DECLARATION(mca_mpool_base_selected_module_t);
*/
OMPI_DECLSPEC int mca_mpool_base_open(void);
OMPI_DECLSPEC int mca_mpool_base_init(bool *allow_multi_user_threads);
OMPI_DECLSPEC int mca_mpool_base_init(bool enable_progress_threads,
bool enable_mpi_threads);
OMPI_DECLSPEC int mca_mpool_base_close(void);
OMPI_DECLSPEC mca_mpool_base_component_t* mca_mpool_base_component_lookup(const char* name);
OMPI_DECLSPEC mca_mpool_base_module_t* mca_mpool_base_module_lookup(const char* name);

Просмотреть файл

@ -35,19 +35,14 @@ OBJ_CLASS_INSTANCE(mca_mpool_base_selected_module_t, ompi_list_item_t, NULL, NUL
* will be closed and unloaded. The selected modules will be returned
* to the caller in a ompi_list_t.
*/
int mca_mpool_base_init(bool *allow_multi_user_threads)
int mca_mpool_base_init(bool enable_progress_threads, bool enable_mpi_threads)
{
bool user_threads;
ompi_list_item_t *item;
mca_base_component_list_item_t *cli;
mca_mpool_base_component_t *component;
mca_mpool_base_module_t *module;
mca_mpool_base_selected_module_t *sm;
/* Default to true in case there's no modules selected */
*allow_multi_user_threads = true;
/* Traverse the list of available modules; call their init
functions. */
@ -65,7 +60,8 @@ int mca_mpool_base_init(bool *allow_multi_user_threads)
ompi_output_verbose(10, mca_mpool_base_output,
"select: no init function; ignoring module");
} else {
module = component->mpool_init(&user_threads);
module = component->mpool_init(enable_progress_threads,
enable_mpi_threads);
/* If the module didn't initialize, unload it */
@ -82,7 +78,6 @@ int mca_mpool_base_init(bool *allow_multi_user_threads)
/* Otherwise, it initialized properly. Save it. */
else {
*allow_multi_user_threads &= user_threads;
ompi_output_verbose(10, mca_mpool_base_output,
"select: init returned success");

Просмотреть файл

@ -28,7 +28,7 @@ struct mca_mpool_t;
* component initialize
*/
typedef struct mca_mpool_base_module_t* (*mca_mpool_base_component_init_fn_t)
(bool *allow_multi_user_threads);
(bool enable_progress_threads, bool enable_mpi_threads);
/**
* if appropriate - returns base address of memory pool

Просмотреть файл

@ -28,7 +28,8 @@
* Local functions
*/
static int mca_mpool_sm_open(void);
static mca_mpool_base_module_t* mca_mpool_sm_init(bool *allow_multi_user_threads);
static mca_mpool_base_module_t* mca_mpool_sm_init(bool enable_progress_threads,
bool enable_mpi_threads);
mca_mpool_sm_component_t mca_mpool_sm_component = {
@ -97,7 +98,8 @@ static int mca_mpool_sm_open(void)
static mca_mpool_base_module_t*
mca_mpool_sm_init(bool *allow_multi_user_threads)
mca_mpool_sm_init(bool enable_progress_threads,
bool enable_mpi_threads)
{
char *file_name;
size_t len;
@ -144,16 +146,13 @@ mca_mpool_sm_init(bool *allow_multi_user_threads)
/* setup allocator */
mca_mpool_sm_component.sm_allocator = allocator_component->allocator_init(
allow_multi_user_threads,
mca_common_sm_mmap_alloc,
NULL
);
mca_mpool_sm_component.sm_allocator =
allocator_component->allocator_init(enable_mpi_threads,
mca_common_sm_mmap_alloc, NULL);
if(NULL == mca_mpool_sm_component.sm_allocator) {
ompi_output(0, "mca_mpool_sm_init: unable to initialize allocator");
return NULL;
}
*allow_multi_user_threads = true;
return &mca_mpool_sm_module;
}

Просмотреть файл

@ -32,9 +32,8 @@ extern "C" {
#endif
OMPI_DECLSPEC int mca_pml_base_open(void);
OMPI_DECLSPEC int mca_pml_base_progress(void);
OMPI_DECLSPEC int mca_pml_base_select(mca_pml_base_module_t *selected,
bool *allow_multi_user_threads,
bool *have_hidden_threads);
OMPI_DECLSPEC int mca_pml_base_select(bool enable_progress_threads,
bool enable_mpi_threads);
OMPI_DECLSPEC int mca_pml_base_close(void);

Просмотреть файл

@ -60,7 +60,7 @@ static void* mca_pml_bsend_alloc_segment(size_t* size_inout)
/*
* One time initialization at startup
*/
int mca_pml_base_bsend_init(bool* thread_safe)
int mca_pml_base_bsend_init(bool thread_safe)
{
int id = mca_base_param_register_string("pml", "base", "bsend_allocator", NULL, "basic");
mca_allocator_base_module_t *allocator;
@ -79,9 +79,11 @@ int mca_pml_base_bsend_init(bool* thread_safe)
}
free(name);
/* try to create an instance of the allocator - to determine thread safety level */
/* try to create an instance of the allocator - to determine
thread safety level */
allocator = mca_pml_bsend_allocator_component->allocator_init(thread_safe, mca_pml_bsend_alloc_segment, NULL);
if(NULL == allocator) {
if (NULL == allocator) {
return OMPI_ERR_BUFFER;
}
allocator->alc_finalize(allocator);

Просмотреть файл

@ -24,7 +24,7 @@ extern "C" {
#endif
struct mca_ptl_base_send_request_t;
OMPI_DECLSPEC int mca_pml_base_bsend_init(bool*);
OMPI_DECLSPEC int mca_pml_base_bsend_init(bool enable_mpi_threads);
OMPI_DECLSPEC int mca_pml_base_bsend_fini(void);
OMPI_DECLSPEC int mca_pml_base_bsend_attach(void* addr, int size);

Просмотреть файл

@ -40,28 +40,23 @@ typedef struct opened_component_t {
* will have all of its function pointers saved and returned to the
* caller.
*/
int mca_pml_base_select(mca_pml_base_module_t *selected,
bool *allow_multi_user_threads,
bool *have_hidden_threads)
int mca_pml_base_select(bool enable_progress_threads,
bool enable_mpi_threads)
{
int priority=0, best_priority=0;
bool user_threads=false, hidden_threads=false;
bool best_user_threads=false, best_hidden_threads=false;
ompi_list_item_t *item=NULL;
mca_base_component_list_item_t *cli=NULL;
mca_pml_base_component_t *component=NULL, *best_component=NULL;
mca_pml_base_module_t *modules=NULL;
int priority = 0, best_priority = 0;
ompi_list_item_t *item = NULL;
mca_base_component_list_item_t *cli = NULL;
mca_pml_base_component_t *component = NULL, *best_component = NULL;
mca_pml_base_module_t *module = NULL;
ompi_list_t opened;
opened_component_t *om=NULL;
opened_component_t *om = NULL;
/* Traverse the list of available components; call their init
functions. */
best_priority = -1;
best_component = NULL;
modules = NULL;
best_user_threads = user_threads = true;
best_hidden_threads = hidden_threads = false;
module = NULL;
OBJ_CONSTRUCT(&opened, ompi_list_t);
for (item = ompi_list_get_first(&mca_pml_base_components_available);
ompi_list_get_end(&mca_pml_base_components_available) != item;
@ -77,9 +72,9 @@ int mca_pml_base_select(mca_pml_base_module_t *selected,
ompi_output_verbose(10, mca_pml_base_output,
"select: no init function; ignoring component");
} else {
modules = component->pmlm_init(&priority, &user_threads,
&hidden_threads);
if (NULL == modules) {
module = component->pmlm_init(&priority, enable_progress_threads,
enable_mpi_threads);
if (NULL == module) {
ompi_output_verbose(10, mca_pml_base_output,
"select: init returned failure");
} else {
@ -87,8 +82,6 @@ int mca_pml_base_select(mca_pml_base_module_t *selected,
"select: init returned priority %d", priority);
if (priority > best_priority) {
best_priority = priority;
best_user_threads = user_threads;
best_hidden_threads = hidden_threads;
best_component = component;
}
@ -139,16 +132,14 @@ int mca_pml_base_select(mca_pml_base_module_t *selected,
available list all unselected components. The available list will
contain only the selected component. */
mca_base_components_close(mca_pml_base_output, &mca_pml_base_components_available,
(mca_base_component_t *) best_component);
mca_base_components_close(mca_pml_base_output,
&mca_pml_base_components_available,
(mca_base_component_t *) best_component);
/* Save the winner */
mca_pml_base_selected_component = *best_component;
mca_pml = *modules;
*selected = *modules;
*allow_multi_user_threads &= best_user_threads;
*have_hidden_threads |= best_hidden_threads;
mca_pml = *module;
ompi_output_verbose(10, mca_pml_base_output,
"select: component %s selected",
component->pmlm_version.mca_component_name);

Просмотреть файл

@ -100,16 +100,18 @@ typedef enum {
* @param priority (OUT) Relative priority or ranking used by MCA to
* selected a component.
*
* @param allow_multi_user_threads (OUT) Whether this component can run
* at MPI_THREAD_MULTIPLE or not.
* @param enable_progress_threads (IN) Whether this component is
* allowed to run a hidden/progress thread or not.
*
* @param have_hidden_threads (OUT) Whether this component may use
* hidden threads (e.g., progress threads) or not.
* @param enable_mpi_threads (IN) Whether support for multiple MPI
* threads is enabled or not (i.e., MPI_THREAD_MULTIPLE), which
* indicates whether multiple threads may invoke this component
* simultaneously or not.
*/
typedef struct mca_pml_base_module_1_0_0_t * (*mca_pml_base_component_init_fn_t)(
int *priority,
bool *allow_multi_user_threads,
bool *have_hidden_threads);
bool enable_progress_threads,
bool enable_mpi_threads);
typedef int (*mca_pml_base_component_finalize_fn_t)(void);

Просмотреть файл

@ -80,8 +80,8 @@ extern int mca_pml_teg_component_close(void);
extern mca_pml_base_module_t* mca_pml_teg_component_init(
int *priority,
bool *allow_multi_user_threads,
bool *have_hidden_threads
bool enable_progress_threads,
bool enable_mpi_threads
);
extern int mca_pml_teg_component_fini(void);

Просмотреть файл

@ -127,13 +127,12 @@ int mca_pml_teg_component_close(void)
mca_pml_base_module_t* mca_pml_teg_component_init(int* priority,
bool *allow_multi_user_threads,
bool *have_hidden_threads)
bool enable_progress_threads,
bool enable_mpi_threads)
{
uint32_t proc_arch;
int rc;
*priority = 0;
*have_hidden_threads = false;
mca_pml_teg.teg_ptl_components = NULL;
mca_pml_teg.teg_num_ptl_components = 0;
@ -151,7 +150,7 @@ mca_pml_base_module_t* mca_pml_teg_component_init(int* priority,
NULL);
/* buffered send */
if(mca_pml_base_bsend_init(allow_multi_user_threads) != OMPI_SUCCESS) {
if(OMPI_SUCCESS != mca_pml_base_bsend_init(enable_mpi_threads)) {
ompi_output(0, "mca_pml_teg_component_init: mca_pml_bsend_init failed\n");
return NULL;
}
@ -163,7 +162,6 @@ mca_pml_base_module_t* mca_pml_teg_component_init(int* priority,
if(rc != OMPI_SUCCESS)
return NULL;
*allow_multi_user_threads &= true;
return &mca_pml_teg.super;
}

Просмотреть файл

@ -43,8 +43,8 @@ typedef struct mca_ptl_base_selected_module_t mca_ptl_base_selected_module_t;
*/
OMPI_DECLSPEC int mca_ptl_base_open(void);
OMPI_DECLSPEC int mca_ptl_base_select(bool *allow_multi_user_threads,
bool *have_hidden_threads);
OMPI_DECLSPEC int mca_ptl_base_select(bool enable_progress_threads,
bool enable_mpi_threads);
OMPI_DECLSPEC int mca_ptl_base_close(void);

Просмотреть файл

@ -20,6 +20,7 @@
#include "runtime/runtime.h"
#include "mca/mca.h"
#include "mca/base/base.h"
#include "mca/pml/pml.h"
#include "mca/ptl/ptl.h"
#include "mca/ptl/base/base.h"
@ -31,11 +32,10 @@
* components will be closed and unloaded. The selected modules will
* be returned to the caller in a ompi_list_t.
*/
int mca_ptl_base_select(bool *allow_multi_user_threads,
bool *have_hidden_threads)
int mca_ptl_base_select(bool enable_progress_threads,
bool enable_mpi_threads)
{
int i, num_ptls;
bool user_threads, hidden_threads;
ompi_list_item_t *item;
mca_base_component_list_item_t *cli;
mca_ptl_base_component_t *component;
@ -96,8 +96,8 @@ int mca_ptl_base_select(bool *allow_multi_user_threads,
ompi_output_verbose(10, mca_ptl_base_output,
"select: no init function; ignoring component");
} else {
modules = component->ptlm_init(&num_ptls, &user_threads,
&hidden_threads);
modules = component->ptlm_init(&num_ptls, enable_progress_threads,
enable_mpi_threads);
/* If the component didn't initialize, remove it from the opened
list and remove it from the component repository */
@ -116,9 +116,6 @@ int mca_ptl_base_select(bool *allow_multi_user_threads,
/* Otherwise, it initialized properly. Save it. */
else {
*allow_multi_user_threads &= user_threads;
*have_hidden_threads |= hidden_threads;
ompi_output_verbose(10, mca_ptl_base_output,
"select: init returned success");
@ -146,6 +143,10 @@ int mca_ptl_base_select(bool *allow_multi_user_threads,
orte_abort(1, "No ptl components available. This shouldn't happen.");
}
/* Once we have some modules, tell the PML about them */
mca_pml.pml_add_ptls(&mca_ptl_base_modules_initialized);
/* All done */
return OMPI_SUCCESS;

Просмотреть файл

@ -198,18 +198,12 @@ mca_ptl_elan_component_close (void)
*/
mca_ptl_base_module_t **
mca_ptl_elan_component_init (int *num_ptls,
bool * allow_multi_user_threads,
bool * have_hidden_threads)
bool enable_progress_threads,
bool enable_mpi_threads)
{
mca_ptl_base_module_t **ptls;
*num_ptls = 0;
*allow_multi_user_threads = true;
*have_hidden_threads = OMPI_ENABLE_PROGRESS_THREADS;
/* XXX: Set the global variable to be true for threading */
if (OMPI_ENABLE_PROGRESS_THREADS)
ompi_set_using_threads(true);
ompi_free_list_init (&(elan_mp->elan_recv_frags_free),
sizeof (mca_ptl_elan_recv_frag_t),

Просмотреть файл

@ -481,18 +481,12 @@ mca_ptl_gm_init( mca_ptl_gm_component_t * gm )
mca_ptl_base_module_t **
mca_ptl_gm_component_init (int *num_ptl_modules,
bool * allow_multi_user_threads,
bool * have_hidden_threads)
bool enable_progress_threads,
bool enable_mpi_threads)
{
mca_ptl_base_module_t **ptls;
*num_ptl_modules = 0;
*allow_multi_user_threads = true;
#if OMPI_HAVE_POSIX_THREADS
*have_hidden_threads = true;
#else
*have_hidden_threads = false;
#endif /* OMPI_HAVE_POSIX_THREADS */
if (OMPI_SUCCESS != mca_ptl_gm_init (&mca_ptl_gm_component)) {
ompi_output( 0, "[%s:%d] error in initializing gm state and PTL's. (%d PTL's)\n",

Просмотреть файл

@ -164,26 +164,17 @@ static int mca_ptl_ib_component_send(void)
* (3) register PTL parameters with the MCA
*/
mca_ptl_base_module_t** mca_ptl_ib_component_init(int *num_ptl_modules,
bool *allow_multi_user_threads,
bool *have_hidden_threads)
bool enable_progress_threads,
bool enable_mpi_threads)
{
mca_ptl_base_module_t **modules;
VAPI_ret_t vapi_ret;
int i, ret;
mca_ptl_ib_module_t* ib_modules = NULL;
/* initialization */
*num_ptl_modules = 0;
mca_ptl_ib_component.ib_num_hcas=0;
*allow_multi_user_threads = true;
*have_hidden_threads = OMPI_ENABLE_PROGRESS_THREADS;
/* need to set ompi_using_threads() as ompi_event_init()
* will spawn a thread if supported */
if(OMPI_ENABLE_PROGRESS_THREADS) {
ompi_set_using_threads(true);
}
/* Initialize Receive fragments */
ompi_free_list_init (&(mca_ptl_ib_component.ib_recv_frags),

Просмотреть файл

@ -167,13 +167,11 @@ int mca_ptl_mx_component_close(void)
*/
mca_ptl_base_module_t** mca_ptl_mx_component_init(
int *num_ptls,
bool *allow_multi_user_threads,
bool *have_hidden_threads)
bool enable_progress_threads,
bool enable_mpi_threads)
{
mca_ptl_base_module_t** ptls;
*num_ptls = 0;
*allow_multi_user_threads = true;
*have_hidden_threads = false; /* MX driver/callbacks are multi-threaded */
ompi_free_list_init(&mca_ptl_mx_component.mx_send_frags,
sizeof(mca_ptl_mx_send_frag_t),

Просмотреть файл

@ -161,13 +161,11 @@ mca_ptl_portals_component_close(void)
*/
mca_ptl_base_module_t**
mca_ptl_portals_component_init(int *num_ptls,
bool *allow_multi_user_threads,
bool *have_hidden_threads)
bool enable_progress_threads,
bool enable_mpi_threads)
{
mca_ptl_base_module_t** ptls;
*num_ptls = 0;
*allow_multi_user_threads = true;
*have_hidden_threads = false;
/* do the non-portable global initialization stuff for a
particular network link */

Просмотреть файл

@ -41,7 +41,7 @@ static int mca_ptl_prof_component_open_fn( void );
static int mca_ptl_prof_component_close_fn( void );
static struct mca_ptl_base_module_t** ptl_prof_component_init_fn(
int *num_ptls,
bool *allow_multi_user_threads, bool *have_hidden_threads );
bool enable_progress_threads, bool enable_mpi_threads);
static int ptl_prof_component_control_fn( int param, void* value, size_t size );
mca_ptl_prof_module_1_0_0_t mca_ptl_prof_component = {
@ -94,14 +94,12 @@ static int ptl_prof_component_control_fn( int param, void* value, size_t size )
extern mca_ptl_prof_t mca_ptl_prof;
static struct mca_ptl_base_module_t** ptl_prof_component_init_fn(
int *num_ptls,
bool *allow_multi_user_threads,
bool *have_hidden_threads )
bool enable_progress_threads,
bool enable_mpi_threads)
{
mca_ptl_prof_t** ptl_array;
*num_ptls = 1;
*allow_multi_user_threads = true;
*have_hidden_threads = false;
ptl_array = (mca_ptl_prof_t**)malloc( (*num_ptls) * sizeof(mca_ptl_prof_t*) );
ptl_array[0] = &mca_ptl_prof;
mca_ptl_prof.super.ptl_component = (mca_ptl_base_component_t*)&mca_ptl_prof_component;

Просмотреть файл

@ -284,11 +284,13 @@ typedef enum {
* @param num_ptls (OUT) Returns the number of ptl instances created, or 0
* if the transport is not available.
*
* @param allow_multi_user_threads (OUT) Indicated wether this component can
* run at MPI_THREAD_MULTIPLE or not.
* @param enable_progress_threads (IN) Whether this component is
* allowed to run a hidden/progress thread or not.
*
* @param have_hidden_threads (OUT) Whether this component uses
* hidden threads (e.g., progress threads) or not.
* @param enable_mpi_threads (IN) Whether support for multiple MPI
* threads is enabled or not (i.e., MPI_THREAD_MULTIPLE), which
* indicates whether multiple threads may invoke this component
* simultaneously or not.
*
* @return Array of pointers to PTL modules, or NULL if the transport
* is not available.
@ -302,8 +304,8 @@ typedef enum {
*/
typedef struct mca_ptl_base_module_t** (*mca_ptl_base_component_init_fn_t)(
int *num_ptls,
bool *allow_multi_user_threads,
bool *have_hidden_threads
bool enable_progress_threads,
bool enable_mpi_threads
);

Просмотреть файл

@ -84,8 +84,8 @@ extern int mca_ptl_self_component_close(void);
*/
extern mca_ptl_base_module_t** mca_ptl_self_component_init(
int *num_ptls,
bool *allow_multi_user_threads,
bool *have_hidden_threads
bool enable_progress_threads,
bool enable_mpi_threads
);
int mca_ptl_self_add_proc(struct mca_ptl_base_module_t* ptl, size_t nprocs, struct ompi_proc_t **ompi_proc, struct mca_ptl_base_peer_t** peer_ret, struct ompi_bitmap_t* reachable);

Просмотреть файл

@ -73,7 +73,7 @@ mca_ptl_self_component_t mca_ptl_self_component = {
true
},
mca_ptl_self_component_init,
mca_ptl_self_component_init,
NULL,
NULL,
}
@ -155,12 +155,10 @@ int mca_ptl_self_component_close(void)
mca_ptl_base_module_t** mca_ptl_self_component_init(int *num_ptl_modules,
bool *allow_multi_user_threads,
bool *have_hidden_threads)
bool enable_progress_threads,
bool enable_mpi_threads)
{
*num_ptl_modules = 0;
*allow_multi_user_threads = true;
*have_hidden_threads = false;
mca_ptl_self_component.self_ptl_modules = (mca_ptl_base_module_t **)
malloc(sizeof(mca_ptl_base_module_t*));

Просмотреть файл

@ -151,14 +151,14 @@ extern int mca_ptl_sm_component_close(void);
* SM module initialization.
*
* @param num_ptls (OUT) Number of PTLs returned in PTL array.
* @param allow_multi_user_threads (OUT) Flag indicating wether PTL supports user threads (TRUE)
* @param have_hidden_threads (OUT) Flag indicating wether PTL uses threads (TRUE)
* @param enable_progress_threads (IN) Flag indicating whether PTL is allowed to have progress threads
* @param enable_mpi_threads (IN) Flag indicating whether PTL must support multilple simultaneous invocations from different threads
*
*/
extern mca_ptl_base_module_t** mca_ptl_sm_component_init(
int *num_ptls,
bool *allow_multi_user_threads,
bool *have_hidden_threads
bool enable_progress_threads,
bool enable_mpi_threads
);
/**

Просмотреть файл

@ -240,15 +240,13 @@ CLEANUP:
*/
mca_ptl_base_module_t** mca_ptl_sm_component_init(
int *num_ptls,
bool *allow_multi_user_threads,
bool *have_hidden_threads)
bool enable_progress_threads,
bool enable_mpi_threads)
{
mca_ptl_base_module_t **ptls = NULL;
int i;
*num_ptls = 0;
*allow_multi_user_threads = true;
*have_hidden_threads = OMPI_ENABLE_PROGRESS_THREADS;
/* lookup shared memory pool */
mca_ptl_sm_component.sm_mpool =

Просмотреть файл

@ -98,8 +98,8 @@ extern int mca_ptl_tcp_component_close(void);
*/
extern mca_ptl_base_module_t** mca_ptl_tcp_component_init(
int *num_ptls,
bool *allow_multi_user_threads,
bool *have_hidden_threads
bool enable_progress_threads,
bool enable_mpi_threads
);
/**

Просмотреть файл

@ -484,13 +484,11 @@ static int mca_ptl_tcp_component_exchange(void)
* (3) register PTL parameters with the MCA
*/
mca_ptl_base_module_t** mca_ptl_tcp_component_init(int *num_ptl_modules,
bool *allow_multi_user_threads,
bool *have_hidden_threads)
bool enable_progress_threads,
bool enable_mpi_threads)
{
mca_ptl_base_module_t **ptls;
*num_ptl_modules = 0;
*allow_multi_user_threads = true;
*have_hidden_threads = OMPI_ENABLE_PROGRESS_THREADS;
ompi_free_list_init(&mca_ptl_tcp_component.tcp_send_frags,
sizeof(mca_ptl_tcp_send_frag_t),

Просмотреть файл

@ -39,8 +39,8 @@ OMPI_DECLSPEC int mca_topo_base_comm_select(struct ompi_communicator_t *comm,
OMPI_DECLSPEC int mca_topo_base_comm_unselect(struct ompi_communicator_t *comm);
OMPI_DECLSPEC int mca_topo_base_find_available (bool *allow_multi_user_threads,
bool *have_hidden_threads);
OMPI_DECLSPEC int mca_topo_base_find_available (bool enable_progress_threads,
bool enable_mpi_threads);
OMPI_DECLSPEC int mca_topo_base_init_comm (struct ompi_communicator_t *comm);

Просмотреть файл

@ -32,12 +32,16 @@ ompi_list_t mca_topo_base_modules_available;
bool mca_topo_base_modules_available_valid = false;
static int init_query(const mca_base_component_t *m,
mca_base_component_priority_list_item_t *entry);
mca_base_component_priority_list_item_t *entry,
bool enable_progress_threads,
bool enable_mpi_threads);
static int init_query_1_0_0(const mca_base_component_t *component,
mca_base_component_priority_list_item_t *entry);
mca_base_component_priority_list_item_t *entry,
bool enable_progress_threads,
bool enable_mpi_threads);
int mca_topo_base_find_available(bool *allow_multi_user_threads,
bool *have_hidden_threads)
int mca_topo_base_find_available(bool enable_progress_threads,
bool enable_mpi_threads)
{
bool found = false;
mca_base_component_priority_list_item_t *entry;
@ -63,7 +67,9 @@ int mca_topo_base_find_available(bool *allow_multi_user_threads,
/* Now for this entry, we have to determine the thread level. Call
a subroutine to do the job for us */
if (OMPI_SUCCESS == init_query(entry->super.cli_component, entry)) {
if (OMPI_SUCCESS == init_query(entry->super.cli_component, entry,
enable_progress_threads,
enable_mpi_threads)) {
/* Save the results in the list. The priority is not relvant at
this point in time. But we save the thread arguments so that
the initial selection algorithm can negotiate overall thread
@ -103,7 +109,10 @@ int mca_topo_base_find_available(bool *allow_multi_user_threads,
static int init_query(const mca_base_component_t *m,
mca_base_component_priority_list_item_t *entry) {
mca_base_component_priority_list_item_t *entry,
bool enable_progress_threads,
bool enable_mpi_threads)
{
int ret;
ompi_output_verbose(10, mca_topo_base_output,
@ -114,7 +123,8 @@ static int init_query(const mca_base_component_t *m,
if (1 == m->mca_type_major_version &&
0 == m->mca_type_minor_version &&
0 == m->mca_type_release_version) {
ret = init_query_1_0_0 (m, entry);
ret = init_query_1_0_0(m, entry, enable_progress_threads,
enable_mpi_threads);
} else {
/* unrecognised API version */
ompi_output_verbose(10, mca_topo_base_output,
@ -145,10 +155,12 @@ static int init_query(const mca_base_component_t *m,
static int init_query_1_0_0(const mca_base_component_t *component,
mca_base_component_priority_list_item_t *entry) {
mca_base_component_priority_list_item_t *entry,
bool enable_progress_threads,
bool enable_mpi_threads)
{
mca_topo_base_component_1_0_0_t *topo = (mca_topo_base_component_1_0_0_t *) component;
return topo->topom_init_query(&(entry->cpli_allow_multi_user_threads),
&(entry->cpli_have_hidden_threads));
return topo->topom_init_query(enable_progress_threads,
enable_mpi_threads);
}

Просмотреть файл

@ -62,8 +62,8 @@
* **************** component struct *******************************
*/
typedef int (*mca_topo_base_component_init_query_1_0_0_fn_t)
(bool *allow_multi_user_threads,
bool *have_hidden_threads);
(bool enable_progress_threads,
bool enable_mpi_threads);
typedef struct mca_topo_base_module_1_0_0_t*
(*mca_topo_base_component_comm_query_1_0_0_fn_t) (int *priority);

Просмотреть файл

@ -59,13 +59,10 @@ static mca_topo_base_module_1_0_0_t unity = {
* *******************************************************************
*/
int mca_topo_unity_component_init_query(bool *allow_multi_user_threads,
bool *have_hidden_threads)
int mca_topo_unity_component_init_query(bool enable_progress_threads,
bool enable_mpi_threads)
{
*allow_multi_user_threads = true;
*have_hidden_threads = false;
/* return success */
/* Nothing to do */
return OMPI_SUCCESS;
}

Просмотреть файл

@ -1,7 +1,3 @@
/** @file
*
*
*/
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University.
* All rights reserved.
@ -28,14 +24,6 @@
#include "request/request.h"
#include "mca/topo/topo.h"
/*
* This structure is the interface to the MCA world. It contains the
* version information and the four functions (see below) which
* are needed for this module to function with the MCA framework
*/
/*OMPI_COMP_EXPORT extern struct mca_topo_base_module_1_0_0_t mca_topo_unity_module;*/
/*
* ******************************************************************
* ******** functions which provide MCA interface comppliance *******
@ -52,8 +40,8 @@
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
int mca_topo_unity_component_init_query(bool *allow_multi_user_threads,
bool *have_hidden_threads);
int mca_topo_unity_component_init_query(bool enable_progress_threads,
bool enable_mpi_threads);
struct mca_topo_base_module_1_0_0_t *
mca_topo_unity_component_comm_query (int *priority);
int mca_topo_unity_component_comm_unquery (struct ompi_communicator_t *comm);

Просмотреть файл

@ -90,13 +90,12 @@ int MPI_Cart_create(MPI_Comm old_comm, int ndims, int *dims,
*/
if (!(mca_topo_base_components_opened_valid ||
mca_topo_base_components_available_valid)) {
bool user_threads = true;
bool hidden_threads = true;
if (OMPI_SUCCESS != (err = mca_topo_base_open())) {
return OMPI_ERRHANDLER_INVOKE(old_comm, err, FUNC_NAME);
}
if (OMPI_SUCCESS != (err = mca_topo_base_find_available (&user_threads,
&hidden_threads))) {
if (OMPI_SUCCESS !=
(err = mca_topo_base_find_available(OMPI_ENABLE_PROGRESS_THREADS,
OMPI_ENABLE_MPI_THREADS))) {
return OMPI_ERRHANDLER_INVOKE(old_comm, err, FUNC_NAME);
}
}

Просмотреть файл

@ -64,21 +64,12 @@ int MPI_File_delete(char *filename, MPI_Info info)
if (!(mca_io_base_components_opened_valid ||
mca_io_base_components_available_valid)) {
bool user_threads = true;
bool hidden_threads = true;
if (OMPI_SUCCESS != (rc = mca_io_base_open())) {
return OMPI_ERRHANDLER_INVOKE(MPI_FILE_NULL, rc, FUNC_NAME);
}
/* JMS Need to do something here with user_threads and
hidden_threads -- technically this is no longer a query,
it's a mandate. The query part is left over from when this
function was invoked during MPI_INIT. Since we've now
long-since decided the user threads and hidden threads
stuff (i.e., during MPI_INIT), we can't change them now.
This is not hugely important now, since ROMIO is the only
io component that we have, but it should be fixed. */
if (OMPI_SUCCESS != (rc = mca_io_base_find_available(&user_threads,
&hidden_threads))) {
if (OMPI_SUCCESS !=
(rc = mca_io_base_find_available(OMPI_ENABLE_PROGRESS_THREADS,
OMPI_ENABLE_MPI_THREADS))) {
return OMPI_ERRHANDLER_INVOKE(MPI_FILE_NULL, rc, FUNC_NAME);
}
}

Просмотреть файл

@ -65,21 +65,12 @@ int MPI_File_open(MPI_Comm comm, char *filename, int amode,
if (!(mca_io_base_components_opened_valid ||
mca_io_base_components_available_valid)) {
bool user_threads = true;
bool hidden_threads = true;
if (OMPI_SUCCESS != (rc = mca_io_base_open())) {
return OMPI_ERRHANDLER_INVOKE(MPI_FILE_NULL, rc, FUNC_NAME);
}
/* JMS Need to do something here with user_threads and
hidden_threads -- technically this is no longer a query,
it's a mandate. The query part is left over from when this
function was invoked during MPI_INIT. Since we've now
long-since decided the user threads and hidden threads
stuff (i.e., during MPI_INIT), we can't change them now.
This is not hugely important now, since ROMIO is the only
io component that we have, but it should be fixed. */
if (OMPI_SUCCESS != (rc = mca_io_base_find_available(&user_threads,
&hidden_threads))) {
if (OMPI_SUCCESS !=
(rc = mca_io_base_find_available(OMPI_ENABLE_PROGRESS_THREADS,
OMPI_ENABLE_MPI_THREADS))) {
return OMPI_ERRHANDLER_INVOKE(MPI_FILE_NULL, rc, FUNC_NAME);
}
}

Просмотреть файл

@ -75,13 +75,12 @@ int MPI_Graph_create(MPI_Comm old_comm, int nnodes, int *index,
*/
if (!(mca_topo_base_components_opened_valid ||
mca_topo_base_components_available_valid)) {
bool user_threads = true;
bool hidden_threads = true;
if (OMPI_SUCCESS != (err = mca_topo_base_open())) {
return OMPI_ERRHANDLER_INVOKE(old_comm, err, FUNC_NAME);
}
if (OMPI_SUCCESS != (err = mca_topo_base_find_available (&user_threads,
&hidden_threads))) {
if (OMPI_SUCCESS !=
(err = mca_topo_base_find_available(OMPI_ENABLE_PROGRESS_THREADS,
OMPI_ENABLE_MPI_THREADS))) {
return OMPI_ERRHANDLER_INVOKE(old_comm, err, FUNC_NAME);
}
}

Просмотреть файл

@ -91,11 +91,12 @@ int ompi_mpi_init(int argc, char **argv, int requested, int *provided)
goto error;
}
/* if we are not the seed nor a singleton, AND we have not set the
* orte_debug flag, then
* start recording the compound command that starts us up.
* if we are the seed or a singleton, then don't do this - the registry is
* local, so we'll just drive it directly */
/* If we are not the seed nor a singleton, AND we have not set the
orte_debug flag, then start recording the compound command that
starts us up. if we are the seed or a singleton, then don't do
this - the registry is local, so we'll just drive it
directly */
if (orte_process_info.seed ||
NULL == orte_process_info.ns_replica ||
orte_debug_flag) {
@ -110,11 +111,13 @@ int ompi_mpi_init(int argc, char **argv, int requested, int *provided)
}
/* Now do the things that hit the registry */
if (ORTE_SUCCESS != (ret = orte_init_stage2())) {
ORTE_ERROR_LOG(ret);
error = "ompi_mpi_init: orte_init_stage2 failed";
goto error;
}
/* Once we've joined the RTE, see if any MCA parameters were
passed to the MPI level */
@ -130,13 +133,14 @@ int ompi_mpi_init(int argc, char **argv, int requested, int *provided)
}
#endif
/* initialize ompi procs */
/* Initialize OMPI procs */
if (OMPI_SUCCESS != (ret = ompi_proc_init())) {
error = "mca_proc_init() failed";
goto error;
}
/* Open up MPI-related MCA modules. */
/* Open up MPI-related MCA components */
if (OMPI_SUCCESS != (ret = mca_allocator_base_open())) {
error = "mca_allocator_base_open() failed";
@ -158,24 +162,54 @@ int ompi_mpi_init(int argc, char **argv, int requested, int *provided)
error = "mca_coll_base_open() failed";
goto error;
}
/* The io framework is initialized lazily, at the first use of any
MPI_File_* function, so it is not included here. */
/* initialize module exchange */
/* In order to reduce the common case for MPI apps (where they
don't use MPI-2 IO or MPI-1 topology functions), the io and
topo frameworks are initialized lazily, at the first use of
relevant functions (e.g., MPI_FILE_*, MPI_CART_*, MPI_GRAPH_*),
so they are not opened here. */
/* Initialize module exchange */
if (OMPI_SUCCESS != (ret = mca_base_modex_init())) {
error = "mca_base_modex_init() failed";
goto error;
}
/* Select which pml, ptl, and coll modules to use, and determine the
final thread level */
/* Select which MPI components to use */
if (OMPI_SUCCESS !=
(ret = mca_base_init_select_components(requested, provided))) {
error = "mca_base_init_select_components() failed";
(ret = mca_mpool_base_init(OMPI_ENABLE_PROGRESS_THREADS,
OMPI_ENABLE_MPI_THREADS))) {
error = "mca_mpool_base_init() failed";
goto error;
}
if (OMPI_SUCCESS !=
(ret = mca_pml_base_select(OMPI_ENABLE_PROGRESS_THREADS,
OMPI_ENABLE_MPI_THREADS))) {
error = "mca_pml_base_select() failed";
goto error;
}
if (OMPI_SUCCESS !=
(ret = mca_ptl_base_select(OMPI_ENABLE_PROGRESS_THREADS,
OMPI_ENABLE_MPI_THREADS))) {
error = "mca_ptl_base_select() failed";
goto error;
}
if (OMPI_SUCCESS !=
(ret = mca_coll_base_find_available(OMPI_ENABLE_PROGRESS_THREADS,
OMPI_ENABLE_MPI_THREADS))) {
error = "mca_coll_base_find_available() failed";
goto error;
}
/* io and topo components are not selected here -- see comment
above about the io and topo frameworks being loaded lazily */
/* Initialize each MPI handle subsystem */
/* initialize requests */
if (OMPI_SUCCESS != (ret = ompi_request_init())) {
error = "ompi_request_init() failed";
@ -253,9 +287,8 @@ int ompi_mpi_init(int argc, char **argv, int requested, int *provided)
goto error;
}
/*
* Let system know we are at STG1 Barrier
*/
/* Let system know we are at STG1 Barrier */
if (ORTE_SUCCESS != (ret = orte_soh.set_proc_soh(orte_process_info.my_name,
ORTE_PROC_STATE_AT_STG1, 0))) {
ORTE_ERROR_LOG(ret);
@ -263,8 +296,8 @@ int ompi_mpi_init(int argc, char **argv, int requested, int *provided)
goto error;
}
/* if the compound command is operative, execute it
*/
/* if the compound command is operative, execute it */
if (compound_cmd) {
if (OMPI_SUCCESS != (ret = orte_gpr.exec_compound_cmd())) {
ORTE_ERROR_LOG(ret);
@ -273,8 +306,9 @@ int ompi_mpi_init(int argc, char **argv, int requested, int *provided)
}
}
/* First barrier -- wait for message from RMGR_PROC_STAGE_GATE_MGR
to arrive */
/* FIRST BARRIER - WAIT FOR MSG FROM RMGR_PROC_STAGE_GATE_MGR TO ARRIVE */
if (ORTE_SUCCESS != (ret = orte_rml.xcast(NULL, NULL, 0, NULL, NULL))) {
ORTE_ERROR_LOG(ret);
error = "ompi_mpi_init: failed to see all procs register\n";
@ -306,17 +340,32 @@ int ompi_mpi_init(int argc, char **argv, int requested, int *provided)
goto error;
}
/* save the resulting thread levels */
/* Figure out the final MPI thread levels. If we were not
compiled for support for MPI threads, then don't allow
MPI_THREAD_MULTIPLE. */
ompi_mpi_thread_requested = requested;
ompi_mpi_thread_provided = *provided;
if (OMPI_HAVE_THREAD_SUPPORT == 1) {
ompi_mpi_thread_provided = *provided = MPI_THREAD_SINGLE;
ompi_mpi_main_thread = NULL;
} else if (OMPI_ENABLE_MPI_THREADS == 1) {
ompi_mpi_thread_provided = *provided = requested;
ompi_mpi_main_thread = ompi_thread_get_self();
} else {
if (MPI_THREAD_MULTIPLE == requested) {
ompi_mpi_thread_provided = *provided = MPI_THREAD_SERIALIZED;
} else {
ompi_mpi_thread_provided = *provided = requested;
}
ompi_mpi_main_thread = ompi_thread_get_self();
}
ompi_mpi_thread_multiple = (ompi_mpi_thread_provided ==
MPI_THREAD_MULTIPLE);
#if OMPI_ENABLE_MPI_THREADS
ompi_mpi_main_thread = ompi_thread_get_self();
#else
ompi_mpi_main_thread = NULL;
#endif
if (OMPI_ENABLE_PROGRESS_THREADS == 1 ||
OMPI_ENABLE_MPI_THREADS == 1) {
ompi_set_using_threads(true);
}
/* Init coll for the comms */
@ -342,9 +391,8 @@ int ompi_mpi_init(int argc, char **argv, int requested, int *provided)
}
#endif
/*
* Let system know we are at STG2 Barrier
*/
/* Let system know we are at STG2 Barrier */
if (ORTE_SUCCESS != (ret = orte_soh.set_proc_soh(orte_process_info.my_name,
ORTE_PROC_STATE_AT_STG2, 0))) {
ORTE_ERROR_LOG(ret);
@ -357,7 +405,9 @@ int ompi_mpi_init(int argc, char **argv, int requested, int *provided)
ompi_progress_events(OMPI_EVLOOP_NONBLOCK);
#endif
/* SECOND BARRIER - WAIT FOR MSG FROM RMGR_PROC_STAGE_GATE_MGR TO ARRIVE */
/* Second barrier -- wait for message from
RMGR_PROC_STAGE_GATE_MGR to arrive */
if (ORTE_SUCCESS != (ret = orte_rml.xcast(NULL, NULL, 0, NULL, NULL))) {
ORTE_ERROR_LOG(ret);
error = "ompi_mpi_init: failed to see all procs register\n";
@ -366,8 +416,8 @@ int ompi_mpi_init(int argc, char **argv, int requested, int *provided)
/* new very last step: check whether we have been spawned or not.
We introduce that at the very end, since we need collectives,
datatypes, ptls etc. up and running here....
*/
datatypes, ptls etc. up and running here.... */
if (OMPI_SUCCESS != (ret = ompi_comm_dyn_init())) {
error = "ompi_comm_dyn_init() failed";
goto error;
@ -381,7 +431,7 @@ int ompi_mpi_init(int argc, char **argv, int requested, int *provided)
return ret;
}
/* All done */
/* All done. Wasn't that simple? */
ompi_mpi_initialized = true;