ompi/init: always lazy-wait in ompi_mpi_init
According to the discussion in #2181, we no longer need the MCA parameter. Signed-off-by: Artem Polyakov <artpol84@gmail.com>
This commit is contained in:
parent
a49422fe84
commit
06a73da5ea
@ -20,6 +20,7 @@
|
||||
* Copyright (c) 2014-2015 Intel, Inc. All rights reserved.
|
||||
* Copyright (c) 2014-2016 Research Organization for Information Science
|
||||
* and Technology (RIST). All rights reserved.
|
||||
* Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
|
||||
*
|
||||
* $COPYRIGHT$
|
||||
*
|
||||
@ -279,7 +280,6 @@ opal_list_t ompi_registered_datareps = {{0}};
|
||||
|
||||
bool ompi_enable_timing = false, ompi_enable_timing_ext = false;
|
||||
extern bool ompi_mpi_yield_when_idle;
|
||||
extern bool ompi_mpi_lazy_wait_in_init;
|
||||
extern int ompi_mpi_event_tick_rate;
|
||||
|
||||
/**
|
||||
@ -529,11 +529,7 @@ int ompi_mpi_init(int argc, char **argv, int requested, int *provided)
|
||||
opal_pmix.register_evhandler(NULL, &info, ompi_errhandler_callback,
|
||||
ompi_errhandler_registration_callback,
|
||||
(void*)&errtrk);
|
||||
if( ompi_mpi_lazy_wait_in_init ){
|
||||
OMPI_LAZY_WAIT_FOR_COMPLETION(errtrk.active);
|
||||
} else {
|
||||
OMPI_WAIT_FOR_COMPLETION(errtrk.active);
|
||||
}
|
||||
OMPI_LAZY_WAIT_FOR_COMPLETION(errtrk.active);
|
||||
|
||||
OPAL_LIST_DESTRUCT(&info);
|
||||
if (OPAL_SUCCESS != errtrk.status) {
|
||||
@ -660,11 +656,7 @@ int ompi_mpi_init(int argc, char **argv, int requested, int *provided)
|
||||
if (NULL != opal_pmix.fence_nb) {
|
||||
opal_pmix.fence_nb(NULL, opal_pmix_collect_all_data,
|
||||
fence_release, (void*)&active);
|
||||
if( ompi_mpi_lazy_wait_in_init ){
|
||||
OMPI_LAZY_WAIT_FOR_COMPLETION(active);
|
||||
} else {
|
||||
OMPI_WAIT_FOR_COMPLETION(active);
|
||||
}
|
||||
OMPI_LAZY_WAIT_FOR_COMPLETION(active);
|
||||
} else {
|
||||
opal_pmix.fence(NULL, opal_pmix_collect_all_data);
|
||||
}
|
||||
@ -841,11 +833,7 @@ int ompi_mpi_init(int argc, char **argv, int requested, int *provided)
|
||||
if (NULL != opal_pmix.fence_nb) {
|
||||
opal_pmix.fence_nb(NULL, opal_pmix_collect_all_data,
|
||||
fence_release, (void*)&active);
|
||||
if( ompi_mpi_lazy_wait_in_init ){
|
||||
OMPI_LAZY_WAIT_FOR_COMPLETION(active);
|
||||
} else {
|
||||
OMPI_WAIT_FOR_COMPLETION(active);
|
||||
}
|
||||
OMPI_LAZY_WAIT_FOR_COMPLETION(active);
|
||||
} else {
|
||||
opal_pmix.fence(NULL, opal_pmix_collect_all_data);
|
||||
}
|
||||
|
@ -60,7 +60,6 @@ bool ompi_have_sparse_group_storage = OPAL_INT_TO_BOOL(OMPI_GROUP_SPARSE);
|
||||
bool ompi_use_sparse_group_storage = OPAL_INT_TO_BOOL(OMPI_GROUP_SPARSE);
|
||||
|
||||
bool ompi_mpi_yield_when_idle = true;
|
||||
bool ompi_mpi_lazy_wait_in_init = false;
|
||||
int ompi_mpi_event_tick_rate = -1;
|
||||
char *ompi_mpi_show_mca_params_string = NULL;
|
||||
bool ompi_mpi_have_sparse_group_storage = !!(OMPI_GROUP_SPARSE);
|
||||
@ -113,14 +112,6 @@ int ompi_mpi_register_params(void)
|
||||
MCA_BASE_VAR_SCOPE_READONLY,
|
||||
&ompi_mpi_yield_when_idle);
|
||||
|
||||
ompi_mpi_lazy_wait_in_init = true;
|
||||
(void) mca_base_var_register("ompi", "mpi", NULL, "lazy_wait_in_init",
|
||||
"Avoid aggressive progress in MPI_Init, make sure that PMIx server has timeslots to progress",
|
||||
MCA_BASE_VAR_TYPE_BOOL, NULL, 0, 0,
|
||||
OPAL_INFO_LVL_9,
|
||||
MCA_BASE_VAR_SCOPE_READONLY,
|
||||
&ompi_mpi_lazy_wait_in_init);
|
||||
|
||||
ompi_mpi_event_tick_rate = -1;
|
||||
(void) mca_base_var_register("ompi", "mpi", NULL, "event_tick_rate",
|
||||
"How often to progress TCP communications (0 = never, otherwise specified in microseconds)",
|
||||
|
Loading…
x
Reference in a new issue
Block a user