Merge pull request #6680 from hoopoepg/topic/suppressed-pml-ucx-mt-warning
PML/UCX: disable PML UCX if MT is requested but not supported
Commit 61adcd9fc2
@@ -245,7 +245,7 @@ int mca_pml_ucx_close(void)
     return OMPI_SUCCESS;
 }
 
-int mca_pml_ucx_init(void)
+int mca_pml_ucx_init(int enable_mpi_threads)
 {
     ucp_worker_params_t params;
     ucp_worker_attr_t attr;
@@ -256,8 +256,7 @@ int mca_pml_ucx_init(void)
 
     /* TODO check MPI thread mode */
     params.field_mask = UCP_WORKER_PARAM_FIELD_THREAD_MODE;
-    params.thread_mode = UCS_THREAD_MODE_SINGLE;
-    if (ompi_mpi_thread_multiple) {
+    if (enable_mpi_threads) {
         params.thread_mode = UCS_THREAD_MODE_MULTI;
     } else {
         params.thread_mode = UCS_THREAD_MODE_SINGLE;
@@ -279,10 +278,11 @@ int mca_pml_ucx_init(void)
         goto err_destroy_worker;
     }
 
-    if (ompi_mpi_thread_multiple && (attr.thread_mode != UCS_THREAD_MODE_MULTI)) {
+    if (enable_mpi_threads && (attr.thread_mode != UCS_THREAD_MODE_MULTI)) {
         /* UCX does not support multithreading, disqualify current PML for now */
         /* TODO: we should let OMPI to fallback to THREAD_SINGLE mode */
-        PML_UCX_ERROR("UCP worker does not support MPI_THREAD_MULTIPLE");
+        PML_UCX_VERBOSE(1, "UCP worker does not support MPI_THREAD_MULTIPLE. "
+                        "PML UCX could not be selected");
         rc = OMPI_ERR_NOT_SUPPORTED;
         goto err_destroy_worker;
     }
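
For readers unfamiliar with the UCX worker API, below is a minimal standalone sketch of the request-then-verify pattern the patch relies on. It is not Open MPI code; the names create_worker_checked() and want_mt are invented for illustration. The key point is that the thread mode passed at worker creation is only a request, so the granted mode must be read back with ucp_worker_query(), which is exactly why the patch checks attr.thread_mode before letting the PML be selected.

/* Standalone sketch (not Open MPI code): request a multi-threaded UCX
 * worker, then query which thread mode was actually granted. */
#include <stdio.h>
#include <ucp/api/ucp.h>

static int create_worker_checked(ucp_context_h context, int want_mt,
                                 ucp_worker_h *worker_p)
{
    ucp_worker_params_t params;
    ucp_worker_attr_t   attr;

    /* Request the desired thread mode (a hint, not a guarantee). */
    params.field_mask  = UCP_WORKER_PARAM_FIELD_THREAD_MODE;
    params.thread_mode = want_mt ? UCS_THREAD_MODE_MULTI
                                 : UCS_THREAD_MODE_SINGLE;
    if (ucp_worker_create(context, &params, worker_p) != UCS_OK) {
        return -1;
    }

    /* Verify what the worker actually provides. */
    attr.field_mask = UCP_WORKER_ATTR_FIELD_THREAD_MODE;
    if (ucp_worker_query(*worker_p, &attr) != UCS_OK) {
        ucp_worker_destroy(*worker_p);
        return -1;
    }

    if (want_mt && (attr.thread_mode != UCS_THREAD_MODE_MULTI)) {
        /* Same condition the patch handles: multi-threading was requested
         * but is unavailable, so the caller should disqualify this path. */
        fprintf(stderr, "worker does not support multi-threaded access\n");
        ucp_worker_destroy(*worker_p);
        return -1;
    }
    return 0;
}

int main(void)
{
    ucp_params_t  ctx_params;
    ucp_context_h context;
    ucp_worker_h  worker;

    ctx_params.field_mask = UCP_PARAM_FIELD_FEATURES;
    ctx_params.features   = UCP_FEATURE_TAG;
    if (ucp_init(&ctx_params, NULL, &context) != UCS_OK) {
        return 1;
    }

    if (create_worker_checked(context, 1, &worker) == 0) {
        ucp_worker_destroy(worker);
    }
    ucp_cleanup(context);
    return 0;
}
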
@@ -64,7 +64,7 @@ extern mca_pml_ucx_module_t ompi_pml_ucx;
 
 int mca_pml_ucx_open(void);
 int mca_pml_ucx_close(void);
-int mca_pml_ucx_init(void);
+int mca_pml_ucx_init(int enable_mpi_threads);
 int mca_pml_ucx_cleanup(void);
 
 int mca_pml_ucx_add_procs(struct ompi_proc_t **procs, size_t nprocs);
@@ -94,7 +94,7 @@ mca_pml_ucx_component_init(int* priority, bool enable_progress_threads,
 {
     int ret;
 
-    if ( (ret = mca_pml_ucx_init()) != 0) {
+    if ( (ret = mca_pml_ucx_init(enable_mpi_threads)) != 0) {
         return NULL;
     }
 
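
The enable_mpi_threads argument threaded through mca_pml_ucx_component_init() reflects the thread level the application asked for. As a user-side illustration (standard MPI calls only, not part of this commit):

/* User-side sketch: request MPI_THREAD_MULTIPLE and check what was granted. */
#include <stdio.h>
#include <mpi.h>

int main(int argc, char **argv)
{
    int provided;

    MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
    if (provided < MPI_THREAD_MULTIPLE) {
        fprintf(stderr, "MPI_THREAD_MULTIPLE not available (provided=%d)\n",
                provided);
    }
    MPI_Finalize();
    return 0;
}

With this change the failure path still returns OMPI_ERR_NOT_SUPPORTED, so the UCX PML disqualifies itself and another PML can be selected, while the loud PML_UCX_ERROR is downgraded to a verbose message.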