
MCA/COMMON/UCX: minor unification of del_procs calls

- some common functionality of del_procs calls is moved into
  mca_common module
- blocking ucp_put call is replaced by non-blocking routine

Signed-off-by: Sergey Oblomov <sergeyo@mellanox.com>
Sergey Oblomov 2018-07-02 15:06:55 +03:00
parent 48bbe7af04
commit c2bd6af9f2
4 changed files with 29 additions and 24 deletions

View File

@@ -18,6 +18,7 @@
 #include "opal/mca/pmix/pmix.h"
 #include "ompi/message/message.h"
 #include "ompi/mca/pml/base/pml_base_bsend.h"
+#include "opal/mca/common/ucx/common_ucx.h"
 #include "pml_ucx_request.h"
 #include <inttypes.h>
@@ -374,29 +375,19 @@ static void mca_pml_ucx_waitall(void **reqs, int *count_p)
     PML_UCX_VERBOSE(2, "waiting for %d disconnect requests", *count_p);
     for (i = 0; i < *count_p; ++i) {
-        do {
-            opal_progress();
-            status = ucp_request_test(reqs[i], NULL);
-        } while (status == UCS_INPROGRESS);
+        status = opal_common_ucx_wait_request(reqs[i], ompi_pml_ucx.ucp_worker);
         if (status != UCS_OK) {
             PML_UCX_ERROR("disconnect request failed: %s",
                           ucs_status_string(status));
         }
         ucp_request_free(reqs[i]);
         reqs[i] = NULL;
     }
     *count_p = 0;
 }
 
-static void mca_pml_fence_complete_cb(int status, void *fenced)
-{
-    *(int*)fenced = 1;
-}
-
 int mca_pml_ucx_del_procs(struct ompi_proc_t **procs, size_t nprocs)
 {
-    volatile int fenced = 0;
     ompi_proc_t *proc;
     int num_reqs;
     size_t max_reqs;
@@ -447,10 +438,7 @@ int mca_pml_ucx_del_procs(struct ompi_proc_t **procs, size_t nprocs)
     mca_pml_ucx_waitall(dreqs, &num_reqs);
     free(dreqs);
 
-    opal_pmix.fence_nb(NULL, 0, mca_pml_fence_complete_cb, (void*)&fenced);
-    while (!fenced) {
-        ucp_worker_progress(ompi_pml_ucx.ucp_worker);
-    }
+    opal_common_ucx_mca_pmix_fence(ompi_pml_ucx.ucp_worker);
 
     return OMPI_SUCCESS;
 }

View File

@@ -11,6 +11,7 @@
 #include "common_ucx.h"
 #include "opal/mca/base/mca_base_var.h"
+#include "opal/mca/pmix/pmix.h"
 
 /***********************************************************************/
@@ -36,3 +37,19 @@ OPAL_DECLSPEC void opal_common_ucx_mca_register(void)
 void opal_common_ucx_empty_complete_cb(void *request, ucs_status_t status)
 {
 }
+
+static void opal_common_ucx_mca_fence_complete_cb(int status, void *fenced)
+{
+    *(int*)fenced = 1;
+}
+
+OPAL_DECLSPEC void opal_common_ucx_mca_pmix_fence(ucp_worker_h worker)
+{
+    volatile int fenced = 0;
+
+    opal_pmix.fence_nb(NULL, 0, opal_common_ucx_mca_fence_complete_cb, (void*)&fenced);
+    while (!fenced) {
+        ucp_worker_progress(worker);
+    }
+}

View File

@@ -27,6 +27,7 @@ extern int opal_common_ucx_progress_iterations;
 OPAL_DECLSPEC void opal_common_ucx_mca_register(void);
 OPAL_DECLSPEC void opal_common_ucx_empty_complete_cb(void *request, ucs_status_t status);
+OPAL_DECLSPEC void opal_common_ucx_mca_pmix_fence(ucp_worker_h worker);
 
 static inline
 ucs_status_t opal_common_ucx_wait_request(ucs_status_ptr_t request, ucp_worker_h worker)

View File

@@ -103,15 +103,11 @@ static void mca_spml_ucx_waitall(void **reqs, int *count_p)
     SPML_VERBOSE(10, "waiting for %d disconnect requests", *count_p);
     for (i = 0; i < *count_p; ++i) {
-        do {
-            opal_progress();
-            status = ucp_request_test(reqs[i], NULL);
-        } while (status == UCS_INPROGRESS);
+        status = opal_common_ucx_wait_request(reqs[i], mca_spml_ucx.ucp_worker);
         if (status != UCS_OK) {
             SPML_ERROR("disconnect request failed: %s",
                        ucs_status_string(status));
         }
         ucp_request_release(reqs[i]);
         reqs[i] = NULL;
     }
@@ -175,8 +171,9 @@ int mca_spml_ucx_del_procs(ompi_proc_t** procs, size_t nprocs)
     mca_spml_ucx_waitall(dreqs, &num_reqs);
     free(dreqs);
 
-    opal_pmix.fence(NULL, 0);
+    opal_common_ucx_mca_pmix_fence(mca_spml_ucx.ucp_worker);
 
     free(mca_spml_ucx.ucp_peers);
     mca_spml_ucx.ucp_peers = NULL;
 
     return OSHMEM_SUCCESS;
 }
@@ -585,12 +582,14 @@ int mca_spml_ucx_put(void* dst_addr, size_t size, void* src_addr, int dst)
 {
     void *rva;
     ucs_status_t status;
+    ucs_status_ptr_t request;
     spml_ucx_mkey_t *ucx_mkey;
 
     ucx_mkey = mca_spml_ucx_get_mkey(dst, dst_addr, &rva, &mca_spml_ucx);
-    status = ucp_put(mca_spml_ucx.ucp_peers[dst].ucp_conn, src_addr, size,
-                     (uint64_t)rva, ucx_mkey->rkey);
+    request = ucp_put_nb(mca_spml_ucx.ucp_peers[dst].ucp_conn, src_addr, size,
+                         (uint64_t)rva, ucx_mkey->rkey, opal_common_ucx_empty_complete_cb);
+    /* TODO: replace wait_request by opal_common_ucx_wait_request_opal_status */
+    status = opal_common_ucx_wait_request(request, mca_spml_ucx.ucp_worker);
 
     return ucx_status_to_oshmem(status);
 }
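
The hunk above swaps the blocking ucp_put() for ucp_put_nb() followed by the shared wait helper. Below is a rough, stand-alone sketch of that pattern, not OMPI code: the function and callback names are invented for illustration, the endpoint, worker, remote address and rkey are assumed to have been set up elsewhere, and the inline progress loop stands in for what opal_common_ucx_wait_request() does.

#include <ucp/api/ucp.h>

/* Completion is detected by polling the request, so the callback has nothing to do. */
static void example_empty_send_cb(void *request, ucs_status_t status)
{
    (void)request;
    (void)status;
}

/* Issue a non-blocking put and wait for its local completion -- roughly the
 * shape of the new mca_spml_ucx_put() path. */
static ucs_status_t example_put_and_wait(ucp_ep_h ep, ucp_worker_h worker,
                                         const void *src, size_t len,
                                         uint64_t remote_addr, ucp_rkey_h rkey)
{
    ucs_status_ptr_t request;
    ucs_status_t     status;

    request = ucp_put_nb(ep, src, len, remote_addr, rkey, example_empty_send_cb);
    if (request == NULL) {
        return UCS_OK;                  /* completed immediately */
    }
    if (UCS_PTR_IS_ERR(request)) {
        return UCS_PTR_STATUS(request); /* operation failed to start */
    }

    /* progress the worker until the request reports completion */
    do {
        ucp_worker_progress(worker);
        status = ucp_request_check_status(request);
    } while (status == UCS_INPROGRESS);

    ucp_request_free(request);
    return status;
}

Waiting immediately after posting preserves the blocking semantics of the old ucp_put() call while routing completion handling through a single shared code path, which is the unification the commit message describes.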