1
1

PML/UCX: blocked calls optimizations

- added UCX progress prioritization: run a configurable number of UCX worker
  progress iterations (opal_common_ucx.progress_iterations) before yielding to
  opal_progress() in blocking wait loops

Signed-off-by: Sergey Oblomov <sergeyo@mellanox.com>
Этот коммит содержится в:
Sergey Oblomov 2018-08-21 13:22:18 +03:00
родитель 1665b8db8f
Коммит b0f87f2235
2 изменённых файлов: 61 добавлений и 56 удалений

Просмотреть файл

@ -482,6 +482,7 @@ int mca_pml_ucx_recv(void *buf, size_t count, ompi_datatype_t *datatype, int src
ucp_tag_recv_info_t info;
ucs_status_t status;
void *req;
int i;
PML_UCX_TRACE_RECV("%s", buf, count, datatype, src, tag, comm, "recv");
@ -492,11 +493,14 @@ int mca_pml_ucx_recv(void *buf, size_t count, ompi_datatype_t *datatype, int src
mca_pml_ucx_get_datatype(datatype),
ucp_tag, ucp_tag_mask, req);
for (;;) {
status = ucp_request_test(req, &info);
if (status != UCS_INPROGRESS) {
mca_pml_ucx_set_recv_status_safe(mpi_status, status, &info);
return OMPI_SUCCESS;
while (1) {
for (i = 0; i < opal_common_ucx.progress_iterations; i++) {
status = ucp_request_test(req, &info);
if (status != UCS_INPROGRESS) {
mca_pml_ucx_set_recv_status_safe(mpi_status, status, &info);
return OMPI_SUCCESS;
}
ucp_worker_progress(ompi_pml_ucx.ucp_worker);
}
opal_progress();
}
@ -685,16 +689,13 @@ mca_pml_ucx_send_nb(ucp_ep_h ep, const void *buf, size_t count,
req = (ompi_request_t*)mca_pml_ucx_common_send(ep, buf, count, datatype,
mca_pml_ucx_get_datatype(datatype),
tag, mode,
mca_pml_ucx_send_completion);
tag, mode, cb);
if (OPAL_LIKELY(req == NULL)) {
return OMPI_SUCCESS;
} else if (!UCS_PTR_IS_ERR(req)) {
PML_UCX_VERBOSE(8, "got request %p", (void*)req);
ucp_worker_progress(ompi_pml_ucx.ucp_worker);
ompi_request_wait(&req, MPI_STATUS_IGNORE);
return OMPI_SUCCESS;
MCA_COMMON_UCX_WAIT_LOOP(req, ompi_pml_ucx.ucp_worker, "ucx send", ompi_request_free(&req));
} else {
PML_UCX_ERROR("ucx send failed: %s", ucs_status_string(UCS_PTR_STATUS(req)));
return OMPI_ERROR;
@ -707,7 +708,7 @@ mca_pml_ucx_send_nbr(ucp_ep_h ep, const void *buf, size_t count,
ucp_datatype_t ucx_datatype, ucp_tag_t tag)
{
void *req;
ucs_status_ptr_t req;
ucs_status_t status;
/* coverity[bad_alloc_arithmetic] */
@ -717,12 +718,7 @@ mca_pml_ucx_send_nbr(ucp_ep_h ep, const void *buf, size_t count,
return OMPI_SUCCESS;
}
ucp_worker_progress(ompi_pml_ucx.ucp_worker);
while ((status = ucp_request_check_status(req)) == UCS_INPROGRESS) {
opal_progress();
}
return OPAL_LIKELY(UCS_OK == status) ? OMPI_SUCCESS : OMPI_ERROR;
MCA_COMMON_UCX_WAIT_LOOP(req, ompi_pml_ucx.ucp_worker, "ucx send", (void)0);
}
#endif
@ -783,18 +779,21 @@ int mca_pml_ucx_probe(int src, int tag, struct ompi_communicator_t* comm,
ucp_tag_t ucp_tag, ucp_tag_mask;
ucp_tag_recv_info_t info;
ucp_tag_message_h ucp_msg;
int i;
PML_UCX_TRACE_PROBE("probe", src, tag, comm);
PML_UCX_MAKE_RECV_TAG(ucp_tag, ucp_tag_mask, tag, src, comm);
for (;;) {
ucp_msg = ucp_tag_probe_nb(ompi_pml_ucx.ucp_worker, ucp_tag, ucp_tag_mask,
0, &info);
if (ucp_msg != NULL) {
mca_pml_ucx_set_recv_status_safe(mpi_status, UCS_OK, &info);
return OMPI_SUCCESS;
while (1) {
for (i = 0; i < opal_common_ucx.progress_iterations; i++) {
ucp_msg = ucp_tag_probe_nb(ompi_pml_ucx.ucp_worker, ucp_tag, ucp_tag_mask,
0, &info);
if (ucp_msg != NULL) {
mca_pml_ucx_set_recv_status_safe(mpi_status, UCS_OK, &info);
return OMPI_SUCCESS;
}
ucp_worker_progress(ompi_pml_ucx.ucp_worker);
}
opal_progress();
}
}

Просмотреть файл

@ -52,6 +52,31 @@ BEGIN_C_DECLS
__VA_ARGS__); \
}
#define MCA_COMMON_UCX_WAIT_LOOP(_request, _worker, _msg, _completed) \
while (1) { \
ucs_status_t status; \
int i; \
/* call UCX progress */ \
for (i = 0; i < opal_common_ucx.progress_iterations; i++) { \
if (UCS_INPROGRESS != (status = opal_common_ucx_request_status(_request))) { \
_completed; \
if (OPAL_LIKELY(UCS_OK == status)) { \
return OPAL_SUCCESS; \
} else { \
MCA_COMMON_UCX_VERBOSE(1, "%s failed: %d, %s", \
(_msg) ? (_msg) : __FUNCTION__, \
UCS_PTR_STATUS(_request), \
ucs_status_string(UCS_PTR_STATUS(_request))); \
return OPAL_ERROR; \
} \
} \
ucp_worker_progress(_worker); \
} \
/* call OPAL progress on every opal_common_ucx_progress_iterations */ \
/* calls to UCX progress */ \
opal_progress(); \
}
typedef struct opal_common_ucx_module {
int output;
int verbose;
@ -74,16 +99,22 @@ OPAL_DECLSPEC int opal_common_ucx_mca_pmix_fence(ucp_worker_h worker);
OPAL_DECLSPEC int opal_common_ucx_del_procs(opal_common_ucx_del_proc_t *procs, size_t count,
size_t my_rank, size_t max_disconnect, ucp_worker_h worker);
static inline
ucs_status_t opal_common_ucx_request_status(ucs_status_ptr_t request)
{
#if !HAVE_DECL_UCP_REQUEST_CHECK_STATUS
ucp_tag_recv_info_t info;
return ucp_request_test(request, &info);
#else
return ucp_request_check_status(request);
#endif
}
static inline
int opal_common_ucx_wait_request(ucs_status_ptr_t request, ucp_worker_h worker,
const char *msg)
{
ucs_status_t status;
int i;
#if !HAVE_DECL_UCP_REQUEST_CHECK_STATUS
ucp_tag_recv_info_t info;
#endif
/* check for request completed or failed */
if (OPAL_LIKELY(UCS_OK == request)) {
return OPAL_SUCCESS;
@ -94,32 +125,7 @@ int opal_common_ucx_wait_request(ucs_status_ptr_t request, ucp_worker_h worker,
return OPAL_ERROR;
}
while (1) {
/* call UCX progress */
for (i = 0; i < opal_common_ucx.progress_iterations; i++) {
if (UCS_INPROGRESS != (status =
#if HAVE_DECL_UCP_REQUEST_CHECK_STATUS
ucp_request_check_status(request)
#else
ucp_request_test(request, &info)
#endif
)) {
ucp_request_free(request);
if (OPAL_LIKELY(UCS_OK == status)) {
return OPAL_SUCCESS;
} else {
MCA_COMMON_UCX_VERBOSE(1, "%s failed: %d, %s", msg ? msg : __func__,
UCS_PTR_STATUS(request),
ucs_status_string(UCS_PTR_STATUS(request)));
return OPAL_ERROR;
}
}
ucp_worker_progress(worker);
}
/* call OPAL progress on every opal_common_ucx_progress_iterations
* calls to UCX progress */
opal_progress();
}
MCA_COMMON_UCX_WAIT_LOOP(request, worker, msg, ucp_request_free(request));
}
static inline