diff --git a/ompi/mca/pml/yalla/pml_yalla.c b/ompi/mca/pml/yalla/pml_yalla.c
index 436519ef2a..6cbd62cf1b 100644
--- a/ompi/mca/pml/yalla/pml_yalla.c
+++ b/ompi/mca/pml/yalla/pml_yalla.c
@@ -66,14 +66,14 @@ static int send_ep_address(void)
     address = alloca(addrlen);
     error = mxm_ep_get_address(ompi_pml_yalla.mxm_ep, address, &addrlen);
     if (MXM_OK != error) {
-        PML_YALLA_ERROR("Failed to get EP address");
+        PML_YALLA_ERROR("%s", "Failed to get EP address");
         return OMPI_ERROR;
     }
 
     OPAL_MODEX_SEND(rc, OPAL_PMIX_GLOBAL, &mca_pml_yalla_component.pmlm_version,
                     address, addrlen);
     if (OMPI_SUCCESS != rc) {
-        PML_YALLA_ERROR("Open MPI couldn't distribute EP connection details");
+        PML_YALLA_ERROR("%s", "Open MPI couldn't distribute EP connection details");
         return OMPI_ERROR;
     }
 
@@ -87,7 +87,7 @@ static int recv_ep_address(ompi_proc_t *proc, void **address_p, size_t *addrlen_
     OPAL_MODEX_RECV(rc, &mca_pml_yalla_component.pmlm_version,
                     &proc->super.proc_name, address_p, addrlen_p);
     if (rc < 0) {
-        PML_YALLA_ERROR("Failed to receive EP address");
+        PML_YALLA_ERROR("%s", "Failed to receive EP address");
     }
     return rc;
 }
@@ -103,18 +103,18 @@ int mca_pml_yalla_open(void)
 {
     mxm_error_t error;
 
-    PML_YALLA_VERBOSE(1, "mca_pml_yalla_open");
+    PML_YALLA_VERBOSE(1, "%s", "mca_pml_yalla_open");
 
     /* Set memory hooks */
     if ((OPAL_MEMORY_FREE_SUPPORT | OPAL_MEMORY_MUNMAP_SUPPORT) ==
         ((OPAL_MEMORY_FREE_SUPPORT | OPAL_MEMORY_MUNMAP_SUPPORT) &
          opal_mem_hooks_support_level())) {
-        PML_YALLA_VERBOSE(1, "enabling on-demand memory mapping");
+        PML_YALLA_VERBOSE(1, "%s", "enabling on-demand memory mapping");
         opal_setenv("MXM_MPI_MEM_ON_DEMAND_MAP", "y", false, &environ);
         ompi_pml_yalla.using_mem_hooks = 1;
     } else {
-        PML_YALLA_VERBOSE(1, "disabling on-demand memory mapping");
+        PML_YALLA_VERBOSE(1, "%s", "disabling on-demand memory mapping");
         ompi_pml_yalla.using_mem_hooks = 0;
     }
 
     opal_setenv("MXM_MPI_SINGLE_THREAD", ompi_mpi_thread_multiple ? "n" : "y",
"n" : "y", @@ -137,7 +137,7 @@ int mca_pml_yalla_open(void) int mca_pml_yalla_close(void) { - PML_YALLA_VERBOSE(1, "mca_pml_yalla_close"); + PML_YALLA_VERBOSE(1, "%s", "mca_pml_yalla_close"); if (ompi_pml_yalla.ctx_opts != NULL) { mxm_config_free_context_opts(ompi_pml_yalla.ctx_opts); @@ -157,7 +157,7 @@ int mca_pml_yalla_init(void) mxm_error_t error; int rc; - PML_YALLA_VERBOSE(1, "mca_pml_yalla_init"); + PML_YALLA_VERBOSE(1, "%s", "mca_pml_yalla_init"); if (ompi_pml_yalla.using_mem_hooks) { opal_mem_hooks_register_release(mca_pml_yalla_mem_release_cb, NULL); @@ -188,7 +188,7 @@ int mca_pml_yalla_init(void) int mca_pml_yalla_cleanup(void) { - PML_YALLA_VERBOSE(1, "mca_pml_yalla_cleanup"); + PML_YALLA_VERBOSE(1, "%s", "mca_pml_yalla_cleanup"); opal_progress_unregister(mca_pml_yalla_progress); @@ -241,7 +241,7 @@ int mca_pml_yalla_add_procs(struct ompi_proc_t **procs, size_t nprocs) free(address); if (MXM_OK != error) { - PML_YALLA_ERROR("Failed to connect"); + PML_YALLA_ERROR("%s", "Failed to connect"); return OMPI_ERROR; } @@ -256,7 +256,7 @@ int mca_pml_yalla_del_procs(struct ompi_proc_t **procs, size_t nprocs) size_t i; if (ompi_mpi_finalized) { - PML_YALLA_VERBOSE(3, "using bulk powerdown"); + PML_YALLA_VERBOSE(3, "%s", "using bulk powerdown"); mxm_ep_powerdown(ompi_pml_yalla.mxm_ep); } @@ -303,7 +303,7 @@ int mca_pml_yalla_del_comm(struct ompi_communicator_t* comm) mxm_mq_h mq = (void*)comm->c_pml_comm; if (ompi_pml_yalla.mxm_context == NULL) { - PML_YALLA_ERROR("Destroying communicator after MXM context is destroyed"); + PML_YALLA_ERROR("%s", "Destroying communicator after MXM context is destroyed"); return OMPI_ERROR; } @@ -390,7 +390,7 @@ int mca_pml_yalla_isend_init(const void *buf, size_t count, ompi_datatype_t *dat { mca_pml_yalla_send_request_t *sreq; - sreq = MCA_PML_YALLA_SREQ_INIT(buf, count, datatype, dst, tag, mode, comm, + sreq = MCA_PML_YALLA_SREQ_INIT((void *)buf, count, datatype, dst, tag, mode, comm, OMPI_REQUEST_INACTIVE); sreq->super.ompi.req_persistent = true; sreq->super.flags = MCA_PML_YALLA_REQUEST_FLAG_SEND; @@ -459,7 +459,7 @@ int mca_pml_yalla_isend(const void *buf, size_t count, ompi_datatype_t *datatype mxm_error_t error; int rc; - sreq = MCA_PML_YALLA_SREQ_INIT(buf, count, datatype, dst, tag, mode, comm, + sreq = MCA_PML_YALLA_SREQ_INIT((void *)buf, count, datatype, dst, tag, mode, comm, OMPI_REQUEST_ACTIVE); sreq->super.ompi.req_persistent = false; sreq->super.flags = 0; @@ -493,7 +493,7 @@ int mca_pml_yalla_send(const void *buf, size_t count, ompi_datatype_t *datatype, mxm_send_req_t sreq; mxm_error_t error; - PML_YALLA_INIT_MXM_SEND_REQ(&sreq, buf, count, datatype, dst, tag, mode, comm, send); + PML_YALLA_INIT_MXM_SEND_REQ(&sreq, (void *)buf, count, datatype, dst, tag, mode, comm, send); PML_YALLA_INIT_BLOCKING_MXM_SEND_REQ(&sreq); PML_YALLA_VERBOSE(8, "send to %d tag %d dtype %s count %zu", dst, tag, diff --git a/ompi/mca/pml/yalla/pml_yalla_request.h b/ompi/mca/pml/yalla/pml_yalla_request.h index 7b96024e2f..0ccc026c0c 100644 --- a/ompi/mca/pml/yalla/pml_yalla_request.h +++ b/ompi/mca/pml/yalla/pml_yalla_request.h @@ -25,7 +25,15 @@ struct pml_yalla_base_request { ompi_request_t ompi; mca_pml_yalla_convertor_t *convertor; int flags; - mxm_req_base_t mxm_base[0]; /* overlaps with base of send/recv */ + /* overlaps with base of send/recv + * In ISO C90, you would have to give contents a length of 1, + * which means either you waste space or complicate the argument to malloc. 
+     * Note:
+     * - 1 was the portable way to go, though it was rather strange
+     * - 0 was better at indicating intent, but not legal as far as
+     *   the Standard was concerned; it was supported as an extension by some compilers (including gcc)
+     */
+    mxm_req_base_t mxm_base[1];
 };
 
 struct pml_yalla_send_request {
@@ -126,28 +134,26 @@ void mca_pml_yalla_init_reqs(void);
         } \
     }
 
-#define MCA_PML_YALLA_RREQ_INIT(_buf, _count, _datatype, _src, _tag, _comm, _state) \
-    ({ \
-        mca_pml_yalla_recv_request_t *rreq = (mca_pml_yalla_recv_request_t *)PML_YALLA_FREELIST_GET(&ompi_pml_yalla.recv_reqs); \
-        \
-        PML_YALLA_INIT_OMPI_REQ(&rreq->super.ompi, _comm, _state); \
-        PML_YALLA_INIT_MXM_RECV_REQ(&rreq->mxm, _buf, _count, _datatype, _src, _tag, \
-                                    _comm, irecv, rreq); \
-        rreq; \
-    })
+static inline mca_pml_yalla_recv_request_t* MCA_PML_YALLA_RREQ_INIT(void *_buf, size_t _count, ompi_datatype_t *_datatype,
+                                                                    int _src, int _tag, struct ompi_communicator_t* _comm, int _state)
+{
+    mca_pml_yalla_recv_request_t *rreq = (mca_pml_yalla_recv_request_t *)PML_YALLA_FREELIST_GET(&ompi_pml_yalla.recv_reqs);
+    PML_YALLA_INIT_OMPI_REQ(&rreq->super.ompi, _comm, _state);
+    PML_YALLA_INIT_MXM_RECV_REQ(&rreq->mxm, _buf, _count, _datatype, _src, _tag, _comm, irecv, rreq);
+    return rreq;
+}
 
-#define MCA_PML_YALLA_SREQ_INIT(_buf, _count, _datatype, _dst, _tag, _mode, _comm, _state) \
-    ({ \
-        mca_pml_yalla_send_request_t *sreq = (mca_pml_yalla_send_request_t *)PML_YALLA_FREELIST_GET(&ompi_pml_yalla.send_reqs); \
-        \
-        PML_YALLA_INIT_OMPI_REQ(&sreq->super.ompi, _comm, _state); \
-        PML_YALLA_INIT_MXM_SEND_REQ(&sreq->mxm, _buf, _count, _datatype, _dst, _tag, \
-                                    _mode, _comm, isend, sreq); \
-        sreq->super.ompi.req_status.MPI_TAG = _tag; \
-        sreq->super.ompi.req_status.MPI_SOURCE = (_comm)->c_my_rank; \
-        sreq->super.ompi.req_status._ucount = _count; \
-        sreq; \
-    })
+static inline mca_pml_yalla_send_request_t* MCA_PML_YALLA_SREQ_INIT(void *_buf, size_t _count, ompi_datatype_t *_datatype,
+                                                                    int _dst, int _tag, mca_pml_base_send_mode_t _mode, struct ompi_communicator_t* _comm, int _state)
+{
+    mca_pml_yalla_send_request_t *sreq = (mca_pml_yalla_send_request_t *)PML_YALLA_FREELIST_GET(&ompi_pml_yalla.send_reqs);
+    PML_YALLA_INIT_OMPI_REQ(&sreq->super.ompi, _comm, _state);
+    PML_YALLA_INIT_MXM_SEND_REQ(&sreq->mxm, _buf, _count, _datatype, _dst, _tag, _mode, _comm, isend, sreq);
+    sreq->super.ompi.req_status.MPI_TAG = _tag;
+    sreq->super.ompi.req_status.MPI_SOURCE = (_comm)->c_my_rank;
+    sreq->super.ompi.req_status._ucount = _count;
+    return sreq;
+}
 
 #define PML_YALLA_INIT_MXM_PROBE_REQ(_rreq, _rank, _tag, _comm) \
     { \
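
Note on the "%s" changes in pml_yalla.c: with fixed string literals the output is unchanged, but routing each message through an explicit "%s" conversion means the text can never be parsed as a format string, and it keeps printf-format checkers such as GCC's -Wformat-security quiet. A minimal self-contained sketch of the failure mode the pattern avoids; log_error() is a hypothetical stand-in, since the PML_YALLA_ERROR definition is outside this patch:

    #include <stdarg.h>
    #include <stdio.h>

    /* Hypothetical printf-style logger, analogous to PML_YALLA_ERROR. */
    static void log_error(const char *fmt, ...)
        __attribute__((format(printf, 1, 2)));

    static void log_error(const char *fmt, ...)
    {
        va_list ap;
        va_start(ap, fmt);
        vfprintf(stderr, fmt, ap);
        va_end(ap);
        fputc('\n', stderr);
    }

    int main(void)
    {
        const char *msg = "progress: 100% complete";
        /* log_error(msg); would warn under -Wformat-security, and the '%'
         * would be misread as a conversion specifier at run time. */
        log_error("%s", msg);   /* the pattern the patch adopts: always safe */
        return 0;
    }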
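
Note on the mxm_base[0] -> mxm_base[1] change in pml_yalla_request.h: the member is a trailing array that overlaps the start of the derived send/recv request, and the new comment summarizes the classic pre-C99 trade-off between the "[1]" and "[0]" forms. A self-contained sketch of the idiom the comment describes; message_t and message_alloc() are illustrative names, not Open MPI types:

    #include <stddef.h>
    #include <stdlib.h>
    #include <string.h>

    /* Header-plus-payload struct using the C90-portable "[1]" form. */
    typedef struct {
        int    tag;
        size_t len;
        char   data[1];   /* trailing array; really holds len bytes */
    } message_t;

    static message_t *message_alloc(const char *src, size_t len)
    {
        /* offsetof() yields the header size without the dummy element,
         * so the "[1]" form need not waste space in the allocation. */
        message_t *msg = malloc(offsetof(message_t, data) + len);
        if (msg != NULL) {
            msg->len = len;
            memcpy(msg->data, src, len);
        }
        return msg;
    }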
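
Note on the MCA_PML_YALLA_RREQ_INIT/MCA_PML_YALLA_SREQ_INIT rewrite: the removed macros relied on GNU C statement expressions, the ({ ... }) construct, to make a macro yield a value. Static inline functions express the same thing in standard C and give the parameters real types; this is also why the callers in pml_yalla.c now cast with (void *)buf, since the const void * argument must match the function's void *_buf parameter instead of being substituted textually. A toy before/after with invented names:

    /* GNU extension: the last expression is the block's value. */
    #define TWICE(x) ({ int t_ = (x); t_ + t_; })

    /* Portable equivalent, mirroring the patch's approach. */
    static inline int twice(int x)
    {
        int t = x;
        return t + t;
    }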