
* Update usage of PtlHandleIsEqual to match the new semantics

* Properly set message to MPI_MESSAGE_NULL in the right places
* Fix double free of buffer for non-contiguous blocking sends
* Remove useless debugging output

This commit was SVN r26604.
This commit is contained in:
Brian Barrett 2012-06-14 22:24:23 +00:00
parent 6760840ebb
commit 946ec4cd97
5 changed files with 17 additions and 15 deletions

View file

@@ -386,13 +386,13 @@ ompi_mtl_portals4_component_init(bool enable_progress_threads,
     return &ompi_mtl_portals4.base;
  error:
-    if (PTL_OK != PtlHandleIsEqual(ompi_mtl_portals4.long_overflow_me_h, PTL_INVALID_HANDLE)) {
+    if (!PtlHandleIsEqual(ompi_mtl_portals4.long_overflow_me_h, PTL_INVALID_HANDLE)) {
         PtlMEUnlink(ompi_mtl_portals4.long_overflow_me_h);
     }
-    if (PTL_OK != PtlHandleIsEqual(ompi_mtl_portals4.zero_md_h, PTL_INVALID_HANDLE)) {
+    if (!PtlHandleIsEqual(ompi_mtl_portals4.zero_md_h, PTL_INVALID_HANDLE)) {
         PtlMDRelease(ompi_mtl_portals4.zero_md_h);
     }
-    if (PTL_OK != PtlHandleIsEqual(ompi_mtl_portals4.md_h, PTL_INVALID_HANDLE)) {
+    if (!PtlHandleIsEqual(ompi_mtl_portals4.md_h, PTL_INVALID_HANDLE)) {
         PtlMDRelease(ompi_mtl_portals4.md_h);
     }
     if (ompi_mtl_portals4.read_idx != (ptl_pt_index_t) ~0UL) {
@@ -401,10 +401,10 @@ ompi_mtl_portals4_component_init(bool enable_progress_threads,
     if (ompi_mtl_portals4.recv_idx != (ptl_pt_index_t) ~0UL) {
         PtlPTFree(ompi_mtl_portals4.ni_h, ompi_mtl_portals4.recv_idx);
     }
-    if (PTL_OK != PtlHandleIsEqual(ompi_mtl_portals4.send_eq_h, PTL_INVALID_HANDLE)) {
+    if (!PtlHandleIsEqual(ompi_mtl_portals4.send_eq_h, PTL_INVALID_HANDLE)) {
         PtlEQFree(ompi_mtl_portals4.send_eq_h);
     }
-    if (PTL_OK != PtlHandleIsEqual(ompi_mtl_portals4.recv_eq_h, PTL_INVALID_HANDLE)) {
+    if (!PtlHandleIsEqual(ompi_mtl_portals4.recv_eq_h, PTL_INVALID_HANDLE)) {
         PtlEQFree(ompi_mtl_portals4.recv_eq_h);
     }
     return NULL;
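
The hunks above show the pattern repeated throughout this commit: PtlHandleIsEqual is now treated as returning a plain boolean (nonzero when the two handles compare equal) rather than a PTL_* status code, so the guard changes from PTL_OK != PtlHandleIsEqual(...) to !PtlHandleIsEqual(...). A minimal sketch of the updated idiom, with a hypothetical helper name (release_md_if_valid is not part of Open MPI or Portals):

    #include <portals4.h>

    /* Hypothetical helper illustrating the updated guard: only release the
     * memory descriptor if the handle was actually initialized, i.e. it is
     * not the PTL_INVALID_HANDLE sentinel. */
    static void release_md_if_valid(ptl_handle_md_t md_h)
    {
        /* Old idiom: if (PTL_OK != PtlHandleIsEqual(md_h, PTL_INVALID_HANDLE))
         * New idiom: the return value is a boolean "are these equal?" */
        if (!PtlHandleIsEqual(md_h, PTL_INVALID_HANDLE)) {
            PtlMDRelease(md_h);
        }
    }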

View file

@@ -45,8 +45,6 @@ ompi_mtl_portals4_flowctl_init(void)
     ompi_mtl_portals4.flowctl.max_send_slots = (ompi_mtl_portals4.send_queue_size - 3) / 3;
     ompi_mtl_portals4.flowctl.send_slots = ompi_mtl_portals4.flowctl.max_send_slots;
-    opal_output(ompi_mtl_base_output, "num send slots: %d", ompi_mtl_portals4.flowctl.max_send_slots);
     ompi_mtl_portals4.flowctl.alert_req.type = portals4_req_flowctl;
     ompi_mtl_portals4.flowctl.alert_req.event_callback = flowctl_alert_callback;

View file

@@ -207,6 +207,8 @@ ompi_mtl_portals4_improbe(struct mca_mtl_base_module_t *mtl,
             *message = NULL;
             return OMPI_ERR_OUT_OF_RESOURCE;
         }
+    } else {
+        (*message) = MPI_MESSAGE_NULL;
     }
     return OMPI_SUCCESS;

View file

@@ -456,5 +456,7 @@ ompi_mtl_portals4_imrecv(struct mca_mtl_base_module_t* mtl,
                          ptl_request->opcount,
                          (int)length, (unsigned long) ptl_request));
+    (*message) = MPI_MESSAGE_NULL;
     return ompi_mtl_portals4_recv_progress(&(ptl_message->ev), &ptl_request->super);
 }
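
Together, the probe and matched-receive hunks make the message-handle handling consistent: improbe hands back MPI_MESSAGE_NULL when nothing matched, and imrecv resets the caller's handle once the matched message has been consumed. A caller-side sketch of the two paths, using only standard MPI-3 matched-probe calls (buffer, source, and tag are illustrative):

    #include <mpi.h>
    #include <stdio.h>

    /* Illustrative caller: probe for any message; if one matched, receive it
     * through the message handle, otherwise the handle stays MPI_MESSAGE_NULL. */
    static void try_matched_recv(MPI_Comm comm)
    {
        int flag = 0, payload = 0;
        MPI_Message msg = MPI_MESSAGE_NULL;
        MPI_Status status;

        MPI_Improbe(MPI_ANY_SOURCE, MPI_ANY_TAG, comm, &flag, &msg, &status);
        if (flag) {
            /* Consumes the matched message; MPI_Mrecv sets msg back to
             * MPI_MESSAGE_NULL, which is what the imrecv hunk above does. */
            MPI_Mrecv(&payload, 1, MPI_INT, &msg, &status);
            printf("got %d from rank %d\n", payload, status.MPI_SOURCE);
        }
    }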

View file

@@ -50,7 +50,7 @@ ompi_mtl_portals4_callback(ptl_event_t *ev,
                             "send %lu hit flow control",
                             ptl_request->opcount));
-        if (PTL_OK != PtlHandleIsEqual(ptl_request->me_h, PTL_INVALID_HANDLE)) {
+        if (!PtlHandleIsEqual(ptl_request->me_h, PTL_INVALID_HANDLE)) {
             ret = PtlMEUnlink(ptl_request->me_h);
             if (PTL_OK != ret) {
                 opal_output_verbose(1, ompi_mtl_base_output,
@@ -83,7 +83,7 @@ ompi_mtl_portals4_callback(ptl_event_t *ev,
     if ((PTL_EVENT_ACK == ev->type) &&
         (PTL_PRIORITY_LIST == ev->ptl_list) &&
         (eager == ompi_mtl_portals4.protocol) &&
-        (!(PTL_OK != PtlHandleIsEqual(ptl_request->me_h, PTL_INVALID_HANDLE)))) {
+        (!PtlHandleIsEqual(ptl_request->me_h, PTL_INVALID_HANDLE))) {
         /* long expected messages with the eager protocol won't see a
            get event to complete the message. Give them an extra
            count to cause the message to complete with just the SEND
@@ -480,18 +480,18 @@ ompi_mtl_portals4_send(struct mca_mtl_base_module_t* mtl,
     ret = ompi_mtl_portals4_send_start(mtl, comm, dest, tag,
                                        convertor, mode, &ptl_request.super);
-    if (OPAL_UNLIKELY(OMPI_SUCCESS != ret)) goto cleanup;
+    if (OPAL_UNLIKELY(OMPI_SUCCESS != ret)) {
+        if (NULL != ptl_request.super.buffer_ptr) {
+            free(ptl_request.super.buffer_ptr);
+        }
+        return ret;
+    }
     while (false == ptl_request.complete) {
         ompi_mtl_portals4_progress();
     }
     ret = ptl_request.retval;
- cleanup:
-    if (NULL != ptl_request.super.buffer_ptr) {
-        free(ptl_request.super.buffer_ptr);
-    }
     return ret;
 }
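
The restructured error handling in the last hunk is what removes the double free for non-contiguous blocking sends: on a failed start the bounce buffer is freed immediately and the function returns, while on the successful path the completion callback owns the buffer, so the old cleanup label (which freed it a second time after the wait loop) is gone. A minimal sketch of that ownership split, using hypothetical stand-in types rather than the real MTL structures:

    #include <stdlib.h>

    /* Hypothetical stand-ins; not the Open MPI request types. */
    struct fake_request { void *buffer_ptr; int complete; int retval; };

    static int  start_send(struct fake_request *req) { (void) req; return 0; } /* stub */
    static void progress(struct fake_request *req)   { req->complete = 1; }    /* stub */

    static int blocking_send(struct fake_request *req)
    {
        int ret = start_send(req);
        if (0 != ret) {
            /* Error path owns the bounce buffer: free it here and return. */
            free(req->buffer_ptr);
            req->buffer_ptr = NULL;
            return ret;
        }

        /* Success path: the completion callback frees the buffer, so there is
         * no cleanup here; freeing it again would be the double free this
         * commit removes. */
        while (!req->complete) {
            progress(req);
        }
        return req->retval;
    }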