  * No need for waiting until exposure epochs are over in order to
    complete a WIN_FREE
  * Fix race condition in threaded builds with pending unlocks and
    finishing an epoch
  * Fix memory leak due to use of OBJ_DESTRUCT instead of OBJ_RELEASE
  * Fix race condition between releasing multiple shared locks and
    starting a new lock
  * Need to increment the shared count if starting a new shared
    lock once an exclusive lock finishes

This commit was SVN r15185.
Этот коммит содержится в:
Brian Barrett 2007-06-24 22:36:00 +00:00
родитель 84f102c343
Коммит 2ed0548da8
2 изменённых файлов: 68 добавлений и 28 удалений

Просмотреть файл

@ -40,12 +40,6 @@ ompi_osc_pt2pt_module_free(ompi_win_t *win)
"pt2pt component destroying window with id %d",
module->p2p_comm->c_contextid);
OPAL_THREAD_LOCK(&module->p2p_lock);
while (OMPI_WIN_EXPOSE_EPOCH & ompi_win_get_mode(win)) {
opal_condition_wait(&module->p2p_cond, &module->p2p_lock);
}
OPAL_THREAD_UNLOCK(&module->p2p_lock);
/* finish with a barrier */
if (ompi_group_size(win->w_group) > 1) {
ret = module->p2p_comm->c_coll.coll_barrier(module->p2p_comm);

Просмотреть файл

@ -419,8 +419,9 @@ ompi_osc_pt2pt_module_lock(int lock_type,
ompi_win_append_mode(win, OMPI_WIN_ACCESS_EPOCH | OMPI_WIN_LOCK_ACCESS);
OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
"%d sending lock request to %d",
module->p2p_comm->c_my_rank, target));
"%d: sending lock request to %d",
module->p2p_comm->c_my_rank,
target));
/* generate a lock request */
ompi_osc_pt2pt_control_send(module,
proc,
@ -465,8 +466,9 @@ ompi_osc_pt2pt_module_unlock(int target,
/* send the unlock request */
OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
"%d sending unlock request to %d",
module->p2p_comm->c_my_rank, target));
"%d: sending unlock request to %d with %d requests",
module->p2p_comm->c_my_rank, target,
out_count));
ompi_osc_pt2pt_control_send(module,
proc,
OMPI_OSC_PT2PT_HDR_UNLOCK_REQ,
@ -495,6 +497,10 @@ ompi_osc_pt2pt_module_unlock(int target,
}
OPAL_THREAD_UNLOCK(&module->p2p_lock);
OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
"%d: finished unlock to %d",
module->p2p_comm->c_my_rank, target));
/* set our mode on the window */
ompi_win_remove_mode(win, OMPI_WIN_ACCESS_EPOCH | OMPI_WIN_LOCK_ACCESS);
@ -516,11 +522,14 @@ ompi_osc_pt2pt_passive_lock(ompi_osc_pt2pt_module_t *module,
if (lock_type == MPI_LOCK_EXCLUSIVE) {
if (module->p2p_lock_status == 0) {
module->p2p_lock_status = MPI_LOCK_EXCLUSIVE;
OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
"%d: setting lock status to EXCLUSIVE (from %d)",
module->p2p_comm->c_my_rank, origin));
ompi_win_append_mode(module->p2p_win, OMPI_WIN_EXPOSE_EPOCH);
send_ack = true;
} else {
OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
"%d queuing lock request from %d (%d)",
"%d: queuing lock request from %d (type=%d)",
module->p2p_comm->c_my_rank, origin, lock_type));
new_pending = OBJ_NEW(ompi_osc_pt2pt_pending_lock_t);
new_pending->proc = proc;
@ -531,11 +540,14 @@ ompi_osc_pt2pt_passive_lock(ompi_osc_pt2pt_module_t *module,
if (module->p2p_lock_status != MPI_LOCK_EXCLUSIVE) {
module->p2p_lock_status = MPI_LOCK_SHARED;
module->p2p_shared_count++;
OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
"%d: setting lock status to SHARED (from %d), count %d",
module->p2p_comm->c_my_rank, origin, module->p2p_shared_count));
ompi_win_append_mode(module->p2p_win, OMPI_WIN_EXPOSE_EPOCH);
send_ack = true;
} else {
OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
"queuing lock request from %d (%d) lock_type:%d",
"%d: queuing lock request from %d (type=%d)",
module->p2p_comm->c_my_rank, origin, lock_type));
new_pending = OBJ_NEW(ompi_osc_pt2pt_pending_lock_t);
new_pending->proc = proc;
@ -549,7 +561,7 @@ ompi_osc_pt2pt_passive_lock(ompi_osc_pt2pt_module_t *module,
if (send_ack) {
OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
"%d sending lock ack to %d",
"%d: sending lock ack to %d",
module->p2p_comm->c_my_rank, origin));
ompi_osc_pt2pt_control_send(module, proc,
OMPI_OSC_PT2PT_HDR_LOCK_REQ,
@ -572,7 +584,8 @@ ompi_osc_pt2pt_passive_unlock(ompi_osc_pt2pt_module_t *module,
assert(module->p2p_lock_status != 0);
OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
"received unlock request from %d with %d requests\n",
"%d: received unlock request from %d with %d requests\n",
module->p2p_comm->c_my_rank,
origin, count));
new_pending = OBJ_NEW(ompi_osc_pt2pt_pending_lock_t);
@ -591,51 +604,84 @@ int
ompi_osc_pt2pt_passive_unlock_complete(ompi_osc_pt2pt_module_t *module)
{
ompi_osc_pt2pt_pending_lock_t *new_pending = NULL;
opal_list_t copy_unlock_acks;
if (module->p2p_num_pending_in != 0) return OMPI_SUCCESS;
OPAL_THREAD_LOCK(&(module->p2p_lock));
OPAL_THREAD_LOCK(&(module->p2p_lock));
if (module->p2p_num_pending_in != 0) {
OPAL_THREAD_UNLOCK(&module->p2p_lock);
return OMPI_SUCCESS;
}
if (module->p2p_lock_status == MPI_LOCK_EXCLUSIVE) {
ompi_win_remove_mode(module->p2p_win, OMPI_WIN_EXPOSE_EPOCH);
module->p2p_lock_status = 0;
} else {
module->p2p_shared_count -= opal_list_get_size(&module->p2p_unlocks_pending);
OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
"%d: decrementing shared count to %d",
module->p2p_comm->c_my_rank, module->p2p_shared_count));
if (module->p2p_shared_count == 0) {
ompi_win_remove_mode(module->p2p_win, OMPI_WIN_EXPOSE_EPOCH);
module->p2p_lock_status = 0;
}
}
OBJ_CONSTRUCT(&copy_unlock_acks, opal_list_t);
/* copy over any unlocks that have been satisfied (possibly
multiple if SHARED) */
opal_list_join(&copy_unlock_acks,
opal_list_get_end(&copy_unlock_acks),
&module->p2p_unlocks_pending);
OPAL_THREAD_UNLOCK(&module->p2p_lock);
/* issue whichever unlock acks we should issue */
while (NULL != (new_pending = (ompi_osc_pt2pt_pending_lock_t*)
opal_list_remove_first(&module->p2p_unlocks_pending))) {
opal_list_remove_first(&copy_unlock_acks))) {
OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
"sending unlock reply to proc"));
"%d: sending unlock ack to proc %d",
module->p2p_comm->c_my_rank,
new_pending->proc->proc_name.vpid));
ompi_osc_pt2pt_control_send(module,
new_pending->proc,
OMPI_OSC_PT2PT_HDR_UNLOCK_REPLY,
OMPI_SUCCESS, OMPI_SUCCESS);
OBJ_DESTRUCT(new_pending);
OBJ_RELEASE(new_pending);
}
/* if we were really unlocked, see if we have more to process */
new_pending = (ompi_osc_pt2pt_pending_lock_t*)
opal_list_remove_first(&(module->p2p_locks_pending));
OBJ_DESTRUCT(&copy_unlock_acks);
/* if we were really unlocked, see if we have another lock request
we can satisfy */
OPAL_THREAD_LOCK(&module->p2p_lock);
if (0 == module->p2p_lock_status) {
new_pending = (ompi_osc_pt2pt_pending_lock_t*)
opal_list_remove_first(&(module->p2p_locks_pending));
if (NULL != new_pending) {
OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
"%d: sending lock ack to proc %d",
module->p2p_comm->c_my_rank,
new_pending->proc->proc_name.vpid));
ompi_win_append_mode(module->p2p_win, OMPI_WIN_EXPOSE_EPOCH);
/* set lock state and generate a lock request */
module->p2p_lock_status = new_pending->lock_type;
if (MPI_LOCK_SHARED == new_pending->lock_type) {
module->p2p_shared_count++;
}
}
} else {
new_pending = NULL;
}
OPAL_THREAD_UNLOCK(&(module->p2p_lock));
if (NULL != new_pending) {
OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
"sending unlock ack to proc %d",
new_pending->proc->proc_name.vpid));
ompi_win_append_mode(module->p2p_win, OMPI_WIN_EXPOSE_EPOCH);
/* set lock state and generate a lock request */
module->p2p_lock_status = new_pending->lock_type;
ompi_osc_pt2pt_control_send(module,
new_pending->proc,
OMPI_OSC_PT2PT_HDR_LOCK_REQ,
module->p2p_comm->c_my_rank,
OMPI_SUCCESS);
OBJ_DESTRUCT(new_pending);
OBJ_RELEASE(new_pending);
}
return OMPI_SUCCESS;