
- Protect callers of opal/ompi_condition_wait from spurious wakeups,
  possible when building with pthreads.
  Compiled on Linux ia32 with and without
  --enable-progress-threads

This commit was SVN r8682.
This commit is contained in:
Rainer Keller 2006-01-12 17:13:08 +00:00
parent c0bad339af
commit 95f886b6ab
6 changed files with 53 additions and 41 deletions
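The fix applies one pattern throughout: every opal_condition_wait/ompi_condition_wait
call is wrapped in a while loop that re-checks the predicate being waited on, because a
condition wait built on pthread_cond_wait may return even though nothing signalled the
condition. Below is a minimal sketch of that pattern, written directly against pthreads
rather than the OPAL wrappers; the names done, wait_until_done and mark_done are
illustrative and do not appear in the commit:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static bool done = false;                 /* predicate guarded by 'lock' */

/* Wait until 'done' becomes true.  pthread_cond_wait() may wake spuriously,
 * so the predicate is re-checked in a loop instead of a single 'if'. */
static void wait_until_done(void)
{
    pthread_mutex_lock(&lock);
    while (!done) {                       /* loop, not 'if' */
        pthread_cond_wait(&cond, &lock);  /* atomically releases, then re-takes 'lock' */
    }
    pthread_mutex_unlock(&lock);
}

/* Set the predicate and wake one waiter. */
static void *mark_done(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&lock);
    done = true;
    pthread_cond_signal(&cond);
    pthread_mutex_unlock(&lock);
    return NULL;
}

int main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, mark_done, NULL);
    wait_until_done();                    /* correct even if woken spuriously */
    pthread_join(t, NULL);
    puts("done");
    return 0;
}

Each hunk below is an instance of this change: a bare wait, or an if around a wait,
becomes a while that re-tests the same condition before returning to the caller.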

View File

@@ -133,6 +133,7 @@ OMPI_DECLSPEC int ompi_free_list_grow(ompi_free_list_t* flist, size_t num_elemen
     while(NULL == item) { \
         if((fl)->fl_max_to_alloc <= (fl)->fl_num_allocated) { \
             (fl)->fl_num_waiting++; \
+            while ((fl)->fl_max_to_alloc <= (fl)->fl_num_allocated) \
             opal_condition_wait(&((fl)->fl_condition), &((fl)->fl_lock)); \
             (fl)->fl_num_waiting--; \
         } else { \

View File

@@ -66,7 +66,10 @@ int ompi_request_wait_any(
     /* give up and sleep until completion */
     OPAL_THREAD_LOCK(&ompi_request_lock);
     ompi_request_waiting++;
-    do {
+    /*
+     * We will break out of while{} as soon as all requests have completed.
+     */
+    while (1) {
         rptr = requests;
         num_requests_null_inactive = 0;
         for (i = 0; i < count; i++, rptr++) {
@@ -87,10 +90,10 @@ int ompi_request_wait_any(
         }
         if(num_requests_null_inactive == count)
             break;
-        if (completed < 0) {
+        while (completed < 0) {
             opal_condition_wait(&ompi_request_cond, &ompi_request_lock);
         }
-    } while (completed < 0);
+    }
     ompi_request_waiting--;
     OPAL_THREAD_UNLOCK(&ompi_request_lock);

View File

@@ -129,6 +129,7 @@ OMPI_DECLSPEC int opal_free_list_grow(opal_free_list_t* flist, size_t num_elemen
     while(NULL == item) { \
         if((fl)->fl_max_to_alloc <= (fl)->fl_num_allocated) { \
             (fl)->fl_num_waiting++; \
+            while ((fl)->fl_max_to_alloc <= (fl)->fl_num_allocated) \
             opal_condition_wait(&((fl)->fl_condition), &((fl)->fl_lock)); \
             (fl)->fl_num_waiting--; \
         } else { \

View File

@@ -52,7 +52,10 @@ int orte_gpr_proxy_begin_compound_cmd(void)
     if (orte_gpr_proxy_globals.compound_cmd_mode) {
         orte_gpr_proxy_globals.compound_cmd_waiting++;
-        opal_condition_wait(&orte_gpr_proxy_globals.compound_cmd_condition, &orte_gpr_proxy_globals.wait_for_compound_mutex);
+        while (orte_gpr_proxy_globals.compound_cmd_mode) {
+            opal_condition_wait(&orte_gpr_proxy_globals.compound_cmd_condition,
+                                &orte_gpr_proxy_globals.wait_for_compound_mutex);
+        }
         orte_gpr_proxy_globals.compound_cmd_waiting--;
     }

View File

@@ -105,7 +105,10 @@ int orte_iof_base_flush(void)
     }
     if(pending != 0) {
         if(opal_event_progress_thread() == false) {
-            opal_condition_wait(&orte_iof_base.iof_condition, &orte_iof_base.iof_lock);
+            while (opal_event_progress_thread() == false) {
+                opal_condition_wait(&orte_iof_base.iof_condition,
+                                    &orte_iof_base.iof_lock);
+            }
         } else {
             OPAL_THREAD_UNLOCK(&orte_iof_base.iof_lock);
             opal_event_loop(OPAL_EVLOOP_ONCE);

View File

@@ -889,9 +889,10 @@ int orte_pls_rsh_launch(orte_jobid_t jobid)
         rsh_daemon_info_t *daemon_info;
         OPAL_THREAD_LOCK(&mca_pls_rsh_component.lock);
-        if (mca_pls_rsh_component.num_children++ >=
+        while (mca_pls_rsh_component.num_children++ >=
             mca_pls_rsh_component.num_concurrent) {
-            opal_condition_wait(&mca_pls_rsh_component.cond, &mca_pls_rsh_component.lock);
+            opal_condition_wait(&mca_pls_rsh_component.cond,
+                                &mca_pls_rsh_component.lock);
         }
         OPAL_THREAD_UNLOCK(&mca_pls_rsh_component.lock);