From 22af95b266e54b8d276a88d6c8b55dd532b04158 Mon Sep 17 00:00:00 2001
From: Nathan Hjelm
Date: Mon, 30 Nov 2015 16:37:09 -0700
Subject: [PATCH] ompi/proc: make proc system always thread safe

This commit changes the OPAL_THREAD_LOCK/OPAL_THREAD_UNLOCK calls in
ompi/proc to opal_mutex_lock/opal_mutex_unlock. This allows
multi-threaded BTLs to create ompi_proc_t's without having to set
opal_using_threads. There should be no performance hit, as none of the
lock points are in the critical path.

Signed-off-by: Nathan Hjelm
---
 ompi/proc/proc.c | 46 +++++++++++++++++++++++-----------------------
 1 file changed, 23 insertions(+), 23 deletions(-)

diff --git a/ompi/proc/proc.c b/ompi/proc/proc.c
index 87a36779a0..594f771903 100644
--- a/ompi/proc/proc.c
+++ b/ompi/proc/proc.c
@@ -84,10 +84,10 @@ void ompi_proc_destruct(ompi_proc_t* proc)
     if (NULL != proc->super.proc_hostname) {
         free(proc->super.proc_hostname);
     }
-    OPAL_THREAD_LOCK(&ompi_proc_lock);
+    opal_mutex_lock (&ompi_proc_lock);
     opal_list_remove_item(&ompi_proc_list, (opal_list_item_t*)proc);
     opal_hash_table_remove_value_ptr (&ompi_proc_hash, &proc->super.proc_name, sizeof (proc->super.proc_name));
-    OPAL_THREAD_UNLOCK(&ompi_proc_lock);
+    opal_mutex_unlock (&ompi_proc_lock);
 }
 
 /**
@@ -209,7 +209,7 @@ opal_proc_t *ompi_proc_for_name (const opal_process_name_t proc_name)
         return &proc->super;
     }
 
-    OPAL_THREAD_LOCK(&ompi_proc_lock);
+    opal_mutex_lock (&ompi_proc_lock);
     do {
         /* double-check that another competing thread has not added this proc */
         ret = opal_hash_table_get_value_ptr (&ompi_proc_hash, &proc_name, sizeof (proc_name), (void **) &proc);
@@ -231,7 +231,7 @@ opal_proc_t *ompi_proc_for_name (const opal_process_name_t proc_name)
             break;
         }
     } while (0);
-    OPAL_THREAD_UNLOCK(&ompi_proc_lock);
+    opal_mutex_unlock (&ompi_proc_lock);
 
     return (opal_proc_t *) proc;
 }
@@ -319,7 +319,7 @@ int ompi_proc_complete_init(void)
     ompi_proc_t *proc;
     int ret, errcode = OMPI_SUCCESS;
 
-    OPAL_THREAD_LOCK(&ompi_proc_lock);
+    opal_mutex_lock (&ompi_proc_lock);
 
     OPAL_LIST_FOREACH(proc, &ompi_proc_list, ompi_proc_t) {
         ret = ompi_proc_complete_init_single (proc);
@@ -328,7 +328,7 @@ int ompi_proc_complete_init(void)
             break;
         }
     }
-    OPAL_THREAD_UNLOCK(&ompi_proc_lock);
+    opal_mutex_unlock (&ompi_proc_lock);
 
     if (ompi_process_info.num_procs >= ompi_add_procs_cutoff) {
         uint16_t u16, *u16ptr;
@@ -419,7 +419,7 @@ ompi_proc_t **ompi_proc_get_allocated (size_t *size)
     my_name = *OMPI_CAST_RTE_NAME(&ompi_proc_local_proc->super.proc_name);
 
     /* First count how many match this jobid */
-    OPAL_THREAD_LOCK(&ompi_proc_lock);
+    opal_mutex_lock (&ompi_proc_lock);
     OPAL_LIST_FOREACH(proc, &ompi_proc_list, ompi_proc_t) {
         if (OPAL_EQUAL == ompi_rte_compare_name_fields(mask, OMPI_CAST_RTE_NAME(&proc->super.proc_name), &my_name)) {
             ++count;
@@ -429,7 +429,7 @@ ompi_proc_t **ompi_proc_get_allocated (size_t *size)
     /* allocate an array */
     procs = (ompi_proc_t**) malloc(count * sizeof(ompi_proc_t*));
     if (NULL == procs) {
-        OPAL_THREAD_UNLOCK(&ompi_proc_lock);
+        opal_mutex_unlock (&ompi_proc_lock);
         return NULL;
     }
 
@@ -454,7 +454,7 @@ ompi_proc_t **ompi_proc_get_allocated (size_t *size)
             procs[count++] = proc;
         }
     }
-    OPAL_THREAD_UNLOCK(&ompi_proc_lock);
+    opal_mutex_unlock (&ompi_proc_lock);
 
     *size = count;
     return procs;
@@ -518,7 +518,7 @@ ompi_proc_t** ompi_proc_all(size_t* size)
         return NULL;
     }
 
-    OPAL_THREAD_LOCK(&ompi_proc_lock);
+    opal_mutex_lock (&ompi_proc_lock);
     OPAL_LIST_FOREACH(proc, &ompi_proc_list, ompi_proc_t) {
         /* We know this isn't consistent with the behavior in ompi_proc_world,
          * but we are leaving the RETAIN for now because the code using this function
@@ -529,7 +529,7 @@ ompi_proc_t** ompi_proc_all(size_t* size)
         OBJ_RETAIN(proc);
         procs[count++] = proc;
     }
-    OPAL_THREAD_UNLOCK(&ompi_proc_lock);
+    opal_mutex_unlock (&ompi_proc_lock);
     *size = count;
     return procs;
 }
@@ -560,14 +560,14 @@ ompi_proc_t * ompi_proc_find ( const ompi_process_name_t * name )
     /* return the proc-struct which matches this jobid+process id */
     mask = OMPI_RTE_CMP_JOBID | OMPI_RTE_CMP_VPID;
 
-    OPAL_THREAD_LOCK(&ompi_proc_lock);
+    opal_mutex_lock (&ompi_proc_lock);
     OPAL_LIST_FOREACH(proc, &ompi_proc_list, ompi_proc_t) {
         if (OPAL_EQUAL == ompi_rte_compare_name_fields(mask, &proc->super.proc_name, name)) {
             rproc = proc;
             break;
         }
     }
-    OPAL_THREAD_UNLOCK(&ompi_proc_lock);
+    opal_mutex_unlock (&ompi_proc_lock);
 
     return rproc;
 }
@@ -579,7 +579,7 @@ int ompi_proc_refresh(void)
     ompi_vpid_t i = 0;
     int ret=OMPI_SUCCESS;
 
-    OPAL_THREAD_LOCK(&ompi_proc_lock);
+    opal_mutex_lock (&ompi_proc_lock);
 
     OPAL_LIST_FOREACH(proc, &ompi_proc_list, ompi_proc_t) {
         /* Does not change: proc->super.proc_name.vpid */
@@ -602,7 +602,7 @@ int ompi_proc_refresh(void)
         }
     }
 
-    OPAL_THREAD_UNLOCK(&ompi_proc_lock);
+    opal_mutex_unlock (&ompi_proc_lock);
 
     return ret;
 }
@@ -614,7 +614,7 @@ ompi_proc_pack(ompi_proc_t **proclist, int proclistsize,
     int rc;
     char *nspace;
 
-    OPAL_THREAD_LOCK(&ompi_proc_lock);
+    opal_mutex_lock (&ompi_proc_lock);
 
     /* cycle through the provided array, packing the OMPI level
      * data for each proc. This data may or may not be included
@@ -633,7 +633,7 @@ ompi_proc_pack(ompi_proc_t **proclist, int proclistsize,
         rc = opal_dss.pack(buf, &(proclist[i]->super.proc_name), 1, OMPI_NAME);
         if(rc != OPAL_SUCCESS) {
             OMPI_ERROR_LOG(rc);
-            OPAL_THREAD_UNLOCK(&ompi_proc_lock);
+            opal_mutex_unlock (&ompi_proc_lock);
             return rc;
         }
         /* retrieve and send the corresponding nspace for this job
@@ -642,25 +642,25 @@ ompi_proc_pack(ompi_proc_t **proclist, int proclistsize,
         rc = opal_dss.pack(buf, &nspace, 1, OPAL_STRING);
         if(rc != OPAL_SUCCESS) {
             OMPI_ERROR_LOG(rc);
-            OPAL_THREAD_UNLOCK(&ompi_proc_lock);
+            opal_mutex_unlock (&ompi_proc_lock);
             return rc;
         }
         /* pack architecture flag */
         rc = opal_dss.pack(buf, &(proclist[i]->super.proc_arch), 1, OPAL_UINT32);
         if(rc != OPAL_SUCCESS) {
             OMPI_ERROR_LOG(rc);
-            OPAL_THREAD_UNLOCK(&ompi_proc_lock);
+            opal_mutex_unlock (&ompi_proc_lock);
             return rc;
         }
         /* pass the name of the host this proc is on */
        rc = opal_dss.pack(buf, &(proclist[i]->super.proc_hostname), 1, OPAL_STRING);
         if(rc != OPAL_SUCCESS) {
             OMPI_ERROR_LOG(rc);
-            OPAL_THREAD_UNLOCK(&ompi_proc_lock);
+            opal_mutex_unlock (&ompi_proc_lock);
             return rc;
         }
     }
-    OPAL_THREAD_UNLOCK(&ompi_proc_lock);
+    opal_mutex_unlock (&ompi_proc_lock);
 
     return OMPI_SUCCESS;
 }
@@ -672,7 +672,7 @@ ompi_proc_find_and_add(const ompi_process_name_t * name, bool* isnew)
     /* return the proc-struct which matches this jobid+process id */
     mask = OMPI_RTE_CMP_JOBID | OMPI_RTE_CMP_VPID;
 
-    OPAL_THREAD_LOCK(&ompi_proc_lock);
+    opal_mutex_lock (&ompi_proc_lock);
     OPAL_LIST_FOREACH(proc, &ompi_proc_list, ompi_proc_t) {
         if (OPAL_EQUAL == ompi_rte_compare_name_fields(mask, &proc->super.proc_name, name)) {
             rproc = proc;
@@ -690,7 +690,7 @@ ompi_proc_find_and_add(const ompi_process_name_t * name, bool* isnew)
         ompi_proc_allocate (name->jobid, name->vpid, &rproc);
     }
 
-    OPAL_THREAD_UNLOCK(&ompi_proc_lock);
+    opal_mutex_unlock (&ompi_proc_lock);
 
     return rproc;
 }
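
Reviewer note (not part of the patch): the semantic difference this commit
relies on is that OPAL_THREAD_LOCK acquires the mutex only when
opal_using_threads() is set, while opal_mutex_lock acquires it
unconditionally. The sketch below models that distinction with plain
pthreads; using_threads, conditional_lock, and unconditional_lock are
illustrative stand-ins, not the actual OPAL macros or their exact
definitions.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the opal_using_threads() flag: false in a
 * single-threaded MPI run unless the application enabled threading. */
static bool using_threads = false;

static pthread_mutex_t proc_lock = PTHREAD_MUTEX_INITIALIZER;

/* Models OPAL_THREAD_LOCK: locking is skipped entirely when the process
 * believes it is single-threaded, so a BTL progress thread mutating the
 * proc list could race with the main thread. */
static void conditional_lock(pthread_mutex_t *m)
{
    if (using_threads) {
        pthread_mutex_lock(m);
    }
}

static void conditional_unlock(pthread_mutex_t *m)
{
    if (using_threads) {
        pthread_mutex_unlock(m);
    }
}

/* Models opal_mutex_lock: the mutex is always taken, which is the
 * guarantee this patch switches ompi/proc to. */
static void unconditional_lock(pthread_mutex_t *m)
{
    pthread_mutex_lock(m);
}

int main(void)
{
    /* With using_threads == false this pair is a no-op: no mutual
     * exclusion is provided at all. */
    conditional_lock(&proc_lock);
    conditional_unlock(&proc_lock);

    /* The unconditional form serializes callers regardless of the
     * flag, at the cost of one uncontended lock/unlock. */
    unconditional_lock(&proc_lock);
    puts("proc list protected unconditionally");
    pthread_mutex_unlock(&proc_lock);
    return 0;
}

Since none of these call sites are on the performance-critical path,
trading the opal_using_threads() branch for an uncontended mutex
acquisition should cost nothing measurable, which matches the reasoning
in the commit message.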