
Merge pull request #421 from igor-ivanov/pr/fix-oshmem-coverity

oshmem: Fix set of coverity issues
This commit is contained in:
igor-ivanov 2015-02-24 21:40:06 +04:00
parents 44f93b4832 3e2dd782ea
commit 0f44cdd779
5 changed files with 29 additions and 19 deletions

View file

@@ -258,6 +258,9 @@ static int ompi_mtl_mxm_recv_ep_address(ompi_proc_t *source_proc, void **address
                                   &modex_cur_size);
     if (OMPI_SUCCESS != rc) {
         MXM_ERROR("Open MPI couldn't distribute EP connection details");
+        free(*address_p);
+        *address_p = NULL;
+        *address_len_p = 0;
         goto bail;
     }
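
Reviewer note: the added lines address a Coverity resource-leak report by making the error path release the buffer it had already placed in the caller's out-parameters, and by resetting those out-parameters so the caller never sees a stale pointer. A minimal sketch of the pattern, under hypothetical names (make_copy is not part of the Open MPI API):

    #include <stdlib.h>
    #include <string.h>

    /* Illustrative only: copy src into a fresh buffer via out-parameters,
     * unwinding cleanly if a later step fails. */
    static int make_copy(const char *src, size_t len, void **out_p, size_t *len_p)
    {
        *out_p = malloc(len);
        if (NULL == *out_p) {
            *len_p = 0;
            return -1;
        }
        memcpy(*out_p, src, len);
        *len_p = len;

        if (0 == len) {           /* stands in for "a later step failed" */
            free(*out_p);         /* release the allocation...             */
            *out_p = NULL;        /* ...and reset the out-parameters so    */
            *len_p = 0;           /* the caller cannot use a dangling pointer */
            return -1;
        }
        return 0;
    }
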
@@ -310,12 +313,14 @@ int ompi_mtl_mxm_module_init(void)
         MXM_VERBOSE(1, "MXM support will be disabled because of total number "
                        "of processes (%lu) is less than the minimum set by the "
                        "mtl_mxm_np MCA parameter (%u)", totps, ompi_mtl_mxm.mxm_np);
+        free(procs);
         return OMPI_ERR_NOT_SUPPORTED;
     }

     MXM_VERBOSE(1, "MXM support enabled");

     if (ORTE_NODE_RANK_INVALID == (lr = ompi_process_info.my_node_rank)) {
         MXM_ERROR("Unable to obtain local node rank");
+        free(procs);
         return OMPI_ERROR;
     }

     nlps = ompi_process_info.num_local_peers + 1;
@@ -325,6 +330,7 @@ int ompi_mtl_mxm_module_init(void)
             mxlr = max(mxlr, procs[proc]->super.proc_name.vpid);
         }
     }
+    free(procs);

     /* Setup the endpoint options and local addresses to bind to. */
 #if MXM_API < MXM_VERSION(2,0)
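
Reviewer note: the three free(procs) additions plug the same leak; the proc array obtained earlier in this function is heap-allocated and was previously dropped on the early-error returns. An alternative that avoids sprinkling free() across every exit is a single cleanup label; a sketch under hypothetical names, not the approach this commit takes:

    #include <stdlib.h>

    static int init_module(void)
    {
        int rc = 0;
        void **procs = malloc(16 * sizeof(void *)); /* stands in for the proc query */
        if (NULL == procs) {
            return -1;
        }

        if (0 != rc) {            /* stands in for "unsupported configuration" */
            goto out;             /* every failure path funnels through one free() */
        }
        /* ... use procs ... */

    out:
        free(procs);
        return rc;
    }
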
@@ -409,7 +415,7 @@ int ompi_mtl_mxm_add_procs(struct mca_mtl_base_module_t *mtl, size_t nprocs,
     mxm_conn_req_t *conn_reqs;
     size_t ep_index = 0;
 #endif
-    void *ep_address;
+    void *ep_address = NULL;
     size_t ep_address_len;
     mxm_error_t err;
     size_t i;
@@ -441,11 +447,13 @@ int ompi_mtl_mxm_add_procs(struct mca_mtl_base_module_t *mtl, size_t nprocs,
 #if MXM_API < MXM_VERSION(2,0)
         if (ep_address_len != sizeof(ep_info[i])) {
             MXM_ERROR("Invalid endpoint address length");
+            free(ep_address);
             rc = OMPI_ERROR;
             goto bail;
         }

         memcpy(&ep_info[i], ep_address, ep_address_len);
+        free(ep_address);
         conn_reqs[ep_index].ptl_addr[MXM_PTL_SELF] = (struct sockaddr *)&(ep_info[i].ptl_addr[MXM_PTL_SELF]);
         conn_reqs[ep_index].ptl_addr[MXM_PTL_SHM] = (struct sockaddr *)&(ep_info[i].ptl_addr[MXM_PTL_SHM]);
         conn_reqs[ep_index].ptl_addr[MXM_PTL_RDMA] = (struct sockaddr *)&(ep_info[i].ptl_addr[MXM_PTL_RDMA]);
@@ -455,6 +463,7 @@ int ompi_mtl_mxm_add_procs(struct mca_mtl_base_module_t *mtl, size_t nprocs,
         endpoint = OBJ_NEW(mca_mtl_mxm_endpoint_t);
         endpoint->mtl_mxm_module = &ompi_mtl_mxm;
         err = mxm_ep_connect(ompi_mtl_mxm.ep, ep_address, &endpoint->mxm_conn);
+        free(ep_address);
         if (err != MXM_OK) {
             MXM_ERROR("MXM returned connect error: %s\n", mxm_error_string(err));
             rc = OMPI_ERROR;
@@ -462,7 +471,6 @@ int ompi_mtl_mxm_add_procs(struct mca_mtl_base_module_t *mtl, size_t nprocs,
         }
         procs[i]->proc_endpoints[OMPI_PROC_ENDPOINT_TAG_MTL] = endpoint;
 #endif
-        free(ep_address);
     }

 #if MXM_API < MXM_VERSION(2,0)
@@ -511,7 +519,7 @@ bail:
 int ompi_mtl_add_single_proc(struct mca_mtl_base_module_t *mtl,
                              struct ompi_proc_t* procs)
 {
-    void *ep_address;
+    void *ep_address = NULL;
     size_t ep_address_len;
     mxm_error_t err;
     int rc;
@@ -533,10 +541,12 @@ int ompi_mtl_add_single_proc(struct mca_mtl_base_module_t *mtl,
     if (ep_address_len != sizeof(ep_info)) {
         MXM_ERROR("Invalid endpoint address length");
+        free(ep_address);
         return OMPI_ERROR;
     }

     memcpy(&ep_info, ep_address, ep_address_len);
+    free(ep_address);
     conn_req.ptl_addr[MXM_PTL_SELF] = (struct sockaddr *)&(ep_info.ptl_addr[MXM_PTL_SELF]);
     conn_req.ptl_addr[MXM_PTL_SHM] = (struct sockaddr *)&(ep_info.ptl_addr[MXM_PTL_SHM]);
     conn_req.ptl_addr[MXM_PTL_RDMA] = (struct sockaddr *)&(ep_info.ptl_addr[MXM_PTL_RDMA]);
@@ -563,6 +573,7 @@ int ompi_mtl_add_single_proc(struct mca_mtl_base_module_t *mtl,
     endpoint = OBJ_NEW(mca_mtl_mxm_endpoint_t);
     endpoint->mtl_mxm_module = &ompi_mtl_mxm;
     err = mxm_ep_connect(ompi_mtl_mxm.ep, ep_address, &endpoint->mxm_conn);
+    free(ep_address);
     if (err != MXM_OK) {
         MXM_ERROR("MXM returned connect error: %s\n", mxm_error_string(err));
         return OMPI_ERROR;
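
Reviewer note: across these hunks ep_address is now initialized to NULL and freed immediately after its last use, instead of once at the bottom of the loop body, where the earlier goto bail error paths skipped it and leaked the buffer. Because free(NULL) is a defined no-op, the NULL initialization keeps a bail taken before the allocation safe. The shape of the pattern, sketched with placeholder names:

    #include <stdlib.h>

    static int connect_all(size_t nprocs)
    {
        void *ep_address = NULL;   /* NULL so bailing before allocation is safe */
        int rc = 0;
        size_t i;

        for (i = 0; i < nprocs; ++i) {
            ep_address = malloc(64);      /* stands in for the address lookup */
            if (NULL == ep_address) {
                rc = -1;
                goto bail;
            }
            /* ... consume ep_address ... */
            free(ep_address);             /* freed right after its last use */
            ep_address = NULL;            /* prevents a double free at bail */
        }
    bail:
        free(ep_address);                 /* no-op when NULL */
        return rc;
    }
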

View file

@@ -350,7 +350,7 @@ static int _algorithm_recursive_doubling(struct oshmem_group_t *group,
         SCOLL_VERBOSE(12,
                       "[#%d] Restore special synchronization array",
                       group->my_pe);
-        for (i = 0; pSync && (i < _SHMEM_BARRIER_SYNC_SIZE); i++) {
+        for (i = 0; i < _SHMEM_BARRIER_SYNC_SIZE; i++) {
             pSync[i] = _SHMEM_SYNC_VALUE;
         }
     } else {
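
Reviewer note: Coverity flags a condition like pSync && (i < N) when the surrounding code already treats pSync as never NULL; the per-iteration guard then reads as dead code that contradicts the rest of the path. If NULL were genuinely possible, the test belongs once, before the loop. A sketch of that shape (names and sizes are placeholders, not the OSHMEM definitions):

    #define SYNC_SIZE  4
    #define SYNC_VALUE 0L

    static void restore_sync(long *pSync)
    {
        int i;
        if (NULL == pSync) {      /* validate once, up front, if NULL can happen */
            return;
        }
        for (i = 0; i < SYNC_SIZE; i++) {   /* condition stays a plain bound check */
            pSync[i] = SYNC_VALUE;
        }
    }
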

View file

@@ -193,11 +193,11 @@ static inline void spml_yoda_prepare_for_put(void* buffer, size_t size, void* p_
 {
     if (use_send) {
         memcpy((void*) buffer, &size, sizeof(size));
-        memcpy((void*) ( ((char*) buffer) + sizeof(size)), &p_dst, sizeof(p_dst));
-        memcpy((void*) ( ((char*) buffer) + sizeof(size) + sizeof(p_dst)), p_src, size);
+        memcpy((void*) (((char*) buffer) + sizeof(size)), &p_dst, sizeof(void *));
+        memcpy((void*) (((char*) buffer) + sizeof(size) + sizeof(void *)), p_src, size);
     }
     else {
-        memcpy((void*) ( (unsigned char*) buffer), p_src, size);
+        memcpy((void*) ((unsigned char*) buffer), p_src, size);
     }
 }
@@ -205,9 +205,9 @@ static inline void spml_yoda_prepare_for_get_response(void* buffer, size_t size,
 {
     if (use_send) {
         memcpy((void*) buffer, &size, sizeof(size));
-        memcpy((void*) ( ((char*) buffer) + sizeof(size)), &p_dst, sizeof(p_dst));
-        memcpy((void*) ( ((char*) buffer) + sizeof(size) + sizeof(p_dst)), p_src, size);
-        memcpy((void*) ( ((char*) buffer) + sizeof(size) + sizeof(p_dst) + size), &p_getreq, sizeof(p_getreq));
+        memcpy((void*) (((char*) buffer) + sizeof(size)), &p_dst, sizeof(void *));
+        memcpy((void*) (((char*) buffer) + sizeof(size) + sizeof(void *)), p_src, size);
+        memcpy((void*) (((char*) buffer) + sizeof(size) + sizeof(void *) + size), &p_getreq, sizeof(void *));
     }
     else {
         memcpy((void*) ( (unsigned char*) buffer), p_src, size);
@@ -216,11 +216,11 @@ static inline void spml_yoda_prepare_for_get_response(void* buffer, size_t size,
 static inline void spml_yoda_prepare_for_get(void* buffer, size_t size, void* p_src, int dst, void* p_dst, void* p_getreq)
 {
-    memcpy((void*) buffer, &p_src, sizeof(p_src));
-    memcpy((void*) ( ((unsigned char*) buffer) + sizeof(p_src) ), &size, sizeof(size));
-    memcpy((void*) ( ((unsigned char*) buffer) + sizeof(p_src) + sizeof(size) ), &dst, sizeof(dst));
-    memcpy((void*) ( ((unsigned char*) buffer) + sizeof(p_src) + sizeof(size) + sizeof(dst)), &p_dst, sizeof(p_dst));
-    memcpy((void*) ( ((unsigned char*) buffer) + sizeof(p_src) + sizeof(size) + sizeof(dst) + sizeof(p_dst)), &p_getreq, sizeof(p_getreq));
+    memcpy((void*) buffer, &p_src, sizeof(void *));
+    memcpy((void*) (((unsigned char*) buffer) + sizeof(void *)), &size, sizeof(size));
+    memcpy((void*) (((unsigned char*) buffer) + sizeof(void *) + sizeof(size) ), &dst, sizeof(dst));
+    memcpy((void*) (((unsigned char*) buffer) + sizeof(void *) + sizeof(size) + sizeof(dst)), &p_dst, sizeof(void *));
+    memcpy((void*) (((unsigned char*) buffer) + sizeof(void *) + sizeof(size) + sizeof(dst) + sizeof(void *)), &p_getreq, sizeof(void *));
 }

 static void mca_yoda_put_callback(mca_btl_base_module_t* btl,
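
Reviewer note: the rewritten offsets spell every pointer-sized field as sizeof(void *). sizeof(p_dst) equals sizeof(void *) here anyway, since p_dst is a void * parameter, but using one spelling at every pack and unpack site makes it easy to check that the offset math matches on both ends. A self-contained sketch of the same discipline (layout and names are illustrative, not the yoda wire format):

    #include <stdio.h>
    #include <string.h>

    /* Pack a size field followed by a pointer field at fixed offsets. */
    static void pack(char *buf, size_t size, void *p_dst)
    {
        memcpy(buf, &size, sizeof(size));
        memcpy(buf + sizeof(size), &p_dst, sizeof(void *));
    }

    /* Unpack with identical offset math. */
    static void unpack(const char *buf, size_t *size, void **p_dst)
    {
        memcpy(size, buf, sizeof(*size));
        memcpy(p_dst, buf + sizeof(*size), sizeof(void *));
    }

    int main(void)
    {
        char buf[sizeof(size_t) + sizeof(void *)];
        size_t n;
        void *p;
        int x = 42;

        pack(buf, sizeof(x), &x);
        unpack(buf, &n, &p);
        printf("%zu bytes at %p -> %d\n", n, p, *(int *) p);
        return 0;
    }
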

View file

@@ -286,8 +286,7 @@ oshmem_proc_t** oshmem_proc_all(size_t* size)
     OPAL_THREAD_LOCK(&oshmem_proc_lock);
     for (proc = (oshmem_proc_t*) opal_list_get_first(&oshmem_proc_list);
-         ((proc != (oshmem_proc_t*) opal_list_get_end(&oshmem_proc_list))
-          && (proc != NULL ));
+         proc && (proc != (oshmem_proc_t*) opal_list_get_end(&oshmem_proc_list));
          proc = (oshmem_proc_t*)opal_list_get_next(proc)) {
         /* We know this isn't consistent with the behavior in oshmem_proc_world,
          * but we are leaving the RETAIN for now because the code using this function
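
Reviewer note: the fix relies on && short-circuiting: proc is tested against NULL before it is compared with the list sentinel, so a NULL head can never reach the comparison, and the old (proc != NULL) clause that sat after the comparison goes away. The iteration shape, with simplified list types standing in for the opal_list API:

    struct node { struct node *next; int value; };

    /* 'end' plays the role of the sentinel returned by opal_list_get_end(). */
    static int sum_list(struct node *head, struct node *end)
    {
        int sum = 0;
        struct node *n;
        /* NULL test first: && guarantees the sentinel comparison never
         * runs with a NULL pointer. */
        for (n = head; n && (n != end); n = n->next) {
            sum += n->value;
        }
        return sum;
    }
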

View file

@@ -23,9 +23,9 @@ oshmem_group_t* find_group_in_cache(int PE_start, int logPE_stride, int PE_size)
     }
     for (item = opal_list_get_first(&oshmem_group_cache_list);
-         item != opal_list_get_end(&oshmem_group_cache_list);
+         item && (item != opal_list_get_end(&oshmem_group_cache_list));
          item = opal_list_get_next(item)) {
-        if (item && !memcmp(((oshmem_group_cache_t *) item)->cache_id,
+        if (!memcmp(((oshmem_group_cache_t *) item)->cache_id,
                     cache_look_up_id,
                     3 * sizeof(int))) {
             return ((oshmem_group_cache_t *) item)->group;
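
Reviewer note: same pattern as oshmem_proc_all above, with one extra consequence: once the loop condition guarantees item is non-NULL, the old in-body if (item && ...) test is dead code, which is what Coverity reports as DEADCODE. Minimal illustration:

    struct entry { struct entry *next; int key; };

    static struct entry *find(struct entry *head, int key)
    {
        struct entry *it;
        for (it = head; it != NULL; it = it->next) {
            /* 'it' cannot be NULL here, so no 'if (it && ...)' is needed */
            if (it->key == key) {
                return it;
            }
        }
        return NULL;
    }
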