Merge pull request #7843 from devreal/clang-tidy-free
Some fixups for issues detected by clang-tidy
Commit 634f67b216
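All of the hunks below share one theme: clang-tidy flagged error paths that leak a heap allocation (or an object reference) because the function returns early without releasing what it already acquired. A minimal sketch of the bug class and of the goto-cleanup idiom most of the fixes converge on; the names here are illustrative, not taken from the Open MPI tree:

    #include <stdlib.h>

    /* Hypothetical worker: returns nonzero on failure. */
    extern int do_work(int *buf);

    /* Leaky shape: 'buf' is lost on the error path. */
    int leaky(void)
    {
        int *buf = malloc(64 * sizeof(int));
        if (NULL == buf) return -1;
        if (0 != do_work(buf)) {
            return -1;              /* leak: buf is never freed */
        }
        free(buf);
        return 0;
    }

    /* Fixed shape: record the error and fall through a single cleanup point. */
    int fixed(void)
    {
        int ret = 0;
        int *buf = malloc(64 * sizeof(int));
        if (NULL == buf) return -1;
        if (0 != do_work(buf)) {
            ret = -1;
            goto cleanup;
        }
    cleanup:
        free(buf);
        return ret;
    }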
@@ -979,6 +979,7 @@ int ompi_attr_copy_all(ompi_attribute_type_t type, void *old_object,
         /* Did the callback return non-MPI_SUCCESS? */
         if (0 != err) {
             ret = err;
+            OBJ_RELEASE(new_attr);
             goto out;
         }
@@ -178,19 +178,19 @@ int ompi_coll_base_allgather_intra_bruck(const void *sbuf, int scount,
        /* 1. copy blocks [0 .. (size - rank - 1)] from rbuf to shift buffer */
        err = ompi_datatype_copy_content_same_ddt(rdtype, ((ptrdiff_t)(size - rank) * (ptrdiff_t)rcount),
                                                  shift_buf, rbuf);
-       if (err < 0) { line = __LINE__; goto err_hndl; }
+       if (err < 0) { line = __LINE__; free(free_buf); goto err_hndl; }

        /* 2. move blocks [(size - rank) .. size] from rbuf to the begining of rbuf */
        tmpsend = (char*) rbuf + (ptrdiff_t)(size - rank) * (ptrdiff_t)rcount * rext;
        err = ompi_datatype_copy_content_same_ddt(rdtype, (ptrdiff_t)rank * (ptrdiff_t)rcount,
                                                  rbuf, tmpsend);
-       if (err < 0) { line = __LINE__; goto err_hndl; }
+       if (err < 0) { line = __LINE__; free(free_buf); goto err_hndl; }

        /* 3. copy blocks from shift buffer back to rbuf starting at block [rank]. */
        tmprecv = (char*) rbuf + (ptrdiff_t)rank * (ptrdiff_t)rcount * rext;
        err = ompi_datatype_copy_content_same_ddt(rdtype, (ptrdiff_t)(size - rank) * (ptrdiff_t)rcount,
                                                  tmprecv, shift_buf);
-       if (err < 0) { line = __LINE__; goto err_hndl; }
+       if (err < 0) { line = __LINE__; free(free_buf); goto err_hndl; }

        free(free_buf);
    }
@@ -688,6 +688,9 @@ ompi_coll_base_reduce_intra_basic_linear(const void *sbuf, void *rbuf, int count
         if (NULL != free_buffer) {
             free(free_buffer);
         }
+        if (NULL != inplace_temp_free) {
+            free(inplace_temp_free);
+        }
         return err;
     }
@@ -704,6 +707,9 @@ ompi_coll_base_reduce_intra_basic_linear(const void *sbuf, void *rbuf, int count
         if (NULL != free_buffer) {
             free(free_buffer);
         }
+        if (NULL != inplace_temp_free) {
+            free(inplace_temp_free);
+        }
         return err;
     }
@@ -1303,12 +1303,14 @@ int mca_common_ompio_prepare_to_group(ompio_file_t *fh,
                                             fh->f_comm);
     if ( OMPI_SUCCESS != ret ) {
         opal_output (1, "mca_common_ompio_prepare_to_group: error in ompi_fcoll_base_coll_allgather_array\n");
+        free(start_offsets_lens_tmp);
         goto exit;
     }
     end_offsets_tmp = (OMPI_MPI_OFFSET_TYPE* )malloc (fh->f_init_procs_per_group * sizeof(OMPI_MPI_OFFSET_TYPE));
     if (NULL == end_offsets_tmp) {
         opal_output (1, "OUT OF MEMORY\n");
-        goto exit;
+        free(start_offsets_lens_tmp);
+        return OMPI_ERR_OUT_OF_RESOURCE;
     }
     for( k = 0 ; k < fh->f_init_procs_per_group; k++){
         end_offsets_tmp[k] = start_offsets_lens_tmp[3*k] + start_offsets_lens_tmp[3*k+1];
@@ -1333,14 +1335,12 @@ int mca_common_ompio_prepare_to_group(ompio_file_t *fh,
     if (NULL == aggr_bytes_per_group_tmp) {
         opal_output (1, "OUT OF MEMORY\n");
         ret = OMPI_ERR_OUT_OF_RESOURCE;
+        free(end_offsets_tmp);
         goto exit;
     }
     decision_list_tmp = (int* )malloc (fh->f_init_num_aggrs * sizeof(int));
     if (NULL == decision_list_tmp) {
         opal_output (1, "OUT OF MEMORY\n");
         ret = OMPI_ERR_OUT_OF_RESOURCE;
+        free(end_offsets_tmp);
         if (NULL != aggr_bytes_per_group_tmp) {
             free(aggr_bytes_per_group_tmp);
         }
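Both ompio hunks fix the same leak shape: the early exit paths dropped the temporaries allocated earlier in the function (start_offsets_lens_tmp, end_offsets_tmp), because the shared exit label cannot know which of them are live yet. The fix releases whatever is already allocated at each failure site. A small sketch of that staged-allocation pattern, with placeholder names:

    #include <stdlib.h>

    /* Two staged allocations: each failure path releases exactly what
       has been acquired so far before bailing out. */
    int staged_alloc(size_t n)
    {
        int *first = malloc(n * sizeof(int));
        if (NULL == first) {
            return -1;               /* nothing to release yet */
        }
        int *second = malloc(n * sizeof(int));
        if (NULL == second) {
            free(first);             /* release the earlier stage */
            return -1;
        }
        /* ... use first and second ... */
        free(second);
        free(first);
        return 0;
    }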
@@ -278,8 +278,8 @@ component_select(struct ompi_win_t *win, void **base, size_t size, int disp_unit
             total += rbuf[i];
         }

         /* user opal/shmem directly to create a shared memory segment */
         state_size = sizeof(ompi_osc_sm_global_state_t) + sizeof(ompi_osc_sm_node_state_t) * comm_size;
         state_size += OPAL_ALIGN_PAD_AMOUNT(state_size, 64);
         posts_size = comm_size * post_size * sizeof (module->posts[0][0]);
         posts_size += OPAL_ALIGN_PAD_AMOUNT(posts_size, 64);
@@ -289,34 +289,39 @@ component_select(struct ompi_win_t *win, void **base, size_t size, int disp_unit
                                 mca_osc_sm_component.backing_directory, ompi_process_info.nodename,
                                 OMPI_PROC_MY_NAME->jobid, (int) OMPI_PROC_MY_NAME->vpid, ompi_comm_get_cid(module->comm));
         if (ret < 0) {
+            free(rbuf);
             return OMPI_ERR_OUT_OF_RESOURCE;
         }

         ret = opal_shmem_segment_create (&module->seg_ds, data_file, total + pagesize + state_size + posts_size);
         free(data_file);
         if (OPAL_SUCCESS != ret) {
+            free(rbuf);
             goto error;
         }

         unlink_needed = true;
     }

     ret = module->comm->c_coll->coll_bcast (&module->seg_ds, sizeof (module->seg_ds), MPI_BYTE, 0,
                                             module->comm, module->comm->c_coll->coll_bcast_module);
     if (OMPI_SUCCESS != ret) {
+        free(rbuf);
         goto error;
     }

     module->segment_base = opal_shmem_segment_attach (&module->seg_ds);
     if (NULL == module->segment_base) {
+        free(rbuf);
         goto error;
     }

     /* wait for all processes to attach */
     ret = module->comm->c_coll->coll_barrier (module->comm, module->comm->c_coll->coll_barrier_module);
     if (OMPI_SUCCESS != ret) {
+        free(rbuf);
         goto error;
     }

     if (0 == ompi_comm_rank (module->comm)) {
         opal_shmem_unlink (&module->seg_ds);
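The osc/sm fix simply prepends free(rbuf) to every failure branch between the allocation and the point where rbuf is consumed. An alternative shape (not what this PR does) is to free once at the shared error label and NULL the pointer when ownership ends, so a double free is impossible; a hedged sketch with hypothetical names:

    #include <stdlib.h>

    extern int phase(int *buf);      /* hypothetical failing step */

    int select_like(int n)
    {
        int ret = 0;
        int *rbuf = malloc(n * sizeof(int));
        if (NULL == rbuf) return -1;

        if (0 != phase(rbuf)) {
            ret = -1;
            goto error;
        }

        free(rbuf);                  /* normal-path release... */
        rbuf = NULL;                 /* ...marked, so the label's free is a no-op */

    error:
        free(rbuf);                  /* free(NULL) is defined to do nothing */
        return ret;
    }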
@@ -110,12 +110,16 @@ int ompi_osc_ucx_start(struct ompi_group_t *group, int assert, struct ompi_win_t

     ret = ompi_comm_group(module->comm, &win_group);
     if (ret != OMPI_SUCCESS) {
+        free(ranks_in_grp);
+        free(ranks_in_win_grp);
         return OMPI_ERROR;
     }

     ret = ompi_group_translate_ranks(module->start_group, size, ranks_in_grp,
                                      win_group, ranks_in_win_grp);
     if (ret != OMPI_SUCCESS) {
+        free(ranks_in_grp);
+        free(ranks_in_win_grp);
         return OMPI_ERROR;
     }

@@ -215,6 +219,11 @@ int ompi_osc_ucx_post(struct ompi_group_t *group, int assert, struct ompi_win_t
     int *ranks_in_grp = NULL, *ranks_in_win_grp = NULL;
     int myrank = ompi_comm_rank(module->comm);

+    ret = ompi_comm_group(module->comm, &win_group);
+    if (ret != OMPI_SUCCESS) {
+        return OMPI_ERROR;
+    }
+
     size = ompi_group_size(module->post_group);
     ranks_in_grp = malloc(sizeof(int) * size);
     ranks_in_win_grp = malloc(sizeof(int) * ompi_comm_size(module->comm));
@@ -223,15 +232,11 @@ int ompi_osc_ucx_post(struct ompi_group_t *group, int assert, struct ompi_win_t
         ranks_in_grp[i] = i;
     }

-    ret = ompi_comm_group(module->comm, &win_group);
-    if (ret != OMPI_SUCCESS) {
-        return OMPI_ERROR;
-    }
-
     ret = ompi_group_translate_ranks(module->post_group, size, ranks_in_grp,
                                      win_group, ranks_in_win_grp);
     if (ret != OMPI_SUCCESS) {
-        return OMPI_ERROR;
+        ret = OMPI_ERROR;
+        goto cleanup;
     }

     for (i = 0; i < size; i++) {
@@ -243,7 +248,8 @@ int ompi_osc_ucx_post(struct ompi_group_t *group, int assert, struct ompi_win_t
                                           1, ranks_in_win_grp[i], &result,
                                           sizeof(result), remote_addr);
         if (ret != OMPI_SUCCESS) {
-            return OMPI_ERROR;
+            ret = OMPI_ERROR;
+            goto cleanup;
         }

         curr_idx = result & (OMPI_OSC_UCX_POST_PEER_MAX - 1);
@@ -256,7 +262,8 @@ int ompi_osc_ucx_post(struct ompi_group_t *group, int assert, struct ompi_win_t
                                               myrank + 1, &result, sizeof(result),
                                               remote_addr);
             if (ret != OMPI_SUCCESS) {
-                return OMPI_ERROR;
+                ret = OMPI_ERROR;
+                goto cleanup;
             }

             if (result == 0)
@@ -277,9 +284,11 @@ int ompi_osc_ucx_post(struct ompi_group_t *group, int assert, struct ompi_win_t
             } while (1);
         }

+cleanup:
         free(ranks_in_grp);
         free(ranks_in_win_grp);
         ompi_group_free(&win_group);
+        if (OMPI_SUCCESS != ret) return ret;
     }

     module->epoch_type.exposure = POST_WAIT_EPOCH;
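The pattern in ompi_osc_ucx_post: every scattered `return OMPI_ERROR;` becomes `ret = OMPI_ERROR; goto cleanup;`, so that ranks_in_grp, ranks_in_win_grp, and the group reference are released on every exit. A compact sketch of that single-exit conversion, with invented step names:

    #include <stdlib.h>

    extern int step_one(int *a, int *b);   /* hypothetical */
    extern int step_two(int *a, int *b);   /* hypothetical */

    int post_like(int n)
    {
        int ret = 0;
        int *a = NULL, *b = NULL;          /* NULL-init keeps free() safe below */

        a = malloc(n * sizeof(int));
        b = malloc(n * sizeof(int));
        if (NULL == a || NULL == b) {
            ret = -1;
            goto cleanup;
        }
        if (0 != step_one(a, b) || 0 != step_two(a, b)) {
            ret = -1;
            goto cleanup;
        }

    cleanup:
        free(a);                           /* runs on success and failure alike */
        free(b);
        return ret;
    }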
@@ -140,7 +140,7 @@ static inline int ddt_put_get(ompi_osc_ucx_module_t *module,
         ret = create_iov_list(origin_addr, origin_count, origin_dt,
                               &origin_ucx_iov, &origin_ucx_iov_count);
         if (ret != OMPI_SUCCESS) {
-            return ret;
+            goto cleanup;
         }
     }

@@ -148,7 +148,7 @@ static inline int ddt_put_get(ompi_osc_ucx_module_t *module,
         ret = create_iov_list(NULL, target_count, target_dt,
                               &target_ucx_iov, &target_ucx_iov_count);
         if (ret != OMPI_SUCCESS) {
-            return ret;
+            goto cleanup;
         }
     }

@@ -168,7 +168,8 @@ static inline int ddt_put_get(ompi_osc_ucx_module_t *module,
                                                remote_addr + (uint64_t)(target_ucx_iov[target_ucx_iov_idx].addr));
                 if (OPAL_SUCCESS != status) {
                     OSC_UCX_VERBOSE(1, "opal_common_ucx_mem_putget failed: %d", status);
-                    return OMPI_ERROR;
+                    ret = OMPI_ERROR;
+                    goto cleanup;
                 }

                 origin_ucx_iov[origin_ucx_iov_idx].addr = (void *)((intptr_t)origin_ucx_iov[origin_ucx_iov_idx].addr + curr_len);
@@ -202,7 +203,8 @@ static inline int ddt_put_get(ompi_osc_ucx_module_t *module,
                                                remote_addr + target_lb + prev_len);
                 if (OPAL_SUCCESS != status) {
                     OSC_UCX_VERBOSE(1, "opal_common_ucx_mem_putget failed: %d", status);
-                    return OMPI_ERROR;
+                    ret = OMPI_ERROR;
+                    goto cleanup;
                 }

                 prev_len += origin_ucx_iov[origin_ucx_iov_idx].len;
@@ -224,7 +226,8 @@ static inline int ddt_put_get(ompi_osc_ucx_module_t *module,
                                                remote_addr + (uint64_t)(target_ucx_iov[target_ucx_iov_idx].addr));
                 if (OPAL_SUCCESS != status) {
                     OSC_UCX_VERBOSE(1, "opal_common_ucx_mem_putget failed: %d", status);
-                    return OMPI_ERROR;
+                    ret = OMPI_ERROR;
+                    goto cleanup;
                 }

                 prev_len += target_ucx_iov[target_ucx_iov_idx].len;
@@ -232,6 +235,8 @@ static inline int ddt_put_get(ompi_osc_ucx_module_t *module,
         }
     }

+cleanup:
+
     if (origin_ucx_iov != NULL) {
         free(origin_ucx_iov);
     }
@@ -338,12 +343,14 @@ static inline int get_dynamic_win_info(uint64_t remote_addr, ompi_osc_ucx_module
                                       len, remote_state_addr);
     if (OPAL_SUCCESS != ret) {
         OSC_UCX_VERBOSE(1, "opal_common_ucx_mem_putget failed: %d", ret);
-        return OMPI_ERROR;
+        ret = OMPI_ERROR;
+        goto cleanup;
     }

     ret = opal_common_ucx_wpmem_flush(module->state_mem, OPAL_COMMON_UCX_SCOPE_EP, target);
-    if (ret != OMPI_SUCCESS) {
-        return ret;
+    if (ret != OPAL_SUCCESS) {
+        ret = OMPI_ERROR;
+        goto cleanup;
     }

     memcpy(&win_count, temp_buf, sizeof(uint64_t));
@@ -365,6 +372,7 @@ static inline int get_dynamic_win_info(uint64_t remote_addr, ompi_osc_ucx_module
                temp_dynamic_wins[contain].mem_addr, OMPI_OSC_UCX_MEM_ADDR_MAX_LEN);
     module->local_dynamic_win_info[contain].mem->mem_displs[target] = target * OMPI_OSC_UCX_MEM_ADDR_MAX_LEN;

+cleanup:
     free(temp_buf);

     return ret;
@@ -652,11 +660,13 @@ int accumulate_req(const void *origin_addr, int origin_count,
         ret = ompi_osc_ucx_get(temp_addr, (int)temp_count, temp_dt,
                                target, target_disp, target_count, target_dt, win);
         if (ret != OMPI_SUCCESS) {
+            free(temp_addr);
             return ret;
         }

         ret = opal_common_ucx_wpmem_flush(module->mem, OPAL_COMMON_UCX_SCOPE_EP, target);
         if (ret != OMPI_SUCCESS) {
+            free(temp_addr);
             return ret;
         }

||||
@ -670,6 +680,7 @@ int accumulate_req(const void *origin_addr, int origin_count,
|
||||
ret = create_iov_list(origin_addr, origin_count, origin_dt,
|
||||
&origin_ucx_iov, &origin_ucx_iov_count);
|
||||
if (ret != OMPI_SUCCESS) {
|
||||
free(temp_addr);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -707,6 +718,7 @@ int accumulate_req(const void *origin_addr, int origin_count,
         ret = ompi_osc_ucx_put(temp_addr, (int)temp_count, temp_dt, target, target_disp,
                                target_count, target_dt, win);
         if (ret != OMPI_SUCCESS) {
+            free(temp_addr);
             return ret;
         }

@@ -741,6 +753,7 @@ do_atomic_compare_and_swap(const void *origin_addr, const void *compare_addr,
     if (!module->acc_single_intrinsic) {
         ret = start_atomicity(module, target, &lock_acquired);
         if (ret != OMPI_SUCCESS) {
+            free(temp_addr);
             return ret;
         }
     }
@@ -204,6 +204,7 @@ int mca_topo_treematch_dist_graph_create(mca_topo_base_module_t* topo_module,
          * and create a duplicate of the original communicator */
         free(vpids);
         free(colors);
+        free(lindex_to_grank);
         goto fallback;  /* return with success */
     }
     /* compute local roots ranks in comm_old */
@@ -250,6 +251,7 @@ int mca_topo_treematch_dist_graph_create(mca_topo_base_module_t* topo_module,
     }
     if( (0 == num_objs_in_node) || (0 == num_pus_in_node) ) {  /* deal with bozo cases: COVERITY 1418505 */
         free(colors);
+        free(lindex_to_grank);
         goto fallback;  /* return with success */
     }
     /* Check for oversubscribing */
@@ -288,6 +290,7 @@ int mca_topo_treematch_dist_graph_create(mca_topo_base_module_t* topo_module,
             object = hwloc_get_obj_by_depth(opal_hwloc_topology, effective_depth, obj_rank);
             if( NULL == object) {
                 free(colors);
+                free(lindex_to_grank);
                 hwloc_bitmap_free(set);
                 goto fallback;  /* return with success */
             }
@@ -315,6 +318,7 @@ int mca_topo_treematch_dist_graph_create(mca_topo_base_module_t* topo_module,
                 OPAL_OUTPUT_VERBOSE((10, ompi_topo_base_framework.framework_output,
                                      "Oversubscribing PUs resources => Rank Reordering Impossible \n"));
                 free(colors);
+                free(lindex_to_grank);
                 hwloc_bitmap_free(set);
                 goto fallback;  /* return with success */
             }
@@ -143,10 +143,12 @@ int mca_base_alias_register (const char *project, const char *framework, const c

         opal_hash_table_set_value_ptr (alias_hash_table, name, strlen(name), alias);
         free (name);
+        name = NULL;
     }

     mca_base_alias_item_t *alias_item = OBJ_NEW(mca_base_alias_item_t);
     if (NULL == alias_item) {
+        if (NULL != name) free(name);
         return OPAL_ERR_OUT_OF_RESOURCE;
     }

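The alias fix pairs free(name) with name = NULL; in the branch that consumes the key, so the later out-of-memory branch can run `if (NULL != name) free(name);` without risking a double free. A sketch of this NULL-after-free ownership discipline; consume_key and the surrounding flow are invented for illustration:

    #include <stdlib.h>
    #include <string.h>

    extern void consume_key(const char *key);  /* hypothetical: copies the key */

    int register_like(const char *base, int is_synonym)
    {
        char *name = strdup(base);
        if (NULL == name) return -1;

        if (is_synonym) {
            consume_key(name);
            free(name);        /* the callee copied it; drop our copy */
            name = NULL;       /* mark ownership as released */
        }

        void *item = malloc(32);
        if (NULL == item) {
            if (NULL != name) free(name);  /* free only if still owned */
            return -1;
        }

        /* ... register item ... */
        free(item);
        if (NULL != name) free(name);
        return 0;
    }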
@@ -1234,6 +1234,7 @@ static int mca_btl_tcp_component_exchange(void)
                            opal_net_get_hostname(addr));
             } else {
                 BTL_ERROR(("Unexpected address family: %d", addr->sa_family));
+                free(addrs);
                 return OPAL_ERR_BAD_PARAM;
             }

@@ -44,7 +44,10 @@ opal_reachable_t * opal_reachable_allocate(unsigned int num_local,
        malloc, rather than a bunch of little allocations */
     memory = malloc(sizeof(int*) * num_local +
                     num_local * (sizeof(int) * num_remote));
-    if (memory == NULL) return NULL;
+    if (memory == NULL) {
+        OBJ_RELEASE(reachable);
+        return NULL;
+    }

     reachable->memory = (void*)memory;
     reachable->weights = (int**)reachable->memory;
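The final hunk releases the half-constructed reachable object (via Open MPI's reference-counting OBJ_RELEASE) when its backing buffer cannot be allocated, rather than returning NULL and leaking the container. A generic sketch of the same constructor discipline, using a plain malloc/free pair instead of OMPI's object system:

    #include <stdlib.h>

    typedef struct {
        void *memory;
    } table_t;

    table_t *table_create(unsigned int rows, unsigned int cols)
    {
        table_t *t = malloc(sizeof(*t));
        if (NULL == t) return NULL;

        /* one block for the row pointers plus all the row data */
        t->memory = malloc(sizeof(int*) * rows + rows * (sizeof(int) * cols));
        if (NULL == t->memory) {
            free(t);           /* tear down the container too (cf. OBJ_RELEASE) */
            return NULL;
        }
        return t;
    }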