Fix segv in MPI_Graph_create_undef_c Intel test.
When you call MPI_Graph_create with an old_comm of size N and pass nnodes=(N-1), the Nth proc is supposed to get MPI_COMM_NULL out. The code in this base function didn't properly handle the proc(s) that are supposed to get MPI_COMM_NULL out.

cmr=v1.7.5:reviewer=hjelmn

This commit was SVN r31145.
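For reference, the failing pattern can be reproduced with a small standalone program along these lines. This is a sketch in the spirit of the MPI_Graph_create_undef_c test, not the Intel test's actual source; the ring topology, the world-size guard, and the variable names are illustrative:

#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
    int rank, size;
    MPI_Comm graph_comm;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    if (size < 3) {
        /* Need at least a 2-node ring plus one left-out proc */
        MPI_Finalize();
        return 0;
    }

    /* Describe a ring over only the first (size - 1) procs, so the
       last proc in old_comm is not part of the new topology */
    int nnodes = size - 1;
    int *index = malloc(nnodes * sizeof(int));
    int *edges = malloc(nnodes * sizeof(int));
    for (int i = 0; i < nnodes; ++i) {
        index[i] = i + 1;            /* one edge per node (cumulative) */
        edges[i] = (i + 1) % nnodes; /* ... to the next node in the ring */
    }

    MPI_Graph_create(MPI_COMM_WORLD, nnodes, index, edges, 0, &graph_comm);

    /* The left-out proc must get MPI_COMM_NULL back; before this fix
       it could segv inside the topo base instead */
    if (MPI_COMM_NULL == graph_comm) {
        printf("rank %d: got MPI_COMM_NULL as expected\n", rank);
    } else {
        MPI_Comm_free(&graph_comm);
    }

    free(index);
    free(edges);
    MPI_Finalize();
    return 0;
}

Run with at least 3 procs (e.g., mpirun -np 3 ./graph_undef): rank size-1 should print the MPI_COMM_NULL line rather than crash.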
This commit is contained in:
parent c6994adf66
commit 7adb137409
@@ -9,6 +9,7 @@
  *                         University of Stuttgart.  All rights reserved.
  * Copyright (c) 2004-2005 The Regents of the University of California.
  *                         All rights reserved.
+ * Copyright (c) 2014      Cisco Systems, Inc.  All rights reserved.
  * $COPYRIGHT$
  *
  * Additional copyrights may follow
@@ -73,30 +74,40 @@ int mca_topo_base_graph_create(mca_topo_base_module_t *topo,
     graph->index = NULL;
     graph->edges = NULL;
 
-    graph->index = (int*)malloc(sizeof(int) * nnodes);
-    if (NULL == graph->index) {
-        free(graph);
-        return OMPI_ERR_OUT_OF_RESOURCE;
-    }
-    memcpy(graph->index, index, nnodes * sizeof(int));
-
-    /* Graph communicator; copy the right data to the common information */
-    graph->edges = (int*)malloc(sizeof(int) * index[nnodes-1]);
-    if (NULL == graph->edges) {
-        free(graph->index);
-        free(graph);
-        return OMPI_ERR_OUT_OF_RESOURCE;
-    }
-    memcpy(graph->edges, edges, index[nnodes-1] * sizeof(int));
-
-    topo_procs = (ompi_proc_t**)malloc(num_procs * sizeof(ompi_proc_t *));
-    if(OMPI_GROUP_IS_DENSE(old_comm->c_local_group)) {
-        memcpy(topo_procs,
-               old_comm->c_local_group->grp_proc_pointers,
-               num_procs * sizeof(ompi_proc_t *));
-    } else {
-        for(i = 0 ; i < num_procs; i++) {
-            topo_procs[i] = ompi_group_peer_lookup(old_comm->c_local_group,i);
-        }
-    }
+    /* Don't do any of the other initialization if we're not supposed
+       to be part of the new communicator (because nnodes has been
+       reset to 0, making things like index[nnodes-1] be junk).
+
+       JMS: This should really be refactored to use
+       comm_create_group(), because ompi_comm_allocate() still
+       complains about 0-byte mallocs in debug builds for 0-member
+       groups. */
+    if (MPI_UNDEFINED != new_rank) {
+        graph->index = (int*)malloc(sizeof(int) * nnodes);
+        if (NULL == graph->index) {
+            free(graph);
+            return OMPI_ERR_OUT_OF_RESOURCE;
+        }
+        memcpy(graph->index, index, nnodes * sizeof(int));
+
+        /* Graph communicator; copy the right data to the common information */
+        graph->edges = (int*)malloc(sizeof(int) * index[nnodes-1]);
+        if (NULL == graph->edges) {
+            free(graph->index);
+            free(graph);
+            return OMPI_ERR_OUT_OF_RESOURCE;
+        }
+        memcpy(graph->edges, edges, index[nnodes-1] * sizeof(int));
+
+        topo_procs = (ompi_proc_t**)malloc(num_procs * sizeof(ompi_proc_t *));
+        if(OMPI_GROUP_IS_DENSE(old_comm->c_local_group)) {
+            memcpy(topo_procs,
+                   old_comm->c_local_group->grp_proc_pointers,
+                   num_procs * sizeof(ompi_proc_t *));
+        } else {
+            for(i = 0 ; i < num_procs; i++) {
+                topo_procs[i] = ompi_group_peer_lookup(old_comm->c_local_group,i);
+            }
+        }
+    }
 