
Fixing a minor issue in intercomm_create.

Adapting group_incl/excl/range_incl/range_excl to return
MPI_GROUP_EMPTY when the new group size is zero.
group_union/difference/intersection still need to be adapted to handle that scenario.

This commit was SVN r1921.
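
To illustrate the behaviour described above, here is a minimal sketch (a hypothetical standalone test, not part of this commit) of the two zero-size paths: including no ranks and excluding every rank should now both hand back MPI_GROUP_EMPTY.

/*
 * Hypothetical standalone check, not part of this commit: exercises the
 * zero-size paths that now return MPI_GROUP_EMPTY.
 */
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>

int main(int argc, char **argv)
{
    MPI_Group world_grp, incl_grp, excl_grp;
    int size, i, *ranks;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_group(MPI_COMM_WORLD, &world_grp);

    ranks = (int *) malloc(size * sizeof(int));
    for (i = 0; i < size; i++) {
        ranks[i] = i;
    }

    /* include no ranks (n == 0) */
    MPI_Group_incl(world_grp, 0, ranks, &incl_grp);

    /* exclude every rank (n == group size) */
    MPI_Group_excl(world_grp, size, ranks, &excl_grp);

    if (MPI_GROUP_EMPTY == incl_grp && MPI_GROUP_EMPTY == excl_grp) {
        printf("both zero-size results are MPI_GROUP_EMPTY\n");
    }

    free(ranks);
    MPI_Group_free(&world_grp);
    MPI_Finalize();
    return 0;
}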
This commit is contained in:
Edgar Gabriel 2004-08-05 22:51:34 +00:00
parent 60f4cac622
commit 578c313a61
5 changed files with 29 additions and 5 deletions

View File

@@ -50,6 +50,12 @@ int MPI_Group_excl(MPI_Group group, int n, int *ranks,
     } /* end if( MPI_CHECK_ARGS) */
 
+    if ( n == group_pointer->grp_proc_count ) {
+        *new_group = MPI_GROUP_EMPTY;
+        OBJ_RETAIN(MPI_GROUP_EMPTY);
+        return MPI_SUCCESS;
+    }
+
     /*
      * pull out elements
      */

View File

@@ -49,6 +49,12 @@ int MPI_Group_incl(MPI_Group group, int n, int *ranks, MPI_Group *new_group)
     } /* end if( MPI_CHECK_ARGS) */
 
+    if ( 0 == n ) {
+        *new_group = MPI_GROUP_EMPTY;
+        OBJ_RETAIN(MPI_GROUP_EMPTY);
+        return MPI_SUCCESS;
+    }
+
     /* get new group struct */
     new_group_pointer=ompi_group_allocate(n);
     if( NULL == new_group_pointer ) {

View File

@@ -136,6 +136,12 @@ int MPI_Group_range_excl(MPI_Group group, int n_triplets, int ranges[][3],
     /* we have counted the procs to exclude from the list */
     new_group_size=group_pointer->grp_proc_count-new_group_size;
 
+    if ( 0 == new_group_size ) {
+        *new_group = MPI_GROUP_EMPTY;
+        OBJ_RETAIN(MPI_GROUP_EMPTY);
+        return MPI_SUCCESS;
+    }
+
     /* allocate a new ompi_group_t structure */
     new_group_pointer=ompi_group_allocate(new_group_size);
     if( NULL == new_group_pointer ) {

View File

@@ -134,6 +134,12 @@ int MPI_Group_range_incl(MPI_Group group, int n_triplets, int ranges[][3],
         }
     }
 
+    if ( 0 == new_group_size ) {
+        *new_group = MPI_GROUP_EMPTY;
+        OBJ_RETAIN(MPI_GROUP_EMPTY);
+        return MPI_SUCCESS;
+    }
+
     /* allocate a new ompi_group_t structure */
     new_group_pointer=ompi_group_allocate(new_group_size);
     if( NULL == new_group_pointer ) {
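
For the range variants, a hedged sketch (again a hypothetical test, not from this commit) of a call that drives new_group_size to zero: excluding a single range that covers every rank.

/*
 * Hypothetical illustration, not part of this commit: a single range
 * covering all ranks makes MPI_Group_range_excl produce a zero-size
 * group, which now maps to MPI_GROUP_EMPTY.
 */
#include <mpi.h>

int main(int argc, char **argv)
{
    MPI_Group world_grp, empty_grp;
    int size;
    int ranges[1][3];

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_group(MPI_COMM_WORLD, &world_grp);

    ranges[0][0] = 0;        /* first rank to exclude */
    ranges[0][1] = size - 1; /* last rank to exclude  */
    ranges[0][2] = 1;        /* stride                */

    /* excluding ranks 0..size-1 leaves nothing in the new group */
    MPI_Group_range_excl(world_grp, 1, ranges, &empty_grp);
    /* empty_grp should compare equal to MPI_GROUP_EMPTY after this change */

    MPI_Group_free(&world_grp);
    MPI_Finalize();
    return 0;
}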

View File

@@ -80,12 +80,12 @@ int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader,
     MPI_Request req;
 
     /* local leader exchange group sizes lists */
-    rc =mca_pml.pml_irecv (&rsize, 1, MPI_INT, remote_leader, tag, bridge_comm,
+    rc =mca_pml.pml_irecv (&rsize, 1, MPI_INT, rleader, tag, bridge_comm,
                            &req );
     if ( rc != MPI_SUCCESS ) {
         goto err_exit;
     }
-    rc = mca_pml.pml_send ( &local_size, 1, MPI_INT, remote_leader, tag,
+    rc = mca_pml.pml_send ( &local_size, 1, MPI_INT, rleader, tag,
                             MCA_PML_BASE_SEND_STANDARD, bridge_comm );
     if ( rc != MPI_SUCCESS ) {
         goto err_exit;
@@ -98,13 +98,13 @@ int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader,
     }
 
     /* bcast size and list of remote processes to all processes in local_comm */
-    rc = local_comm->c_coll.coll_bcast ( &rsize, 1, MPI_INT, local_leader,
+    rc = local_comm->c_coll.coll_bcast ( &rsize, 1, MPI_INT, lleader,
                                          local_comm );
     if ( rc != MPI_SUCCESS ) {
         goto err_exit;
     }
 
-    rprocs = ompi_comm_get_rprocs ( local_comm, bridge_comm, local_leader,
+    rprocs = ompi_comm_get_rprocs ( local_comm, bridge_comm, lleader,
                                     remote_leader, tag, rsize );
     if ( NULL == rprocs ) {
         goto err_exit;
@@ -158,7 +158,7 @@ int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader,
     }
 
 err_exit:
-    if ( NULL == rprocs ) {
+    if ( NULL != rprocs ) {
         free ( rprocs );
     }
     if ( OMPI_SUCCESS != rc ) {
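
For context on the arguments being corrected here (local_leader/lleader, remote_leader/rleader, the tag, and the bridge communicator), a minimal usage sketch of MPI_Intercomm_create, assuming rank-0 leaders and MPI_COMM_WORLD as the peer communicator; not part of this commit.

/*
 * Hypothetical usage sketch, not part of this commit: split MPI_COMM_WORLD
 * into two halves and connect them with MPI_Intercomm_create. Rank 0 of
 * each half acts as local leader; MPI_COMM_WORLD is the bridge (peer)
 * communicator over which the two leaders talk.
 */
#include <mpi.h>

int main(int argc, char **argv)
{
    MPI_Comm local_comm, inter_comm;
    int rank, size, color, remote_leader;
    const int tag = 12345;   /* arbitrary tag for the leader exchange */

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    /* lower half -> color 0, upper half -> color 1 */
    color = (rank < size / 2) ? 0 : 1;
    MPI_Comm_split(MPI_COMM_WORLD, color, rank, &local_comm);

    /* remote leader: world rank of the other half's rank-0 process */
    remote_leader = (0 == color) ? size / 2 : 0;

    MPI_Intercomm_create(local_comm, 0 /* local_leader */, MPI_COMM_WORLD,
                         remote_leader, tag, &inter_comm);

    MPI_Comm_free(&inter_comm);
    MPI_Comm_free(&local_comm);
    MPI_Finalize();
    return 0;
}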