
Fix formatting in group and communicator code (- No functionality changes -)

Mostly TAB-to-spaces changes, though a couple of style fixes were included as well.

The tab/space issue was causing problems with off-trunk branch merging.

This commit was SVN r23827.
This commit is contained in:
Josh Hursey 2010-10-04 14:54:58 +00:00
parent accad16a52
commit ee42c673fe
9 changed files with 627 additions and 616 deletions

View file

@@ -217,7 +217,7 @@ int ompi_comm_set ( ompi_communicator_t **ncomm,
}
}
*ncomm = newcomm;
return (OMPI_SUCCESS);
}
@@ -247,7 +247,7 @@ int ompi_comm_group ( ompi_communicator_t* comm, ompi_group_t **group )
** Counterpart to MPI_Comm_create. To be used within OMPI.
*/
int ompi_comm_create ( ompi_communicator_t *comm, ompi_group_t *group,
ompi_communicator_t **newcomm )
{
ompi_communicator_t *newcomp = NULL;
int rsize , lsize;
@@ -351,12 +351,12 @@ int ompi_comm_create ( ompi_communicator_t *comm, ompi_group_t *group,
/* Activate the communicator and init coll-component */
rc = ompi_comm_activate( &newcomp, /* new communicator */
comm,
NULL,
NULL,
NULL,
mode,
-1 );
if ( OMPI_SUCCESS != rc ) {
goto exit;
}
@@ -392,7 +392,7 @@ int ompi_comm_create ( ompi_communicator_t *comm, ompi_group_t *group,
** Counterpart to MPI_Comm_split. To be used within OMPI (e.g. MPI_Cart_sub).
*/
int ompi_comm_split ( ompi_communicator_t* comm, int color, int key,
ompi_communicator_t **newcomm, bool pass_on_topo )
{
int myinfo[2];
int size, my_size;
@@ -552,7 +552,7 @@ int ompi_comm_split ( ompi_communicator_t* comm, int color, int key,
NULL, /* topo component */
NULL, /* local group */
NULL /* remote group */
);
if ( NULL == newcomm ) {
rc = MPI_ERR_INTERN;
@@ -583,18 +583,18 @@ int ompi_comm_split ( ompi_communicator_t* comm, int color, int key,
* be freed anyway.
*/
if ( MPI_UNDEFINED == color ) {
newcomp->c_local_group->grp_my_rank = MPI_UNDEFINED;
}
/* Activate the communicator and init coll-component */
rc = ompi_comm_activate( &newcomp, /* new communicator */
comm,
NULL,
NULL,
NULL,
mode,
-1 );
if ( OMPI_SUCCESS != rc ) {
goto exit;
}
@@ -610,7 +610,7 @@ int ompi_comm_split ( ompi_communicator_t* comm, int color, int key,
free ( rresults );
}
if ( NULL != rsorted ) {
free ( rsorted );
}
if ( NULL != lranks ) {
free ( lranks );
@@ -686,12 +686,12 @@ int ompi_comm_dup ( ompi_communicator_t * comm, ompi_communicator_t **newcomm )
/* activate communicator and init coll-module */
rc = ompi_comm_activate( &newcomp, /* new communicator */
comp,
NULL,
NULL,
NULL,
mode,
-1 );
if ( OMPI_SUCCESS != rc ) {
return rc;
}
@@ -768,12 +768,13 @@ int ompi_comm_compare(ompi_communicator_t *comm1, ompi_communicator_t *comm2, int *result)
}
}
if ( sameranks && sameorder ) {
lresult = MPI_CONGRUENT;
} else if ( sameranks && !sameorder ) {
lresult = MPI_SIMILAR;
} else {
lresult = MPI_UNEQUAL;
}
if ( rsize1 > 0 ) {
@@ -809,12 +810,13 @@ int ompi_comm_compare(ompi_communicator_t *comm1, ompi_communicator_t *comm2, int *result)
}
}
if ( sameranks && sameorder ) {
rresult = MPI_CONGRUENT;
} else if ( sameranks && !sameorder ) {
rresult = MPI_SIMILAR;
} else {
rresult = MPI_UNEQUAL;
}
}
/* determine final results */
@@ -825,11 +827,13 @@ int ompi_comm_compare(ompi_communicator_t *comm1, ompi_communicator_t *comm2, int *result)
if ( MPI_SIMILAR == lresult || MPI_CONGRUENT == lresult ) {
*result = MPI_SIMILAR;
}
else {
*result = MPI_UNEQUAL;
}
}
else if ( MPI_UNEQUAL == rresult ) {
*result = MPI_UNEQUAL;
}
return OMPI_SUCCESS;
}
@@ -912,7 +916,7 @@ static int ompi_comm_allgather_emulate_intra( void *inbuf, int incount,
/* Step 2: the inter-bcast step */
rc = MCA_PML_CALL(irecv (outbuf, size*outcount, outtype, 0,
OMPI_COMM_ALLGATHER_TAG, comm, &sendreq));
if ( OMPI_SUCCESS != rc ) {
goto exit;
}
@@ -920,8 +924,8 @@ static int ompi_comm_allgather_emulate_intra( void *inbuf, int incount,
if ( 0 == rank ) {
for ( i=0; i < rsize; i++ ){
rc = MCA_PML_CALL(send (tmpbuf, rsize*outcount, outtype, i,
OMPI_COMM_ALLGATHER_TAG,
MCA_PML_BASE_SEND_STANDARD, comm));
if ( OMPI_SUCCESS != rc ) {
goto exit;
}
@@ -959,20 +963,20 @@ int ompi_comm_free ( ompi_communicator_t **comm )
communicator destructor for 2 reasons:
1. The destructor will only NOT be called immediately during
ompi_comm_free() if the reference count is still greater
than zero at that point, meaning that there are ongoing
communications. However, pending communications will never
need attributes, so it's safe to release them directly here.
2. Releasing attributes in ompi_comm_free() enables us to check
the return status of the attribute delete functions. At
least one interpretation of the MPI standard (i.e., the one
of the Intel test suite) is that if any of the attribute
deletion functions fail, then MPI_COMM_FREE /
MPI_COMM_DISCONNECT should also fail. We can't do that if
we delay releasing the attributes -- we need to release the
attributes right away so that we can report the error right
away. */
if ( OMPI_COMM_IS_INTER(*comm) ) {
ompi_comm_free (&(*comm)->c_local_comm);
}
@@ -1115,7 +1119,7 @@ ompi_proc_t **ompi_comm_get_rprocs ( ompi_communicator_t *local_comm,
if ( local_rank == local_leader ) {
/* local leader exchange name lists */
rc = MCA_PML_CALL(irecv (recvbuf, rlen, MPI_BYTE, remote_leader, tag,
bridge_comm, &req ));
if ( OMPI_SUCCESS != rc ) {
goto err_exit;
}
@@ -1187,7 +1191,7 @@ ompi_proc_t **ompi_comm_get_rprocs ( ompi_communicator_t *local_comm,
* in intercomm_create
*/
int ompi_comm_overlapping_groups (int size, ompi_proc_t **lprocs,
int rsize, ompi_proc_t ** rprocs)
{
int rc=OMPI_SUCCESS;
@@ -1432,8 +1436,9 @@ int ompi_topo_create (ompi_communicator_t *old_comm,
else {
proc_list = (ompi_proc_t **) calloc (old_comm->c_local_group->grp_proc_count,
sizeof (ompi_proc_t *));
for(i=0 ; i<old_comm->c_local_group->grp_proc_count ; i++) {
proc_list[i] = ompi_group_peer_lookup(old_comm->c_local_group,i);
}
memcpy (topo_procs,
proc_list,
@@ -1496,7 +1501,7 @@ int ompi_topo_create (ompi_communicator_t *old_comm,
* it as they deem fit */
new_comm->c_topo_comm->mtc_periods_or_edges = (int *)
malloc (sizeof(int) * dims_or_index[ndims_or_nnodes-1]);
if (NULL == new_comm->c_topo_comm->mtc_periods_or_edges) {
ompi_comm_free (&new_comm);
*comm_topo = new_comm;
@@ -1527,7 +1532,7 @@ int ompi_topo_create (ompi_communicator_t *old_comm,
NULL, /* local leader */
NULL, /* remote_leader */
OMPI_COMM_CID_INTRA, /* mode */
-1 ); /* send first, doesn't matter */
if (OMPI_SUCCESS != ret) {
/* something wrong happened during setting the communicator */
ompi_comm_free (&new_comm);
@@ -1561,13 +1566,13 @@ int ompi_topo_create (ompi_communicator_t *old_comm,
NULL, /* local leader */
NULL, /* remote_leader */
OMPI_COMM_CID_INTRA, /* mode */
-1 ); /* send first, doesn't matter */
if (OMPI_SUCCESS != ret) {
/* something wrong happened during setting the communicator */
*comm_topo = new_comm;
return ret;
}
/* if the returned rank is -1, then this process is not in the
@@ -1635,11 +1640,11 @@ static int ompi_comm_fill_rest (ompi_communicator_t *comm,
/* determine the cube dimensions */
comm->c_cube_dim = opal_cube_dim(comm->c_local_group->grp_proc_count);
return OMPI_SUCCESS;
}
static int ompi_comm_copy_topo (ompi_communicator_t *oldcomm,
ompi_communicator_t *newcomm)
{
mca_topo_base_comm_t *oldt = oldcomm->c_topo_comm;
mca_topo_base_comm_t *newt = newcomm->c_topo_comm;
@@ -1703,4 +1708,3 @@ static int ompi_comm_copy_topo (ompi_communicator_t *oldcomm,
return OMPI_SUCCESS;
}
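A note for readers of this diff: the re-braced compare logic above boils down to a three-way classification — same members in the same order is MPI_CONGRUENT, same members in a different order is MPI_SIMILAR, anything else is MPI_UNEQUAL. A minimal standalone sketch of that classification (illustrative names only, not OMPI code):

/* Toy model of the CONGRUENT/SIMILAR/UNEQUAL decision above. */
#include <stdio.h>
#include <stdbool.h>

enum cmp { CONGRUENT, SIMILAR, UNEQUAL };

static bool contains(const int *v, int n, int x) {
    for (int i = 0; i < n; i++) {
        if (v[i] == x) return true;
    }
    return false;
}

static enum cmp compare_ranks(const int *a, const int *b, int n) {
    bool sameranks = true, sameorder = true;
    for (int i = 0; i < n; i++) {
        if (a[i] != b[i])          sameorder = false;  /* order differs */
        if (!contains(b, n, a[i])) sameranks = false;  /* membership differs */
    }
    if (sameranks && sameorder) return CONGRUENT;
    if (sameranks)              return SIMILAR;
    return UNEQUAL;
}

int main(void) {
    int a[] = { 0, 1, 2 }, b[] = { 2, 1, 0 };
    printf("%d\n", compare_ranks(a, b, 3));  /* prints 1 (SIMILAR) */
    return 0;
}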

View file

@@ -181,7 +181,7 @@ int ompi_comm_nextcid ( ompi_communicator_t* newcomm,
*/
switch (mode)
{
case OMPI_COMM_CID_INTRA:
allredfnct=(ompi_comm_cid_allredfct*)ompi_comm_allreduce_intra;
break;
@@ -197,85 +197,83 @@ int ompi_comm_nextcid ( ompi_communicator_t* newcomm,
default:
return MPI_UNDEFINED;
break;
}
do {
/* Only one communicator function allowed in same time on the
* same communicator.
*/
OPAL_THREAD_LOCK(&ompi_cid_lock);
response = ompi_comm_register_cid (comm->c_contextid);
OPAL_THREAD_UNLOCK(&ompi_cid_lock);
} while (OMPI_SUCCESS != response );
start = ompi_mpi_communicators.lowest_free;
while (!done) {
/**
* This is the real algorithm described in the doc
*/
OPAL_THREAD_LOCK(&ompi_cid_lock);
if (comm->c_contextid != ompi_comm_lowest_cid() ) {
/* if not lowest cid, we do not continue, but sleep and try again */
OPAL_THREAD_UNLOCK(&ompi_cid_lock);
continue;
}
OPAL_THREAD_UNLOCK(&ompi_cid_lock);
for (i=start; i < mca_pml.pml_max_contextid ; i++) {
flag=opal_pointer_array_test_and_set_item(&ompi_mpi_communicators,
i, comm);
if (true == flag) {
nextlocal_cid = i;
break;
}
}
(allredfnct)(&nextlocal_cid, &nextcid, 1, MPI_MAX, comm, bridgecomm,
local_leader, remote_leader, send_first );
if (nextcid == nextlocal_cid) {
response = 1; /* fine with me */
}
else {
opal_pointer_array_set_item(&ompi_mpi_communicators,
nextlocal_cid, NULL);
flag = opal_pointer_array_test_and_set_item(&ompi_mpi_communicators,
nextcid, comm );
if (true == flag) {
response = 1; /* works as well */
}
else {
response = 0; /* nope, not acceptable */
}
}
(allredfnct)(&response, &glresponse, 1, MPI_MIN, comm, bridgecomm,
local_leader, remote_leader, send_first );
if (1 == glresponse) {
done = 1; /* we are done */
break;
}
else if ( 0 == glresponse ) {
if ( 1 == response ) {
/* we could use that, but other don't agree */
opal_pointer_array_set_item(&ompi_mpi_communicators,
nextcid, NULL);
}
start = nextcid+1; /* that's where we can start the next round */
}
}
/* set the according values to the newcomm */
newcomm->c_contextid = nextcid;
newcomm->c_f_to_c_index = newcomm->c_contextid;
opal_pointer_array_set_item (&ompi_mpi_communicators, nextcid, newcomm);
OPAL_THREAD_LOCK(&ompi_cid_lock);
ompi_comm_unregister_cid (comm->c_contextid);
OPAL_THREAD_UNLOCK(&ompi_cid_lock);
return (MPI_SUCCESS);
}
@@ -404,7 +402,7 @@ int ompi_comm_activate ( ompi_communicator_t** newcomm,
* send messages over the new communicator
*/
switch (mode)
{
case OMPI_COMM_CID_INTRA:
allredfnct=(ompi_comm_cid_allredfct*)ompi_comm_allreduce_intra;
break;
@@ -420,15 +418,15 @@ int ompi_comm_activate ( ompi_communicator_t** newcomm,
default:
return MPI_UNDEFINED;
break;
}
if (MPI_UNDEFINED != (*newcomm)->c_local_group->grp_my_rank) {
/* Initialize the PML stuff in the newcomm */
if ( OMPI_SUCCESS != (ret = MCA_PML_CALL(add_comm(*newcomm))) ) {
goto bail_on_error;
}
OMPI_COMM_SET_PML_ADDED(*newcomm);
}
@@ -468,7 +466,7 @@ int ompi_comm_activate ( ompi_communicator_t** newcomm,
/* Let the collectives components fight over who will do
collective on this new comm. */
if (OMPI_SUCCESS != (ret = mca_coll_base_comm_select(*newcomm))) {
goto bail_on_error;
}
/* For an inter communicator, we have to deal with the potential
@@ -567,7 +565,7 @@ static int ompi_comm_allreduce_inter ( int *inbuf, int *outbuf,
rc = OMPI_ERR_OUT_OF_RESOURCE;
goto exit;
}
/* Execute the inter-allreduce: the result of our group will
be in the buffer of the remote group */
rc = intercomm->c_coll.coll_allreduce ( inbuf, tmpbuf, count, MPI_INT,
@@ -605,12 +603,16 @@ static int ompi_comm_allreduce_inter ( int *inbuf, int *outbuf,
if ( &ompi_mpi_op_max.op == op ) {
for ( i = 0 ; i < count; i++ ) {
if (tmpbuf[i] > outbuf[i]) {
outbuf[i] = tmpbuf[i];
}
}
}
else if ( &ompi_mpi_op_min.op == op ) {
for ( i = 0 ; i < count; i++ ) {
if (tmpbuf[i] < outbuf[i]) {
outbuf[i] = tmpbuf[i];
}
}
}
else if ( &ompi_mpi_op_sum.op == op ) {
@@ -624,7 +626,7 @@ static int ompi_comm_allreduce_inter ( int *inbuf, int *outbuf,
}
}
}
/* distribute the overall result to all processes in the other group.
Instead of using bcast, we are using here allgatherv, to avoid the
possible deadlock. Else, we need an algorithm to determine,
@@ -637,7 +639,7 @@ static int ompi_comm_allreduce_inter ( int *inbuf, int *outbuf,
rcounts, rdisps, MPI_INT,
intercomm,
intercomm->c_coll.coll_allgatherv_module);
exit:
if ( NULL != tmpbuf ) {
free ( tmpbuf );
@@ -667,7 +669,7 @@ static int ompi_comm_allreduce_intra_bridge (int *inbuf, int *outbuf,
int i;
int rc;
int local_leader, remote_leader;
local_leader = (*((int*)lleader));
remote_leader = (*((int*)rleader));
@@ -694,14 +696,14 @@ static int ompi_comm_allreduce_intra_bridge (int *inbuf, int *outbuf,
MPI_Request req;
rc = MCA_PML_CALL(irecv ( outbuf, count, MPI_INT, remote_leader,
OMPI_COMM_ALLREDUCE_TAG,
bcomm, &req));
if ( OMPI_SUCCESS != rc ) {
goto exit;
}
rc = MCA_PML_CALL(send (tmpbuf, count, MPI_INT, remote_leader,
OMPI_COMM_ALLREDUCE_TAG,
MCA_PML_BASE_SEND_STANDARD, bcomm));
if ( OMPI_SUCCESS != rc ) {
goto exit;
}
@@ -712,12 +714,16 @@ static int ompi_comm_allreduce_intra_bridge (int *inbuf, int *outbuf,
if ( &ompi_mpi_op_max.op == op ) {
for ( i = 0 ; i < count; i++ ) {
if (tmpbuf[i] > outbuf[i]) {
outbuf[i] = tmpbuf[i];
}
}
}
else if ( &ompi_mpi_op_min.op == op ) {
for ( i = 0 ; i < count; i++ ) {
if (tmpbuf[i] < outbuf[i]) {
outbuf[i] = tmpbuf[i];
}
}
}
else if ( &ompi_mpi_op_sum.op == op ) {
@@ -730,9 +736,8 @@ static int ompi_comm_allreduce_intra_bridge (int *inbuf, int *outbuf,
outbuf[i] *= tmpbuf[i];
}
}
}
rc = comm->c_coll.coll_bcast ( outbuf, count, MPI_INT, local_leader,
comm, comm->c_coll.coll_bcast_module );
@@ -764,7 +769,7 @@ static int ompi_comm_allreduce_intra_oob (int *inbuf, int *outbuf,
int local_leader, local_rank;
orte_process_name_t *remote_leader=NULL;
orte_std_cntr_t size_count;
local_leader = (*((int*)lleader));
remote_leader = (orte_process_name_t*)rleader;
size_count = count;
@@ -773,8 +778,8 @@ static int ompi_comm_allreduce_intra_oob (int *inbuf, int *outbuf,
&ompi_mpi_op_max.op != op && &ompi_mpi_op_min.op != op ) {
return MPI_ERR_OP;
}
local_rank = ompi_comm_rank ( comm );
tmpbuf = (int *) malloc ( count * sizeof(int));
if ( NULL == tmpbuf ) {
@@ -826,12 +831,16 @@ static int ompi_comm_allreduce_intra_oob (int *inbuf, int *outbuf,
if ( &ompi_mpi_op_max.op == op ) {
for ( i = 0 ; i < count; i++ ) {
if (tmpbuf[i] > outbuf[i]) {
outbuf[i] = tmpbuf[i];
}
}
}
else if ( &ompi_mpi_op_min.op == op ) {
for ( i = 0 ; i < count; i++ ) {
if (tmpbuf[i] < outbuf[i]) {
outbuf[i] = tmpbuf[i];
}
}
}
else if ( &ompi_mpi_op_sum.op == op ) {
@@ -844,9 +853,8 @@ static int ompi_comm_allreduce_intra_oob (int *inbuf, int *outbuf,
outbuf[i] *= tmpbuf[i];
}
}
}
rc = comm->c_coll.coll_bcast (outbuf, count, MPI_INT,
local_leader, comm,
comm->c_coll.coll_bcast_module);
@@ -860,4 +868,3 @@ static int ompi_comm_allreduce_intra_oob (int *inbuf, int *outbuf,
}
END_C_DECLS
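The largest hunk above re-indents the context-ID (CID) negotiation loop without changing it. As a rough model of what that loop does, here is a self-contained toy simulation — the NPROC in-process "ranks", the reservation tables, and all names are invented for illustration, assuming only the scheme visible in the diff (propose your lowest free slot, allreduce MAX picks the candidate, everyone tries to reserve it, allreduce MIN of the acceptance flags decides the round):

/* Toy single-process simulation of the CID agreement rounds (not OMPI code). */
#include <stdio.h>
#include <stdbool.h>

#define NPROC  3
#define MAXCID 16

static bool used[NPROC][MAXCID];   /* per-"process" reservation tables */

static int lowest_free(int p, int start) {
    for (int i = start; i < MAXCID; i++)
        if (!used[p][i]) { used[p][i] = true; return i; }
    return -1;
}

int main(void) {
    /* make the tables disagree so at least one round fails */
    used[0][3] = true; used[1][4] = true;

    int start = 3, cid = -1;
    while (cid < 0) {
        int prop[NPROC], candidate = 0;
        for (int p = 0; p < NPROC; p++) {
            prop[p] = lowest_free(p, start);
            if (prop[p] > candidate) candidate = prop[p];   /* allreduce MAX */
        }
        int ok[NPROC], all_ok = 1;                          /* allreduce MIN */
        for (int p = 0; p < NPROC; p++) {
            ok[p] = (prop[p] == candidate);
            if (!ok[p]) {
                used[p][prop[p]] = false;                   /* give back proposal */
                if (!used[p][candidate]) {
                    used[p][candidate] = true;              /* try the winner */
                    ok[p] = 1;
                }
            }
            all_ok = all_ok && ok[p];
        }
        if (all_ok) {
            cid = candidate;                                /* everyone reserved it */
        } else {
            for (int p = 0; p < NPROC; p++)
                if (ok[p]) used[p][candidate] = false;      /* round failed */
            start = candidate + 1;                          /* next round */
        }
    }
    printf("agreed on cid %d\n", cid);
    return 0;
}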

View file

@@ -222,25 +222,25 @@ int ompi_comm_finalize(void)
is because a parent communicator is created dynamically
during init, and we just set this pointer to it. Hence, we
just pass in the pointer here. */
OBJ_DESTRUCT (ompi_mpi_comm_parent);
/* Please note, that the we did increase the reference count
for ompi_mpi_comm_null, ompi_mpi_group_null, and
ompi_mpi_errors_are_fatal in ompi_comm_init because of
ompi_mpi_comm_parent. In case a
parent communicator is really created, the ref. counters
for these objects are decreased again by one. However, in a
static scenario, we should ideally decrease the ref. counter
for these objects by one here. The problem just is, that
if the app had a parent_comm, and this has been freed/disconnected,
ompi_comm_parent points again to ompi_comm_null, the reference count
for these objects has not been increased again.
So the point is, if ompi_mpi_comm_parent == &ompi_mpi_comm_null
we do not know whether we have to decrease the ref count for
those three objects or not. Since this is a constant, non-increasing
amount of memory, we stick with the current solution for now,
namely don't do anything.
*/
}
/* Shut down MPI_COMM_NULL */
@@ -256,23 +256,23 @@ int ompi_comm_finalize(void)
comm=(ompi_communicator_t *)opal_pointer_array_get_item(&ompi_mpi_communicators, i);
if ( NULL != comm ) {
/* Still here ? */
if ( !OMPI_COMM_IS_EXTRA_RETAIN(comm)) {
/* For communicator that have been marked as "extra retain", we do not further
* enforce to decrease the reference counter once more. These "extra retain"
* communicators created e.g. by the hierarch or inter module did increase
* the reference count by one more than other communicators, on order to
* allow for deallocation with the parent communicator. Note, that
* this only occurs if the cid of the local_comm is lower than of its
* parent communicator. Read the comment in comm_activate for
* a full explanation.
*/
if ( ompi_debug_show_handle_leaks && !(OMPI_COMM_IS_FREED(comm)) ){
opal_output(0,"WARNING: MPI_Comm still allocated in MPI_Finalize\n");
ompi_comm_dump ( comm);
OBJ_RELEASE(comm);
}
}
}
}
}
@@ -291,7 +291,7 @@ int ompi_comm_finalize(void)
*/
int ompi_comm_link_function(void)
{
return OMPI_SUCCESS;
}
/********************************************************************************/

View file

@@ -46,10 +46,10 @@ int ompi_group_translate_ranks ( ompi_group_t *group1,
struct ompi_proc_t *proc1_pointer, *proc2_pointer;
if ( MPI_GROUP_EMPTY == group1 || MPI_GROUP_EMPTY == group2 ) {
for (proc = 0; proc < n_ranks ; proc++) {
ranks2[proc] = MPI_UNDEFINED;
}
return MPI_SUCCESS;
}
/*
@@ -60,56 +60,55 @@ int ompi_group_translate_ranks ( ompi_group_t *group1,
* find a match.
*/
if( group1->grp_parent_group_ptr == group2 ) { /* from child to parent */
if(OMPI_GROUP_IS_SPORADIC(group1)) {
return ompi_group_translate_ranks_sporadic_reverse
(group1,n_ranks,ranks1,group2,ranks2);
}
else if(OMPI_GROUP_IS_STRIDED(group1)) {
return ompi_group_translate_ranks_strided_reverse
(group1,n_ranks,ranks1,group2,ranks2);
}
else if(OMPI_GROUP_IS_BITMAP(group1)) {
return ompi_group_translate_ranks_bmap_reverse
(group1,n_ranks,ranks1,group2,ranks2);
}
}
else if( group2->grp_parent_group_ptr == group1 ) { /* from parent to child*/
if(OMPI_GROUP_IS_SPORADIC(group2)) {
return ompi_group_translate_ranks_sporadic
(group1,n_ranks,ranks1,group2,ranks2);
}
else if(OMPI_GROUP_IS_STRIDED(group2)) {
return ompi_group_translate_ranks_strided
(group1,n_ranks,ranks1,group2,ranks2);
}
else if(OMPI_GROUP_IS_BITMAP(group2)) {
return ompi_group_translate_ranks_bmap
(group1,n_ranks,ranks1,group2,ranks2);
}
}
else {
/* loop over all ranks */
for (proc = 0; proc < n_ranks; proc++) {
rank=ranks1[proc];
if ( MPI_PROC_NULL == rank) {
ranks2[proc] = MPI_PROC_NULL;
}
else {
proc1_pointer = ompi_group_peer_lookup(group1 ,rank);
/* initialize to no "match" */
ranks2[proc] = MPI_UNDEFINED;
for (proc2 = 0; proc2 < group2->grp_proc_count; proc2++) {
proc2_pointer= ompi_group_peer_lookup(group2 ,proc2);
if ( proc1_pointer == proc2_pointer) {
ranks2[proc] = proc2;
break;
}
} /* end proc2 loop */
} /* end proc loop */
}
}
return MPI_SUCCESS;
@@ -117,49 +116,49 @@ int ompi_group_translate_ranks ( ompi_group_t *group1,
int ompi_group_dump (ompi_group_t* group)
{
int i;
int new_rank;
i=0;
printf("Group Proc Count: %d\n",group->grp_proc_count);
printf("Group My Rank: %d\n",group->grp_my_rank);
if (OMPI_GROUP_IS_SPORADIC(group)) {
ompi_group_translate_ranks( group,1,&group->grp_my_rank,
group->grp_parent_group_ptr,
&new_rank);
printf("Rank in the parent group: %d\n",new_rank);
printf("The Sporadic List Length: %d\n",
group->sparse_data.grp_sporadic.grp_sporadic_list_len);
printf("Rank First Length\n");
for(i=0 ; i<group->sparse_data.grp_sporadic.grp_sporadic_list_len ; i++) {
printf("%d %d\n",
group->sparse_data.grp_sporadic.grp_sporadic_list[i].rank_first,
group->sparse_data.grp_sporadic.grp_sporadic_list[i].length);
}
}
else if (OMPI_GROUP_IS_STRIDED(group)) {
ompi_group_translate_ranks( group,1,&group->grp_my_rank,
group->grp_parent_group_ptr,
&new_rank);
printf("Rank in the parent group: %d\n",new_rank);
printf("The Offset is: %d\n",group->sparse_data.grp_strided.grp_strided_offset);
printf("The Stride is: %d\n",group->sparse_data.grp_strided.grp_strided_stride);
printf("The Last Element is: %d\n",
group->sparse_data.grp_strided.grp_strided_last_element);
}
else if (OMPI_GROUP_IS_BITMAP(group)) {
ompi_group_translate_ranks( group,1,&group->grp_my_rank,
group->grp_parent_group_ptr,
&new_rank);
printf("Rank in the parent group: %d\n",new_rank);
printf("The length of the bitmap array is: %d\n",
group->sparse_data.grp_bitmap.grp_bitmap_array_len);
for (i=0 ; i<group->sparse_data.grp_bitmap.grp_bitmap_array_len ; i++) {
printf("%d\t",group->sparse_data.grp_bitmap.grp_bitmap_array[i]);
}
}
printf("*********************************************************\n");
return OMPI_SUCCESS;
}
/*
@@ -169,17 +168,15 @@ int ompi_group_dump (ompi_group_t* group)
ompi_proc_t* ompi_group_get_proc_ptr (ompi_group_t* group , int rank)
{
int ranks1,ranks2;
do {
if(OMPI_GROUP_IS_DENSE(group)) {
return group->grp_proc_pointers[rank];
}
ranks1 = rank;
ompi_group_translate_ranks( group, 1, &ranks1,
group->grp_parent_group_ptr,&ranks2);
rank = ranks2;
group = group->grp_parent_group_ptr;
} while (1);
}
@@ -190,10 +187,10 @@ int ompi_group_minloc ( int list[] , int length )
index = 0;
for (i=0 ; i<length ; i++) {
if (min > list[i] && list[i] != -1) {
min = list[i];
index = i;
}
}
return index;
}
@@ -204,8 +201,7 @@ int ompi_group_incl(ompi_group_t* group, int n, int *ranks, ompi_group_t **new_group)
method = 0;
#if OMPI_GROUP_SPARSE
if (ompi_use_sparse_group_storage) {
int len [4];
len[0] = ompi_group_calc_plist ( n ,ranks );
@@ -219,20 +215,20 @@ int ompi_group_incl(ompi_group_t* group, int n, int *ranks, ompi_group_t **new_group)
#endif
switch (method)
{
case 0:
result = ompi_group_incl_plist(group, n, ranks, new_group);
break;
case 1:
result = ompi_group_incl_strided(group, n, ranks, new_group);
break;
case 2:
result = ompi_group_incl_spor(group, n, ranks, new_group);
break;
default:
result = ompi_group_incl_bmap(group, n, ranks, new_group);
break;
}
return result;
}
@@ -249,7 +245,9 @@ int ompi_group_excl(ompi_group_t* group, int n, int *ranks, ompi_group_t **new_group)
for (i=0 ; i<group->grp_proc_count ; i++) {
for(j=0 ; j<n ; j++) {
if(ranks[j] == i) {
break;
}
}
if (j==n) {
ranks_included[k] = i;
@@ -268,7 +266,7 @@ int ompi_group_excl(ompi_group_t* group, int n, int *ranks, ompi_group_t **new_group)
}
int ompi_group_range_incl(ompi_group_t* group, int n_triplets, int ranges[][3],
ompi_group_t **new_group)
{
int j,k;
int *ranks_included=NULL;
@@ -281,33 +279,32 @@ int ompi_group_range_incl(ompi_group_t* group, int n_triplets, int ranges[][3],
for(j=0 ; j<n_triplets ; j++) {
first_rank = ranges[j][0];
last_rank = ranges[j][1];
stride = ranges[j][2];
if (first_rank < last_rank) {
/* positive stride */
index = first_rank;
while (index <= last_rank) {
count ++;
k++;
index += stride;
} /* end while loop */
}
else if (first_rank > last_rank) {
/* negative stride */
index = first_rank;
while (index >= last_rank) {
count ++;
k++;
index += stride;
} /* end while loop */
} else { /* first_rank == last_rank */
index = first_rank;
count ++;
k++;
}
}
if (0 != count) {
ranks_included = (int *)malloc( (count)*(sizeof(int)));
@@ -317,46 +314,44 @@ int ompi_group_range_incl(ompi_group_t* group, int n_triplets, int ranges[][3],
for(j=0 ; j<n_triplets ; j++) {
first_rank = ranges[j][0];
last_rank = ranges[j][1];
stride = ranges[j][2];
if (first_rank < last_rank) {
/* positive stride */
index = first_rank;
while (index <= last_rank) {
ranks_included[k] = index;
k++;
index += stride;
} /* end while loop */
}
else if (first_rank > last_rank) {
/* negative stride */
index = first_rank;
while (index >= last_rank) {
ranks_included[k] = index;
k++;
index += stride;
} /* end while loop */
} else { /* first_rank == last_rank */
index = first_rank;
ranks_included[k] = index;
k++;
}
}
result = ompi_group_incl(group, k, ranks_included, new_group);
if (NULL != ranks_included) {
free(ranks_included);
}
return result;
}
int ompi_group_range_excl(ompi_group_t* group, int n_triplets, int ranges[][3],
ompi_group_t **new_group)
{
int j,k,i;
@@ -368,32 +363,30 @@ int ompi_group_range_excl(ompi_group_t* group, int n_triplets, int ranges[][3],
/* determine the number of excluded processes for the range-excl-method */
k = 0;
for(j=0 ; j<n_triplets ; j++) {
first_rank = ranges[j][0];
last_rank = ranges[j][1];
stride = ranges[j][2];
if (first_rank < last_rank) {
/* positive stride */
index = first_rank;
while (index <= last_rank) {
count ++;
index += stride;
} /* end while loop */
}
else if (first_rank > last_rank) {
/* negative stride */
index = first_rank;
while (index >= last_rank) {
count ++;
index += stride;
} /* end while loop */
} else { /* first_rank == last_rank */
index = first_rank;
count ++;
}
}
if (0 != count) {
ranks_excluded = (int *)malloc( (count)*(sizeof(int)));
@@ -402,64 +395,61 @@ int ompi_group_range_excl(ompi_group_t* group, int n_triplets, int ranges[][3],
k = 0;
i = 0;
for(j=0 ; j<n_triplets ; j++) {
first_rank = ranges[j][0];
last_rank = ranges[j][1];
stride = ranges[j][2];
if (first_rank < last_rank) {
/* positive stride */
index = first_rank;
while (index <= last_rank) {
ranks_excluded[i] = index;
i++;
index += stride;
} /* end while loop */
}
else if (first_rank > last_rank) {
/* negative stride */
index = first_rank;
while (index >= last_rank) {
ranks_excluded[i] = index;
i++;
index += stride;
} /* end while loop */
} else { /* first_rank == last_rank */
index = first_rank;
ranks_excluded[i] = index;
i++;
}
}
if (0 != (group->grp_proc_count - count)) {
ranks_included = (int *)malloc( (group->grp_proc_count - count)*(sizeof(int)));
}
for (j=0 ; j<group->grp_proc_count ; j++) {
for(index=0 ; index<i ; index++) {
if(ranks_excluded[index] == j) break;
}
if (index == i) {
ranks_included[k] = j;
k++;
}
}
if (NULL != ranks_excluded) {
free(ranks_excluded);
}
result = ompi_group_incl(group, k, ranks_included, new_group);
if (NULL != ranks_included) {
free(ranks_included);
}
return result;
}
int ompi_group_intersection(ompi_group_t* group1,ompi_group_t* group2,
ompi_group_t **new_group)
{
int proc1,proc2,k, result;
int *ranks_included=NULL;
@@ -469,18 +459,18 @@ int ompi_group_intersection(ompi_group_t* group1,ompi_group_t* group2,
group1_pointer=(ompi_group_t *)group1;
group2_pointer=(ompi_group_t *)group2;
/* determine the number of included processes for the incl-method */
k = 0;
for (proc1 = 0; proc1 < group1_pointer->grp_proc_count; proc1++) {
proc1_pointer = ompi_group_peer_lookup (group1_pointer , proc1);
/* check to see if this proc is in group2 */
for (proc2 = 0; proc2 < group2_pointer->grp_proc_count; proc2++) {
proc2_pointer = ompi_group_peer_lookup (group2_pointer , proc2);
if( proc1_pointer == proc2_pointer ) {
k++;
break;
}
} /* end proc2 loop */
@@ -493,16 +483,16 @@ int ompi_group_intersection(ompi_group_t* group1,ompi_group_t* group2,
/* determine the list of included processes for the incl-method */
k = 0;
for (proc1 = 0; proc1 < group1_pointer->grp_proc_count; proc1++) {
proc1_pointer = ompi_group_peer_lookup (group1_pointer , proc1);
/* check to see if this proc is in group2 */
for (proc2 = 0; proc2 < group2_pointer->grp_proc_count; proc2++) {
proc2_pointer = ompi_group_peer_lookup (group2_pointer ,proc2);
if( proc1_pointer == proc2_pointer ) {
ranks_included[k] = proc1;
k++;
break;
}
} /* end proc2 loop */
@@ -510,9 +500,9 @@ int ompi_group_intersection(ompi_group_t* group1,ompi_group_t* group2,
result = ompi_group_incl(group1, k, ranks_included, new_group);
if (NULL != ranks_included) {
free(ranks_included);
}
return result;
}
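The interleaved range loops above all expand a (first, last, stride) triplet the same way: a positive stride walks up to last, a negative stride walks down, and first == last yields exactly one rank. A self-contained sketch of that expansion (illustrative code, not OMPI's):

/* Toy expansion of one MPI range triplet into explicit ranks. */
#include <stdio.h>

/* writes the expanded ranks into out[], returns how many were written */
static int expand_triplet(int first, int last, int stride, int *out) {
    int n = 0, index = first;
    if (first < last) {
        while (index <= last) { out[n++] = index; index += stride; }
    } else if (first > last) {
        while (index >= last) { out[n++] = index; index += stride; }
    } else {
        out[n++] = first;        /* single-rank range */
    }
    return n;
}

int main(void) {
    int out[16];
    int n = expand_triplet(9, 1, -2, out);   /* 9 7 5 3 1 */
    for (int i = 0; i < n; i++) printf("%d ", out[i]);
    printf("\n");
    return 0;
}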

View file

@@ -27,29 +27,29 @@ static bool check_ranks (int, int *);
int ompi_group_calc_bmap ( int n, int orig_size , int *ranks) {
if (check_ranks(n,ranks)) {
return ompi_group_div_ceil(orig_size,BSIZE);
}
else {
return -1;
}
}
/* from parent group to child group*/
int ompi_group_translate_ranks_bmap ( ompi_group_t *parent_group,
int n_ranks, int *ranks1,
ompi_group_t *child_group,
int *ranks2)
{
int i,count,j,k,m;
unsigned char tmp, tmp1;
for (j=0 ; j<n_ranks ; j++) {
if ( MPI_PROC_NULL == ranks1[j]) {
ranks2[j] = MPI_PROC_NULL;
}
else {
ranks2[j] = MPI_UNDEFINED;
m = ranks1[j];
count = 0;
tmp = ( 1 << (m % BSIZE) );
/* check if the bit that correponds to the parent rank is set in the bitmap */
if ( tmp == (child_group->sparse_data.grp_bitmap.grp_bitmap_array[(int)(m/BSIZE)]
@@ -60,57 +60,58 @@ int ompi_group_translate_ranks_bmap ( ompi_group_t *parent_group,
* that are set on the way till we get to the correponding bit
*/
for (i=0 ; i<=(int)(m/BSIZE) ; i++) {
for (k=0 ; k<BSIZE ; k++) {
tmp1 = ( 1 << k);
if ( tmp1 == ( child_group->sparse_data.grp_bitmap.grp_bitmap_array[i]
& (1 << k) ) ) {
count++;
}
if( i==(int)(m/BSIZE) && k==m % BSIZE ) {
ranks2[j] = count-1;
i = (int)(m/BSIZE) + 1;
break;
}
}
}
}
}
}
return OMPI_SUCCESS;
}
/* from child group to parent group */
int ompi_group_translate_ranks_bmap_reverse ( ompi_group_t *child_group,
int n_ranks, int *ranks1,
ompi_group_t *parent_group,
int *ranks2)
{
int i,j,count,m,k;
unsigned char tmp;
for (j=0 ; j<n_ranks ; j++) {
if ( MPI_PROC_NULL == ranks1[j]) {
ranks2[j] = MPI_PROC_NULL;
}
else {
m = ranks1[j];
count = 0;
/*
* Go through all the bits set in the bitmap up to the child rank.
* The parent rank will be the sum of all bits passed (set and unset)
*/
for (i=0 ; i<child_group->sparse_data.grp_bitmap.grp_bitmap_array_len ; i++) {
for (k=0 ; k<BSIZE ; k++) {
tmp = ( 1 << k);
if ( tmp == ( child_group->sparse_data.grp_bitmap.grp_bitmap_array[i]
& (1 << k) ) ) {
count++;
}
if( m == count-1 ) {
ranks2[j] = i*BSIZE + k;
i = child_group->sparse_data.grp_bitmap.grp_bitmap_array_len + 1;
break;
}
}
}
}
}
return OMPI_SUCCESS;
}
@@ -133,7 +134,7 @@ int ompi_group_div_ceil (int num, int den)
static bool check_ranks (int n, int *ranks) {
int i;
for (i=1 ; i < n ; i++) {
if ( ranks[i-1] > ranks [i] ) {
return false;
}
}
@@ -141,7 +142,7 @@ static bool check_ranks (int n, int *ranks) {
}
int ompi_group_incl_bmap(ompi_group_t* group, int n, int *ranks,
ompi_group_t **new_group)
{
/* local variables */
int my_group_rank,i,bit_set;
@@ -150,24 +151,24 @@ int ompi_group_incl_bmap(ompi_group_t* group, int n, int *ranks,
group_pointer = (ompi_group_t *)group;
if ( 0 == n ) {
*new_group = MPI_GROUP_EMPTY;
OBJ_RETAIN(MPI_GROUP_EMPTY);
return OMPI_SUCCESS;
}
new_group_pointer = ompi_group_allocate_bmap(group->grp_proc_count, n);
if( NULL == new_group_pointer ) {
return MPI_ERR_GROUP;
}
/* Initialize the bit array to zeros */
for (i=0 ; i<new_group_pointer->sparse_data.grp_bitmap.grp_bitmap_array_len ; i++) {
new_group_pointer->
sparse_data.grp_bitmap.grp_bitmap_array[i] = 0;
}
/* set the bits */
for (i=0 ; i<n ; i++) {
bit_set = ranks[i] % BSIZE;
new_group_pointer->
sparse_data.grp_bitmap.grp_bitmap_array[(int)(ranks[i]/BSIZE)] |= (1 << bit_set);
}
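For context on the bitmap code above: a child group keeps one bit per parent rank, and translating a parent rank to a child rank means counting the set bits that precede its own bit. A small illustrative sketch under that reading (not OMPI code):

/* Toy parent-to-child translation over a membership bitmap. */
#include <stdio.h>

#define BSIZE 8

static int parent_to_child(const unsigned char *bitmap, int parent_rank) {
    if (!(bitmap[parent_rank / BSIZE] & (1 << (parent_rank % BSIZE))))
        return -1;                        /* not a member (MPI_UNDEFINED) */
    int count = 0;
    for (int r = 0; r < parent_rank; r++)
        if (bitmap[r / BSIZE] & (1 << (r % BSIZE)))
            count++;                      /* members before this one */
    return count;
}

int main(void) {
    unsigned char bitmap[2] = { 0 };
    int members[] = { 1, 4, 9 };          /* parent ranks in the child group */
    for (int i = 0; i < 3; i++)
        bitmap[members[i] / BSIZE] |= 1 << (members[i] % BSIZE);
    printf("%d %d %d %d\n",
           parent_to_child(bitmap, 1),    /* 0 */
           parent_to_child(bitmap, 4),    /* 1 */
           parent_to_child(bitmap, 9),    /* 2 */
           parent_to_child(bitmap, 5));   /* -1 */
    return 0;
}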

View file

@@ -59,8 +59,9 @@ ompi_group_t *ompi_group_allocate(int group_size)
/* create new group group element */
new_group = OBJ_NEW(ompi_group_t);
if (NULL == new_group) {
goto error_exit;
}
if (0 > new_group->grp_f_to_c_index) {
OBJ_RELEASE (new_group);
@@ -89,7 +90,7 @@ ompi_group_t *ompi_group_allocate(int group_size)
new_group->grp_my_rank = MPI_UNDEFINED;
OMPI_GROUP_SET_DENSE(new_group);
error_exit:
/* return */
return new_group;
}
@@ -135,7 +136,7 @@ ompi_group_t *ompi_group_allocate_sporadic(int group_size)
new_group->grp_proc_pointers = NULL;
OMPI_GROUP_SET_SPORADIC(new_group);
error_exit:
return new_group;
}
@@ -160,7 +161,7 @@ ompi_group_t *ompi_group_allocate_strided(void)
new_group->sparse_data.grp_strided.grp_strided_stride = -1;
new_group->sparse_data.grp_strided.grp_strided_offset = -1;
new_group->sparse_data.grp_strided.grp_strided_last_element = -1;
error_exit:
/* return */
return new_group;
}
@@ -194,7 +195,7 @@ ompi_group_t *ompi_group_allocate_bmap(int orig_group_size , int group_size)
new_group->grp_proc_pointers = NULL;
OMPI_GROUP_SET_BITMAP(new_group);
error_exit:
/* return */
return new_group;
}
@@ -207,8 +208,8 @@ void ompi_group_increment_proc_count(ompi_group_t *group)
int proc;
ompi_proc_t * proc_pointer;
for (proc = 0; proc < group->grp_proc_count; proc++) {
proc_pointer = ompi_group_peer_lookup(group,proc);
OBJ_RETAIN(proc_pointer);
}
return;
@@ -223,8 +224,8 @@ void ompi_group_decrement_proc_count(ompi_group_t *group)
int proc;
ompi_proc_t * proc_pointer;
for (proc = 0; proc < group->grp_proc_count; proc++) {
proc_pointer = ompi_group_peer_lookup(group,proc);
OBJ_RELEASE(proc_pointer);
}
return;
@@ -290,9 +291,9 @@ static void ompi_group_destruct(ompi_group_t *group)
/* reset the ompi_group_f_to_c_table entry - make sure that the
* entry is in the table */
if (NULL != opal_pointer_array_get_item(&ompi_group_f_to_c_table,
group->grp_f_to_c_index)) {
opal_pointer_array_set_item(&ompi_group_f_to_c_table,
group->grp_f_to_c_index, NULL);
}
/* return */
@@ -347,6 +348,3 @@ int ompi_group_finalize(void)
return OMPI_SUCCESS;
}
/* LocalWords: grp
*/
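The increment/decrement pair above is plain per-proc reference counting: a group retains every proc it points at and releases them all when it is destructed. A toy model with invented names (not the real OBJ_RETAIN/OBJ_RELEASE machinery):

/* Toy per-proc reference counting, mirroring the loops above. */
#include <stdio.h>

struct proc { int refcount; };

static void retain(struct proc *p)  { p->refcount++; }
static void release(struct proc *p) { p->refcount--; }

int main(void) {
    struct proc procs[3] = { {1}, {1}, {1} };
    struct proc *group[3] = { &procs[0], &procs[1], &procs[2] };

    for (int i = 0; i < 3; i++) retain(group[i]);   /* group created */
    for (int i = 0; i < 3; i++) release(group[i]);  /* group destructed */

    printf("%d %d %d\n", procs[0].refcount,
           procs[1].refcount, procs[2].refcount);   /* 1 1 1 */
    return 0;
}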

View file

@@ -31,7 +31,7 @@ int ompi_group_calc_plist ( int n , int *ranks ) {
}
int ompi_group_incl_plist(ompi_group_t* group, int n, int *ranks,
ompi_group_t **new_group)
{
/* local variables */
int proc,my_group_rank;
@@ -41,22 +41,21 @@ int ompi_group_incl_plist(ompi_group_t* group, int n, int *ranks,
group_pointer = (ompi_group_t *)group;
if ( 0 == n ) {
*new_group = MPI_GROUP_EMPTY;
OBJ_RETAIN(MPI_GROUP_EMPTY);
return OMPI_SUCCESS;
}
/* get new group struct */
new_group_pointer=ompi_group_allocate(n);
if( NULL == new_group_pointer ) {
return MPI_ERR_GROUP;
}
/* put group elements in the list */
for (proc = 0; proc < n; proc++) {
new_group_pointer->grp_proc_pointers[proc] =
ompi_group_peer_lookup(group_pointer,ranks[proc]);
} /* end proc loop */
/* increment proc reference counters */
@@ -82,7 +81,7 @@ int ompi_group_incl_plist(ompi_group_t* group, int n, int *ranks,
* two parent groups in the group structure and maintain functions
*/
int ompi_group_union (ompi_group_t* group1, ompi_group_t* group2,
ompi_group_t **new_group)
{
/* local variables */
int new_group_size, proc1, proc2, found_in_group;
@@ -116,16 +115,17 @@ int ompi_group_union (ompi_group_t* group1, ompi_group_t* group2,
}
} /* end proc1 loop */
if (found_in_group) {
continue;
}
new_group_size++;
} /* end proc loop */
if ( 0 == new_group_size ) {
*new_group = MPI_GROUP_EMPTY;
OBJ_RETAIN(MPI_GROUP_EMPTY);
return MPI_SUCCESS;
}
/* get new group struct */
@@ -139,7 +139,7 @@ int ompi_group_union (ompi_group_t* group1, ompi_group_t* group2,
/* put group1 elements in the list */
for (proc1 = 0; proc1 < group1_pointer->grp_proc_count; proc1++) {
new_group_pointer->grp_proc_pointers[proc1] =
ompi_group_peer_lookup(group1_pointer,proc1);
}
cnt = group1_pointer->grp_proc_count;
@@ -159,11 +159,12 @@ int ompi_group_union (ompi_group_t* group1, ompi_group_t* group2,
}
} /* end proc1 loop */
if (found_in_group) {
continue;
}
new_group_pointer->grp_proc_pointers[cnt] =
ompi_group_peer_lookup(group2_pointer,proc2);
cnt++;
} /* end proc loop */
@@ -174,18 +175,18 @@ int ompi_group_union (ompi_group_t* group1, ompi_group_t* group2,
my_group_rank = group1_pointer->grp_my_rank;
if (MPI_UNDEFINED == my_group_rank) {
my_group_rank = group2_pointer->grp_my_rank;
if ( MPI_UNDEFINED != my_group_rank) {
my_proc_pointer = ompi_group_peer_lookup(group2_pointer,my_group_rank);
}
} else {
my_proc_pointer = ompi_group_peer_lookup(group1_pointer,my_group_rank);
}
if ( MPI_UNDEFINED == my_group_rank ) {
new_group_pointer->grp_my_rank = MPI_UNDEFINED;
}
else {
ompi_set_group_rank(new_group_pointer, my_proc_pointer);
}
*new_group = (MPI_Group) new_group_pointer;
@@ -199,7 +200,7 @@ int ompi_group_union (ompi_group_t* group1, ompi_group_t* group2,
* two parent groups in the group structure and maintain functions
*/
int ompi_group_difference(ompi_group_t* group1, ompi_group_t* group2,
ompi_group_t **new_group) {
/* local varibles */
int new_group_size, proc1, proc2, found_in_group2, cnt;
@@ -230,21 +231,22 @@ int ompi_group_difference(ompi_group_t* group1, ompi_group_t* group2,
break;
}
} /* end proc1 loop */
if(found_in_group2) {
continue;
}
new_group_size++;
} /* end proc loop */
if ( 0 == new_group_size ) {
*new_group = MPI_GROUP_EMPTY;
OBJ_RETAIN(MPI_GROUP_EMPTY);
return MPI_SUCCESS;
}
/* allocate a new ompi_group_t structure */
new_group_pointer=ompi_group_allocate(new_group_size);
if( NULL == new_group_pointer ) {
return MPI_ERR_GROUP;
}
/* fill in group list */
@@ -261,11 +263,12 @@ int ompi_group_difference(ompi_group_t* group1, ompi_group_t* group2,
break;
}
} /* end proc1 loop */
if(found_in_group2) {
continue;
}
new_group_pointer->grp_proc_pointers[cnt] =
ompi_group_peer_lookup(group1_pointer,proc1);
cnt++;
} /* end proc loop */
@@ -276,24 +279,23 @@ int ompi_group_difference(ompi_group_t* group1, ompi_group_t* group2,
/* find my rank */
my_group_rank=group1_pointer->grp_my_rank;
if ( MPI_UNDEFINED != my_group_rank ) {
my_proc_pointer = ompi_group_peer_lookup(group1_pointer,my_group_rank);
}
else {
my_group_rank=group2_pointer->grp_my_rank;
if ( MPI_UNDEFINED != my_group_rank ) {
my_proc_pointer = ompi_group_peer_lookup(group2_pointer,my_group_rank);
}
}
if ( MPI_UNDEFINED == my_group_rank ) {
new_group_pointer->grp_my_rank = MPI_UNDEFINED;
}
else {
ompi_set_group_rank(new_group_pointer,my_proc_pointer);
}
*new_group = (MPI_Group)new_group_pointer;
return OMPI_SUCCESS;
}

View file

@@ -27,88 +27,92 @@ int ompi_group_calc_sporadic ( int n , int *ranks)
{
int i,l=0;
for (i=0 ; i<n ; i++) {
if(ranks[i] == ranks[i-1]+1) {
if(l==0) {
l++;
}
}
else {
l++;
}
}
return sizeof(struct ompi_group_sporadic_list_t ) * l;
}
/* from parent group to child group*/
int ompi_group_translate_ranks_sporadic ( ompi_group_t *parent_group,
int n_ranks, int *ranks1,
ompi_group_t *child_group,
int *ranks2)
{
int i,count,j;
for (j=0 ; j<n_ranks ; j++) {
if (MPI_PROC_NULL == ranks1[j]) {
ranks2[j] = MPI_PROC_NULL;
}
else {
/*
* if the rank is in the current range of the sporadic list, we calculate
* the rank in the child by adding the length of all ranges that we passed
* and the position in the current range
*/
ranks2[j] = MPI_UNDEFINED;
count = 0;
for(i=0 ; i <child_group->sparse_data.grp_sporadic.grp_sporadic_list_len ; i++) {
if( child_group->sparse_data.grp_sporadic.grp_sporadic_list[i].rank_first
<= ranks1[j] && ranks1[j] <=
child_group->sparse_data.grp_sporadic.grp_sporadic_list[i].rank_first +
child_group->sparse_data.grp_sporadic.grp_sporadic_list[i].length -1 ) {
ranks2[j] = ranks1[j] - child_group->
sparse_data.grp_sporadic.grp_sporadic_list[i].rank_first + count;
break;
}
else {
count = count + child_group->sparse_data.grp_sporadic.grp_sporadic_list[i].length;
}
}
}
}
return OMPI_SUCCESS;
}
/* from child group to parent group*/
int ompi_group_translate_ranks_sporadic_reverse ( ompi_group_t *child_group,
int n_ranks, int *ranks1,
ompi_group_t *parent_group,
int *ranks2)
{
int i,j,count;
for (j=0 ; j<n_ranks ; j++) {
if (MPI_PROC_NULL == ranks1[j]) {
ranks2[j] = MPI_PROC_NULL;
}
else {
count = 0;
/*
* if the rank of the child is in the current range, the rank of the parent will be
* the position in the current range of the sporadic list
*/
for (i=0 ; i<child_group->sparse_data.grp_sporadic.grp_sporadic_list_len ; i++) {
if ( ranks1[j] > ( count +
child_group->sparse_data.grp_sporadic.grp_sporadic_list[i].length
- 1) ) {
count = count + child_group->sparse_data.grp_sporadic.grp_sporadic_list[i].length;
}
else {
ranks2[j] = child_group->sparse_data.grp_sporadic.grp_sporadic_list[i].rank_first
+ (ranks1[j] - count);
break;
}
}
}
}
return OMPI_SUCCESS;
}
int ompi_group_incl_spor(ompi_group_t* group, int n, int *ranks,
ompi_group_t **new_group)
{
/* local variables */
int my_group_rank,l,i,j,proc_count;
@@ -117,9 +121,9 @@ int ompi_group_incl_spor(ompi_group_t* group, int n, int *ranks,
group_pointer = (ompi_group_t *)group;
if (0 == n) {
*new_group = MPI_GROUP_EMPTY;
OBJ_RETAIN(MPI_GROUP_EMPTY);
return OMPI_SUCCESS;
}
l=0;
@@ -127,15 +131,19 @@ int ompi_group_incl_spor(ompi_group_t* group, int n, int *ranks,
proc_count = 0;
for(i=0 ; i<n ; i++){
if(ranks[i] == ranks[i-1]+1) {
if(l==0) {
l++;
}
}
else {
l++;
}
}
new_group_pointer = ompi_group_allocate_sporadic(l);
if( NULL == new_group_pointer ) {
return MPI_ERR_GROUP;
}
new_group_pointer ->
@@ -144,16 +152,16 @@ int ompi_group_incl_spor(ompi_group_t* group, int n, int *ranks,
sparse_data.grp_sporadic.grp_sporadic_list[j].length = 1;
for(i=1 ; i<n ; i++){
if(ranks[i] == ranks[i-1]+1) {
new_group_pointer -> sparse_data.grp_sporadic.grp_sporadic_list[j].length ++;
}
else {
j++;
new_group_pointer ->
sparse_data.grp_sporadic.grp_sporadic_list[j].rank_first = ranks[i];
new_group_pointer ->
sparse_data.grp_sporadic.grp_sporadic_list[j].length = 1;
}
}
new_group_pointer->sparse_data.grp_sporadic.grp_sporadic_list_len = j+1;
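For context on this file: a sporadic group stores runs of consecutive parent ranks as (rank_first, length) pairs, and translating a parent rank sums the lengths of the runs that come before the matching one. An illustrative, self-contained sketch (not OMPI code):

/* Toy parent-to-child translation over a sporadic run list. */
#include <stdio.h>

struct run { int rank_first; int length; };

static int parent_to_child(const struct run *list, int len, int parent_rank) {
    int count = 0;
    for (int i = 0; i < len; i++) {
        if (parent_rank >= list[i].rank_first &&
            parent_rank <= list[i].rank_first + list[i].length - 1)
            return parent_rank - list[i].rank_first + count;
        count += list[i].length;          /* skip this whole run */
    }
    return -1;                            /* not a member (MPI_UNDEFINED) */
}

int main(void) {
    /* parent ranks {2,3,4, 8,9} stored as two runs */
    struct run list[] = { { 2, 3 }, { 8, 2 } };
    printf("%d %d %d\n",
           parent_to_child(list, 2, 4),   /* 2 */
           parent_to_child(list, 2, 9),   /* 4 */
           parent_to_child(list, 2, 6));  /* -1 */
    return 0;
}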

View file

@@ -27,54 +27,54 @@ static int check_stride(int[],int);
int ompi_group_calc_strided ( int n , int *ranks ) {
if(-1 == check_stride(ranks,n)) {
return -1;
}
else {
return (sizeof(int)*3);
}
}
/* from parent group to child group*/
int ompi_group_translate_ranks_strided (ompi_group_t *parent_group,
int n_ranks, int *ranks1,
ompi_group_t *child_group,
int *ranks2)
{
int s,o,l,i;
s = child_group->sparse_data.grp_strided.grp_strided_stride;
o = child_group->sparse_data.grp_strided.grp_strided_offset;
l = child_group->sparse_data.grp_strided.grp_strided_last_element;
for (i = 0; i < n_ranks; i++) {
if ( MPI_PROC_NULL == ranks1[i]) {
ranks2[i] = MPI_PROC_NULL;
}
else {
ranks2[i] = MPI_UNDEFINED;
if ( (ranks1[i]-o) >= 0 && (ranks1[i]-o)%s == 0 && ranks1[i] <= l) {
ranks2[i] = (ranks1[i] - o)/s;
}
}
}
return OMPI_SUCCESS;
}
/* from child group to parent group*/
int ompi_group_translate_ranks_strided_reverse (ompi_group_t *child_group,
int n_ranks, int *ranks1,
ompi_group_t *parent_group,
int *ranks2)
{
int s,o,i;
s = child_group->sparse_data.grp_strided.grp_strided_stride;
o = child_group->sparse_data.grp_strided.grp_strided_offset;
for (i = 0; i < n_ranks; i++) {
if ( MPI_PROC_NULL == ranks1[i]) {
ranks2[i] = MPI_PROC_NULL;
}
else {
ranks2[i] =s*ranks1[i] + o;
}
}
return OMPI_SUCCESS;
}
@@ -91,14 +91,15 @@ static int check_stride(int incl[],int incllen) {
return -1;
}
for(i=0 ; i < incllen-1 ; i++) {
if(incl[i+1] - incl[i] != s) {
return -1;
}
}
return s;
}
int ompi_group_incl_strided(ompi_group_t* group, int n, int *ranks,
ompi_group_t **new_group)
{
/* local variables */
int my_group_rank,stride;
@@ -107,9 +108,9 @@ int ompi_group_incl_strided(ompi_group_t* group, int n, int *ranks,
group_pointer = (ompi_group_t *)group;
if ( 0 == n ) {
*new_group = MPI_GROUP_EMPTY;
OBJ_RETAIN(MPI_GROUP_EMPTY);
return OMPI_SUCCESS;
}
stride = check_stride(ranks,n);
@@ -130,9 +131,9 @@ int ompi_group_incl_strided(ompi_group_t* group, int n, int *ranks,
ompi_group_increment_proc_count(new_group_pointer);
my_group_rank = group_pointer->grp_my_rank;
ompi_group_translate_ranks (new_group_pointer->grp_parent_group_ptr,1,&my_group_rank,
new_group_pointer,&new_group_pointer->grp_my_rank);
*new_group = (MPI_Group)new_group_pointer;
return OMPI_SUCCESS;
}
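For context on this file: a strided group needs only an offset, a stride, and a last element, so both translations are pure arithmetic — child = (parent - offset) / stride and parent = child * stride + offset. An illustrative sketch under that reading (not OMPI code):

/* Toy parent-to-child translation for a strided group. */
#include <stdio.h>

static int parent_to_child(int o, int s, int l, int parent_rank) {
    if (parent_rank >= o && (parent_rank - o) % s == 0 && parent_rank <= l)
        return (parent_rank - o) / s;
    return -1;                             /* not a member (MPI_UNDEFINED) */
}

int main(void) {
    /* parent ranks {1,4,7,10}: offset 1, stride 3, last 10 */
    printf("%d %d %d\n",
           parent_to_child(1, 3, 10, 7),   /* 2 */
           parent_to_child(1, 3, 10, 10),  /* 3 */
           parent_to_child(1, 3, 10, 8));  /* -1 */
    return 0;
}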