
Merging in the Sparse Groups..

This commit includes config changes..

This commit was SVN r15764.
This commit is contained in:
Mohamad Chaarawi 2007-08-04 00:41:26 +00:00
parent 8baeadb761
commit 59a7bf8a9f
28 changed files: 2143 additions and 944 deletions

NEWS

@ -28,6 +28,8 @@ version 1.0.
Trunk (not on release branches yet)
-----------------------------------
- Added the Sparse Groups implementation.
--> Expected: 1.3
- Added Cray Compute Node Linux (CNL) and ALPS Support.
--> Expected: ???

README

@ -489,6 +489,10 @@ for a full list); a summary of the more commonly used ones follows:
--disable-shared; enabling static libraries and disabling shared
libraries are two independent options.
--enable-sparse-groups
Enable the usage of sparse groups. This can save memory significantly,
especially when creating large communicators. (Disabled by default)
There are many other options available -- see "./configure --help".
Changing the compilers that Open MPI uses to build itself uses the


@ -163,6 +163,24 @@ AC_ARG_ENABLE(debug-symbols,
AC_HELP_STRING([--disable-debug-symbols],
[Disable adding compiler flags to enable debugging symbols if --enable-debug is specified. For non-debugging builds, this flag has no effect.]))
#
# Sparse Groups
#
AC_MSG_CHECKING([if want sparse process groups])
AC_ARG_ENABLE(sparse-groups,
AC_HELP_STRING([--enable-sparse-groups],
[enable sparse process groups (default: not enabled)]))
if test "$enable_sparse_groups" = "yes"; then
AC_MSG_RESULT([yes])
GROUP_SPARSE=1
else
AC_MSG_RESULT([no])
GROUP_SPARSE=0
fi
AC_DEFINE_UNQUOTED([OMPI_GROUP_SPARSE],$GROUP_SPARSE,
[Whether we want sparse process groups])
#
# Fortran 77
#
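The AC_DEFINE_UNQUOTED above exports the configure decision to C code as the preprocessor symbol OMPI_GROUP_SPARSE (0 or 1). A minimal, hypothetical sketch (not part of this commit) of how a translation unit can branch on that symbol at compile time, mirroring what ompi_group_peer_lookup does in the group header later in this diff:

    /* Sketch only: OMPI_GROUP_SPARSE is 0 or 1 after configure has run. */
    #include <stdio.h>

    #ifndef OMPI_GROUP_SPARSE
    #define OMPI_GROUP_SPARSE 0   /* assume a dense-only build when undefined */
    #endif

    int main(void)
    {
    #if OMPI_GROUP_SPARSE
        printf("sparse process groups compiled in\n");
    #else
        printf("sparse process groups compiled out\n");
    #endif
        return 0;
    }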


@ -10,6 +10,7 @@
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2007 University of Houston. All rights reserved.
* Copyright (c) 2007 Cisco, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -77,36 +78,51 @@ static int ompi_comm_copy_topo (ompi_communicator_t *oldcomm,
* All other routines are just used to determine these elements.
*/
int ompi_comm_set ( ompi_communicator_t *newcomm,
int ompi_comm_set ( ompi_communicator_t **ncomm,
ompi_communicator_t* oldcomm,
int local_size,
ompi_proc_t **local_procs,
int *local_ranks,
int remote_size,
ompi_proc_t **remote_procs,
int *remote_ranks,
opal_hash_table_t *attr,
ompi_errhandler_t *errh,
mca_base_component_t *topocomponent )
mca_base_component_t *topocomponent,
ompi_group_t *local_group,
ompi_group_t *remote_group )
{
ompi_proc_t *my_gpointer;
int my_grank;
ompi_communicator_t *newcomm=NULL;
int ret;
/* ompi_comm_allocate */
newcomm = OBJ_NEW(ompi_communicator_t);
/* fill in the inscribing hyper-cube dimensions */
newcomm->c_cube_dim = opal_cube_dim(local_size);
/* Set local_group information */
memcpy ( newcomm->c_local_group->grp_proc_pointers,
local_procs, local_size * sizeof(ompi_proc_t *));
ompi_group_increment_proc_count(newcomm->c_local_group);
/* determine my rank */
my_grank = oldcomm->c_local_group->grp_my_rank;
my_gpointer = oldcomm->c_local_group->grp_proc_pointers[my_grank];
ompi_set_group_rank(newcomm->c_local_group, my_gpointer);
if (NULL == local_group) {
/* determine how the list of local_rank can be stored most
efficiently */
ret = ompi_group_incl(oldcomm->c_local_group, local_size,
local_ranks, &newcomm->c_local_group);
}
else {
newcomm->c_local_group = local_group;
OBJ_RETAIN(newcomm->c_local_group);
ompi_group_increment_proc_count(newcomm->c_local_group);
}
newcomm->c_my_rank = newcomm->c_local_group->grp_my_rank;
/* Set remote group and duplicate the local comm, if applicable */
if ( 0 < remote_size) {
memcpy ( newcomm->c_remote_group->grp_proc_pointers,
remote_procs, remote_size * sizeof(ompi_proc_t *));
ompi_group_increment_proc_count(newcomm->c_remote_group);
if ( 0 < remote_size) {
if ( NULL == remote_group ) {
ret = ompi_group_incl(oldcomm->c_remote_group, remote_size,
remote_ranks, &newcomm->c_remote_group);
}
else {
newcomm->c_remote_group = remote_group;
OBJ_RETAIN(newcomm->c_remote_group);
ompi_group_increment_proc_count(newcomm->c_remote_group);
}
newcomm->c_flags |= OMPI_COMM_INTER;
if ( OMPI_COMM_IS_INTRA(oldcomm) ) {
ompi_comm_dup(oldcomm, &newcomm->c_local_comm,1);
@ -114,15 +130,19 @@ int ompi_comm_set ( ompi_communicator_t *newcomm,
ompi_comm_dup(oldcomm->c_local_comm, &newcomm->c_local_comm,1);
}
}
else {
newcomm->c_remote_group = newcomm->c_local_group;
OBJ_RETAIN(newcomm->c_remote_group);
}
/* Check how many different jobids are represented in this communicator.
Necessary for the disconnect of dynamic communicators. */
ompi_comm_mark_dyncomm (newcomm);
/* Set error handler */
newcomm->error_handler = errh;
OBJ_RETAIN ( newcomm->error_handler );
/* Set Topology, if required */
if ( NULL != topocomponent ) {
@ -137,7 +157,7 @@ int ompi_comm_set ( ompi_communicator_t *newcomm,
* communicator into this communicator. This probably is
* another function in this file.
*/
if (OMPI_COMM_IS_CART ( oldcomm ) ) {
newcomm->c_flags |= OMPI_COMM_CART;
}
@ -149,34 +169,34 @@ int ompi_comm_set ( ompi_communicator_t *newcomm,
* Now I have to set the information on the topology from the previous
* communicator
*/
/* allocate the data for the common good */
newcomm->c_topo_comm = (mca_topo_base_comm_t *)malloc(sizeof(mca_topo_base_comm_t));
if (NULL == newcomm->c_topo_comm) {
OBJ_RELEASE(newcomm);
return OMPI_ERROR;
}
if (OMPI_SUCCESS != (ret = mca_topo_base_comm_select (newcomm,
oldcomm->c_topo_component))) {
oldcomm->c_topo_component))) {
free(newcomm->c_topo_comm);
OBJ_RELEASE(newcomm);
return ret;
}
/*
* Should copy over the information from the previous communicator
*/
if (OMPI_SUCCESS != (ret = ompi_comm_copy_topo (oldcomm, newcomm))) {
OBJ_RELEASE(newcomm);
return ret;
}
if (OMPI_SUCCESS != (ret = ompi_comm_copy_topo (oldcomm, newcomm))) {
OBJ_RELEASE(newcomm);
return ret;
}
}
/* Copy attributes and call according copy functions, if
required */
if (NULL != oldcomm->c_keyhash) {
if (NULL != attr) {
ompi_attr_hash_init(&newcomm->c_keyhash);
@ -188,13 +208,15 @@ int ompi_comm_set ( ompi_communicator_t *newcomm,
}
}
}
/* Initialize the PML stuff in the newcomm */
if ( OMPI_ERROR == MCA_PML_CALL(add_comm(newcomm)) ) {
OBJ_RELEASE(newcomm);
return OMPI_ERROR;
}
OMPI_COMM_SET_PML_ADDED(newcomm);
*ncomm = newcomm;
return (OMPI_SUCCESS);
}
@ -227,15 +249,17 @@ int ompi_comm_create ( ompi_communicator_t *comm, ompi_group_t *group,
ompi_communicator_t **newcomm )
{
ompi_communicator_t *newcomp;
int rsize;
int mode;
int rsize , lsize;
int mode,i,j;
int *allranks=NULL;
ompi_proc_t **rprocs=NULL;
int *rranks=NULL;
int rc = OMPI_SUCCESS;
lsize = group->grp_proc_count;
if ( OMPI_COMM_IS_INTER(comm) ) {
int tsize, i, j;
int tsize;
tsize = ompi_comm_remote_size(comm);
allranks = (int *) malloc ( tsize * sizeof(int));
if ( NULL == allranks ) {
@ -266,15 +290,15 @@ int ompi_comm_create ( ompi_communicator_t *comm, ompi_group_t *group,
}
/* Set proc-pointers for remote group */
rprocs = (ompi_proc_t **) calloc ( rsize, sizeof(ompi_proc_t *));
if ( NULL == rprocs ) {
rranks = (int *) malloc ( rsize * sizeof(int));
if ( NULL == rranks ) {
rc = OMPI_ERR_OUT_OF_RESOURCE;
goto exit;
}
for ( j = 0, i = 0; i < tsize; i++ ) {
if ( MPI_UNDEFINED != allranks[i] ) {
rprocs[j] = comm->c_remote_group->grp_proc_pointers[i];
rranks[j] = i;
j++;
}
}
@ -282,13 +306,28 @@ int ompi_comm_create ( ompi_communicator_t *comm, ompi_group_t *group,
} else {
rsize = 0;
rprocs = NULL;
rranks = NULL;
mode = OMPI_COMM_CID_INTRA;
}
newcomp = ompi_comm_allocate (group->grp_proc_count, rsize );
if ( NULL == newcomp ) {
rc = MPI_ERR_INTERN;
rc = ompi_comm_set ( &newcomp, /* new comm */
comm, /* old comm */
lsize, /* local_size */
NULL, /* local_ranks */
rsize, /* remote_size */
rranks, /* remote_ranks */
NULL, /* attrs */
comm->error_handler, /* error handler */
NULL, /* topo component */
group, /* local group */
NULL /* remote group */
);
if ( OMPI_SUCCESS != rc ) {
goto exit;
}
if ( NULL == newcomm ) {
rc = MPI_ERR_INTERN;
goto exit;
}
@ -304,20 +343,6 @@ int ompi_comm_create ( ompi_communicator_t *comm, ompi_group_t *group,
goto exit;
}
rc = ompi_comm_set ( newcomp, /* new comm */
comm, /* old comm */
group->grp_proc_count, /* local_size */
group->grp_proc_pointers, /* local_procs*/
rsize, /* remote_size */
rprocs, /* remote_procs */
NULL, /* attrs */
comm->error_handler, /* error handler */
NULL /* topo component */
);
if ( OMPI_SUCCESS != rc ) {
goto exit;
}
/* Set name for debugging purposes */
snprintf(newcomp->c_name, MPI_MAX_OBJECT_NAME, "MPI COMMUNICATOR %d CREATE FROM %d",
newcomp->c_contextid, comm->c_contextid );
@ -347,13 +372,13 @@ int ompi_comm_create ( ompi_communicator_t *comm, ompi_group_t *group,
if ( MPI_UNDEFINED == newcomp->c_local_group->grp_my_rank ) {
ompi_comm_free ( &newcomp );
}
exit:
if ( NULL != allranks ) {
free ( allranks );
}
if ( NULL != rprocs ) {
free ( rprocs );
if ( NULL != rranks ) {
free ( rranks );
}
*newcomm = newcomp;
@ -380,8 +405,8 @@ int ompi_comm_split ( ompi_communicator_t* comm, int color, int key,
int *results=NULL, *sorted=NULL;
int *rresults=NULL, *rsorted=NULL;
int rc=OMPI_SUCCESS;
ompi_proc_t **procs=NULL, **rprocs=NULL;
ompi_communicator_t *newcomp;
int *lranks=NULL, *rranks=NULL;
ompi_comm_allgatherfct *allgatherfct=NULL;
@ -439,13 +464,13 @@ int ompi_comm_split ( ompi_communicator_t* comm, int color, int key,
}
/* put group elements in a list */
procs = (ompi_proc_t **) malloc ( sizeof(ompi_proc_t *) * my_size);
if ( NULL == procs ) {
lranks = (int *) malloc ( my_size * sizeof(int));
if ( NULL == lranks ) {
rc = OMPI_ERR_OUT_OF_RESOURCE;
goto exit;
}
for (i = 0; i < my_size; i++) {
procs[i] = comm->c_local_group->grp_proc_pointers[sorted[i*2]];
lranks[i] = sorted[i*2];
}
/* Step 2: determine all the information for the remote group */
@ -493,18 +518,19 @@ int ompi_comm_split ( ompi_communicator_t* comm, int color, int key,
}
/* put group elements in a list */
rprocs = (ompi_proc_t **) malloc ( sizeof(ompi_proc_t *) * my_rsize);
if ( NULL == rprocs ) {
rranks = (int *) malloc ( my_rsize * sizeof(int));
if ( NULL == rranks) {
rc = OMPI_ERR_OUT_OF_RESOURCE;
goto exit;
}
for (i = 0; i < my_rsize; i++) {
rprocs[i] = comm->c_remote_group->grp_proc_pointers[rsorted[i*2]];
rranks[i] = rsorted[i*2];
}
mode = OMPI_COMM_CID_INTER;
} else {
my_rsize = 0;
rprocs = NULL;
rranks = NULL;
mode = OMPI_COMM_CID_INTRA;
}
@ -512,9 +538,27 @@ int ompi_comm_split ( ompi_communicator_t* comm, int color, int key,
/* Step 3: set up the communicator */
/* --------------------------------------------------------- */
/* Create the communicator finally */
newcomp = ompi_comm_allocate (my_size, my_rsize );
if ( NULL == newcomp ) {
rc = MPI_ERR_INTERN;
rc = ompi_comm_set ( &newcomp, /* new comm */
comm, /* old comm */
my_size, /* local_size */
lranks, /* local_ranks */
my_rsize, /* remote_size */
rranks, /* remote_ranks */
NULL, /* attrs */
comm->error_handler,/* error handler */
(pass_on_topo)?
(mca_base_component_t *)comm->c_topo_component:
NULL, /* topo component */
NULL, /* local group */
NULL /* remote group */
);
if ( NULL == newcomm ) {
rc = MPI_ERR_INTERN;
goto exit;
}
if ( OMPI_SUCCESS != rc ) {
goto exit;
}
@ -530,22 +574,6 @@ int ompi_comm_split ( ompi_communicator_t* comm, int color, int key,
goto exit;
}
rc = ompi_comm_set ( newcomp, /* new comm */
comm, /* old comm */
my_size, /* local_size */
procs, /* local_procs*/
my_rsize, /* remote_size */
rprocs, /* remote_procs */
NULL, /* attrs */
comm->error_handler,/* error handler */
(pass_on_topo)?
(mca_base_component_t *)comm->c_topo_component:
NULL); /* topo component */
if ( OMPI_SUCCESS != rc ) {
goto exit;
}
/* Set name for debugging purposes */
snprintf(newcomp->c_name, MPI_MAX_OBJECT_NAME, "MPI COMMUNICATOR %d SPLIT FROM %d",
newcomp->c_contextid, comm->c_contextid );
@ -577,13 +605,13 @@ int ompi_comm_split ( ompi_communicator_t* comm, int color, int key,
free ( rresults );
}
if ( NULL != rsorted ) {
free ( rsorted );
free ( rsorted );
}
if ( NULL != procs ) {
free ( procs );
if ( NULL != lranks ) {
free ( lranks );
}
if ( NULL != rprocs ) {
free ( rprocs );
if ( NULL != rranks ) {
free ( rranks );
}
@ -605,23 +633,37 @@ int ompi_comm_dup ( ompi_communicator_t * comm, ompi_communicator_t **newcomm,
ompi_communicator_t *comp=NULL;
ompi_communicator_t *newcomp=NULL;
int rsize, mode, rc=MPI_SUCCESS;
ompi_proc_t **rprocs;
comp = (ompi_communicator_t *) comm;
if ( OMPI_COMM_IS_INTER ( comp ) ){
rsize = comp->c_remote_group->grp_proc_count;
rprocs = comp->c_remote_group->grp_proc_pointers;
mode = OMPI_COMM_CID_INTER;
} else {
rsize = 0;
rprocs = NULL;
mode = OMPI_COMM_CID_INTRA;
}
*newcomm = MPI_COMM_NULL;
newcomp = ompi_comm_allocate (comp->c_local_group->grp_proc_count, rsize );
if ( NULL == newcomp ) {
return MPI_ERR_INTERN;
rc = ompi_comm_set ( &newcomp, /* new comm */
comp, /* old comm */
comp->c_local_group->grp_proc_count, /* local_size */
NULL, /* local_procs*/
rsize, /* remote_size */
NULL, /* remote_procs */
comp->c_keyhash, /* attrs */
comp->error_handler, /* error handler */
(mca_base_component_t *) comp->c_topo_component,
/* topo component */
comp->c_local_group, /* local group */
comp ->c_remote_group /* remote group */
);
if ( NULL == newcomm ) {
rc = MPI_ERR_INTERN;
return rc;
}
if ( MPI_SUCCESS != rc) {
return rc;
}
/* Determine context id. It is identical to f_2_c_handle */
@ -636,20 +678,6 @@ int ompi_comm_dup ( ompi_communicator_t * comm, ompi_communicator_t **newcomm,
return rc;
}
rc = ompi_comm_set ( newcomp, /* new comm */
comp, /* old comm */
comp->c_local_group->grp_proc_count, /* local_size */
comp->c_local_group->grp_proc_pointers, /* local_procs*/
rsize, /* remote_size */
rprocs, /* remote_procs */
comp->c_keyhash, /* attrs */
comp->error_handler, /* error handler */
(mca_base_component_t *) comp->c_topo_component /* topo component */
);
if ( MPI_SUCCESS != rc) {
return rc;
}
/* Set name for debugging purposes */
snprintf(newcomp->c_name, MPI_MAX_OBJECT_NAME, "MPI COMMUNICATOR %d DUP FROM %d",
newcomp->c_contextid, comm->c_contextid );
@ -657,37 +685,170 @@ int ompi_comm_dup ( ompi_communicator_t * comm, ompi_communicator_t **newcomm,
if(0 == sync_flag) {
/* activate communicator and init coll-module */
rc = ompi_comm_activate (newcomp, /* new communicator */
comp, /* old comm */
NULL, /* bridge comm */
NULL, /* local leader */
NULL, /* remote_leader */
mode, /* mode */
-1, /* send_first */
0, /* sync_flag */
(mca_base_component_t *) comp->c_coll_selected_component /* coll component */
);
comp, /* old comm */
NULL, /* bridge comm */
NULL, /* local leader */
NULL, /* remote_leader */
mode, /* mode */
-1, /* send_first */
0, /* sync_flag */
(mca_base_component_t *) comp->c_coll_selected_component /* coll component */
);
if ( MPI_SUCCESS != rc ) {
return rc;
}
} else {
/* activate communicator and init coll-module without synchronizing processes*/
rc = ompi_comm_activate (newcomp, /* new communicator */
comp, /* old comm */
NULL, /* bridge comm */
NULL, /* local leader */
NULL, /* remote_leader */
mode, /* mode */
-1, /* send_first */
1, /* sync_flag */
(mca_base_component_t *) comp->c_coll_selected_component /* coll component */
);
comp, /* old comm */
NULL, /* bridge comm */
NULL, /* local leader */
NULL, /* remote_leader */
mode, /* mode */
-1, /* send_first */
1, /* sync_flag */
(mca_base_component_t *) comp->c_coll_selected_component /* coll component */
);
if ( MPI_SUCCESS != rc ) {
return rc;
}
}
*newcomm = newcomp;
return MPI_SUCCESS;
}
/**********************************************************************/
/**********************************************************************/
/**********************************************************************/
int ompi_comm_compare(ompi_communicator_t *comm1, ompi_communicator_t *comm2, int *result) {
/* local variables */
ompi_communicator_t *comp1, *comp2;
ompi_group_t *group1, *group2;
int size1, size2, rsize1, rsize2;
int lresult, rresult=MPI_CONGRUENT;
int sameranks=1;
int sameorder=1;
int i, j;
int found = 0;
ompi_proc_t * proc1, * proc2;
comp1 = (ompi_communicator_t *) comm1;
comp2 = (ompi_communicator_t *) comm2;
if ( comp1->c_contextid == comp2->c_contextid ) {
*result = MPI_IDENT;
return MPI_SUCCESS;
}
*newcomm = newcomp;
if ( MPI_COMM_NULL == comm1 || MPI_COMM_NULL == comm2 ) {
*result = MPI_UNEQUAL;
return MPI_SUCCESS;
}
/* compare sizes of local and remote groups */
size1 = ompi_comm_size (comp1);
size2 = ompi_comm_size (comp2);
rsize1 = ompi_comm_remote_size (comp1);
rsize2 = ompi_comm_remote_size (comp2);
if ( size1 != size2 || rsize1 != rsize2 ) {
*result = MPI_UNEQUAL;
return MPI_SUCCESS;
}
/* Compare local groups */
/* we need to check whether the communicators contain
the same processes and in the same order */
group1 = (ompi_group_t *)comp1->c_local_group;
group2 = (ompi_group_t *)comp2->c_local_group;
for ( i = 0; i < size1; i++ ) {
proc1 = ompi_group_peer_lookup(group1,i);
proc2 = ompi_group_peer_lookup(group2,i);
if ( proc1 != proc2) {
sameorder = 0;
break;
}
}
for ( i = 0; i < size1; i++ ) {
found = 0;
for ( j = 0; j < size2; j++ ) {
proc1 = ompi_group_peer_lookup(group1,i);
proc2 = ompi_group_peer_lookup(group2,j);
if ( proc1 == proc2) {
found = 1;
break;
}
}
if ( !found ) {
sameranks = 0;
break;
}
}
if ( sameranks && sameorder )
lresult = MPI_CONGRUENT;
else if ( sameranks && !sameorder )
lresult = MPI_SIMILAR;
else
lresult = MPI_UNEQUAL;
if ( rsize1 > 0 ) {
/* Compare remote groups for inter-communicators */
/* we need to check whether the communicators contain
the same processes and in the same order */
sameranks = sameorder = 1;
group1 = (ompi_group_t *)comp1->c_remote_group;
group2 = (ompi_group_t *)comp2->c_remote_group;
for ( i = 0; i < rsize1; i++ ) {
proc1 = ompi_group_peer_lookup(group1,i);
proc2 = ompi_group_peer_lookup(group2,i);
if ( proc1 != proc2) {
sameorder = 0;
break;
}
}
for ( i = 0; i < rsize1; i++ ) {
found = 0;
for ( j = 0; j < rsize2; j++ ) {
proc1 = ompi_group_peer_lookup(group1,i);
proc2 = ompi_group_peer_lookup(group2,j);
if ( proc1 == proc2) {
found = 1;
break;
}
}
if ( !found ) {
sameranks = 0;
break;
}
}
if ( sameranks && sameorder )
rresult = MPI_CONGRUENT;
else if ( sameranks && !sameorder )
rresult = MPI_SIMILAR;
else
rresult = MPI_UNEQUAL;
}
/* determine final results */
if ( MPI_CONGRUENT == rresult ) {
*result = lresult;
}
else if ( MPI_SIMILAR == rresult ) {
if ( MPI_SIMILAR == lresult || MPI_CONGRUENT == lresult ) {
*result = MPI_SIMILAR;
}
else
*result = MPI_UNEQUAL;
}
else if ( MPI_UNEQUAL == rresult )
*result = MPI_UNEQUAL;
return MPI_SUCCESS;
}
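As a hypothetical MPI-level illustration of the semantics implemented above (not part of this commit): a communicator and its duplicate contain the same processes in the same order but have different context ids, so the comparison reports MPI_CONGRUENT rather than MPI_IDENT.

    #include <mpi.h>
    #include <stdio.h>

    int main(int argc, char **argv)
    {
        MPI_Comm dup;
        int result;

        MPI_Init(&argc, &argv);
        MPI_Comm_dup(MPI_COMM_WORLD, &dup);             /* same group, new context */
        MPI_Comm_compare(MPI_COMM_WORLD, dup, &result);
        if (MPI_CONGRUENT == result) {
            printf("MPI_COMM_WORLD and its dup are congruent\n");
        }
        MPI_Comm_free(&dup);
        MPI_Finalize();
        return 0;
    }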
/**********************************************************************/
@ -876,6 +1037,8 @@ ompi_proc_t **ompi_comm_get_rprocs ( ompi_communicator_t *local_comm,
orte_buffer_t *sbuf=NULL, *rbuf=NULL;
void *sendbuf;
char *recvbuf;
ompi_proc_t **proc_list=NULL;
int i;
local_rank = ompi_comm_rank (local_comm);
local_size = ompi_comm_size (local_comm);
@ -885,10 +1048,19 @@ ompi_proc_t **ompi_comm_get_rprocs ( ompi_communicator_t *local_comm,
if (NULL == sbuf) {
rc = ORTE_ERROR;
goto err_exit;
}
if(OMPI_GROUP_IS_DENSE(local_comm->c_local_group)) {
rc = ompi_proc_pack(local_comm->c_local_group->grp_proc_pointers,
local_size, sbuf);
}
/* get the proc list for the sparse implementations */
else {
proc_list = (ompi_proc_t **) calloc (local_comm->c_local_group->grp_proc_count,
sizeof (ompi_proc_t *));
for(i=0 ; i<local_comm->c_local_group->grp_proc_count ; i++)
proc_list[i] = ompi_group_peer_lookup(local_comm->c_local_group,i);
rc = ompi_proc_pack (proc_list, local_size, sbuf);
}
rc = ompi_proc_pack(local_comm->c_local_group->grp_proc_pointers,
local_size, sbuf);
if ( OMPI_SUCCESS != rc ) {
goto err_exit;
}
@ -926,7 +1098,7 @@ ompi_proc_t **ompi_comm_get_rprocs ( ompi_communicator_t *local_comm,
/* Allocate temporary buffer */
recvbuf = (char *)malloc(rlen);
if ( NULL == recvbuf ) {
goto err_exit;
goto err_exit;
}
if ( local_rank == local_leader ) {
@ -946,7 +1118,7 @@ ompi_proc_t **ompi_comm_get_rprocs ( ompi_communicator_t *local_comm,
goto err_exit;
}
OBJ_RELEASE(sbuf);
OBJ_RELEASE(sbuf);
}
/* broadcast name list to all processes in local_comm */
@ -987,6 +1159,9 @@ ompi_proc_t **ompi_comm_get_rprocs ( ompi_communicator_t *local_comm,
if (NULL != rbuf) {
OBJ_RELEASE(rbuf);
}
if ( NULL != proc_list ) {
free ( proc_list );
}
return rprocs;
}
@ -1065,8 +1240,8 @@ int ompi_comm_determine_first ( ompi_communicator_t *intercomm, int high )
flag = true;
}
else {
ourproc = intercomm->c_local_group->grp_proc_pointers[0];
theirproc = intercomm->c_remote_group->grp_proc_pointers[0];
ourproc = ompi_group_peer_lookup(intercomm->c_local_group,0);
theirproc = ompi_group_peer_lookup(intercomm->c_remote_group,0);
mask = ORTE_NS_CMP_JOBID | ORTE_NS_CMP_VPID;
rc = orte_ns.compare_fields(mask, &(ourproc->proc_name), &(theirproc->proc_name));
@ -1087,10 +1262,10 @@ int ompi_comm_dump ( ompi_communicator_t *comm )
{
opal_output(0, "Dumping information for comm_cid %d\n", comm->c_contextid);
opal_output(0," f2c index:%d cube_dim: %d\n", comm->c_f_to_c_index,
comm->c_cube_dim);
comm->c_cube_dim);
opal_output(0," Local group: size = %d my_rank = %d\n",
comm->c_local_group->grp_proc_count,
comm->c_local_group->grp_my_rank );
comm->c_local_group->grp_proc_count,
comm->c_local_group->grp_my_rank );
opal_output(0," Communicator is:");
/* Display flags */
@ -1172,6 +1347,8 @@ int ompi_topo_create (ompi_communicator_t *old_comm,
ompi_proc_t **topo_procs;
int num_procs;
int ret;
ompi_proc_t **proc_list=NULL;
int i;
/* allocate a new communicator */
@ -1228,9 +1405,26 @@ int ompi_topo_create (ompi_communicator_t *old_comm,
*/
num_procs = old_comm->c_local_group->grp_proc_count;
topo_procs = (ompi_proc_t **)malloc (num_procs * sizeof(ompi_proc_t *));
memcpy (topo_procs,
old_comm->c_local_group->grp_proc_pointers,
num_procs * sizeof(ompi_proc_t *));
if(OMPI_GROUP_IS_DENSE(old_comm->c_local_group)) {
memcpy (topo_procs,
old_comm->c_local_group->grp_proc_pointers,
num_procs * sizeof(ompi_proc_t *));
}
else {
proc_list = (ompi_proc_t **) calloc (old_comm->c_local_group->grp_proc_count,
sizeof (ompi_proc_t *));
for(i=0 ; i<old_comm->c_local_group->grp_proc_count ; i++)
proc_list[i] = ompi_group_peer_lookup(old_comm->c_local_group,i);
memcpy (topo_procs,
proc_list,
num_procs * sizeof(ompi_proc_t *));
}
if ( NULL != proc_list ) {
free ( proc_list );
}
new_rank = old_comm->c_local_group->grp_my_rank;
if (OMPI_COMM_CART == cart_or_graph) {


@ -9,9 +9,10 @@
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2006 University of Houston. All rights reserved.
* Copyright (c) 2006-2007 University of Houston. All rights reserved.
* Copyright (c) 2006-2007 Los Alamos National Security, LLC. All rights
* reserved.
* Copyright (c) 2007 Cisco, Inc. All rights reserved.
*
* $COPYRIGHT$
*
@ -85,6 +86,9 @@ int ompi_comm_connect_accept ( ompi_communicator_t *comm, int root,
ompi_group_t *group=comm->c_local_group;
orte_process_name_t *rport=NULL, tmp_port_name;
orte_buffer_t *nbuf=NULL, *nrbuf=NULL;
ompi_proc_t **proc_list=NULL;
int i,j;
ompi_group_t *new_group_pointer;
size = ompi_comm_size ( comm );
rank = ompi_comm_rank ( comm );
@ -98,12 +102,27 @@ int ompi_comm_connect_accept ( ompi_communicator_t *comm, int root,
information of the remote process. Therefore, we have to
exchange that.
*/
if(!OMPI_GROUP_IS_DENSE(group)) {
proc_list = (ompi_proc_t **) calloc (group->grp_proc_count,
sizeof (ompi_proc_t *));
for(i=0 ; i<group->grp_proc_count ; i++)
proc_list[i] = ompi_group_peer_lookup(group,i);
}
if ( OMPI_COMM_JOIN_TAG != (int)tag ) {
rc = ompi_comm_get_rport(port,send_first,
group->grp_proc_pointers[rank], tag,
&tmp_port_name);
if (OMPI_SUCCESS != rc) return rc;
rport = &tmp_port_name;
if(OMPI_GROUP_IS_DENSE(group)){
rc = ompi_comm_get_rport(port,send_first,
group->grp_proc_pointers[rank], tag,
&tmp_port_name);
}
else {
rc = ompi_comm_get_rport(port,send_first,
proc_list[rank], tag,
&tmp_port_name);
}
if (OMPI_SUCCESS != rc) return rc;
rport = &tmp_port_name;
} else {
rport = port;
}
@ -119,8 +138,14 @@ int ompi_comm_connect_accept ( ompi_communicator_t *comm, int root,
ORTE_ERROR_LOG(rc);
goto exit;
}
ompi_proc_pack(group->grp_proc_pointers, size, nbuf);
if(OMPI_GROUP_IS_DENSE(group)) {
ompi_proc_pack(group->grp_proc_pointers, size, nbuf);
}
else {
ompi_proc_pack(proc_list, size, nbuf);
}
nrbuf = OBJ_NEW(orte_buffer_t);
if (NULL == nrbuf ) {
rc = OMPI_ERROR;
@ -200,13 +225,41 @@ int ompi_comm_connect_accept ( ompi_communicator_t *comm, int root,
OBJ_RELEASE(nbuf);
}
/* allocate comm-structure */
newcomp = ompi_comm_allocate ( size, rsize );
new_group_pointer=ompi_group_allocate(rsize);
if( NULL == new_group_pointer ) {
return MPI_ERR_GROUP;
}
/* put group elements in the list */
for (j = 0; j < rsize; j++) {
new_group_pointer->grp_proc_pointers[j] = rprocs[j];
} /* end proc loop */
/* increment proc reference counters */
ompi_group_increment_proc_count(new_group_pointer);
/* set up communicator structure */
rc = ompi_comm_set ( &newcomp, /* new comm */
comm, /* old comm */
group->grp_proc_count, /* local_size */
NULL, /* local_procs */
rsize, /* remote_size */
NULL , /* remote_procs */
NULL, /* attrs */
comm->error_handler, /* error handler */
NULL, /* topo component */
group, /* local group */
new_group_pointer /* remote group */
);
if ( NULL == newcomp ) {
rc = OMPI_ERR_OUT_OF_RESOURCE;
goto exit;
}
ompi_group_decrement_proc_count (new_group_pointer);
OBJ_RELEASE(new_group_pointer);
new_group_pointer = MPI_GROUP_NULL;
/* allocate comm_cid */
rc = ompi_comm_nextcid ( newcomp, /* new communicator */
comm, /* old communicator */
@ -219,19 +272,6 @@ int ompi_comm_connect_accept ( ompi_communicator_t *comm, int root,
goto exit;
}
/* set up communicator structure */
rc = ompi_comm_set ( newcomp, /* new comm */
comm, /* old comm */
group->grp_proc_count, /* local_size */
group->grp_proc_pointers, /* local_procs*/
rsize, /* remote_size */
rprocs, /* remote_procs */
NULL, /* attrs */
comm->error_handler, /* error handler */
NULL /* topo component */
);
/* activate comm and init coll-component */
rc = ompi_comm_activate ( newcomp, /* new communicator */
comm, /* old communicator */
@ -260,6 +300,9 @@ int ompi_comm_connect_accept ( ompi_communicator_t *comm, int root,
if ( NULL != rprocs ) {
free ( rprocs );
}
if ( NULL != proc_list ) {
free ( proc_list );
}
if ( OMPI_SUCCESS != rc ) {
if ( MPI_COMM_NULL != newcomp ) {
OBJ_RETAIN(newcomp);
@ -897,6 +940,7 @@ void ompi_comm_mark_dyncomm (ompi_communicator_t *comm)
int found;
orte_jobid_t jobids[OMPI_COMM_MAXJOBIDS], thisjobid;
ompi_group_t *grp=NULL;
ompi_proc_t *proc = NULL;
/* special case for MPI_COMM_NULL */
if ( comm == MPI_COMM_NULL ) {
@ -910,7 +954,8 @@ void ompi_comm_mark_dyncomm (ompi_communicator_t *comm)
of different jobids. */
grp = comm->c_local_group;
for (i=0; i< size; i++) {
thisjobid = grp->grp_proc_pointers[i]->proc_name.jobid;
proc = ompi_group_peer_lookup(grp,i);
thisjobid = proc->proc_name.jobid;
found = 0;
for ( j=0; j<numjobids; j++) {
if (thisjobid == jobids[j]) {
@ -927,7 +972,8 @@ void ompi_comm_mark_dyncomm (ompi_communicator_t *comm)
and count number of different jobids */
grp = comm->c_remote_group;
for (i=0; i< rsize; i++) {
thisjobid = grp->grp_proc_pointers[i]->proc_name.jobid;
proc = ompi_group_peer_lookup(grp,i);
thisjobid = proc->proc_name.jobid;
found = 0;
for ( j=0; j<numjobids; j++) {
if ( thisjobid == jobids[j]) {


@ -9,7 +9,8 @@
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2006 University of Houston. All rights reserved.
* Copyright (c) 2006-2007 University of Houston. All rights reserved.
* Copyright (c) 2007 Cisco, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -73,7 +74,8 @@ int ompi_comm_init(void)
group = OBJ_NEW(ompi_group_t);
group->grp_proc_pointers = ompi_proc_world(&size);
group->grp_proc_count = (int)size;
group->grp_flags |= OMPI_GROUP_INTRINSIC;
OMPI_GROUP_SET_INTRINSIC (group);
OMPI_GROUP_SET_DENSE (group);
ompi_set_group_rank(group, ompi_proc_local());
ompi_group_increment_proc_count (group);
@ -108,8 +110,9 @@ int ompi_comm_init(void)
group->grp_proc_pointers = ompi_proc_self(&size);
group->grp_my_rank = 0;
group->grp_proc_count = (int)size;
group->grp_flags |= OMPI_GROUP_INTRINSIC;
OMPI_GROUP_SET_INTRINSIC (group);
OMPI_GROUP_SET_DENSE (group);
ompi_mpi_comm_self.c_contextid = 1;
ompi_mpi_comm_self.c_f_to_c_index = 1;
ompi_mpi_comm_self.c_id_start_index = 20;


@ -260,7 +260,8 @@ struct ompi_communicator_t {
return (struct ompi_proc_t *) NULL;
}
#endif
return comm->c_remote_group->grp_proc_pointers[peer_id];
/*return comm->c_remote_group->grp_proc_pointers[peer_id];*/
return ompi_group_peer_lookup(comm->c_remote_group,peer_id);
}
static inline bool ompi_comm_peer_invalid(ompi_communicator_t* comm, int peer_id)
@ -327,6 +328,13 @@ struct ompi_communicator_t {
OMPI_DECLSPEC int ompi_comm_dup (ompi_communicator_t *comm, ompi_communicator_t **newcomm,
int sync_flag);
/**
* compare two communicators.
*
* @param comm1,comm2: input communicators
* @param result: set to MPI_IDENT, MPI_CONGRUENT, MPI_SIMILAR, or MPI_UNEQUAL
*
*/
int ompi_comm_compare(ompi_communicator_t *comm1, ompi_communicator_t *comm2, int *result);
/**
* free a communicator
@ -382,15 +390,17 @@ struct ompi_communicator_t {
* This is THE routine, where all the communicator stuff
* is really set.
*/
int ompi_comm_set ( ompi_communicator_t* newcomm,
int ompi_comm_set ( ompi_communicator_t** newcomm,
ompi_communicator_t* oldcomm,
int local_size,
struct ompi_proc_t **local_procs,
int *local_ranks,
int remote_size,
struct ompi_proc_t **remote_procs,
int *remote_ranks,
opal_hash_table_t *attr,
ompi_errhandler_t *errh,
mca_base_component_t *topocomponent );
mca_base_component_t *topocomponent,
ompi_group_t *local_group,
ompi_group_t *remote_group );
/**
* This is a short-hand routine used in intercomm_create.
* The routine makes sure, that all processes have afterwards


@ -10,6 +10,7 @@
# University of Stuttgart. All rights reserved.
# Copyright (c) 2004-2005 The Regents of the University of California.
# All rights reserved.
# Copyright (c) 2006-2007 University of Houston. All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
@ -25,4 +26,8 @@ headers += \
libmpi_la_SOURCES += \
group/group.c \
group/group_init.c \
group/group_set_rank.c
group/group_set_rank.c \
group/group_plist.c \
group/group_sporadic.c \
group/group_strided.c \
group/group_bitmap.c

File diff not shown because of its large size.


@ -9,7 +9,8 @@
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2006 University of Houston. All rights reserved.
* Copyright (c) 2006-2007 University of Houston. All rights reserved.
* Copyright (c) 2007 Cisco, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -22,7 +23,7 @@
*
* Infrastructure for MPI group support.
*/
#include "ompi/proc/proc.h"
#ifndef OMPI_GROUP_H
#define OMPI_GROUP_H
@ -32,28 +33,89 @@
#if defined(c_plusplus) || defined(__cplusplus)
extern "C" {
#endif
#define BSIZE ((int)sizeof(unsigned char)*8)   /* number of bits per bitmap array element */
struct ompi_group_sporadic_list_t
{
int rank_first;
int length;
};
struct ompi_group_sporadic_data_t
{
struct ompi_group_sporadic_list_t *grp_sporadic_list;
/** list holding the sporadic rank ranges */
int grp_sporadic_list_len;  /** length of the sporadic list */
};
struct ompi_group_strided_data_t
{
int grp_strided_offset; /** offset to start from when including or excluding */
int grp_strided_stride; /** stride for including or excluding */
int grp_strided_last_element; /** the last element to be included */
};
struct ompi_group_bitmap_data_t
{
unsigned char *grp_bitmap_array; /* the bit map array for sparse groups of type BMAP */
int grp_bitmap_array_len; /* length of the bit array */
};
/**
* Group structure
* Currently we have four formats for storing the process pointers that are members
* of the group.
* PList: a dense format that stores all the process pointers of the group.
* Sporadic: a sparse format that stores the ranges of parent-group ranks
* that are included in the current group.
* Strided: a sparse format that stores three integers (offset, stride, last
* element) describing the regular pattern by which the current group
* is formed from its parent group.
* Bitmap: a sparse format that maintains a bitmap of the processes included
* from the parent group. For each process included from the parent
* group, the bit corresponding to its parent rank is set in the
* bitmap array.
*/
struct ompi_group_t {
opal_object_t super; /**< base class */
int grp_proc_count; /**< number of processes in group */
int grp_my_rank; /**< rank in group */
int grp_f_to_c_index; /**< index in Fortran <-> C translation array */
uint32_t grp_flags; /**< flags, e.g. freed, cannot be freed etc.*/
struct ompi_proc_t **grp_proc_pointers;
/**< list of pointers to ompi_proc_t structures
for each process in the group */
uint32_t grp_flags; /**< flags, e.g. freed, cannot be freed etc.*/
/** pointer to the parent group when one of the sparse storage formats is used */
struct ompi_group_t *grp_parent_group_ptr;
union
{
struct ompi_group_sporadic_data_t grp_sporadic;
struct ompi_group_strided_data_t grp_strided;
struct ompi_group_bitmap_data_t grp_bitmap;
} sparse_data;
};
typedef struct ompi_group_t ompi_group_t;
OMPI_DECLSPEC OBJ_CLASS_DECLARATION(ompi_group_t);
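To make the storage formats described above concrete, here is a hypothetical sketch (not part of this commit, and assuming C99 designated initializers) of how two child groups could be encoded with the sparse structures declared above instead of an array of proc pointers:

    /* Every other rank (0, 2, ..., 14) of a 16-process parent, held in the
     * strided format as just three integers. */
    struct ompi_group_strided_data_t even_ranks = {
        .grp_strided_offset       = 0,    /* first parent rank included */
        .grp_strided_stride       = 2,    /* then every 2nd parent rank */
        .grp_strided_last_element = 14    /* last parent rank included  */
    };

    /* Parent ranks {4, 5, 6, 10, 11}, held in the sporadic format as two
     * contiguous (first rank, length) ranges. */
    struct ompi_group_sporadic_list_t two_ranges[2] = {
        { .rank_first = 4,  .length = 3 },    /* parent ranks 4-6   */
        { .rank_first = 10, .length = 2 }     /* parent ranks 10-11 */
    };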
/* Some definitions for the flags */
#define OMPI_GROUP_ISFREED 0x00000001
#define OMPI_GROUP_INTRINSIC 0x00000002
#define OMPI_GROUP_ISFREED 0x00000001
#define OMPI_GROUP_INTRINSIC 0x00000002
#define OMPI_GROUP_DENSE 0x00000004
#define OMPI_GROUP_SPORADIC 0x00000008
#define OMPI_GROUP_STRIDED 0x00000010
#define OMPI_GROUP_BITMAP 0x00000020
#define OMPI_GROUP_IS_INTRINSIC(_group) ((_group)->grp_flags&OMPI_GROUP_INTRINSIC)
#define OMPI_GROUP_IS_DENSE(_group) ((_group)->grp_flags & OMPI_GROUP_DENSE)
#define OMPI_GROUP_IS_SPORADIC(_group) ((_group)->grp_flags & OMPI_GROUP_SPORADIC)
#define OMPI_GROUP_IS_STRIDED(_group) ((_group)->grp_flags & OMPI_GROUP_STRIDED)
#define OMPI_GROUP_IS_BITMAP(_group) ((_group)->grp_flags & OMPI_GROUP_BITMAP)
#define OMPI_GROUP_SET_INTRINSIC(_group) ( (_group)->grp_flags |= OMPI_GROUP_INTRINSIC)
#define OMPI_GROUP_SET_DENSE(_group) ( (_group)->grp_flags |= OMPI_GROUP_DENSE)
#define OMPI_GROUP_SET_SPORADIC(_group) ( (_group)->grp_flags |= OMPI_GROUP_SPORADIC)
#define OMPI_GROUP_SET_STRIDED(_group) ( (_group)->grp_flags |= OMPI_GROUP_STRIDED)
#define OMPI_GROUP_SET_BITMAP(_group) ( (_group)->grp_flags |= OMPI_GROUP_BITMAP)
/**
* Table for Fortran <-> C group handle conversion
@ -74,7 +136,9 @@ OMPI_DECLSPEC extern ompi_group_t ompi_mpi_group_null;
* @return Pointer to new group structure
*/
ompi_group_t *ompi_group_allocate(int group_size);
ompi_group_t *ompi_group_allocate_sporadic(int group_size);
ompi_group_t *ompi_group_allocate_strided(void);
ompi_group_t *ompi_group_allocate_bmap(int orig_group_size, int group_size);
/**
* Increment the reference count of the proc structures.
@ -135,18 +199,6 @@ static inline int ompi_group_rank(ompi_group_t *group)
}
static inline struct ompi_proc_t* ompi_group_peer_lookup(ompi_group_t *group, int peer_id)
{
#if OMPI_ENABLE_DEBUG
if (peer_id >= group->grp_proc_count) {
opal_output(0, "ompi_group_lookup_peer: invalid peer index (%d)", peer_id);
return (struct ompi_proc_t *) NULL;
}
#endif
return group->grp_proc_pointers[peer_id];
}
/**
* Set group rank in the input group structure
@ -175,17 +227,108 @@ int ompi_group_translate_ranks ( ompi_group_t *group1,
int ompi_group_free (ompi_group_t **group);
/**
* Prototypes for the group back-end functions. Argument lists are similar to the according
* C MPI functions.
* Functions to handle process pointers for sparse group formats
*/
int ompi_group_union (ompi_group_t* group1, ompi_group_t* group2, ompi_group_t **new_group);
int ompi_group_incl(ompi_group_t* group, int n, int *ranks, ompi_group_t **new_group);
int ompi_group_excl(ompi_group_t* group, int n, int *ranks, ompi_group_t **new_group);
int ompi_group_range_incl(ompi_group_t* group, int n_triplets, int ranges[][3],ompi_group_t **new_group);
int ompi_group_range_excl(ompi_group_t* group, int n_triplets, int ranges[][3],ompi_group_t **new_group);
int ompi_group_intersection(ompi_group_t* group1,ompi_group_t* group2,ompi_group_t **new_group);
int ompi_group_difference(ompi_group_t* group1, ompi_group_t* group2,ompi_group_t **new_group);
ompi_proc_t* ompi_group_get_proc_ptr (ompi_group_t* group , int rank);
int ompi_group_translate_ranks_sporadic ( ompi_group_t *group1,
int n_ranks, int *ranks1,
ompi_group_t *group2,
int *ranks2);
int ompi_group_translate_ranks_sporadic_reverse ( ompi_group_t *group1,
int n_ranks, int *ranks1,
ompi_group_t *group2,
int *ranks2);
int ompi_group_translate_ranks_strided ( ompi_group_t *group1,
int n_ranks, int *ranks1,
ompi_group_t *group2,
int *ranks2);
int ompi_group_translate_ranks_strided_reverse ( ompi_group_t *group1,
int n_ranks, int *ranks1,
ompi_group_t *group2,
int *ranks2);
int ompi_group_translate_ranks_bmap ( ompi_group_t *group1,
int n_ranks, int *ranks1,
ompi_group_t *group2,
int *ranks2);
int ompi_group_translate_ranks_bmap_reverse ( ompi_group_t *group1,
int n_ranks, int *ranks1,
ompi_group_t *group2,
int *ranks2);
/**
* Prototypes for the group back-end functions. Argument lists
* are similar to those of the corresponding C MPI functions.
*/
int ompi_group_incl(ompi_group_t* group, int n, int *ranks,
ompi_group_t **new_group);
int ompi_group_excl(ompi_group_t* group, int n, int *ranks,
ompi_group_t **new_group);
int ompi_group_range_incl(ompi_group_t* group, int n_triplets,
int ranges[][3],ompi_group_t **new_group);
int ompi_group_range_excl(ompi_group_t* group, int n_triplets,
int ranges[][3],ompi_group_t **new_group);
int ompi_group_union (ompi_group_t* group1, ompi_group_t* group2,
ompi_group_t **new_group);
int ompi_group_intersection(ompi_group_t* group1,ompi_group_t* group2,
ompi_group_t **new_group);
int ompi_group_difference(ompi_group_t* group1, ompi_group_t* group2,
ompi_group_t **new_group);
/**
* Include Functions to handle Sparse storage formats
*/
int ompi_group_incl_plist(ompi_group_t* group, int n, int *ranks,
ompi_group_t **new_group);
int ompi_group_incl_spor(ompi_group_t* group, int n, int *ranks,
ompi_group_t **new_group);
int ompi_group_incl_strided(ompi_group_t* group, int n, int *ranks,
ompi_group_t **new_group);
int ompi_group_incl_bmap(ompi_group_t* group, int n, int *ranks,
ompi_group_t **new_group);
/**
* Functions to calculate storage spaces
*/
int ompi_group_calc_plist ( int n, int *ranks );
int ompi_group_calc_strided ( int n, int *ranks );
int ompi_group_calc_sporadic ( int n, int *ranks );
int ompi_group_calc_bmap ( int n, int orig_size , int *ranks );
/**
* Function to return the minimum value in an array
*/
int ompi_group_minloc (int list[], int length);
/**
* Inline function that accesses the proc pointer directly when sparse groups
* are compiled out, and goes through the format-aware lookup function otherwise
*/
static inline struct ompi_proc_t* ompi_group_peer_lookup(ompi_group_t *group, int peer_id)
{
#if OMPI_ENABLE_DEBUG
if (peer_id >= group->grp_proc_count) {
opal_output(0, "ompi_group_lookup_peer: invalid peer index (%d)", peer_id);
return (struct ompi_proc_t *) NULL;
}
#endif
#if OMPI_GROUP_SPARSE
return ompi_group_get_proc_ptr (group, peer_id);
#else
return group->grp_proc_pointers[peer_id];
#endif
}
/**
* Function to print the group info
*/
int ompi_group_dump (ompi_group_t* group);
/**
* Ceiling function, provided here so that we do not have to include math.h
*/
int ompi_group_div_ceil (int num, int den);
#if defined(c_plusplus) || defined(__cplusplus)
}

ompi/group/group_bitmap.c (new file)

@ -0,0 +1,190 @@
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2006-2007 University of Houston. All rights reserved.
* Copyright (c) 2007 Cisco, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "ompi/group/group.h"
#include "ompi/constants.h"
#include "ompi/proc/proc.h"
#include "mpi.h"
static bool check_ranks (int, int *);
int ompi_group_calc_bmap ( int n, int orig_size , int *ranks) {
if (check_ranks(n,ranks)) {
return ompi_group_div_ceil(orig_size,BSIZE);
}
else {
return -1;
}
}
/* from parent group to child group*/
int ompi_group_translate_ranks_bmap ( ompi_group_t *parent_group,
int n_ranks, int *ranks1,
ompi_group_t *child_group,
int *ranks2)
{
int i,count,j,k,m;
unsigned char tmp, tmp1;
for (j=0 ; j<n_ranks ; j++) {
if ( MPI_PROC_NULL == ranks1[j]) {
ranks2[j] = MPI_PROC_NULL;
}
else {
ranks2[j] = MPI_UNDEFINED;
m = ranks1[j];
count = 0;
tmp = ( 1 << (m % BSIZE) );
/* check if the bit that corresponds to the parent rank is set in the bitmap */
if ( tmp == (child_group->sparse_data.grp_bitmap.grp_bitmap_array[(int)(m/BSIZE)]
& (1 << (m % BSIZE)))) {
/*
* Add up how many bits are set until we reach the bit of the parent
* rank we want. The rank in the child is the number of bits that are
* set on the way to the corresponding bit, minus one.
*/
for (i=0 ; i<=(int)(m/BSIZE) ; i++) {
for (k=0 ; k<BSIZE ; k++) {
tmp1 = ( 1 << k);
if ( tmp1 == ( child_group->sparse_data.grp_bitmap.grp_bitmap_array[i]
& (1 << k) ) ) {
count++;
}
if( i==(int)(m/BSIZE) && k==m % BSIZE ) {
ranks2[j] = count-1;
i = (int)(m/BSIZE) + 1;
break;
}
}
}
}
}
}
return OMPI_SUCCESS;
}
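A hypothetical worked example of the counting logic above (not part of this commit): take a parent group of 8 ranks whose child group includes parent ranks {1, 3, 6}, so the single bitmap byte is 0x4A (binary 01001010). Translating parent rank 3 counts the set bits up to and including bit 3:

    #include <stdio.h>

    int main(void)
    {
        unsigned char bitmap = 0x4A;   /* bits 1, 3 and 6 set             */
        int parent_rank = 3, count = 0, k;

        for (k = 0; k <= parent_rank; k++) {
            if (bitmap & (1 << k)) {
                count++;               /* bits 1 and 3 -> count becomes 2 */
            }
        }
        printf("child rank = %d\n", count - 1);   /* prints 1 */
        return 0;
    }

The reverse translation below inverts this: it walks the bitmap until the (child rank + 1)-th set bit is found and returns that bit's position as the parent rank, so child rank 1 maps back to parent rank 3.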
/* from child group to parent group */
int ompi_group_translate_ranks_bmap_reverse ( ompi_group_t *child_group,
int n_ranks, int *ranks1,
ompi_group_t *parent_group,
int *ranks2)
{
int i,j,count,m,k;
unsigned char tmp;
for (j=0 ; j<n_ranks ; j++) {
if ( MPI_PROC_NULL == ranks1[j]) {
ranks2[j] = MPI_PROC_NULL;
}
else {
m = ranks1[j];
count = 0;
/*
* Walk the bitmap until the (child rank + 1)-th set bit is found.
* The parent rank is that bit's position, i.e. the number of bit
* positions (set and unset) passed along the way.
*/
for (i=0 ; i<child_group->sparse_data.grp_bitmap.grp_bitmap_array_len ; i++) {
for (k=0 ; k<BSIZE ; k++) {
tmp = ( 1 << k);
if ( tmp == ( child_group->sparse_data.grp_bitmap.grp_bitmap_array[i]
& (1 << k) ) )
count++;
if( m == count-1 ) {
ranks2[j] = i*BSIZE + k;
i = child_group->sparse_data.grp_bitmap.grp_bitmap_array_len + 1;
break;
}
}
}
}
}
return OMPI_SUCCESS;
}
int ompi_group_div_ceil (int num, int den)
{
if (0 == num%den) {
return num/den;
}
else {
return (int)(num/den) + 1;
}
}
/*
* This function checks that all ranks in the included list are
* monotonically increasing. If they are not, the bitmap format cannot be
* used, because the translation algorithms assume that the ranks appear
* in order in the bitmap. For example, {1, 5, 9} can use the bitmap
* format, while {5, 1, 9} cannot.
*/
static bool check_ranks (int n, int *ranks) {
int i;
for (i=1 ; i < n ; i++) {
if ( ranks[i-1] > ranks [i] ) {
return false;
}
}
return true;
}
int ompi_group_incl_bmap(ompi_group_t* group, int n, int *ranks,
ompi_group_t **new_group)
{
/* local variables */
int my_group_rank,i,bit_set;
ompi_group_t *group_pointer, *new_group_pointer;
group_pointer = (ompi_group_t *)group;
if ( 0 == n ) {
*new_group = MPI_GROUP_EMPTY;
OBJ_RETAIN(MPI_GROUP_EMPTY);
return OMPI_SUCCESS;
}
new_group_pointer = ompi_group_allocate_bmap(group->grp_proc_count, n);
if( NULL == new_group_pointer ) {
return MPI_ERR_GROUP;
}
/* Initialize the bit array to zeros */
for (i=0 ; i<new_group_pointer->sparse_data.grp_bitmap.grp_bitmap_array_len ; i++) {
new_group_pointer->
sparse_data.grp_bitmap.grp_bitmap_array[i] = 0;
}
/* set the bits */
for (i=0 ; i<n ; i++) {
bit_set = ranks[i] % BSIZE;
new_group_pointer->
sparse_data.grp_bitmap.grp_bitmap_array[(int)(ranks[i]/BSIZE)] |= (1 << bit_set);
}
new_group_pointer -> grp_parent_group_ptr = group_pointer;
OBJ_RETAIN(new_group_pointer -> grp_parent_group_ptr);
ompi_group_increment_proc_count(new_group_pointer -> grp_parent_group_ptr);
ompi_group_increment_proc_count(new_group_pointer);
my_group_rank=group_pointer->grp_my_rank;
ompi_group_translate_ranks (group_pointer,1,&my_group_rank,
new_group_pointer,&new_group_pointer->grp_my_rank);
*new_group = (MPI_Group)new_group_pointer;
return OMPI_SUCCESS;
}


@ -9,7 +9,8 @@
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2006 University of Houston. All rights reserved.
* Copyright (c) 2006-2007 University of Houston. All rights reserved.
* Copyright (c) 2007 Cisco, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -83,47 +84,152 @@ ompi_group_t *ompi_group_allocate(int group_size)
new_group->grp_proc_count = group_size;
/* initialize our rank to MPI_UNDEFINED */
new_group->grp_my_rank = MPI_UNDEFINED;
new_group->grp_my_rank = MPI_UNDEFINED;
OMPI_GROUP_SET_DENSE(new_group);
error_exit:
/* return */
return new_group;
}
ompi_group_t *ompi_group_allocate_sporadic(int group_size)
{
/* local variables */
ompi_group_t *new_group = NULL;
assert (group_size >= 0);
/* create new group group element */
new_group = OBJ_NEW(ompi_group_t);
if (new_group) {
if (OMPI_ERROR == new_group->grp_f_to_c_index) {
OBJ_RELEASE(new_group);
new_group = NULL;
goto error_exit;
} else {
/* allocate array of (grp_sporadic_list )'s */
if (0 < group_size) {
new_group->sparse_data.grp_sporadic.grp_sporadic_list =
(struct ompi_group_sporadic_list_t *)malloc
(sizeof(struct ompi_group_sporadic_list_t ) * group_size);
/* non-empty group */
if ( NULL == new_group->sparse_data.grp_sporadic.grp_sporadic_list) {
/* sporadic list allocation failed */
OBJ_RELEASE (new_group);
new_group = NULL;
goto error_exit;
}
}
/* set the group size */
new_group->grp_proc_count = group_size; /* actually it's the number of
elements in the sporadic list*/
/* initialize our rank to MPI_UNDEFINED */
new_group->grp_my_rank = MPI_UNDEFINED;
}
}
new_group->grp_proc_pointers = NULL;
OMPI_GROUP_SET_SPORADIC(new_group);
error_exit:
/* return */
return new_group;
}
ompi_group_t *ompi_group_allocate_strided(void) {
/* local variables */
ompi_group_t *new_group = NULL;
/* create new group group element */
new_group = OBJ_NEW(ompi_group_t);
if (new_group) {
if (OMPI_ERROR == new_group->grp_f_to_c_index) {
OBJ_RELEASE(new_group);
new_group = NULL;
goto error_exit;
}
else {
/* initialize our rank to MPI_UNDEFINED */
new_group->grp_my_rank = MPI_UNDEFINED;
}
}
new_group->grp_proc_pointers = NULL;
OMPI_GROUP_SET_STRIDED(new_group);
new_group->sparse_data.grp_strided.grp_strided_stride = -1;
new_group->sparse_data.grp_strided.grp_strided_offset = -1;
new_group->sparse_data.grp_strided.grp_strided_last_element = -1;
error_exit:
/* return */
return new_group;
}
ompi_group_t *ompi_group_allocate_bmap(int orig_group_size , int group_size)
{
/* local variables */
ompi_group_t *new_group = NULL;
assert (group_size >= 0);
/* create new group group element */
new_group = OBJ_NEW(ompi_group_t);
if (new_group) {
if (OMPI_ERROR == new_group->grp_f_to_c_index) {
OBJ_RELEASE(new_group);
new_group = NULL;
goto error_exit;
} else {
/* allocate the unsigned char list */
new_group->sparse_data.grp_bitmap.grp_bitmap_array = (unsigned char *)malloc
(sizeof(unsigned char) * ompi_group_div_ceil(orig_group_size,BSIZE));
new_group->sparse_data.grp_bitmap.grp_bitmap_array_len =
ompi_group_div_ceil(orig_group_size,BSIZE);
new_group->grp_proc_count = group_size;
/* initialize our rank to MPI_UNDEFINED */
new_group->grp_my_rank = MPI_UNDEFINED;
}
}
new_group->grp_proc_pointers = NULL;
OMPI_GROUP_SET_BITMAP(new_group);
error_exit:
/* return */
return new_group;
}
/*
* increment the reference count of the proc structures
*/
void ompi_group_increment_proc_count(ompi_group_t *group)
{
/* local variable */
int proc;
ompi_proc_t * proc_pointer;
for (proc = 0; proc < group->grp_proc_count; proc++) {
OBJ_RETAIN(group->grp_proc_pointers[proc]);
proc_pointer = ompi_group_peer_lookup(group,proc);
OBJ_RETAIN(proc_pointer);
}
/* return */
return;
}
/*
* decrement the reference count of the proc structures
*/
void ompi_group_decrement_proc_count(ompi_group_t *group)
{
/* local variable */
int proc;
ompi_proc_t * proc_pointer;
for (proc = 0; proc < group->grp_proc_count; proc++) {
OBJ_RELEASE(group->grp_proc_pointers[proc]);
proc_pointer = ompi_group_peer_lookup(group,proc);
OBJ_RELEASE(proc_pointer);
}
/* return */
return;
}
/*
* group constructor
*/
@ -141,6 +247,9 @@ static void ompi_group_construct(ompi_group_t *new_group)
new_group->grp_f_to_c_index = ret_val;
new_group->grp_flags = 0;
/* default the sparse values for groups */
new_group->grp_parent_group_ptr = NULL;
/* return */
return;
}
@ -157,8 +266,26 @@ static void ompi_group_destruct(ompi_group_t *group)
either). */
/* release the grp_proc_pointers memory */
if (NULL != group->grp_proc_pointers)
if (NULL != group->grp_proc_pointers) {
free(group->grp_proc_pointers);
}
if (OMPI_GROUP_IS_SPORADIC(group)) {
if (NULL != group->sparse_data.grp_sporadic.grp_sporadic_list) {
free(group->sparse_data.grp_sporadic.grp_sporadic_list);
}
}
if (OMPI_GROUP_IS_BITMAP(group)) {
if (NULL != group->sparse_data.grp_bitmap.grp_bitmap_array) {
free(group->sparse_data.grp_bitmap.grp_bitmap_array);
}
}
if (NULL != group->grp_parent_group_ptr){
ompi_group_decrement_proc_count(group->grp_parent_group_ptr);
OBJ_RELEASE(group->grp_parent_group_ptr);
}
/* reset the ompi_group_f_to_c_table entry - make sure that the
* entry is in the table */
@ -183,18 +310,20 @@ int ompi_group_init(void)
/* add MPI_GROUP_NULL to table */
OBJ_CONSTRUCT(&ompi_mpi_group_null, ompi_group_t);
ompi_mpi_group_null.grp_proc_count = 0;
ompi_mpi_group_null.grp_my_rank = MPI_PROC_NULL;
ompi_mpi_group_null.grp_proc_pointers = NULL;
ompi_mpi_group_null.grp_flags |= OMPI_GROUP_INTRINSIC;
ompi_mpi_group_null.grp_proc_count = 0;
ompi_mpi_group_null.grp_my_rank = MPI_PROC_NULL;
ompi_mpi_group_null.grp_proc_pointers = NULL;
ompi_mpi_group_null.grp_flags |= OMPI_GROUP_DENSE;
ompi_mpi_group_null.grp_flags |= OMPI_GROUP_INTRINSIC;
/* add MPI_GROUP_EMPTY to table */
OBJ_CONSTRUCT(&ompi_mpi_group_empty, ompi_group_t);
ompi_mpi_group_empty.grp_proc_count = 0;
ompi_mpi_group_empty.grp_my_rank = MPI_UNDEFINED;
ompi_mpi_group_empty.grp_proc_pointers = NULL;
ompi_mpi_group_empty.grp_flags |= OMPI_GROUP_INTRINSIC;
ompi_mpi_group_empty.grp_proc_count = 0;
ompi_mpi_group_empty.grp_my_rank = MPI_UNDEFINED;
ompi_mpi_group_empty.grp_proc_pointers = NULL;
ompi_mpi_group_empty.grp_flags |= OMPI_GROUP_DENSE;
ompi_mpi_group_empty.grp_flags |= OMPI_GROUP_INTRINSIC;
return OMPI_SUCCESS;
}
@ -214,3 +343,6 @@ int ompi_group_finalize(void)
return OMPI_SUCCESS;
}
/* LocalWords: grp
*/

ompi/group/group_plist.c (new file)

@ -0,0 +1,294 @@
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2006-2007 University of Houston. All rights reserved.
* Copyright (c) 2007 Cisco, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "ompi/group/group.h"
#include "ompi/constants.h"
#include "ompi/proc/proc.h"
#include "mpi.h"
#include <math.h>
int ompi_group_calc_plist ( int n , int *ranks ) {
return sizeof(char *) * n ;
}
int ompi_group_incl_plist(ompi_group_t* group, int n, int *ranks,
ompi_group_t **new_group)
{
/* local variables */
int proc,my_group_rank;
ompi_group_t *group_pointer, *new_group_pointer;
ompi_proc_t *my_proc_pointer;
group_pointer = (ompi_group_t *)group;
if ( 0 == n ) {
*new_group = MPI_GROUP_EMPTY;
OBJ_RETAIN(MPI_GROUP_EMPTY);
return OMPI_SUCCESS;
}
/* get new group struct */
new_group_pointer=ompi_group_allocate(n);
if( NULL == new_group_pointer ) {
return MPI_ERR_GROUP;
}
/* put group elements in the list */
for (proc = 0; proc < n; proc++) {
new_group_pointer->grp_proc_pointers[proc] =
ompi_group_peer_lookup(group_pointer,ranks[proc]);
} /* end proc loop */
/* increment proc reference counters */
ompi_group_increment_proc_count(new_group_pointer);
/* find my rank */
my_group_rank=group_pointer->grp_my_rank;
my_proc_pointer=ompi_group_peer_lookup (group_pointer,my_group_rank);
ompi_set_group_rank(new_group_pointer,my_proc_pointer);
*new_group = (MPI_Group)new_group_pointer;
return OMPI_SUCCESS;
}
/*
* Group Union has to use the dense format since we don't support
* two parent groups in the group structure and maintain functions
*/
int ompi_group_union (ompi_group_t* group1, ompi_group_t* group2,
ompi_group_t **new_group)
{
/* local variables */
int new_group_size, proc1, proc2, found_in_group;
int my_group_rank, cnt;
ompi_group_t *group1_pointer, *group2_pointer, *new_group_pointer;
ompi_proc_t *proc1_pointer, *proc2_pointer, *my_proc_pointer = NULL;
group1_pointer = (ompi_group_t *) group1;
group2_pointer = (ompi_group_t *) group2;
/*
* form union
*/
/* get new group size */
new_group_size = group1_pointer->grp_proc_count;
/* check group2 elements to see if they need to be included in the list */
for (proc2 = 0; proc2 < group2_pointer->grp_proc_count; proc2++) {
proc2_pointer = ompi_group_peer_lookup(group2_pointer,proc2);
/* check to see if this proc2 is already in the group */
found_in_group = 0;
for (proc1 = 0; proc1 < group1_pointer->grp_proc_count; proc1++) {
proc1_pointer = ompi_group_peer_lookup(group1_pointer,proc1);
if (proc1_pointer == proc2_pointer) {
/* proc2 is in group1 - don't double count */
found_in_group = 1;
break;
}
} /* end proc1 loop */
if (found_in_group)
continue;
new_group_size++;
} /* end proc loop */
if ( 0 == new_group_size ) {
*new_group = MPI_GROUP_EMPTY;
OBJ_RETAIN(MPI_GROUP_EMPTY);
return MPI_SUCCESS;
}
/* get new group struct */
new_group_pointer = ompi_group_allocate(new_group_size);
if (NULL == new_group_pointer) {
return MPI_ERR_GROUP;
}
/* fill in the new group list */
/* put group1 elements in the list */
for (proc1 = 0; proc1 < group1_pointer->grp_proc_count; proc1++) {
new_group_pointer->grp_proc_pointers[proc1] =
ompi_group_peer_lookup(group1_pointer,proc1);
}
cnt = group1_pointer->grp_proc_count;
/* check group2 elements to see if they need to be included in the list */
for (proc2 = 0; proc2 < group2_pointer->grp_proc_count; proc2++) {
proc2_pointer = ompi_group_peer_lookup(group2_pointer,proc2);
/* check to see if this proc2 is already in the group */
found_in_group = 0;
for (proc1 = 0; proc1 < group1_pointer->grp_proc_count; proc1++) {
proc1_pointer = ompi_group_peer_lookup(group1_pointer,proc1);
if (proc1_pointer == proc2_pointer) {
/* proc2 is in group1 - don't double count */
found_in_group = 1;
break;
}
} /* end proc1 loop */
if (found_in_group)
continue;
new_group_pointer->grp_proc_pointers[cnt] =
ompi_group_peer_lookup(group2_pointer,proc2);
cnt++;
} /* end proc loop */
/* increment proc reference counters */
ompi_group_increment_proc_count(new_group_pointer);
/* find my rank */
my_group_rank = group1_pointer->grp_my_rank;
if (MPI_UNDEFINED == my_group_rank) {
my_group_rank = group2_pointer->grp_my_rank;
if ( MPI_UNDEFINED != my_group_rank) {
my_proc_pointer = ompi_group_peer_lookup(group2_pointer,my_group_rank);
}
} else {
my_proc_pointer = ompi_group_peer_lookup(group1_pointer,my_group_rank);
}
if ( MPI_UNDEFINED == my_group_rank ) {
new_group_pointer->grp_my_rank = MPI_UNDEFINED;
}
else {
ompi_set_group_rank(new_group_pointer, my_proc_pointer);
}
*new_group = (MPI_Group) new_group_pointer;
return OMPI_SUCCESS;
}
/*
* Group Difference has to use the dense format since we don't support
* two parent groups in the group structure and maintain functions
*/
int ompi_group_difference(ompi_group_t* group1, ompi_group_t* group2,
ompi_group_t **new_group) {
/* local variables */
int new_group_size, proc1, proc2, found_in_group2, cnt;
int my_group_rank;
ompi_group_t *group1_pointer, *group2_pointer, *new_group_pointer;
ompi_proc_t *proc1_pointer, *proc2_pointer, *my_proc_pointer = NULL;
group1_pointer=(ompi_group_t *)group1;
group2_pointer=(ompi_group_t *)group2;
/*
 * form difference
*/
/* get new group size */
new_group_size=0;
/* loop over group1 members */
for( proc1=0; proc1 < group1_pointer->grp_proc_count; proc1++ ) {
proc1_pointer = ompi_group_peer_lookup(group1_pointer,proc1);
/* check to see if this proc is in group2 */
found_in_group2=0;
for( proc2=0 ; proc2 < group2_pointer->grp_proc_count ; proc2++ ) {
proc2_pointer = ompi_group_peer_lookup(group2_pointer,proc2);
if( proc1_pointer == proc2_pointer ) {
found_in_group2=1;
break;
}
} /* end proc1 loop */
if(found_in_group2)
continue;
new_group_size++;
} /* end proc loop */
if ( 0 == new_group_size ) {
*new_group = MPI_GROUP_EMPTY;
OBJ_RETAIN(MPI_GROUP_EMPTY);
return MPI_SUCCESS;
}
/* allocate a new ompi_group_t structure */
new_group_pointer=ompi_group_allocate(new_group_size);
if( NULL == new_group_pointer ) {
return MPI_ERR_GROUP;
}
/* fill in group list */
cnt=0;
/* loop over group1 members */
for( proc1=0; proc1 < group1_pointer->grp_proc_count; proc1++ ) {
proc1_pointer = ompi_group_peer_lookup(group1_pointer,proc1);
/* check to see if this proc is in group2 */
found_in_group2=0;
for( proc2=0 ; proc2 < group2_pointer->grp_proc_count ; proc2++ ) {
proc2_pointer = ompi_group_peer_lookup(group2_pointer,proc2);
if( proc1_pointer == proc2_pointer ) {
found_in_group2=1;
break;
}
} /* end proc1 loop */
if(found_in_group2)
continue;
new_group_pointer->grp_proc_pointers[cnt] =
ompi_group_peer_lookup(group1_pointer,proc1);
cnt++;
} /* end proc loop */
/* increment proc reference counters */
ompi_group_increment_proc_count(new_group_pointer);
/* find my rank */
my_group_rank=group1_pointer->grp_my_rank;
if ( MPI_UNDEFINED != my_group_rank ) {
my_proc_pointer = ompi_group_peer_lookup(group1_pointer,my_group_rank);
}
else {
my_group_rank=group2_pointer->grp_my_rank;
if ( MPI_UNDEFINED != my_group_rank ) {
my_proc_pointer = ompi_group_peer_lookup(group2_pointer,my_group_rank);
}
}
if ( MPI_UNDEFINED == my_group_rank ) {
new_group_pointer->grp_my_rank = MPI_UNDEFINED;
}
else {
ompi_set_group_rank(new_group_pointer,my_proc_pointer);
}
*new_group = (MPI_Group)new_group_pointer;
return OMPI_SUCCESS;
}
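Both routines reduce to the standard MPI set semantics, regardless of how the input groups are stored internally. A minimal usage sketch at the MPI level (run with at least four processes; the rank lists are illustrative):

/* Minimal sketch: group union/difference through the MPI API.
 * Run with at least four processes; the rank lists are illustrative. */
#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    MPI_Group world_grp, g1, g2, g_union, g_diff;
    int r1[] = {0, 1, 2};
    int r2[] = {2, 3};
    int size_u, size_d, rank;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_group(MPI_COMM_WORLD, &world_grp);
    MPI_Group_incl(world_grp, 3, r1, &g1);
    MPI_Group_incl(world_grp, 2, r2, &g2);

    MPI_Group_union(g1, g2, &g_union);      /* {0,1,2,3}: rank 2 is not counted twice   */
    MPI_Group_difference(g1, g2, &g_diff);  /* {0,1}: members of g1 that are not in g2  */

    MPI_Group_size(g_union, &size_u);
    MPI_Group_size(g_diff, &size_d);
    if (0 == rank) {
        printf("union size = %d, difference size = %d\n", size_u, size_d);
    }

    MPI_Group_free(&g_union);
    MPI_Group_free(&g_diff);
    MPI_Group_free(&g1);
    MPI_Group_free(&g2);
    MPI_Group_free(&world_grp);
    MPI_Finalize();
    return 0;
}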

View file

@@ -9,7 +9,7 @@
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2006 University of Houston. All rights reserved.
* Copyright (c) 2006-2007 University of Houston. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@@ -38,7 +38,7 @@ void ompi_set_group_rank(ompi_group_t *group, struct ompi_proc_t *proc_pointer)
for (proc = 0; proc < group->grp_proc_count; proc++) {
/* check and see if this proc pointer matches proc_pointer
*/
if (group->grp_proc_pointers[proc] == proc_pointer) {
if (ompi_group_peer_lookup(group,proc) == proc_pointer) {
group->grp_my_rank = proc;
}
} /* end proc loop */

181
ompi/group/group_sporadic.c Normal file
View file

@@ -0,0 +1,181 @@
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2006-2007 University of Houston. All rights reserved.
* Copyright (c) 2007 Cisco, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "ompi/group/group.h"
#include "ompi/constants.h"
#include "ompi/proc/proc.h"
#include "mpi.h"
int ompi_group_calc_sporadic ( int n , int *ranks)
{
    int i,l=0;
    for (i=0 ; i<n ; i++) {
        if (i > 0 && ranks[i] == ranks[i-1]+1) {
            continue;      /* extends the current contiguous range */
        }
        l++;               /* starts a new contiguous range */
    }
    return sizeof(struct ompi_group_sporadic_list_t) * l;
}
/* from parent group to child group*/
int ompi_group_translate_ranks_sporadic ( ompi_group_t *parent_group,
int n_ranks, int *ranks1,
ompi_group_t *child_group,
int *ranks2)
{
int i,count,j;
for (j=0 ; j<n_ranks ; j++) {
if (MPI_PROC_NULL == ranks1[j]) {
ranks2[j] = MPI_PROC_NULL;
}
else {
/*
 * If the rank falls inside the current range of the sporadic list, the
 * child rank is the sum of the lengths of all ranges already passed plus
 * the position of the rank within the current range.
*/
ranks2[j] = MPI_UNDEFINED;
count = 0;
for(i=0 ; i <child_group->sparse_data.grp_sporadic.grp_sporadic_list_len ; i++) {
if( child_group->sparse_data.grp_sporadic.grp_sporadic_list[i].rank_first
<= ranks1[j] && ranks1[j] <=
child_group->sparse_data.grp_sporadic.grp_sporadic_list[i].rank_first +
child_group->sparse_data.grp_sporadic.grp_sporadic_list[i].length -1 ) {
ranks2[j] = ranks1[j] - child_group->
sparse_data.grp_sporadic.grp_sporadic_list[i].rank_first + count;
break;
}
else {
count = count + child_group->sparse_data.grp_sporadic.grp_sporadic_list[i].length;
}
}
}
}
return OMPI_SUCCESS;
}
/* from child group to parent group*/
int ompi_group_translate_ranks_sporadic_reverse ( ompi_group_t *child_group,
int n_ranks, int *ranks1,
ompi_group_t *parent_group,
int *ranks2)
{
int i,j,count;
for (j=0 ; j<n_ranks ; j++) {
if (MPI_PROC_NULL == ranks1[j]) {
ranks2[j] = MPI_PROC_NULL;
}
else {
count = 0;
/*
 * Once the child rank falls inside the current range, the parent rank is that
 * range's rank_first plus the offset of the child rank within the range.
*/
for (i=0 ; i<child_group->sparse_data.grp_sporadic.grp_sporadic_list_len ; i++) {
if ( ranks1[j] > ( count +
child_group->sparse_data.grp_sporadic.grp_sporadic_list[i].length
- 1) ) {
count = count + child_group->sparse_data.grp_sporadic.grp_sporadic_list[i].length;
}
else {
ranks2[j] = child_group->sparse_data.grp_sporadic.grp_sporadic_list[i].rank_first
+ (ranks1[j] - count);
break;
}
}
}
}
return OMPI_SUCCESS;
}
int ompi_group_incl_spor(ompi_group_t* group, int n, int *ranks,
ompi_group_t **new_group)
{
/* local variables */
int my_group_rank,l,i,j,proc_count;
ompi_group_t *group_pointer, *new_group_pointer;
group_pointer = (ompi_group_t *)group;
if (0 == n) {
*new_group = MPI_GROUP_EMPTY;
OBJ_RETAIN(MPI_GROUP_EMPTY);
return OMPI_SUCCESS;
}
l=0;
j=0;
proc_count = 0;
    for (i=0 ; i<n ; i++) {
        if (i > 0 && ranks[i] == ranks[i-1]+1) {
            continue;      /* extends the current contiguous range */
        }
        l++;               /* starts a new contiguous range */
    }
new_group_pointer = ompi_group_allocate_sporadic(l);
if( NULL == new_group_pointer ) {
return MPI_ERR_GROUP;
}
new_group_pointer ->
sparse_data.grp_sporadic.grp_sporadic_list[j].rank_first = ranks[0];
new_group_pointer ->
sparse_data.grp_sporadic.grp_sporadic_list[j].length = 1;
for(i=1 ; i<n ; i++){
if(ranks[i] == ranks[i-1]+1) {
new_group_pointer -> sparse_data.grp_sporadic.grp_sporadic_list[j].length ++;
}
else {
j++;
new_group_pointer ->
sparse_data.grp_sporadic.grp_sporadic_list[j].rank_first = ranks[i];
new_group_pointer ->
sparse_data.grp_sporadic.grp_sporadic_list[j].length = 1;
}
}
new_group_pointer->sparse_data.grp_sporadic.grp_sporadic_list_len = j+1;
new_group_pointer -> grp_parent_group_ptr = group_pointer;
OBJ_RETAIN(new_group_pointer -> grp_parent_group_ptr);
ompi_group_increment_proc_count(new_group_pointer -> grp_parent_group_ptr);
for(i=0 ; i<new_group_pointer->sparse_data.grp_sporadic.grp_sporadic_list_len ; i++) {
proc_count = proc_count + new_group_pointer ->
sparse_data.grp_sporadic.grp_sporadic_list[i].length;
}
new_group_pointer->grp_proc_count = proc_count;
ompi_group_increment_proc_count(new_group_pointer);
my_group_rank=group_pointer->grp_my_rank;
ompi_group_translate_ranks (group_pointer,1,&my_group_rank,
new_group_pointer,&new_group_pointer->grp_my_rank);
*new_group = (MPI_Group)new_group_pointer;
return OMPI_SUCCESS;
}
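To make the sporadic encoding concrete: an include list such as {1,2,3,7,8,12} collapses into the ranges (1,3), (7,2), (12,1), and translating a parent rank means skipping whole ranges while accumulating their lengths. A self-contained sketch, using a simplified stand-in struct rather than the real struct ompi_group_sporadic_list_t from group.h:

/* Simplified stand-in for the sporadic range list; the real element type is
 * struct ompi_group_sporadic_list_t (rank_first/length). */
#include <stdio.h>

struct range { int rank_first; int length; };

/* Collapse a sorted include list into contiguous ranges; returns range count. */
static int build_ranges(const int *ranks, int n, struct range *out)
{
    int i, j = 0;
    for (i = 0; i < n; i++) {
        if (i > 0 && ranks[i] == ranks[i - 1] + 1) {
            out[j - 1].length++;          /* extends the current range */
        } else {
            out[j].rank_first = ranks[i]; /* starts a new range */
            out[j].length = 1;
            j++;
        }
    }
    return j;
}

/* Parent rank -> child rank, mirroring ompi_group_translate_ranks_sporadic. */
static int parent_to_child(const struct range *r, int nranges, int parent_rank)
{
    int i, count = 0;
    for (i = 0; i < nranges; i++) {
        if (parent_rank >= r[i].rank_first &&
            parent_rank <= r[i].rank_first + r[i].length - 1) {
            return parent_rank - r[i].rank_first + count;
        }
        count += r[i].length;
    }
    return -1; /* MPI_UNDEFINED in the real code */
}

int main(void)
{
    int ranks[] = { 1, 2, 3, 7, 8, 12 };
    struct range r[6];
    int n = build_ranges(ranks, 6, r);    /* -> (1,3) (7,2) (12,1) */
    printf("%d ranges; parent rank 8 -> child rank %d\n",
           n, parent_to_child(r, n, 8));  /* 8 is the 5th member -> child rank 4 */
    return 0;
}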

139
ompi/group/group_strided.c Normal file
View file

@@ -0,0 +1,139 @@
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2006-2007 University of Houston. All rights reserved.
* Copyright (c) 2007 Cisco, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "ompi/group/group.h"
#include "ompi/constants.h"
#include "ompi/proc/proc.h"
#include "mpi.h"
static int check_stride(int[],int);
int ompi_group_calc_strided ( int n , int *ranks ) {
if(-1 == check_stride(ranks,n)) {
return -1;
}
else {
return (sizeof(int)*3);
}
}
/* from parent group to child group*/
int ompi_group_translate_ranks_strided (ompi_group_t *parent_group,
int n_ranks, int *ranks1,
ompi_group_t *child_group,
int *ranks2)
{
int s,o,l,i;
s = child_group->sparse_data.grp_strided.grp_strided_stride;
o = child_group->sparse_data.grp_strided.grp_strided_offset;
l = child_group->sparse_data.grp_strided.grp_strided_last_element;
for (i = 0; i < n_ranks; i++) {
if ( MPI_PROC_NULL == ranks1[i]) {
ranks2[i] = MPI_PROC_NULL;
}
else {
ranks2[i] = MPI_UNDEFINED;
if ( (ranks1[i]-o) >= 0 && (ranks1[i]-o)%s == 0 && ranks1[i] <= l) {
ranks2[i] = (ranks1[i] - o)/s;
}
}
}
return OMPI_SUCCESS;
}
/* from child group to parent group*/
int ompi_group_translate_ranks_strided_reverse (ompi_group_t *child_group,
int n_ranks, int *ranks1,
ompi_group_t *parent_group,
int *ranks2)
{
int s,o,i;
s = child_group->sparse_data.grp_strided.grp_strided_stride;
o = child_group->sparse_data.grp_strided.grp_strided_offset;
for (i = 0; i < n_ranks; i++) {
if ( MPI_PROC_NULL == ranks1[i]) {
ranks2[i] = MPI_PROC_NULL;
}
else {
ranks2[i] =s*ranks1[i] + o;
}
}
return OMPI_SUCCESS;
}
static int check_stride(int incl[],int incllen) {
int s,i;
if (incllen > 1) {
s = incl[1] - incl[0];
}
else {
s = 1;
}
if( s < 0 ) {
return -1;
}
for(i=0 ; i < incllen-1 ; i++) {
if(incl[i+1] - incl[i] != s)
return -1;
}
return s;
}
int ompi_group_incl_strided(ompi_group_t* group, int n, int *ranks,
ompi_group_t **new_group)
{
/* local variables */
int my_group_rank,stride;
ompi_group_t *group_pointer, *new_group_pointer;
group_pointer = (ompi_group_t *)group;
if ( 0 == n ) {
*new_group = MPI_GROUP_EMPTY;
OBJ_RETAIN(MPI_GROUP_EMPTY);
return OMPI_SUCCESS;
}
stride = check_stride(ranks,n);
new_group_pointer = ompi_group_allocate_strided();
if( NULL == new_group_pointer ) {
return MPI_ERR_GROUP;
}
new_group_pointer -> grp_parent_group_ptr = group_pointer;
OBJ_RETAIN(new_group_pointer -> grp_parent_group_ptr);
ompi_group_increment_proc_count(new_group_pointer -> grp_parent_group_ptr);
new_group_pointer -> sparse_data.grp_strided.grp_strided_stride = stride;
new_group_pointer -> sparse_data.grp_strided.grp_strided_offset = ranks[0];
new_group_pointer -> sparse_data.grp_strided.grp_strided_last_element = ranks[n-1];
new_group_pointer -> grp_proc_count = n;
ompi_group_increment_proc_count(new_group_pointer);
my_group_rank = group_pointer->grp_my_rank;
ompi_group_translate_ranks (new_group_pointer->grp_parent_group_ptr,1,&my_group_rank,
new_group_pointer,&new_group_pointer->grp_my_rank);
*new_group = (MPI_Group)new_group_pointer;
return OMPI_SUCCESS;
}
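The strided format keeps only the offset, the stride, and the last element, so both translation directions are pure arithmetic: parent rank r maps to child rank (r - o)/s whenever (r - o) is a non-negative multiple of s and r <= l, and child rank c maps back to s*c + o. A tiny sketch with illustrative values:

/* Minimal sketch of the strided mapping; the values below are illustrative. */
#include <stdio.h>

int main(void)
{
    /* Include list {2, 5, 8, 11} -> offset 2, stride 3, last element 11. */
    int o = 2, s = 3, l = 11;
    int parent_rank = 8;
    int child_rank = -1;                       /* MPI_UNDEFINED in the real code */

    if (parent_rank - o >= 0 && (parent_rank - o) % s == 0 && parent_rank <= l) {
        child_rank = (parent_rank - o) / s;    /* 8 -> 2 */
    }
    printf("parent %d -> child %d -> parent %d\n",
           parent_rank, child_rank, s * child_rank + o);
    return 0;
}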

View file

@@ -65,7 +65,7 @@ uint32_t mca_coll_sm_iov_size = 1;
static const struct mca_coll_base_module_1_0_0_t *
sm_module_init(struct ompi_communicator_t *comm);
static int sm_module_finalize(struct ompi_communicator_t *comm);
static bool have_local_peers(ompi_proc_t **procs, size_t size);
static bool have_local_peers(ompi_group_t *group, size_t size);
static int bootstrap_init(void);
static int bootstrap_comm(ompi_communicator_t *comm);
@@ -172,8 +172,7 @@ mca_coll_sm_comm_query(struct ompi_communicator_t *comm, int *priority,
are not on this node, then we don't want to run */
if (OMPI_COMM_IS_INTER(comm) || 1 == ompi_comm_size(comm) ||
!have_local_peers(comm->c_local_group->grp_proc_pointers,
ompi_comm_size(comm))) {
!have_local_peers(comm->c_local_group, ompi_comm_size(comm))) {
return NULL;
}
@@ -509,13 +508,14 @@ static int sm_module_finalize(struct ompi_communicator_t *comm)
return OMPI_SUCCESS;
}
static bool have_local_peers(ompi_proc_t **procs, size_t size)
static bool have_local_peers(ompi_group_t *group, size_t size)
{
size_t i;
ompi_proc_t *proc;
for (i = 0; i < size; ++i) {
if (0 == (procs[i]->proc_flags & OMPI_PROC_FLAG_LOCAL)) {
proc = ompi_group_peer_lookup(group,i);
if (0 == (proc->proc_flags & OMPI_PROC_FLAG_LOCAL)) {
return false;
}
}
@@ -611,6 +611,7 @@ static int bootstrap_comm(ompi_communicator_t *comm)
int frag_size = c->sm_fragment_size;
int control_size = c->sm_control_size;
orte_process_name_t *rank0;
ompi_proc_t * proc;
/* Is our CID in the CIDs array? If not, loop until we can find
an open slot in the array to use in the bootstrap to setup our
@@ -620,7 +621,10 @@ static int bootstrap_comm(ompi_communicator_t *comm)
c->sm_bootstrap_meta->map_seg;
bscs = bshe->smbhe_segments;
opal_atomic_lock(&bshe->super.seg_lock);
rank0 = &(comm->c_local_group->grp_proc_pointers[0]->proc_name);
proc = ompi_group_peer_lookup(comm->c_local_group,0);
rank0 = &(proc->proc_name);
while (1) {
opal_atomic_wmb();
found = false;

View file

@@ -129,7 +129,7 @@ int mca_pml_dr_add_comm(ompi_communicator_t* comm)
comm->c_pml_comm = pml_comm;
for( i = 0; i < comm->c_remote_group->grp_proc_count; i++ ) {
pml_comm->procs[i].ompi_proc = comm->c_remote_group->grp_proc_pointers[i];
pml_comm->procs[i].ompi_proc = ompi_group_peer_lookup(comm->c_remote_group,i);
}
return OMPI_SUCCESS;
}

View file

@@ -101,7 +101,7 @@ int mca_pml_dr_comm_init(mca_pml_dr_comm_t* dr_comm, ompi_communicator_t* ompi_c
proc = dr_comm->procs+i;
OBJ_CONSTRUCT(proc, mca_pml_dr_comm_proc_t);
proc->comm_rank = i;
ompi_proc = ompi_comm->c_remote_group->grp_proc_pointers[i];
ompi_proc = ompi_group_peer_lookup(ompi_comm->c_remote_group,i);
proc->ompi_proc = ompi_proc;
pml_ep = (mca_pml_dr_endpoint_t*) ompi_proc->proc_pml;
ompi_pointer_array_set_item(&dr_comm->sparse_procs,

View file

@@ -164,7 +164,7 @@ int mca_pml_ob1_add_comm(ompi_communicator_t* comm)
comm->c_pml_comm = pml_comm;
for( i = 0; i < comm->c_remote_group->grp_proc_count; i++ ) {
pml_comm->procs[i].ompi_proc = comm->c_remote_group->grp_proc_pointers[i];
pml_comm->procs[i].ompi_proc = ompi_group_peer_lookup(comm->c_remote_group,i);
}
return OMPI_SUCCESS;
}
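The change repeated across the coll and PML components above is mechanical: every direct grp_proc_pointers[i] access becomes ompi_group_peer_lookup(group, i), which also works when the group has no dense proc array. A conceptual sketch of what such a lookup does (this is not the actual inline from ompi/group/group.h, only an illustration of the dense/sparse split):

#include "ompi_config.h"
#include "ompi/group/group.h"
#include "ompi/proc/proc.h"
#include "mpi.h"

/* Conceptual lookup: dense groups index their proc array directly; sparse
 * groups translate the rank into the parent group's numbering first. */
static inline ompi_proc_t *peer_lookup_sketch(ompi_group_t *group, int rank)
{
    if (OMPI_GROUP_IS_DENSE(group)) {
        return group->grp_proc_pointers[rank];
    } else {
        int parent_rank = MPI_UNDEFINED;
        ompi_group_translate_ranks(group, 1, &rank,
                                   group->grp_parent_group_ptr, &parent_rank);
        return peer_lookup_sketch(group->grp_parent_group_ptr, parent_rank);
    }
}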

View file

@@ -34,15 +34,7 @@ static const char FUNC_NAME[] = "MPI_Comm_compare";
int MPI_Comm_compare(MPI_Comm comm1, MPI_Comm comm2, int *result) {
/* local variables */
ompi_communicator_t *comp1, *comp2;
ompi_group_t *group1, *group2;
int size1, size2, rsize1, rsize2;
int lresult, rresult=MPI_CONGRUENT;
int sameranks=1;
int sameorder=1;
int i, j;
int found = 0;
int rc;
OPAL_CR_TEST_CHECKPOINT_READY();
@@ -60,114 +52,8 @@ int MPI_Comm_compare(MPI_Comm comm1, MPI_Comm comm2, int *result) {
}
}
comp1 = (ompi_communicator_t *) comm1;
comp2 = (ompi_communicator_t *) comm2;
if ( comp1->c_contextid == comp2->c_contextid ) {
*result = MPI_IDENT;
return MPI_SUCCESS;
}
if ( MPI_COMM_NULL == comm1 || MPI_COMM_NULL == comm2 ) {
*result = MPI_UNEQUAL;
return MPI_SUCCESS;
}
/* compare sizes of local and remote groups */
size1 = ompi_comm_size (comp1);
size2 = ompi_comm_size (comp2);
rsize1 = ompi_comm_remote_size (comp1);
rsize2 = ompi_comm_remote_size (comp2);
if ( size1 != size2 || rsize1 != rsize2 ) {
*result = MPI_UNEQUAL;
return MPI_SUCCESS;
}
/* Compare local groups */
/* we need to check whether the communicators contain
the same processes and in the same order */
group1 = (ompi_group_t *)comp1->c_local_group;
group2 = (ompi_group_t *)comp2->c_local_group;
for ( i = 0; i < size1; i++ ) {
if ( group1->grp_proc_pointers[i] != group2->grp_proc_pointers[i]) {
sameorder = 0;
break;
}
}
for ( i = 0; i < size1; i++ ) {
found = 0;
for ( j = 0; j < size2; j++ ) {
if ( group1->grp_proc_pointers[i] == group2->grp_proc_pointers[j]) {
found = 1;
break;
}
}
if ( !found ) {
sameranks = 0;
break;
}
}
if ( sameranks && sameorder )
lresult = MPI_CONGRUENT;
else if ( sameranks && !sameorder )
lresult = MPI_SIMILAR;
else
lresult = MPI_UNEQUAL;
if ( rsize1 > 0 ) {
/* Compare remote groups for inter-communicators */
/* we need to check whether the communicators contain
the same processes and in the same order */
sameranks = sameorder = 1;
group1 = (ompi_group_t *)comp1->c_remote_group;
group2 = (ompi_group_t *)comp2->c_remote_group;
for ( i = 0; i < rsize1; i++ ) {
if ( group1->grp_proc_pointers[i] != group2->grp_proc_pointers[i]) {
sameorder = 0;
break;
}
}
for ( i = 0; i < rsize1; i++ ) {
found = 0;
for ( j = 0; j < rsize2; j++ ) {
if ( group1->grp_proc_pointers[i] == group2->grp_proc_pointers[j]) {
found = 1;
break;
}
}
if ( !found ) {
sameranks = 0;
break;
}
}
if ( sameranks && sameorder )
rresult = MPI_CONGRUENT;
else if ( sameranks && !sameorder )
rresult = MPI_SIMILAR;
else
rresult = MPI_UNEQUAL;
}
/* determine final results */
if ( MPI_CONGRUENT == rresult ) {
*result = lresult;
}
else if ( MPI_SIMILAR == rresult ) {
if ( MPI_SIMILAR == lresult || MPI_CONGRUENT == lresult ) {
*result = MPI_SIMILAR;
}
else
*result = MPI_UNEQUAL;
}
else if ( MPI_UNEQUAL == rresult )
*result = MPI_UNEQUAL;
return MPI_SUCCESS;
rc = ompi_comm_compare ( (ompi_communicator_t*)comm1,
(ompi_communicator_t*)comm2,
result);
OMPI_ERRHANDLER_RETURN ( rc, comm1, rc, FUNC_NAME);
}
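The comparison logic now lives in ompi_comm_compare, but the MPI-visible semantics are unchanged: MPI_IDENT for the same communicator, MPI_CONGRUENT for the same processes in the same order, MPI_SIMILAR for the same processes in a different order, MPI_UNEQUAL otherwise. A short sketch (run with at least two processes):

/* Minimal sketch of the MPI_Comm_compare result values; run with >= 2 ranks. */
#include <mpi.h>
#include <stdio.h>

static const char *name(int r)
{
    return r == MPI_IDENT     ? "MPI_IDENT"     :
           r == MPI_CONGRUENT ? "MPI_CONGRUENT" :
           r == MPI_SIMILAR   ? "MPI_SIMILAR"   : "MPI_UNEQUAL";
}

int main(int argc, char **argv)
{
    MPI_Comm dup, rev;
    int rank, size, res;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    MPI_Comm_compare(MPI_COMM_WORLD, MPI_COMM_WORLD, &res);
    if (0 == rank) printf("world vs world:    %s\n", name(res)); /* MPI_IDENT */

    MPI_Comm_dup(MPI_COMM_WORLD, &dup);
    MPI_Comm_compare(MPI_COMM_WORLD, dup, &res);
    if (0 == rank) printf("world vs dup:      %s\n", name(res)); /* MPI_CONGRUENT */

    /* Same members, reversed rank order -> MPI_SIMILAR (for size >= 2). */
    MPI_Comm_split(MPI_COMM_WORLD, 0, size - rank, &rev);
    MPI_Comm_compare(MPI_COMM_WORLD, rev, &res);
    if (0 == rank) printf("world vs reversed: %s\n", name(res));

    MPI_Comm_free(&dup);
    MPI_Comm_free(&rev);
    MPI_Finalize();
    return 0;
}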

View file

@@ -90,11 +90,11 @@ int MPI_Group_compare(MPI_Group group1, MPI_Group group2, int *result) {
similar=1;
identical=1;
for(proc1=0 ; proc1 < group1_pointer->grp_proc_count ; proc1++ ) {
proc1_pointer=group1_pointer->grp_proc_pointers[proc1];
proc1_pointer= ompi_group_peer_lookup(group1_pointer,proc1);
/* loop over group2 processes to find "match" */
match=-1;
for(proc2=0 ; proc2 < group2_pointer->grp_proc_count ; proc2++ ) {
proc2_pointer=group2_pointer->grp_proc_pointers[proc2];
proc2_pointer=ompi_group_peer_lookup(group2_pointer,proc2);
if( proc1_pointer == proc2_pointer ) {
if(proc1 != proc2 ) {
identical=0;

View file

@@ -9,7 +9,8 @@
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2006 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2006-2007 University of Houston. All rights reserved.
* Copyright (c) 2006-2007 Cisco Systems, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@@ -44,6 +45,9 @@ int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader,
ompi_communicator_t *newcomp=NULL;
struct ompi_proc_t **rprocs=NULL;
int rc=0, rsize=0;
ompi_proc_t **proc_list=NULL;
int i,j;
ompi_group_t *new_group_pointer;
OPAL_CR_TEST_CHECKPOINT_READY();
@@ -124,20 +128,63 @@ int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader,
}
if ( MPI_PARAM_CHECK ) {
rc = ompi_comm_overlapping_groups(local_comm->c_local_group->grp_proc_count,
local_comm->c_local_group->grp_proc_pointers,
rsize,
rprocs);
if(OMPI_GROUP_IS_DENSE(local_comm->c_local_group)) {
rc = ompi_comm_overlapping_groups(local_comm->c_local_group->grp_proc_count,
local_comm->c_local_group->grp_proc_pointers,
rsize,
rprocs);
}
else {
proc_list = (ompi_proc_t **) calloc (local_comm->c_local_group->grp_proc_count,
sizeof (ompi_proc_t *));
for(j=0 ; j<local_comm->c_local_group->grp_proc_count ; j++) {
proc_list[j] = ompi_group_peer_lookup(local_comm->c_local_group,j);
}
rc = ompi_comm_overlapping_groups(local_comm->c_local_group->grp_proc_count,
proc_list,
rsize,
rprocs);
}
if ( OMPI_SUCCESS != rc ) {
goto err_exit;
}
}
new_group_pointer=ompi_group_allocate(rsize);
if( NULL == new_group_pointer ) {
return MPI_ERR_GROUP;
}
/* put group elements in the list */
for (j = 0; j < rsize; j++) {
new_group_pointer->grp_proc_pointers[j] = rprocs[j];
}
ompi_group_increment_proc_count(new_group_pointer);
rc = ompi_comm_set ( &newcomp, /* new comm */
local_comm, /* old comm */
local_comm->c_local_group->grp_proc_count, /* local_size */
NULL, /* local_procs*/
rsize, /* remote_size */
NULL, /* remote_procs */
NULL, /* attrs */
local_comm->error_handler, /* error handler*/
NULL, /* topo module */
local_comm->c_local_group, /* local group */
new_group_pointer /* remote group */
);
newcomp = ompi_comm_allocate ( local_comm->c_local_group->grp_proc_count, rsize);
if ( NULL == newcomp ) {
rc = MPI_ERR_INTERN;
goto err_exit;
}
if ( MPI_SUCCESS != rc ) {
goto err_exit;
}
ompi_group_decrement_proc_count (new_group_pointer);
OBJ_RELEASE(new_group_pointer);
new_group_pointer = MPI_GROUP_NULL;
/* Determine context id. It is identical to f_2_c_handle */
rc = ompi_comm_nextcid ( newcomp, /* new comm */
@@ -152,20 +199,6 @@ int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader,
goto err_exit;
}
rc = ompi_comm_set ( newcomp, /* new comm */
local_comm, /* old comm */
local_comm->c_local_group->grp_proc_count, /* local_size */
local_comm->c_local_group->grp_proc_pointers, /* local_procs*/
rsize, /* remote_size */
rprocs, /* remote_procs */
NULL, /* attrs */
local_comm->error_handler, /* error handler*/
NULL /* topo module */
);
if ( MPI_SUCCESS != rc ) {
goto err_exit;
}
/* activate comm and init coll-module */
rc = ompi_comm_activate ( newcomp, /* new comm */
local_comm, /* old comm */
@@ -185,6 +218,9 @@ int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader,
if ( NULL != rprocs ) {
free ( rprocs );
}
if ( NULL != proc_list ) {
free ( proc_list );
}
if ( OMPI_SUCCESS != rc ) {
*newintercomm = MPI_COMM_NULL;
return OMPI_ERRHANDLER_INVOKE(local_comm, MPI_ERR_INTERN,

View file

@@ -9,7 +9,8 @@
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2006 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2006-2007 University of Houston. All rights reserved.
* Copyright (c) 2006-2007 Cisco Systems, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@@ -47,6 +48,9 @@ int MPI_Intercomm_merge(MPI_Comm intercomm, int high,
int total_size;
int rc=MPI_SUCCESS;
int thigh = high;
ompi_proc_t **l_proc_list=NULL , **r_proc_list=NULL;
ompi_group_t *new_group_pointer;
OPAL_CR_TEST_CHECKPOINT_READY();
@@ -79,23 +83,35 @@ int MPI_Intercomm_merge(MPI_Comm intercomm, int high,
}
if ( first ) {
memcpy ( procs, intercomm->c_local_group->grp_proc_pointers,
local_size * sizeof(ompi_proc_t *));
memcpy ( &procs[local_size], intercomm->c_remote_group->grp_proc_pointers,
remote_size * sizeof(ompi_proc_t *));
ompi_group_union ( intercomm->c_local_group, intercomm->c_remote_group, &new_group_pointer );
}
else {
memcpy ( procs, intercomm->c_remote_group->grp_proc_pointers,
remote_size * sizeof(ompi_proc_t *));
memcpy ( &procs[remote_size], intercomm->c_local_group->grp_proc_pointers,
local_size * sizeof(ompi_proc_t *));
ompi_group_union ( intercomm->c_remote_group, intercomm->c_local_group, &new_group_pointer );
}
newcomp = ompi_comm_allocate ( total_size, 0 );
rc = ompi_comm_set ( &newcomp, /* new comm */
intercomm, /* old comm */
total_size, /* local_size */
NULL, /* local_procs*/
0, /* remote_size */
NULL, /* remote_procs */
NULL, /* attrs */
intercomm->error_handler, /* error handler*/
NULL, /* topo module */
new_group_pointer, /* local group */
NULL /* remote group */
);
if ( NULL == newcomp ) {
rc = MPI_ERR_INTERN;
goto exit;
}
if ( MPI_SUCCESS != rc ) {
goto exit;
}
ompi_group_decrement_proc_count(new_group_pointer);
OBJ_RELEASE(new_group_pointer);
new_group_pointer = MPI_GROUP_NULL;
/* Determine context id. It is identical to f_2_c_handle */
rc = ompi_comm_nextcid ( newcomp, /* new comm */
@@ -109,20 +125,6 @@ int MPI_Intercomm_merge(MPI_Comm intercomm, int high,
goto exit;
}
rc = ompi_comm_set ( newcomp, /* new comm */
intercomm, /* old comm */
total_size, /* local_size */
procs, /* local_procs*/
0, /* remote_size */
NULL, /* remote_procs */
NULL, /* attrs */
intercomm->error_handler, /* error handler*/
NULL /* topo module */
);
if ( MPI_SUCCESS != rc ) {
goto exit;
}
/* activate communicator and init coll-module */
rc = ompi_comm_activate ( newcomp, /* new comm */
intercomm, /* old comm */
@@ -142,6 +144,12 @@ int MPI_Intercomm_merge(MPI_Comm intercomm, int high,
if ( NULL != procs ) {
free ( procs );
}
if ( NULL != l_proc_list ) {
free ( l_proc_list );
}
if ( NULL != r_proc_list ) {
free ( r_proc_list );
}
if ( MPI_SUCCESS != rc ) {
if ( MPI_COMM_NULL != newcomp ) {
OBJ_RELEASE(newcomp);

View file

@@ -60,3 +60,8 @@ compiled with heterogeneous support. A process running on host
which will not work. Please recompile Open MPI with the
configure option --enable-heterogeneous or use a homogeneous
environment.
#
[sparse groups enabled but compiled out]
WARNING: The MCA parameter mpi_use_sparse_group_storage has been set
to true, but sparse group support was not compiled into Open MPI. The
mpi_use_sparse_group_storage value has therefore been ignored.

View file

@@ -54,6 +54,8 @@ int ompi_mpi_abort_delay = 0;
bool ompi_mpi_keep_peer_hostnames = true;
bool ompi_mpi_leave_pinned = false;
bool ompi_mpi_leave_pinned_pipeline = false;
bool ompi_have_sparse_group_storage = OPAL_INT_TO_BOOL(OMPI_GROUP_SPARSE);
bool ompi_use_sparse_group_storage = OPAL_INT_TO_BOOL(OMPI_GROUP_SPARSE);
int ompi_mpi_register_params(void)
@@ -238,6 +240,28 @@ int ompi_mpi_register_params(void)
true);
}
/* Sparse group storage support */
mca_base_param_reg_int_name("mpi", "have_sparse_group_storage",
"Whether this Open MPI installation supports storing of data in MPI groups in \"sparse\" formats (good for extremely large process count MPI jobs that create many communicators/groups)",
false, true, (int) OMPI_GROUP_SPARSE, NULL);
mca_base_param_reg_int_name("mpi", "use_sparse_group_storage",
"Whether to use \"sparse\" storage formats for MPI groups (only relevant if mpi_have_sparse_group_storage is 1)",
false, false, OMPI_GROUP_SPARSE, &value);
ompi_use_sparse_group_storage = OPAL_INT_TO_BOOL(value);
if (ompi_use_sparse_group_storage) {
value = 0;
if (OMPI_GROUP_SPARSE) {
value = 1;
}
if (0 == value) {
opal_show_help("help-mpi-runtime.txt",
"sparse groups enabled but compiled out",
true);
ompi_use_sparse_group_storage = false;
}
}
/* The ddt engine has a few parameters */
return ompi_ddt_register_params();
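Taken together, OMPI_GROUP_SPARSE says whether the sparse formats were compiled in, and ompi_use_sparse_group_storage says whether they should actually be used at run time. A hedged sketch of how a group-creation path might consult these flags; the dense fallback name ompi_group_incl_plist and the include paths are assumptions here, while ompi_group_calc_strided and ompi_group_incl_strided are the routines added above:

#include "ompi_config.h"
#include "ompi/group/group.h"

/* Sketch only: pick a storage format for an include list.  Assumes a dense
 * fallback named ompi_group_incl_plist and that ompi_use_sparse_group_storage
 * is visible here; the -1 "not strided" convention comes from group_strided.c. */
static int incl_sketch(ompi_group_t *group, int n, int *ranks,
                       ompi_group_t **new_group)
{
    if (ompi_use_sparse_group_storage &&
        -1 != ompi_group_calc_strided(n, ranks)) {
        return ompi_group_incl_strided(group, n, ranks, new_group);
    }
    return ompi_group_incl_plist(group, n, ranks, new_group);  /* assumed name */
}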

View file

@@ -132,6 +132,16 @@ OMPI_DECLSPEC extern bool ompi_mpi_leave_pinned;
*/
OMPI_DECLSPEC extern bool ompi_mpi_leave_pinned_pipeline;
/**
* Whether sparse MPI group storage formats are supported or not.
*/
OMPI_DECLSPEC extern bool ompi_have_sparse_group_storage;
/**
* Whether sparse MPI group storage formats should be used or not.
*/
OMPI_DECLSPEC extern bool ompi_use_sparse_group_storage;
/**
* Register MCA parameters used by the MPI layer.
*

View file

@@ -351,7 +351,8 @@ void ompi_info::do_config(bool want_all)
const string want_libltdl(OMPI_WANT_LIBLTDL ? "yes" : "no");
const string mpirun_prefix_by_default(ORTE_WANT_ORTERUN_PREFIX_BY_DEFAULT ?
"yes" : "no");
const string sparse_groups(OMPI_GROUP_SPARSE ? "yes" : "no");
if (OMPI_HAVE_SOLARIS_THREADS || OMPI_HAVE_POSIX_THREADS) {
threads = OMPI_HAVE_SOLARIS_THREADS ? "solaris" :
OMPI_HAVE_POSIX_THREADS ? "posix" : "type unknown";
@@ -549,6 +550,7 @@ out("C++ exceptions", "option:cxx_exceptions", cxxexceptions);
out("C++ exceptions", "option:cxx_exceptions", cxxexceptions);
out("Thread support", "option:threads", threads);
out("Sparse Groups", "option:sparse:groups", sparse_groups);
if (want_all) {