Updating the communicator interface functions to the new interfaces and procedures of comm.c

This commit was SVN r1856.
This commit is contained in:
Edgar Gabriel 2004-08-03 22:08:23 +00:00
parent 20a512a9b7
commit d6334fa558
9 changed files with 219 additions and 161 deletions
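The hunks below share one pattern: the old code called ompi_comm_set() directly and tested the result against MPI_COMM_NULL, while the new code first allocates the communicator with ompi_comm_allocate(), then agrees on a context id with ompi_comm_nextcid(), and only then fills the structure with the new ompi_comm_set() signature, which takes the pre-allocated communicator as its first argument and returns an error code. A minimal sketch of that sequence follows; the header paths, the helper name, the leader types, and the argument values are assumptions taken from the hunks below rather than from a buildable 2004-era tree.

/* Sketch only: prototypes reproduced from the diff, not verified against
 * a checkout; setup_intercomm_sketch and its arguments are hypothetical. */
#include "communicator/communicator.h"   /* assumed internal header path */

static int setup_intercomm_sketch(ompi_communicator_t *comp,
                                  int remote_size,
                                  ompi_proc_t **rprocs)
{
    ompi_communicator_t *newcomp;
    int lleader = 0, rleader = 0;
    int rc;

    /* 1. Allocate the new communicator shell (the old code let
          ompi_comm_set() do this implicitly). */
    newcomp = ompi_comm_allocate(comp->c_local_group->grp_proc_count,
                                 remote_size);
    if (NULL == newcomp) {
        return MPI_ERR_INTERN;
    }

    /* 2. Determine the context id before the groups are attached. */
    rc = ompi_comm_nextcid(newcomp,                  /* new comm      */
                           comp,                     /* old comm      */
                           NULL,                     /* bridge comm   */
                           &lleader,                 /* local leader  */
                           &rleader,                 /* remote leader */
                           OMPI_COMM_CID_INTRA_OOB); /* mode          */
    if (MPI_SUCCESS != rc) {
        return rc;
    }

    /* 3. Fill in groups, error handler and (optional) coll/topo modules. */
    rc = ompi_comm_set(newcomp,                                /* new comm      */
                       comp,                                   /* old comm      */
                       comp->c_local_group->grp_proc_count,    /* local_size    */
                       comp->c_local_group->grp_proc_pointers, /* local_procs   */
                       remote_size,                            /* remote_size   */
                       rprocs,                                 /* remote_procs  */
                       NULL,                                   /* attrs         */
                       comp->error_handler,                    /* error handler */
                       NULL,                                   /* coll module   */
                       NULL);                                  /* topo module   */
    return rc;
}

Splitting allocation from setup is what lets the context id be chosen before the groups and modules are attached, which matches the reordered ompi_comm_nextcid()/ompi_comm_set() calls in every file changed here.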

View file

@@ -105,18 +105,9 @@ int MPI_Comm_accept(char *port_name, MPI_Info info, int root,
/* if process rprocs[i] not yet in our list, add it. */
}
/* setup the intercomm-structure using ompi_comm_set (); */
newcomp = ompi_comm_set ( comp, /* old comm */
comp->c_local_group->grp_proc_count, /* local_size */
comp->c_local_group->grp_proc_pointers, /* local_procs*/
maxprocs, /* remote_size */
rprocs, /* remote_procs */
NULL, /* attrs */
comp->error_handler, /* error handler */
NULL, /* coll module */
NULL /* topo module */
);
if ( MPI_COMM_NULL == newcomp ) {
newcomp = ompi_comm_allocate ( comp->c_local_group->grp_proc_count, maxprocs);
if ( NULL == newcomp ) {
rc = MPI_ERR_INTERN;
goto exit;
}
@@ -131,6 +122,23 @@ int MPI_Comm_accept(char *port_name, MPI_Info info, int root,
goto exit;
}
/* setup the intercomm-structure using ompi_comm_set (); */
rc = ompi_comm_set ( newcomp, /* new comm */
comp, /* old comm */
comp->c_local_group->grp_proc_count, /* local_size */
comp->c_local_group->grp_proc_pointers, /* local_procs*/
maxprocs, /* remote_size */
rprocs, /* remote_procs */
NULL, /* attrs */
comp->error_handler, /* error handler */
NULL, /* coll module */
NULL /* topo module */
);
if ( MPI_SUCCESS != rc ) {
goto exit;
}
/* PROBLEM: do we have to re-start some low level stuff
to enable the usage of fast communication devices
between the two worlds ? */

View file

@@ -93,9 +93,10 @@ int MPI_Comm_connect(char *port_name, MPI_Info info, int root,
}
}
/* bcast list of remote procs to all processes in comm */
/* bcast list of remote procs to all processes in comm.
TO BE CHANGED. */
rc = comp->c_coll.coll_bcast ( &rprocs, maxprocs, MPI_UNSIGNED, root, comm);
if ( OMPI_SUCCESS != rc ) {
if ( MPI_SUCCESS != rc ) {
goto exit;
}
@@ -104,18 +105,9 @@ int MPI_Comm_connect(char *port_name, MPI_Info info, int root,
/* if process rprocs[i] not yet in our list, add it. */
}
/* setup the intercomm-structure using ompi_comm_set (); */
newcomp = ompi_comm_set ( comp, /* old comm */
comp->c_local_group->grp_proc_count, /* local_size */
comp->c_local_group->grp_proc_pointers, /* local_procs*/
maxprocs, /* remote_size */
rprocs, /* remote_procs */
NULL, /* attrs */
comp->error_handler, /* error handler */
NULL, /* coll module */
NULL /* topo module */
);
if ( MPI_COMM_NULL == newcomp ) {
newcomp = ompi_comm_allocate ( comp->c_local_group->grp_proc_count, maxprocs );
if ( NULL == newcomp ) {
rc = MPI_ERR_INTERN;
goto exit;
}
@@ -126,10 +118,27 @@ int MPI_Comm_connect(char *port_name, MPI_Info info, int root,
&lleader, /* local leader */
&rleader, /* remote_leader */
OMPI_COMM_CID_INTRA_OOB); /* mode */
if ( OMPI_SUCCESS != rc ) {
if ( MPI_SUCCESS != rc ) {
goto exit;
}
/* setup the intercomm-structure using ompi_comm_set (); */
rc = ompi_comm_set ( newcomp, /* new comm */
comp, /* old comm */
comp->c_local_group->grp_proc_count, /* local_size */
comp->c_local_group->grp_proc_pointers, /* local_procs*/
maxprocs, /* remote_size */
rprocs, /* remote_procs */
NULL, /* attrs */
comp->error_handler, /* error handler */
NULL, /* coll module */
NULL /* topo module */
);
if ( MPI_SUCCESS != rc ) {
goto exit;
}
/* PROBLEM: do we have to re-start some low level stuff
to enable the usage of fast communication devices
between the two worlds ? */

View file

@@ -52,19 +52,9 @@ int MPI_Comm_dup(MPI_Comm comm, MPI_Comm *newcomm)
mode = OMPI_COMM_CID_INTRA;
}
newcomp = ompi_comm_set ( comp, /* old comm */
comp->c_local_group->grp_proc_count, /* local_size */
comp->c_local_group->grp_proc_pointers, /* local_procs*/
rsize, /* remote_size */
rprocs, /* remote_procs */
comp->c_keyhash, /* attrs */
comp->error_handler, /* error handler */
(mca_base_component_t*) comp->c_coll_selected_module, /* coll module,t.b.d */
NULL /* topo module, t.b.d */
);
if ( MPI_COMM_NULL == newcomp ) {
OMPI_ERRHANDLER_INVOKE (comm, MPI_ERR_INTERN, FUNC_NAME);
newcomp = ompi_comm_allocate (comp->c_local_group->grp_proc_count, rsize );
if ( NULL == newcomp ) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_INTERN, FUNC_NAME);
}
/* Determine context id. It is identical to f_2_c_handle */
@@ -74,11 +64,27 @@ int MPI_Comm_dup(MPI_Comm comm, MPI_Comm *newcomm)
NULL, /* local leader */
NULL, /* remote_leader */
mode ); /* mode */
if ( OMPI_SUCCESS != rc ) {
if ( MPI_SUCCESS != rc ) {
*newcomm = MPI_COMM_NULL;
OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_INTERN, FUNC_NAME);
return OMPI_ERRHANDLER_INVOKE(comm, rc, FUNC_NAME);
}
rc = ompi_comm_set ( newcomp, /* new comm */
comp, /* old comm */
comp->c_local_group->grp_proc_count, /* local_size */
comp->c_local_group->grp_proc_pointers, /* local_procs*/
rsize, /* remote_size */
rprocs, /* remote_procs */
comp->c_keyhash, /* attrs */
comp->error_handler, /* error handler */
(mca_base_component_t*) comp->c_coll_selected_module, /* coll module */
NULL /* topo module, t.b.d */
);
if ( MPI_SUCCESS != rc) {
return OMPI_ERRHANDLER_INVOKE (comm, rc, FUNC_NAME);
}
*newcomm = newcomp;
return ( MPI_SUCCESS );
}

View file

@@ -23,7 +23,7 @@ static const char FUNC_NAME[] = "MPI_Comm_join";
int MPI_Comm_join(int fd, MPI_Comm *intercomm)
{
int rc;
ompi_proc_t rproc;
ompi_proc_t *rproc;
uint32_t lleader=0; /* OOB contact information of our root */
ompi_communicator_t *comp, *newcomp;
@@ -43,17 +43,11 @@ int MPI_Comm_join(int fd, MPI_Comm *intercomm)
here. */
/* if proc unknown, set up the proc-structure */
/* setup the intercomm-structure using ompi_comm_set (); */
newcomp = ompi_comm_set ( comp, /* old comm */
comp->c_local_group->grp_proc_count, /* local_size */
comp->c_local_group->grp_proc_pointers, /* local_procs*/
1, /* remote_size */
&rproc, /* remote_procs */
NULL, /* attrs */
comp->error_handler, /* error handler */
NULL, /* coll module */
NULL /* topo module */
);
newcomp = ompi_comm_allocate ( comp->c_local_group->grp_proc_count, 1 );
if ( NULL == newcomp ) {
rc = MPI_ERR_INTERN;
goto exit;
}
/* setup comm_cid */
rc = ompi_comm_nextcid ( newcomp, /* new comm */
@@ -63,13 +57,35 @@ int MPI_Comm_join(int fd, MPI_Comm *intercomm)
&rproc, /* remote_leader */
OMPI_COMM_CID_INTRA_OOB); /* mode */
if ( OMPI_SUCCESS != rc ) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_SELF, rc, FUNC_NAME);
goto exit;
}
/* setup the intercomm-structure using ompi_comm_set (); */
rc = ompi_comm_set ( newcomp, /* new comm */
comp, /* old comm */
comp->c_local_group->grp_proc_count, /* local_size */
comp->c_local_group->grp_proc_pointers, /* local_procs*/
1, /* remote_size */
rproc, /* remote_procs */
NULL, /* attrs */
comp->error_handler, /* error handler */
NULL, /* coll module */
NULL /* topo module */
);
if ( MPI_SUCCESS != rc ) {
goto exit;
}
/* PROBLEM: do we have to re-start some low level stuff
to enable the usage of fast communication devices
between the two worlds ? */
exit:
if ( MPI_SUCCESS != rc ) {
*intercomm = MPI_COMM_NULL;
return OMPI_ERRHANDLER_INVOKE (MPI_COMM_SELF, rc, FUNC_NAME);
}
*intercomm = newcomp;
return MPI_SUCCESS;

View file

@@ -124,18 +124,9 @@ int MPI_Comm_spawn(char *command, char **argv, int maxprocs, MPI_Info info,
for ( i=0; i<maxprocs; i++ ) {
}
/* setup the intercomm-structure using ompi_comm_set (); */
newcomp = ompi_comm_set ( comp, /* old comm */
comp->c_local_group->grp_proc_count, /* local_size */
comp->c_local_group->grp_proc_pointers, /* local_procs*/
maxprocs, /* remote_size */
rprocs, /* remote_procs */
NULL, /* attrs */
comp->error_handler, /* error handler */
NULL, /* coll module */
NULL /* topo module */
);
if ( MPI_COMM_NULL == newcomp ) {
newcomp = ompi_comm_allocate ( comp->c_local_group->grp_proc_count, maxprocs );
if ( NULL == newcomp ) {
rc = MPI_ERR_INTERN;
goto exit;
}
@@ -150,6 +141,23 @@ int MPI_Comm_spawn(char *command, char **argv, int maxprocs, MPI_Info info,
goto exit;
}
/* setup the intercomm-structure using ompi_comm_set (); */
rc = ompi_comm_set ( newcomp, /* new comm */
comp, /* old comm */
comp->c_local_group->grp_proc_count, /* local_size */
comp->c_local_group->grp_proc_pointers, /* local_procs*/
maxprocs, /* remote_size */
rprocs, /* remote_procs */
NULL, /* attrs */
comp->error_handler, /* error handler */
NULL, /* coll module */
NULL /* topo module */
);
if ( MPI_SUCCESS != rc ) {
goto exit;
}
/* PROBLEM: do we have to re-start some low level stuff
to enable the usage of fast communication devices

View file

@@ -153,18 +153,9 @@ int MPI_Comm_spawn_multiple(int count, char **array_of_commands, char ***array_o
for ( i=0; i < totalnumprocs; i++ ) {
}
/* setup the intercomm-structure using ompi_comm_set (); */
newcomp = ompi_comm_set ( comp, /* old comm */
comp->c_local_group->grp_proc_count, /* local_size */
comp->c_local_group->grp_proc_pointers, /* local_procs*/
totalnumprocs, /* remote_size */
rprocs, /* remote_procs */
NULL, /* attrs */
comp->error_handler, /* error handler */
NULL, /* coll module */
NULL /* topo module */
);
if ( MPI_COMM_NULL == newcomp ) {
newcomp = ompi_comm_allocate ( comp->c_local_group->grp_proc_count, totalnumprocs);
if ( NULL == newcomp ) {
rc = MPI_ERR_INTERN;
goto exit;
}
@@ -179,6 +170,23 @@ int MPI_Comm_spawn_multiple(int count, char **array_of_commands, char ***array_o
goto exit;
}
/* setup the intercomm-structure using ompi_comm_set (); */
rc = ompi_comm_set ( newcomp, /* new comm */
comp, /* old comm */
comp->c_local_group->grp_proc_count, /* local_size */
comp->c_local_group->grp_proc_pointers, /* local_procs*/
totalnumprocs, /* remote_size */
rprocs, /* remote_procs */
NULL, /* attrs */
comp->error_handler, /* error handler */
NULL, /* coll module */
NULL /* topo module */
);
if ( MPI_SUCCESS != rc ) {
goto exit;
}
/* PROBLEM: do we have to re-start some low level stuff
to enable the usage of fast communication devices
between the two worlds ? */

View file

@@ -24,12 +24,7 @@ static const char FUNC_NAME[] = "MPI_Group_translate_ranks";
int MPI_Group_translate_ranks(MPI_Group group1, int n_ranks, int *ranks1,
MPI_Group group2, int *ranks2)
{
int rank, proc, proc2;
ompi_proc_t *proc1_pointer, *proc2_pointer;
ompi_group_t *group1_pointer, *group2_pointer;
group1_pointer=(ompi_group_t *)group1;
group2_pointer=(ompi_group_t *)group2;
int err;
/* check for errors */
if( MPI_PARAM_CHECK ) {
@@ -40,7 +35,7 @@ int MPI_Group_translate_ranks(MPI_Group group1, int n_ranks, int *ranks1,
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_GROUP,
FUNC_NAME);
}
if( (n_ranks > group1_pointer->grp_proc_count) || (0 >= n_ranks) ){
if( (n_ranks > group1->grp_proc_count) || (0 >= n_ranks) ){
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_GROUP,
FUNC_NAME);
}
@@ -50,21 +45,7 @@ int MPI_Group_translate_ranks(MPI_Group group1, int n_ranks, int *ranks1,
}
}
/* loop over all ranks */
for (proc = 0; proc < n_ranks; proc++) {
rank=ranks1[proc];
proc1_pointer=group1_pointer->grp_proc_pointers[rank];
/* initialize to no "match" */
ranks2[proc] = MPI_UNDEFINED;
for (proc2 = 0; proc2 < group2_pointer->grp_proc_count; proc2++)
{
proc2_pointer=group2_pointer->grp_proc_pointers[proc2];
if ( proc1_pointer == proc2_pointer) {
ranks2[proc] = proc2;
break;
}
} /* end proc2 loop */
} /* end proc loop */
return MPI_SUCCESS;
err = ompi_group_translate_ranks ( group1, n_ranks, ranks1,
group2, ranks2 );
OMPI_ERRHANDLER_RETURN(err, MPI_COMM_WORLD, err, FUNC_NAME );
}

View file

@@ -27,6 +27,7 @@ int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader,
int tag, MPI_Comm *newintercomm)
{
int local_size, local_rank;
int lleader, rleader;
ompi_communicator_t *newcomp;
ompi_proc_t **rprocs=NULL;
int rc, rsize;
@@ -43,16 +44,19 @@ int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader,
return OMPI_ERRHANDLER_INVOKE ( local_comm, MPI_ERR_ARG,
FUNC_NAME);
if ( tag < 0 || tag > MPI_TAG_UB )
/* if ( tag < 0 || tag > MPI_TAG_UB )
return OMPI_ERRHANDLER_INVOKE ( local_comm, MPI_ERR_ARG,
FUNC_NAME);
*/
}
local_size = ompi_comm_size ( local_comm );
local_rank = ompi_comm_rank ( local_comm );
lleader = local_leader;
rleader = remote_leader;
if ( MPI_PARAM_CHECK ) {
if ( 0 < local_leader || local_leader > local_size )
if ( 0 > local_leader || local_leader > local_size )
return OMPI_ERRHANDLER_INVOKE ( local_comm, MPI_ERR_ARG,
FUNC_NAME);
@@ -74,9 +78,8 @@ int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader,
if ( local_rank == local_leader ) {
MPI_Request req;
MPI_Status status;
/* local leader exchange group sizes and vpid lists */
/* local leader exchange group sizes lists */
rc =mca_pml.pml_irecv (&rsize, 1, MPI_INT, remote_leader, tag, bridge_comm,
&req );
if ( rc != MPI_SUCCESS ) {
@@ -87,14 +90,14 @@ int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader,
if ( rc != MPI_SUCCESS ) {
goto err_exit;
}
rc = mca_pml.pml_wait ( 1, &req, NULL, &status);
rc = mca_pml.pml_wait_all ( 1, &req, MPI_STATUS_IGNORE);
if ( rc != MPI_SUCCESS ) {
goto err_exit;
}
}
/* bcast size and vpid lists to all processes in local_comm */
/* bcast size and list of remote processes to all processes in local_comm */
rc = local_comm->c_coll.coll_bcast ( &rsize, 1, MPI_INT, local_leader,
local_comm );
if ( rc != MPI_SUCCESS ) {
@@ -103,7 +106,29 @@ int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader,
rprocs = ompi_comm_get_rprocs ( local_comm, bridge_comm, local_leader,
remote_leader, tag, rsize );
newcomp = ompi_comm_set ( local_comm, /* old comm */
if ( NULL == rprocs ) {
goto err_exit;
}
newcomp = ompi_comm_allocate ( local_comm->c_local_group->grp_proc_count, rsize);
if ( NULL == newcomp ) {
rc = MPI_ERR_INTERN;
goto err_exit;
}
/* Determine context id. It is identical to f_2_c_handle */
rc = ompi_comm_nextcid ( newcomp, /* new comm */
local_comm, /* old comm */
bridge_comm, /* bridge comm */
&lleader, /* local leader */
&rleader, /* remote_leader */
OMPI_COMM_CID_INTRA_BRIDGE); /* mode */
if ( MPI_SUCCESS != rc ) {
goto err_exit;
}
rc = ompi_comm_set ( newcomp, /* new comm */
local_comm, /* old comm */
local_comm->c_local_group->grp_proc_count, /* local_size */
local_comm->c_local_group->grp_proc_pointers, /* local_procs*/
rsize, /* remote_size */
@@ -113,19 +138,10 @@ int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader,
NULL, /* coll module */
NULL /* topo mpodule */
);
if ( newcomp == MPI_COMM_NULL ) {
return OMPI_ERRHANDLER_INVOKE (local_comm, MPI_ERR_INTERN,
FUNC_NAME);
if ( MPI_SUCCESS != rc ) {
goto err_exit;
}
/* Determine context id. It is identical to f_2_c_handle */
rc = ompi_comm_nextcid ( newcomp, /* new comm */
local_comm, /* old comm */
bridge_comm, /* bridge comm */
&local_leader, /* local leader */
&remote_leader, /* remote_leader */
OMPI_COMM_CID_INTRA_BRIDGE); /* mode */
err_exit:
if ( NULL == rprocs ) {
free ( rprocs );

View file

@@ -32,6 +32,7 @@ int MPI_Intercomm_merge(MPI_Comm intercomm, int high,
int first;
int total_size;
int rc=MPI_SUCCESS;
int thigh = high;
if ( MPI_PARAM_CHECK ) {
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
@@ -56,7 +57,7 @@ int MPI_Intercomm_merge(MPI_Comm intercomm, int high,
FUNC_NAME);
}
first = ompi_comm_determine_first ( intercomm, high );
first = ompi_comm_determine_first ( intercomm, thigh );
if ( first ) {
memcpy ( procs, intercomm->c_local_group->grp_proc_pointers,
local_size * sizeof(ompi_proc_t *));
@@ -70,20 +71,10 @@ int MPI_Intercomm_merge(MPI_Comm intercomm, int high,
local_size * sizeof(ompi_proc_t *));
}
newcomp = ompi_comm_set ( intercomm, /* old comm */
total_size, /* local_size */
procs, /* local_procs*/
0, /* remote_size */
NULL, /* remote_procs */
NULL, /* attrs */
intercomm->error_handler, /* error handler*/
NULL, /* coll module */
NULL /* topo mpodule */
);
if ( newcomp == MPI_COMM_NULL ) {
return OMPI_ERRHANDLER_INVOKE (intercomm, MPI_ERR_INTERN,
FUNC_NAME);
newcomp = ompi_comm_allocate ( total_size, 0 );
if ( NULL == newcomp ) {
rc = MPI_ERR_INTERN;
goto exit;
}
/* Determine context id. It is identical to f_2_c_handle */
@@ -97,14 +88,29 @@ int MPI_Intercomm_merge(MPI_Comm intercomm, int high,
goto exit;
}
rc = ompi_comm_set ( newcomp, /* new comm */
intercomm, /* old comm */
total_size, /* local_size */
procs, /* local_procs*/
0, /* remote_size */
NULL, /* remote_procs */
NULL, /* attrs */
intercomm->error_handler, /* error handler*/
NULL, /* coll module */
NULL /* topo mpodule */
);
if ( MPI_SUCCESS != rc ) {
goto exit;
}
exit:
if ( NULL != procs ) {
free ( procs );
}
if ( OMPI_SUCCESS != rc ) {
if ( MPI_SUCCESS != rc ) {
*newcomm = MPI_COMM_NULL;
return OMPI_ERRHANDLER_INVOKE(intercomm, MPI_ERR_INTERN,
FUNC_NAME);
return OMPI_ERRHANDLER_INVOKE(intercomm, rc, FUNC_NAME);
}
*newcomm = newcomp;