updating the comm-creating functions to the new flow-chart
This commit was SVN r1901.
This commit is contained in:
Parent: 2e43e4980e
Commit: eb4cec3566
@@ -24,13 +24,9 @@ static const char FUNC_NAME[] = "MPI_Comm_accept";
 int MPI_Comm_accept(char *port_name, MPI_Info info, int root,
                     MPI_Comm comm, MPI_Comm *newcomm)
 {
-    int rank, i, rc;
-    int maxprocs;
-    uint32_t *rprocs=NULL;
-    uint32_t lleader=0, rleader=0; /* OOB contact information of our and other root */
-    ompi_communicator_t *comp, *newcomp;
-
-    comp = (ompi_communicator_t *) comm;
+    int rank, rc;
+    int send_first=0; /*wrong, we receive first */
+    ompi_communicator_t *newcomp=MPI_COMM_NULL;
 
     if ( MPI_PARAM_CHECK ) {
         OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
@@ -63,95 +59,19 @@ int MPI_Comm_accept(char *port_name, MPI_Info info, int root,
         }
     }
 
-    if ( rank == root && MPI_INFO_NULL != info ) {
-        /* parse info object. no prefedined values for this function in MPI-2 */
-
-        /* accept connection from other app */
-
-        /* recv number of procs (maxprocs) of other app */
-        rprocs = (uint32_t *)malloc (maxprocs * sizeof(uint32_t));
-        if ( NULL == rprocs ) {
-            rc = MPI_ERR_INTERN;
-            goto exit;
-        }
-
-        /* recv list of procs of other app */
-        /* send number of procs to other app */
-        /* send list of process to other app */
-    }
-
-    /* bcast maxprocs to all processes in comm and allocate the rprocs array*/
-    rc = comp->c_coll.coll_bcast ( &maxprocs, 1, MPI_INT, root, comm);
-    if ( OMPI_SUCCESS != rc ) {
-        goto exit;
-    }
-
-    if ( rank != root ) {
-        rprocs = (uint32_t *)malloc (maxprocs * sizeof(uint32_t));
-        if ( NULL == rprocs ) {
-            rc = MPI_ERR_INTERN;
-            goto exit;
-        }
-    }
-
-    /* bcast list of remote procs to all processes in comm */
-    rc = comp->c_coll.coll_bcast ( &rprocs, maxprocs, MPI_UNSIGNED, root, comm);
-    if ( OMPI_SUCCESS != rc ) {
-        goto exit;
-    }
-
-    /* setup the proc-structures for the new processes, which are not yet known */
-    for ( i=0; i<maxprocs; i++ ) {
-        /* if process rprocs[i] not yet in our list, add it. */
-    }
-
-    newcomp = ompi_comm_allocate ( comp->c_local_group->grp_proc_count, maxprocs);
-    if ( NULL == newcomp ) {
-        rc = MPI_ERR_INTERN;
-        goto exit;
-    }
-
-    /* Determine context id. It is identical to f_2_c_handle */
-    rc = ompi_comm_nextcid ( newcomp,                   /* new comm */
-                             comp,                      /* old comm */
-                             NULL,                      /* bridge comm */
-                             &lleader,                  /* local leader */
-                             &rleader,                  /* remote_leader */
-                             OMPI_COMM_CID_INTRA_OOB ); /* mode */
-    if ( OMPI_SUCCESS != rc ) {
-        goto exit;
-    }
-
-    /* setup the intercomm-structure using ompi_comm_set (); */
-    rc = ompi_comm_set ( newcomp,                                 /* new comm */
-                         comp,                                    /* old comm */
-                         comp->c_local_group->grp_proc_count,     /* local_size */
-                         comp->c_local_group->grp_proc_pointers,  /* local_procs*/
-                         maxprocs,                                /* remote_size */
-                         rprocs,                                  /* remote_procs */
-                         NULL,                                    /* attrs */
-                         comp->error_handler,                     /* error handler */
-                         NULL,                                    /* coll module */
-                         NULL                                     /* topo module */
-                         );
-    if ( MPI_SUCCESS != rc ) {
-        goto exit;
-    }
-
-    /* PROBLEM: do we have to re-start some low level stuff
-       to enable the usage of fast communication devices
-       between the two worlds ? */
-
- exit:
-    if ( NULL != rprocs ) {
-        free ( rprocs );
-    }
-    if ( MPI_SUCCESS != rc ) {
-        *newcomm = MPI_COMM_NULL;
-        return OMPI_ERRHANDLER_INVOKE(comm, rc, FUNC_NAME);
-    }
+    /* parse info object. no prefedined values for this function in MPI-2
+     * so lets ignore it for the moment.
+     *
+     * if ( rank == root && MPI_INFO_NULL != info ) {
+     * }
+     */
+
+    /*
+     * Our own port_name is not of interest here, so we pass in NULL.
+     * The two leader will figure this out later.
+     */
+    rc = ompi_comm_connect_accept (comm, root, NULL, send_first, &newcomp);
 
     *newcomm = newcomp;
-    return MPI_SUCCESS;
+    OMPI_ERRHANDLER_RETURN(rc, comm, rc, FUNC_NAME );
 }
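All five dynamic-process entry points now funnel into the single helper ompi_comm_connect_accept, which replaces the per-function maxprocs/rprocs handshake deleted above. Below is a sketch of that helper's shape as inferred from the call sites in this commit; the parameter names are guesses and the stub body exists only so the sketch compiles.

#include <stddef.h>
#include <stdio.h>

/* Opaque stand-ins for the real OMPI types. */
typedef struct ompi_communicator_t ompi_communicator_t;
typedef struct ompi_process_name_t ompi_process_name_t;
typedef ompi_communicator_t *MPI_Comm;

/* Inferred signature: comm/root name the local group and its leader;
 * port is the OOB name of the remote root (NULL on the accepting side,
 * which learns it during the handshake); send_first says who talks
 * first on the OOB channel; *newcomm receives the intercommunicator. */
static int ompi_comm_connect_accept(MPI_Comm comm, int root,
                                    ompi_process_name_t *port,
                                    int send_first,
                                    ompi_communicator_t **newcomm)
{
    (void)comm; (void)root; (void)port; (void)send_first;
    *newcomm = NULL;   /* stub: the real helper builds the intercomm */
    return 0;
}

int main(void)
{
    ompi_communicator_t *newcomp = NULL;
    /* accept side: no port name, passive (send_first = 0) */
    int rc = ompi_comm_connect_accept(NULL, 0, NULL, 0, &newcomp);
    printf("rc = %d\n", rc);
    return rc;
}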
@@ -24,14 +24,11 @@ static const char FUNC_NAME[] = "MPI_Comm_connect";
 int MPI_Comm_connect(char *port_name, MPI_Info info, int root,
                      MPI_Comm comm, MPI_Comm *newcomm)
 {
-    int rank, i, rc;
-    int maxprocs;
-    uint32_t *rprocs=NULL;
-    uint32_t lleader=0; /* OOB contact information of our root */
-    uint32_t rleader=0; /* OOB contact information of other root */
-    ompi_communicator_t *comp, *newcomp;
+    int rank, rc;
+    int send_first=1; /* yes, we are the active part in this game */
+    ompi_communicator_t *newcomp=MPI_COMM_NULL;
+    ompi_process_name_t *port_proc_name;
 
-    comp = (ompi_communicator_t *) comm;
     if ( MPI_PARAM_CHECK ) {
         OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
 
@@ -62,96 +59,20 @@ int MPI_Comm_connect(char *port_name, MPI_Info info, int root,
         }
     }
 
-    if ( rank == root && MPI_INFO_NULL != info ) {
-        /* parse info object. No prefedined values for this function in MPI-2*/
-
-        /* connect to other app */
-        /* send number of procs */
-        /* send list of procs */
-
-        /* receive number of procs (maxprocs) of other app */
-        rprocs = (uint32_t *)malloc (maxprocs * sizeof(uint32_t));
-        if ( NULL == rprocs ) {
-            rc = MPI_ERR_INTERN;
-            goto exit;
-        }
-
-        /* receive list of process of other app */
-    }
-
-    /* bcast maxprocs to all processes in comm and allocate the rprocs array*/
-    rc = comp->c_coll.coll_bcast ( &maxprocs, 1, MPI_INT, root, comm);
-    if ( OMPI_SUCCESS != rc ) {
-        goto exit;
-    }
-
-    if ( rank != root ) {
-        rprocs = (uint32_t *)malloc (maxprocs * sizeof(uint32_t));
-        if ( NULL == rprocs ) {
-            rc = MPI_ERR_INTERN;
-            goto exit;
-        }
-    }
-
-    /* bcast list of remote procs to all processes in comm.
-       TO BE CHANGED. */
-    rc = comp->c_coll.coll_bcast ( &rprocs, maxprocs, MPI_UNSIGNED, root, comm);
-    if ( MPI_SUCCESS != rc ) {
-        goto exit;
-    }
-
-    /* setup the proc-structures for the new processes, which are not yet known */
-    for ( i=0; i<maxprocs; i++ ) {
-        /* if process rprocs[i] not yet in our list, add it. */
-    }
-
-    newcomp = ompi_comm_allocate ( comp->c_local_group->grp_proc_count, maxprocs );
-    if ( NULL == newcomp ) {
-        rc = MPI_ERR_INTERN;
-        goto exit;
-    }
-
-    /* Determine context id. It is identical to f_2_c_handle */
-    rc = ompi_comm_nextcid ( newcomp,                  /* new comm */
-                             comp,                     /* old comm */
-                             NULL,                     /* bridge comm */
-                             &lleader,                 /* local leader */
-                             &rleader,                 /* remote_leader */
-                             OMPI_COMM_CID_INTRA_OOB); /* mode */
-    if ( MPI_SUCCESS != rc ) {
-        goto exit;
-    }
-
-    /* setup the intercomm-structure using ompi_comm_set (); */
-    rc = ompi_comm_set ( newcomp,                                 /* new comm */
-                         comp,                                    /* old comm */
-                         comp->c_local_group->grp_proc_count,     /* local_size */
-                         comp->c_local_group->grp_proc_pointers,  /* local_procs*/
-                         maxprocs,                                /* remote_size */
-                         rprocs,                                  /* remote_procs */
-                         NULL,                                    /* attrs */
-                         comp->error_handler,                     /* error handler */
-                         NULL,                                    /* coll module */
-                         NULL                                     /* topo module */
-                         );
-    if ( MPI_SUCCESS != rc ) {
-        goto exit;
-    }
-
-    /* PROBLEM: do we have to re-start some low level stuff
-       to enable the usage of fast communication devices
-       between the two worlds ? */
-
- exit:
-    if ( NULL != rprocs ) {
-        free ( rprocs );
-    }
-    if ( MPI_SUCCESS != rc ) {
-        *newcomm = MPI_COMM_NULL;
-        return OMPI_ERRHANDLER_INVOKE(comm, rc, FUNC_NAME);
-    }
+    /* parse info object. No prefedined values for this function in MPI-2,
+     * so lets ignore it for the moment.
+     *
+     * if ( rank == root && MPI_INFO_NULL != info ) {
+     * }
+     */
+
+    /*
+     * translate the port_name string into the according process_name_t
+     * structure. This functionality is currently missing from ns.
+     */
+    rc = ompi_comm_connect_accept (comm, root, port_proc_name, send_first,
                                    &newcomp);
 
     *newcomm = newcomp;
-    return MPI_SUCCESS;
+    OMPI_ERRHANDLER_RETURN(rc, comm, rc, FUNC_NAME);
 }
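The connect side is symmetric: send_first=1 and a real port name. Note also the changed tail in both functions: the explicit success/failure branch and the exit: cleanup are gone in favor of a single OMPI_ERRHANDLER_RETURN. Below is a minimal stand-in for that macro, assuming it invokes the communicator's error handler on failure and otherwise returns MPI_SUCCESS; the real macro lives in OMPI's errhandler headers and this demo passes a string where the real code passes a communicator.

#include <stdio.h>

#define MPI_SUCCESS 0

/* Hypothetical stand-in: the real OMPI macro takes (rc, comm, err, name)
 * and fires comm's error handler when rc indicates failure. */
#define OMPI_ERRHANDLER_RETURN(rc, comm, err, name)                     \
    do {                                                                \
        if (MPI_SUCCESS != (rc)) {                                      \
            fprintf(stderr, "%s: errhandler on %s (err=%d)\n",          \
                    (name), (comm), (err));                             \
            return (err);                                               \
        }                                                               \
        return MPI_SUCCESS;                                             \
    } while (0)

static int tail(int rc)
{
    /* the new common tail of MPI_Comm_connect / MPI_Comm_accept */
    OMPI_ERRHANDLER_RETURN(rc, "comm", rc, "MPI_Comm_connect");
}

int main(void)
{
    printf("success path -> %d\n", tail(MPI_SUCCESS));
    printf("failure path -> %d\n", tail(17));
    return 0;
}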
@@ -52,6 +52,7 @@ int MPI_Comm_dup(MPI_Comm comm, MPI_Comm *newcomm)
         mode = OMPI_COMM_CID_INTRA;
     }
 
+    *newcomm = MPI_COMM_NULL;
     newcomp = ompi_comm_allocate (comp->c_local_group->grp_proc_count, rsize );
     if ( NULL == newcomp ) {
         return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_INTERN, FUNC_NAME);
@@ -63,9 +64,9 @@ int MPI_Comm_dup(MPI_Comm comm, MPI_Comm *newcomm)
                              NULL,   /* bridge comm */
                              NULL,   /* local leader */
                              NULL,   /* remote_leader */
-                             mode ); /* mode */
+                             mode,   /* mode */
+                             -1 );   /* send_first */
     if ( MPI_SUCCESS != rc ) {
-        *newcomm = MPI_COMM_NULL;
         return OMPI_ERRHANDLER_INVOKE(comm, rc, FUNC_NAME);
     }
 
@@ -77,13 +78,26 @@ int MPI_Comm_dup(MPI_Comm comm, MPI_Comm *newcomm)
                          rprocs,              /* remote_procs */
                          comp->c_keyhash,     /* attrs */
                          comp->error_handler, /* error handler */
-                         (mca_base_component_t*) comp->c_coll_selected_module, /* coll module */
                          NULL                 /* topo module, t.b.d */
                          );
     if ( MPI_SUCCESS != rc) {
         return OMPI_ERRHANDLER_INVOKE (comm, rc, FUNC_NAME);
     }
 
+    /* activate communicator and init coll-module */
+    rc = ompi_comm_activate (newcomp, /* new communicator */
+                             comp,    /* old comm */
+                             NULL,    /* bridge comm */
+                             NULL,    /* local leader */
+                             NULL,    /* remote_leader */
+                             mode,    /* mode */
+                             -1,      /* send_first */
+                             (mca_base_component_t*) comp->c_coll_selected_module /* coll module */
+                             );
+    if ( MPI_SUCCESS != rc ) {
+        return OMPI_ERRHANDLER_INVOKE(comm, rc, FUNC_NAME);
+    }
+
     *newcomm = newcomp;
     return ( MPI_SUCCESS );
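MPI_Comm_dup shows the other half of the new flow-chart: ompi_comm_nextcid grows a send_first parameter (-1 where no OOB handshake applies), and a new ompi_comm_activate step runs after ompi_comm_set, taking over the coll-module argument that ompi_comm_set used to receive. Below are the two prototypes as implied by these call sites; the parameter names and the leader pointer types are inferred, not copied from the committed headers.

/* Sketch of the two internal prototypes implied by the new call sites.
 * Inferred, not copied from the committed header files. */
typedef struct ompi_communicator_t ompi_communicator_t;
typedef struct mca_base_component_t mca_base_component_t;

/* CID selection, now parameterized by who sends first on the OOB
 * channel; callers pass -1 when the mode has no OOB handshake. */
int ompi_comm_nextcid(ompi_communicator_t *newcomm,
                      ompi_communicator_t *comm,
                      ompi_communicator_t *bridgecomm,
                      void *local_leader,
                      void *remote_leader,
                      int mode,
                      int send_first);

/* New final step: synchronize both groups, mark the communicator
 * usable, and select/initialize the collective module that used to be
 * handed to ompi_comm_set directly. */
int ompi_comm_activate(ompi_communicator_t *newcomm,
                       ompi_communicator_t *comm,
                       ompi_communicator_t *bridgecomm,
                       void *local_leader,
                       void *remote_leader,
                       int mode,
                       int send_first,
                       mca_base_component_t *collcomponent);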
@@ -23,11 +23,9 @@ static const char FUNC_NAME[] = "MPI_Comm_join";
 int MPI_Comm_join(int fd, MPI_Comm *intercomm)
 {
     int rc;
-    ompi_proc_t *rproc;
-    uint32_t lleader=0; /* OOB contact information of our root */
-    ompi_communicator_t *comp, *newcomp;
+    int send_first;
+    ompi_communicator_t *newcomp;
+    ompi_process_name_t *port_proc_name;
 
-    comp = (ompi_communicator_t *)MPI_COMM_SELF;
-
     if ( MPI_PARAM_CHECK ) {
         OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
@@ -41,52 +39,10 @@ int MPI_Comm_join(int fd, MPI_Comm *intercomm)
     /* sendrecv OOB-name (port-name) through the socket connection.
        Need to determine somehow how to avoid a potential deadlock
       here. */
-    /* if proc unknown, set up the proc-structure */
-
-    newcomp = ompi_comm_allocate ( comp->c_local_group->grp_proc_count, 1 );
-    if ( NULL == newcomp ) {
-        rc = MPI_ERR_INTERN;
-        goto exit;
-    }
-
-    /* setup comm_cid */
-    rc = ompi_comm_nextcid ( newcomp,                  /* new comm */
-                             comp,                     /* old comm */
-                             NULL,                     /* bridge comm */
-                             &lleader,                 /* local leader */
-                             &rproc,                   /* remote_leader */
-                             OMPI_COMM_CID_INTRA_OOB); /* mode */
-    if ( OMPI_SUCCESS != rc ) {
-        goto exit;
-    }
-
-    /* setup the intercomm-structure using ompi_comm_set (); */
-    rc = ompi_comm_set ( newcomp,                                 /* new comm */
-                         comp,                                    /* old comm */
-                         comp->c_local_group->grp_proc_count,     /* local_size */
-                         comp->c_local_group->grp_proc_pointers,  /* local_procs*/
-                         1,                                       /* remote_size */
-                         rproc,                                   /* remote_procs */
-                         NULL,                                    /* attrs */
-                         comp->error_handler,                     /* error handler */
-                         NULL,                                    /* coll module */
-                         NULL                                     /* topo module */
-                         );
-    if ( MPI_SUCCESS != rc ) {
-        goto exit;
-    }
-
-    /* PROBLEM: do we have to re-start some low level stuff
-       to enable the usage of fast communication devices
-       between the two worlds ? */
-
- exit:
-    if ( MPI_SUCCESS != rc ) {
-        *intercomm = MPI_COMM_NULL;
-        return OMPI_ERRHANDLER_INVOKE (MPI_COMM_SELF, rc, FUNC_NAME);
-    }
+    rc = ompi_comm_connect_accept (MPI_COMM_SELF, 0, port_proc_name,
                                    send_first, &newcomp);
 
     *intercomm = newcomp;
-    return MPI_SUCCESS;
+    OMPI_ERRHANDLER_RETURN (rc, MPI_COMM_SELF, rc, FUNC_NAME);
 }
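MPI_Comm_join is the one caller that cannot fix send_first statically: both peers are symmetric endpoints of the same socket, so one side must be elected to send first or the OOB exchange deadlocks, which is exactly the concern the retained comment raises. This commit leaves send_first uninitialized; a hypothetical tie-break, which is illustration only and not what the commit implements, could compare the names exchanged over the fd:

#include <stdio.h>
#include <string.h>

/* Hypothetical tie-break: after the two peers swap their OOB names over
 * the socket, the lexicographically smaller name sends first. Exactly
 * one of the two symmetric peers ends up with send_first = 1. */
static int decide_send_first(const char *my_name, const char *peer_name)
{
    return strcmp(my_name, peer_name) < 0;
}

int main(void)
{
    const char *a = "0.0.1";  /* made-up OOB process names */
    const char *b = "0.0.2";
    printf("peer a: send_first=%d\n", decide_send_first(a, b)); /* 1 */
    printf("peer b: send_first=%d\n", decide_send_first(b, a)); /* 0 */
    return 0;
}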
@@ -27,11 +27,8 @@ int MPI_Comm_spawn(char *command, char **argv, int maxprocs, MPI_Info info,
                    int *array_of_errcodes)
 {
     int rank, rc, i;
-    ompi_communicator_t *comp, *newcomp;
-    uint32_t *rprocs=NULL;
-    uint32_t lleader=0, rleader=0; /* OOB contact information of me and the other root */
-
-    comp = (ompi_communicator_t *) comm;
+    int send_first=0; /* we wait to be contacted */
+    ompi_communicator_t *newcomp;
 
     if ( MPI_PARAM_CHECK ) {
         OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
@@ -77,18 +74,6 @@ int MPI_Comm_spawn(char *command, char **argv, int maxprocs, MPI_Info info,
     }
 
-    /* bcast maxprocs to all processes in comm and allocate the rprocs array*/
-    rc = comp->c_coll.coll_bcast ( &maxprocs, 1, MPI_INT, root, comm);
-    if ( OMPI_SUCCESS != rc ) {
-        goto exit;
-    }
-
-    rprocs = (uint32_t *)malloc (maxprocs * sizeof(uint32_t));
-    if ( NULL == rprocs ) {
-        rc = MPI_ERR_INTERN;
-        goto exit;
-    }
-
     if ( rank == root && MPI_INFO_NULL != info ) {
         /* parse the info object */
 
@@ -105,80 +90,26 @@ int MPI_Comm_spawn(char *command, char **argv, int maxprocs, MPI_Info info,
         /* start processes. if number of processes started != maxprocs
           return MPI_ERR_SPAWN.*/
 
-        /* publish your name */
-        /* accept connection from other group.
-           Root in the new application is rank 0 in their COMM_WORLD ? */
+        /* publish your name. this should be based on the jobid of the
+           children, to support the scenario of having several
+           spawns of non-interleaving communicators working */
+
+        /* rc = ompi_comm_namepublish (service_name, port_name ); */
+    }
+
+    rc = ompi_comm_connect_accept (comm, root, NULL, send_first, &newcomp);
 
+    if ( rank == root ) {
         /* unpublish name */
-
-        /* send list of procs to other app */
-        /* receive list of procs from other app */
     }
 
-    /* bcast list of remote procs to all processes in comm */
-    rc = comp->c_coll.coll_bcast ( &rprocs, maxprocs, MPI_UNSIGNED, root, comm);
-    if ( OMPI_SUCCESS != rc ) {
-        goto exit;
-    }
-
-    /* setup the proc-structures for the new processes */
-    for ( i=0; i<maxprocs; i++ ) {
-    }
-
-    newcomp = ompi_comm_allocate ( comp->c_local_group->grp_proc_count, maxprocs );
-    if ( NULL == newcomp ) {
-        rc = MPI_ERR_INTERN;
-        goto exit;
-    }
-
-    /* Determine context id. It is identical to f_2_c_handle */
-    rc = ompi_comm_nextcid ( newcomp,                   /* new comm */
-                             comp,                      /* old comm */
-                             NULL,                      /* bridge comm */
-                             &lleader,                  /* local leader */
-                             &rleader,                  /* remote_leader */
-                             OMPI_COMM_CID_INTRA_OOB ); /* mode */
-    if ( OMPI_SUCCESS != rc ) {
-        goto exit;
-    }
-
-    /* setup the intercomm-structure using ompi_comm_set (); */
-    rc = ompi_comm_set ( newcomp,                                 /* new comm */
-                         comp,                                    /* old comm */
-                         comp->c_local_group->grp_proc_count,     /* local_size */
-                         comp->c_local_group->grp_proc_pointers,  /* local_procs*/
-                         maxprocs,                                /* remote_size */
-                         rprocs,                                  /* remote_procs */
-                         NULL,                                    /* attrs */
-                         comp->error_handler,                     /* error handler */
-                         NULL,                                    /* coll module */
-                         NULL                                     /* topo module */
-                         );
-    if ( MPI_SUCCESS != rc ) {
-        goto exit;
-    }
-
-    /* PROBLEM: do we have to re-start some low level stuff
-       to enable the usage of fast communication devices
-       between the two worlds ? */
-
     /* set error codes */
     if (MPI_ERRCODES_IGNORE != array_of_errcodes) {
         for ( i=0; i < maxprocs; i++ ) {
-            array_of_errcodes[i]=MPI_SUCCESS;
+            array_of_errcodes[i]=rc;
         }
     }
 
- exit:
-    if ( NULL != rprocs) {
-        free ( rprocs );
-    }
-    if ( MPI_SUCCESS != rc ) {
-        *intercomm = MPI_COMM_NULL;
-        return OMPI_ERRHANDLER_INVOKE(comm, rc, FUNC_NAME);
-    }
-
     *intercomm = newcomp;
-    return MPI_SUCCESS;
+    OMPI_ERRHANDLER_RETURN (rc, comm, rc, FUNC_NAME);
 }
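One behavioral change worth flagging in MPI_Comm_spawn: the error-code array is no longer hard-coded to MPI_SUCCESS; every slot now receives the collective result of ompi_comm_connect_accept, so a failed handshake becomes visible per spawned process. A runnable miniature of that pattern:

#include <stdio.h>

#define MPI_SUCCESS 0

int main(void)
{
    enum { maxprocs = 4 };
    int array_of_errcodes[maxprocs];
    int rc = MPI_SUCCESS;  /* pretend: rc = ompi_comm_connect_accept(...) */

    /* all slots get rc, not a hard-coded MPI_SUCCESS */
    for (int i = 0; i < maxprocs; i++) {
        array_of_errcodes[i] = rc;
    }
    for (int i = 0; i < maxprocs; i++) {
        printf("errcode[%d] = %d\n", i, array_of_errcodes[i]);
    }
    return 0;
}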
@@ -28,11 +28,8 @@ int MPI_Comm_spawn_multiple(int count, char **array_of_commands, char ***array_o
 {
     int i, rc, rank;
     int totalnumprocs=0;
-    uint32_t *rprocs=NULL;
-    ompi_communicator_t *comp, *newcomp;
-    uint32_t lleader=0, rleader=0; /* OOB contact information of root and the other root */
-
-    comp = (ompi_communicator_t *) comm;
+    ompi_communicator_t *newcomp;
+    int send_first=0; /* they are contacting us first */
 
     if ( MPI_PARAM_CHECK ) {
         OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
@@ -102,111 +99,41 @@ int MPI_Comm_spawn_multiple(int count, char **array_of_commands, char ***array_o
     if ( rank == root ) {
         for ( i=0; i < count; i++ ) {
             totalnumprocs += array_of_maxprocs[i];
-
-            /* parse the info[i] */
-
-            /* check potentially for:
-               - "host": desired host where to spawn the processes
-               - "arch": desired architecture
-               - "wdir": directory, where executable can be found
-               - "path": list of directories where to look for the executable
-               - "file": filename, where additional information is provided.
-               - "soft": see page 92 of MPI-2.
-            */
         }
-    }
 
-    /* bcast totalnumprocs to all processes in comm and allocate the rprocs array*/
-    rc = comp->c_coll.coll_bcast ( &totalnumprocs, 1, MPI_INT, root, comm);
-    if ( OMPI_SUCCESS != rc ) {
-        goto exit;
-    }
-
-    rprocs = (uint32_t *)malloc (totalnumprocs * sizeof(uint32_t));
-    if ( NULL == rprocs ) {
-        rc = MPI_ERR_INTERN;
-        goto exit;
-    }
-
-    if ( rank == root ) {
+        /* parse the info[i] */
+
+        /* check potentially for:
+           - "host": desired host where to spawn the processes
+           - "arch": desired architecture
+           - "wdir": directory, where executable can be found
+           - "path": list of directories where to look for the executable
+           - "file": filename, where additional information is provided.
+           - "soft": see page 92 of MPI-2.
+        */
+
         /* map potentially array_of_argvs == MPI_ARGVS_NULL to a correct value */
         /* map potentially array_of_argvs[i] == MPI_ARGV_NULL to a correct value.
            not required by the standard. */
         /* start processes */
 
-        /* publish name */
-        /* accept connection from other group.
-           Root in the new application is rank 0 in their COMM_WORLD ? */
+        /* publish name, which should be based on the jobid of the children */
+        /* rc = ompi_comm_namepublish (service_name, port_name ); */
+    }
+
+    rc = ompi_comm_connect_accept (comm, root, NULL, send_first, &newcomp);
 
+    if ( rank == root ) {
         /* unpublish name */
-
-        /* send list of procs from other app */
-        /* receive list of procs from other app */
     }
 
-    /* bcast list of remote procs to all processes in comm */
-    rc = comp->c_coll.coll_bcast ( &rprocs, totalnumprocs, MPI_UNSIGNED, root, comm);
-    if ( OMPI_SUCCESS != rc ) {
-        goto exit;
-    }
-
-    /* setup the proc-structures for the new processes */
-    for ( i=0; i < totalnumprocs; i++ ) {
-    }
-
-    newcomp = ompi_comm_allocate ( comp->c_local_group->grp_proc_count, totalnumprocs);
-    if ( NULL == newcomp ) {
-        rc = MPI_ERR_INTERN;
-        goto exit;
-    }
-
-    /* Determine context id. It is identical to f_2_c_handle */
-    rc = ompi_comm_nextcid ( newcomp,                   /* new comm */
-                             comp,                      /* old comm */
-                             NULL,                      /* bridge comm */
-                             &lleader,                  /* local leader */
-                             &rleader,                  /* remote_leader */
-                             OMPI_COMM_CID_INTRA_OOB ); /* mode */
-    if ( OMPI_SUCCESS != rc ) {
-        goto exit;
-    }
-
-    /* setup the intercomm-structure using ompi_comm_set (); */
-    rc = ompi_comm_set ( newcomp,                                 /* new comm */
-                         comp,                                    /* old comm */
-                         comp->c_local_group->grp_proc_count,     /* local_size */
-                         comp->c_local_group->grp_proc_pointers,  /* local_procs*/
-                         totalnumprocs,                           /* remote_size */
-                         rprocs,                                  /* remote_procs */
-                         NULL,                                    /* attrs */
-                         comp->error_handler,                     /* error handler */
-                         NULL,                                    /* coll module */
-                         NULL                                     /* topo module */
-                         );
-    if ( MPI_SUCCESS != rc ) {
-        goto exit;
-    }
-
-    /* PROBLEM: do we have to re-start some low level stuff
-       to enable the usage of fast communication devices
-       between the two worlds ? */
-
     /* set array of errorcodes */
     if (MPI_ERRCODES_IGNORE != array_of_errcodes) {
         for ( i=0; i < totalnumprocs; i++ ) {
-            array_of_errcodes[i]=MPI_SUCCESS;
+            array_of_errcodes[i]=rc;
         }
     }
 
- exit:
-    if ( NULL != rprocs) {
-        free ( rprocs );
-    }
-    if ( MPI_SUCCESS != rc ) {
-        *intercomm = MPI_COMM_NULL;
-        return OMPI_ERRHANDLER_INVOKE(comm, rc, FUNC_NAME);
-    }
-
-    return MPI_SUCCESS;
+    *intercomm = newcomp;
+    OMPI_ERRHANDLER_RETURN (rc, comm, rc, FUNC_NAME);
 }
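MPI_Comm_spawn_multiple follows the same publish / connect_accept / unpublish pattern; the only sizing difference is that the remote group spans all count commands, so its size is the sum the retained loop computes. In miniature:

#include <stdio.h>

int main(void)
{
    /* hypothetical request: three commands with their maxprocs */
    int array_of_maxprocs[] = { 2, 4, 1 };
    int count = 3;
    int totalnumprocs = 0;

    /* the remote group of the new intercommunicator spans ALL commands,
     * so its size is the sum of the per-command maxprocs */
    for (int i = 0; i < count; i++) {
        totalnumprocs += array_of_maxprocs[i];
    }
    printf("remote_size = %d\n", totalnumprocs); /* 7 */
    return 0;
}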
@@ -122,7 +122,9 @@ int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader,
                              bridge_comm,                 /* bridge comm */
                              &lleader,                    /* local leader */
                              &rleader,                    /* remote_leader */
-                             OMPI_COMM_CID_INTRA_BRIDGE); /* mode */
+                             OMPI_COMM_CID_INTRA_BRIDGE,  /* mode */
+                             -1 );                        /* send_first */
+
     if ( MPI_SUCCESS != rc ) {
         goto err_exit;
     }
@@ -135,13 +137,26 @@ int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader,
                          rprocs,                    /* remote_procs */
                          NULL,                      /* attrs */
                          local_comm->error_handler, /* error handler*/
-                         NULL,                      /* coll module */
                          NULL                       /* topo mpodule */
                          );
     if ( MPI_SUCCESS != rc ) {
         goto err_exit;
     }
 
+    /* activate comm and init coll-module */
+    rc = ompi_comm_activate ( newcomp,                     /* new comm */
+                              local_comm,                  /* old comm */
+                              bridge_comm,                 /* bridge comm */
+                              &lleader,                    /* local leader */
+                              &rleader,                    /* remote_leader */
+                              OMPI_COMM_CID_INTRA_BRIDGE,  /* mode */
+                              -1,                          /* send_first */
+                              NULL );                      /* coll component */
+
+    if ( MPI_SUCCESS != rc ) {
+        goto err_exit;
+    }
+
 err_exit:
     if ( NULL == rprocs ) {
         free ( rprocs );
@@ -88,7 +88,8 @@ int MPI_Intercomm_merge(MPI_Comm intercomm, int high,
                              NULL,                 /* bridge comm */
                              NULL,                 /* local leader */
                              NULL,                 /* remote_leader */
-                             OMPI_COMM_CID_INTER); /* mode */
+                             OMPI_COMM_CID_INTER,  /* mode */
+                             -1 );                 /* send_first */
     if ( OMPI_SUCCESS != rc ) {
         goto exit;
     }
@@ -101,13 +102,25 @@ int MPI_Intercomm_merge(MPI_Comm intercomm, int high,
                          NULL,                      /* remote_procs */
                          NULL,                      /* attrs */
                          intercomm->error_handler,  /* error handler*/
-                         NULL,                      /* coll module */
                          NULL                       /* topo mpodule */
                          );
     if ( MPI_SUCCESS != rc ) {
         goto exit;
     }
 
+    /* activate communicator and init coll-module */
+    rc = ompi_comm_activate ( newcomp,              /* new comm */
+                              intercomm,            /* old comm */
+                              NULL,                 /* bridge comm */
+                              NULL,                 /* local leader */
+                              NULL,                 /* remote_leader */
+                              OMPI_COMM_CID_INTER,  /* mode */
+                              -1,                   /* send_first */
+                              NULL );               /* coll module */
+    if ( OMPI_SUCCESS != rc ) {
+        goto exit;
+    }
+
 exit:
     if ( NULL != procs ) {
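Taken together, the new flow-chart leaves every communicator-creating path with the same skeleton: pick a CID via ompi_comm_nextcid, fill the structure via ompi_comm_set, then finish with ompi_comm_activate; the dynamic-process entry points shrink to a publish / ompi_comm_connect_accept / unpublish bracket around that skeleton. A stubbed sketch of the shared control flow, where every function is a stand-in and not a real OMPI API:

#include <stdio.h>

/* Stand-ins only: this sketches the converged control flow, not OMPI. */
static int comm_nextcid(int mode, int send_first)
{ (void)mode; (void)send_first; return 0; }
static int comm_set(void)
{ return 0; }
static int comm_activate(int mode, int send_first)
{ (void)mode; (void)send_first; return 0; }

/* The skeleton every creating function now follows. OOB-based modes
 * (accept/connect/join/spawn*) carry a meaningful send_first; purely
 * local modes (dup, intercomm_create, intercomm_merge) pass -1. */
static int create_comm(int mode, int send_first)
{
    int rc;
    if ((rc = comm_nextcid(mode, send_first)) != 0) return rc;
    if ((rc = comm_set()) != 0)                     return rc;
    return comm_activate(mode, send_first);
}

int main(void)
{
    printf("dup-like:     rc=%d\n", create_comm(0, -1));
    printf("accept-like:  rc=%d\n", create_comm(1,  0));
    printf("connect-like: rc=%d\n", create_comm(1,  1));
    return 0;
}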