/*
 * $HEADERS$
 */
#include "ompi_config.h"

#include <stdio.h>

#include "mpi.h"
#include "mpi/c/bindings.h"
#include "runtime/runtime.h"
#include "info/info.h"
#include "communicator/communicator.h"

#if OMPI_HAVE_WEAK_SYMBOLS && OMPI_PROFILING_DEFINES
#pragma weak MPI_Comm_spawn_multiple = PMPI_Comm_spawn_multiple
#endif

#if OMPI_PROFILING_DEFINES
#include "mpi/c/profile/defines.h"
#endif
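
/*
 * Illustrative usage sketch (assumption: "worker" and "helper" are
 * hypothetical executable names, not part of this code base):
 *
 *   char     *cmds[2]     = { "worker", "helper" };
 *   char    **argvs[2]    = { MPI_ARGV_NULL, MPI_ARGV_NULL };
 *   int       maxprocs[2] = { 4, 2 };
 *   MPI_Info  infos[2]    = { MPI_INFO_NULL, MPI_INFO_NULL };
 *   MPI_Comm  intercomm;
 *
 *   MPI_Comm_spawn_multiple (2, cmds, argvs, maxprocs, infos, 0,
 *                            MPI_COMM_WORLD, &intercomm,
 *                            MPI_ERRCODES_IGNORE);
 */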

int MPI_Comm_spawn_multiple(int count, char **array_of_commands, char ***array_of_argv,
                            int *array_of_maxprocs, MPI_Info *array_of_info,
                            int root, MPI_Comm comm, MPI_Comm *intercomm,
                            int *array_of_errcodes)
{
    int i, rc, rank;
    int totalnumprocs=0;
    uint32_t *rprocs=NULL;
    ompi_communicator_t *comp, *newcomp;
    int mode;

    comp = (ompi_communicator_t *) comm;

    if ( MPI_PARAM_CHECK ) {
        if ( ompi_mpi_finalized )
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_INTERN,
                                          "MPI_Comm_spawn_multiple");

        if ( MPI_COMM_NULL == comm || ompi_comm_invalid (comm))
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
                                          "MPI_Comm_spawn_multiple");

        if ( OMPI_COMM_IS_INTER(comm))
            return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COMM,
                                          "MPI_Comm_spawn_multiple");

        /* valid ranks are 0 ... size-1, so root == size is also invalid */
        if ( 0 > root || ompi_comm_size(comm) <= root )
            return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG,
                                          "MPI_Comm_spawn_multiple");

        if ( NULL == intercomm )
            return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG,
                                          "MPI_Comm_spawn_multiple");

        /* array_of_errcodes is deliberately not checked against NULL:
           MPI_ERRCODES_IGNORE is a NULL pointer in this implementation
           and is a legal argument per MPI-2. */
    }

    rank = ompi_comm_rank ( comm );
    if ( MPI_PARAM_CHECK ) {
        if ( rank == root ) {
            if ( 0 > count )
                return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG,
                                              "MPI_Comm_spawn_multiple");

            if ( NULL == array_of_commands )
                return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG,
                                              "MPI_Comm_spawn_multiple");

            /* array_of_argv is deliberately not checked against NULL:
               MPI_ARGVS_NULL is a NULL pointer in this implementation
               and is mapped to "no arguments" further below. */

            if ( NULL == array_of_maxprocs )
                return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG,
                                              "MPI_Comm_spawn_multiple");

            if ( NULL == array_of_info )
                return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG,
                                              "MPI_Comm_spawn_multiple");

            for ( i=0; i<count; i++ ) {
                if ( NULL == array_of_commands[i] )
                    return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG,
                                                  "MPI_Comm_spawn_multiple");

                /* array_of_argv[i] may legally be MPI_ARGV_NULL (a NULL
                   pointer in this implementation), so it is not rejected
                   here either. */

                if ( 0 > array_of_maxprocs[i] )
                    return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG,
                                                  "MPI_Comm_spawn_multiple");
            }
        }
    }

    if ( rank == root ) {
        for ( i=0; i < count; i++ ) {
            totalnumprocs += array_of_maxprocs[i];

            /* parse the info[i] */

            /* check potentially for:
               - "host": desired host where to spawn the processes
               - "arch": desired architecture
               - "wdir": directory in which the executable can be found
               - "path": list of directories to search for the executable
               - "file": file in which additional information is provided
               - "soft": see page 92 of MPI-2
            */
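#if 0
            /* Sketch only: how one of these keys could be queried.
               Assumption: ompi_info_get mirrors MPI_Info_get
               (key in, bounded value buffer and flag out). */
            char wdir[MPI_MAX_INFO_VAL + 1];
            int  flag;
            ompi_info_get ( array_of_info[i], "wdir",
                            MPI_MAX_INFO_VAL, wdir, &flag );
            if ( flag ) {
                /* change to 'wdir' before starting array_of_commands[i] */
            }
#endif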
        }
    }

    /* bcast totalnumprocs to all processes in comm and allocate the rprocs array */
    rc = comp->c_coll.coll_bcast_intra ( &totalnumprocs, 1, MPI_INT, root, comm);
    if ( OMPI_SUCCESS != rc ) {
        goto exit;
    }

    rprocs = (uint32_t *) malloc (totalnumprocs * sizeof(uint32_t));
    if ( NULL == rprocs ) {
        rc = MPI_ERR_INTERN;
        goto exit;
    }

    if ( rank == root ) {
        /* map a potential array_of_argv == MPI_ARGVS_NULL to a correct value */
        /* map a potential array_of_argv[i] == MPI_ARGV_NULL to a correct value;
           not required by the standard. */
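#if 0
        /* Sketch only of the intended mapping: both MPI_ARGVS_NULL and
           MPI_ARGV_NULL simply mean "no command line arguments". */
        for ( i=0; i < count; i++ ) {
            char **argv_i;
            if ( MPI_ARGVS_NULL == array_of_argv ||
                 MPI_ARGV_NULL  == array_of_argv[i] ) {
                argv_i = NULL;              /* spawn command i without args */
            }
            else {
                argv_i = array_of_argv[i];  /* NULL-terminated arg list */
            }
            /* hand argv_i to the process starter for command i */
        }
#endif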

        /* start processes */

        /* publish name */

        /* accept connection from the other group.
           Root in the new application is rank 0 in their COMM_WORLD? */

        /* unpublish name */

        /* send list of our procs to the other app */

        /* receive list of procs from the other app */
    }

    /* bcast the list of remote procs to all processes in comm;
       note: the buffer itself is broadcast, not the address of the
       local pointer variable */
    rc = comp->c_coll.coll_bcast_intra ( rprocs, totalnumprocs, MPI_UNSIGNED, root, comm);
    if ( OMPI_SUCCESS != rc ) {
        goto exit;
    }

    /* setup the proc-structures for the new processes */
    for ( i=0; i < totalnumprocs; i++ ) {
        /* TODO: not yet implemented */
    }

    /* setup the intercomm-structure using ompi_comm_set (); */
    newcomp = ompi_comm_set ( comp,                                   /* old comm */
                              comp->c_local_group->grp_proc_count,    /* local_size */
                              comp->c_local_group->grp_proc_pointers, /* local_procs*/
                              totalnumprocs,                          /* remote_size */
                              rprocs,                                 /* remote_procs */
                              NULL,                                   /* attrs */
                              comp->error_handler,                    /* error handler */
                              NULL,                                   /* coll module */
                              NULL                                    /* topo module */
                              );
    if ( MPI_COMM_NULL == newcomp ) {
        rc = MPI_ERR_INTERN;   /* make sure the failure is reported at exit */
        goto exit;
    }

    /* Determine the context id. It is identical to the f_2_c_handle */

    /* Assumption: the CID agreement for a spawn has to run over the
       out-of-band channel (OMPI_COMM_CID_INTRA_OOB), since the new
       processes are not yet reachable through the regular transports. */
    mode = OMPI_COMM_CID_INTRA_OOB;

    rc = ompi_comm_nextcid ( newcomp,       /* new communicator */
                             comp,          /* old comm */
                             NULL,          /* bridge comm */
                             MPI_UNDEFINED, /* local leader */
                             MPI_UNDEFINED, /* remote_leader */
                             mode );        /* mode */
    if ( OMPI_SUCCESS != rc ) {
        goto exit;
    }

    /* PROBLEM: do we have to re-start some low level stuff
       to enable the usage of fast communication devices
       between the two worlds? */

    /* set array of errorcodes */
    if (MPI_ERRCODES_IGNORE != array_of_errcodes) {
        for ( i=0; i < totalnumprocs; i++ ) {
            array_of_errcodes[i]=MPI_SUCCESS;
        }
    }

 exit:
    if ( NULL != rprocs) {
        free ( rprocs );
    }
    if ( MPI_SUCCESS != rc ) {
        *intercomm = MPI_COMM_NULL;
        return OMPI_ERRHANDLER_INVOKE(comm, rc, "MPI_Comm_spawn_multiple");
    }

    /* hand the new intercommunicator back to the caller; on the success
       path newcomp has always been set by ompi_comm_set above */
    *intercomm = (MPI_Comm) newcomp;
    return MPI_SUCCESS;
}