1
1

Adding/fixing the implementation of various communicator functions.

Attention: the routines are only rudimentarily tested, since
collective operations are required for the full functionality.

Reviewed by Jeff, Rich and Tim.

This commit was SVN r1147.
Этот коммит содержится в:
Edgar Gabriel 2004-05-21 19:32:18 +00:00
родитель 5f570a8b62
Коммит afdfb83c6c
8 изменённых файлов: 201 добавлений и 94 удалений

Просмотреть файл

@ -6,6 +6,9 @@
#include "mpi.h"
#include "mpi/c/bindings.h"
#include "runtime/runtime.h"
#include "errhandler/errhandler.h"
#include "communicator/communicator.h"
#if LAM_HAVE_WEAK_SYMBOLS && LAM_PROFILING_DEFINES
#pragma weak MPI_Comm_c2f = PMPI_Comm_c2f
@ -15,10 +18,21 @@
#include "mpi/c/profile/defines.h"
#endif
MPI_Fint MPI_Comm_c2f(MPI_Comm comm) {
/*
* Anju:
* Dont know what it is supposed to return
*/
return MPI_SUCCESS;
MPI_Fint MPI_Comm_c2f(MPI_Comm comm)
{
    /* Convert a C communicator handle to its Fortran integer handle
       (the communicator's index in the global f2c pointer array). */
    lam_communicator_t *c_comm = (lam_communicator_t *) comm;

    if (MPI_PARAM_CHECK) {
        /* No handle conversion is legal once MPI has been finalized. */
        if (lam_mpi_finalized) {
            return LAM_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_INTERN,
                                         "MPI_Comm_c2f");
        }
        /* Reject handles that do not name a live communicator. */
        if (lam_comm_invalid(c_comm)) {
            return LAM_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
                                         "MPI_Comm_c2f");
        }
    }

    /* MPI_COMM_NULL is a real communicator object with its own f2c
       index, so it needs no special case here. */
    return (MPI_Fint) c_comm->c_f_to_c_index;
}

Просмотреть файл

@ -3,11 +3,10 @@
*/
#include "lam_config.h"
#include <stdio.h>
#include <string.h>
#include "mpi.h"
#include "mpi/c/bindings.h"
#include "runtime/runtime.h"
#include "mpi/c/bindings.h"
#include "communicator/communicator.h"
#if LAM_HAVE_WEAK_SYMBOLS && LAM_PROFILING_DEFINES
@ -22,7 +21,8 @@ int MPI_Comm_dup(MPI_Comm comm, MPI_Comm *newcomm) {
/* local variables */
lam_communicator_t *comp, *newcomp;
int rc;
int rsize, mode;
lam_proc_t **rprocs;
/* argument checking */
if ( MPI_PARAM_CHECK ) {
@ -40,53 +40,35 @@ int MPI_Comm_dup(MPI_Comm comm, MPI_Comm *newcomm) {
}
comp = (lam_communicator_t *) comm;
/* This routine allocates an element, allocates the according groups,
sets the f2c handle and increases the reference counters of
comm, group and remote_group */
newcomp = lam_comm_allocate (comp->c_local_group->grp_proc_count,
comp->c_remote_group->grp_proc_count );
/* copy local group */
newcomp->c_local_group->grp_my_rank = comp->c_local_group->grp_my_rank;
memcpy (newcomp->c_local_group->grp_proc_pointers,
comp->c_local_group->grp_proc_pointers,
comp->c_local_group->grp_proc_count * sizeof(lam_proc_t *));
lam_group_increment_proc_count(newcomp->c_local_group);
if ( comp->c_flags & LAM_COMM_INTER ) {
/* copy remote group */
memcpy (newcomp->c_remote_group->grp_proc_pointers,
comp->c_remote_group->grp_proc_pointers,
comp->c_remote_group->grp_proc_count * sizeof(lam_proc_t *));
lam_group_increment_proc_count(newcomp->c_remote_group);
/* Get new context id */
newcomp->c_contextid = lam_comm_nextcid (comm, LAM_COMM_INTER_INTER);
if ( LAM_COMM_IS_INTER ( comp ) ){
rsize = comp->c_remote_group->grp_proc_count;
rprocs = comp->c_remote_group->grp_proc_pointers;
mode = LAM_COMM_INTER_INTER;
}
else {
/* Get new context id */
newcomp->c_contextid = lam_comm_nextcid (comm, LAM_COMM_INTRA_INTRA);
rsize = 0;
rprocs = NULL;
mode = LAM_COMM_INTRA_INTRA;
}
/* other fields */
newcomp->c_my_rank = comp->c_my_rank;
newcomp->c_flags = comp->c_flags;
newcomp = lam_comm_set ( mode, /* mode */
comp, /* old comm */
NULL, /* bridge comm */
comp->c_local_group->grp_proc_count, /* local_size */
comp->c_local_group->grp_proc_pointers, /* local_procs*/
rsize, /* remote_size */
rprocs, /* remote_procs */
comp->c_keyhash, /* attrs */
comp->error_handler, /* error handler */
NULL, /* coll module, to be modified */
NULL, /* topo module, to be modified */
MPI_UNDEFINED, /* local leader */
MPI_UNDEFINED /* remote leader */
);
/* Copy topology information */
if ( newcomp == MPI_COMM_NULL )
LAM_ERRHANDLER_INVOKE (comm, MPI_ERR_INTERN, "MPI_Comm_dup");
/* Copy error handler */
newcomp->error_handler = comp->error_handler;
OBJ_RETAIN ( comp->error_handler );
/* Copy attributes */
rc = lam_attr_copy_all ( COMM_ATTR, comp, newcomp );
if ( rc != LAM_SUCCESS ) {
lam_comm_free ( (MPI_Comm *)newcomp );
return LAM_ERRHANDLER_INVOKE ( comm, rc, "MPI_Comm_dup");
}
*newcomm = (MPI_Comm) newcomp;
return MPI_SUCCESS;
*newcomm = newcomp;
return ( MPI_SUCCESS );
}

Просмотреть файл

@ -6,6 +6,9 @@
#include "mpi.h"
#include "mpi/c/bindings.h"
#include "runtime/runtime.h"
#include "errhandler/errhandler.h"
#include "communicator/communicator.h"
#if LAM_HAVE_WEAK_SYMBOLS && LAM_PROFILING_DEFINES
#pragma weak MPI_Comm_f2c = PMPI_Comm_f2c
@ -16,10 +19,20 @@
#endif
MPI_Comm MPI_Comm_f2c(MPI_Fint comm) {
/*
* Anju:
* Check if what I have returned is right
*/
return MPI_COMM_NULL;
MPI_Comm MPI_Comm_f2c(MPI_Fint comm)
{
    /* Convert a Fortran integer communicator handle back to the
       corresponding C communicator pointer.  The Fortran handle is an
       index into the global communicator pointer array; an
       out-of-range index yields MPI_COMM_NULL. */
    size_t o_index = (size_t) comm;

    if ( MPI_PARAM_CHECK ) {
        if ( lam_mpi_finalized )
            return LAM_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_INTERN,
                                         "MPI_Comm_f2c");

        /* Check the signed Fortran handle for negativity BEFORE the
           cast: o_index is an unsigned size_t, so the previous
           "0 > o_index" test was always false.  (A negative handle
           would still have been caught by the upper-bound test after
           wrapping around, but only by accident.) */
        if ( comm < 0 ||
             o_index >= lam_pointer_array_get_size(&lam_mpi_communicators)) {
            return MPI_COMM_NULL;
        }
    }

    return lam_mpi_communicators.addr[o_index];
}

Просмотреть файл

@ -30,10 +30,6 @@ int MPI_Comm_free(MPI_Comm *comm) {
"MPI_Comm_free");
}
/* Call attribute delete functions */
/* free the object */
lam_comm_free ( comm );
*comm = MPI_COMM_NULL;

Просмотреть файл

@ -18,8 +18,8 @@
#endif
int MPI_Comm_rank(MPI_Comm comm, int *rank) {
int MPI_Comm_rank(MPI_Comm comm, int *rank)
{
if ( MPI_PARAM_CHECK ) {
if ( lam_mpi_finalized )
return LAM_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_INTERN,

Просмотреть файл

@ -20,10 +20,8 @@
#endif
int MPI_Comm_remote_group(MPI_Comm comm, MPI_Group *group) {
lam_communicator_t *comp;
lam_group_t *group_p;
int MPI_Comm_remote_group(MPI_Comm comm, MPI_Group *group)
{
if ( MPI_PARAM_CHECK ) {
if ( lam_mpi_finalized )
@ -39,26 +37,13 @@ int MPI_Comm_remote_group(MPI_Comm comm, MPI_Group *group) {
"MPI_Comm_remote_group");
}
comp = (lam_communicator_t *) comm;
if ( comp->c_flags & LAM_COMM_INTER ) {
/* get new group struct */
group_p=lam_group_allocate(comp->c_remote_group->grp_proc_count);
if( NULL == group_p ) {
return LAM_ERRHANDLER_INVOKE (comm, MPI_ERR_INTERN,
"MPI_Comm_remote_group");
}
group_p->grp_my_rank = MPI_UNDEFINED;
memcpy ( group_p->grp_proc_pointers,
comp->c_remote_group->grp_proc_pointers,
group_p->grp_proc_count * sizeof ( lam_proc_t *));
/* increment proc reference counters */
lam_group_increment_proc_count(group_p);
if ( LAM_COMM_IS_INTER(comm) ) {
OBJ_RETAIN(comm->c_remote_group);
}
else
return LAM_ERRHANDLER_INVOKE (comm, MPI_ERR_COMM,
"MPI_Comm_remote_group");
*group = (MPI_Group) group_p;
*group = (MPI_Group) comm->c_remote_group;
return MPI_SUCCESS;
}

Просмотреть файл

@ -6,6 +6,7 @@
#include "mpi.h"
#include "mpi/c/bindings.h"
#include "mca/pml/pml.h"
#include "runtime/runtime.h"
#include "communicator/communicator.h"
@ -20,9 +21,12 @@
int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader,
MPI_Comm bridge_comm, int remote_leader,
int tag, MPI_Comm *newintercomm) {
int tag, MPI_Comm *newintercomm)
{
int local_size, local_rank;
lam_communicator_t *newcomp;
lam_proc_t **rprocs=NULL;
int rc, rsize;
if ( MPI_PARAM_CHECK ) {
if ( lam_mpi_finalized )
@ -42,12 +46,12 @@ int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader,
return LAM_ERRHANDLER_INVOKE ( local_comm, MPI_ERR_ARG,
"MPI_Intercomm_create");
}
local_size = lam_comm_size ( local_comm );
local_rank = lam_comm_size ( local_comm );
local_rank = lam_comm_rank ( local_comm );
if ( MPI_PARAM_CHECK ) {
if ( local_leader < 0 || local_leader > local_size )
if ( 0 < local_leader || local_leader > local_size )
return LAM_ERRHANDLER_INVOKE ( local_comm, MPI_ERR_ARG,
"MPI_Intercomm_create");
@ -55,15 +59,73 @@ int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader,
just have to be valid at the local_leader */
if ( local_rank == local_leader ) {
if ( MPI_COMM_NULL == bridge_comm || lam_comm_invalid ( bridge_comm) ||
bridge_comm->c_flags & LAM_COMM_INTER )
bridge_comm->c_flags & LAM_COMM_INTER ) {
return LAM_ERRHANDLER_INVOKE ( local_comm, MPI_ERR_COMM,
"MPI_Intercomm_create");
if ( remote_leader < 0 || remote_leader > lam_comm_size(bridge_comm))
}
if ( remote_leader < 0 || remote_leader > lam_comm_size(bridge_comm)) {
return LAM_ERRHANDLER_INVOKE ( local_comm, MPI_ERR_ARG,
"MPI_Intercomm_create");
}
} /* if ( local_rank == local_leader ) */
}
if ( local_rank == local_leader ) {
MPI_Request req;
MPI_Status status;
/* local leader exchange group sizes and vpid lists */
rc =mca_pml.pml_irecv (&rsize, 1, MPI_INT, remote_leader, tag, bridge_comm,
&req );
if ( rc != MPI_SUCCESS ) {
goto err_exit;
}
rc = mca_pml.pml_send ( &local_size, 1, MPI_INT, remote_leader, tag,
MCA_PML_BASE_SEND_STANDARD, bridge_comm );
if ( rc != MPI_SUCCESS ) {
goto err_exit;
}
rc = mca_pml.pml_wait ( 1, &req, NULL, &status);
if ( rc != MPI_SUCCESS ) {
goto err_exit;
}
}
/* bcast size and vpid lists to all processes in local_comm */
rc = local_comm->c_coll.coll_bcast_intra ( &rsize, 1, MPI_INT, local_leader,
local_comm );
if ( rc != MPI_SUCCESS ) {
goto err_exit;
}
rprocs = lam_comm_get_rprocs ( local_comm, bridge_comm, local_leader,
remote_leader, tag, rsize );
newcomp = lam_comm_set ( LAM_COMM_INTRA_INTER, /* mode */
local_comm, /* old comm */
bridge_comm, /* bridge comm */
local_comm->c_local_group->grp_proc_count, /* local_size */
local_comm->c_local_group->grp_proc_pointers, /* local_procs*/
rsize, /* remote_size */
rprocs, /* remote_procs */
NULL, /* attrs */
local_comm->error_handler, /* error handler*/
NULL, /* coll module */
NULL, /* topo mpodule */
local_leader, /* local leader */
remote_leader /* remote leader */
);
if ( newcomp == MPI_COMM_NULL ) {
return LAM_ERRHANDLER_INVOKE (local_comm, MPI_ERR_INTERN, "MPI_Intercomm_create");
}
err_exit:
if ( NULL == rprocs ) {
free ( rprocs );
}
*newintercomm = newcomp;
return MPI_SUCCESS;
}

Просмотреть файл

@ -17,9 +17,17 @@
#include "mpi/c/profile/defines.h"
#endif
int MPI_Intercomm_merge(MPI_Comm intercomm, int high,
MPI_Comm *newcomm) {
#define INTERCOMM_MERGE_TAG 1010
int MPI_Intercomm_merge(MPI_Comm intercomm, int high,
MPI_Comm *newcomm)
{
lam_communicator_t *newcomp;
lam_proc_t **procs=NULL;
int local_size, remote_size;
int local_rank;
int first;
int total_size;
if ( MPI_PARAM_CHECK ) {
if ( lam_mpi_finalized )
@ -36,5 +44,52 @@ int MPI_Intercomm_merge(MPI_Comm intercomm, int high,
"MPI_Intercomm_merge");
}
local_size = lam_comm_size ( intercomm );
local_rank = lam_comm_rank ( intercomm );
remote_size = lam_comm_remote_size ( intercomm );
total_size = local_size + remote_size;
procs = (lam_proc_t **) malloc ( total_size * sizeof(lam_proc_t *));
if ( NULL == procs ) {
return LAM_ERRHANDLER_INVOKE(intercomm,MPI_ERR_INTERN, "MPI_Intercomm_merge");
}
first = lam_comm_determine_first ( intercomm, high );
if ( first ) {
memcpy ( procs, intercomm->c_local_group->grp_proc_pointers,
local_size * sizeof(lam_proc_t *));
memcpy ( &procs[local_size], intercomm->c_remote_group->grp_proc_pointers,
remote_size * sizeof(lam_proc_t *));
}
else {
memcpy ( procs, intercomm->c_remote_group->grp_proc_pointers,
remote_size * sizeof(lam_proc_t *));
memcpy ( &procs[remote_size], intercomm->c_local_group->grp_proc_pointers,
local_size * sizeof(lam_proc_t *));
}
newcomp = lam_comm_set ( LAM_COMM_INTER_INTRA, /* mode */
intercomm, /* old comm */
NULL, /* bridge comm */
total_size, /* local_size */
procs, /* local_procs*/
0, /* remote_size */
NULL, /* remote_procs */
NULL, /* attrs */
intercomm->error_handler, /* error handler*/
NULL, /* coll module */
NULL, /* topo mpodule */
MPI_UNDEFINED, /* local leader */
MPI_UNDEFINED /* remote leader */
);
if ( newcomp == MPI_COMM_NULL ) {
return LAM_ERRHANDLER_INVOKE (intercomm, MPI_ERR_INTERN, "MPI_Intercomm_merge");
}
if ( NULL != procs ) {
free ( procs );
}
*newcomm = newcomp;
return MPI_SUCCESS;
}