
- Only minor white-space changes, to clean up

This commit was SVN r7843.
This commit is contained in:
Rainer Keller 2005-10-24 10:36:16 +00:00
parent f92185c43b
commit d6120d32d6
3 changed files with 119 additions and 119 deletions


@@ -30,6 +30,7 @@
#include "mca/gpr/gpr_types.h"
#include "mca/oob/oob_types.h"
#include "request/request.h"
#include "ompi/proc/proc.h"
#if defined(c_plusplus) || defined(__cplusplus)
extern "C" {
@@ -74,49 +75,49 @@ OMPI_DECLSPEC extern opal_class_t ompi_communicator_t_class;
#define OMPI_COMM_BARRIER_TAG -31079
#define OMPI_COMM_ALLREDUCE_TAG -31080
/**
* Modes reqquired for accquiring the new comm-id.
/**
* Modes required for acquiring the new comm-id.
* The first (INTER/INTRA) indicates whether the
* input comm was an inter/intra-comm, the second
* whether the new communicator will be an inter/intra
*comm
* whether the new communicator will be an inter/intra
* comm
*/
#define OMPI_COMM_CID_INTRA 0x00000020
#define OMPI_COMM_CID_INTER 0x00000040
#define OMPI_COMM_CID_INTRA_BRIDGE 0x00000080
#define OMPI_COMM_CID_INTRA_OOB 0x00000100
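The four CID modes above are mutually exclusive selectors, not OR-able bits: a caller picks exactly one, based on what kind of communicator it starts from and which allocation path it needs. A minimal sketch of that selection for the two simple (non-bridge, non-OOB) cases, reusing the OMPI_COMM_INTER flag test that appears further down in this header; the helper itself is hypothetical and not part of the actual implementation:

static int pick_cid_mode(ompi_communicator_t *comm)
{
    /* Hypothetical helper: choose the CID-allocation mode for the
       simple cases, based on whether the input communicator is an
       inter- or an intra-communicator. */
    if (comm->c_flags & OMPI_COMM_INTER) {
        return OMPI_COMM_CID_INTER;   /* input comm is an inter-comm */
    }
    return OMPI_COMM_CID_INTRA;       /* input comm is an intra-comm */
}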
OMPI_DECLSPEC extern ompi_pointer_array_t ompi_mpi_communicators;
OMPI_DECLSPEC extern ompi_pointer_array_t ompi_mpi_communicators;
struct ompi_communicator_t {
opal_object_t c_base;
opal_mutex_t c_lock; /* mutex for name and potentially
opal_object_t c_base;
opal_mutex_t c_lock; /* mutex for name and potentially
attributes */
char c_name[MPI_MAX_OBJECT_NAME];
uint32_t c_contextid;
int c_my_rank;
uint32_t c_flags; /* flags, e.g. intercomm,
uint32_t c_flags; /* flags, e.g. intercomm,
topology, etc. */
ompi_group_t *c_local_group;
ompi_group_t *c_remote_group;
/* Attributes */
opal_hash_table_t *c_keyhash;
/**< inscribing cube dimension */
int c_cube_dim;
int c_cube_dim;
/* Hooks for topo module to hang things */
mca_base_component_t *c_topo_component;
const mca_topo_base_module_1_0_0_t *c_topo;
const mca_topo_base_module_1_0_0_t *c_topo;
/**< structure of function pointers */
mca_topo_base_comm_t *c_topo_comm;
mca_topo_base_comm_t *c_topo_comm;
/**< structure containing basic information about the topology */
struct mca_topo_base_module_comm_t *c_topo_module;
struct mca_topo_base_module_comm_t *c_topo_module;
/**< module specific data */
/* index in Fortran <-> C translation array */
@@ -163,12 +164,12 @@ struct ompi_communicator_t {
*/
static inline int ompi_comm_invalid(ompi_communicator_t* comm)
{
if ((NULL == comm) || (MPI_COMM_NULL == comm) ||
(comm->c_flags & OMPI_COMM_ISFREED ) ||
(OMPI_COMM_IS_INVALID(comm)) )
return true;
else
return false;
if ((NULL == comm) || (MPI_COMM_NULL == comm) ||
(comm->c_flags & OMPI_COMM_ISFREED ) ||
(OMPI_COMM_IS_INVALID(comm)) )
return true;
else
return false;
}
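A typical caller uses this predicate as a guard before touching any other field of the communicator. A hedged sketch of that pattern (the wrapper below is hypothetical; the real MPI bindings additionally invoke the communicator's error handler, which is omitted here):

static int my_query_rank(ompi_communicator_t *comm, int *rank)
{
    /* Hypothetical caller: reject freed/invalid handles before
       reading any field of the communicator. */
    if (ompi_comm_invalid(comm)) {
        return MPI_ERR_COMM;
    }
    *rank = ompi_comm_rank(comm);    /* declared just below */
    return MPI_SUCCESS;
}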
/**
@@ -176,7 +177,7 @@ struct ompi_communicator_t {
*/
static inline int ompi_comm_rank(ompi_communicator_t* comm)
{
return comm->c_my_rank;
return comm->c_my_rank;
}
/**
@@ -184,148 +185,148 @@ struct ompi_communicator_t {
*/
static inline int ompi_comm_size(ompi_communicator_t* comm)
{
return comm->c_local_group->grp_proc_count;
return comm->c_local_group->grp_proc_count;
}
/**
* size of the remote group for inter-communicators.
* returns zero for an intra-communicator
*/
static inline int ompi_comm_remote_size(ompi_communicator_t* comm)
{
if ( comm->c_flags & OMPI_COMM_INTER )
return comm->c_remote_group->grp_proc_count;
else
return 0;
if ( comm->c_flags & OMPI_COMM_INTER )
return comm->c_remote_group->grp_proc_count;
else
return 0;
}
/* return pointer to communicator associated with context id cid,
* No error checking is done*/
static inline ompi_communicator_t *ompi_comm_lookup(uint32_t cid)
{
/* array of pointers to communicators, indexed by context ID */
OMPI_DECLSPEC extern ompi_pointer_array_t ompi_mpi_communicators;
return (ompi_communicator_t*)ompi_pointer_array_get_item(&ompi_mpi_communicators, cid);
static inline ompi_communicator_t *ompi_comm_lookup(uint32_t cid)
{
/* array of pointers to communicators, indexed by context ID */
OMPI_DECLSPEC extern ompi_pointer_array_t ompi_mpi_communicators;
return (ompi_communicator_t*)ompi_pointer_array_get_item(&ompi_mpi_communicators, cid);
}
static inline struct ompi_proc_t* ompi_comm_peer_lookup(ompi_communicator_t* comm, int peer_id)
{
#if OMPI_ENABLE_DEBUG
if(peer_id >= comm->c_remote_group->grp_proc_count) {
opal_output(0, "ompi_comm_lookup_peer: invalid peer index (%d)", peer_id);
return (struct ompi_proc_t *) NULL;
}
if(peer_id >= comm->c_remote_group->grp_proc_count) {
opal_output(0, "ompi_comm_lookup_peer: invalid peer index (%d)", peer_id);
return (struct ompi_proc_t *) NULL;
}
#endif
return comm->c_remote_group->grp_proc_pointers[peer_id];
return comm->c_remote_group->grp_proc_pointers[peer_id];
}
static inline bool ompi_comm_peer_invalid(ompi_communicator_t* comm, int peer_id)
{
if(peer_id < 0 || peer_id >= comm->c_remote_group->grp_proc_count) {
return true;
}
return false;
if(peer_id < 0 || peer_id >= comm->c_remote_group->grp_proc_count) {
return true;
}
return false;
}
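Taken together, the three helpers above let code go from a context ID and a peer rank to the corresponding ompi_proc_t. A hedged usage sketch (the resolver function itself is hypothetical):

static struct ompi_proc_t *resolve_peer(uint32_t cid, int peer)
{
    /* Hypothetical helper: map (context ID, peer rank) to a proc
       pointer using only the inline accessors declared above. */
    ompi_communicator_t *comm = ompi_comm_lookup(cid);  /* does no error checking itself */
    if (NULL == comm || ompi_comm_peer_invalid(comm, peer)) {
        return NULL;
    }
    return ompi_comm_peer_lookup(comm, peer);
}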
/**
* Initialise MPI_COMM_WORLD and MPI_COMM_SELF
/**
* Initialise MPI_COMM_WORLD and MPI_COMM_SELF
*/
int ompi_comm_init(void);
OMPI_DECLSPEC int ompi_comm_link_function(void);
/**
* extract the local group from a communicator
*/
int ompi_comm_group ( ompi_communicator_t *comm, ompi_group_t **group );
/**
* create a communicator based on a group
* extract the local group from a communicator
*/
int ompi_comm_create ( ompi_communicator_t* comm, ompi_group_t *group,
ompi_communicator_t** newcomm );
int ompi_comm_group (ompi_communicator_t *comm, ompi_group_t **group);
/**
* create a communicator based on a group
*/
int ompi_comm_create (ompi_communicator_t* comm, ompi_group_t *group,
ompi_communicator_t** newcomm);
/**
* create a cartesian communicator
*/
int ompi_topo_create (ompi_communicator_t *old_comm,
int ompi_topo_create (ompi_communicator_t *old_comm,
int ndims_or_nnodes,
int *dims_or_index,
int *periods_or_edges,
bool reorder,
ompi_communicator_t **comm_cart,
int cart_or_graph);
/**
* split a communicator based on color and key. Parameters
* are identical to the MPI-counterpart of the function.
*
*
* @param comm: input communicator
* @param color
* @param key
*
* @
*/
int ompi_comm_split ( ompi_communicator_t *comm, int color, int key,
int ompi_comm_split (ompi_communicator_t *comm, int color, int key,
ompi_communicator_t** newcomm, bool pass_on_topo);
/**
* dup a communicator. Parameter are identical to the MPI-counterpart
* of the function. It has been extracted, since we need to be able
/**
* dup a communicator. Parameter are identical to the MPI-counterpart
* of the function. It has been extracted, since we need to be able
* to dup a communicator internally as well.
*
* @param comm: input communicator
*
*/
int ompi_comm_dup ( ompi_communicator_t *comm, ompi_communicator_t **newcomm);
int ompi_comm_dup (ompi_communicator_t *comm, ompi_communicator_t **newcomm);
/**
* free a communicator
* free a communicator
*/
int ompi_comm_free ( ompi_communicator_t **comm );
int ompi_comm_free (ompi_communicator_t **comm);
/**
* allocate a new communicator structure
* allocate a new communicator structure
* @param local_group_size
* @param remote_group_size
*
* this routine allocates the structure, the according local and
* This routine allocates the structure, the according local and
* remote groups, the proc-arrays in the local and remote group.
* It furthermore sets the fortran index correctly,
* It furthermore sets the fortran index correctly,
* and sets all other elements to zero.
*/
ompi_communicator_t* ompi_comm_allocate ( int local_group_size,
int remote_group_size );
ompi_communicator_t* ompi_comm_allocate (int local_group_size,
int remote_group_size);
/**
* allocate new communicator ID
* @param newcomm: pointer to the new communicator
* @param oldcomm: original comm
* @param bridgecomm: bridge comm for intercomm_create
* @param mode: combination of input
* @param mode: combination of input
* OMPI_COMM_CID_INTRA: intra-comm
* OMPI_COMM_CID_INTER: inter-comm
* OMPI_COMM_CID_INTRA_BRIDGE: 2 intracomms connected by
* OMPI_COMM_CID_INTER: inter-comm
* OMPI_COMM_CID_INTRA_BRIDGE: 2 intracomms connected by
* a bridge comm. local_leader
* and remote leader are in this
* case an int (rank in bridge-comm).
* OMPI_COMM_CID_INTRA_OOB: 2 intracomms, leaders talk
* through OOB. lleader and rleader
* are the required contact information.
* @param send_first: to avoid a potential deadlock for
* @param send_first: to avoid a potential deadlock for
* the OOB version.
* This routine has to be thread safe in the final version.
*/
int ompi_comm_nextcid ( ompi_communicator_t* newcomm,
ompi_communicator_t* oldcomm,
ompi_communicator_t* bridgecomm,
void* local_leader,
void* remote_leader,
int mode,
int ompi_comm_nextcid ( ompi_communicator_t* newcomm,
ompi_communicator_t* oldcomm,
ompi_communicator_t* bridgecomm,
void* local_leader,
void* remote_leader,
int mode,
int send_first);
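For the plain intra-communicator case the bridge and leader arguments are not needed, so a caller can pass placeholders. A hedged sketch of such a call (the NULL/0 conventions for the unused parameters are an assumption of this sketch, not taken from the implementation):

static int allocate_intra_cid(ompi_communicator_t *newcomm,
                              ompi_communicator_t *oldcomm)
{
    /* Hypothetical wrapper: allocate a CID for a new intra-communicator
       derived from oldcomm.  bridgecomm and the leaders are unused in
       this mode; send_first only matters for the OOB variant. */
    return ompi_comm_nextcid(newcomm, oldcomm,
                             NULL,          /* bridgecomm */
                             NULL, NULL,    /* local/remote leader */
                             OMPI_COMM_CID_INTRA,
                             0);            /* send_first */
}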
/**
* shut down the communicator infrastructure.
@@ -338,33 +339,31 @@ struct ompi_communicator_t {
*/
int ompi_comm_set ( ompi_communicator_t* newcomm,
ompi_communicator_t* oldcomm,
int local_size,
int local_size,
struct ompi_proc_t **local_procs,
int remote_size,
struct ompi_proc_t **remote_procs,
opal_hash_table_t *attr,
ompi_errhandler_t *errh,
ompi_errhandler_t *errh,
mca_base_component_t *topocomponent );
/**
* This is a short-hand routine used in intercomm_create.
* The routine makes sure, that all processes have afterwards
* a list of ompi_proc_t pointers for the remote group.
*/
struct ompi_proc_t **ompi_comm_get_rprocs ( ompi_communicator_t *local_comm,
ompi_communicator_t *bridge_comm,
struct ompi_proc_t **ompi_comm_get_rprocs ( ompi_communicator_t *local_comm,
ompi_communicator_t *bridge_comm,
int local_leader,
int remote_leader,
orte_rml_tag_t tag,
int rsize);
/**
* This routine verifies, whether local_group and remote group are overlapping
* in intercomm_create
*/
int ompi_comm_overlapping_groups (int size, struct ompi_proc_t **lprocs,
int rsize, struct ompi_proc_t ** rprocs);
int ompi_comm_overlapping_groups (int size, struct ompi_proc_t ** lprocs,
int rsize, struct ompi_proc_t ** rprocs);
/**
* This is a routine determining whether the local or the
@@ -375,22 +374,22 @@ struct ompi_communicator_t {
int high );
int ompi_comm_activate ( ompi_communicator_t* newcomm,
ompi_communicator_t* oldcomm,
ompi_communicator_t* bridgecomm,
void* local_leader,
void* remote_leader,
int mode,
int ompi_comm_activate ( ompi_communicator_t* newcomm,
ompi_communicator_t* oldcomm,
ompi_communicator_t* bridgecomm,
void* local_leader,
void* remote_leader,
int mode,
int send_first,
mca_base_component_t *collcomponent );
/**
* a simple function to dump the structure
*/
int ompi_comm_dump ( ompi_communicator_t *comm );
/**
/**
* a simple function to determint a port number
*/
int ompi_open_port (char *port_name);
@@ -401,12 +400,12 @@ struct ompi_communicator_t {
*/
char * ompi_parse_port (char *port_name, orte_rml_tag_t *tag) ;
/**
/**
* routines handling name publishing, lookup and unpublishing
*/
int ompi_comm_namepublish ( char *service_name, char *port_name );
char* ompi_comm_namelookup ( char *service_name );
int ompi_comm_nameunpublish ( char *service_name );
int ompi_comm_nameunpublish ( char *service_name );
/* setting name */
@@ -422,17 +421,17 @@ struct ompi_communicator_t {
/* A helper routine for ompi_comm_connect_accept.
* This routine is necessary, since in the connect/accept case, the processes
* executing the connect operation have the OOB contact information of the
* leader of the remote group, however, the processes executing the
* accept get their own port_name = OOB contact information passed in as
* leader of the remote group, however, the processes executing the
* accept get their own port_name = OOB contact information passed in as
* an argument. This is however useless.
*
*
* Therefore, the two root processes exchange this information at this point.
*
*/
orte_process_name_t *ompi_comm_get_rport (orte_process_name_t *port,
int send_first, struct ompi_proc_t *proc,
orte_rml_tag_t tag);
orte_rml_tag_t tag);
/*
@@ -444,12 +443,12 @@ struct ompi_communicator_t {
void ompi_comm_reg_finalize(void);
/* start the new processes from MPI_Comm_spawn_multiple. Initial
* version, very rough
* version, very rough
*/
int ompi_comm_start_processes(int count, char **array_of_commands,
char ***array_of_argv,
int *array_of_maxprocs,
MPI_Info *array_of_info,
char ***array_of_argv,
int *array_of_maxprocs,
MPI_Info *array_of_info,
char *port_name);
/*
@@ -461,7 +460,7 @@ struct ompi_communicator_t {
*/
int ompi_comm_dyn_init(void);
/**
/**
* Executes internally a disconnect on all dynamic communicators
* in case the user did not disconnect them.
*/
@@ -480,17 +479,17 @@ struct ompi_communicator_t {
of more than one initiated ibarrier. This is required for waiting
for all still connected processes in MPI_Finalize.
ompi_comm_disconnect_init returns a handle, which has to be passed in
ompi_comm_disconnect_init returns a handle, which has to be passed in
to ompi_comm_disconnect_waitall. The second routine blocks, until
all non-blocking barriers described by the handles are finished.
The communicators can than be released.
*/
struct ompi_comm_disconnect_obj {
ompi_communicator_t *comm;
int size;
ompi_request_t **reqs;
int buf;
ompi_communicator_t *comm;
int size;
ompi_request_t **reqs;
int buf;
};
typedef struct ompi_comm_disconnect_obj ompi_comm_disconnect_obj;
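Following the handle-based flow described in the comment above, a caller would start one non-blocking barrier per communicator and then wait for all of them before releasing the communicators. A hedged sketch of that flow, assuming (they are not shown in this hunk) an ompi_comm_disconnect_init() that returns one such handle per communicator and an ompi_comm_disconnect_waitall() that blocks on an array of handles:

static void disconnect_all(ompi_comm_disconnect_obj **objs,
                           ompi_communicator_t **comms, int n)
{
    /* Sketch only: the signatures of the two disconnect routines are
       assumptions, inferred from the comment above. */
    int i;
    for (i = 0; i < n; i++) {
        objs[i] = ompi_comm_disconnect_init(comms[i]);  /* one ibarrier each */
    }
    ompi_comm_disconnect_waitall(n, objs);  /* blocks until all barriers finish */
    /* the communicators can then be released */
}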


@@ -53,7 +53,7 @@ struct ompi_datatype_t;
typedef int32_t (*conversion_fct_t)( uint32_t count,
const void* from, uint32_t from_len, long from_extent,
void* to, uint32_t in_length, long to_extent );
void* to, uint32_t to_length, long to_extent );
typedef struct ompi_convertor_t ompi_convertor_t;
typedef int32_t (*convertor_advance_fct_t)( ompi_convertor_t* pConvertor,
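The corrected typedef names both buffer lengths explicitly (from_len and to_length). A hedged example of a function matching this signature, assuming the lengths are byte counts; it is purely illustrative and not one of the predefined conversion routines:

#include <stdint.h>
#include <string.h>

static int32_t copy_int4_sketch( uint32_t count,
                                 const void* from, uint32_t from_len, long from_extent,
                                 void* to, uint32_t to_length, long to_extent )
{
    /* Illustrative only: copy `count' 4-byte items, honouring the
       separate source and destination extents.  The byte-count
       interpretation of from_len/to_length is an assumption. */
    uint32_t i;
    if (from_len < count * 4 || to_length < count * 4) {
        return -1;
    }
    for (i = 0; i < count; i++) {
        memcpy((char*)to + i * to_extent,
               (const char*)from + i * from_extent, 4);
    }
    return (int32_t)count;
}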


@@ -70,6 +70,7 @@ OMPI_DECLSPEC extern ompi_pointer_array_t *ompi_datatype_f_to_c_table;
#define DT_FLAG_DATA_CPP 0x8000
#define DT_FLAG_DATA_FORTRAN 0xC000
#define DT_FLAG_DATA_LANGUAGE 0xC000
/*
* We should make the difference here between the predefined contiguous and non contiguous
* datatypes. The DT_FLAG_BASIC is held by all predefined contiguous datatypes.
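Because DT_FLAG_DATA_LANGUAGE covers the same bits as the individual language values, it serves as a mask for extracting the language information from a datatype's flags. A hedged sketch of such a test, using only the constants visible in this hunk (the width and name of the flag-carrying field are assumptions):

static int is_fortran_predefined(uint16_t dt_flags)
{
    /* Sketch only: mask out the language bits and compare against the
       Fortran value; DT_FLAG_DATA_CPP would be tested the same way. */
    return (dt_flags & DT_FLAG_DATA_LANGUAGE) == DT_FLAG_DATA_FORTRAN;
}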