Coding standards...
This commit was SVN r16118.
Parent: 617ff3a413
Commit: 4033a40e4e
@@ -104,7 +104,7 @@ int ompi_comm_set ( ompi_communicator_t **ncomm,
    if (NULL == local_group) {
        /* determine how the list of local_rank can be stored most
           efficiently */
        ret = ompi_group_incl(oldcomm->c_local_group, local_size,
                              local_ranks, &newcomm->c_local_group);
    }
    else {

@@ -229,13 +229,13 @@ int ompi_comm_nextcid ( ompi_communicator_t* newcomm,
        newcomm->c_f_to_c_index = newcomm->c_contextid;
        ompi_pointer_array_set_item (&ompi_mpi_communicators, nextcid, newcomm);

        /* for synchronization purposes, avoids receiving fragments for
           a communicator id which might not yet be known. For single-threaded
           scenarios, this call is in ompi_comm_activate; for multi-threaded
           scenarios, it already has to happen here (before releasing another
           thread into the cid-allocation loop) */
        (allredfnct)(&response, &glresponse, 1, MPI_MIN, comm, bridgecomm,
                     local_leader, remote_leader, send_first );
        OPAL_THREAD_LOCK(&ompi_cid_lock);
        ompi_comm_unregister_cid (comm->c_contextid);
        OPAL_THREAD_UNLOCK(&ompi_cid_lock);

@@ -442,14 +442,14 @@ int ompi_comm_activate ( ompi_communicator_t* newcomm,
            break;
        }

        if (MPI_THREAD_MULTIPLE != ompi_mpi_thread_provided) {
            /* Only execute the synchronization for single-threaded scenarios.
               For multi-threaded cases, the synchronization has already
               been executed in the cid-allocation loop */
            (allredfnct)(&ok, &gok, 1, MPI_MIN, comm, bridgecomm,
                         local_leader, remote_leader, send_first );

        }
    }
    /* Check to see if this process is in the new communicator.

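Both ompi_comm_nextcid and ompi_comm_activate lean on the same property: an MPI_MIN allreduce cannot return on any rank until every rank has entered it, so the call doubles as a barrier guaranteeing that the new context id has been registered everywhere before any traffic for it can arrive. A minimal, self-contained sketch of an allreduce used as that kind of synchronization point (plain MPI, standing in for the allredfnct callback above):

    #include <mpi.h>
    #include <stdio.h>

    /* Sketch: an MPI_MIN allreduce doubles as a barrier, since no rank
     * can observe the reduced value before every rank has contributed.
     * This mirrors the role of (allredfnct)(...) above: once it returns,
     * every process has finished its local setup. */
    int main(int argc, char **argv)
    {
        int response = 1, glresponse = 0;
        MPI_Init(&argc, &argv);

        /* ... per-process setup that peers must not outrun,
           e.g. entering a new id into a lookup table ... */

        MPI_Allreduce(&response, &glresponse, 1, MPI_INT, MPI_MIN,
                      MPI_COMM_WORLD);
        /* From here on, the setup above is globally complete. */

        printf("global minimum: %d\n", glresponse);
        MPI_Finalize();
        return 0;
    }
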
@@ -121,8 +121,10 @@ int ompi_comm_connect_accept ( ompi_communicator_t *comm, int root,
                                          proc_list[rank], tag,
                                          &tmp_port_name);
        }
-       if (OMPI_SUCCESS != rc) return rc;
-       rport = &tmp_port_name;
+       if (OMPI_SUCCESS != rc) {
+           return rc;
+       }
+       rport = &tmp_port_name;
    } else {
        rport = port;
    }

@@ -139,13 +141,13 @@ int ompi_comm_connect_accept ( ompi_communicator_t *comm, int root,
        goto exit;
    }

    if(OMPI_GROUP_IS_DENSE(group)) {
        ompi_proc_pack(group->grp_proc_pointers, size, nbuf);
    }
    else {
        ompi_proc_pack(proc_list, size, nbuf);
    }

    nrbuf = OBJ_NEW(orte_buffer_t);
    if (NULL == nrbuf ) {
        rc = OMPI_ERROR;

@@ -250,8 +252,8 @@ int ompi_comm_connect_accept ( ompi_communicator_t *comm, int root,
                             NULL,                  /* attrs */
                             comm->error_handler,   /* error handler */
                             NULL,                  /* topo component */
                             group,                 /* local group */
                             new_group_pointer      /* remote group */
                             );
    if ( NULL == newcomp ) {
        rc = OMPI_ERR_OUT_OF_RESOURCE;

@@ -955,8 +957,8 @@ void ompi_comm_mark_dyncomm (ompi_communicator_t *comm)
       of different jobids. */
    grp = comm->c_local_group;
    for (i=0; i< size; i++) {
        proc = ompi_group_peer_lookup(grp,i);
        thisjobid = proc->proc_name.jobid;
        found = 0;
        for ( j=0; j<numjobids; j++) {
            if (thisjobid == jobids[j]) {

@@ -973,8 +975,8 @@ void ompi_comm_mark_dyncomm (ompi_communicator_t *comm)
       and count number of different jobids */
    grp = comm->c_remote_group;
    for (i=0; i< rsize; i++) {
        proc = ompi_group_peer_lookup(grp,i);
        thisjobid = proc->proc_name.jobid;
        found = 0;
        for ( j=0; j<numjobids; j++) {
            if ( thisjobid == jobids[j]) {

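The two loops above implement the same small algorithm over the local and remote groups: walk the procs and collect the set of distinct jobids seen so far. A self-contained sketch of that scan, with illustrative names rather than the OMPI types:

    #include <stddef.h>

    /* Sketch of the distinct-value scan in ompi_comm_mark_dyncomm:
     * for each jobid, check whether it was already collected; O(n*k)
     * with a scratch array, fine for the small counts expected here. */
    static size_t count_distinct_jobids(const int *jobids_in, size_t n,
                                        int *seen /* capacity >= n */)
    {
        size_t numjobids = 0;
        for (size_t i = 0; i < n; i++) {
            int found = 0;
            for (size_t j = 0; j < numjobids; j++) {
                if (jobids_in[i] == seen[j]) {
                    found = 1;
                    break;
                }
            }
            if (!found) {
                seen[numjobids++] = jobids_in[i];
            }
        }
        return numjobids;   /* > 1 means procs from more than one job */
    }
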
@@ -243,7 +243,7 @@ struct ompi_communicator_t {
    }
#endif
    /*return comm->c_remote_group->grp_proc_pointers[peer_id];*/
    return ompi_group_peer_lookup(comm->c_remote_group,peer_id);
}

static inline bool ompi_comm_peer_invalid(ompi_communicator_t* comm, int peer_id)

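The commented-out line records what this accessor used to do: index grp_proc_pointers directly, which is only valid when the group stores its procs densely. ompi_group_peer_lookup hides the storage format, so the same call works for sparse groups too (cf. the OMPI_GROUP_IS_DENSE checks elsewhere in this commit). A rough sketch of the idea, with hypothetical types; the real sparse group formats in Open MPI are richer:

    typedef struct proc_s proc_t;

    typedef struct group_s {
        int              is_dense;
        proc_t         **procs;      /* valid only when is_dense         */
        struct group_s  *parent;     /* sparse: defined via parent group */
        int             *rank_map;   /* sparse: my rank -> parent rank   */
    } group_t;

    /* Resolve a rank to its proc regardless of storage format. */
    static proc_t *group_peer_lookup(const group_t *g, int rank)
    {
        if (g->is_dense) {
            return g->procs[rank];
        }
        return group_peer_lookup(g->parent, g->rank_map[rank]);
    }
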
@@ -381,8 +381,8 @@ struct ompi_communicator_t {
                              opal_hash_table_t *attr,
                              ompi_errhandler_t *errh,
                              mca_base_component_t *topocomponent,
                              ompi_group_t *local_group,
                              ompi_group_t *remote_group );
/**
 * This is a short-hand routine used in intercomm_create.
 * The routine makes sure that all processes have afterwards

@@ -89,8 +89,8 @@ int MPI_Comm_accept(char *port_name, MPI_Info info, int root,
     * The two leaders will figure this out later. However, we need the tag.
     */
    if ( rank == root ) {
        tmp_port = ompi_parse_port(port_name, &tag);
        free (tmp_port);
    }
    rc = ompi_comm_connect_accept (comm, root, NULL, send_first, &newcomp, tag);

@@ -53,7 +53,7 @@ int MPI_Comm_compare(MPI_Comm comm1, MPI_Comm comm2, int *result) {
    }

    rc = ompi_comm_compare ( (ompi_communicator_t*)comm1,
                             (ompi_communicator_t*)comm2,
                             result);
    OMPI_ERRHANDLER_RETURN ( rc, comm1, rc, FUNC_NAME);
}

@@ -92,15 +92,15 @@ int MPI_Comm_connect(char *port_name, MPI_Info info, int root,
     * structure.
     */
    if ( rank == root ) {
        tmp_port = ompi_parse_port (port_name, &tag);
        if (ORTE_SUCCESS != (rc = orte_ns.convert_string_to_process_name(&port_proc_name, tmp_port))) {
            return rc;
        }
        if ( NULL == port_proc_name ) {
            *newcomm = MPI_COMM_NULL;
            return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_PORT, FUNC_NAME);
        }
        free (tmp_port);
    }

    rc = ompi_comm_connect_accept (comm, root, port_proc_name, send_first,

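A detail worth flagging in the hunk above: both early returns fire after tmp_port has been allocated by ompi_parse_port but before the trailing free (tmp_port), so those paths appear to leak the string. A common C shape that keeps a single exit per resource is goto-based cleanup; a small self-contained sketch with stand-in helpers, not the OMPI API:

    #include <stdlib.h>
    #include <string.h>

    /* Sketch of single-exit cleanup: every error path after the
     * allocation funnels through the same free().  parse_port() is a
     * stand-in for ompi_parse_port. */
    static char *parse_port(const char *name) { return strdup(name); }

    int connect_to_port(const char *port_name)
    {
        int rc = 0;
        char *tmp_port = parse_port(port_name);
        if (NULL == tmp_port) {
            return -1;                 /* nothing allocated yet */
        }
        if ('\0' == tmp_port[0]) {     /* stand-in for a failed conversion */
            rc = -1;
            goto cleanup;
        }
        /* ... use tmp_port ... */
    cleanup:
        free(tmp_port);
        return rc;
    }
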
@@ -35,7 +35,7 @@ static const char FUNC_NAME[] = "MPI_Comm_create_keyval";

int MPI_Comm_create_keyval(MPI_Comm_copy_attr_function *comm_copy_attr_fn,
                           MPI_Comm_delete_attr_function *comm_delete_attr_fn,
                           int *comm_keyval, void *extra_state)
{
    int ret;
    ompi_attribute_fn_ptr_union_t copy_fn;

@@ -45,11 +45,11 @@ int MPI_Comm_create_keyval(MPI_Comm_copy_attr_function *comm_copy_attr_fn,

    if (MPI_PARAM_CHECK) {
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
        if ((NULL == comm_copy_attr_fn) || (NULL == comm_delete_attr_fn) ||
            (NULL == comm_keyval)) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_ARG,
                                          FUNC_NAME);
        }
    }

    copy_fn.attr_communicator_copy_fn = (MPI_Comm_internal_copy_attr_function*)comm_copy_attr_fn;

@@ -41,10 +41,10 @@ int MPI_Comm_delete_attr(MPI_Comm comm, int comm_keyval)

    if (MPI_PARAM_CHECK) {
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
        if (ompi_comm_invalid(comm)) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
                                          FUNC_NAME);
        }
    }

    ret = ompi_attr_delete(COMM_ATTR, comm, comm->c_keyhash, comm_keyval,

@@ -45,18 +45,18 @@ int MPI_Comm_disconnect(MPI_Comm *comm)
    }

    if (MPI_COMM_WORLD == *comm || MPI_COMM_SELF == *comm ) {
        return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM, FUNC_NAME);
    }

    if ( OMPI_COMM_IS_DYNAMIC(*comm)) {
        ompi_comm_disconnect_obj *dobj;

        dobj = ompi_comm_disconnect_init (*comm);
        ompi_comm_disconnect_waitall(1, &dobj);
    }
    else {
        (*comm)->c_coll.coll_barrier(*comm, (*comm)->c_coll.coll_barrier_module);
    }

    ompi_comm_free(comm);

@@ -42,10 +42,10 @@ int MPI_Comm_free_keyval(int *comm_keyval)

    if (MPI_PARAM_CHECK) {
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
        if (NULL == comm_keyval) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_ARG,
                                          FUNC_NAME);
        }
    }

    ret = ompi_attr_free_keyval(COMM_ATTR, comm_keyval, 0);

@@ -41,9 +41,9 @@ int MPI_Comm_get_attr(MPI_Comm comm, int comm_keyval,

    if (MPI_PARAM_CHECK) {
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
        if ((NULL == attribute_val) || (NULL == flag)) {
            return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
        }
    }

    /* This stuff is very confusing.  Be sure to see

@@ -127,13 +127,13 @@ static int ompi_socket_send (int fd, char *buf, int len )
    c_ptr = buf;

    do {
        s_num = (size_t) num;
        a = write ( fd, c_ptr, s_num );
        if ( a == -1 ) {
            if ( errno == EINTR ) {
                /* Catch EINTR, mainly on IBM RS6000 */
                continue;
            }
#ifdef __SR8000
            else if ( errno == EWOULDBLOCK ) {
                /* Catch EWOULDBLOCK on Hitachi SR8000 */

@@ -149,19 +149,19 @@ static int ompi_socket_send (int fd, char *buf, int len )
                fprintf (stderr,"ompi_socket_send: error while writing to socket"
                         " error:%s", strerror (errno) );
                return MPI_ERR_OTHER;
            }
        }
        num -= a;
        c_ptr += a;
    } while ( num > 0 );

    if ( num < 0 ) {
        fprintf (stderr, "ompi_socket_send: more data written than available");
        ret = MPI_ERR_INTERN;
    }

    return ret;
}

static int ompi_socket_recv (int fd, char *buf, int len )

@@ -176,13 +176,13 @@ static int ompi_socket_recv (int fd, char *buf, int len )
    c_ptr = buf;

    do {
        s_num = (size_t) num;
        a = read ( fd, c_ptr, s_num );
        if ( a == -1 ) {
            if ( errno == EINTR ) {
                /* Catch EINTR, mainly on IBM RS6000 */
                continue;
            }
#ifdef __SR8000
            else if ( errno == EWOULDBLOCK ) {
                /* Catch EWOULDBLOCK on Hitachi SR8000 */

@@ -198,16 +198,16 @@ static int ompi_socket_recv (int fd, char *buf, int len )
                fprintf (stderr,"ompi_socket_recv: error while reading from socket"
                         " error:%s", strerror (errno) );
                return MPI_ERR_OTHER;
            }
        }
        num -= a;
        c_ptr += a;
    } while ( num > 0 );

    if ( num < 0 ) {
        fprintf (stderr, "ompi_socket_recv: more data read than available");
        ret = MPI_ERR_INTERN;
    }

    return ret;
}

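Both helpers follow the classic POSIX partial-I/O loop: advance the pointer by however many bytes the call actually moved, and restart when a signal interrupts the call. Stripped of the OMPI error reporting, the send side reduces to a sketch like this:

    #include <errno.h>
    #include <unistd.h>

    /* Minimal EINTR-safe full-write loop, the core of ompi_socket_send:
     * write() may move fewer bytes than requested, or fail with EINTR
     * when a signal arrives; loop until everything is out or a real
     * error occurs. */
    static int write_all(int fd, const char *buf, size_t len)
    {
        while (len > 0) {
            ssize_t a = write(fd, buf, len);
            if (-1 == a) {
                if (EINTR == errno) {
                    continue;          /* interrupted: retry the write */
                }
                return -1;             /* genuine error */
            }
            buf += a;                  /* partial write: advance */
            len -= (size_t) a;
        }
        return 0;
    }
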
@@ -41,13 +41,13 @@ int MPI_Comm_set_attr(MPI_Comm comm, int comm_keyval, void *attribute_val)

    if (MPI_PARAM_CHECK) {
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
        if (ompi_comm_invalid(comm)) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
                                          FUNC_NAME);
        }
    }

    ret = ompi_attr_set_c(COMM_ATTR, comm, &comm->c_keyhash,
                          comm_keyval, attribute_val, false, true);
    OMPI_ERRHANDLER_RETURN(ret, comm, MPI_ERR_OTHER, FUNC_NAME);
}

@@ -35,32 +35,32 @@ int MPI_Comm_set_errhandler(MPI_Comm comm, MPI_Errhandler errhandler)
{
    OPAL_CR_TEST_CHECKPOINT_READY();

    /* Error checking */

    if (MPI_PARAM_CHECK) {
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
        if (ompi_comm_invalid(comm)) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
                                          FUNC_NAME);
        } else if (NULL == errhandler ||
                   MPI_ERRHANDLER_NULL == errhandler ||
                   ( OMPI_ERRHANDLER_TYPE_COMM != errhandler->eh_mpi_object_type &&
                     OMPI_ERRHANDLER_TYPE_PREDEFINED != errhandler->eh_mpi_object_type) ) {
            return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG,
                                          FUNC_NAME);
        }
    }

    /* Ditch the old errhandler, and decrement its refcount */

    OBJ_RELEASE(comm->error_handler);

    /* We have a valid comm and errhandler, so increment its refcount */

    comm->error_handler = errhandler;
    OBJ_RETAIN(comm->error_handler);

    /* All done */

    return MPI_SUCCESS;
}

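The release-then-retain sequence above works as long as the communicator's reference is not the only one keeping the handler alive when old and new coincide; retaining the new object before releasing the old one is the ordering that stays safe unconditionally. A toy refcount sketch of that discipline (illustrative, not the real OPAL object system):

    #include <stdlib.h>

    /* Toy refcounted object standing in for ompi_errhandler_t. */
    typedef struct {
        int refcount;
    } obj_t;

    static void obj_retain(obj_t *o)  { o->refcount++; }
    static void obj_release(obj_t *o) { if (0 == --o->refcount) free(o); }

    /* Swap a new handler into a slot.  Retaining first keeps the swap
     * correct even when new_obj == *slot; the code above releases
     * first, relying on the caller's own reference to the handler
     * being installed. */
    static void slot_set(obj_t **slot, obj_t *new_obj)
    {
        obj_retain(new_obj);
        obj_release(*slot);
        *slot = new_obj;
    }
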
@@ -72,40 +72,39 @@ int MPI_Comm_spawn(char *command, char **argv, int maxprocs, MPI_Info info,
        }
    }

    rank = ompi_comm_rank ( comm );
    if ( MPI_PARAM_CHECK ) {
        if ( rank == root ) {
            if ( NULL == command ) {
                return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG,
                                              FUNC_NAME);
            }
            if ( 0 > maxprocs ) {
                return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG,
                                              FUNC_NAME);
            }
        }
    }

    if ( rank == root ) {
        /* Open a port. The port_name is passed as an environment variable
           to the children. */
        ompi_open_port (port_name);
        if (OMPI_SUCCESS != (rc = ompi_comm_start_processes (1, &command, &argv, &maxprocs,
                                                             &info, port_name))) {
            goto error;
        }
        tmp_port = ompi_parse_port (port_name, &tag);
        free(tmp_port);
    }

    rc = ompi_comm_connect_accept (comm, root, NULL, send_first, &newcomp, tag);

 error:
    /* close the port again. Nothing has to be done for that at the moment.*/

    /* set error codes */
    if (MPI_ERRCODES_IGNORE != array_of_errcodes) {
        for ( i=0; i < maxprocs; i++ ) {
            array_of_errcodes[i]=rc;

@@ -130,25 +130,25 @@ int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader,

    if ( MPI_PARAM_CHECK ) {
        if(OMPI_GROUP_IS_DENSE(local_comm->c_local_group)) {
            rc = ompi_comm_overlapping_groups(local_comm->c_local_group->grp_proc_count,
                                              local_comm->c_local_group->grp_proc_pointers,
                                              rsize,
                                              rprocs);
        }
        else {
            proc_list = (ompi_proc_t **) calloc (local_comm->c_local_group->grp_proc_count,
                                                 sizeof (ompi_proc_t *));
            for(j=0 ; j<local_comm->c_local_group->grp_proc_count ; j++) {
                proc_list[j] = ompi_group_peer_lookup(local_comm->c_local_group,j);
            }
            rc = ompi_comm_overlapping_groups(local_comm->c_local_group->grp_proc_count,
                                              proc_list,
                                              rsize,
                                              rprocs);
        }
        if ( OMPI_SUCCESS != rc ) {
            goto err_exit;
        }
    }
    new_group_pointer=ompi_group_allocate(rsize);
    if( NULL == new_group_pointer ) {

@@ -171,8 +171,8 @@ int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader,
                             NULL,                          /* attrs */
                             local_comm->error_handler,     /* error handler */
                             NULL,                          /* topo module */
                             local_comm->c_local_group,     /* local group */
                             new_group_pointer              /* remote group */
                             );

    if ( NULL == newcomp ) {

@@ -208,7 +208,7 @@ int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader,
                            &rleader,                      /* remote_leader */
                            OMPI_COMM_CID_INTRA_BRIDGE,    /* mode */
                            -1,                            /* send_first */
                            0);                            /* sync_flag */

    if ( MPI_SUCCESS != rc ) {
        goto err_exit;

@@ -98,8 +98,8 @@ int MPI_Intercomm_merge(MPI_Comm intercomm, int high,
                             NULL,                        /* attrs */
                             intercomm->error_handler,    /* error handler */
                             NULL,                        /* topo module */
                             new_group_pointer,           /* local group */
                             NULL                         /* remote group */
                             );
    if ( NULL == newcomp ) {
        rc = MPI_ERR_INTERN;

@@ -133,7 +133,7 @@ int MPI_Intercomm_merge(MPI_Comm intercomm, int high,
                            NULL,                  /* remote_leader */
                            OMPI_COMM_CID_INTER,   /* mode */
                            -1,                    /* send_first */
                            0);                    /* sync_flag */
    if ( OMPI_SUCCESS != rc ) {
        goto exit;
    }

@@ -181,25 +181,25 @@ int ompi_mpi_finalize(void)

    /* free file resources */
    if (OMPI_SUCCESS != (ret = ompi_file_finalize())) {
        return ret;
    }

    /* free window resources */
    if (OMPI_SUCCESS != (ret = ompi_win_finalize())) {
        return ret;
    }
    if (OMPI_SUCCESS != (ret = ompi_osc_base_finalize())) {
        return ret;
    }

    /* free communicator resources */
    if (OMPI_SUCCESS != (ret = ompi_comm_finalize())) {
        return ret;
    }

    /* free requests */
    if (OMPI_SUCCESS != (ret = ompi_request_finalize())) {
        return ret;
    }

    /* If requested, print out a list of memory allocated by ALLOC_MEM

@@ -211,7 +211,7 @@ int ompi_mpi_finalize(void)
    /* Now that all MPI objects dealing with communications are gone,
       shut down MCA types having to do with communications */
    if (OMPI_SUCCESS != (ret = mca_pml_base_close())) {
        return ret;
    }

    /* shut down buffered send code */

@@ -231,54 +231,54 @@ int ompi_mpi_finalize(void)

    /* free attr resources */
    if (OMPI_SUCCESS != (ret = ompi_attr_finalize())) {
        return ret;
    }

    /* free group resources */
    if (OMPI_SUCCESS != (ret = ompi_group_finalize())) {
        return ret;
    }

    /* free proc resources */
    if ( OMPI_SUCCESS != (ret = ompi_proc_finalize())) {
        return ret;
    }

    /* free internal error resources */
    if (OMPI_SUCCESS != (ret = ompi_errcode_intern_finalize())) {
        return ret;
    }

    /* free error code resources */
    if (OMPI_SUCCESS != (ret = ompi_mpi_errcode_finalize())) {
        return ret;
    }

    /* free errhandler resources */
    if (OMPI_SUCCESS != (ret = ompi_errhandler_finalize())) {
        return ret;
    }

    /* Free all other resources */

    /* free op resources */
    if (OMPI_SUCCESS != (ret = ompi_op_finalize())) {
        return ret;
    }

    /* free ddt resources */
    if (OMPI_SUCCESS != (ret = ompi_ddt_finalize())) {
        return ret;
    }

    /* free info resources */
    if (OMPI_SUCCESS != (ret = ompi_info_finalize())) {
        return ret;
    }

    /* free module exchange resources */
    if (OMPI_SUCCESS != (ret = ompi_modex_finalize())) {
        return ret;
    }

    /* Close down MCA modules */

@@ -293,16 +293,16 @@ int ompi_mpi_finalize(void)
        }
    }
    if (OMPI_SUCCESS != (ret = mca_topo_base_close())) {
        return ret;
    }
    if (OMPI_SUCCESS != (ret = ompi_osc_base_close())) {
        return ret;
    }
    if (OMPI_SUCCESS != (ret = mca_coll_base_close())) {
        return ret;
    }
    if (OMPI_SUCCESS != (ret = mca_mpool_base_close())) {
        return ret;
    }
    if (OMPI_SUCCESS != (ret = mca_rcache_base_close())) {
        return ret;

@@ -328,7 +328,7 @@ int ompi_mpi_finalize(void)
    /* Leave the RTE */

    if (OMPI_SUCCESS != (ret = orte_finalize())) {
        return ret;
    }

    /* All done */