
Due to MPI_Comm_idup we can no longer use the communicator's CID as
the Fortran handle. Use a separate opal_pointer_array to keep track of
the Fortran handles of communicators.
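Below is a minimal, standalone toy model of the idea (not OMPI code; the struct, table, and function names are invented for illustration): once communicator creation can complete asynchronously, the object is handed back to the caller before a CID has been negotiated, so the Fortran handle has to be allocated at construct time from its own table instead of reusing the CID.

/* Toy model only -- not OMPI code. */
#include <stdio.h>

#define CID_PENDING (-1)                    /* CID not negotiated yet */

struct toy_comm {
    int cid;                                /* agreed on later, possibly after return */
    int f_handle;                           /* must be valid as soon as the object exists */
};

static struct toy_comm *f_to_c_table[64];   /* stand-in for a dedicated opal_pointer_array_t */
static int next_handle = 0;

static void toy_comm_construct (struct toy_comm *c)
{
    c->cid = CID_PENDING;
    /* Handle allocated independently of the CID, precisely because the CID
     * may not be known yet when a non-blocking dup returns. */
    c->f_handle = next_handle;
    f_to_c_table[next_handle++] = c;
}

int main (void)
{
    struct toy_comm dup;                    /* imagine this was returned by a non-blocking dup */
    toy_comm_construct (&dup);
    printf ("f_handle=%d cid=%d\n", dup.f_handle, dup.cid);
    return 0;
}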

This commit also fixes a bug in ompi_comm_idup where the newcomm was not
set until after the operation completed.
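A similarly hedged toy sketch of that fix (invented names again, not the real ompi_comm_idup code path): the caller's output pointer is assigned when the operation is scheduled, instead of in the completion callback that only runs once the operation has finished.

/* Toy model only -- not OMPI code. */
#include <assert.h>
#include <stddef.h>

struct toy_comm { int cid; };

struct toy_idup_context {
    struct toy_comm  *newcomp;          /* communicator being built */
    struct toy_comm **newcomm;          /* caller's output argument */
};

/* Completion callback: runs only after the CID negotiation has finished.
 * The old behaviour assigned *ctx->newcomm here, which was too late. */
static void toy_idup_finish (struct toy_idup_context *ctx)
{
    ctx->newcomp->cid = 42;             /* CID now known */
}

static void toy_comm_idup (struct toy_comm *tmp, struct toy_comm **newcomm,
                           struct toy_idup_context *ctx)
{
    ctx->newcomp = tmp;
    ctx->newcomm = newcomm;
    *newcomm = ctx->newcomp;            /* the fix: assign before the request is started */
    /* ...request scheduled and started here; toy_idup_finish runs later... */
}

int main (void)
{
    struct toy_comm tmp = { -1 };
    struct toy_comm *newcomm = NULL;
    struct toy_idup_context ctx;

    toy_comm_idup (&tmp, &newcomm, &ctx);
    assert (NULL != newcomm);           /* handle is available as soon as the call returns */
    toy_idup_finish (&ctx);             /* later: the operation completes, CID filled in */
    return 0;
}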

cmr=v1.7.4:reviewer=jsquyres:ticket=trac:3796

This commit was SVN r29342.

The following Trac tickets were found above:
  Ticket 3796 --> https://svn.open-mpi.org/trac/ompi/ticket/3796
This commit is contained in:
Nathan Hjelm 2013-10-03 01:11:28 +00:00
parent 2121e9c01b
commit c17b21b11d
7 changed files with 35 additions and 23 deletions

View File

@@ -1005,6 +1005,9 @@ int ompi_comm_idup_with_info (ompi_communicator_t *comm, ompi_info_t *info, ompi
ompi_comm_request_schedule_append (request, ompi_comm_idup_getcid, &subreq, subreq ? 1 : 0);
/* assign the newcomm now */
*newcomm = context->newcomp;
/* kick off the request */
ompi_comm_request_start (request);
*req = &request->super;
@@ -1071,11 +1074,6 @@ static int ompi_comm_idup_with_info_activate (ompi_comm_request_t *request)
static int ompi_comm_idup_with_info_finish (ompi_comm_request_t *request)
{
struct ompi_comm_idup_with_info_context *context =
(struct ompi_comm_idup_with_info_context *) request->context;
*context->newcomm = context->newcomp;
/* done */
return MPI_SUCCESS;
}

View File

@@ -302,7 +302,6 @@ int ompi_comm_nextcid ( ompi_communicator_t* newcomm,
/* set the according values to the newcomm */
newcomm->c_contextid = nextcid;
newcomm->c_f_to_c_index = newcomm->c_contextid;
opal_pointer_array_set_item (&ompi_mpi_communicators, nextcid, newcomm);
release_and_return:
@@ -468,7 +467,6 @@ static int ompi_comm_nextcid_check_flag (ompi_comm_request_t *request)
if (1 == context->rflag) {
/* set the according values to the newcomm */
context->newcomm->c_contextid = context->nextcid;
context->newcomm->c_f_to_c_index = context->newcomm->c_contextid;
opal_pointer_array_set_item (&ompi_mpi_communicators, context->nextcid, context->newcomm);
ompi_comm_unregister_cid (context->comm->c_contextid);

View File

@@ -46,6 +46,7 @@
**
*/
opal_pointer_array_t ompi_mpi_communicators;
opal_pointer_array_t ompi_comm_f_to_c_table;
ompi_predefined_communicator_t ompi_mpi_comm_world;
ompi_predefined_communicator_t ompi_mpi_comm_self;
@@ -86,8 +87,16 @@ int ompi_comm_init(void)
return OMPI_ERROR;
}
/* Setup f to c table (we can no longer use the cid as the fortran handle) */
OBJ_CONSTRUCT(&ompi_comm_f_to_c_table, opal_pointer_array_t);
if( OPAL_SUCCESS != opal_pointer_array_init(&ompi_comm_f_to_c_table, 0,
OMPI_FORTRAN_HANDLE_MAX, 64) ) {
return OMPI_ERROR;
}
/* Setup MPI_COMM_WORLD */
OBJ_CONSTRUCT(&ompi_mpi_comm_world, ompi_communicator_t);
assert(ompi_mpi_comm_world.comm.c_f_to_c_index == 0);
group = OBJ_NEW(ompi_group_t);
group->grp_proc_pointers = ompi_proc_world(&size);
group->grp_proc_count = (int)size;
@@ -99,7 +108,6 @@ int ompi_comm_init(void)
ompi_mpi_comm_world.comm.c_contextid = 0;
ompi_mpi_comm_world.comm.c_id_start_index = 4;
ompi_mpi_comm_world.comm.c_id_available = 4;
ompi_mpi_comm_world.comm.c_f_to_c_index = 0;
ompi_mpi_comm_world.comm.c_my_rank = group->grp_my_rank;
ompi_mpi_comm_world.comm.c_local_group = group;
ompi_mpi_comm_world.comm.c_remote_group = group;
@@ -124,15 +132,15 @@ int ompi_comm_init(void)
/* Setup MPI_COMM_SELF */
OBJ_CONSTRUCT(&ompi_mpi_comm_self, ompi_communicator_t);
assert(ompi_mpi_comm_self.comm.c_f_to_c_index == 1);
group = OBJ_NEW(ompi_group_t);
group->grp_proc_pointers = ompi_proc_self(&size);
group->grp_my_rank = 0;
group->grp_proc_count = (int)size;
OMPI_GROUP_SET_INTRINSIC (group);
OMPI_GROUP_SET_DENSE (group);
ompi_mpi_comm_self.comm.c_contextid = 1;
ompi_mpi_comm_self.comm.c_f_to_c_index = 1;
ompi_mpi_comm_self.comm.c_id_start_index = 20;
ompi_mpi_comm_self.comm.c_id_available = 20;
ompi_mpi_comm_self.comm.c_my_rank = group->grp_my_rank;
@@ -156,13 +164,13 @@ int ompi_comm_init(void)
/* Setup MPI_COMM_NULL */
OBJ_CONSTRUCT(&ompi_mpi_comm_null, ompi_communicator_t);
assert(ompi_mpi_comm_null.comm.c_f_to_c_index == 2);
ompi_mpi_comm_null.comm.c_local_group = &ompi_mpi_group_null.group;
ompi_mpi_comm_null.comm.c_remote_group = &ompi_mpi_group_null.group;
OBJ_RETAIN(&ompi_mpi_group_null.group);
OBJ_RETAIN(&ompi_mpi_group_null.group);
ompi_mpi_comm_null.comm.c_contextid = 2;
ompi_mpi_comm_null.comm.c_f_to_c_index = 2;
ompi_mpi_comm_null.comm.c_my_rank = MPI_PROC_NULL;
ompi_mpi_comm_null.comm.error_handler = &ompi_mpi_errors_are_fatal.eh;
@@ -184,6 +192,9 @@ int ompi_comm_init(void)
allocation */
ompi_comm_reg_init();
/* initialize communicator requests (for ompi_comm_idup) */
ompi_comm_request_init ();
return OMPI_SUCCESS;
}
@@ -296,6 +307,9 @@ int ompi_comm_finalize(void)
/* finalize the comm_reg stuff */
ompi_comm_reg_finalize();
/* finalize communicator requests */
ompi_comm_request_fini ();
return OMPI_SUCCESS;
}
@@ -314,7 +328,7 @@ int ompi_comm_link_function(void)
static void ompi_comm_construct(ompi_communicator_t* comm)
{
comm->c_f_to_c_index = MPI_UNDEFINED;
comm->c_f_to_c_index = opal_pointer_array_add(&ompi_comm_f_to_c_table, comm);
comm->c_name[0] = '\0';
comm->c_contextid = MPI_UNDEFINED;
comm->c_id_available = MPI_UNDEFINED;
@@ -406,12 +420,19 @@ static void ompi_comm_destruct(ompi_communicator_t* comm)
comm->error_handler = NULL;
}
/* mark this cid as available */
if ( MPI_UNDEFINED != comm->c_contextid &&
NULL != opal_pointer_array_get_item(&ompi_mpi_communicators,
comm->c_contextid)) {
opal_pointer_array_set_item ( &ompi_mpi_communicators,
comm->c_contextid, NULL);
}
/* reset the ompi_comm_f_to_c_table entry */
if ( MPI_UNDEFINED != comm->c_f_to_c_index &&
NULL != opal_pointer_array_get_item(&ompi_mpi_communicators,
comm->c_f_to_c_index )) {
NULL != opal_pointer_array_get_item(&ompi_comm_f_to_c_table,
comm->c_f_to_c_index)) {
opal_pointer_array_set_item ( &ompi_mpi_communicators,
comm->c_f_to_c_index, NULL);
}
}

View File

@@ -106,6 +106,7 @@ OMPI_DECLSPEC OBJ_CLASS_DECLARATION(ompi_communicator_t);
OMPI_DECLSPEC extern opal_pointer_array_t ompi_mpi_communicators;
OMPI_DECLSPEC extern opal_pointer_array_t ompi_comm_f_to_c_table;
struct ompi_communicator_t {
opal_object_t c_base;

View File

@@ -52,9 +52,9 @@ MPI_Comm MPI_Comm_f2c(MPI_Fint comm)
return an invalid C handle. */
if ( 0 > o_index ||
o_index >= opal_pointer_array_get_size(&ompi_mpi_communicators)) {
o_index >= opal_pointer_array_get_size(&ompi_comm_f_to_c_table)) {
return NULL;
}
return (MPI_Comm)opal_pointer_array_get_item(&ompi_mpi_communicators, o_index);
return (MPI_Comm)opal_pointer_array_get_item(&ompi_comm_f_to_c_table, o_index);
}

View File

@@ -283,9 +283,6 @@ int ompi_mpi_finalize(void)
return ret;
}
/* release resources held by comm requests */
ompi_comm_request_fini ();
if (OMPI_SUCCESS != (ret = ompi_message_finalize())) {
return ret;
}

View File

@@ -871,9 +871,6 @@ int ompi_mpi_init(int argc, char **argv, int requested, int *provided)
goto error;
}
/* Prepare communicator requests */
ompi_comm_request_init ();
/* Init coll for the comms. This has to be after dpm_base_select,
(since dpm.mark_dyncomm is not set in the communicator creation
function else), but before dpm.dyncom_init, since this function