From 0758d7570e2041345471ede9de2b63866350b4bb Mon Sep 17 00:00:00 2001 From: Edgar Gabriel Date: Sat, 29 Nov 2014 20:01:36 -0600 Subject: [PATCH] part 1 of the fix to get rid of the missing symbols that prevent the sub-modules to be loaded. --- .../dynamic/fcoll_dynamic_file_read_all.c | 112 ++++++++-------- .../dynamic/fcoll_dynamic_file_write_all.c | 122 +++++++++--------- .../fcoll_individual_file_read_all.c | 3 +- .../fcoll_individual_file_write_all.c | 3 +- .../fcoll/static/fcoll_static_file_read_all.c | 108 ++++++++-------- .../static/fcoll_static_file_write_all.c | 104 +++++++-------- .../two_phase/fcoll_two_phase_file_read_all.c | 38 +++--- .../fcoll_two_phase_file_write_all.c | 38 +++--- ompi/mca/io/ompio/io_ompio.c | 6 +- ompi/mca/io/ompio/io_ompio.h | 118 ++++++++++++++++- ompi/mca/io/ompio/io_ompio_file_open.c | 19 +++ 11 files changed, 401 insertions(+), 270 deletions(-) diff --git a/ompi/mca/fcoll/dynamic/fcoll_dynamic_file_read_all.c b/ompi/mca/fcoll/dynamic/fcoll_dynamic_file_read_all.c index 470f555b38..e4cabc64a9 100644 --- a/ompi/mca/fcoll/dynamic/fcoll_dynamic_file_read_all.c +++ b/ompi/mca/fcoll/dynamic/fcoll_dynamic_file_read_all.c @@ -112,13 +112,13 @@ ** In case the data is not contigous in memory, decode it into an iovec ** **************************************************************************/ if (! 
(fh->f_flags & OMPIO_CONTIGUOUS_MEMORY)) { - ret = ompi_io_ompio_decode_datatype (fh, - datatype, - count, - buf, - &max_data, - &decoded_iov, - &iov_count); + ret = fh->f_decode_datatype ((struct mca_io_ompio_file_t *)fh, + datatype, + count, + buf, + &max_data, + &decoded_iov, + &iov_count); if (OMPI_SUCCESS != ret){ goto exit; } @@ -131,10 +131,10 @@ status->_ucount = max_data; } - mca_io_ompio_get_num_aggregators ( &dynamic_num_io_procs); - ret = ompi_io_ompio_set_aggregator_props (fh, - dynamic_num_io_procs, - max_data); + fh->f_get_num_aggregators ( &dynamic_num_io_procs); + ret = fh->f_set_aggregator_props ((struct mca_io_ompio_file_t *) fh, + dynamic_num_io_procs, + max_data); if (OMPI_SUCCESS != ret){ goto exit; } @@ -147,16 +147,16 @@ goto exit; } - ret = ompi_io_ompio_allgather_array (&max_data, - 1, - MPI_LONG, - total_bytes_per_process, - 1, - MPI_LONG, - fh->f_aggregator_index, - fh->f_procs_in_group, - fh->f_procs_per_group, - fh->f_comm); + ret = fh->f_allgather_array (&max_data, + 1, + MPI_LONG, + total_bytes_per_process, + 1, + MPI_LONG, + fh->f_aggregator_index, + fh->f_procs_in_group, + fh->f_procs_per_group, + fh->f_comm); if (OMPI_SUCCESS != ret){ goto exit; } @@ -173,13 +173,13 @@ /********************************************************************* *** Generate the File offsets/lengths corresponding to this write *** ********************************************************************/ - ret = ompi_io_ompio_generate_current_file_view (fh, - max_data, - &local_iov_array, - &local_count); - + ret = fh->f_generate_current_file_view ((struct mca_io_ompio_file_t *) fh, + max_data, + &local_iov_array, + &local_count); + if (ret != OMPI_SUCCESS){ - goto exit; + goto exit; } @@ -197,21 +197,21 @@ goto exit; } - ret = ompi_io_ompio_allgather_array (&local_count, - 1, - MPI_INT, - fview_count, - 1, - MPI_INT, - fh->f_aggregator_index, - fh->f_procs_in_group, - fh->f_procs_per_group, - fh->f_comm); - + ret = fh->f_allgather_array (&local_count, + 1, + 
MPI_INT, + fview_count, + 1, + MPI_INT, + fh->f_aggregator_index, + fh->f_procs_in_group, + fh->f_procs_per_group, + fh->f_comm); + if (OMPI_SUCCESS != ret){ - goto exit; + goto exit; } - + displs = (int*)malloc (fh->f_procs_per_group*sizeof(int)); if (NULL == displs) { opal_output (1, "OUT OF MEMORY\n"); @@ -249,17 +249,17 @@ } } - ret = ompi_io_ompio_allgatherv_array (local_iov_array, - local_count, - fh->f_iov_type, - global_iov_array, - fview_count, - displs, - fh->f_iov_type, - fh->f_aggregator_index, - fh->f_procs_in_group, - fh->f_procs_per_group, - fh->f_comm); + ret = fh->f_allgatherv_array (local_iov_array, + local_count, + fh->f_iov_type, + global_iov_array, + fview_count, + displs, + fh->f_iov_type, + fh->f_aggregator_index, + fh->f_procs_in_group, + fh->f_procs_per_group, + fh->f_comm); if (OMPI_SUCCESS != ret){ goto exit; @@ -273,7 +273,7 @@ ret = OMPI_ERR_OUT_OF_RESOURCE; goto exit; } - ompi_io_ompio_sort_iovec (global_iov_array, total_fview_count, sorted); + fh->f_sort_iovec (global_iov_array, total_fview_count, sorted); } if (NULL != local_iov_array) { @@ -325,7 +325,7 @@ /* * Calculate how many bytes are read in each cycle */ - mca_io_ompio_get_bytes_per_agg ( (int *) &bytes_per_cycle); + fh->f_get_bytes_per_agg ( (int *) &bytes_per_cycle); cycles = ceil((double)total_bytes/bytes_per_cycle); n = 0; @@ -845,9 +845,9 @@ else nentry.aggregator = 0; nentry.nprocs_for_coll = dynamic_num_io_procs; - if (!ompi_io_ompio_full_print_queue(READ_PRINT_QUEUE)){ - ompi_io_ompio_register_print_entry(READ_PRINT_QUEUE, - nentry); + if (!fh->f_full_print_queue(READ_PRINT_QUEUE)){ + fh->f_register_print_entry(READ_PRINT_QUEUE, + nentry); } #endif diff --git a/ompi/mca/fcoll/dynamic/fcoll_dynamic_file_write_all.c b/ompi/mca/fcoll/dynamic/fcoll_dynamic_file_write_all.c index 1103b7910e..8c34fbb2a8 100644 --- a/ompi/mca/fcoll/dynamic/fcoll_dynamic_file_write_all.c +++ b/ompi/mca/fcoll/dynamic/fcoll_dynamic_file_write_all.c @@ -115,13 +115,13 @@ 
mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh, ** In case the data is not contigous in memory, decode it into an iovec ** **************************************************************************/ if (! (fh->f_flags & OMPIO_CONTIGUOUS_MEMORY)) { - ret = ompi_io_ompio_decode_datatype (fh, - datatype, - count, - buf, - &max_data, - &decoded_iov, - &iov_count); + ret = fh->f_decode_datatype ((struct mca_io_ompio_file_t *) fh, + datatype, + count, + buf, + &max_data, + &decoded_iov, + &iov_count); if (OMPI_SUCCESS != ret ){ goto exit; } @@ -134,11 +134,11 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh, status->_ucount = max_data; } - mca_io_ompio_get_num_aggregators ( &dynamic_num_io_procs ); - ret = ompi_io_ompio_set_aggregator_props (fh, - dynamic_num_io_procs, - max_data); - + fh->f_get_num_aggregators ( &dynamic_num_io_procs ); + ret = fh->f_set_aggregator_props ((struct mca_io_ompio_file_t *) fh, + dynamic_num_io_procs, + max_data); + if (OMPI_SUCCESS != ret){ goto exit; } @@ -152,19 +152,19 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh, goto exit; } - ret = ompi_io_ompio_allgather_array (&max_data, - 1, - MPI_LONG, - total_bytes_per_process, - 1, - MPI_LONG, - fh->f_aggregator_index, - fh->f_procs_in_group, - fh->f_procs_per_group, - fh->f_comm); + ret = fh->f_allgather_array (&max_data, + 1, + MPI_LONG, + total_bytes_per_process, + 1, + MPI_LONG, + fh->f_aggregator_index, + fh->f_procs_in_group, + fh->f_procs_per_group, + fh->f_comm); if( OMPI_SUCCESS != ret){ - goto exit; + goto exit; } for (i=0 ; i<fh->f_procs_per_group ; i++) { total_bytes += total_bytes_per_process[i]; } @@ -178,12 +178,12 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh, /********************************************************************* *** Generate the File offsets/lengths corresponding to this write *** ********************************************************************/ - ret = ompi_io_ompio_generate_current_file_view(fh, - max_data, - 
&local_iov_array, - &local_count); + ret = fh->f_generate_current_file_view( (struct mca_io_ompio_file_t *) fh, + max_data, + &local_iov_array, + &local_count); if (ret != OMPI_SUCCESS){ - goto exit; + goto exit; } #if DEBUG_ON @@ -208,19 +208,19 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh, goto exit; } - ret = ompi_io_ompio_allgather_array (&local_count, - 1, - MPI_INT, - fview_count, - 1, - MPI_INT, - fh->f_aggregator_index, - fh->f_procs_in_group, - fh->f_procs_per_group, - fh->f_comm); + ret = fh->f_allgather_array (&local_count, + 1, + MPI_INT, + fview_count, + 1, + MPI_INT, + fh->f_aggregator_index, + fh->f_procs_in_group, + fh->f_procs_per_group, + fh->f_comm); if( OMPI_SUCCESS != ret){ - goto exit; + goto exit; } displs = (int*) malloc (fh->f_procs_per_group * sizeof (int)); @@ -263,19 +263,19 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh, } - ret = ompi_io_ompio_allgatherv_array (local_iov_array, - local_count, - fh->f_iov_type, - global_iov_array, - fview_count, - displs, - fh->f_iov_type, - fh->f_aggregator_index, - fh->f_procs_in_group, - fh->f_procs_per_group, - fh->f_comm); + ret = fh->f_allgatherv_array (local_iov_array, + local_count, + fh->f_iov_type, + global_iov_array, + fview_count, + displs, + fh->f_iov_type, + fh->f_aggregator_index, + fh->f_procs_in_group, + fh->f_procs_per_group, + fh->f_comm); if (OMPI_SUCCESS != ret){ - goto exit; + goto exit; } /* sort it */ @@ -286,19 +286,19 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh, ret = OMPI_ERR_OUT_OF_RESOURCE; goto exit; } - ompi_io_ompio_sort_iovec (global_iov_array, total_fview_count, sorted); + fh->f_sort_iovec (global_iov_array, total_fview_count, sorted); } if (NULL != local_iov_array){ - free(local_iov_array); - local_iov_array = NULL; + free(local_iov_array); + local_iov_array = NULL; } if (NULL != displs){ - free(displs); - displs=NULL; + free(displs); + displs=NULL; } - + #if DEBUG_ON if (fh->f_procs_in_group[fh->f_aggregator_index] == 
fh->f_rank) { @@ -341,7 +341,7 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh, } - mca_io_ompio_get_bytes_per_agg ( (int *)&bytes_per_cycle ); + fh->f_get_bytes_per_agg ( (int *)&bytes_per_cycle ); cycles = ceil((double)total_bytes/bytes_per_cycle); @@ -983,9 +983,9 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh, else nentry.aggregator = 0; nentry.nprocs_for_coll = dynamic_num_io_procs; - if (!ompi_io_ompio_full_print_queue(WRITE_PRINT_QUEUE)){ - ompi_io_ompio_register_print_entry(WRITE_PRINT_QUEUE, - nentry); + if (!fh->f_full_print_queue(WRITE_PRINT_QUEUE)){ + fh->f_register_print_entry(WRITE_PRINT_QUEUE, + nentry); } #endif diff --git a/ompi/mca/fcoll/individual/fcoll_individual_file_read_all.c b/ompi/mca/fcoll/individual/fcoll_individual_file_read_all.c index 5a391d7887..2cfdbbe367 100644 --- a/ompi/mca/fcoll/individual/fcoll_individual_file_read_all.c +++ b/ompi/mca/fcoll/individual/fcoll_individual_file_read_all.c @@ -38,5 +38,6 @@ mca_fcoll_individual_file_read_all (mca_io_ompio_file_t *fh, struct ompi_datatype_t *datatype, ompi_status_public_t *status) { - return mca_io_ompio_file_read ( fh->f_fh, buf, count, datatype, status); + return fh->f_fh->f_io_selected_module.v2_0_0. + io_module_file_read( fh->f_fh, buf, count, datatype, status); } diff --git a/ompi/mca/fcoll/individual/fcoll_individual_file_write_all.c b/ompi/mca/fcoll/individual/fcoll_individual_file_write_all.c index 260c5c74a7..2697f9aebf 100644 --- a/ompi/mca/fcoll/individual/fcoll_individual_file_write_all.c +++ b/ompi/mca/fcoll/individual/fcoll_individual_file_write_all.c @@ -35,5 +35,6 @@ int mca_fcoll_individual_file_write_all (mca_io_ompio_file_t *fh, struct ompi_datatype_t *datatype, ompi_status_public_t *status) { - return mca_io_ompio_file_write (fh->f_fh, buf, count, datatype, status); + return fh->f_fh->f_io_selected_module.v2_0_0. 
+ io_module_file_write (fh->f_fh, buf, count, datatype, status); } diff --git a/ompi/mca/fcoll/static/fcoll_static_file_read_all.c b/ompi/mca/fcoll/static/fcoll_static_file_read_all.c index 863ee603bf..5bbdac26d7 100644 --- a/ompi/mca/fcoll/static/fcoll_static_file_read_all.c +++ b/ompi/mca/fcoll/static/fcoll_static_file_read_all.c @@ -111,13 +111,13 @@ mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh, /* In case the data is not contigous in memory, decode it into an iovec */ if (! (fh->f_flags & OMPIO_CONTIGUOUS_MEMORY)) { - ompi_io_ompio_decode_datatype (fh, - datatype, - count, - buf, - &max_data, - &decoded_iov, - &iov_count); + fh->f_decode_datatype ( (struct mca_io_ompio_file_t *)fh, + datatype, + count, + buf, + &max_data, + &decoded_iov, + &iov_count); } else { max_data = count * datatype->super.size; @@ -128,18 +128,18 @@ mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh, } - mca_io_ompio_get_num_aggregators ( &static_num_io_procs ); - ompi_io_ompio_set_aggregator_props (fh, - static_num_io_procs, - max_data); + fh->f_get_num_aggregators ( &static_num_io_procs ); + fh->f_set_aggregator_props ((struct mca_io_ompio_file_t *) fh, + static_num_io_procs, + max_data); /* printf("max_data %ld\n", max_data); */ - ret = ompi_io_ompio_generate_current_file_view(fh, - max_data, - &iov, - &iov_size); + ret = fh->f_generate_current_file_view((struct mca_io_ompio_file_t *)fh, + max_data, + &iov, + &iov_size); if (ret != OMPI_SUCCESS){ - goto exit; + goto exit; } if ( iov_size > 0 ) { @@ -195,7 +195,7 @@ mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh, /* #########################################################*/ - mca_io_ompio_get_bytes_per_agg ( (int*) &bytes_per_cycle); + fh->f_get_bytes_per_agg ( (int*) &bytes_per_cycle); local_cycles = ceil((double)max_data/bytes_per_cycle); ret = fh->f_comm->c_coll.coll_allreduce (&local_cycles, &cycles, @@ -276,16 +276,16 @@ mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh, } - ret = 
ompi_io_ompio_allgather_array (&iov_size, - 1, - MPI_INT, - iovec_count_per_process, - 1, - MPI_INT, - fh->f_aggregator_index, - fh->f_procs_in_group, - fh->f_procs_per_group, - fh->f_comm); + ret = fh->f_allgather_array (&iov_size, + 1, + MPI_INT, + iovec_count_per_process, + 1, + MPI_INT, + fh->f_aggregator_index, + fh->f_procs_in_group, + fh->f_procs_per_group, + fh->f_comm); if( OMPI_SUCCESS != ret){ goto exit; @@ -311,21 +311,21 @@ mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh, } } - ret = ompi_io_ompio_gatherv_array (local_iov_array, - iov_size, - io_array_type, - global_iov_array, - iovec_count_per_process, - displs, - io_array_type, - fh->f_aggregator_index, - fh->f_procs_in_group, - fh->f_procs_per_group, - fh->f_comm); + ret = fh->f_gatherv_array (local_iov_array, + iov_size, + io_array_type, + global_iov_array, + iovec_count_per_process, + displs, + io_array_type, + fh->f_aggregator_index, + fh->f_procs_in_group, + fh->f_procs_per_group, + fh->f_comm); if (OMPI_SUCCESS != ret){ - fprintf(stderr,"global_iov_array gather error!\n"); - goto exit; + fprintf(stderr,"global_iov_array gather error!\n"); + goto exit; } @@ -422,17 +422,17 @@ mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh, else { bytes_to_read_in_cycle = 0; } - ompi_io_ompio_gather_array (&bytes_to_read_in_cycle, - 1, - MPI_INT, - bytes_per_process, - 1, - MPI_INT, - fh->f_aggregator_index, - fh->f_procs_in_group, - fh->f_procs_per_group, - fh->f_comm); - + fh->f_gather_array (&bytes_to_read_in_cycle, + 1, + MPI_INT, + bytes_per_process, + 1, + MPI_INT, + fh->f_aggregator_index, + fh->f_procs_in_group, + fh->f_procs_per_group, + fh->f_comm); + if (fh->f_flags & OMPIO_CONTIGUOUS_MEMORY) { receive_buf = &((char*)buf)[position]; } @@ -898,9 +898,9 @@ mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh, else nentry.aggregator = 0; nentry.nprocs_for_coll = static_num_io_procs; - if (!ompi_io_ompio_full_print_queue(READ_PRINT_QUEUE)){ - 
ompi_io_ompio_register_print_entry(READ_PRINT_QUEUE, - nentry); + if (!fh->f_full_print_queue(READ_PRINT_QUEUE)){ + fh->f_register_print_entry(READ_PRINT_QUEUE, + nentry); } #endif diff --git a/ompi/mca/fcoll/static/fcoll_static_file_write_all.c b/ompi/mca/fcoll/static/fcoll_static_file_write_all.c index 26d8f61fe4..f8f839a258 100644 --- a/ompi/mca/fcoll/static/fcoll_static_file_write_all.c +++ b/ompi/mca/fcoll/static/fcoll_static_file_write_all.c @@ -109,13 +109,13 @@ mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh, /* In case the data is not contigous in memory, decode it into an iovec */ if (! (fh->f_flags & OMPIO_CONTIGUOUS_MEMORY)) { - ompi_io_ompio_decode_datatype (fh, - datatype, - count, - buf, - &max_data, - &decoded_iov, - &iov_count); + fh->f_decode_datatype ((struct mca_io_ompio_file_t *)fh, + datatype, + count, + buf, + &max_data, + &decoded_iov, + &iov_count); } else { max_data = count * datatype->super.size; @@ -125,10 +125,10 @@ mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh, status->_ucount = max_data; } - mca_io_ompio_get_num_aggregators ( & static_num_io_procs ); - ompi_io_ompio_set_aggregator_props (fh, - static_num_io_procs, - max_data); + fh->f_get_num_aggregators ( & static_num_io_procs ); + fh->f_set_aggregator_props ((struct mca_io_ompio_file_t *)fh, + static_num_io_procs, + max_data); /* io_array datatype for using in communication*/ @@ -153,10 +153,10 @@ mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh, - ret = ompi_io_ompio_generate_current_file_view(fh, - max_data, - &iov, - &iov_size); + ret = fh->f_generate_current_file_view((struct mca_io_ompio_file_t *)fh, + max_data, + &iov, + &iov_size); if (ret != OMPI_SUCCESS){ fprintf(stderr,"Current File View Generation Error\n"); goto exit; @@ -182,7 +182,7 @@ mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh, } - mca_io_ompio_get_bytes_per_agg ( (int *) &bytes_per_cycle); + fh->f_get_bytes_per_agg ( (int *) &bytes_per_cycle); local_cycles = 
ceil((double)max_data/bytes_per_cycle); @@ -267,16 +267,16 @@ mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh, goto exit; } - ret = ompi_io_ompio_allgather_array (&iov_size, - 1, - MPI_INT, - iovec_count_per_process, - 1, - MPI_INT, - fh->f_aggregator_index, - fh->f_procs_in_group, - fh->f_procs_per_group, - fh->f_comm); + ret = fh->f_allgather_array (&iov_size, + 1, + MPI_INT, + iovec_count_per_process, + 1, + MPI_INT, + fh->f_aggregator_index, + fh->f_procs_in_group, + fh->f_procs_per_group, + fh->f_comm); if( OMPI_SUCCESS != ret){ fprintf(stderr,"iov size allgatherv array!\n"); @@ -303,20 +303,20 @@ mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh, } } - ret = ompi_io_ompio_gatherv_array (local_iov_array, - iov_size, - io_array_type, - global_iov_array, - iovec_count_per_process, - displs, - io_array_type, - fh->f_aggregator_index, - fh->f_procs_in_group, - fh->f_procs_per_group, - fh->f_comm); + ret = fh->f_gatherv_array (local_iov_array, + iov_size, + io_array_type, + global_iov_array, + iovec_count_per_process, + displs, + io_array_type, + fh->f_aggregator_index, + fh->f_procs_in_group, + fh->f_procs_per_group, + fh->f_comm); if (OMPI_SUCCESS != ret){ - fprintf(stderr,"global_iov_array gather error!\n"); - goto exit; + fprintf(stderr,"global_iov_array gather error!\n"); + goto exit; } if (fh->f_procs_in_group[fh->f_aggregator_index] == fh->f_rank) { @@ -427,16 +427,16 @@ mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh, *********************************************************/ /* gather from each process how many bytes each will be sending */ - ompi_io_ompio_gather_array (&bytes_to_write_in_cycle, - 1, - MPI_INT, - bytes_per_process, - 1, - MPI_INT, - fh->f_aggregator_index, - fh->f_procs_in_group, - fh->f_procs_per_group, - fh->f_comm); + fh->f_gather_array (&bytes_to_write_in_cycle, + 1, + MPI_INT, + bytes_per_process, + 1, + MPI_INT, + fh->f_aggregator_index, + fh->f_procs_in_group, + fh->f_procs_per_group, + fh->f_comm); /* For 
each aggregator @@ -901,9 +901,9 @@ mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh, else nentry.aggregator = 0; nentry.nprocs_for_coll = static_num_io_procs; - if (!ompi_io_ompio_full_print_queue(WRITE_PRINT_QUEUE)){ - ompi_io_ompio_register_print_entry(WRITE_PRINT_QUEUE, - nentry); + if (!fh->f_full_print_queue(WRITE_PRINT_QUEUE)){ + fh->f_register_print_entry(WRITE_PRINT_QUEUE, + nentry); } #endif diff --git a/ompi/mca/fcoll/two_phase/fcoll_two_phase_file_read_all.c b/ompi/mca/fcoll/two_phase/fcoll_two_phase_file_read_all.c index d163170b20..50ec126824 100644 --- a/ompi/mca/fcoll/two_phase/fcoll_two_phase_file_read_all.c +++ b/ompi/mca/fcoll/two_phase/fcoll_two_phase_file_read_all.c @@ -143,13 +143,13 @@ mca_fcoll_two_phase_file_read_all (mca_io_ompio_file_t *fh, } if (! (fh->f_flags & OMPIO_CONTIGUOUS_MEMORY)) { - ret = ompi_io_ompio_decode_datatype (fh, - datatype, - count, - buf, - &max_data, - &temp_iov, - &iov_count); + ret = fh->f_decode_datatype ((struct mca_io_ompio_file_t *)fh, + datatype, + count, + buf, + &max_data, + &temp_iov, + &iov_count); if (OMPI_SUCCESS != ret ){ goto exit; } @@ -179,11 +179,11 @@ mca_fcoll_two_phase_file_read_all (mca_io_ompio_file_t *fh, status->_ucount = max_data; } - mca_io_ompio_get_num_aggregators (&two_phase_num_io_procs); + fh->f_get_num_aggregators (&two_phase_num_io_procs); if (-1 == two_phase_num_io_procs ){ - ret = ompi_io_ompio_set_aggregator_props (fh, - two_phase_num_io_procs, - max_data); + ret = fh->f_set_aggregator_props ((struct mca_io_ompio_file_t *)fh, + two_phase_num_io_procs, + max_data); if (OMPI_SUCCESS != ret){ return ret; } @@ -207,10 +207,10 @@ mca_fcoll_two_phase_file_read_all (mca_io_ompio_file_t *fh, aggregator_list[i] = i * fh->f_size / two_phase_num_io_procs; } - ret = ompi_io_ompio_generate_current_file_view (fh, - max_data, - &iov, - &local_count); + ret = fh->f_generate_current_file_view ((struct mca_io_ompio_file_t *)fh, + max_data, + &iov, + &local_count); if (OMPI_SUCCESS != ret){ 
goto exit; @@ -481,9 +481,9 @@ mca_fcoll_two_phase_file_read_all (mca_io_ompio_file_t *fh, nentry.nprocs_for_coll = two_phase_num_io_procs; - if (!ompi_io_ompio_full_print_queue(READ_PRINT_QUEUE)){ - ompi_io_ompio_register_print_entry(READ_PRINT_QUEUE, - nentry); + if (!fh->f_full_print_queue(READ_PRINT_QUEUE)){ + fh->f_register_print_entry(READ_PRINT_QUEUE, + nentry); } #endif @@ -569,7 +569,7 @@ static int two_phase_read_and_exch(mca_io_ompio_file_t *fh, } } - mca_io_ompio_get_bytes_per_agg ( &two_phase_cycle_buffer_size); + fh->f_get_bytes_per_agg ( &two_phase_cycle_buffer_size); ntimes = (int)((end_loc - st_loc + two_phase_cycle_buffer_size)/ two_phase_cycle_buffer_size); diff --git a/ompi/mca/fcoll/two_phase/fcoll_two_phase_file_write_all.c b/ompi/mca/fcoll/two_phase/fcoll_two_phase_file_write_all.c index c218ff86f6..aea5ff2798 100644 --- a/ompi/mca/fcoll/two_phase/fcoll_two_phase_file_write_all.c +++ b/ompi/mca/fcoll/two_phase/fcoll_two_phase_file_write_all.c @@ -174,13 +174,13 @@ mca_fcoll_two_phase_file_write_all (mca_io_ompio_file_t *fh, if (! 
(fh->f_flags & OMPIO_CONTIGUOUS_MEMORY)) { - ret = ompi_io_ompio_decode_datatype (fh, - datatype, - count, - buf, - &max_data, - &temp_iov, - &iov_count); + ret = fh->f_decode_datatype ((struct mca_io_ompio_file_t *)fh, + datatype, + count, + buf, + &max_data, + &temp_iov, + &iov_count); if (OMPI_SUCCESS != ret ){ goto exit; } @@ -212,11 +212,11 @@ mca_fcoll_two_phase_file_write_all (mca_io_ompio_file_t *fh, status->_ucount = max_data; } - mca_io_ompio_get_num_aggregators ( &two_phase_num_io_procs ); + fh->f_get_num_aggregators ( &two_phase_num_io_procs ); if(-1 == two_phase_num_io_procs){ - ret = ompi_io_ompio_set_aggregator_props (fh, - two_phase_num_io_procs, - max_data); + ret = fh->f_set_aggregator_props ((struct mca_io_ompio_file_t *)fh, + two_phase_num_io_procs, + max_data); if ( OMPI_SUCCESS != ret){ return ret; } @@ -244,10 +244,10 @@ mca_fcoll_two_phase_file_write_all (mca_io_ompio_file_t *fh, } - ret = ompi_io_ompio_generate_current_file_view (fh, - max_data, - &iov, - &local_count); + ret = fh->f_generate_current_file_view ((struct mca_io_ompio_file_t*)fh, + max_data, + &iov, + &local_count); if ( OMPI_SUCCESS != ret ){ @@ -529,9 +529,9 @@ mca_fcoll_two_phase_file_write_all (mca_io_ompio_file_t *fh, nentry.aggregator = 0; } nentry.nprocs_for_coll = two_phase_num_io_procs; - if (!ompi_io_ompio_full_print_queue(WRITE_PRINT_QUEUE)){ - ompi_io_ompio_register_print_entry(WRITE_PRINT_QUEUE, - nentry); + if (!fh->f_full_print_queue(WRITE_PRINT_QUEUE)){ + fh->f_register_print_entry(WRITE_PRINT_QUEUE, + nentry); } #endif @@ -622,7 +622,7 @@ static int two_phase_exch_and_write(mca_io_ompio_file_t *fh, } } - mca_io_ompio_get_bytes_per_agg ( &two_phase_cycle_buffer_size ); + fh->f_get_bytes_per_agg ( &two_phase_cycle_buffer_size ); ntimes = (int) ((end_loc - st_loc + two_phase_cycle_buffer_size)/two_phase_cycle_buffer_size); if ((st_loc == -1) && (end_loc == -1)) { diff --git a/ompi/mca/io/ompio/io_ompio.c b/ompi/mca/io/ompio/io_ompio.c index 
a4f9bc5673..8db5f70b26 100644 --- a/ompi/mca/io/ompio/io_ompio.c +++ b/ompi/mca/io/ompio/io_ompio.c @@ -132,7 +132,7 @@ int ompi_io_ompio_set_file_defaults (mca_io_ompio_file_t *fh) } } -int ompi_io_ompio_generate_current_file_view (mca_io_ompio_file_t *fh, +int ompi_io_ompio_generate_current_file_view (struct mca_io_ompio_file_t *fh, size_t max_data, struct iovec **f_iov, int *iov_count) @@ -470,7 +470,7 @@ int ompi_io_ompio_set_explicit_offset (mca_io_ompio_file_t *fh, return OMPI_SUCCESS; } -int ompi_io_ompio_decode_datatype (mca_io_ompio_file_t *fh, +int ompi_io_ompio_decode_datatype (struct mca_io_ompio_file_t *fh, ompi_datatype_t *datatype, int count, void *buf, @@ -903,7 +903,7 @@ int ompi_io_ompio_sort_offlen (mca_io_ompio_offlen_array_t *io_array, return OMPI_SUCCESS; } -int ompi_io_ompio_set_aggregator_props (mca_io_ompio_file_t *fh, +int ompi_io_ompio_set_aggregator_props (struct mca_io_ompio_file_t *fh, int num_aggregators, size_t bytes_per_proc) { diff --git a/ompi/mca/io/ompio/io_ompio.h b/ompi/mca/io/ompio/io_ompio.h index 8bf4b83204..97251ea444 100644 --- a/ompi/mca/io/ompio/io_ompio.h +++ b/ompi/mca/io/ompio/io_ompio.h @@ -174,6 +174,99 @@ typedef struct{ int procs_per_contg_group; } contg; + + +/* + * Function that takes in a datatype and buffer, and decodes that datatype + * into an iovec using the convertor_raw function + */ + +/* forward declaration to keep the compiler happy. 
*/ +struct mca_io_ompio_file_t; +typedef int (*mca_io_ompio_decode_datatype_fn_t) (struct mca_io_ompio_file_t *fh, + struct ompi_datatype_t *datatype, + int count, + void *buf, + size_t *max_data, + struct iovec **iov, + uint32_t *iov_count); +typedef int (*mca_io_ompio_generate_current_file_view_fn_t) (struct mca_io_ompio_file_t *fh, + size_t max_data, + struct iovec **f_iov, + int *iov_count); + +/* + * Function that sorts an io_array according to the offset by filling + * up an array of the indices into the array (HEAP SORT) + */ +typedef int (*mca_io_ompio_sort_fn_t) (mca_io_ompio_io_array_t *io_array, + int num_entries, + int *sorted); + +typedef int (*mca_io_ompio_sort_iovec_fn_t) (struct iovec *iov, + int num_entries, + int *sorted); + +/* collective operations based on list of participating ranks instead of communicators*/ +typedef int (*mca_io_ompio_allgather_array_fn_t) (void *sbuf, + int scount, + ompi_datatype_t *sdtype, + void *rbuf, + int rcount, + ompi_datatype_t *rdtype, + int root_index, + int *procs_in_group, + int procs_per_group, + ompi_communicator_t *comm); + +typedef int (*mca_io_ompio_allgatherv_array_fn_t) (void *sbuf, + int scount, + ompi_datatype_t *sdtype, + void *rbuf, + int *rcounts, + int *disps, + ompi_datatype_t *rdtype, + int root_index, + int *procs_in_group, + int procs_per_group, + ompi_communicator_t *comm); + +typedef int (*mca_io_ompio_gather_array_fn_t) (void *sbuf, + int scount, + ompi_datatype_t *sdtype, + void *rbuf, + int rcount, + ompi_datatype_t *rdtype, + int root_index, + int *procs_in_group, + int procs_per_group, + ompi_communicator_t *comm); +typedef int (*mca_io_ompio_gatherv_array_fn_t) (void *sbuf, + int scount, + ompi_datatype_t *sdtype, + void *rbuf, + int *rcounts, + int *disps, + ompi_datatype_t *rdtype, + int root_index, + int *procs_in_group, + int procs_per_group, + ompi_communicator_t *comm); + +/* functions to retrieve the number of aggregators and the size of the + temporary buffer on aggregators from 
the fcoll modules */ +typedef void (*mca_io_ompio_get_num_aggregators_fn_t) ( int *num_aggregators); +typedef void (*mca_io_ompio_get_bytes_per_agg_fn_t) ( int *bytes_per_agg); +typedef int (*mca_io_ompio_set_aggregator_props_fn_t) (struct mca_io_ompio_file_t *fh, + int num_aggregators, + size_t bytes_per_proc); + + +typedef int (*mca_io_ompio_full_print_queue_fn_t) (int queue_type); +typedef int (*mca_io_ompio_register_print_entry_fn_t) (int queue_type, + print_entry x); + + /** * Back-end structure for MPI_File */ @@ -251,7 +344,24 @@ struct mca_io_ompio_file_t { int f_init_procs_per_group; int *f_init_procs_in_group; - + + mca_io_ompio_decode_datatype_fn_t f_decode_datatype; + mca_io_ompio_generate_current_file_view_fn_t f_generate_current_file_view; + + mca_io_ompio_sort_fn_t f_sort; + mca_io_ompio_sort_iovec_fn_t f_sort_iovec; + + mca_io_ompio_allgather_array_fn_t f_allgather_array; + mca_io_ompio_allgatherv_array_fn_t f_allgatherv_array; + mca_io_ompio_gather_array_fn_t f_gather_array; + mca_io_ompio_gatherv_array_fn_t f_gatherv_array; + + mca_io_ompio_get_num_aggregators_fn_t f_get_num_aggregators; + mca_io_ompio_get_bytes_per_agg_fn_t f_get_bytes_per_agg; + mca_io_ompio_set_aggregator_props_fn_t f_set_aggregator_props; + + mca_io_ompio_full_print_queue_fn_t f_full_print_queue; + mca_io_ompio_register_print_entry_fn_t f_register_print_entry; }; typedef struct mca_io_ompio_file_t mca_io_ompio_file_t; @@ -374,7 +484,7 @@ OMPI_DECLSPEC int ompio_io_ompio_file_get_position (mca_io_ompio_file_t *fh, * Function that takes in a datatype and buffer, and decodes that datatype * into an iovec using the convertor_raw function */ -OMPI_DECLSPEC int ompi_io_ompio_decode_datatype (mca_io_ompio_file_t *fh, +OMPI_DECLSPEC int ompi_io_ompio_decode_datatype (struct mca_io_ompio_file_t *fh, struct ompi_datatype_t *datatype, int count, void *buf, @@ -403,7 +513,7 @@ OMPI_DECLSPEC int ompi_io_ompio_sort_offlen (mca_io_ompio_offlen_array_t *io_arr OMPI_DECLSPEC int 
ompi_io_ompio_set_explicit_offset (mca_io_ompio_file_t *fh, OMPI_MPI_OFFSET_TYPE offset); -OMPI_DECLSPEC int ompi_io_ompio_generate_current_file_view (mca_io_ompio_file_t *fh, +OMPI_DECLSPEC int ompi_io_ompio_generate_current_file_view (struct mca_io_ompio_file_t *fh, size_t max_data, struct iovec **f_iov, int *iov_count); @@ -415,7 +525,7 @@ OMPI_DECLSPEC int ompi_io_ompio_generate_groups (mca_io_ompio_file_t *fh, int **ranks); /*Aggregator selection methods*/ -OMPI_DECLSPEC int ompi_io_ompio_set_aggregator_props (mca_io_ompio_file_t *fh, +OMPI_DECLSPEC int ompi_io_ompio_set_aggregator_props (struct mca_io_ompio_file_t *fh, int num_aggregators, size_t bytes_per_proc); diff --git a/ompi/mca/io/ompio/io_ompio_file_open.c b/ompi/mca/io/ompio/io_ompio_file_open.c index 3295b066de..a2c131f0e3 100644 --- a/ompi/mca/io/ompio/io_ompio_file_open.c +++ b/ompi/mca/io/ompio/io_ompio_file_open.c @@ -121,6 +121,25 @@ ompio_io_ompio_file_open (ompi_communicator_t *comm, ompi_io_ompio_initialize_print_queue(coll_write_time); ompi_io_ompio_initialize_print_queue(coll_read_time); + /* set some function pointers required for fcoll, fbtls and sharedfp modules*/ + ompio_fh->f_decode_datatype=ompi_io_ompio_decode_datatype; + ompio_fh->f_generate_current_file_view=ompi_io_ompio_generate_current_file_view; + + ompio_fh->f_sort=ompi_io_ompio_sort; + ompio_fh->f_sort_iovec=ompi_io_ompio_sort_iovec; + + ompio_fh->f_allgather_array=ompi_io_ompio_allgather_array; + ompio_fh->f_allgatherv_array=ompi_io_ompio_allgatherv_array; + ompio_fh->f_gather_array=ompi_io_ompio_gather_array; + ompio_fh->f_gatherv_array=ompi_io_ompio_gatherv_array; + + ompio_fh->f_get_num_aggregators=mca_io_ompio_get_num_aggregators; + ompio_fh->f_get_bytes_per_agg=mca_io_ompio_get_bytes_per_agg; + ompio_fh->f_set_aggregator_props=ompi_io_ompio_set_aggregator_props; + + ompio_fh->f_full_print_queue=ompi_io_ompio_full_print_queue; + ompio_fh->f_register_print_entry=ompi_io_ompio_register_print_entry; + /* if (MPI_INFO_NULL 
!= info) { ret = ompi_info_dup (info, &ompio_fh->f_info);