
Merge pull request #5306 from edgargabriel/pr/minor-improvements

Pr/minor improvements
This commit is contained in:
Edgar Gabriel 2018-06-20 08:43:41 -05:00 committed by GitHub
parents 4bd745940e 0757cb11a8
commit 7bbeaf30ff
No known key found for this signature
GPG key ID: 4AEE18F83AFDEB23
16 changed files with 232 additions and 170 deletions

View file

@@ -45,6 +45,12 @@
#define OMPIO_MAX(a, b) (((a) < (b)) ? (b) : (a))
#define OMPIO_MCA_GET(fh, name) ((fh)->f_get_mca_parameter_value(#name, strlen(#name)+1))
#define OMPIO_MCA_PRINT_INFO(_fh,_infostr,_infoval, _msg ) { \
int _verbose = _fh->f_get_mca_parameter_value("verbose_info_parsing", strlen("verbose_info_parsing")); \
if ( 1==_verbose && 0==_fh->f_rank ) printf("File: %s info: %s value %s %s\n", _fh->f_filename, _infostr, _infoval, _msg); \
if ( 2==_verbose ) printf("File: %s info: %s value %s %s\n", _fh->f_filename, _infostr, _infoval, _msg); \
}
/*
* Flags
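
A minimal sketch of the call pattern this macro is designed for, mirroring its uses later in this PR: look up an info key, and on success report the value at the configured verbosity (rank 0 only for level 1, all ranks for level 2):

char value[MPI_MAX_INFO_VAL];
int flag;

opal_info_get (fh->f_info, "cb_buffer_size", MPI_MAX_INFO_VAL, value, &flag);
if ( flag ) {
    OMPIO_MCA_PRINT_INFO(fh, "cb_buffer_size", value, "");
}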

View file

@@ -97,8 +97,8 @@ int mca_common_ompio_file_open (ompi_communicator_t *comm,
ompio_fh->f_generate_current_file_view=generate_current_file_view_fn;
ompio_fh->f_get_mca_parameter_value=get_mca_parameter_value_fn;
mca_common_ompio_set_file_defaults (ompio_fh);
ompio_fh->f_filename = filename;
mca_common_ompio_set_file_defaults (ompio_fh);
ompio_fh->f_split_coll_req = NULL;
ompio_fh->f_split_coll_in_use = false;
@@ -400,70 +400,79 @@ int mca_common_ompio_set_file_defaults (ompio_file_t *fh)
{
if (NULL != fh) {
char char_stripe[MPI_MAX_INFO_VAL];
ompi_datatype_t *types[2];
int blocklen[2] = {1, 1};
ptrdiff_t d[2], base;
int i, flag;
fh->f_io_array = NULL;
fh->f_perm = OMPIO_PERM_NULL;
fh->f_flags = 0;
fh->f_bytes_per_agg = OMPIO_MCA_GET(fh, bytes_per_agg);
opal_info_get (fh->f_info, "cb_buffer_size", MPI_MAX_INFO_VAL, char_stripe, &flag);
if ( flag ) {
/* Info object trumps mca parameter value */
sscanf ( char_stripe, "%d", &fh->f_bytes_per_agg );
OMPIO_MCA_PRINT_INFO(fh, "cb_buffer_size", char_stripe, "");
}
fh->f_atomicity = 0;
fh->f_fs_block_size = 4096;
fh->f_offset = 0;
fh->f_disp = 0;
fh->f_position_in_file_view = 0;
fh->f_index_in_file_view = 0;
fh->f_total_bytes = 0;
fh->f_init_procs_per_group = -1;
fh->f_init_procs_in_group = NULL;
fh->f_procs_per_group = -1;
fh->f_procs_in_group = NULL;
fh->f_init_num_aggrs = -1;
fh->f_init_aggr_list = NULL;
fh->f_num_aggrs = -1;
fh->f_aggr_list = NULL;
/* Default file View */
fh->f_iov_type = MPI_DATATYPE_NULL;
fh->f_stripe_size = 0;
/*Decoded iovec of the file-view*/
fh->f_decoded_iov = NULL;
fh->f_etype = MPI_DATATYPE_NULL;
fh->f_filetype = MPI_DATATYPE_NULL;
fh->f_orig_filetype = MPI_DATATYPE_NULL;
fh->f_datarep = NULL;
/*Create a derived datatype for the created iovec */
types[0] = &ompi_mpi_long.dt;
types[1] = &ompi_mpi_long.dt;
d[0] = (ptrdiff_t) fh->f_decoded_iov;
d[1] = (ptrdiff_t) &fh->f_decoded_iov[0].iov_len;
base = d[0];
for (i=0 ; i<2 ; i++) {
d[i] -= base;
}
ompi_datatype_create_struct (2,
blocklen,
d,
types,
&fh->f_iov_type);
ompi_datatype_commit (&fh->f_iov_type);
return OMPI_SUCCESS;
}
else {
return OMPI_ERROR;
}
}
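
With the info object now taking precedence over the bytes_per_agg MCA parameter, the aggregation buffer can be sized per file from user code; a minimal standalone sketch using only standard MPI calls (file name and buffer size are illustrative):

#include <mpi.h>

int main (int argc, char **argv)
{
    MPI_Info info;
    MPI_File fh;

    MPI_Init (&argc, &argv);
    MPI_Info_create (&info);
    /* Overrides the io_ompio_bytes_per_agg MCA setting for this file only */
    MPI_Info_set (info, "cb_buffer_size", "33554432");
    MPI_File_open (MPI_COMM_WORLD, "datafile",
                   MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &fh);
    MPI_File_close (&fh);
    MPI_Info_free (&info);
    MPI_Finalize ();
    return 0;
}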

View file

@@ -174,17 +174,19 @@ int mca_common_ompio_set_view (ompio_file_t *fh,
}
}
char char_stripe[MPI_MAX_INFO_KEY];
char char_stripe[MPI_MAX_INFO_VAL];
/* Check the info object set during File_open */
opal_info_get (fh->f_info, "cb_nodes", MPI_MAX_INFO_VAL, char_stripe, &flag);
if ( flag ) {
sscanf ( char_stripe, "%d", &num_cb_nodes );
OMPIO_MCA_PRINT_INFO(fh, "cb_nodes", char_stripe, "");
}
else {
/* Check the info object set during file_set_view */
opal_info_get (info, "cb_nodes", MPI_MAX_INFO_VAL, char_stripe, &flag);
if ( flag ) {
sscanf ( char_stripe, "%d", &num_cb_nodes );
OMPIO_MCA_PRINT_INFO(fh, "cb_nodes", char_stripe, "");
}
}
@@ -274,13 +276,39 @@ int mca_common_ompio_set_view (ompio_file_t *fh,
ompi_datatype_destroy ( &newfiletype );
}
bool info_is_set=false;
opal_info_get (fh->f_info, "collective_buffering", MPI_MAX_INFO_VAL, char_stripe, &flag);
if ( flag ) {
if ( 0 == strncmp ( char_stripe, "false", strlen("false") )){
info_is_set = true;
OMPIO_MCA_PRINT_INFO(fh, "collective_buffering", char_stripe, "enforcing using individual fcoll component");
} else {
OMPIO_MCA_PRINT_INFO(fh, "collective_buffering", char_stripe, "");
}
} else {
opal_info_get (info, "collective_buffering", MPI_MAX_INFO_VAL, char_stripe, &flag);
if ( flag ) {
if ( 0 == strncmp ( char_stripe, "false", strlen("false") )){
info_is_set = true;
OMPIO_MCA_PRINT_INFO(fh, "collective_buffering", char_stripe, "enforcing using individual fcoll component");
} else {
OMPIO_MCA_PRINT_INFO(fh, "collective_buffering", char_stripe, "");
}
}
}
ret = mca_fcoll_base_file_select (fh, NULL);
mca_fcoll_base_component_t *preferred =NULL;
if ( info_is_set ) {
/* user requested using an info object to disable collective buffering. */
preferred = mca_fcoll_base_component_lookup ("individual");
}
ret = mca_fcoll_base_file_select (fh, (mca_base_component_t *)preferred);
if ( OMPI_SUCCESS != ret ) {
opal_output(1, "mca_common_ompio_set_view: mca_fcoll_base_file_select() failed\n");
goto exit;
}
if ( NULL != fh->f_sharedfp ) {
ret = fh->f_sharedfp->sharedfp_seek( fh, 0, MPI_SEEK_SET);
}
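
On the user side, the same mechanism disables collective buffering through the standard MPI-IO hint; a hedged sketch (fragment; assumes an already opened MPI_File fh and, for brevity, uses the etype as the filetype):

MPI_Info info;
MPI_Info_create (&info);
/* "false" makes OMPIO enforce the individual (non-collective) fcoll component */
MPI_Info_set (info, "collective_buffering", "false");
MPI_File_set_view (fh, 0, MPI_INT, MPI_INT, "native", info);
MPI_Info_free (&info);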

View file

@@ -53,6 +53,8 @@ OMPI_DECLSPEC int mca_fcoll_base_init_file (struct ompio_file_t *file);
OMPI_DECLSPEC int mca_fcoll_base_get_param (struct ompio_file_t *file, int keyval);
OMPI_DECLSPEC int ompi_fcoll_base_sort_iovec (struct iovec *iov, int num_entries, int *sorted);
OMPI_DECLSPEC mca_fcoll_base_component_t* mca_fcoll_base_component_lookup(const char* name);
/*
* Globals
*/

View file

@@ -44,3 +44,27 @@
MCA_BASE_FRAMEWORK_DECLARE(ompi, fcoll, NULL, NULL, NULL, NULL,
mca_fcoll_base_static_components, 0);
/**
* Traverses through the list of available components, calling their init
* functions until it finds the component that has the specified name. It
* then returns the found component.
*
* @param name the name of the component that is being searched for.
* @retval mca_fcoll_base_component_t* pointer to the requested component
* @retval NULL if the requested component is not found
*/
mca_fcoll_base_component_t* mca_fcoll_base_component_lookup(const char* name)
{
/* Traverse the list of available components; call their init functions. */
mca_base_component_list_item_t *cli;
OPAL_LIST_FOREACH(cli, &ompi_fcoll_base_framework.framework_components, mca_base_component_list_item_t) {
mca_fcoll_base_component_t* component = (mca_fcoll_base_component_t *) cli->cli_component;
if(strcmp(component->fcollm_version.mca_component_name,
name) == 0) {
return component;
}
}
return NULL;
}
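
As used earlier in this change set, the caller feeds the result straight into fcoll selection; a NULL return simply leaves the normal selection logic in charge:

mca_fcoll_base_component_t *preferred = mca_fcoll_base_component_lookup ("individual");
ret = mca_fcoll_base_file_select (fh, (mca_base_component_t *)preferred);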

View file

@@ -337,12 +337,7 @@ mca_fcoll_dynamic_file_read_all (ompio_file_t *fh,
*** 6. Determine the number of cycles required to execute this
*** operation
*************************************************************/
bytes_per_cycle = fh->f_get_mca_parameter_value ("bytes_per_agg", strlen ("bytes_per_agg"));
if ( OMPI_ERR_MAX == bytes_per_cycle ) {
ret = OMPI_ERROR;
goto exit;
}
bytes_per_cycle = fh->f_bytes_per_agg;
cycles = ceil((double)total_bytes/bytes_per_cycle);
if ( my_aggregator == fh->f_rank) {

View file

@@ -361,11 +361,7 @@ mca_fcoll_dynamic_file_write_all (ompio_file_t *fh,
*** 6. Determine the number of cycles required to execute this
*** operation
*************************************************************/
bytes_per_cycle = fh->f_get_mca_parameter_value ("bytes_per_agg", strlen ("bytes_per_agg"));
if ( OMPI_ERR_MAX == bytes_per_cycle ) {
ret = OMPI_ERROR;
goto exit;
}
bytes_per_cycle = fh->f_bytes_per_agg;
cycles = ceil((double)total_bytes/bytes_per_cycle);
if (my_aggregator == fh->f_rank) {

View file

@@ -337,11 +337,7 @@ mca_fcoll_dynamic_gen2_file_read_all (ompio_file_t *fh,
*** 6. Determine the number of cycles required to execute this
*** operation
*************************************************************/
bytes_per_cycle = fh->f_get_mca_parameter_value ("bytes_per_agg", strlen ("bytes_per_agg"));
if ( OMPI_ERR_MAX == bytes_per_cycle ) {
ret = OMPI_ERROR;
goto exit;
}
bytes_per_cycle = fh->f_bytes_per_agg;
cycles = ceil((double)total_bytes/bytes_per_cycle);
if ( my_aggregator == fh->f_rank) {

View file

@@ -159,11 +159,8 @@ int mca_fcoll_dynamic_gen2_file_write_all (ompio_file_t *fh,
/**************************************************************************
** 1. In case the data is not contiguous in memory, decode it into an iovec
**************************************************************************/
bytes_per_cycle = fh->f_get_mca_parameter_value ("bytes_per_agg", strlen ("bytes_per_agg"));
if ( OMPI_ERR_MAX == bytes_per_cycle ) {
ret = OMPI_ERROR;
goto exit;
}
bytes_per_cycle = fh->f_bytes_per_agg;
/* since we want to overlap 2 iterations, define the bytes_per_cycle to be half of what
the user requested */
bytes_per_cycle =bytes_per_cycle/2;
@@ -258,42 +255,53 @@ int mca_fcoll_dynamic_gen2_file_write_all (ompio_file_t *fh,
/**************************************************************************
** 3. Determine the total amount of data to be written and no. of cycles
**************************************************************************/
total_bytes_per_process = (MPI_Aint*)malloc
(dynamic_gen2_num_io_procs * fh->f_procs_per_group*sizeof(MPI_Aint));
if (NULL == total_bytes_per_process) {
opal_output (1, "OUT OF MEMORY\n");
ret = OMPI_ERR_OUT_OF_RESOURCE;
goto exit;
}
#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
start_comm_time = MPI_Wtime();
#endif
if ( 1 == mca_fcoll_dynamic_gen2_num_groups ) {
ret = fh->f_comm->c_coll->coll_allgather (broken_total_lengths,
dynamic_gen2_num_io_procs,
MPI_LONG,
total_bytes_per_process,
dynamic_gen2_num_io_procs,
MPI_LONG,
fh->f_comm,
fh->f_comm->c_coll->coll_allgather_module);
ret = fh->f_comm->c_coll->coll_allreduce (MPI_IN_PLACE,
broken_total_lengths,
dynamic_gen2_num_io_procs,
MPI_LONG,
MPI_SUM,
fh->f_comm,
fh->f_comm->c_coll->coll_allreduce_module);
if( OMPI_SUCCESS != ret){
goto exit;
}
}
else {
ret = ompi_fcoll_base_coll_allgather_array (broken_total_lengths,
dynamic_gen2_num_io_procs,
MPI_LONG,
total_bytes_per_process,
dynamic_gen2_num_io_procs,
MPI_LONG,
0,
fh->f_procs_in_group,
fh->f_procs_per_group,
fh->f_comm);
}
total_bytes_per_process = (MPI_Aint*)malloc
(dynamic_gen2_num_io_procs * fh->f_procs_per_group*sizeof(MPI_Aint));
if (NULL == total_bytes_per_process) {
opal_output (1, "OUT OF MEMORY\n");
ret = OMPI_ERR_OUT_OF_RESOURCE;
goto exit;
}
if( OMPI_SUCCESS != ret){
goto exit;
ret = ompi_fcoll_base_coll_allgather_array (broken_total_lengths,
dynamic_gen2_num_io_procs,
MPI_LONG,
total_bytes_per_process,
dynamic_gen2_num_io_procs,
MPI_LONG,
0,
fh->f_procs_in_group,
fh->f_procs_per_group,
fh->f_comm);
if( OMPI_SUCCESS != ret){
goto exit;
}
for ( i=0; i<dynamic_gen2_num_io_procs; i++ ) {
broken_total_lengths[i] = 0;
for (j=0 ; j<fh->f_procs_per_group ; j++) {
broken_total_lengths[i] += total_bytes_per_process[j*dynamic_gen2_num_io_procs + i];
}
}
if (NULL != total_bytes_per_process) {
free (total_bytes_per_process);
total_bytes_per_process = NULL;
}
}
#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
end_comm_time = MPI_Wtime();
@@ -302,10 +310,6 @@ int mca_fcoll_dynamic_gen2_file_write_all (ompio_file_t *fh,
cycles=0;
for ( i=0; i<dynamic_gen2_num_io_procs; i++ ) {
broken_total_lengths[i] = 0;
for (j=0 ; j<fh->f_procs_per_group ; j++) {
broken_total_lengths[i] += total_bytes_per_process[j*dynamic_gen2_num_io_procs + i];
}
#if DEBUG_ON
printf("%d: Overall broken_total_lengths[%d] = %ld\n", fh->f_rank, i, broken_total_lengths[i]);
#endif
@@ -314,10 +318,6 @@ int mca_fcoll_dynamic_gen2_file_write_all (ompio_file_t *fh,
}
}
if (NULL != total_bytes_per_process) {
free (total_bytes_per_process);
total_bytes_per_process = NULL;
}
result_counts = (int *) malloc ( dynamic_gen2_num_io_procs * fh->f_procs_per_group * sizeof(int) );
if ( NULL == result_counts ) {
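
The rewrite above drops the allgather of every rank's per-aggregator lengths plus the local summation loop in favor of a single in-place reduction when only one group exists; a minimal standalone sketch (illustrative values) of why the two formulations agree:

#include <stdio.h>
#include <mpi.h>

int main (int argc, char **argv)
{
    /* this rank's contribution per aggregator, as in broken_total_lengths */
    long lengths[2] = {100, 200};

    MPI_Init (&argc, &argv);
    /* One call computes the per-aggregator totals on every rank, with no
       num_procs * num_aggregators scratch buffer and no summation loop. */
    MPI_Allreduce (MPI_IN_PLACE, lengths, 2, MPI_LONG, MPI_SUM, MPI_COMM_WORLD);
    printf ("totals: %ld %ld\n", lengths[0], lengths[1]);
    MPI_Finalize ();
    return 0;
}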

View file

@@ -579,11 +579,7 @@ static int two_phase_read_and_exch(ompio_file_t *fh,
}
}
two_phase_cycle_buffer_size = fh->f_get_mca_parameter_value ("bytes_per_agg", strlen ("bytes_per_agg"));
if ( OMPI_ERR_MAX == two_phase_cycle_buffer_size ) {
ret = OMPI_ERROR;
goto exit;
}
two_phase_cycle_buffer_size = fh->f_bytes_per_agg;
ntimes = (int)((end_loc - st_loc + two_phase_cycle_buffer_size)/
two_phase_cycle_buffer_size);

View file

@@ -646,11 +646,7 @@ static int two_phase_exch_and_write(ompio_file_t *fh,
}
}
two_phase_cycle_buffer_size = fh->f_get_mca_parameter_value ("bytes_per_agg", strlen ("bytes_per_agg"));
if ( OMPI_ERR_MAX == two_phase_cycle_buffer_size ) {
ret = OMPI_ERROR;
goto exit;
}
two_phase_cycle_buffer_size = fh->f_bytes_per_agg;
ntimes = (int) ((end_loc - st_loc + two_phase_cycle_buffer_size)/two_phase_cycle_buffer_size);
if ((st_loc == -1) && (end_loc == -1)) {

View file

@@ -337,11 +337,7 @@ mca_fcoll_vulcan_file_read_all (ompio_file_t *fh,
*** 6. Determine the number of cycles required to execute this
*** operation
*************************************************************/
bytes_per_cycle = fh->f_get_mca_parameter_value ("bytes_per_agg", strlen ("bytes_per_agg"));
if ( OMPI_ERR_MAX == bytes_per_cycle ) {
ret = OMPI_ERROR;
goto exit;
}
bytes_per_cycle = fh->f_bytes_per_agg;
cycles = ceil((double)total_bytes/bytes_per_cycle);
if ( my_aggregator == fh->f_rank) {

View file

@@ -167,11 +167,7 @@ int mca_fcoll_vulcan_file_write_all (ompio_file_t *fh,
ret = OMPI_ERROR;
goto exit;
}
bytes_per_cycle = fh->f_get_mca_parameter_value ("bytes_per_agg", strlen ("bytes_per_agg"));
if ( OMPI_ERR_MAX == bytes_per_cycle ) {
ret = OMPI_ERROR;
goto exit;
}
bytes_per_cycle = fh->f_bytes_per_agg;
if( (1 == mca_fcoll_vulcan_async_io) && (NULL == fh->f_fbtl->fbtl_ipwritev) ) {
opal_output (1, "vulcan_write_all: fbtl Does NOT support ipwritev() (asynchrounous write) \n");
@@ -256,28 +252,31 @@ int mca_fcoll_vulcan_file_write_all (ompio_file_t *fh,
/**************************************************************************
** 3. Determine the total amount of data to be written and no. of cycles
**************************************************************************/
total_bytes_per_process = (MPI_Aint*)malloc
(fh->f_num_aggrs * fh->f_procs_per_group*sizeof(MPI_Aint));
if (NULL == total_bytes_per_process) {
opal_output (1, "OUT OF MEMORY\n");
ret = OMPI_ERR_OUT_OF_RESOURCE;
goto exit;
}
#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
start_comm_time = MPI_Wtime();
#endif
if ( 1 == mca_fcoll_vulcan_num_groups ) {
ret = fh->f_comm->c_coll->coll_allgather (broken_total_lengths,
fh->f_num_aggrs,
MPI_LONG,
total_bytes_per_process,
ret = fh->f_comm->c_coll->coll_allreduce (MPI_IN_PLACE,
broken_total_lengths,
fh->f_num_aggrs,
MPI_LONG,
MPI_SUM,
fh->f_comm,
fh->f_comm->c_coll->coll_allgather_module);
fh->f_comm->c_coll->coll_allreduce_module);
if( OMPI_SUCCESS != ret){
goto exit;
}
}
else {
total_bytes_per_process = (MPI_Aint*)malloc
(fh->f_num_aggrs * fh->f_procs_per_group*sizeof(MPI_Aint));
if (NULL == total_bytes_per_process) {
opal_output (1, "OUT OF MEMORY\n");
ret = OMPI_ERR_OUT_OF_RESOURCE;
goto exit;
}
ret = ompi_fcoll_base_coll_allgather_array (broken_total_lengths,
fh->f_num_aggrs,
MPI_LONG,
@@ -288,11 +287,22 @@ int mca_fcoll_vulcan_file_write_all (ompio_file_t *fh,
fh->f_procs_in_group,
fh->f_procs_per_group,
fh->f_comm);
if( OMPI_SUCCESS != ret){
goto exit;
}
for ( i=0; i<fh->f_num_aggrs; i++ ) {
broken_total_lengths[i] = 0;
for (j=0 ; j<fh->f_procs_per_group ; j++) {
broken_total_lengths[i] += total_bytes_per_process[j*fh->f_num_aggrs + i];
}
}
if (NULL != total_bytes_per_process) {
free (total_bytes_per_process);
total_bytes_per_process = NULL;
}
}
if( OMPI_SUCCESS != ret){
goto exit;
}
#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
end_comm_time = MPI_Wtime();
comm_time += (end_comm_time - start_comm_time);
@@ -300,10 +310,6 @@ int mca_fcoll_vulcan_file_write_all (ompio_file_t *fh,
cycles=0;
for ( i=0; i<fh->f_num_aggrs; i++ ) {
broken_total_lengths[i] = 0;
for (j=0 ; j<fh->f_procs_per_group ; j++) {
broken_total_lengths[i] += total_bytes_per_process[j*fh->f_num_aggrs + i];
}
#if DEBUG_ON
printf("%d: Overall broken_total_lengths[%d] = %ld\n", fh->f_rank, i, broken_total_lengths[i]);
#endif
@@ -312,11 +318,6 @@ int mca_fcoll_vulcan_file_write_all (ompio_file_t *fh,
}
}
if (NULL != total_bytes_per_process) {
free (total_bytes_per_process);
total_bytes_per_process = NULL;
}
result_counts = (int *) malloc ( fh->f_num_aggrs * fh->f_procs_per_group * sizeof(int) );
if ( NULL == result_counts ) {
ret = OMPI_ERR_OUT_OF_RESOURCE;

View file

@@ -521,7 +521,10 @@ int ompi_io_ompio_sort_offlen (mca_io_ompio_offlen_array_t *io_array,
int mca_io_ompio_get_mca_parameter_value ( char *mca_parameter_name, int name_length )
{
if ( !strncmp ( mca_parameter_name, "num_aggregators", name_length )) {
if ( !strncmp ( mca_parameter_name, "verbose_info_parsing", name_length )) {
return mca_io_ompio_verbose_info_parsing;
}
else if ( !strncmp ( mca_parameter_name, "num_aggregators", name_length )) {
return mca_io_ompio_num_aggregators;
}
else if ( !strncmp ( mca_parameter_name, "bytes_per_agg", name_length )) {

View file

@@ -52,6 +52,7 @@ extern int mca_io_ompio_grouping_option;
extern int mca_io_ompio_max_aggregators_ratio;
extern int mca_io_ompio_aggregators_cutoff_threshold;
extern int mca_io_ompio_overwrite_amode;
extern int mca_io_ompio_verbose_info_parsing;
OMPI_DECLSPEC extern int mca_io_ompio_coll_timing_info;

View file

@@ -42,6 +42,7 @@ int mca_io_ompio_coll_timing_info = 0;
int mca_io_ompio_max_aggregators_ratio=8;
int mca_io_ompio_aggregators_cutoff_threshold=3;
int mca_io_ompio_overwrite_amode = 1;
int mca_io_ompio_verbose_info_parsing = 0;
int mca_io_ompio_grouping_option=5;
@@ -240,6 +241,18 @@ static int register_component(void)
MCA_BASE_VAR_SCOPE_READONLY,
&mca_io_ompio_overwrite_amode);
mca_io_ompio_verbose_info_parsing = 0;
(void) mca_base_component_var_register(&mca_io_ompio_component.io_version,
"verbose_info_parsing",
"Provide visual output when parsing info objects "
"0: no verbose output (default) "
"1: verbose output by rank 0 "
"2: verbose output by all ranks ",
MCA_BASE_VAR_TYPE_INT, NULL, 0, 0,
OPAL_INFO_LVL_9,
MCA_BASE_VAR_SCOPE_READONLY,
&mca_io_ompio_verbose_info_parsing);
return OMPI_SUCCESS;
}
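
Like any other io/ompio option, the new parameter follows the standard MCA naming, so it can be enabled without code changes via mpirun --mca io_ompio_verbose_info_parsing 1; a hedged sketch of the equivalent OMPI_MCA_ environment-variable convention, set before MPI_Init:

#include <stdlib.h>
#include <mpi.h>

int main (int argc, char **argv)
{
    /* OMPI_MCA_<framework>_<component>_<variable>; must be set before MPI_Init */
    setenv ("OMPI_MCA_io_ompio_verbose_info_parsing", "1", 1);
    MPI_Init (&argc, &argv);
    /* ... MPI-IO calls here would now report which info keys were parsed ... */
    MPI_Finalize ();
    return 0;
}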