
Code cleanup for the time breakdown feature in ompio/fcoll

- make the internal structures follow the Open MPI naming convention
- provide a single flag/macro that controls whether this feature is
  compiled in and used, so that nobody has to modify every single
  fcoll component to enable it (see the sketch below). A configure
  option could be added later if desired.
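
For reference, the guard pattern this change standardizes looks roughly like the sketch below. This is a minimal, compilable illustration, not the real fcoll code: fake_collective_write() and the printf reporting are hypothetical stand-ins, and only the OMPIO_FCOLL_WANT_TIME_BREAKDOWN macro (defined once in io_ompio.h, as in the diff that follows) is taken from the patch.

/* Sketch: one central flag guards all timing instrumentation. */
#include <stdio.h>
#include <mpi.h>

/* In the patch this lives in io_ompio.h; flip it to 1 to compile the
   timing code into every component at once. */
#define OMPIO_FCOLL_WANT_TIME_BREAKDOWN 0

static void fake_collective_write(void)      /* hypothetical stand-in */
{
#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
    double start_write_time = MPI_Wtime();   /* start of the write phase */
#endif
    /* ... the actual write phase of a collective would run here ... */
#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
    double write_time = MPI_Wtime() - start_write_time;
    printf("write phase took %f s\n", write_time);
#endif
}

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);
    fake_collective_write();
    MPI_Finalize();
    return 0;
}

Because the flag is defined in a single header, enabling the time breakdown is a one-line edit there rather than a change to every fcoll component.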
This commit is contained in:
Edgar Gabriel 2015-08-14 08:53:04 -05:00
parent a7dcfb2012
commit 072b18e197
9 changed files with 86 additions and 91 deletions

View file

@@ -9,7 +9,7 @@
  * University of Stuttgart. All rights reserved.
  * Copyright (c) 2004-2005 The Regents of the University of California.
  * All rights reserved.
- * Copyright (c) 2008-2014 University of Houston. All rights reserved.
+ * Copyright (c) 2008-2015 University of Houston. All rights reserved.
  * $COPYRIGHT$
  *
  * Additional copyrights may follow
@@ -29,7 +29,6 @@
 #include "ompi/mca/pml/pml.h"
 #include <unistd.h>
-#define TIME_BREAKDOWN 1
 #define DEBUG_ON 0
 /*Used for loading file-offsets per aggregator*/
@@ -97,11 +96,11 @@
 MPI_Request *send_req=NULL, *recv_req=NULL;
-#if TIME_BREAKDOWN
+#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
 double read_time = 0.0, start_read_time = 0.0, end_read_time = 0.0;
 double rcomm_time = 0.0, start_rcomm_time = 0.0, end_rcomm_time = 0.0;
 double read_exch = 0.0, start_rexch = 0.0, end_rexch = 0.0;
-print_entry nentry;
+mca_io_ompio_print_entry nentry;
 #endif
@@ -333,7 +332,7 @@
 current_index = 0;
-#if TIME_BREAKDOWN
+#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
 start_rexch = MPI_Wtime();
 #endif
 for (index = 0; index < cycles; index++) {
@@ -612,7 +611,7 @@
 }
-#if TIME_BREAKDOWN
+#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
 start_read_time = MPI_Wtime();
 #endif
@@ -624,7 +623,7 @@
 }
 }
-#if TIME_BREAKDOWN
+#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
 end_read_time = MPI_Wtime();
 read_time += end_read_time - start_read_time;
 #endif
@@ -664,7 +663,7 @@
 ret = OMPI_ERR_OUT_OF_RESOURCE;
 goto exit;
 }
-#if TIME_BREAKDOWN
+#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
 start_rcomm_time = MPI_Wtime();
 #endif
 for (i=0;i<fh->f_procs_per_group;i++){
@@ -686,7 +685,7 @@
 goto exit;
 }
 }
-#if TIME_BREAKDOWN
+#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
 end_rcomm_time = MPI_Wtime();
 rcomm_time += end_rcomm_time - start_rcomm_time;
 #endif
@@ -710,7 +709,7 @@
 }
 }
-#if TIME_BREAKDOWN
+#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
 start_rcomm_time = MPI_Wtime();
 #endif
 recv_req = (MPI_Request *) malloc (sizeof (MPI_Request));
@@ -786,7 +785,7 @@
 receive_buf = NULL;
 }
 }
-#if TIME_BREAKDOWN
+#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
 end_rcomm_time = MPI_Wtime();
 rcomm_time += end_rcomm_time - start_rcomm_time;
 #endif
@@ -834,7 +833,7 @@
 }
 }
-#if TIME_BREAKDOWN
+#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
 end_rexch = MPI_Wtime();
 read_exch += end_rexch - start_rexch;
 nentry.time[0] = read_time;

View file

@@ -31,7 +31,6 @@
 #define DEBUG_ON 0
-#define TIME_BREAKDOWN 0
 /*Used for loading file-offsets per aggregator*/
 typedef struct local_io_array{
@@ -99,11 +98,11 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
 int recv_req_count=0;
-#if TIME_BREAKDOWN
+#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
 double write_time = 0.0, start_write_time = 0.0, end_write_time = 0.0;
 double comm_time = 0.0, start_comm_time = 0.0, end_comm_time = 0.0;
 double exch_write = 0.0, start_exch = 0.0, end_exch = 0.0;
-print_entry nentry;
+mca_io_ompio_print_entry nentry;
 #endif
@@ -352,7 +351,7 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
-#if TIME_BREAKDOWN
+#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
 start_exch = MPI_Wtime();
 #endif
@@ -699,7 +698,7 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
 printf("%d : global_count : %ld, bytes_sent : %d\n",
 fh->f_rank,global_count, bytes_sent);
 #endif
-#if TIME_BREAKDOWN
+#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
 start_comm_time = MPI_Wtime();
 #endif
@@ -855,7 +854,7 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
 }
 }
-#if TIME_BREAKDOWN
+#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
 end_comm_time = MPI_Wtime();
 comm_time += (end_comm_time - start_comm_time);
 #endif
@@ -871,7 +870,7 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
 if (fh->f_procs_in_group[fh->f_aggregator_index] == fh->f_rank) {
-#if TIME_BREAKDOWN
+#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
 start_write_time = MPI_Wtime();
 #endif
@@ -933,7 +932,7 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
 goto exit;
 }
 }
-#if TIME_BREAKDOWN
+#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
 end_write_time = MPI_Wtime();
 write_time += end_write_time - start_write_time;
 #endif
@@ -973,7 +972,7 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
 }
-#if TIME_BREAKDOWN
+#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
 end_exch = MPI_Wtime();
 exch_write += end_exch - start_exch;
 nentry.time[0] = write_time;

View file

@@ -9,7 +9,7 @@
  * University of Stuttgart. All rights reserved.
  * Copyright (c) 2004-2005 The Regents of the University of California.
  * All rights reserved.
- * Copyright (c) 2008-2014 University of Houston. All rights reserved.
+ * Copyright (c) 2008-2015 University of Houston. All rights reserved.
  * Copyright (c) 2015 Los Alamos National Security, LLC. All rights reserved.
  *
  * $COPYRIGHT$
@@ -33,7 +33,6 @@
 #include <unistd.h>
 #define DEBUG_ON 0
-#define TIME_BREAKDOWN 0
 typedef struct local_io_array {
@@ -96,11 +95,11 @@ mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh,
 MPI_Request *send_req=NULL, *recv_req=NULL;
 /* MPI_Request *grecv_req=NULL, *gsend_req=NULL; */
-#if TIME_BREAKDOWN
+#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
 double read_time = 0.0, start_read_time = 0.0, end_read_time = 0.0;
 double rcomm_time = 0.0, start_rcomm_time = 0.0, end_rcomm_time = 0.0;
 double read_exch = 0.0, start_rexch = 0.0, end_rexch = 0.0;
-print_entry nentry;
+mca_io_ompio_print_entry nentry;
 #endif
 #if DEBUG_ON
 MPI_Aint gc_in;
@@ -360,7 +359,7 @@ mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh,
 }
 #endif
-#if TIME_BREAKDOWN
+#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
 start_rexch = MPI_Wtime();
 #endif
@@ -456,7 +455,7 @@ mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh,
 goto exit;
 }
-#if TIME_BREAKDOWN
+#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
 start_rcomm_time = MPI_Wtime();
 #endif
@@ -471,7 +470,7 @@ mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh,
 goto exit;
 }
-#if TIME_BREAKDOWN
+#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
 end_rcomm_time = MPI_Wtime();
 rcomm_time += end_rcomm_time - start_rcomm_time;
 #endif
@@ -708,7 +707,7 @@ mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh,
 fh->f_io_array[i].length);
 }
 #endif
-#if TIME_BREAKDOWN
+#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
 start_read_time = MPI_Wtime();
 #endif
@@ -720,7 +719,7 @@ mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh,
 }
 }
-#if TIME_BREAKDOWN
+#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
 end_read_time = MPI_Wtime();
 read_time += end_read_time - start_read_time;
 #endif
@@ -768,7 +767,7 @@ mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh,
 ret = OMPI_ERR_OUT_OF_RESOURCE;
 goto exit;
 }
-#if TIME_BREAKDOWN
+#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
 start_rcomm_time = MPI_Wtime();
 #endif
@@ -805,7 +804,7 @@ mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh,
 goto exit;
 }
-#if TIME_BREAKDOWN
+#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
 end_rcomm_time = MPI_Wtime();
 rcomm_time += end_rcomm_time - start_rcomm_time;
 #endif
@@ -894,7 +893,7 @@ mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh,
 }
 }
 }
-#if TIME_BREAKDOWN
+#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
 end_rexch = MPI_Wtime();
 read_exch += end_rexch - start_rexch;
 nentry.time[0] = read_time;

View file

@@ -9,7 +9,7 @@
  * University of Stuttgart. All rights reserved.
  * Copyright (c) 2004-2005 The Regents of the University of California.
  * All rights reserved.
- * Copyright (c) 2008-2014 University of Houston. All rights reserved.
+ * Copyright (c) 2008-2015 University of Houston. All rights reserved.
  * Copyright (c) 2015 Los Alamos National Security, LLC. All rights reserved.
  *
  * $COPYRIGHT$
@@ -32,7 +32,6 @@
 #include <unistd.h>
 #define DEBUG_ON 0
-#define TIME_BREAKDOWN 0
 typedef struct local_io_array{
 OMPI_MPI_OFFSET_TYPE offset;
@@ -93,11 +92,11 @@ mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh,
 ompi_datatype_t *types[3];
 ompi_datatype_t *io_array_type=MPI_DATATYPE_NULL;
 /*----------------------------------------------*/
-#if TIME_BREAKDOWN
+#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
 double write_time = 0.0, start_write_time = 0.0, end_write_time = 0.0;
 double comm_time = 0.0, start_comm_time = 0.0, end_comm_time = 0.0;
 double exch_write = 0.0, start_exch = 0.0, end_exch = 0.0;
-print_entry nentry;
+mca_io_ompio_print_entry nentry;
 #endif
@@ -349,7 +348,7 @@ mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh,
 }
 #endif
-#if TIME_BREAKDOWN
+#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
 start_exch = MPI_Wtime();
 #endif
@@ -673,7 +672,7 @@ mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh,
 bytes_to_write_in_cycle,
 fh->f_procs_per_group);
 #endif
-#if TIME_BREAKDOWN
+#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
 start_comm_time = MPI_Wtime();
 #endif
 global_buf = (char *) malloc (global_count);
@@ -800,7 +799,7 @@ mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh,
 }
 #endif
 }
-#if TIME_BREAKDOWN
+#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
 end_comm_time = MPI_Wtime();
 comm_time += end_comm_time - start_comm_time;
 #endif
@@ -851,7 +850,7 @@ mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh,
 }
 #endif
-#if TIME_BREAKDOWN
+#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
 start_write_time = MPI_Wtime();
 #endif
@@ -863,7 +862,7 @@ mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh,
 }
 }
-#if TIME_BREAKDOWN
+#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
 end_write_time = MPI_Wtime();
 write_time += end_write_time - start_write_time;
 #endif
@@ -897,7 +896,7 @@ mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh,
 }
 }
-#if TIME_BREAKDOWN
+#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
 end_exch = MPI_Wtime();
 exch_write += end_exch - start_exch;
 nentry.time[0] = write_time;

View file

@@ -31,7 +31,6 @@
 #include <unistd.h>
 #define DEBUG 0
-#define TIME_BREAKDOWN 0
 /* Two Phase implementation from ROMIO ported to OMPIO infrastructure
  * This is pretty much the same as ROMIO's two_phase and based on ROMIO's code
@@ -99,14 +98,14 @@ static void two_phase_fill_user_buffer(mca_io_ompio_file_t *fh,
 MPI_Aint buftype_extent,
 int striping_unit,
 int num_io_procs, int *aggregator_list);
-#if TIME_BREAKDOWN
+#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
 static int isread_aggregator(int rank,
 int nprocs_for_coll,
 int *aggregator_list);
 #endif
-#if TIME_BREAKDOWN
+#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
 double read_time = 0.0, start_read_time = 0.0, end_read_time = 0.0;
 double rcomm_time = 0.0, start_rcomm_time = 0.0, end_rcomm_time = 0.0;
 double read_exch = 0.0, start_rexch = 0.0, end_rexch = 0.0;
@@ -137,8 +136,8 @@ mca_fcoll_two_phase_file_read_all (mca_io_ompio_file_t *fh,
 OMPI_MPI_OFFSET_TYPE *fd_start=NULL, *fd_end=NULL, min_st_offset = 0;
 Flatlist_node *flat_buf=NULL;
 mca_io_ompio_access_array_t *my_req=NULL, *others_req=NULL;
-#if TIME_BREAKDOWN
-print_entry nentry;
+#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
+mca_io_ompio_print_entry nentry;
 #endif
 if (opal_datatype_is_predefined(&datatype->super)) {
 fh->f_flags = fh->f_flags | OMPIO_CONTIGUOUS_MEMORY;
@@ -441,7 +440,7 @@ mca_fcoll_two_phase_file_read_all (mca_io_ompio_file_t *fh,
 count_other_req_procs);
 #endif
-#if TIME_BREAKDOWN
+#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
 start_rexch = MPI_Wtime();
 #endif
@@ -466,7 +465,7 @@ mca_fcoll_two_phase_file_read_all (mca_io_ompio_file_t *fh,
 if (OMPI_SUCCESS != ret){
 goto exit;
 }
-#if TIME_BREAKDOWN
+#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
 end_rexch = MPI_Wtime();
 read_exch += (end_rexch - start_rexch);
 nentry.time[0] = read_time;
@@ -709,7 +708,7 @@ static int two_phase_read_and_exch(mca_io_ompio_file_t *fh,
 if (flag) {
-#if TIME_BREAKDOWN
+#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
 start_read_time = MPI_Wtime();
 #endif
@@ -751,7 +750,7 @@ static int two_phase_read_and_exch(mca_io_ompio_file_t *fh,
 fh->f_io_array = NULL;
 }
-#if TIME_BREAKDOWN
+#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
 end_read_time = MPI_Wtime();
 read_time += (end_read_time - start_read_time);
 #endif
@@ -860,7 +859,7 @@ static int two_phase_exchange_data(mca_io_ompio_file_t *fh,
-#if TIME_BREAKDOWN
+#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
 start_rcomm_time = MPI_Wtime();
 #endif
@@ -1016,7 +1015,7 @@ static int two_phase_exchange_data(mca_io_ompio_file_t *fh,
 free(recv_buf);
 }
-#if TIME_BREAKDOWN
+#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
 end_rcomm_time = MPI_Wtime();
 rcomm_time += (end_rcomm_time - start_rcomm_time);
 #endif
@@ -1183,7 +1182,7 @@ static void two_phase_fill_user_buffer(mca_io_ompio_file_t *fh,
 }
-#if TIME_BREAKDOWN
+#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
 int isread_aggregator(int rank,
 int nprocs_for_coll,
 int *aggregator_list){

View file

@@ -33,7 +33,6 @@
 #include <unistd.h>
 #define DEBUG_ON 0
-#define TIME_BREAKDOWN 0
 /* Two Phase implementation from ROMIO ported to OMPIO infrastructure
  * This is pretty much the same as ROMIO's two_phase and based on ROMIO's code
@@ -111,7 +110,7 @@ static int two_phase_fill_send_buffer(mca_io_ompio_file_t *fh,
 int iter, MPI_Aint buftype_extent,
 int striping_unit,
 int num_io_procs, int *aggregator_list);
-#if TIME_BREAKDOWN
+#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
 static int is_aggregator(int rank,
 int nprocs_for_coll,
 int *aggregator_list);
@@ -131,7 +130,7 @@ void two_phase_heap_merge(mca_io_ompio_access_array_t *others_req,
 /* local function declarations ends here!*/
-#if TIME_BREAKDOWN
+#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
 double write_time = 0.0, start_write_time = 0.0, end_write_time = 0.0;
 double comm_time = 0.0, start_comm_time = 0.0, end_comm_time = 0.0;
 double exch_write = 0.0, start_exch = 0.0, end_exch = 0.0;
@@ -165,8 +164,8 @@ mca_fcoll_two_phase_file_write_all (mca_io_ompio_file_t *fh,
 Flatlist_node *flat_buf=NULL;
 mca_io_ompio_access_array_t *my_req=NULL, *others_req=NULL;
 MPI_Aint send_buf_addr;
-#if TIME_BREAKDOWN
-print_entry nentry;
+#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
+mca_io_ompio_print_entry nentry;
 #endif
@@ -499,7 +498,7 @@ mca_fcoll_two_phase_file_write_all (mca_io_ompio_file_t *fh,
 printf("count_other_req_procs : %d\n", count_other_req_procs);
 #endif
-#if TIME_BREAKDOWN
+#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
 start_exch = MPI_Wtime();
 #endif
@@ -524,7 +523,7 @@ mca_fcoll_two_phase_file_write_all (mca_io_ompio_file_t *fh,
 }
-#if TIME_BREAKDOWN
+#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
 end_exch = MPI_Wtime();
 exch_write += (end_exch - start_exch);
@@ -541,7 +540,7 @@ mca_fcoll_two_phase_file_write_all (mca_io_ompio_file_t *fh,
 }
 nentry.nprocs_for_coll = two_phase_num_io_procs;
 if (!fh->f_full_print_queue(WRITE_PRINT_QUEUE)){
-fh->f_ompio_register_print_entry(WRITE_PRINT_QUEUE,
+fh->f_register_print_entry(WRITE_PRINT_QUEUE,
 nentry);
 }
 #endif
@@ -803,7 +802,7 @@ static int two_phase_exch_and_write(mca_io_ompio_file_t *fh,
 if (flag){
-#if TIME_BREAKDOWN
+#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
 start_write_time = MPI_Wtime();
 #endif
@@ -843,7 +842,7 @@ static int two_phase_exch_and_write(mca_io_ompio_file_t *fh,
 return OMPI_ERROR;
 }
 }
-#if TIME_BREAKDOWN
+#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
 end_write_time = MPI_Wtime();
 write_time += (end_write_time - start_write_time);
 #endif
@@ -952,7 +951,7 @@ static int two_phase_exchage_data(mca_io_ompio_file_t *fh,
 OMPI_MPI_OFFSET_TYPE *srt_off=NULL;
 char **send_buf = NULL;
-#if TIME_BREAKDOWN
+#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
 start_comm_time = MPI_Wtime();
 #endif
 ret = fh->f_comm->c_coll.coll_alltoall (recv_size,
@@ -1193,7 +1192,7 @@ static int two_phase_exchage_data(mca_io_ompio_file_t *fh,
 free(requests);
 }
-#if TIME_BREAKDOWN
+#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
 end_comm_time = MPI_Wtime();
 comm_time += (end_comm_time - start_comm_time);
 #endif
@@ -1494,7 +1493,7 @@ void two_phase_heap_merge( mca_io_ompio_access_array_t *others_req,
 }
 free(a);
 }
-#if TIME_BREAKDOWN
+#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
 int is_aggregator(int rank,
 int nprocs_for_coll,
 int *aggregator_list){

View file

@@ -51,8 +51,8 @@
 #endif
 #include "io_ompio.h"
-print_queue *coll_write_time=NULL;
-print_queue *coll_read_time=NULL;
+mca_io_ompio_print_queue *coll_write_time=NULL;
+mca_io_ompio_print_queue *coll_read_time=NULL;
 static int mca_io_ompio_create_groups(mca_io_ompio_file_t *fh,
@@ -1902,7 +1902,7 @@ void mca_io_ompio_get_bytes_per_agg ( int *bytes_per_agg)
 }
 /* Print queue related function implementations */
-int ompi_io_ompio_set_print_queue (print_queue **q,
+int ompi_io_ompio_set_print_queue (mca_io_ompio_print_queue **q,
 int queue_type){
 int ret = OMPI_SUCCESS;
@@ -1925,7 +1925,7 @@ int ompi_io_ompio_set_print_queue (print_queue **q,
 }
-int ompi_io_ompio_initialize_print_queue(print_queue *q){
+int ompi_io_ompio_initialize_print_queue(mca_io_ompio_print_queue *q){
 int ret = OMPI_SUCCESS;
 q->first = 0;
@@ -1934,10 +1934,10 @@ int ompi_io_ompio_initialize_print_queue(print_queue *q){
 return ret;
 }
 int ompi_io_ompio_register_print_entry (int queue_type,
-print_entry x){
+mca_io_ompio_print_entry x){
 int ret = OMPI_SUCCESS;
-print_queue *q=NULL;
+mca_io_ompio_print_queue *q=NULL;
 ret = ompi_io_ompio_set_print_queue(&q, queue_type);
@@ -1955,10 +1955,10 @@ int ompi_io_ompio_register_print_entry (int queue_type,
 }
 int ompi_io_ompio_unregister_print_entry (int queue_type,
-print_entry *x){
+mca_io_ompio_print_entry *x){
 int ret = OMPI_SUCCESS;
-print_queue *q=NULL;
+mca_io_ompio_print_queue *q=NULL;
 ret = ompi_io_ompio_set_print_queue(&q, queue_type);
 if (ret != OMPI_ERROR){
 if (q->count <= 0){
@@ -1976,7 +1976,7 @@ int ompi_io_ompio_unregister_print_entry (int queue_type,
 int ompi_io_ompio_empty_print_queue(int queue_type){
 int ret = OMPI_SUCCESS;
-print_queue *q=NULL;
+mca_io_ompio_print_queue *q=NULL;
 ret = ompi_io_ompio_set_print_queue(&q, queue_type);
 assert (ret != OMPI_ERROR);
@@ -1992,7 +1992,7 @@ int ompi_io_ompio_full_print_queue(int queue_type){
 int ret = OMPI_SUCCESS;
-print_queue *q=NULL;
+mca_io_ompio_print_queue *q=NULL;
 ret = ompi_io_ompio_set_print_queue(&q, queue_type);
 assert ( ret != OMPI_ERROR);
@@ -2012,7 +2012,7 @@ int ompi_io_ompio_print_time_info(int queue_type,
 double *time_details = NULL, *final_sum = NULL;
 double *final_max = NULL, *final_min = NULL;
 double *final_time_details=NULL;
-print_queue *q=NULL;
+mca_io_ompio_print_queue *q=NULL;
 ret = ompi_io_ompio_set_print_queue(&q, queue_type);

View file

@@ -60,6 +60,7 @@ OMPI_DECLSPEC extern int mca_io_ompio_coll_timing_info;
 #define QUEUESIZE 2048
 #define MCA_IO_DEFAULT_FILE_VIEW_SIZE 4*1024*1024
+#define OMPIO_FCOLL_WANT_TIME_BREAKDOWN 0
 #define OMPIO_MIN(a, b) (((a) < (b)) ? (a) : (b))
 #define OMPIO_MAX(a, b) (((a) < (b)) ? (b) : (a))
@@ -157,14 +158,14 @@ typedef struct {
 double time[3];
 int nprocs_for_coll;
 int aggregator;
-}print_entry;
+}mca_io_ompio_print_entry;
 typedef struct {
-print_entry entry[QUEUESIZE + 1];
+mca_io_ompio_print_entry entry[QUEUESIZE + 1];
 int first;
 int last;
 int count;
-} print_queue;
+} mca_io_ompio_print_queue;
 typedef struct {
 int ndims;
@@ -271,7 +272,7 @@ typedef int (*mca_io_ompio_set_aggregator_props_fn_t) (struct mca_io_ompio_file_
 typedef int (*mca_io_ompio_full_print_queue_fn_t) (int queue_type);
 typedef int (*mca_io_ompio_register_print_entry_fn_t) (int queue_type,
-print_entry x);
+mca_io_ompio_print_entry x);
 /**
@@ -381,8 +382,8 @@ struct mca_io_ompio_data_t {
 };
 typedef struct mca_io_ompio_data_t mca_io_ompio_data_t;
-OMPI_DECLSPEC extern print_queue *coll_write_time;
-OMPI_DECLSPEC extern print_queue *coll_read_time;
+OMPI_DECLSPEC extern mca_io_ompio_print_queue *coll_write_time;
+OMPI_DECLSPEC extern mca_io_ompio_print_queue *coll_read_time;
 /* functions to retrieve the number of aggregators and the size of the
 temporary buffer on aggregators from the fcoll modules */
@@ -657,20 +658,20 @@ OMPI_DECLSPEC int ompi_io_ompio_bcast_array (void *buff,
 ompi_communicator_t *comm);
 OMPI_DECLSPEC int ompi_io_ompio_register_print_entry (int queue_type,
-print_entry x);
+mca_io_ompio_print_entry x);
-OMPI_DECLSPEC int ompi_io_ompio_unregister_print_entry (int queue_type, print_entry *x);
+OMPI_DECLSPEC int ompi_io_ompio_unregister_print_entry (int queue_type, mca_io_ompio_print_entry *x);
 OMPI_DECLSPEC int ompi_io_ompio_empty_print_queue(int queue_type);
 OMPI_DECLSPEC int ompi_io_ompio_full_print_queue(int queue_type);
-OMPI_DECLSPEC int ompi_io_ompio_initialize_print_queue(print_queue *q);
+OMPI_DECLSPEC int ompi_io_ompio_initialize_print_queue(mca_io_ompio_print_queue *q);
 OMPI_DECLSPEC int ompi_io_ompio_print_time_info(int queue_type,
 char *name_operation,
 mca_io_ompio_file_t *fh);
-int ompi_io_ompio_set_print_queue (print_queue **q,
+int ompi_io_ompio_set_print_queue (mca_io_ompio_print_queue **q,
 int queue_type);

View file

@@ -128,8 +128,8 @@ ompio_io_ompio_file_open (ompi_communicator_t *comm,
 ompio_fh->f_split_coll_in_use = false;
 /*Initialize the print_queues queues here!*/
-coll_write_time = (print_queue *) malloc (sizeof(print_queue));
-coll_read_time = (print_queue *) malloc (sizeof(print_queue));
+coll_write_time = (mca_io_ompio_print_queue *) malloc (sizeof(mca_io_ompio_print_queue));
+coll_read_time = (mca_io_ompio_print_queue *) malloc (sizeof(mca_io_ompio_print_queue));
 ompi_io_ompio_initialize_print_queue(coll_write_time);
 ompi_io_ompio_initialize_print_queue(coll_read_time);