
step 0: clean up indenting and space vs. tabs

This commit is contained in:
Edgar Gabriel 2015-08-31 12:41:11 -05:00
parent d8cb3fe705
commit cf1e4e0d35
2 changed files with 1757 additions and 1757 deletions

View file

@@ -17,41 +17,41 @@
* $HEADER$
*/
#include "ompi_config.h"
#include "fcoll_dynamic.h"
#include "ompi_config.h"
#include "fcoll_dynamic.h"
#include "mpi.h"
#include "ompi/constants.h"
#include "ompi/mca/fcoll/fcoll.h"
#include "ompi/mca/io/ompio/io_ompio.h"
#include "ompi/mca/io/io.h"
#include "math.h"
#include "ompi/mca/pml/pml.h"
#include <unistd.h>
#include "mpi.h"
#include "ompi/constants.h"
#include "ompi/mca/fcoll/fcoll.h"
#include "ompi/mca/io/ompio/io_ompio.h"
#include "ompi/mca/io/io.h"
#include "math.h"
#include "ompi/mca/pml/pml.h"
#include <unistd.h>
#define DEBUG_ON 0
#define DEBUG_ON 0
/*Used for loading file-offsets per aggregator*/
typedef struct local_io_array{
/*Used for loading file-offsets per aggregator*/
typedef struct local_io_array{
OMPI_MPI_OFFSET_TYPE offset;
MPI_Aint length;
int process_id;
-}local_io_array;
+}local_io_array;
-static int read_heap_sort (local_io_array *io_array,
+static int read_heap_sort (local_io_array *io_array,
int num_entries,
int *sorted);
-int
-mca_fcoll_dynamic_file_read_all (mca_io_ompio_file_t *fh,
+int
+mca_fcoll_dynamic_file_read_all (mca_io_ompio_file_t *fh,
void *buf,
int count,
struct ompi_datatype_t *datatype,
ompi_status_public_t *status)
-{
+{
MPI_Aint position = 0;
MPI_Aint total_bytes = 0; /* total bytes to be read */
MPI_Aint bytes_to_read_in_cycle = 0; /* left to be read in a cycle*/
@@ -96,12 +96,12 @@
MPI_Request *send_req=NULL, *recv_req=NULL;
-#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
+#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
double read_time = 0.0, start_read_time = 0.0, end_read_time = 0.0;
double rcomm_time = 0.0, start_rcomm_time = 0.0, end_rcomm_time = 0.0;
double read_exch = 0.0, start_rexch = 0.0, end_rexch = 0.0;
mca_io_ompio_print_entry nentry;
-#endif
+#endif
// if (opal_datatype_is_contiguous_memory_layout(&datatype->super,1)) {
@@ -225,7 +225,7 @@
displs[i] = displs[i-1] + fview_count[i-1];
}
-#if DEBUG_ON
+#if DEBUG_ON
if (fh->f_procs_in_group[fh->f_aggregator_index] == fh->f_rank) {
for (i=0 ; i<fh->f_procs_per_group ; i++) {
printf ("%d: PROCESS: %d ELEMENTS: %d DISPLS: %d\n",
@@ -235,7 +235,7 @@
displs[i]);
}
}
-#endif
+#endif
/* allocate the global iovec */
if (0 != total_fview_count) {
@@ -280,7 +280,7 @@
local_iov_array = NULL;
}
-#if DEBUG_ON
+#if DEBUG_ON
if (fh->f_procs_in_group[fh->f_aggregator_index] == fh->f_rank) {
for (i=0 ; i<total_fview_count ; i++) {
printf("%d: OFFSET: %p LENGTH: %d\n",
@@ -289,7 +289,7 @@
global_iov_array[sorted[i]].iov_len);
}
}
-#endif
+#endif
if (fh->f_procs_in_group[fh->f_aggregator_index] == fh->f_rank) {
@@ -332,9 +332,9 @@
current_index = 0;
-#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
+#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
start_rexch = MPI_Wtime();
-#endif
+#endif
for (index = 0; index < cycles; index++) {
/* Getting ready for next cycle
Initializing and freeing buffers */
@@ -398,14 +398,14 @@
bytes_to_read_in_cycle = bytes_per_cycle;
}
-#if DEBUG_ON
+#if DEBUG_ON
if (fh->f_procs_in_group[fh->f_aggregator_index] == fh->f_rank) {
printf ("****%d: CYCLE %d Bytes %d**********\n",
fh->f_rank,
index,
bytes_to_write_in_cycle);
}
-#endif
+#endif
/* Calculate how much data will be contributed in this cycle
by each process*/
@@ -611,9 +611,9 @@
}
-#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
+#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
start_read_time = MPI_Wtime();
-#endif
+#endif
if (fh->f_num_of_io_entries) {
if ( 0 > fh->f_fbtl->fbtl_preadv (fh)) {
@@ -623,10 +623,10 @@
}
}
-#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
+#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
end_read_time = MPI_Wtime();
read_time += end_read_time - start_read_time;
-#endif
+#endif
/**********************************************************
******************** DONE READING ************************
*********************************************************/
@@ -663,9 +663,9 @@
ret = OMPI_ERR_OUT_OF_RESOURCE;
goto exit;
}
-#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
+#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
start_rcomm_time = MPI_Wtime();
-#endif
+#endif
for (i=0;i<fh->f_procs_per_group;i++){
ompi_datatype_create_hindexed(disp_index[i],
blocklen_per_process[i],
@@ -685,10 +685,10 @@
goto exit;
}
}
-#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
+#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
end_rcomm_time = MPI_Wtime();
rcomm_time += end_rcomm_time - start_rcomm_time;
-#endif
+#endif
}
/**********************************************************
@@ -709,9 +709,9 @@
}
}
-#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
+#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
start_rcomm_time = MPI_Wtime();
-#endif
+#endif
recv_req = (MPI_Request *) malloc (sizeof (MPI_Request));
if (NULL == recv_req){
opal_output (1, "OUT OF MEMORY\n");
@@ -836,7 +836,7 @@
}
}
-#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
+#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
end_rexch = MPI_Wtime();
read_exch += end_rexch - start_rexch;
nentry.time[0] = read_time;
@@ -851,9 +851,9 @@
fh->f_register_print_entry(READ_PRINT_QUEUE,
nentry);
}
-#endif
+#endif
-exit:
+exit:
if (!(fh->f_flags & OMPIO_CONTIGUOUS_MEMORY)) {
if (NULL != receive_buf) {
free (receive_buf);
@@ -943,13 +943,13 @@
}
return ret;
-}
+}
-static int read_heap_sort (local_io_array *io_array,
+static int read_heap_sort (local_io_array *io_array,
int num_entries,
int *sorted)
-{
+{
int i = 0;
int j = 0;
int left = 0;
@@ -1047,7 +1047,7 @@
temp_arr = NULL;
}
return OMPI_SUCCESS;
-}
+}
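
The first file's hunks also show the helper declared as static int read_heap_sort (local_io_array *io_array, int num_entries, int *sorted), which presumably fills sorted with an index permutation of io_array ordered by increasing file offset, and which returns OMPI_SUCCESS (last hunk above). As a minimal sketch of that contract only — not the component's implementation, which as its name suggests uses a heap sort — the following standalone C builds the same kind of index array with qsort; the type widths and the name index_sort_by_offset are placeholders.

#include <stdlib.h>

/* Stand-ins for the OMPI typedefs used in the diff (assumed widths). */
typedef struct {
    long long offset;      /* OMPI_MPI_OFFSET_TYPE */
    long      length;      /* MPI_Aint             */
    int       process_id;
} local_io_array;

static const local_io_array *cmp_base;

/* Compare two indices by the offset of the entries they point at. */
static int cmp_by_offset (const void *a, const void *b)
{
    long long oa = cmp_base[*(const int *) a].offset;
    long long ob = cmp_base[*(const int *) b].offset;
    return (oa > ob) - (oa < ob);
}

/* Fill "sorted" with 0..num_entries-1 reordered so that
 * io_array[sorted[0]].offset <= io_array[sorted[1]].offset <= ... */
int index_sort_by_offset (const local_io_array *io_array,
                          int num_entries, int *sorted)
{
    int i;
    for (i = 0; i < num_entries; i++) {
        sorted[i] = i;
    }
    cmp_base = io_array;
    qsort (sorted, (size_t) num_entries, sizeof (int), cmp_by_offset);
    return 0;
}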

View file

@@ -994,7 +994,7 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
#endif
-exit :
+exit :
if (fh->f_procs_in_group[fh->f_aggregator_index] == fh->f_rank) {
if (NULL != sorted_file_offsets){
free(sorted_file_offsets);
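
Both routines touched by this commit funnel their error paths through a single exit label (visible in the hunks of each file) and free only the buffers that were actually allocated, guarded by NULL checks. A minimal, self-contained sketch of that cleanup idiom follows; the buffer names are hypothetical and a generic error code stands in for OMPI_ERR_OUT_OF_RESOURCE.

#include <stdlib.h>

/* Sketch of the goto-exit cleanup pattern used by read_all/write_all:
 * every early error jumps to "exit", which releases whatever has been
 * allocated so far. Names and error codes are illustrative only. */
int gather_offsets_example (int n)
{
    int   ret     = 0;
    int  *sorted  = NULL;
    long *offsets = NULL;

    sorted = (int *) malloc (n * sizeof (int));
    if (NULL == sorted) {
        ret = -1;              /* e.g. OMPI_ERR_OUT_OF_RESOURCE */
        goto exit;
    }

    offsets = (long *) malloc (n * sizeof (long));
    if (NULL == offsets) {
        ret = -1;
        goto exit;
    }

    /* ... work with sorted/offsets ... */

exit:
    if (NULL != offsets) {
        free (offsets);
        offsets = NULL;
    }
    if (NULL != sorted) {
        free (sorted);
        sorted = NULL;
    }
    return ret;
}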