
whitespace: purge whitespace at end of lines

Generated by running "./contrib/whitespace-purge.sh".
This commit is contained in:
Jeff Squyres 2015-09-08 09:47:17 -07:00
parent 12367d8444
commit bc9e5652ff
116 changed files with 638 additions and 638 deletions
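The change is purely mechanical: only trailing spaces and tabs are removed, which is why the commit shows equal numbers of additions and deletions, and why the diffs below differ from the old code only in invisible characters. As a rough illustration, here is a minimal C sketch of the per-file transformation; this is an assumption for illustration only and is not the contents of contrib/whitespace-purge.sh, which are not reproduced on this page.

/* Sketch: copy <input> to <output> with trailing spaces/tabs stripped
 * from every line.  Illustration only; not the actual purge script. */
#include <stdio.h>
#include <string.h>

int main(int argc, char *argv[])
{
    if (argc != 3) {
        fprintf(stderr, "usage: %s <input> <output>\n", argv[0]);
        return 1;
    }
    FILE *in  = fopen(argv[1], "r");
    FILE *out = fopen(argv[2], "w");
    if (NULL == in || NULL == out) {
        perror("fopen");
        return 1;
    }
    char line[4096];
    while (NULL != fgets(line, sizeof(line), in)) {
        size_t len = strlen(line);
        int had_newline = (len > 0 && '\n' == line[len - 1]);
        if (had_newline) {
            line[--len] = '\0';
        }
        /* strip trailing spaces and tabs */
        while (len > 0 && (' ' == line[len - 1] || '\t' == line[len - 1])) {
            line[--len] = '\0';
        }
        fputs(line, out);
        if (had_newline) {
            fputc('\n', out);
        }
    }
    fclose(in);
    fclose(out);
    return 0;
}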

View file

@@ -334,7 +334,7 @@ static int init_ml_message_desc(opal_free_list_item_t *desc , void* ctx)
/* finish setting up the fragment descriptor */
init_ml_fragment_desc((opal_free_list_item_t*)&(msg_desc->fragment),module);
return OPAL_SUCCESS;
}

View file

@@ -84,7 +84,7 @@ mca_fcoll_dynamic_file_read_all (mca_io_ompio_file_t *fh,
char *global_buf = NULL;
MPI_Aint global_count = 0;
mca_io_ompio_local_io_array *file_offsets_for_agg=NULL;
/* array that contains the sorted indices of the global_iov */
int *sorted = NULL;
int *displs = NULL;
@@ -96,30 +96,30 @@ mca_fcoll_dynamic_file_read_all (mca_io_ompio_file_t *fh,
int my_aggregator =-1;
bool recvbuf_is_contiguous=false;
size_t ftype_size;
OPAL_PTRDIFF_TYPE ftype_extent, lb;
#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
double read_time = 0.0, start_read_time = 0.0, end_read_time = 0.0;
double rcomm_time = 0.0, start_rcomm_time = 0.0, end_rcomm_time = 0.0;
double read_exch = 0.0, start_rexch = 0.0, end_rexch = 0.0;
mca_io_ompio_print_entry nentry;
#endif
/**************************************************************************
** 1. In case the data is not contigous in memory, decode it into an iovec
**************************************************************************/
opal_datatype_type_size ( &datatype->super, &ftype_size );
opal_datatype_get_extent ( &datatype->super, &lb, &ftype_extent );
if ( (ftype_extent == (OPAL_PTRDIFF_TYPE) ftype_size) &&
opal_datatype_is_contiguous_memory_layout(&datatype->super,1) &&
0 == lb ) {
recvbuf_is_contiguous = true;
}
if (! recvbuf_is_contiguous ) {
ret = fh->f_decode_datatype ((struct mca_io_ompio_file_t *)fh,
datatype,
@@ -135,11 +135,11 @@ mca_fcoll_dynamic_file_read_all (mca_io_ompio_file_t *fh,
else {
max_data = count * datatype->super.size;
}
if ( MPI_STATUS_IGNORE != status ) {
status->_ucount = max_data;
}
fh->f_get_num_aggregators ( &dynamic_num_io_procs);
ret = fh->f_set_aggregator_props ((struct mca_io_ompio_file_t *) fh,
dynamic_num_io_procs,
@@ -148,10 +148,10 @@ mca_fcoll_dynamic_file_read_all (mca_io_ompio_file_t *fh,
goto exit;
}
my_aggregator = fh->f_procs_in_group[fh->f_aggregator_index];
/**************************************************************************
** 2. Determine the total amount of data to be written
**************************************************************************/
total_bytes_per_process = (MPI_Aint*)malloc(fh->f_procs_per_group*sizeof(MPI_Aint));
if (NULL == total_bytes_per_process) {
opal_output (1, "OUT OF MEMORY\n");
@@ -160,8 +160,8 @@ mca_fcoll_dynamic_file_read_all (mca_io_ompio_file_t *fh,
}
#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
start_rcomm_time = MPI_Wtime();
#endif
ret = fh->f_allgather_array (&max_data,
1,
MPI_LONG,
total_bytes_per_process,
@@ -177,17 +177,17 @@ mca_fcoll_dynamic_file_read_all (mca_io_ompio_file_t *fh,
#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
end_rcomm_time = MPI_Wtime();
rcomm_time += end_rcomm_time - start_rcomm_time;
#endif
for (i=0 ; i<fh->f_procs_per_group ; i++) {
total_bytes += total_bytes_per_process[i];
}
if (NULL != total_bytes_per_process) {
free (total_bytes_per_process);
total_bytes_per_process = NULL;
}
/*********************************************************************
*** 3. Generate the File offsets/lengths corresponding to this write
********************************************************************/
@@ -195,15 +195,15 @@ mca_fcoll_dynamic_file_read_all (mca_io_ompio_file_t *fh,
max_data,
&local_iov_array,
&local_count);
if (ret != OMPI_SUCCESS){
goto exit;
}
/*************************************************************
*** 4. Allgather the File View information at all processes
*************************************************************/
fview_count = (int *) malloc (fh->f_procs_per_group * sizeof (int));
if (NULL == fview_count) {
opal_output (1, "OUT OF MEMORY\n");
@@ -212,7 +212,7 @@ mca_fcoll_dynamic_file_read_all (mca_io_ompio_file_t *fh,
}
#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
start_rcomm_time = MPI_Wtime();
#endif
ret = fh->f_allgather_array (&local_count,
1,
MPI_INT,
@@ -223,29 +223,29 @@ mca_fcoll_dynamic_file_read_all (mca_io_ompio_file_t *fh,
fh->f_procs_in_group,
fh->f_procs_per_group,
fh->f_comm);
if (OMPI_SUCCESS != ret){
goto exit;
}
#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
end_rcomm_time = MPI_Wtime();
rcomm_time += end_rcomm_time - start_rcomm_time;
#endif
displs = (int*)malloc (fh->f_procs_per_group*sizeof(int));
if (NULL == displs) {
opal_output (1, "OUT OF MEMORY\n");
ret = OMPI_ERR_OUT_OF_RESOURCE;
goto exit;
}
displs[0] = 0;
total_fview_count = fview_count[0];
for (i=1 ; i<fh->f_procs_per_group ; i++) {
total_fview_count += fview_count[i];
displs[i] = displs[i-1] + fview_count[i-1];
}
#if DEBUG_ON
if (my_aggregator == fh->f_rank) {
for (i=0 ; i<fh->f_procs_per_group ; i++) {
@@ -257,7 +257,7 @@ mca_fcoll_dynamic_file_read_all (mca_io_ompio_file_t *fh,
}
}
#endif
/* allocate the global iovec */
if (0 != total_fview_count) {
global_iov_array = (struct iovec*)malloc (total_fview_count *
@@ -270,7 +270,7 @@ mca_fcoll_dynamic_file_read_all (mca_io_ompio_file_t *fh,
}
#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
start_rcomm_time = MPI_Wtime();
#endif
ret = fh->f_allgatherv_array (local_iov_array,
local_count,
fh->f_iov_type,
@@ -282,21 +282,21 @@ mca_fcoll_dynamic_file_read_all (mca_io_ompio_file_t *fh,
fh->f_procs_in_group,
fh->f_procs_per_group,
fh->f_comm);
if (OMPI_SUCCESS != ret){
goto exit;
}
#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
end_rcomm_time = MPI_Wtime();
rcomm_time += end_rcomm_time - start_rcomm_time;
#endif
/****************************************************************************************
*** 5. Sort the global offset/lengths list based on the offsets.
*** The result of the sort operation is the 'sorted', an integer array,
*** which contains the indexes of the global_iov_array based on the offset.
*** For example, if global_iov_array[x].offset is followed by global_iov_array[y].offset
*** in the file, and that one is followed by global_iov_array[z].offset, than
*** sorted[0] = x, sorted[1]=y and sorted[2]=z;
******************************************************************************************/
if (0 != total_fview_count) {
@@ -308,12 +308,12 @@ mca_fcoll_dynamic_file_read_all (mca_io_ompio_file_t *fh,
}
fh->f_sort_iovec (global_iov_array, total_fview_count, sorted);
}
if (NULL != local_iov_array) {
free (local_iov_array);
local_iov_array = NULL;
}
#if DEBUG_ON
if (my_aggregator == fh->f_rank) {
for (i=0 ; i<total_fview_count ; i++) {
@@ -324,7 +324,7 @@ mca_fcoll_dynamic_file_read_all (mca_io_ompio_file_t *fh,
}
}
#endif
/*************************************************************
*** 6. Determine the number of cycles required to execute this
*** operation
@@ -339,21 +339,21 @@ mca_fcoll_dynamic_file_read_all (mca_io_ompio_file_t *fh,
ret = OMPI_ERR_OUT_OF_RESOURCE;
goto exit;
}
blocklen_per_process = (int **)malloc (fh->f_procs_per_group * sizeof (int*));
if (NULL == blocklen_per_process) {
opal_output (1, "OUT OF MEMORY\n");
ret = OMPI_ERR_OUT_OF_RESOURCE;
goto exit;
}
displs_per_process = (MPI_Aint **)malloc (fh->f_procs_per_group * sizeof (MPI_Aint*));
if (NULL == displs_per_process){
opal_output (1, "OUT OF MEMORY\n");
ret = OMPI_ERR_OUT_OF_RESOURCE;
goto exit;
}
for (i=0;i<fh->f_procs_per_group;i++){
blocklen_per_process[i] = NULL;
displs_per_process[i] = NULL;
@@ -378,16 +378,16 @@ mca_fcoll_dynamic_file_read_all (mca_io_ompio_file_t *fh,
opal_output (1, "OUT OF MEMORY\n");
ret = OMPI_ERR_OUT_OF_RESOURCE;
goto exit;
}
for(l=0;l<fh->f_procs_per_group;l++){
sendtype[l] = MPI_DATATYPE_NULL;
}
}
#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
start_rexch = MPI_Wtime();
#endif
@@ -405,7 +405,7 @@ mca_fcoll_dynamic_file_read_all (mca_io_ompio_file_t *fh,
fh->f_io_array = NULL;
}
fh->f_num_of_io_entries = 0;
if (NULL != sendtype){
for (i =0; i< fh->f_procs_per_group; i++) {
if ( MPI_DATATYPE_NULL != sendtype[i] ) {
@@ -414,10 +414,10 @@ mca_fcoll_dynamic_file_read_all (mca_io_ompio_file_t *fh,
}
}
}
for(l=0;l<fh->f_procs_per_group;l++){
disp_index[l] = 1;
if (NULL != blocklen_per_process[l]){
free(blocklen_per_process[l]);
blocklen_per_process[l] = NULL;
@@ -439,12 +439,12 @@ mca_fcoll_dynamic_file_read_all (mca_io_ompio_file_t *fh,
goto exit;
}
}
if (NULL != sorted_file_offsets){
free(sorted_file_offsets);
sorted_file_offsets = NULL;
}
if(NULL != file_offsets_for_agg){
free(file_offsets_for_agg);
file_offsets_for_agg = NULL;
@@ -454,17 +454,17 @@ mca_fcoll_dynamic_file_read_all (mca_io_ompio_file_t *fh,
memory_displacements = NULL;
}
} /* (my_aggregator == fh->f_rank */
/**************************************************************************
*** 7b. Determine the number of bytes to be actually read in this cycle
**************************************************************************/
if (cycles-1 == index) {
bytes_to_read_in_cycle = total_bytes - bytes_per_cycle*index;
}
else {
bytes_to_read_in_cycle = bytes_per_cycle;
}
#if DEBUG_ON
if (my_aggregator == fh->f_rank) {
printf ("****%d: CYCLE %d Bytes %d**********\n",
@@ -473,13 +473,13 @@ mca_fcoll_dynamic_file_read_all (mca_io_ompio_file_t *fh,
bytes_to_write_in_cycle);
}
#endif
/*****************************************************************
*** 7c. Calculate how much data will be contributed in this cycle
*** by each process
*****************************************************************/
bytes_received = 0;
while (bytes_to_read_in_cycle) {
/* This next block identifies which process is the holder
** of the sorted[current_index] element;
@@ -549,7 +549,7 @@ mca_fcoll_dynamic_file_read_all (mca_io_ompio_file_t *fh,
displs_per_process[n][disp_index[n] - 1] =
(OPAL_PTRDIFF_TYPE)global_iov_array[sorted[current_index]].iov_base ;
}
if (fh->f_procs_in_group[n] == fh->f_rank) {
bytes_received += bytes_to_read_in_cycle;
}
@@ -587,7 +587,7 @@ mca_fcoll_dynamic_file_read_all (mca_io_ompio_file_t *fh,
/*************************************************************************
*** 7d. Calculate the displacement on where to put the data and allocate
*** the recieve buffer (global_buf)
*************************************************************************/
if (my_aggregator == fh->f_rank) {
entries_per_aggregator=0;
@@ -637,7 +637,7 @@ mca_fcoll_dynamic_file_read_all (mca_io_ompio_file_t *fh,
read_heap_sort (file_offsets_for_agg,
entries_per_aggregator,
sorted_file_offsets);
memory_displacements = (MPI_Aint *) malloc
(entries_per_aggregator * sizeof(MPI_Aint));
memory_displacements[sorted_file_offsets[0]] = 0;
@@ -646,7 +646,7 @@ mca_fcoll_dynamic_file_read_all (mca_io_ompio_file_t *fh,
memory_displacements[sorted_file_offsets[i-1]] +
file_offsets_for_agg[sorted_file_offsets[i-1]].length;
}
/**********************************************************
*** 7e. Create the io array, and pass it to fbtl
*********************************************************/
@@ -657,7 +657,7 @@ mca_fcoll_dynamic_file_read_all (mca_io_ompio_file_t *fh,
ret = OMPI_ERR_OUT_OF_RESOURCE;
goto exit;
}
fh->f_num_of_io_entries = 0;
fh->f_io_array[0].offset =
(IOVBASE_TYPE *)(intptr_t)file_offsets_for_agg[sorted_file_offsets[0]].offset;
@@ -683,12 +683,12 @@ mca_fcoll_dynamic_file_read_all (mca_io_ompio_file_t *fh,
fh->f_num_of_io_entries++;
}
}
#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
start_read_time = MPI_Wtime();
#endif
if (fh->f_num_of_io_entries) {
if ( 0 > fh->f_fbtl->fbtl_preadv (fh)) {
opal_output (1, "READ FAILED\n");
@@ -696,7 +696,7 @@ mca_fcoll_dynamic_file_read_all (mca_io_ompio_file_t *fh,
goto exit;
}
}
#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
end_read_time = MPI_Wtime();
read_time += end_read_time - start_read_time;
@@ -704,7 +704,7 @@ mca_fcoll_dynamic_file_read_all (mca_io_ompio_file_t *fh,
/**********************************************************
******************** DONE READING ************************
*********************************************************/
temp_disp_index = (int *)calloc (1, fh->f_procs_per_group * sizeof (int));
if (NULL == temp_disp_index) {
opal_output (1, "OUT OF MEMORY\n");
@@ -729,7 +729,7 @@ mca_fcoll_dynamic_file_read_all (mca_io_ompio_file_t *fh,
free(temp_disp_index);
temp_disp_index = NULL;
}
#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
start_rcomm_time = MPI_Wtime();
#endif
@@ -760,7 +760,7 @@ mca_fcoll_dynamic_file_read_all (mca_io_ompio_file_t *fh,
rcomm_time += end_rcomm_time - start_rcomm_time;
#endif
}
/**********************************************************
*** 7f. Scatter the Data from the readers
*********************************************************/
@@ -778,7 +778,7 @@ mca_fcoll_dynamic_file_read_all (mca_io_ompio_file_t *fh,
goto exit;
}
}
#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
start_rcomm_time = MPI_Wtime();
#endif
@@ -792,8 +792,8 @@ mca_fcoll_dynamic_file_read_all (mca_io_ompio_file_t *fh,
if (OMPI_SUCCESS != ret){
goto exit;
}
if (my_aggregator == fh->f_rank){
ret = ompi_request_wait_all (fh->f_procs_per_group,
send_req,
@@ -802,26 +802,26 @@ mca_fcoll_dynamic_file_read_all (mca_io_ompio_file_t *fh,
goto exit;
}
}
ret = ompi_request_wait (&recv_req, MPI_STATUS_IGNORE);
if (OMPI_SUCCESS != ret){
goto exit;
}
position += bytes_received;
/* If data is not contigous in memory, copy the data from the
receive buffer into the buffer passed in */
if (!recvbuf_is_contiguous ) {
OPAL_PTRDIFF_TYPE mem_address;
size_t remaining = 0;
size_t temp_position = 0;
remaining = bytes_received;
while (remaining) {
mem_address = (OPAL_PTRDIFF_TYPE)
(decoded_iov[iov_index].iov_base) + current_position;
if (remaining >=
(decoded_iov[iov_index].iov_len - current_position)) {
memcpy ((IOVBASE_TYPE *) mem_address,
@@ -842,7 +842,7 @@ mca_fcoll_dynamic_file_read_all (mca_io_ompio_file_t *fh,
remaining = 0;
}
}
if (NULL != receive_buf) {
free (receive_buf);
receive_buf = NULL;
@@ -853,7 +853,7 @@ mca_fcoll_dynamic_file_read_all (mca_io_ompio_file_t *fh,
rcomm_time += end_rcomm_time - start_rcomm_time;
#endif
} /* end for (index=0; index < cycles; index ++) */
#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
end_rexch = MPI_Wtime();
read_exch += end_rexch - start_rexch;
@@ -870,7 +870,7 @@ mca_fcoll_dynamic_file_read_all (mca_io_ompio_file_t *fh,
nentry);
}
#endif
exit:
if (!recvbuf_is_contiguous) {
if (NULL != receive_buf) {
@@ -881,7 +881,7 @@ exit:
if (NULL != global_buf) {
free (global_buf);
global_buf = NULL;
}
if (NULL != sorted) {
free (sorted);
sorted = NULL;
@@ -902,13 +902,13 @@ exit:
free(local_iov_array);
local_iov_array=NULL;
}
if (NULL != displs) {
free (displs);
displs = NULL;
}
if (my_aggregator == fh->f_rank) {
if (NULL != sorted_file_offsets){
free(sorted_file_offsets);
sorted_file_offsets = NULL;
@@ -930,12 +930,12 @@ exit:
free(sendtype);
sendtype=NULL;
}
if (NULL != disp_index){
free(disp_index);
disp_index = NULL;
}
if ( NULL != blocklen_per_process){
for(l=0;l<fh->f_procs_per_group;l++){
if (NULL != blocklen_per_process[l]){
@@ -943,11 +943,11 @@ exit:
blocklen_per_process[l] = NULL;
}
}
free(blocklen_per_process);
blocklen_per_process = NULL;
}
if (NULL != displs_per_process){
for (l=0; i<fh->f_procs_per_group; l++){
if (NULL != displs_per_process[l]){
@@ -980,7 +980,7 @@ static int read_heap_sort (mca_io_ompio_local_io_array *io_array,
int temp = 0;
unsigned char done = 0;
int* temp_arr = NULL;
temp_arr = (int*)malloc(num_entries*sizeof(int));
if (NULL == temp_arr) {
opal_output (1, "OUT OF MEMORY\n");
@@ -995,7 +995,7 @@ static int read_heap_sort (mca_io_ompio_local_io_array *io_array,
done = 0;
j = i;
largest = j;
while (!done) {
left = j*2+1;
right = j*2+2;
@@ -1022,7 +1022,7 @@ static int read_heap_sort (mca_io_ompio_local_io_array *io_array,
}
}
}
for (i = num_entries-1; i >=1; --i) {
temp = temp_arr[0];
temp_arr[0] = temp_arr[i];
@@ -1031,11 +1031,11 @@ static int read_heap_sort (mca_io_ompio_local_io_array *io_array,
done = 0;
j = 0;
largest = j;
while (!done) {
left = j*2+1;
right = j*2+2;
if ((left <= heap_size) &&
(io_array[temp_arr[left]].offset >
io_array[temp_arr[j]].offset)) {
@@ -1062,7 +1062,7 @@ static int read_heap_sort (mca_io_ompio_local_io_array *io_array,
sorted[i] = temp_arr[i];
}
sorted[0] = temp_arr[0];
if (NULL != temp_arr) {
free(temp_arr);
temp_arr = NULL;

View file

@@ -65,7 +65,7 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
value from total_bytes_per_process */
int bytes_sent = 0, ret =0;
int blocks=0, entries_per_aggregator=0;
/* iovec structure and count of the buffer passed in */
uint32_t iov_count = 0;
struct iovec *decoded_iov = NULL;
@@ -80,11 +80,11 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
int local_count = 0, temp_pindex;
int *fview_count = NULL, *disp_index=NULL, *temp_disp_index=NULL;
int current_index = 0, temp_index=0;
char *global_buf = NULL;
MPI_Aint global_count = 0;
/* array that contains the sorted indices of the global_iov */
int *sorted = NULL, *sorted_file_offsets=NULL;
int *displs = NULL;
@@ -98,28 +98,28 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
int my_aggregator=-1;
bool sendbuf_is_contiguous = false;
size_t ftype_size;
OPAL_PTRDIFF_TYPE ftype_extent, lb;
#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
double write_time = 0.0, start_write_time = 0.0, end_write_time = 0.0;
double comm_time = 0.0, start_comm_time = 0.0, end_comm_time = 0.0;
double exch_write = 0.0, start_exch = 0.0, end_exch = 0.0;
mca_io_ompio_print_entry nentry;
#endif
opal_datatype_type_size ( &datatype->super, &ftype_size );
opal_datatype_get_extent ( &datatype->super, &lb, &ftype_extent );
/**************************************************************************
** 1. In case the data is not contigous in memory, decode it into an iovec
**************************************************************************/
if ( ( ftype_extent == (OPAL_PTRDIFF_TYPE) ftype_size) &&
opal_datatype_is_contiguous_memory_layout(&datatype->super,1) &&
0 == lb ) {
sendbuf_is_contiguous = true;
}
if (! sendbuf_is_contiguous ) {
@@ -137,23 +137,23 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
else {
max_data = count * datatype->super.size;
}
if ( MPI_STATUS_IGNORE != status ) {
status->_ucount = max_data;
}
fh->f_get_num_aggregators ( &dynamic_num_io_procs );
ret = fh->f_set_aggregator_props ((struct mca_io_ompio_file_t *) fh,
dynamic_num_io_procs,
max_data);
if (OMPI_SUCCESS != ret){
goto exit;
}
my_aggregator = fh->f_procs_in_group[fh->f_aggregator_index];
/**************************************************************************
** 2. Determine the total amount of data to be written
**************************************************************************/
total_bytes_per_process = (MPI_Aint*)malloc
(fh->f_procs_per_group*sizeof(MPI_Aint));
if (NULL == total_bytes_per_process) {
@@ -164,7 +164,7 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
start_comm_time = MPI_Wtime();
#endif
ret = fh->f_allgather_array (&max_data,
1,
MPI_LONG,
@@ -175,7 +175,7 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
fh->f_procs_in_group,
fh->f_procs_per_group,
fh->f_comm);
if( OMPI_SUCCESS != ret){
goto exit;
}
@@ -187,14 +187,14 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
for (i=0 ; i<fh->f_procs_per_group ; i++) {
total_bytes += total_bytes_per_process[i];
}
if (NULL != total_bytes_per_process) {
free (total_bytes_per_process);
total_bytes_per_process = NULL;
}
/*********************************************************************
*** 3. Generate the local offsets/lengths array corresponding to
*** this write operation
********************************************************************/
ret = fh->f_generate_current_file_view( (struct mca_io_ompio_file_t *) fh,
@@ -204,21 +204,21 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
if (ret != OMPI_SUCCESS){
goto exit;
}
#if DEBUG_ON
for (i=0 ; i<local_count ; i++) {
printf("%d: OFFSET: %d LENGTH: %ld\n",
fh->f_rank,
local_iov_array[i].iov_base,
local_iov_array[i].iov_len);
}
#endif
/*************************************************************
*** 4. Allgather the offset/lengths array from all processes
*************************************************************/
fview_count = (int *) malloc (fh->f_procs_per_group * sizeof (int));
if (NULL == fview_count) {
opal_output (1, "OUT OF MEMORY\n");
@@ -227,7 +227,7 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
}
#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
start_comm_time = MPI_Wtime();
#endif
ret = fh->f_allgather_array (&local_count,
1,
MPI_INT,
@@ -238,14 +238,14 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
fh->f_procs_in_group,
fh->f_procs_per_group,
fh->f_comm);
if( OMPI_SUCCESS != ret){
goto exit;
}
#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
end_comm_time = MPI_Wtime();
comm_time += (end_comm_time - start_comm_time);
#endif
displs = (int*) malloc (fh->f_procs_per_group * sizeof (int));
if (NULL == displs) {
@@ -253,14 +253,14 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
ret = OMPI_ERR_OUT_OF_RESOURCE;
goto exit;
}
displs[0] = 0;
total_fview_count = fview_count[0];
for (i=1 ; i<fh->f_procs_per_group ; i++) {
total_fview_count += fview_count[i];
displs[i] = displs[i-1] + fview_count[i-1];
}
#if DEBUG_ON
printf("total_fview_count : %d\n", total_fview_count);
if (my_aggregator == fh->f_rank) {
@@ -273,9 +273,9 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
}
}
#endif
/* allocate the global iovec */
if (0 != total_fview_count) {
global_iov_array = (struct iovec*) malloc (total_fview_count *
sizeof(struct iovec));
@@ -284,12 +284,12 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
ret = OMPI_ERR_OUT_OF_RESOURCE;
goto exit;
}
}
#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
start_comm_time = MPI_Wtime();
#endif
ret = fh->f_allgatherv_array (local_iov_array,
local_count,
fh->f_iov_type,
@@ -308,13 +308,13 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
end_comm_time = MPI_Wtime();
comm_time += (end_comm_time - start_comm_time);
#endif
/****************************************************************************************
*** 5. Sort the global offset/lengths list based on the offsets.
*** The result of the sort operation is the 'sorted', an integer array,
*** which contains the indexes of the global_iov_array based on the offset.
*** For example, if global_iov_array[x].offset is followed by global_iov_array[y].offset
*** in the file, and that one is followed by global_iov_array[z].offset, than
*** sorted[0] = x, sorted[1]=y and sorted[2]=z;
******************************************************************************************/
if (0 != total_fview_count) {
@@ -326,18 +326,18 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
}
fh->f_sort_iovec (global_iov_array, total_fview_count, sorted);
}
if (NULL != local_iov_array){
free(local_iov_array);
local_iov_array = NULL;
}
if (NULL != displs){
free(displs);
displs=NULL;
}
#if DEBUG_ON
if (my_aggregator == fh->f_rank) {
uint32_t tv=0;
@@ -355,7 +355,7 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
*************************************************************/
fh->f_get_bytes_per_agg ( (int *)&bytes_per_cycle );
cycles = ceil((double)total_bytes/bytes_per_cycle);
if (my_aggregator == fh->f_rank) {
disp_index = (int *)malloc (fh->f_procs_per_group * sizeof (int));
if (NULL == disp_index) {
@@ -363,21 +363,21 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
ret = OMPI_ERR_OUT_OF_RESOURCE;
goto exit;
}
blocklen_per_process = (int **)malloc (fh->f_procs_per_group * sizeof (int*));
if (NULL == blocklen_per_process) {
opal_output (1, "OUT OF MEMORY\n");
ret = OMPI_ERR_OUT_OF_RESOURCE;
goto exit;
}
displs_per_process = (MPI_Aint **)malloc (fh->f_procs_per_group * sizeof (MPI_Aint*));
if (NULL == displs_per_process) {
opal_output (1, "OUT OF MEMORY\n");
ret = OMPI_ERR_OUT_OF_RESOURCE;
goto exit;
}
for(i=0;i<fh->f_procs_per_group;i++){
blocklen_per_process[i] = NULL;
displs_per_process[i] = NULL;
@@ -394,7 +394,7 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
opal_output(1, "OUT OF MEMORY");
ret = OMPI_ERR_OUT_OF_RESOURCE;
goto exit;
}
recvtype = (ompi_datatype_t **) malloc (fh->f_procs_per_group * sizeof(ompi_datatype_t *));
if (NULL == recvtype) {
@@ -403,21 +403,21 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
goto exit;
}
for(l=0;l<fh->f_procs_per_group;l++){
recvtype[l] = MPI_DATATYPE_NULL;
}
}
#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
start_exch = MPI_Wtime();
#endif
n = 0;
bytes_remaining = 0;
current_index = 0;
for (index = 0; index < cycles; index++) {
/**********************************************************************
*** 7a. Getting ready for next cycle: initializing and freeing buffers
**********************************************************************/
if (my_aggregator == fh->f_rank) {
if (NULL != fh->f_io_array) {
free (fh->f_io_array);
@@ -431,12 +431,12 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
ompi_datatype_destroy(&recvtype[i]);
recvtype[i] = MPI_DATATYPE_NULL;
}
}
}
for(l=0;l<fh->f_procs_per_group;l++){
disp_index[l] = 1;
if (NULL != blocklen_per_process[l]){
free(blocklen_per_process[l]);
blocklen_per_process[l] = NULL;
@@ -458,24 +458,24 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
goto exit;
}
}
if (NULL != sorted_file_offsets){
free(sorted_file_offsets);
sorted_file_offsets = NULL;
}
if(NULL != file_offsets_for_agg){
free(file_offsets_for_agg);
file_offsets_for_agg = NULL;
}
if (NULL != memory_displacements){
free(memory_displacements);
memory_displacements = NULL;
}
} /* (my_aggregator == fh->f_rank */
/**************************************************************************
*** 7b. Determine the number of bytes to be actually written in this cycle
**************************************************************************/
@@ -485,7 +485,7 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
else {
bytes_to_write_in_cycle = bytes_per_cycle;
}
#if DEBUG_ON
if (my_aggregator == fh->f_rank) {
printf ("****%d: CYCLE %d Bytes %lld**********\n",
@@ -497,7 +497,7 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
/**********************************************************
**Gather the Data from all the processes at the writers **
*********************************************************/
#if DEBUG_ON
printf("bytes_to_write_in_cycle: %ld, cycle : %d\n", bytes_to_write_in_cycle,
index);
@@ -511,10 +511,10 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
/* The blocklen and displs calculation only done at aggregators!*/
while (bytes_to_write_in_cycle) {
/* This next block identifies which process is the holder
** of the sorted[current_index] element;
*/
blocks = fview_count[0];
for (j=0 ; j<fh->f_procs_per_group ; j++) {
if (sorted[current_index] < blocks) {
@@ -525,9 +525,9 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
blocks += fview_count[j+1];
}
}
if (bytes_remaining) {
/* Finish up a partially used buffer from the previous cycle */
if (bytes_remaining <= bytes_to_write_in_cycle) {
/* The data fits completely into the block */
@@ -537,9 +537,9 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
(OPAL_PTRDIFF_TYPE)global_iov_array[sorted[current_index]].iov_base +
(global_iov_array[sorted[current_index]].iov_len
- bytes_remaining);
/* In this cases the length is consumed so allocating for
next displacement and blocklength*/
blocklen_per_process[n] = (int *) realloc
((void *)blocklen_per_process[n], (disp_index[n]+1)*sizeof(int));
displs_per_process[n] = (MPI_Aint *) realloc
@@ -566,7 +566,7 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
(global_iov_array[sorted[current_index]].iov_len
- bytes_remaining);
}
if (fh->f_procs_in_group[n] == fh->f_rank) {
bytes_sent += bytes_to_write_in_cycle;
}
@@ -587,7 +587,7 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
}
if (fh->f_procs_in_group[n] == fh->f_rank) {
bytes_sent += bytes_to_write_in_cycle;
}
bytes_remaining = global_iov_array[sorted[current_index]].iov_len -
bytes_to_write_in_cycle;
@@ -601,7 +601,7 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
global_iov_array[sorted[current_index]].iov_len;
displs_per_process[n][disp_index[n] - 1] = (OPAL_PTRDIFF_TYPE)
global_iov_array[sorted[current_index]].iov_base;
/*realloc for next blocklength
and assign this displacement and check for next displs as
the total length of this entry has been consumed!*/
@@ -623,11 +623,11 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
}
}
}
/*************************************************************************
*** 7d. Calculate the displacement on where to put the data and allocate
*** the recieve buffer (global_buf)
*************************************************************************/
if (my_aggregator == fh->f_rank) {
entries_per_aggregator=0;
@@ -637,13 +637,13 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
entries_per_aggregator++ ;
}
}
#if DEBUG_ON
printf("%d: cycle: %d, bytes_sent: %d\n ",fh->f_rank,index,
bytes_sent);
printf("%d : Entries per aggregator : %d\n",fh->f_rank,entries_per_aggregator);
#endif
if (entries_per_aggregator > 0){
file_offsets_for_agg = (mca_io_ompio_local_io_array *)
malloc(entries_per_aggregator*sizeof(mca_io_ompio_local_io_array));
@@ -652,7 +652,7 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
ret = OMPI_ERR_OUT_OF_RESOURCE;
goto exit;
}
sorted_file_offsets = (int *)
malloc (entries_per_aggregator*sizeof(int));
if (NULL == sorted_file_offsets){
@@ -660,10 +660,10 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
ret = OMPI_ERR_OUT_OF_RESOURCE;
goto exit;
}
/*Moving file offsets to an IO array!*/
temp_index = 0;
for (i=0;i<fh->f_procs_per_group; i++){
for(j=0;j<disp_index[i];j++){
if (blocklen_per_process[i][j] > 0){
@@ -673,11 +673,11 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
file_offsets_for_agg[temp_index].offset =
displs_per_process[i][j];
temp_index++;
#if DEBUG_ON
printf("************Cycle: %d, Aggregator: %d ***************\n",
index+1,fh->f_rank);
printf("%d sends blocklen[%d]: %d, disp[%d]: %ld to %d\n",
fh->f_procs_in_group[i],j,
blocklen_per_process[i][j],j,
@@ -695,28 +695,28 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
local_heap_sort (file_offsets_for_agg,
entries_per_aggregator,
sorted_file_offsets);
/*create contiguous memory displacements
based on blocklens on the same displs array
and map it to this aggregator's actual
file-displacements (this is in the io-array created above)*/
memory_displacements = (MPI_Aint *) malloc
(entries_per_aggregator * sizeof(MPI_Aint));
memory_displacements[sorted_file_offsets[0]] = 0;
for (i=1; i<entries_per_aggregator; i++){
memory_displacements[sorted_file_offsets[i]] =
memory_displacements[sorted_file_offsets[i-1]] +
file_offsets_for_agg[sorted_file_offsets[i-1]].length;
}
temp_disp_index = (int *)calloc (1, fh->f_procs_per_group * sizeof (int));
if (NULL == temp_disp_index) {
opal_output (1, "OUT OF MEMORY\n");
ret = OMPI_ERR_OUT_OF_RESOURCE;
goto exit;
}
/*Now update the displacements array with memory offsets*/
global_count = 0;
for (i=0;i<entries_per_aggregator;i++){
@@ -734,14 +734,14 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
global_count +=
file_offsets_for_agg[sorted_file_offsets[i]].length;
}
if (NULL != temp_disp_index){
free(temp_disp_index);
temp_disp_index = NULL;
}
#if DEBUG_ON
printf("************Cycle: %d, Aggregator: %d ***************\n",
index+1,fh->f_rank);
for (i=0;i<fh->f_procs_per_group; i++){
@@ -752,7 +752,7 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
blocklen_per_process[i][j],j,
displs_per_process[i][j],
fh->f_rank);
}
}
}
@@ -773,7 +773,7 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
#endif
/*************************************************************************
*** 7e. Perform the actual communication
*************************************************************************/
for (i=0;i<fh->f_procs_per_group; i++) {
recv_req[i] = MPI_REQUEST_NULL;
if ( 0 < disp_index[i] ) {
@@ -784,8 +784,8 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
&recvtype[i]);
ompi_datatype_commit(&recvtype[i]);
opal_datatype_type_size(&recvtype[i]->super, &datatype_size);
if (datatype_size){
ret = MCA_PML_CALL(irecv(global_buf,
1,
recvtype[i],
@@ -800,8 +800,8 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
}
}
} /* end if (my_aggregator == fh->f_rank ) */
if ( sendbuf_is_contiguous ) {
send_buf = &((char*)buf)[total_bytes_written];
}
@@ -812,20 +812,20 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
OPAL_PTRDIFF_TYPE mem_address;
size_t remaining = 0;
size_t temp_position = 0;
send_buf = malloc (bytes_sent);
if (NULL == send_buf) {
opal_output (1, "OUT OF MEMORY\n");
ret = OMPI_ERR_OUT_OF_RESOURCE;
goto exit;
}
remaining = bytes_sent;
while (remaining) {
mem_address = (OPAL_PTRDIFF_TYPE)
(decoded_iov[iov_index].iov_base) + current_position;
if (remaining >=
(decoded_iov[iov_index].iov_len - current_position)) {
memcpy (send_buf+temp_position,
@@ -848,10 +848,10 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
}
}
total_bytes_written += bytes_sent;
/* Gather the sendbuf from each process in appropritate locations in
aggregators*/
if (bytes_sent){
ret = MCA_PML_CALL(isend(send_buf,
bytes_sent,
@@ -861,12 +861,12 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
MCA_PML_BASE_SEND_STANDARD,
fh->f_comm,
&send_req));
if ( OMPI_SUCCESS != ret ){
goto exit;
}
ret = ompi_request_wait(&send_req, MPI_STATUS_IGNORE);
if (OMPI_SUCCESS != ret){
goto exit;
}
@@ -877,7 +877,7 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
ret = ompi_request_wait_all (fh->f_procs_per_group,
recv_req,
MPI_STATUS_IGNORE);
if (OMPI_SUCCESS != ret){
goto exit;
}
@@ -891,28 +891,28 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
printf (" RECV %d \n",((int *)global_buf)[i]);
} }
#endif #endif
if (! sendbuf_is_contiguous) { if (! sendbuf_is_contiguous) {
if (NULL != send_buf) { if (NULL != send_buf) {
free (send_buf); free (send_buf);
send_buf = NULL; send_buf = NULL;
} }
} }
#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN #if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
end_comm_time = MPI_Wtime(); end_comm_time = MPI_Wtime();
comm_time += (end_comm_time - start_comm_time); comm_time += (end_comm_time - start_comm_time);
#endif #endif
/********************************************************** /**********************************************************
*** 7f. Create the io array, and pass it to fbtl *** 7f. Create the io array, and pass it to fbtl
*********************************************************/ *********************************************************/
if (my_aggregator == fh->f_rank) { if (my_aggregator == fh->f_rank) {
#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN #if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
start_write_time = MPI_Wtime(); start_write_time = MPI_Wtime();
#endif #endif
fh->f_io_array = (mca_io_ompio_io_array_t *) malloc fh->f_io_array = (mca_io_ompio_io_array_t *) malloc
(entries_per_aggregator * sizeof (mca_io_ompio_io_array_t)); (entries_per_aggregator * sizeof (mca_io_ompio_io_array_t));
if (NULL == fh->f_io_array) { if (NULL == fh->f_io_array) {
@ -920,7 +920,7 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
ret = OMPI_ERR_OUT_OF_RESOURCE; ret = OMPI_ERR_OUT_OF_RESOURCE;
goto exit; goto exit;
} }
fh->f_num_of_io_entries = 0; fh->f_num_of_io_entries = 0;
/*First entry for every aggregator*/ /*First entry for every aggregator*/
fh->f_io_array[0].offset = fh->f_io_array[0].offset =
@ -930,7 +930,7 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
fh->f_io_array[0].memory_address = fh->f_io_array[0].memory_address =
global_buf+memory_displacements[sorted_file_offsets[0]]; global_buf+memory_displacements[sorted_file_offsets[0]];
fh->f_num_of_io_entries++; fh->f_num_of_io_entries++;
for (i=1;i<entries_per_aggregator;i++){ for (i=1;i<entries_per_aggregator;i++){
/* If the enrties are contiguous merge them, /* If the enrties are contiguous merge them,
else make a new entry */ else make a new entry */
@ -949,9 +949,9 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
global_buf+memory_displacements[sorted_file_offsets[i]]; global_buf+memory_displacements[sorted_file_offsets[i]];
fh->f_num_of_io_entries++; fh->f_num_of_io_entries++;
} }
} }
#if DEBUG_ON #if DEBUG_ON
printf("*************************** %d\n", fh->f_num_of_io_entries); printf("*************************** %d\n", fh->f_num_of_io_entries);
for (i=0 ; i<fh->f_num_of_io_entries ; i++) { for (i=0 ; i<fh->f_num_of_io_entries ; i++) {
@ -960,9 +960,9 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
(OPAL_PTRDIFF_TYPE)fh->f_io_array[i].offset, (OPAL_PTRDIFF_TYPE)fh->f_io_array[i].offset,
fh->f_io_array[i].length); fh->f_io_array[i].length);
} }
#endif #endif
if (fh->f_num_of_io_entries) { if (fh->f_num_of_io_entries) {
if ( 0 > fh->f_fbtl->fbtl_pwritev (fh)) { if ( 0 > fh->f_fbtl->fbtl_pwritev (fh)) {
opal_output (1, "WRITE FAILED\n"); opal_output (1, "WRITE FAILED\n");
@ -974,11 +974,11 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
end_write_time = MPI_Wtime(); end_write_time = MPI_Wtime();
write_time += end_write_time - start_write_time; write_time += end_write_time - start_write_time;
#endif #endif
} /* end if (my_aggregator == fh->f_rank) */ } /* end if (my_aggregator == fh->f_rank) */
} /* end for (index = 0; index < cycles; index++) */ } /* end for (index = 0; index < cycles; index++) */
#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN #if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
end_exch = MPI_Wtime(); end_exch = MPI_Wtime();
exch_write += end_exch - start_exch; exch_write += end_exch - start_exch;
@ -995,8 +995,8 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
nentry); nentry);
} }
#endif #endif
exit : exit :
if (my_aggregator == fh->f_rank) { if (my_aggregator == fh->f_rank) {
if (NULL != sorted_file_offsets){ if (NULL != sorted_file_offsets){
@ -1020,7 +1020,7 @@ exit :
free(recvtype); free(recvtype);
recvtype=NULL; recvtype=NULL;
} }
if (NULL != fh->f_io_array) { if (NULL != fh->f_io_array) {
free (fh->f_io_array); free (fh->f_io_array);
fh->f_io_array = NULL; fh->f_io_array = NULL;
@ -1059,9 +1059,9 @@ exit :
free(displs_per_process); free(displs_per_process);
displs_per_process = NULL; displs_per_process = NULL;
} }
} }
if (NULL != displs){ if (NULL != displs){
free(displs); free(displs);
displs=NULL; displs=NULL;
@ -1093,8 +1093,8 @@ exit :
free (decoded_iov); free (decoded_iov);
decoded_iov = NULL; decoded_iov = NULL;
} }
return OMPI_SUCCESS; return OMPI_SUCCESS;
} }
@ -1112,7 +1112,7 @@ static int local_heap_sort (mca_io_ompio_local_io_array *io_array,
int temp = 0; int temp = 0;
unsigned char done = 0; unsigned char done = 0;
int* temp_arr = NULL; int* temp_arr = NULL;
temp_arr = (int*)malloc(num_entries*sizeof(int)); temp_arr = (int*)malloc(num_entries*sizeof(int));
if (NULL == temp_arr) { if (NULL == temp_arr) {
opal_output (1, "OUT OF MEMORY\n"); opal_output (1, "OUT OF MEMORY\n");
@ -1127,7 +1127,7 @@ static int local_heap_sort (mca_io_ompio_local_io_array *io_array,
done = 0; done = 0;
j = i; j = i;
largest = j; largest = j;
while (!done) { while (!done) {
left = j*2+1; left = j*2+1;
right = j*2+2; right = j*2+2;
@ -1154,7 +1154,7 @@ static int local_heap_sort (mca_io_ompio_local_io_array *io_array,
} }
} }
} }
for (i = num_entries-1; i >=1; --i) { for (i = num_entries-1; i >=1; --i) {
temp = temp_arr[0]; temp = temp_arr[0];
temp_arr[0] = temp_arr[i]; temp_arr[0] = temp_arr[i];
@ -1163,11 +1163,11 @@ static int local_heap_sort (mca_io_ompio_local_io_array *io_array,
done = 0; done = 0;
j = 0; j = 0;
largest = j; largest = j;
while (!done) { while (!done) {
left = j*2+1; left = j*2+1;
right = j*2+2; right = j*2+2;
if ((left <= heap_size) && if ((left <= heap_size) &&
(io_array[temp_arr[left]].offset > (io_array[temp_arr[left]].offset >
io_array[temp_arr[j]].offset)) { io_array[temp_arr[j]].offset)) {
@ -1194,7 +1194,7 @@ static int local_heap_sort (mca_io_ompio_local_io_array *io_array,
sorted[i] = temp_arr[i]; sorted[i] = temp_arr[i];
} }
sorted[0] = temp_arr[0]; sorted[0] = temp_arr[0];
if (NULL != temp_arr) { if (NULL != temp_arr) {
free(temp_arr); free(temp_arr);
temp_arr = NULL; temp_arr = NULL;

View file

@@ -66,15 +66,15 @@ mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh,
struct ompi_datatype_t *datatype,
ompi_status_public_t *status)
{
int ret = OMPI_SUCCESS, iov_size=0, *bytes_remaining=NULL;
int i, j, l,cycles=0, local_cycles=0, *current_index=NULL;
int index, *disp_index=NULL, *bytes_per_process=NULL, current_position=0;
int **blocklen_per_process=NULL, *iovec_count_per_process=NULL;
int *displs=NULL, *sorted=NULL ,entries_per_aggregator=0;
int *sorted_file_offsets=NULL, temp_index=0, position=0, *temp_disp_index=NULL;
MPI_Aint **displs_per_process=NULL, global_iov_count=0, global_count=0;
MPI_Aint *memory_displacements=NULL;
int bytes_to_read_in_cycle=0;
@@ -83,9 +83,9 @@ mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh,
struct iovec *decoded_iov=NULL, *iov=NULL;
mca_fcoll_static_local_io_array *local_iov_array=NULL, *global_iov_array=NULL;
mca_fcoll_static_local_io_array *file_offsets_for_agg=NULL;
char *global_buf=NULL, *receive_buf=NULL;
int blocklen[3] = {1, 1, 1};
int static_num_io_procs=1;
OPAL_PTRDIFF_TYPE d[3], base;
@@ -96,7 +96,7 @@ mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh,
int my_aggregator=-1;
bool recvbuf_is_contiguous=false;
size_t ftype_size;
OPAL_PTRDIFF_TYPE ftype_extent, lb;
#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
double read_time = 0.0, start_read_time = 0.0, end_read_time = 0.0;
@@ -109,17 +109,17 @@ mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh,
#endif
opal_datatype_type_size ( &datatype->super, &ftype_size );
opal_datatype_get_extent ( &datatype->super, &lb, &ftype_extent );
/**************************************************************************
** 1. In case the data is not contigous in memory, decode it into an iovec
**************************************************************************/
if ( ( ftype_extent == (OPAL_PTRDIFF_TYPE) ftype_size) &&
opal_datatype_is_contiguous_memory_layout(&datatype->super,1) &&
0 == lb ) {
recvbuf_is_contiguous = true;
}
/* In case the data is not contigous in memory, decode it into an iovec */
if (!recvbuf_is_contiguous ) {
fh->f_decode_datatype ( (struct mca_io_ompio_file_t *)fh,
@@ -133,18 +133,18 @@ mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh,
else {
max_data = count * datatype->super.size;
}
if ( MPI_STATUS_IGNORE != status ) {
status->_ucount = max_data;
}
fh->f_get_num_aggregators ( &static_num_io_procs );
fh->f_set_aggregator_props ((struct mca_io_ompio_file_t *) fh,
static_num_io_procs,
max_data);
my_aggregator = fh->f_procs_in_group[fh->f_aggregator_index];
/* printf("max_data %ld\n", max_data); */
ret = fh->f_generate_current_file_view((struct mca_io_ompio_file_t *)fh,
max_data,
@@ -153,21 +153,21 @@ mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh,
if (ret != OMPI_SUCCESS){
goto exit;
}
if ( iov_size > 0 ) {
local_iov_array = (mca_fcoll_static_local_io_array *)malloc (iov_size * sizeof(mca_fcoll_static_local_io_array));
if ( NULL == local_iov_array){
ret = OMPI_ERR_OUT_OF_RESOURCE;
goto exit;
}
for (j=0; j < iov_size; j++){
local_iov_array[j].offset = (OMPI_MPI_OFFSET_TYPE)(intptr_t)
iov[j].iov_base;
local_iov_array[j].length = (size_t)iov[j].iov_len;
local_iov_array[j].process_id = fh->f_rank;
}
}
else {
@@ -178,13 +178,13 @@ mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh,
ret = OMPI_ERR_OUT_OF_RESOURCE;
goto exit;
}
local_iov_array[0].offset = (OMPI_MPI_OFFSET_TYPE)(intptr_t) 0;
local_iov_array[0].length = (size_t) 0;
local_iov_array[0].process_id = fh->f_rank;
}
d[0] = (OPAL_PTRDIFF_TYPE)&local_iov_array[0];
d[1] = (OPAL_PTRDIFF_TYPE)&local_iov_array[0].length;
d[2] = (OPAL_PTRDIFF_TYPE)&local_iov_array[0].process_id;
@@ -192,12 +192,12 @@ mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh,
for (i=0 ; i<3 ; i++) {
d[i] -= base;
}
/* io_array datatype for using in communication*/
types[0] = &ompi_mpi_long.dt;
types[1] = &ompi_mpi_long.dt;
types[2] = &ompi_mpi_int.dt;
ompi_datatype_create_struct (3,
blocklen,
d,
@@ -219,7 +219,7 @@ mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh,
MPI_MAX,
fh->f_comm,
fh->f_comm->c_coll.coll_allreduce_module);
if (OMPI_SUCCESS != ret){
goto exit;
}
@@ -227,8 +227,8 @@ mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh,
end_rcomm_time = MPI_Wtime();
rcomm_time += end_rcomm_time - start_rcomm_time;
#endif
if (my_aggregator == fh->f_rank) {
disp_index = (int *) malloc (fh->f_procs_per_group * sizeof(int));
if (NULL == disp_index) {
@@ -236,42 +236,42 @@ mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh,
ret = OMPI_ERR_OUT_OF_RESOURCE;
goto exit;
}
bytes_per_process = (int *) malloc (fh->f_procs_per_group * sizeof(int ));
if (NULL == bytes_per_process){
opal_output (1, "OUT OF MEMORY\n");
ret = OMPI_ERR_OUT_OF_RESOURCE;
goto exit;
}
bytes_remaining = (int *) malloc (fh->f_procs_per_group * sizeof(int));
if (NULL == bytes_remaining){
opal_output (1, "OUT OF MEMORY\n");
ret = OMPI_ERR_OUT_OF_RESOURCE;
goto exit;
}
current_index = (int *) malloc (fh->f_procs_per_group * sizeof(int));
if (NULL == current_index){
opal_output (1, "OUT OF MEMORY\n");
ret = OMPI_ERR_OUT_OF_RESOURCE;
goto exit;
}
blocklen_per_process = (int **)malloc (fh->f_procs_per_group * sizeof (int*));
if (NULL == blocklen_per_process) {
opal_output (1, "OUT OF MEMORY\n");
ret = OMPI_ERR_OUT_OF_RESOURCE;
goto exit;
}
displs_per_process = (MPI_Aint **)malloc (fh->f_procs_per_group * sizeof (MPI_Aint*));
if (NULL == displs_per_process) {
opal_output (1, "OUT OF MEMORY\n");
ret = OMPI_ERR_OUT_OF_RESOURCE;
goto exit;
}
for(i=0;i<fh->f_procs_per_group;i++){
current_index[i] = 0;
bytes_remaining[i] = 0;
@@ -279,22 +279,22 @@ mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh,
displs_per_process[i] = NULL;
}
}
iovec_count_per_process = (int *) malloc (fh->f_procs_per_group * sizeof(int));
if (NULL == iovec_count_per_process){
opal_output (1, "OUT OF MEMORY\n");
ret = OMPI_ERR_OUT_OF_RESOURCE;
goto exit;
}
displs = (int *) malloc (fh->f_procs_per_group * sizeof(int));
if (NULL == displs){
opal_output (1, "OUT OF MEMORY\n");
ret = OMPI_ERR_OUT_OF_RESOURCE;
goto exit;
}
#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
start_rexch = MPI_Wtime();
#endif
@@ -308,7 +308,7 @@ mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh,
fh->f_procs_in_group,
fh->f_procs_per_group,
fh->f_comm);
if( OMPI_SUCCESS != ret){
goto exit;
}
@@ -316,7 +316,7 @@ mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh,
end_rcomm_time = MPI_Wtime();
rcomm_time += end_rcomm_time - start_rcomm_time;
#endif
if (my_aggregator == fh->f_rank) {
displs[0] = 0;
global_iov_count = iovec_count_per_process[0];
@@ -325,8 +325,8 @@ mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh,
displs[i] = displs[i-1] + iovec_count_per_process[i-1];
}
}
if ( (my_aggregator == fh->f_rank) &&
(global_iov_count > 0 )) {
global_iov_array = (mca_fcoll_static_local_io_array *) malloc (global_iov_count *
@@ -352,7 +352,7 @@ mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh,
fh->f_procs_in_group,
fh->f_procs_per_group,
fh->f_comm);
if (OMPI_SUCCESS != ret){
fprintf(stderr,"global_iov_array gather error!\n");
goto exit;
@@ -361,13 +361,13 @@ mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh,
end_rcomm_time = MPI_Wtime();
rcomm_time += end_rcomm_time - start_rcomm_time;
#endif
if (NULL != local_iov_array){
free(local_iov_array);
local_iov_array = NULL;
}
if ( ( my_aggregator == fh->f_rank) &&
( global_iov_count > 0 )) {
sorted = (int *)malloc (global_iov_count * sizeof(int));
@@ -404,9 +404,9 @@ mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh,
}
}
}
#if DEBUG_ON
if (my_aggregator == fh->f_rank) {
for (gc_in=0; gc_in<global_iov_count; gc_in++){
printf("%d: Offset[%ld]: %lld, Length[%ld]: %ld\n",
@@ -416,15 +416,15 @@ mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh,
}
}
#endif
#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
start_rexch = MPI_Wtime();
#endif
for (index = 0; index < cycles; index++){
if (my_aggregator == fh->f_rank) {
fh->f_num_of_io_entries = 0;
if (NULL != fh->f_io_array) {
free (fh->f_io_array);
@@ -434,7 +434,7 @@ mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh,
free (global_buf);
global_buf = NULL;
}
if (NULL != sorted_file_offsets){
free(sorted_file_offsets);
sorted_file_offsets = NULL;
@@ -447,7 +447,7 @@ mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh,
free(memory_displacements);
memory_displacements= NULL;
}
if ( NULL != sendtype ) {
for ( i=0; i<fh->f_procs_per_group; i++ ) {
if ( MPI_DATATYPE_NULL != sendtype[i] ) {
@@ -456,7 +456,7 @@ mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh,
}
}
}
for(l=0;l<fh->f_procs_per_group;l++){
disp_index[l] = 1;
if (NULL != blocklen_per_process[l]){
@@ -510,7 +510,7 @@ mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh,
fh->f_procs_in_group,
fh->f_procs_per_group,
fh->f_comm);
#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
end_rcomm_time = MPI_Wtime();
rcomm_time += end_rcomm_time - start_rcomm_time;
@@ -527,12 +527,12 @@ mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh,
goto exit;
}
}
#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
start_rcomm_time = MPI_Wtime();
#endif
ret = MCA_PML_CALL(irecv(receive_buf,
bytes_to_read_in_cycle,
MPI_BYTE,
@@ -543,13 +543,13 @@ mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh,
if (OMPI_SUCCESS != ret){
goto exit;
}
#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
end_rcomm_time = MPI_Wtime();
rcomm_time += end_rcomm_time - start_rcomm_time;
#endif
if (my_aggregator == fh->f_rank) {
for (i=0;i<fh->f_procs_per_group; i++){
while (bytes_per_process[i] > 0){
@@ -560,7 +560,7 @@ mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh,
if (bytes_remaining[i]){ /*Remaining bytes in the current entry of
the global offset array*/
if (bytes_remaining[i] <= bytes_per_process[i]){
blocklen_per_process[i][disp_index[i] - 1] = bytes_remaining[i];
displs_per_process[i][disp_index[i] - 1] =
global_iov_array[sorted[current_index[i]]].offset +
@@ -653,7 +653,7 @@ mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh,
}
}
}
entries_per_aggregator=0;
for (i=0;i<fh->f_procs_per_group;i++){
for (j=0;j<disp_index[i];j++){
@@ -665,12 +665,12 @@ mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh,
blocklen_per_process[i][j],j,
displs_per_process[i][j],
fh->f_rank);
#endif
}
}
}
if (entries_per_aggregator > 0){
file_offsets_for_agg = (mca_fcoll_static_local_io_array *)
malloc(entries_per_aggregator*sizeof(mca_fcoll_static_local_io_array));
@@ -715,7 +715,7 @@ mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh,
memory_displacements[sorted_file_offsets[i-1]] +
file_offsets_for_agg[sorted_file_offsets[i-1]].length;
}
global_buf = (char *) malloc (global_count * sizeof(char));
if (NULL == global_buf){
opal_output(1, "OUT OF MEMORY\n");
@@ -734,7 +734,7 @@ mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh,
disp_index[i]);
}
#endif
fh->f_io_array = (mca_io_ompio_io_array_t *) malloc
(entries_per_aggregator * sizeof (mca_io_ompio_io_array_t));
if (NULL == fh->f_io_array) {
@@ -742,9 +742,9 @@ mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh,
ret = OMPI_ERR_OUT_OF_RESOURCE;
goto exit;
}
fh->f_num_of_io_entries = 0;
fh->f_io_array[0].offset =
(IOVBASE_TYPE *)(intptr_t)file_offsets_for_agg[sorted_file_offsets[0]].offset;
@@ -768,7 +768,7 @@ mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh,
fh->f_num_of_io_entries++;
}
}
#if DEBUG_ON
printf("*************************** %d\n", fh->f_num_of_io_entries);
for (i=0 ; i<fh->f_num_of_io_entries ; i++) {
@@ -781,7 +781,7 @@ mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh,
#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
start_read_time = MPI_Wtime();
#endif
if (fh->f_num_of_io_entries) {
if ( 0 > fh->f_fbtl->fbtl_preadv (fh)) {
opal_output (1, "READ FAILED\n");
@@ -789,13 +789,13 @@ mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh,
goto exit;
}
}
#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
end_read_time = MPI_Wtime();
read_time += end_read_time - start_read_time;
#endif
#if DEBUG_ON
printf("************Cycle: %d, Aggregator: %d ***************\n",
index+1,fh->f_rank);
@@ -804,14 +804,14 @@ mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh,
printf (" READ %d \n",((int *)global_buf)[i]);
}
#endif
temp_disp_index = (int *)calloc (1, fh->f_procs_per_group * sizeof (int));
if (NULL == temp_disp_index) {
opal_output (1, "OUT OF MEMORY\n");
ret = OMPI_ERR_OUT_OF_RESOURCE;
goto exit;
}
for (i=0; i<entries_per_aggregator; i++){
temp_index =
file_offsets_for_agg[sorted_file_offsets[i]].process_id;
@@ -830,11 +830,11 @@ mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh,
free(temp_disp_index);
temp_disp_index = NULL;
}
#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
start_rcomm_time = MPI_Wtime();
#endif
for (i=0;i<fh->f_procs_per_group; i++){
send_req[i] = MPI_REQUEST_NULL;
ompi_datatype_create_hindexed(disp_index[i],
@@ -855,7 +855,7 @@ mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh,
goto exit;
}
}
ret = ompi_request_wait_all (fh->f_procs_per_group,
send_req,
MPI_STATUS_IGNORE);
@@ -863,30 +863,30 @@ mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh,
goto exit;
}
} /* if ( my_aggregator == fh->f_rank ) */
ret = ompi_request_wait (&recv_req, MPI_STATUS_IGNORE);
if (OMPI_SUCCESS != ret){
goto exit;
}
#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
end_rcomm_time = MPI_Wtime();
rcomm_time += end_rcomm_time - start_rcomm_time;
#endif
position += bytes_to_read_in_cycle;
if (!recvbuf_is_contiguous) {
OPAL_PTRDIFF_TYPE mem_address;
size_t remaining = 0;
size_t temp_position = 0;
remaining = bytes_to_read_in_cycle;
while (remaining && (iov_count > iov_index)){
mem_address = (OPAL_PTRDIFF_TYPE)
(decoded_iov[iov_index].iov_base) + current_position;
if (remaining >=
(decoded_iov[iov_index].iov_len - current_position)) {
memcpy ((IOVBASE_TYPE *) mem_address,
@@ -912,7 +912,7 @@ mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh,
receive_buf = NULL;
}
}
}
#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
end_rexch = MPI_Wtime();
@@ -930,35 +930,35 @@ mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh,
nentry);
}
#endif
exit:
if (NULL != decoded_iov){
free(decoded_iov);
decoded_iov = NULL;
}
if (NULL != displs){
free(displs);
displs = NULL;
}
if (NULL != iovec_count_per_process){
free(iovec_count_per_process);
iovec_count_per_process=NULL;
}
if (NULL != local_iov_array){
free(local_iov_array);
local_iov_array=NULL;
}
if (NULL != global_iov_array){
free(global_iov_array);
global_iov_array=NULL;
}
if (my_aggregator == fh->f_rank) {
for(l=0;l<fh->f_procs_per_group;l++){
if (NULL != blocklen_per_process[l]){
free(blocklen_per_process[l]);
@@ -970,74 +970,74 @@ exit:
}
}
}
if (NULL != bytes_per_process){
free(bytes_per_process);
bytes_per_process =NULL;
}
if (NULL != disp_index){
free(disp_index);
disp_index =NULL;
}
if (NULL != displs_per_process){
free(displs_per_process);
displs_per_process = NULL;
}
if(NULL != bytes_remaining){
free(bytes_remaining);
bytes_remaining = NULL;
}
if(NULL != current_index){
free(current_index);
current_index = NULL;
}
if (NULL != blocklen_per_process){
free(blocklen_per_process);
blocklen_per_process =NULL;
}
if (NULL != bytes_remaining){
free(bytes_remaining);
bytes_remaining =NULL;
}
if (NULL != memory_displacements){
free(memory_displacements);
memory_displacements= NULL;
}
if (NULL != file_offsets_for_agg){
free(file_offsets_for_agg);
file_offsets_for_agg = NULL;
}
if (NULL != sorted_file_offsets){
free(sorted_file_offsets);
sorted_file_offsets = NULL;
}
if (NULL != sendtype){
free(sendtype);
sendtype=NULL;
}
if ( !recvbuf_is_contiguous ) {
if (NULL != receive_buf){
free(receive_buf);
receive_buf=NULL;
}
}
if (NULL != global_buf) {
free(global_buf);
global_buf = NULL;
}
if (NULL != sorted) {
free(sorted);
sorted = NULL;
@@ -1048,9 +1048,9 @@ exit:
send_req = NULL;
}
return ret;
}
@@ -1067,11 +1067,11 @@ int read_local_heap_sort (mca_fcoll_static_local_io_array *io_array,
int temp = 0;
unsigned char done = 0;
int* temp_arr = NULL;
if ( 0 == num_entries ) {
return OMPI_SUCCESS;
}
temp_arr = (int*)malloc(num_entries*sizeof(int));
if (NULL == temp_arr) {
opal_output (1, "OUT OF MEMORY\n");
@@ -1086,7 +1086,7 @@ int read_local_heap_sort (mca_fcoll_static_local_io_array *io_array,
done = 0;
j = i;
largest = j;
while (!done) {
left = j*2+1;
right = j*2+2;
@@ -1113,7 +1113,7 @@ int read_local_heap_sort (mca_fcoll_static_local_io_array *io_array,
}
}
}
for (i = num_entries-1; i >=1; --i) {
temp = temp_arr[0];
temp_arr[0] = temp_arr[i];
@@ -1122,11 +1122,11 @@ int read_local_heap_sort (mca_fcoll_static_local_io_array *io_array,
done = 0;
j = 0;
largest = j;
while (!done) {
left = j*2+1;
right = j*2+2;
if ((left <= heap_size) &&
(io_array[temp_arr[left]].offset >
io_array[temp_arr[j]].offset)) {
@@ -1153,7 +1153,7 @@ int read_local_heap_sort (mca_fcoll_static_local_io_array *io_array,
sorted[i] = temp_arr[i];
}
sorted[0] = temp_arr[0];
if (NULL != temp_arr) {
free(temp_arr);
temp_arr = NULL;
@@ -1169,7 +1169,7 @@ int read_find_next_index( int proc_index,
int global_iov_count,
int *sorted){
int i;
for(i=c_index+1; i<global_iov_count;i++){
if (read_get_process_id(global_iov_array[sorted[i]].process_id,
fh) == proc_index)

View file

@@ -63,9 +63,9 @@ mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh,
struct ompi_datatype_t *datatype,
ompi_status_public_t *status)
{
size_t max_data = 0, bytes_per_cycle=0;
struct iovec *iov=NULL, *decoded_iov=NULL;
uint32_t iov_count=0, iov_index=0;
@@ -76,7 +76,7 @@ mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh,
size_t total_bytes_written=0;
MPI_Aint **displs_per_process=NULL, *memory_displacements=NULL;
MPI_Aint bytes_to_write_in_cycle=0, global_iov_count=0, global_count=0;
mca_fcoll_static_local_io_array *local_iov_array =NULL, *global_iov_array=NULL;
mca_fcoll_static_local_io_array *file_offsets_for_agg=NULL;
int *sorted=NULL, *sorted_file_offsets=NULL, temp_pindex, *temp_disp_index=NULL;
@@ -94,7 +94,7 @@ mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh,
int my_aggregator=-1;
bool sendbuf_is_contiguous= false;
size_t ftype_size;
OPAL_PTRDIFF_TYPE ftype_extent, lb;
/*----------------------------------------------*/
@@ -104,25 +104,25 @@ mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh,
double exch_write = 0.0, start_exch = 0.0, end_exch = 0.0;
mca_io_ompio_print_entry nentry;
#endif
#if DEBUG_ON
MPI_Aint gc_in;
#endif
opal_datatype_type_size ( &datatype->super, &ftype_size );
opal_datatype_get_extent ( &datatype->super, &lb, &ftype_extent );
/**************************************************************************
** 1. In case the data is not contigous in memory, decode it into an iovec
**************************************************************************/
if ( ( ftype_extent == (OPAL_PTRDIFF_TYPE) ftype_size) &&
opal_datatype_is_contiguous_memory_layout(&datatype->super,1) &&
0 == lb ) {
sendbuf_is_contiguous = true;
}
/* In case the data is not contigous in memory, decode it into an iovec */
if (! sendbuf_is_contiguous ) {
fh->f_decode_datatype ((struct mca_io_ompio_file_t *)fh,
@@ -136,23 +136,23 @@ mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh,
else {
max_data = count * datatype->super.size;
}
if ( MPI_STATUS_IGNORE != status ) {
status->_ucount = max_data;
}
fh->f_get_num_aggregators ( & static_num_io_procs );
fh->f_set_aggregator_props ((struct mca_io_ompio_file_t *)fh,
static_num_io_procs,
max_data);
my_aggregator = fh->f_procs_in_group[fh->f_aggregator_index];
/* io_array datatype for using in communication*/
types[0] = &ompi_mpi_long.dt;
types[1] = &ompi_mpi_long.dt;
types[2] = &ompi_mpi_int.dt;
d[0] = (OPAL_PTRDIFF_TYPE)&local_iov_array[0];
d[1] = (OPAL_PTRDIFF_TYPE)&local_iov_array[0].length;
d[2] = (OPAL_PTRDIFF_TYPE)&local_iov_array[0].process_id;
@@ -167,9 +167,9 @@ mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh,
&io_array_type);
ompi_datatype_commit (&io_array_type);
/* #########################################################*/
ret = fh->f_generate_current_file_view((struct mca_io_ompio_file_t *)fh,
max_data,
&iov,
@@ -178,27 +178,27 @@ mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh,
fprintf(stderr,"Current File View Generation Error\n");
goto exit;
}
if (0 == iov_size){
iov_size = 1;
}
local_iov_array = (mca_fcoll_static_local_io_array *)malloc (iov_size * sizeof(mca_fcoll_static_local_io_array));
if ( NULL == local_iov_array){
fprintf(stderr,"local_iov_array allocation error\n");
ret = OMPI_ERR_OUT_OF_RESOURCE;
goto exit;
}
for (j=0; j < iov_size; j++){
local_iov_array[j].offset = (OMPI_MPI_OFFSET_TYPE)(intptr_t)
iov[j].iov_base;
local_iov_array[j].length = (size_t)iov[j].iov_len;
local_iov_array[j].process_id = fh->f_rank;
}
fh->f_get_bytes_per_agg ( (int *) &bytes_per_cycle);
local_cycles = ceil( ((double)max_data*fh->f_procs_per_group) /bytes_per_cycle);
@@ -212,7 +212,7 @@ mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh,
MPI_MAX,
fh->f_comm,
fh->f_comm->c_coll.coll_allreduce_module);
if (OMPI_SUCCESS != ret){
fprintf(stderr,"local cycles allreduce!\n");
goto exit;
@@ -221,53 +221,53 @@ mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh,
end_comm_time = MPI_Wtime();
comm_time += end_comm_time - start_comm_time;
#endif
if (my_aggregator == fh->f_rank) {
disp_index = (int *)malloc (fh->f_procs_per_group * sizeof (int));
if (NULL == disp_index) {
opal_output (1, "OUT OF MEMORY\n");
ret = OMPI_ERR_OUT_OF_RESOURCE;
goto exit;
}
bytes_per_process = (int *) malloc (fh->f_procs_per_group * sizeof(int ));
if (NULL == bytes_per_process){
opal_output (1, "OUT OF MEMORY\n");
ret = OMPI_ERR_OUT_OF_RESOURCE;
goto exit;
}
bytes_remaining = (int *) malloc (fh->f_procs_per_group * sizeof(int));
if (NULL == bytes_remaining){
opal_output (1, "OUT OF MEMORY\n");
ret = OMPI_ERR_OUT_OF_RESOURCE;
goto exit;
}
current_index = (int *) malloc (fh->f_procs_per_group * sizeof(int));
if (NULL == current_index){
opal_output (1, "OUT OF MEMORY\n");
ret = OMPI_ERR_OUT_OF_RESOURCE;
goto exit;
}
blocklen_per_process = (int **)malloc (fh->f_procs_per_group * sizeof (int*));
if (NULL == blocklen_per_process) {
opal_output (1, "OUT OF MEMORY\n");
ret = OMPI_ERR_OUT_OF_RESOURCE;
goto exit;
}
displs_per_process = (MPI_Aint **)
malloc (fh->f_procs_per_group * sizeof (MPI_Aint*));
if (NULL == displs_per_process) {
opal_output (1, "OUT OF MEMORY\n");
ret = OMPI_ERR_OUT_OF_RESOURCE;
goto exit;
}
for(i=0;i<fh->f_procs_per_group;i++){
current_index[i] = 0;
bytes_remaining[i] =0;
@@ -275,21 +275,21 @@ mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh,
displs_per_process[i] = NULL;
}
}
iovec_count_per_process = (int *) malloc (fh->f_procs_per_group * sizeof(int));
if (NULL == iovec_count_per_process){
opal_output (1, "OUT OF MEMORY\n");
ret = OMPI_ERR_OUT_OF_RESOURCE;
goto exit;
}
displs = (int *) malloc (fh->f_procs_per_group * sizeof(int));
if (NULL == displs){
opal_output (1, "OUT OF MEMORY\n");
ret = OMPI_ERR_OUT_OF_RESOURCE;
goto exit;
}
#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
start_exch = MPI_Wtime();
#endif
@@ -303,7 +303,7 @@ mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh,
fh->f_procs_in_group,
fh->f_procs_per_group,
fh->f_comm);
if( OMPI_SUCCESS != ret){
fprintf(stderr,"iov size allgatherv array!\n");
goto exit;
@@ -312,8 +312,8 @@ mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh,
end_comm_time = MPI_Wtime();
comm_time += end_comm_time - start_comm_time;
#endif
if (my_aggregator == fh->f_rank) {
displs[0] = 0;
global_iov_count = iovec_count_per_process[0];
@@ -322,8 +322,8 @@ mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh,
displs[i] = displs[i-1] + iovec_count_per_process[i-1];
}
}
if (my_aggregator == fh->f_rank) {
global_iov_array = (mca_fcoll_static_local_io_array *) malloc (global_iov_count *
sizeof(mca_fcoll_static_local_io_array));
@@ -333,7 +333,7 @@ mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh,
goto exit;
}
}
#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
start_exch = MPI_Wtime();
#endif
@@ -356,13 +356,13 @@ mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh,
end_comm_time = MPI_Wtime();
comm_time += end_comm_time - start_comm_time;
#endif
if (my_aggregator == fh->f_rank) {
if ( 0 == global_iov_count){
global_iov_count = 1;
}
sorted = (int *)malloc (global_iov_count * sizeof(int));
if (NULL == sorted) {
opal_output (1, "OUT OF MEMORY\n");
@@ -390,9 +390,9 @@ mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh,
}
}
#if DEBUG_ON
if (my_aggregator == fh->f_rank) {
for (gc_in=0; gc_in<global_iov_count; gc_in++){
printf("%d: Offset[%ld]: %lld, Length[%ld]: %ld\n",
@@ -402,12 +402,12 @@ mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh,
}
}
#endif
#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
start_exch = MPI_Wtime();
#endif
for (index = 0; index < cycles; index++){
if (my_aggregator == fh->f_rank) {
@@ -420,7 +420,7 @@ mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh,
free (global_buf);
global_buf = NULL;
}
if ( NULL != recvtype ) {
for ( i=0; i < fh->f_procs_per_group; i++ ) {
if (MPI_DATATYPE_NULL != recvtype[i] ) {
@@ -456,17 +456,17 @@ mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh,
free(sorted_file_offsets);
sorted_file_offsets = NULL;
}
if(NULL != file_offsets_for_agg){
free(file_offsets_for_agg);
file_offsets_for_agg = NULL;
}
if (NULL != memory_displacements){
free(memory_displacements);
memory_displacements = NULL;
}
}
if (local_cycles > index) {
if ((index == local_cycles-1) && (max_data % bytes_per_cycle)) {
@@ -493,7 +493,7 @@ mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh,
/**********************************************************
**Gather the Data from all the processes at the writers **
*********************************************************/
#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
start_exch = MPI_Wtime();
#endif
@@ -508,7 +508,7 @@ mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh,
fh->f_procs_in_group,
fh->f_procs_per_group,
fh->f_comm);
if (OMPI_SUCCESS != ret){
fprintf(stderr,"bytes_to_write_in_cycle gather error!\n");
goto exit;
@@ -517,28 +517,28 @@ mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh,
end_comm_time = MPI_Wtime();
comm_time += end_comm_time - start_comm_time;
#endif
/*
For each aggregator
it needs to get bytes_to_write_in_cycle from each process
in group which adds up to bytes_per_cycle
*/
if (my_aggregator == fh->f_rank) {
for (i=0;i<fh->f_procs_per_group; i++){
/* printf("bytes_per_process[%d]: %d\n", i, bytes_per_process[i]);
*/
#if DEBUG_ON
printf ("%d : bytes_per_process : %d\n",
fh->f_procs_in_group[i],
bytes_per_process[i]);
#endif
while (bytes_per_process[i] > 0){
if (get_process_id(global_iov_array[sorted[current_index[i]]].process_id,
fh) == i){ /* current id owns this entry!*/
/*Add and subtract length and create
blocklength and displs array*/
if (bytes_remaining[i]){ /*Remaining bytes in the current entry of
@@ -549,7 +549,7 @@ mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh,
global_iov_array[sorted[current_index[i]]].offset +
(global_iov_array[sorted[current_index[i]]].length
- bytes_remaining[i]);
blocklen_per_process[i] = (int *) realloc
((void *)blocklen_per_process[i], (disp_index[i]+1)*sizeof(int));
displs_per_process[i] = (MPI_Aint *)realloc
@@ -591,7 +591,7 @@ mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh,
bytes_per_process[i];
displs_per_process[i][disp_index[i] - 1] =
global_iov_array[sorted[current_index[i]]].offset;
bytes_remaining[i] =
global_iov_array[sorted[current_index[i]]].length -
bytes_per_process[i];
@@ -650,13 +650,13 @@ mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh,
blocklen_per_process[i][j],j,
displs_per_process[i][j],
fh->f_rank);
#endif
}
}
}
if (entries_per_aggregator > 0){
file_offsets_for_agg = (mca_fcoll_static_local_io_array *)
malloc(entries_per_aggregator*sizeof(mca_fcoll_static_local_io_array));
@@ -692,7 +692,7 @@ mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh,
local_heap_sort (file_offsets_for_agg,
entries_per_aggregator,
sorted_file_offsets);
memory_displacements = (MPI_Aint *) malloc
(entries_per_aggregator * sizeof(MPI_Aint));
memory_displacements[sorted_file_offsets[0]] = 0;
@@ -701,7 +701,7 @@ mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh,
memory_displacements[sorted_file_offsets[i-1]] +
file_offsets_for_agg[sorted_file_offsets[i-1]].length;
}
temp_disp_index = (int *)calloc (1, fh->f_procs_per_group * sizeof (int));
if (NULL == temp_disp_index) {
opal_output (1, "OUT OF MEMORY\n");
@ -728,7 +728,7 @@ mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh,
free(temp_disp_index); free(temp_disp_index);
temp_disp_index = NULL; temp_disp_index = NULL;
} }
#if DEBUG_ON #if DEBUG_ON
printf("************Cycle: %d, Aggregator: %d ***************\n", printf("************Cycle: %d, Aggregator: %d ***************\n",
index+1,fh->f_rank); index+1,fh->f_rank);
@ -741,7 +741,7 @@ mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh,
disp_index[ file_offsets_for_agg[sorted_file_offsets[i]].process_id]); disp_index[ file_offsets_for_agg[sorted_file_offsets[i]].process_id]);
} }
#endif #endif
#if DEBUG_ON #if DEBUG_ON
printf("%d: global_count : %ld, bytes_to_write_in_cycle : %ld, procs_per_group: %d\n", printf("%d: global_count : %ld, bytes_to_write_in_cycle : %ld, procs_per_group: %d\n",
fh->f_rank, fh->f_rank,
@ -758,7 +758,7 @@ mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh,
ret = OMPI_ERR_OUT_OF_RESOURCE; ret = OMPI_ERR_OUT_OF_RESOURCE;
goto exit; goto exit;
} }
for (i=0;i<fh->f_procs_per_group; i++){ for (i=0;i<fh->f_procs_per_group; i++){
ompi_datatype_create_hindexed(disp_index[i], ompi_datatype_create_hindexed(disp_index[i],
blocklen_per_process[i], blocklen_per_process[i],
@ -779,7 +779,7 @@ mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh,
} }
} }
} }
if ( sendbuf_is_contiguous ) { if ( sendbuf_is_contiguous ) {
send_buf = &((char*)buf)[total_bytes_written]; send_buf = &((char*)buf)[total_bytes_written];
} }
@ -790,7 +790,7 @@ mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh,
OPAL_PTRDIFF_TYPE mem_address; OPAL_PTRDIFF_TYPE mem_address;
size_t remaining = 0; size_t remaining = 0;
size_t temp_position = 0; size_t temp_position = 0;
send_buf = malloc (bytes_to_write_in_cycle); send_buf = malloc (bytes_to_write_in_cycle);
if (NULL == send_buf) { if (NULL == send_buf) {
opal_output (1, "OUT OF MEMORY\n"); opal_output (1, "OUT OF MEMORY\n");
@ -798,11 +798,11 @@ mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh,
goto exit; goto exit;
} }
remaining = bytes_to_write_in_cycle; remaining = bytes_to_write_in_cycle;
while (remaining) { while (remaining) {
mem_address = (OPAL_PTRDIFF_TYPE) mem_address = (OPAL_PTRDIFF_TYPE)
(decoded_iov[iov_index].iov_base) + current_position; (decoded_iov[iov_index].iov_base) + current_position;
if (remaining >= if (remaining >=
(decoded_iov[iov_index].iov_len - current_position)) { (decoded_iov[iov_index].iov_len - current_position)) {
memcpy (send_buf+temp_position, memcpy (send_buf+temp_position,
@ -825,7 +825,7 @@ mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh,
} }
} }
total_bytes_written += bytes_to_write_in_cycle; total_bytes_written += bytes_to_write_in_cycle;
ret = MCA_PML_CALL(isend(send_buf, ret = MCA_PML_CALL(isend(send_buf,
bytes_to_write_in_cycle, bytes_to_write_in_cycle,
MPI_BYTE, MPI_BYTE,
@ -834,12 +834,12 @@ mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh,
MCA_PML_BASE_SEND_STANDARD, MCA_PML_BASE_SEND_STANDARD,
fh->f_comm, fh->f_comm,
&send_req)); &send_req));
if ( OMPI_SUCCESS != ret ){ if ( OMPI_SUCCESS != ret ){
fprintf(stderr,"isend error!\n"); fprintf(stderr,"isend error!\n");
goto exit; goto exit;
} }
ret = ompi_request_wait (&send_req, MPI_STATUS_IGNORE); ret = ompi_request_wait (&send_req, MPI_STATUS_IGNORE);
if (OMPI_SUCCESS != ret){ if (OMPI_SUCCESS != ret){
goto exit; goto exit;
@ -850,7 +850,7 @@ mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh,
send_buf = NULL; send_buf = NULL;
} }
} }
if (my_aggregator == fh->f_rank) { if (my_aggregator == fh->f_rank) {
ret = ompi_request_wait_all (fh->f_procs_per_group, ret = ompi_request_wait_all (fh->f_procs_per_group,
recv_req, recv_req,
@ -858,7 +858,7 @@ mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh,
if (OMPI_SUCCESS != ret){ if (OMPI_SUCCESS != ret){
goto exit; goto exit;
} }
#if DEBUG_ON #if DEBUG_ON
printf("************Cycle: %d, Aggregator: %d ***************\n", printf("************Cycle: %d, Aggregator: %d ***************\n",
index+1,fh->f_rank); index+1,fh->f_rank);
@ -872,9 +872,9 @@ mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh,
end_comm_time = MPI_Wtime(); end_comm_time = MPI_Wtime();
comm_time += end_comm_time - start_comm_time; comm_time += end_comm_time - start_comm_time;
#endif #endif
if (my_aggregator == fh->f_rank) { if (my_aggregator == fh->f_rank) {
fh->f_io_array = (mca_io_ompio_io_array_t *) malloc fh->f_io_array = (mca_io_ompio_io_array_t *) malloc
(entries_per_aggregator * sizeof (mca_io_ompio_io_array_t)); (entries_per_aggregator * sizeof (mca_io_ompio_io_array_t));
@ -918,11 +918,11 @@ mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh,
fh->f_io_array[i].length); fh->f_io_array[i].length);
} }
#endif #endif
#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN #if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
start_write_time = MPI_Wtime(); start_write_time = MPI_Wtime();
#endif #endif
if (fh->f_num_of_io_entries) { if (fh->f_num_of_io_entries) {
if ( 0 > fh->f_fbtl->fbtl_pwritev (fh)) { if ( 0 > fh->f_fbtl->fbtl_pwritev (fh)) {
opal_output (1, "WRITE FAILED\n"); opal_output (1, "WRITE FAILED\n");
@ -930,17 +930,17 @@ mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh,
goto exit; goto exit;
} }
} }
#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN #if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
end_write_time = MPI_Wtime(); end_write_time = MPI_Wtime();
write_time += end_write_time - start_write_time; write_time += end_write_time - start_write_time;
#endif #endif
} }
if (my_aggregator == fh->f_rank) { if (my_aggregator == fh->f_rank) {
} } } }
#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN #if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
end_exch = MPI_Wtime(); end_exch = MPI_Wtime();
exch_write += end_exch - start_exch; exch_write += end_exch - start_exch;
@ -957,17 +957,17 @@ mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh,
nentry); nentry);
} }
#endif #endif
exit: exit:
if (NULL != decoded_iov){ if (NULL != decoded_iov){
free(decoded_iov); free(decoded_iov);
decoded_iov = NULL; decoded_iov = NULL;
} }
if (my_aggregator == fh->f_rank) { if (my_aggregator == fh->f_rank) {
if (NULL != local_iov_array){ if (NULL != local_iov_array){
free(local_iov_array); free(local_iov_array);
local_iov_array = NULL; local_iov_array = NULL;
@ -983,7 +983,7 @@ exit:
} }
} }
} }
if ( NULL != recv_req ) { if ( NULL != recv_req ) {
free ( recv_req ); free ( recv_req );
recv_req = NULL; recv_req = NULL;
@ -999,57 +999,57 @@ exit:
free(global_buf); free(global_buf);
global_buf = NULL; global_buf = NULL;
} }
if (NULL != recvtype){ if (NULL != recvtype){
free(recvtype); free(recvtype);
recvtype = NULL; recvtype = NULL;
} }
if (NULL != sorted_file_offsets){ if (NULL != sorted_file_offsets){
free(sorted_file_offsets); free(sorted_file_offsets);
sorted_file_offsets = NULL; sorted_file_offsets = NULL;
} }
if (NULL != file_offsets_for_agg){ if (NULL != file_offsets_for_agg){
free(file_offsets_for_agg); free(file_offsets_for_agg);
file_offsets_for_agg = NULL; file_offsets_for_agg = NULL;
} }
if (NULL != memory_displacements){ if (NULL != memory_displacements){
free(memory_displacements); free(memory_displacements);
memory_displacements = NULL; memory_displacements = NULL;
} }
if (NULL != displs_per_process){ if (NULL != displs_per_process){
free(displs_per_process); free(displs_per_process);
displs_per_process = NULL; displs_per_process = NULL;
} }
if (NULL != blocklen_per_process){ if (NULL != blocklen_per_process){
free(blocklen_per_process); free(blocklen_per_process);
blocklen_per_process = NULL; blocklen_per_process = NULL;
} }
if(NULL != current_index){ if(NULL != current_index){
free(current_index); free(current_index);
current_index = NULL; current_index = NULL;
} }
if(NULL != bytes_remaining){ if(NULL != bytes_remaining){
free(bytes_remaining); free(bytes_remaining);
bytes_remaining = NULL; bytes_remaining = NULL;
} }
if (NULL != disp_index){ if (NULL != disp_index){
free(disp_index); free(disp_index);
disp_index = NULL; disp_index = NULL;
} }
if (NULL != sorted) { if (NULL != sorted) {
free(sorted); free(sorted);
sorted = NULL; sorted = NULL;
} }
return ret; return ret;
} }
@ -1068,12 +1068,12 @@ static int local_heap_sort (mca_fcoll_static_local_io_array *io_array,
int temp = 0; int temp = 0;
unsigned char done = 0; unsigned char done = 0;
int* temp_arr = NULL; int* temp_arr = NULL;
if( 0 == num_entries){ if( 0 == num_entries){
num_entries = 1; num_entries = 1;
} }
temp_arr = (int*)malloc(num_entries*sizeof(int)); temp_arr = (int*)malloc(num_entries*sizeof(int));
if (NULL == temp_arr) { if (NULL == temp_arr) {
opal_output (1, "OUT OF MEMORY\n"); opal_output (1, "OUT OF MEMORY\n");
@ -1088,7 +1088,7 @@ static int local_heap_sort (mca_fcoll_static_local_io_array *io_array,
done = 0; done = 0;
j = i; j = i;
largest = j; largest = j;
while (!done) { while (!done) {
left = j*2+1; left = j*2+1;
right = j*2+2; right = j*2+2;
@ -1115,7 +1115,7 @@ static int local_heap_sort (mca_fcoll_static_local_io_array *io_array,
} }
} }
} }
for (i = num_entries-1; i >=1; --i) { for (i = num_entries-1; i >=1; --i) {
temp = temp_arr[0]; temp = temp_arr[0];
temp_arr[0] = temp_arr[i]; temp_arr[0] = temp_arr[i];
@ -1124,11 +1124,11 @@ static int local_heap_sort (mca_fcoll_static_local_io_array *io_array,
done = 0; done = 0;
j = 0; j = 0;
largest = j; largest = j;
while (!done) { while (!done) {
left = j*2+1; left = j*2+1;
right = j*2+2; right = j*2+2;
if ((left <= heap_size) && if ((left <= heap_size) &&
(io_array[temp_arr[left]].offset > (io_array[temp_arr[left]].offset >
io_array[temp_arr[j]].offset)) { io_array[temp_arr[j]].offset)) {
@ -1155,7 +1155,7 @@ static int local_heap_sort (mca_fcoll_static_local_io_array *io_array,
sorted[i] = temp_arr[i]; sorted[i] = temp_arr[i];
} }
sorted[0] = temp_arr[0]; sorted[0] = temp_arr[0];
if (NULL != temp_arr) { if (NULL != temp_arr) {
free(temp_arr); free(temp_arr);
temp_arr = NULL; temp_arr = NULL;
@ -1170,7 +1170,7 @@ int find_next_index( int proc_index,
int global_iov_count, int global_iov_count,
int *sorted){ int *sorted){
int i; int i;
for(i=c_index+1; i<global_iov_count;i++){ for(i=c_index+1; i<global_iov_count;i++){
if (get_process_id(global_iov_array[sorted[i]].process_id, if (get_process_id(global_iov_array[sorted[i]].process_id,
fh) == proc_index) fh) == proc_index)
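
local_heap_sort() above does not reorder io_array itself; it fills sorted[] with an index permutation so that io_array[sorted[i]].offset is non-decreasing, and the aggregator then walks the entries in that order (for example when accumulating memory_displacements). A minimal standalone sketch of the same index-permutation idea, for illustration only — the entry_t type and the qsort-based comparator below are stand-ins, not the fcoll implementation:

    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-in for mca_fcoll_static_local_io_array: only offset matters here. */
    typedef struct {
        long offset;
        long length;
        int  process_id;
    } entry_t;

    static const entry_t *cmp_base;   /* array being indexed by the comparator */

    static int cmp_offset(const void *a, const void *b)
    {
        long lhs = cmp_base[*(const int *)a].offset;
        long rhs = cmp_base[*(const int *)b].offset;
        return (lhs > rhs) - (lhs < rhs);
    }

    int main(void)
    {
        entry_t io_array[] = { {300, 10, 2}, {100, 10, 0}, {200, 10, 1} };
        int n = (int)(sizeof(io_array) / sizeof(io_array[0]));
        int *sorted = malloc(n * sizeof(int));
        if (NULL == sorted) return 1;

        for (int i = 0; i < n; i++) sorted[i] = i;   /* identity permutation */
        cmp_base = io_array;
        qsort(sorted, n, sizeof(int), cmp_offset);   /* sort indices by offset */

        /* Walk entries in ascending file-offset order, as the aggregator does. */
        for (int i = 0; i < n; i++)
            printf("offset %ld from process %d\n",
                   io_array[sorted[i]].offset, io_array[sorted[i]].process_id);

        free(sorted);
        return 0;
    }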

View file

@@ -918,7 +918,7 @@ static int mca_btl_tcp_component_exchange(void)
} /* end of for opal_ifbegin() */
} /* end of for tcp_num_btls */
OPAL_MODEX_SEND(rc, OPAL_PMIX_GLOBAL,
&mca_btl_tcp_component.super.btl_version,
addrs, xfer_size);
free(addrs);
} /* end if */

View file

@@ -4,7 +4,7 @@ Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
Copyright (c) 2004-2005 The University of Tennessee and The University
of Tennessee Research Foundation. All rights
reserved.
Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
University of Stuttgart. All rights reserved.
Copyright (c) 2004-2005 The Regents of the University of California.
All rights reserved.

View file

@@ -2,9 +2,9 @@
#
# Copyright (c) 2015 Intel, Inc. All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#

View file

@@ -220,7 +220,7 @@ Return values:
int PMI_Get_appnum( int *appnum );
/*@
PMI_Publish_name - publish a name
Input parameters:
. service_name - string representing the service being published
@@ -444,7 +444,7 @@ This function returns the string length required to store a keyval space name.
A routine is used rather than setting a maximum value in 'pmi.h' to allow
different implementations of PMI to be used with the same executable. These
different implementations may allow different maximum lengths; by using a
routine here, we can interface with a variety of implementations of PMI.
@*/
@@ -544,7 +544,7 @@ Return values:
Notes:
This function puts the key/value pair in the specified keyval space. The
value is not visible to other processes until 'PMI_KVS_Commit()' is called.
The function may complete locally. After 'PMI_KVS_Commit()' is called, the
value may be retrieved by calling 'PMI_KVS_Get()'. All keys put to a keyval
space must be unique to the keyval space. You may not put more than once
@@ -649,7 +649,7 @@ Return values:
- PMI_FAIL - failed to get the next keyval pair
Notes:
This function retrieves the next keyval pair from the specified keyval space.
'PMI_KVS_Iter_first()' must have been previously called. The end of the keyval
space is specified by returning an empty key string. The output parameters,
key and val, must be at least as long as the values returned by
@@ -682,7 +682,7 @@ Input Parameters:
. cmds - array of command strings
. argvs - array of argv arrays for each command string
. maxprocs - array of maximum processes to spawn for each command string
. info_keyval_sizes - array giving the number of elements in each of the
'info_keyval_vectors'
. info_keyval_vectors - array of keyval vector arrays
. preput_keyval_size - Number of elements in 'preput_keyval_vector'
@@ -703,7 +703,7 @@ field refers to the size of the array parameters - 'cmd', 'argvs', 'maxprocs',
to the size of the 'preput_keyval_vector' array. The 'preput_keyval_vector'
contains keyval pairs that will be put in the keyval space of the newly
created process group before the processes are started. The 'maxprocs' array
specifies the desired number of processes to create for each 'cmd' string.
The actual number of processes may be less than the numbers specified in
maxprocs. The acceptable number of processes spawned may be controlled by
``soft'' keyvals in the info arrays. The ``soft'' option is specified by
@@ -774,7 +774,7 @@ Notes:
This function removes PMI specific arguments from the command line and
creates the corresponding 'PMI_keyval_t' structures for them. It returns
an array and size to the caller that can then be passed to 'PMI_Spawn_multiple()'.
The array can be freed by 'PMI_Free_keyvals()'. The routine 'free()' should
not be used to free this array as there is no requirement that the array be
allocated with 'malloc()'.
@@ -795,7 +795,7 @@ Return values:
Notes:
This function frees the data returned by 'PMI_Args_to_keyval' and 'PMI_Parse_option'.
Using this routine instead of 'free' allows the PMI package to track
allocation of storage or to use interal storage as it sees fit.
@*/
int PMI_Free_keyvals(PMI_keyval_t keyvalp[], int size);
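
The PMI_KVS_Put()/PMI_KVS_Commit()/PMI_KVS_Get() notes above (a value only becomes visible to other processes after the commit) are easiest to see in a tiny client. A hedged sketch against the PMI-1 calls declared in this header; return codes are unchecked, the key/value strings are made up, and the fixed buffer sizes are illustrative — a stricter client would size them with the length-query routines the header describes:

    #include <stdio.h>
    #include <pmi.h>

    int main(void)
    {
        int spawned, rank, size;
        char kvsname[256], key[64], val[64], value[256];

        PMI_Init(&spawned);
        PMI_Get_rank(&rank);
        PMI_Get_size(&size);
        PMI_KVS_Get_my_name(kvsname, (int)sizeof(kvsname));

        /* Each rank publishes one key; it becomes visible only after commit. */
        snprintf(key, sizeof(key), "rank-%d-addr", rank);
        snprintf(val, sizeof(val), "port:%d", 5000 + rank);
        PMI_KVS_Put(kvsname, key, val);
        PMI_KVS_Commit(kvsname);
        PMI_Barrier();                       /* all commits done before any get */

        /* Read the neighbour's value. */
        snprintf(key, sizeof(key), "rank-%d-addr", (rank + 1) % size);
        PMI_KVS_Get(kvsname, key, value, (int)sizeof(value));
        printf("rank %d sees %s = %s\n", rank, key, value);

        PMI_Finalize();
        return 0;
    }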

View file

@@ -89,7 +89,7 @@ typedef struct PMI_keyval_t
cannot access the KVS spaces of another job (this may happen, for
example, if each mpiexec creates the KVS spaces for the processes
that it manages).
@*/
typedef struct PMI2_Connect_comm {
int (*read)( void *buf, int maxlen, void *ctx );
@@ -107,10 +107,10 @@ typedef struct PMI2_Connect_comm {
. size - number of processes in the job
. rank - rank of this process in the job
- appnum - which executable is this on the mpiexec commandline
Return values:
Returns 'MPI_SUCCESS' on success and an MPI error code on failure.
Notes:
Initialize PMI for this process group. The value of spawned indicates whether
this process was created by 'PMI2_Spawn_multiple'. 'spawned' will be non-zero
@@ -121,13 +121,13 @@ int PMI2_Init(int *spawned, int *size, int *rank, int *appnum);
/*@
PMI2_Finalize - finalize the Process Manager Interface
Return values:
Returns 'MPI_SUCCESS' on success and an MPI error code on failure.
Notes:
Finalize PMI for this job.
@*/
int PMI2_Finalize(void);
@@ -136,17 +136,17 @@ int PMI2_Finalize(void);
Return values:
Non-zero if PMI2_Initialize has been called successfully, zero otherwise.
@*/
int PMI2_Initialized(void);
/*@
PMI2_Abort - abort the process group associated with this process
Input Parameters:
+ flag - non-zero if all processes in this job should abort, zero otherwise
- error_msg - error message to be printed
Return values:
If the abort succeeds this function will not return. Returns an MPI
error code otherwise.
@@ -163,7 +163,7 @@ int PMI2_Abort(int flag, const char msg[]);
. argcs - size of argv arrays for each command string
. argvs - array of argv arrays for each command string
. maxprocs - array of maximum processes to spawn for each command string
. info_keyval_sizes - array giving the number of elements in each of the
'info_keyval_vectors'
. info_keyval_vectors - array of keyval vector arrays
. preput_keyval_size - Number of elements in 'preput_keyval_vector'
@@ -184,7 +184,7 @@ int PMI2_Abort(int flag, const char msg[]);
to the size of the 'preput_keyval_vector' array. The 'preput_keyval_vector'
contains keyval pairs that will be put in the keyval space of the newly
created job before the processes are started. The 'maxprocs' array
specifies the desired number of processes to create for each 'cmd' string.
The actual number of processes may be less than the numbers specified in
maxprocs. The acceptable number of processes spawned may be controlled by
``soft'' keyvals in the info arrays. The ``soft'' option is specified by
@@ -202,14 +202,14 @@ int PMI2_Job_Spawn(int count, const char * cmds[],
int errors[]);
/*@
PMI2_Job_GetId - get job id of this job
Input parameters:
. jobid_size - size of buffer provided in jobid
Output parameters:
. jobid - the job id of this job
Return values:
Returns 'MPI_SUCCESS' on success and an MPI error code on failure.
@@ -225,7 +225,7 @@ int PMI2_Job_GetId(char jobid[], int jobid_size);
Output parameters:
. conn - connection structure used to exteblish communication with
the remote job
Return values:
Returns 'MPI_SUCCESS' on success and an MPI error code on failure.
@@ -258,7 +258,7 @@ int PMI2_Job_Disconnect(const char jobid[]);
Input Parameters:
+ key - key
- value - value
Return values:
Returns 'MPI_SUCCESS' on success and an MPI error code on failure.
@@ -287,7 +287,7 @@ int PMI2_KVS_Put(const char key[], const char value[]);
their corresponding PMI2_KVS_Fence until some process issues a
PMI2_KVS_Get. This might be appropriate for some wide-area
implementations.
@*/
int PMI2_KVS_Fence(void);
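
Together with PMI2_KVS_Put() and PMI2_KVS_Get(), the fence gives the usual put/fence/get exchange. A hedged sketch built only from the prototypes shown in this header; return codes are unchecked, and the key names, buffer sizes and the -1 "no particular source" hint are illustrative:

    #include <stdio.h>
    #include <pmi2.h>

    int main(void)
    {
        int spawned, size, rank, appnum, vallen;
        char jobid[64], key[64], val[64], buf[64];

        PMI2_Init(&spawned, &size, &rank, &appnum);
        PMI2_Job_GetId(jobid, (int)sizeof(jobid));

        /* Publish a per-rank value ... */
        snprintf(key, sizeof(key), "addr-%d", rank);
        snprintf(val, sizeof(val), "port:%d", 5000 + rank);
        PMI2_KVS_Put(key, val);

        /* ... make it globally visible ... */
        PMI2_KVS_Fence();

        /* ... and read a peer's value from the same job. */
        snprintf(key, sizeof(key), "addr-%d", (rank + 1) % size);
        PMI2_KVS_Get(jobid, -1 /* src_pmi_id: no particular source rank */,
                     key, buf, (int)sizeof(buf), &vallen);
        printf("rank %d of %d sees %s = %s\n", rank, size, key, buf);

        PMI2_Finalize();
        return 0;
    }
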
@@ -308,7 +308,7 @@ int PMI2_KVS_Fence(void);
+ value - value associated with key
- vallen - length of the returned value, or, if the length is longer
than maxvalue, the negative of the required length is returned
Return values:
Returns 'MPI_SUCCESS' on success and an MPI error code on failure.
@@ -328,7 +328,7 @@ int PMI2_KVS_Get(const char *jobid, int src_pmi_id, const char key[], char value
Output Parameters:
+ value - value of the attribute
- found - non-zero indicates that the attribute was found
Return values:
Returns 'MPI_SUCCESS' on success and an MPI error code on failure.
@@ -367,7 +367,7 @@ int PMI2_Info_GetNodeAttr(const char name[], char value[], int valuelen, int *fo
+ array - value of attribute
. outlen - number of elements returned
- found - non-zero if attribute was found
Return values:
Returns 'MPI_SUCCESS' on success and an MPI error code on failure.
@@ -403,7 +403,7 @@ int PMI2_Info_GetNodeAttrIntArray(const char name[], int array[], int arraylen,
Notes:
For example, it might be used to share segment ids with other
processes on the same SMP node.
@*/
int PMI2_Info_PutNodeAttr(const char name[], const char value[]);
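
PMI2_Info_PutNodeAttr() and PMI2_Info_GetNodeAttr() form the node-local exchange hinted at in the note above, e.g. sharing a shared-memory segment id among processes on the same SMP node. A hedged sketch: the attribute name is made up, and it assumes the usual PMI2 prototype for PMI2_Info_GetNodeAttr() with a trailing waitfor flag, which is not visible in the truncated hunk context above:

    #include <stdio.h>
    #include <pmi2.h>

    /* One designated process per node publishes a node-local attribute;
     * the others on that node block in the get (waitfor != 0) until it appears. */
    static void share_segment_id(int i_am_node_leader, int segment_id)
    {
        char value[64];
        int found = 0;

        if (i_am_node_leader) {
            snprintf(value, sizeof(value), "%d", segment_id);
            PMI2_Info_PutNodeAttr("ex-shm-segid", value);          /* illustrative name */
        } else {
            PMI2_Info_GetNodeAttr("ex-shm-segid", value, (int)sizeof(value), &found, 1);
            if (found) {
                printf("node-local segment id: %s\n", value);
            }
        }
    }
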
@@ -418,7 +418,7 @@ int PMI2_Info_PutNodeAttr(const char name[], const char value[]);
Output Parameters:
+ value - value of the attribute
- found - non-zero indicates that the attribute was found
Return values:
Returns 'MPI_SUCCESS' on success and an MPI error code on failure.
@@ -437,7 +437,7 @@ int PMI2_Info_GetJobAttr(const char name[], char value[], int valuelen, int *fou
+ array - value of attribute
. outlen - number of elements returned
- found - non-zero if attribute was found
Return values:
Returns 'MPI_SUCCESS' on success and an MPI error code on failure.
@@ -449,7 +449,7 @@ int PMI2_Info_GetJobAttr(const char name[], char value[], int valuelen, int *fou
. hasNameServ - The value hasNameServ is true if the PMI2 environment
supports the name service operations (publish, lookup, and
unpublish).
. physTopology - Return the topology of the underlying network. The
valid topology types include cartesian, hierarchical, complete,
kautz, hypercube; additional types may be added as necessary. If
@@ -471,7 +471,7 @@ int PMI2_Info_GetJobAttr(const char name[], char value[], int valuelen, int *fou
is cartesian,complete. All processes are connected by the
cartesian part of this, but for each complete network, only the
processes on the same node are connected.
. cartDims - Return a string of comma-separated values describing
the dimensions of the Cartesian topology. This must be consistent
with the value of cartCoords that may be returned by
@@ -482,7 +482,7 @@ int PMI2_Info_GetJobAttr(const char name[], char value[], int valuelen, int *fou
PMI interface and how extensions can be added within the same API
and wire protocol. For example, adding more complex network
topologies requires only adding new keys, not new routines.
. isHeterogeneous - The value isHeterogeneous is true if the
processes belonging to the job are running on nodes with different
underlying data models.
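
Job-level attributes such as hasNameServ from the list above are read through PMI2_Info_GetJobAttr(). A hedged sketch; the buffer size is arbitrary and the string form of the boolean value is implementation-dependent, so the comparison is deliberately loose:

    #include <stdio.h>
    #include <string.h>
    #include <pmi2.h>

    /* Returns non-zero if the process manager advertises the name-service calls. */
    static int have_name_service(void)
    {
        char value[32];
        int found = 0;

        PMI2_Info_GetJobAttr("hasNameServ", value, (int)sizeof(value), &found);
        if (!found) {
            return 0;            /* attribute not provided by this PMI2 server */
        }
        return 0 == strcmp(value, "TRUE") ||
               0 == strcmp(value, "true") ||
               0 == strcmp(value, "1");
    }
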
@@ -491,7 +491,7 @@ int PMI2_Info_GetJobAttr(const char name[], char value[], int valuelen, int *fou
int PMI2_Info_GetJobAttrIntArray(const char name[], int array[], int arraylen, int *outlen, int *found);
/*@
PMI2_Nameserv_publish - publish a name
Input parameters:
+ service_name - string representing the service being published
@@ -511,7 +511,7 @@ int PMI2_Nameserv_publish(const char service_name[], const PMI_keyval_t *info_pt
+ service_name - string representing the service being published
. info_ptr -
- portLen - size of buffer provided in port
Output parameters:
. port - string representing the port on which to contact the service
@@ -532,7 +532,7 @@ int PMI2_Nameserv_lookup(const char service_name[], const PMI_keyval_t *info_ptr
Returns 'MPI_SUCCESS' on success and an MPI error code on failure.
@*/
int PMI2_Nameserv_unpublish(const char service_name[],
const PMI_keyval_t *info_ptr);

View file

@@ -69,7 +69,7 @@
/* Maybe before gcc 2.95 too */
#ifdef PMIX_HAVE_ATTRIBUTE_UNUSED
#define __PMIX_HAVE_ATTRIBUTE_UNUSED PMIX_HAVE_ATTRIBUTE_UNUSED
#elif defined(__GNUC__)
# define __PMIX_HAVE_ATTRIBUTE_UNUSED (GXX_ABOVE_3_4 || GCC_ABOVE_2_95)
#else
@@ -82,7 +82,7 @@
#endif
#ifdef PMIX_HAVE_ATTRIBUTE_MALLOC
#define __PMIX_HAVE_ATTRIBUTE_MALLOC PMIX_HAVE_ATTRIBUTE_MALLOC
#elif defined(__GNUC__)
# define __PMIX_HAVE_ATTRIBUTE_MALLOC (GXX_ABOVE_3_4 || GCC_ABOVE_2_96)
#else
@@ -95,7 +95,7 @@
#endif
#ifdef PMIX_HAVE_ATTRIBUTE_CONST
#define __PMIX_HAVE_ATTRIBUTE_CONST PMIX_HAVE_ATTRIBUTE_CONST
#elif defined(__GNUC__)
# define __PMIX_HAVE_ATTRIBUTE_CONST (GXX_ABOVE_3_4 || GCC_ABOVE_2_95)
#else
@@ -108,7 +108,7 @@
#endif
#ifdef PMIX_HAVE_ATTRIBUTE_PURE
#define __PMIX_HAVE_ATTRIBUTE_PURE PMIX_HAVE_ATTRIBUTE_PURE
#elif defined(__GNUC__)
# define __PMIX_HAVE_ATTRIBUTE_PURE (GXX_ABOVE_3_4 || GCC_ABOVE_2_96)
#else
@@ -121,7 +121,7 @@
#endif
#ifdef PMIX_HAVE_ATTRIBUTE_DEPRECATED
#define __PMIX_HAVE_ATTRIBUTE_DEPRECATED PMIX_HAVE_ATTRIBUTE_DEPRECATED
#elif defined(__GNUC__)
# define __PMIX_HAVE_ATTRIBUTE_DEPRECATED (GXX_ABOVE_3_4 || GCC_ABOVE_3_3)
#else

View file

@@ -5,7 +5,7 @@
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
@@ -13,9 +13,9 @@
* Copyright (c) 2010 Oracle and/or its affiliates. All rights reserved.
*
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/

View file

@@ -3,9 +3,9 @@
* of Tennessee Research Foundation. All rights
* reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/

View file

@@ -5,15 +5,15 @@
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2015 Intel, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#ifndef PMIX_GET_SOCKET_ERROR_H

View file

@@ -5,14 +5,14 @@
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*
* This file includes the C99 stdint.h file if available, and otherwise
@@ -186,7 +186,7 @@ typedef unsigned long long uintptr_t;
/* inttypes.h printf specifiers */
#ifdef HAVE_INTTYPES_H
# include <inttypes.h>
#else
# if SIZEOF_LONG == 8
# define __PRI64_PREFIX "l"
@@ -305,7 +305,7 @@ typedef unsigned long long uintptr_t;
# define PRIoPTR __PRIPTR_PREFIX "o"
# define PRIuPTR __PRIPTR_PREFIX "u"
# define PRIxPTR __PRIPTR_PREFIX "x"
# define PRIXPTR __PRIPTR_PREFIX "X"
#endif

View file

@@ -3,9 +3,9 @@
* All rights reserved.
* Copyright (c) 2014 Intel, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/

View file

@@ -5,15 +5,15 @@
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2014-2015 Intel, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
@@ -158,7 +158,7 @@ static inline void* pmix_ptr_ltop( uint64_t value )
#if defined(WORDS_BIGENDIAN) || !defined(HAVE_UNIX_BYTESWAP)
static inline uint16_t pmix_swap_bytes2(uint16_t val) __pmix_attribute_const__;
static inline uint16_t pmix_swap_bytes2(uint16_t val)
{
union { uint16_t bigval;
uint8_t arrayval[2];
@@ -203,7 +203,7 @@ static inline uint64_t pmix_swap_bytes8(uint64_t val)
r.arrayval[5] = w.arrayval[2];
r.arrayval[6] = w.arrayval[1];
r.arrayval[7] = w.arrayval[0];
return r.bigval;
}

View file

@@ -1,6 +1,6 @@
This is a short description how to run tests for pmix standalone library.
To compile test the user should run make in the test subdirectory.
To start testing, the user should run either pmix_test or pmix_test_lite executable.
These applications are using two different versions of server functionality and fork

View file

@@ -9,7 +9,7 @@
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2006-2013 Los Alamos National Security, LLC.
* All rights reserved.
* Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2011 Oak Ridge National Labs. All rights reserved.
@@ -33,7 +33,7 @@ int main(int argc, char **argv)
int spawned, size, rank, appnum;
int rc;
char *key;
/* init us */
if (PMI2_SUCCESS != (rc = PMI2_Init(&spawned, &size, &rank, &appnum))) {
fprintf(stderr, "PMI2_Init failed: %d\n", rc);
@@ -66,6 +66,6 @@ int main(int argc, char **argv)
fprintf(stderr, "PMI2_Finalize failed: %d\n", rc);
return rc;
}
return 0;
}

View file

@@ -9,7 +9,7 @@
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2006-2013 Los Alamos National Security, LLC.
* All rights reserved.
* Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2011 Oak Ridge National Labs. All rights reserved.
@@ -71,6 +71,6 @@ int main(int argc, char **argv)
if (PMI_SUCCESS != (rc = PMI_Finalize())) {
fprintf(stderr, "PMI_Finalize failed: %d\n", rc);
}
return rc;
}

View file

@@ -42,7 +42,7 @@ int main(int argc, char **argv)
char *regex;
char **nodes, **procs;
pmix_status_t rc;
/* smoke test */
if (PMIX_SUCCESS != 0) {
TEST_ERROR(("ERROR IN COMPUTING CONSTANTS: PMIX_SUCCESS = %d", PMIX_SUCCESS));
@@ -68,7 +68,7 @@ int main(int argc, char **argv)
} else {
fprintf(stderr, "Node reverse failed: %d\n\n\n", rc);
}
fprintf(stderr, "PROCS: %s\n", TEST_PROCS);
PMIx_generate_ppn(TEST_PROCS, &regex);
fprintf(stderr, "PPN: %s\n\n", regex);

View file

@@ -53,9 +53,9 @@ int main(int argc, char **argv)
pmix_proc_t *peers;
size_t npeers, ntmp=0;
char *nodelist;
gethostname(hostname, 1024);
/* init us */
if (PMIX_SUCCESS != (rc = PMIx_Init(&myproc))) {
pmix_output(0, "Client ns %s rank %d: PMIx_Init failed: %d", myproc.nspace, myproc.rank, rc);
@@ -71,7 +71,7 @@ int main(int argc, char **argv)
nprocs = val->data.uint32;
PMIX_VALUE_RELEASE(val);
pmix_output(0, "Client %s:%d universe size %d", myproc.nspace, myproc.rank, nprocs);
/* call fence to sync */
PMIX_PROC_CONSTRUCT(&proc);
(void)strncpy(proc.nspace, myproc.nspace, PMIX_MAX_NSLEN);
@@ -80,7 +80,7 @@ int main(int argc, char **argv)
pmix_output(0, "Client ns %s rank %d: PMIx_Fence failed: %d", myproc.nspace, myproc.rank, rc);
goto done;
}
/* rank=0 calls spawn */
if (0 == myproc.rank) {
PMIX_APP_CREATE(app, 1);
@@ -172,7 +172,7 @@ int main(int argc, char **argv)
}
PMIX_PROC_FREE(peers, npeers);
free(nodelist);
done:
/* call fence to sync */
PMIX_PROC_CONSTRUCT(&proc);
@@ -182,10 +182,10 @@ int main(int argc, char **argv)
pmix_output(0, "Client ns %s rank %d: PMIx_Fence failed: %d", myproc.nspace, myproc.rank, rc);
goto done;
}
/* finalize us */
pmix_output(0, "Client ns %s rank %d: Finalizing", myproc.nspace, myproc.rank);
if (PMIX_SUCCESS != (rc = PMIx_Finalize())) {
fprintf(stderr, "Client ns %s rank %d:PMIx_Finalize failed: %d\n", myproc.nspace, myproc.rank, rc);
} else {

View file

@@ -55,7 +55,7 @@ int main(int argc, char **argv)
pmix_value_t *val = &value;
pmix_proc_t proc;
uint32_t nprocs;
/* init us */
if (PMIX_SUCCESS != (rc = PMIx_Init(&myproc))) {
pmix_output(0, "Client ns %s rank %d: PMIx_Init failed: %d", myproc.nspace, myproc.rank, rc);
@@ -72,10 +72,10 @@ int main(int argc, char **argv)
PMIX_VALUE_RELEASE(val);
pmix_output(0, "Client %s:%d universe size %d", myproc.nspace, myproc.rank, nprocs);
completed = false;
/* register our errhandler */
PMIx_Register_errhandler(NULL, 0, notification_fn);
/* call fence to sync */
PMIX_PROC_CONSTRUCT(&proc);
(void)strncpy(proc.nspace, myproc.nspace, PMIX_MAX_NSLEN);
@@ -84,7 +84,7 @@ int main(int argc, char **argv)
pmix_output(0, "Client ns %s rank %d: PMIx_Fence failed: %d", myproc.nspace, myproc.rank, rc);
goto done;
}
/* rank=0 calls abort */
if (0 == myproc.rank) {
PMIx_Abort(PMIX_ERR_OUT_OF_RESOURCE, "Eat rocks",
@@ -103,7 +103,7 @@ int main(int argc, char **argv)
/* finalize us */
pmix_output(0, "Client ns %s rank %d: Finalizing", myproc.nspace, myproc.rank);
PMIx_Deregister_errhandler();
if (PMIX_SUCCESS != (rc = PMIx_Finalize())) {
fprintf(stderr, "Client ns %s rank %d:PMIx_Finalize failed: %d\n", myproc.nspace, myproc.rank, rc);
} else {

View file

@@ -506,7 +506,7 @@ static void process_opens(int fd, short args, void *cbdata)
orte_process_name_t daemon;
opal_list_t lt;
opal_namelist_t *nm;
/* get the scheme to determine if we can process locally or not */
if (NULL == (scheme = opal_uri_get_scheme(dfs->uri))) {
ORTE_ERROR_LOG(ORTE_ERR_BAD_PARAM);

View file

@@ -448,7 +448,7 @@ static void process_opens(int fd, short args, void *cbdata)
orte_process_name_t daemon;
opal_list_t lt;
opal_namelist_t *nm;
opal_output_verbose(1, orte_dfs_base_framework.framework_output,
"%s PROCESSING OPEN", ORTE_NAME_PRINT(ORTE_PROC_MY_NAME));
@@ -490,7 +490,7 @@ static void process_opens(int fd, short args, void *cbdata)
nm = (opal_namelist_t*)opal_list_get_first(&lt);
daemon.vpid = nm->name.vpid;
OPAL_LIST_DESTRUCT(&lt);
opal_output_verbose(1, orte_dfs_base_framework.framework_output,
"%s file %s on host %s daemon %s",
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),

View file

@@ -133,7 +133,7 @@ static void job_errors(int fd, short args, void *cbdata)
opal_buffer_t *answer;
int32_t rc, ret;
int room;
/*
* if orte is trying to shutdown, just let it
*/

View file

@@ -1 +1 @@
.so man3/shmem_broadcast32.3

View file

@@ -1 +1 @@
.so man3/shmem_set_lock.3

View file

@@ -1 +1 @@
.so man3/shmem_collect32.3

View file

@@ -1 +1 @@
.so man3/shmem_char_g.3

View file

@@ -1 +1 @@
.so man3/shmem_char_get.3

View file

@@ -1 +1 @@
.so man3/shmem_short_iget.3

View file

@@ -1 +1 @@
.so man3/shmem_short_iput.3

View file

@@ -1 +1 @@
.so man3/shmem_short_max_to_all.3

View file

@@ -1 +1 @@
.so man3/shmem_char_p.3

View file

@@ -1 +1 @@
.so man3/shmem_char_put.3

View file

@@ -1 +1 @@
.so man3/shmem_swap.3

View file

@@ -1 +1 @@
.so man3/shmem_collect32.3

View file

@@ -1 +1 @@
.so man3/shmem_collect32.3

View file

@@ -1 +1 @@
.so man3/shmem_char_g.3

View file

@@ -1 +1 @@
.so man3/shmem_char_get.3

View file

@@ -1 +1 @@
.so man3/shmem_short_iget.3

View file

@@ -1 +1 @@
.so man3/shmem_short_iput.3

View file

@@ -1 +1 @@
.so man3/shmem_short_max_to_all.3

View file

@@ -1 +1 @@
.so man3/shmem_char_p.3

View file

@@ -1 +1 @@
.so man3/shmem_char_put.3

View file

@@ -1 +1 @@
.so man3/shmem_swap.3

View file

@@ -1 +1 @@
.so man3/shmem_char_get.3

View file

@@ -1 +1 @@
.so man3/shmem_char_get.3

View file

@@ -1 +1 @@
.so man3/shmem_char_get.3

View file

@@ -1 +1 @@
.so man3/shmem_char_get.3

View file

@@ -1 +1 @@
.so man3/shmem_short_iget.3

View file

@@ -1 +1 @@
.so man3/shmem_short_iget.3

View file

@@ -1 +1 @@
.so man3/shmem_short_iget.3

View file

@@ -1 +1 @@
.so man3/shmem_short_and_to_all.3

View file

@@ -1 +1 @@
.so man3/shmem_char_g.3

View file

@@ -1 +1 @@
.so man3/shmem_char_get.3

View file

@@ -1 +1 @@
.so man3/shmem_short_iget.3

View file

@@ -1 +1 @@
.so man3/shmem_short_iput.3

View file

@@ -1 +1 @@
.so man3/shmem_short_max_to_all.3

View file

@@ -1 +1 @@
.so man3/shmem_short_or_to_all.3

View file

@@ -1 +1 @@
.so man3/shmem_char_p.3

View file

@@ -1 +1 @@
.so man3/shmem_char_put.3

View file

@@ -1 +1 @@
.so man3/shmem_swap.3

View file

@@ -1 +1 @@
.so man3/shmem_short_xor_all.3

View file

@@ -1 +1 @@
.so man3/shmem_short_iput.3

View file

@@ -1 +1 @@
.so man3/shmem_short_iput.3

View file

@@ -1 +1 @@
.so man3/shmem_short_iput.3

View file

@@ -1 +1 @@
.so man3/shmem_int_add.3

View file

@@ -1 +1 @@
.so man3/shmem_short_and_to_all.3

View file

@@ -1 +1 @@
.so man3/shmem_int_cswap.3

View file

@@ -1 +1 @@
.so man3/shmem_int_fadd.3

View file

@@ -1 +1 @@
.so man3/shmem_int_finc.3

View file

@@ -1 +1 @@
.so man3/shmem_char_g.3

View file

@@ -1 +1 @@
.so man3/shmem_char_get.3

View file

@@ -1 +1 @@
.so man3/shmem_short_iget.3

View file

@@ -1 +1 @@
.so man3/shmem_int_inc.3

View file

@@ -1 +1 @@
.so man3/shmem_short_iput.3

View file

@@ -1 +1 @@
.so man3/shmem_short_max_to_all.3

View file

@@ -1 +1 @@
.so man3/shmem_short_or_to_all.3

View file

@@ -1 +1 @@
.so man3/shmem_char_p.3

View file

@@ -1 +1 @@
.so man3/shmem_char_put.3

View file

@@ -1 +1 @@
.so man3/shmem_swap.3

View file

@@ -1 +1 @@
.so man3/shmem_short_xor_all.3

View file

@@ -1 +1 @@
.so man3/shmem_char_g.3

View file

@@ -1 +1 @@
.so man3/shmem_char_get.3

View file

@@ -1 +1 @@
.so man3/shmem_short_iget.3

View file

@@ -1 +1 @@
.so man3/shmem_short_iput.3

View file

@@ -1 +1 @@
.so man3/shmem_short_max_to_all.3

View file

@@ -1 +1 @@
.so man3/shmem_char_p.3

View file

@@ -1 +1 @@
.so man3/shmem_char_put.3

View file

@@ -1 +1 @@
.so man3/shmem_int_add.3

View file

@@ -1 +1 @@
.so man3/shmem_short_and_to_all.3

View file

@@ -1 +1 @@
.so man3/shmem_int_cswap.3

View file

@@ -1 +1 @@
.so man3/shmem_int_fadd.3

View file

@@ -1 +1 @@
.so man3/shmem_int_finc.3

View file

@@ -1 +1 @@
.so man3/shmem_char_g.3

View file

@@ -1 +1 @@
.so man3/shmem_char_get.3

View file

@@ -1 +1 @@
.so man3/shmem_short_iget.3

View file

@@ -1 +1 @@
.so man3/shmem_int_inc.3

Some files were not shown because too many files changed in this diff.