/* parallel MPI write to a single file */
#include "mpi.h"
#include <stdio.h>
#include <string.h>

#define D   3              /* dimensions */
#define X   256            /* global x grid size */
#define Y   256            /* global y grid size */
#define Z   256            /* global z grid size */
#define nx  128            /* local x grid size */
#define ny  128            /* local y grid size */
#define nz  128            /* local z grid size */
#define ng  (nx*ny*nz)     /* local grid (cube) size */
#define npx 2              /* number of PE's in x direction */
#define npy 2              /* number of PE's in y direction */
#define npz 2              /* number of PE's in z direction */
#define np  (npx*npy*npz)  /* total PE count */
#define LOOP 1
#define MAX_RR_NAME 7

int main(int argc, char* argv[])
{
  int i, rank, npes, bug=0;
  int buf[ng];                  /* local data block (~8 MB on the stack) */
  MPI_File thefile;
  MPI_Status status;
  MPI_Datatype filetype;
  MPI_Comm new_comm;
  MPI_Offset offset=0;
  MPI_Info info=MPI_INFO_NULL;
  int gsize[D], distrib[D], dargs[D], psize[D];
  int dims[D], periods[D], reorder;
  double t1, t2, mbs;
  double to1, to2, tc1, tc2;
  double et, eto, etc;
  double max_mbs, min_mbs, avg_mbs;
  double max_et, min_et, avg_et;
  double max_eto, min_eto, avg_eto;
  double max_etc, min_etc, avg_etc;
  char process_name[MPI_MAX_PROCESSOR_NAME + 1];
  char rr_blank[] = {"       "};    /* MAX_RR_NAME spaces, used for padding */
  char rr_empty[] = {"???????"};
  int count;

  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &npes);

  if ( rank == 0 )
  {
    if ( argc < 2 )
    {
      printf(" ERROR: no filename given\n");
      bug++;
    }
    if ( npes == np )
    {
      printf(" file name: %s\n",argv[1]);
      printf(" total number of PE's: %3d\n",np);
      printf(" number of PE's in x direction: %4d\n",npx);
      printf(" number of PE's in y direction: %4d\n",npy);
      printf(" number of PE's in z direction: %4d\n",npz);
      printf(" global grid size: %dx%dx%d 4 byte integers (total %lu)\n",X,Y,Z,(unsigned long)X*Y*Z);
      printf(" local grid size: %dx%dx%d 4 byte integers (total %d)\n",nx,ny,nz,ng);
    }
    else
    {
      printf(" ERROR: total number of PE's must be %d\n",np);
      printf(" actual number of PE's was %d\n",npes);
      bug++;
    }
    if ( bug )
    {
      MPI_Abort(MPI_COMM_WORLD,-1);
    }
  }

  if ( MPI_Get_processor_name(process_name, &count) != MPI_SUCCESS )
  {
    sprintf(process_name, "%s", rr_empty);
  }
  else
  {
    if ( count < MAX_RR_NAME ) strncat(&process_name[count],rr_blank,MAX_RR_NAME-count);
    process_name[MAX_RR_NAME] = '\0';
  }

  MPI_Barrier(MPI_COMM_WORLD);

  MPI_Info_create(&info);

/* allow multiple writers to write to the file concurrently */
/*MPI_Info_set(info,"panfs_concurrent_write","1");*/

/* use data aggregation */
/*MPI_Info_set(info,"romio_cb_write","enable"); */
/*MPI_Info_set(info,"romio_cb_write","disable");*/
/*MPI_Info_set(info,"romio_cb_read","enable"); */
/*MPI_Info_set(info,"romio_cb_read","disable");*/

/* use one aggregator/writer per node */
/*MPI_Info_set(info,"cb_config_list","*:1");*/

/* aggregators/writers per allocation: use this or the above (both work) */
/*i = ((npes-1)/8) + 1;
  sprintf(awpa,"%d",i);
  MPI_Info_set (info,"cb_nodes",awpa);*/

  for ( i=0; i<D; i++ )
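The listing breaks off here, before the file is actually opened and written. For orientation, the following is a minimal sketch of how a darray-based collective write is typically completed, reusing the names declared above (gsize, distrib, dargs, psize, filetype, thefile, offset, buf, info). The initialization values, the choice to skip the Cartesian communicator (the declared dims/periods/new_comm suggest the full program builds one with MPI_Cart_create), and the call ordering are assumptions, not the original program:

  /* sketch only: typical completion of the darray setup and collective write */
  gsize[0] = X;   gsize[1] = Y;   gsize[2] = Z;      /* global array shape   */
  psize[0] = npx; psize[1] = npy; psize[2] = npz;    /* process grid shape   */
  for ( i=0; i<D; i++ )
  {
    distrib[i] = MPI_DISTRIBUTE_BLOCK;       /* block decomposition per dim  */
    dargs[i]   = MPI_DISTRIBUTE_DFLT_DARG;   /* default block size (X/npx..) */
  }

  /* filetype describing this rank's portion of the global 3-D integer array */
  MPI_Type_create_darray(np, rank, D, gsize, distrib, dargs, psize,
                         MPI_ORDER_C, MPI_INT, &filetype);
  MPI_Type_commit(&filetype);

  for ( i=0; i<ng; i++ ) buf[i] = rank;      /* fill local block with test data */

  t1 = MPI_Wtime();
  MPI_File_open(MPI_COMM_WORLD, argv[1],
                MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &thefile);
  MPI_File_set_view(thefile, offset, MPI_INT, filetype, "native", info);
  MPI_File_write_all(thefile, buf, ng, MPI_INT, &status);   /* collective write */
  MPI_File_close(&thefile);
  t2 = MPI_Wtime();
  mbs = (double)(ng*sizeof(int)) / (1.0e6*(t2-t1));          /* per-rank MB/s */

Note that the commented-out MPI_Info_set hints in the listing (panfs_concurrent_write, romio_cb_write, cb_config_list, cb_nodes) would be applied to this same info object before MPI_File_open; which of them actually helps depends on the file system and the MPI-IO implementation in use.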