diff --git a/orte/test/mpi/Makefile b/orte/test/mpi/Makefile
index 012ace1fda..c50e74c48c 100644
--- a/orte/test/mpi/Makefile
+++ b/orte/test/mpi/Makefile
@@ -1,4 +1,4 @@
-PROGS = mpi_no_op mpi_barrier hello hello_nodename abort multi_abort simple_spawn concurrent_spawn spawn_multiple mpi_spin delayed_abort loop_spawn loop_child bad_exit pubsub hello_barrier segv accept connect hello_output hello_show_help crisscross read_write ziatest slave_spawn slave cell_spawn reduce-hang ziaprobe ziatest bcast_loop
+PROGS = mpi_no_op mpi_barrier hello hello_nodename abort multi_abort simple_spawn concurrent_spawn spawn_multiple mpi_spin delayed_abort loop_spawn loop_child bad_exit pubsub hello_barrier segv accept connect hello_output hello_show_help crisscross read_write ziatest slave_spawn slave cell_spawn reduce-hang ziaprobe ziatest bcast_loop parallel_w8 parallel_w64 parallel_r8 parallel_r64
 
 all: $(PROGS)
diff --git a/orte/test/mpi/parallel_r64.c b/orte/test/mpi/parallel_r64.c
new file mode 100755
index 0000000000..72fb5bcb3e
--- /dev/null
+++ b/orte/test/mpi/parallel_r64.c
@@ -0,0 +1,222 @@
+
+/* parallel MPI read from a single file */
+
+#include "mpi.h"
+#include <stdio.h>
+#include <string.h>
+
+#define D 3              /* dimensions */
+
+#define X 1024           /* global x grid size */
+#define Y 1024           /* global y grid size */
+#define Z 1024           /* global z grid size */
+
+#define nx 256           /* local x grid size */
+#define ny 256           /* local y grid size */
+#define nz 256           /* local z grid size */
+
+#define ng (nx*ny*nz)    /* local grid (cube) size */
+
+#define npx 4            /* number of PE's in x direction */
+#define npy 4            /* number of PE's in y direction */
+#define npz 4            /* number of PE's in z direction */
+
+#define np (npx*npy*npz) /* total PE count */
+
+#define LOOP 1
+
+#define MAX_RR_NAME 7
+
+int
+main(int argc, char* argv[])
+{
+    int i, rank, npes, bug=0;
+    int buf[ng];
+    MPI_File thefile;
+    MPI_Status status;
+    MPI_Datatype filetype;
+    MPI_Comm new_comm;
+    MPI_Offset offset=0;
+    MPI_Info info=MPI_INFO_NULL;
+    int gsize[D],distrib[D],dargs[D],psize[D];
+    int dims[D],periods[D],reorder;
+    double t1,t2,mbs;
+    double to1,to2,tc1,tc2;
+    double et,eto,etc;
+    double max_mbs,min_mbs,avg_mbs;
+    double max_et,min_et,avg_et;
+    double max_eto,min_eto,avg_eto;
+    double max_etc,min_etc,avg_etc;
+    char process_name[MPI_MAX_PROCESSOR_NAME + 1];
+    char rr_blank[] = {" "};
+    char rr_empty[] = {"???????"};
+    int count;
+
+    MPI_Init(&argc, &argv);
+    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+    MPI_Comm_size(MPI_COMM_WORLD, &npes);
+    if ( rank == 0 )
+    {
+        if ( argc < 2 )
+        {
+            printf(" ERROR: no filename given\n");
+            bug++;
+        }
+        if ( npes == np )
+        {
+            printf(" file name: %s\n",argv[1]);
+            printf(" total number of PE's: %3d\n",np);
+            printf(" number of PE's in x direction: %3d\n",npx);
+            printf(" number of PE's in y direction: %3d\n",npy);
+            printf(" number of PE's in z direction: %3d\n",npz);
+            printf(" global grid size: %dx%dx%d 4 byte integers (total %lld)\n",X,Y,Z,(long long)X*Y*Z);
+            printf(" local grid size: %dx%dx%d 4 byte integers (total %d)\n",nx,ny,nz,ng);
+        }
+        else
+        {
+            printf(" ERROR: total number of PE's must be %d\n",np);
+            printf(" actual number of PE's was %d\n",npes);
+            bug++;
+        }
+        if ( bug )
+        {
+            MPI_Abort(MPI_COMM_WORLD,-1);
+        }
+    }
+    if ( MPI_Get_processor_name(process_name, &count) != MPI_SUCCESS)
+    {
+        sprintf(process_name, rr_empty);
+    }
+    else
+    {
+        if (count < MAX_RR_NAME) strncat(&process_name[count],rr_blank,MAX_RR_NAME-count);
+        process_name[MAX_RR_NAME] = '\0';
+    }
+
+    MPI_Info_create(&info);
+
+/* allow multiple writers to write to the file concurrently */
+
+/*MPI_Info_set(info,"panfs_concurrent_write","1");*/
+
+/* use data aggregation */
+
+/*MPI_Info_set(info,"romio_cb_write","enable"); */
+/*MPI_Info_set(info,"romio_cb_write","disable");*/
+/*MPI_Info_set(info,"romio_cb_read","enable"); */
+/*MPI_Info_set(info,"romio_cb_read","disable");*/
+
+/* use one aggregator/writer per node */
+
+/*MPI_Info_set(info,"cb_config_list","*:1");*/
+
+/* aggregators/writers per allocation: use this or the above (both work) */
+
+/*i = ((npes-1)/8) + 1;
+  sprintf(awpa,"%d",i);
+  MPI_Info_set (info,"cb_nodes",awpa);*/
+
+    for ( i=0; i
diff --git a/orte/test/mpi/parallel_r8.c b/orte/test/mpi/parallel_r8.c
new file mode 100755
--- /dev/null
+++ b/orte/test/mpi/parallel_r8.c
+
+/* parallel MPI read from a single file */
+
+#include "mpi.h"
+#include <stdio.h>
+#include <string.h>
+
+#define D 3              /* dimensions */
+
+#define X 256            /* global x grid size */
+#define Y 256            /* global y grid size */
+#define Z 256            /* global z grid size */
+
+#define nx 128           /* local x grid size */
+#define ny 128           /* local y grid size */
+#define nz 128           /* local z grid size */
+
+#define ng (nx*ny*nz)    /* local grid (cube) size */
+
+#define npx 2            /* number of PE's in x direction */
+#define npy 2            /* number of PE's in y direction */
+#define npz 2            /* number of PE's in z direction */
+
+#define np (npx*npy*npz) /* total PE count */
+
+#define LOOP 1
+
+#define MAX_RR_NAME 7
+
+int
+main(int argc, char* argv[])
+{
+    int i, rank, npes, bug=0;
+    int buf[ng];
+    MPI_File thefile;
+    MPI_Status status;
+    MPI_Datatype filetype;
+    MPI_Comm new_comm;
+    MPI_Offset offset=0;
+    MPI_Info info=MPI_INFO_NULL;
+    int gsize[D],distrib[D],dargs[D],psize[D];
+    int dims[D],periods[D],reorder;
+    double t1,t2,mbs;
+    double to1,to2,tc1,tc2;
+    double et,eto,etc;
+    double max_mbs,min_mbs,avg_mbs;
+    double max_et,min_et,avg_et;
+    double max_eto,min_eto,avg_eto;
+    double max_etc,min_etc,avg_etc;
+    char process_name[MPI_MAX_PROCESSOR_NAME + 1];
+    char rr_blank[] = {" "};
+    char rr_empty[] = {"???????"};
+    int count;
+
+    MPI_Init(&argc, &argv);
+    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+    MPI_Comm_size(MPI_COMM_WORLD, &npes);
+    if ( rank == 0 )
+    {
+        if ( argc < 2 )
+        {
+            printf(" ERROR: no filename given\n");
+            bug++;
+        }
+        if ( npes == np )
+        {
+            printf(" file name: %s\n",argv[1]);
+            printf(" total number of PE's: %3d\n",np);
+            printf(" number of PE's in x direction: %3d\n",npx);
+            printf(" number of PE's in y direction: %3d\n",npy);
+            printf(" number of PE's in z direction: %3d\n",npz);
+            printf(" global grid size: %dx%dx%d 4 byte integers (total %lld)\n",X,Y,Z,(long long)X*Y*Z);
+            printf(" local grid size: %dx%dx%d 4 byte integers (total %d)\n",nx,ny,nz,ng);
+        }
+        else
+        {
+            printf(" ERROR: total number of PE's must be %d\n",np);
+            printf(" actual number of PE's was %d\n",npes);
+            bug++;
+        }
+        if ( bug )
+        {
+            MPI_Abort(MPI_COMM_WORLD,-1);
+        }
+    }
+    if ( MPI_Get_processor_name(process_name, &count) != MPI_SUCCESS)
+    {
+        sprintf(process_name, rr_empty);
+    }
+    else
+    {
+        if (count < MAX_RR_NAME) strncat(&process_name[count],rr_blank,MAX_RR_NAME-count);
+        process_name[MAX_RR_NAME] = '\0';
+    }
+
+    MPI_Info_create(&info);
+
+/* allow multiple writers to write to the file concurrently */
+
+/*MPI_Info_set(info,"panfs_concurrent_write","1");*/
+
+/* use data aggregation */
+
+/*MPI_Info_set(info,"romio_cb_write","enable"); */
+/*MPI_Info_set(info,"romio_cb_write","disable");*/
+/*MPI_Info_set(info,"romio_cb_read","enable"); */
+/*MPI_Info_set(info,"romio_cb_read","disable");*/
+
+/* use one aggregator/writer per node */
+
+/*MPI_Info_set(info,"cb_config_list","*:1");*/
+
+/* aggregators/writers per allocation: use this or the above (both work) */
+
+/*i = ((npes-1)/8) + 1;
+  sprintf(awpa,"%d",i);
+  MPI_Info_set (info,"cb_nodes",awpa);*/
+
+    for ( i=0; i
diff --git a/orte/test/mpi/parallel_w64.c b/orte/test/mpi/parallel_w64.c
new file mode 100755
--- /dev/null
+++ b/orte/test/mpi/parallel_w64.c
+#include "mpi.h"
+#include <stdio.h>
+#include <string.h>
+
+#define D 3              /* dimensions */
+
+#define X 1024           /* global x grid size */
+#define Y 1024           /* global y grid size */
+#define Z 1024           /* global z grid size */
+
+#define nx 256           /* local x grid size */
+#define ny 256           /* local y grid size */
+#define nz 256           /* local z grid size */
+
+#define ng (nx*ny*nz)    /* local grid (cube) size */
+
+#define npx 4            /* number of PE's in x direction */
+#define npy 4            /* number of PE's in y direction */
+#define npz 4            /* number of PE's in z direction */
+
+#define np (npx*npy*npz) /* total PE count */
+
+#define LOOP 1
+
+#define MAX_RR_NAME 7
+
+int
+main(int argc, char* argv[])
+{
+    int i, rank, npes, bug=0;
+    int buf[ng];
+    MPI_File thefile;
+    MPI_Status status;
+    MPI_Datatype filetype;
+    MPI_Comm new_comm;
+    MPI_Offset offset=0;
+    MPI_Info info=MPI_INFO_NULL;
+    int gsize[D],distrib[D],dargs[D],psize[D];
+    int dims[D],periods[D],reorder;
+    double t1,t2,mbs;
+    double to1,to2,tc1,tc2;
+    double et,eto,etc;
+    double max_mbs,min_mbs,avg_mbs;
+    double max_et,min_et,avg_et;
+    double max_eto,min_eto,avg_eto;
+    double max_etc,min_etc,avg_etc;
+    char process_name[MPI_MAX_PROCESSOR_NAME + 1];
+    char rr_blank[] = {" "};
+    char rr_empty[] = {"???????"};
+    int count;
+
+    MPI_Init(&argc, &argv);
+    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+    MPI_Comm_size(MPI_COMM_WORLD, &npes);
+    if ( rank == 0 )
+    {
+        if ( argc < 2 )
+        {
+            printf(" ERROR: no filename given\n");
+            bug++;
+        }
+        if ( npes == np )
+        {
+            printf(" file name: %s\n",argv[1]);
+            printf(" total number of PE's: %3d\n",np);
+            printf(" number of PE's in x direction: %4d\n",npx);
+            printf(" number of PE's in y direction: %4d\n",npy);
+            printf(" number of PE's in z direction: %4d\n",npz);
+            printf(" global grid size: %dx%dx%d 4 byte integers (total %lld)\n",X,Y,Z,(long long)X*Y*Z);
+            printf(" local grid size: %dx%dx%d 4 byte integers (total %d)\n",nx,ny,nz,ng);
+        }
+        else
+        {
+            printf(" ERROR: total number of PE's must be %d\n",np);
+            printf(" actual number of PE's was %d\n",npes);
+            bug++;
+        }
+        if ( bug )
+        {
+            MPI_Abort(MPI_COMM_WORLD,-1);
+        }
+    }
+    if ( MPI_Get_processor_name(process_name, &count) != MPI_SUCCESS)
+    {
+        sprintf(process_name, rr_empty);
+    }
+    else
+    {
+        if (count < MAX_RR_NAME) strncat(&process_name[count],rr_blank,MAX_RR_NAME-count);
+        process_name[MAX_RR_NAME] = '\0';
+    }
+
+    MPI_Barrier(MPI_COMM_WORLD);
+
+    MPI_Info_create(&info);
+
+/* allow multiple writers to write to the file concurrently */
+
+/*MPI_Info_set(info,"panfs_concurrent_write","1");*/
+
+/* use data aggregation */
+
+/*MPI_Info_set(info,"romio_cb_write","enable"); */
+/*MPI_Info_set(info,"romio_cb_write","disable");*/
+/*MPI_Info_set(info,"romio_cb_read","enable"); */
+/*MPI_Info_set(info,"romio_cb_read","disable");*/
+
+/* use one aggregator/writer per node */
+
+/*MPI_Info_set(info,"cb_config_list","*:1");*/
+
+/* aggregators/writers per allocation: use this or the above (both work) */
+
+/*i = ((npes-1)/8) + 1;
+  sprintf(awpa,"%d",i);
+  MPI_Info_set (info,"cb_nodes",awpa);*/
+
+
+    for ( i=0; i
diff --git a/orte/test/mpi/parallel_w8.c b/orte/test/mpi/parallel_w8.c
new file mode 100755
--- /dev/null
+++ b/orte/test/mpi/parallel_w8.c
+#include "mpi.h"
+#include <stdio.h>
+#include <string.h>
+
+#define D 3              /* dimensions */
+
+#define X 256            /* global x grid size */
+#define Y 256            /* global y grid size */
+#define Z 256            /* global z grid size */
+
+#define nx 128           /* local x grid size */
+#define ny 128           /* local y grid size */
+#define nz 128           /* local z grid size */
+
+#define ng (nx*ny*nz)    /* local grid (cube) size */
+
+#define npx 2            /* number of PE's in x direction */
+#define npy 2            /* number of PE's in y direction */
+#define npz 2            /* number of PE's in z direction */
+
+#define np (npx*npy*npz) /* total PE count */
+
+#define LOOP 1
+
+#define MAX_RR_NAME 7
+
+int
+main(int argc, char* argv[])
+{
+    int i, rank, npes, bug=0;
+    int buf[ng];
+    MPI_File thefile;
+    MPI_Status status;
+    MPI_Datatype filetype;
+    MPI_Comm new_comm;
+    MPI_Offset offset=0;
+    MPI_Info info=MPI_INFO_NULL;
+    int gsize[D],distrib[D],dargs[D],psize[D];
+    int dims[D],periods[D],reorder;
+    double t1,t2,mbs;
+    double to1,to2,tc1,tc2;
+    double et,eto,etc;
+    double max_mbs,min_mbs,avg_mbs;
+    double max_et,min_et,avg_et;
+    double max_eto,min_eto,avg_eto;
+    double max_etc,min_etc,avg_etc;
+    char process_name[MPI_MAX_PROCESSOR_NAME + 1];
+    char rr_blank[] = {" "};
+    char rr_empty[] = {"???????"};
+    int count;
+
+    MPI_Init(&argc, &argv);
+    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+    MPI_Comm_size(MPI_COMM_WORLD, &npes);
+    if ( rank == 0 )
+    {
+        if ( argc < 2 )
+        {
+            printf(" ERROR: no filename given\n");
+            bug++;
+        }
+        if ( npes == np )
+        {
+            printf(" file name: %s\n",argv[1]);
+            printf(" total number of PE's: %3d\n",np);
+            printf(" number of PE's in x direction: %4d\n",npx);
+            printf(" number of PE's in y direction: %4d\n",npy);
+            printf(" number of PE's in z direction: %4d\n",npz);
+            printf(" global grid size: %dx%dx%d 4 byte integers (total %lld)\n",X,Y,Z,(long long)X*Y*Z);
+            printf(" local grid size: %dx%dx%d 4 byte integers (total %d)\n",nx,ny,nz,ng);
+        }
+        else
+        {
+            printf(" ERROR: total number of PE's must be %d\n",np);
+            printf(" actual number of PE's was %d\n",npes);
+            bug++;
+        }
+        if ( bug )
+        {
+            MPI_Abort(MPI_COMM_WORLD,-1);
+        }
+    }
+    if ( MPI_Get_processor_name(process_name, &count) != MPI_SUCCESS)
+    {
+        sprintf(process_name, rr_empty);
+    }
+    else
+    {
+        if (count < MAX_RR_NAME) strncat(&process_name[count],rr_blank,MAX_RR_NAME-count);
+        process_name[MAX_RR_NAME] = '\0';
+    }
+
+    MPI_Barrier(MPI_COMM_WORLD);
+
+    MPI_Info_create(&info);
+
+    /* allow multiple writers to write to the file concurrently */
+
+    /*MPI_Info_set(info,"panfs_concurrent_write","1");*/
+
+    /* use data aggregation */
+
+    /*MPI_Info_set(info,"romio_cb_write","enable"); */
+    /*MPI_Info_set(info,"romio_cb_write","disable");*/
+    /*MPI_Info_set(info,"romio_cb_read","enable"); */
+    /*MPI_Info_set(info,"romio_cb_read","disable");*/
+
+    /* use one aggregator/writer per node */
+
+    /*MPI_Info_set(info,"cb_config_list","*:1");*/
+
+    /* aggregators/writers per allocation: use this or the above (both work) */
+
+    /*i = ((npes-1)/8) + 1;
+      sprintf(awpa,"%d",i);
+      MPI_Info_set (info,"cb_nodes",awpa);*/
+
+
+    for ( i=0; i
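
The hunks above stop at each test's first setup loop, so the I/O body of the tests is not reproduced here. For orientation only, the declarations that are visible (gsize/distrib/dargs/psize, filetype, thefile, offset, and the timing variables) suggest the usual darray-based collective I/O sequence. The sketch below shows what that sequence typically looks like for the read case; it is an assumed illustration built from those declarations, not the committed test code.

    /* Sketch only: assumed darray read path, inferred from the declarations above. */
    gsize[0] = X;   gsize[1] = Y;   gsize[2] = Z;     /* global grid   */
    psize[0] = npx; psize[1] = npy; psize[2] = npz;   /* process grid  */
    for ( i=0; i<D; i++ )
    {
        distrib[i] = MPI_DISTRIBUTE_BLOCK;            /* block decomposition in each dimension */
        dargs[i]   = MPI_DISTRIBUTE_DFLT_DARG;        /* default block sizes                   */
    }

    MPI_Type_create_darray(np, rank, D, gsize, distrib, dargs, psize,
                           MPI_ORDER_FORTRAN, MPI_INT, &filetype);
    MPI_Type_commit(&filetype);

    to1 = MPI_Wtime();
    MPI_File_open(MPI_COMM_WORLD, argv[1], MPI_MODE_RDONLY, info, &thefile);
    to2 = MPI_Wtime();

    MPI_File_set_view(thefile, offset, MPI_INT, filetype, "native", info);

    t1 = MPI_Wtime();
    MPI_File_read_all(thefile, buf, ng, MPI_INT, &status);   /* collective read of the local cube */
    t2 = MPI_Wtime();

    tc1 = MPI_Wtime();
    MPI_File_close(&thefile);
    tc2 = MPI_Wtime();

    et  = t2  - t1;                                   /* read time  */
    eto = to2 - to1;                                  /* open time  */
    etc = tc2 - tc1;                                  /* close time */
    mbs = ((double)ng * sizeof(int)) / (1048576.0 * et);
    MPI_Reduce(&mbs, &max_mbs, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
    MPI_Reduce(&mbs, &min_mbs, 1, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD);
    MPI_Reduce(&mbs, &avg_mbs, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
    if ( rank == 0 ) avg_mbs = avg_mbs / np;

The write variants (parallel_w8, parallel_w64) would presumably fill buf first, open with MPI_MODE_CREATE|MPI_MODE_WRONLY, and call MPI_File_write_all in place of MPI_File_read_all.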