
Save the old ORTE simple tests

Useful when debugging RTE-related issues

Not for inclusion in the tarball - just added to git repo for use by
developers.

Signed-off-by: Ralph Castain <rhc@pmix.org>
This commit is contained in:
Ralph Castain 2020-02-21 06:14:38 -08:00
parent 3366f3ec40
commit 7e2874a83d
No key found matching this signature
GPG key ID: B63B630167D26BB5
66 changed files with 5267 additions and 0 deletions

60
test/simple/Makefile.include Normal file

@@ -0,0 +1,60 @@
# -*- makefile -*-
#
# Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
# University Research and Technology
# Corporation. All rights reserved.
# Copyright (c) 2004-2005 The University of Tennessee and The University
# of Tennessee Research Foundation. All rights
# reserved.
# Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
# University of Stuttgart. All rights reserved.
# Copyright (c) 2004-2005 The Regents of the University of California.
# All rights reserved.
# Copyright (c) 2006 Cisco Systems, Inc. All rights reserved.
# Copyright (c) 2007 Sun Microsystems, Inc. All rights reserved.
# Copyright (c) 2017 Intel, Inc. All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
# Note that this file does not stand on its own. It is included by a
# higher-level Makefile so that Automake features such as "make dist"
# work properly (and include all the relevant files in this directory
# in the distribution tarball).
# If you are looking for the file that builds these examples, look at
# "Makefile" in this same directory (it is *NOT* generated by
# Automake).
EXTRA_DIST += \
test/mpi/Makefile \
test/mpi/abort.c \
test/mpi/accept.c \
test/mpi/bad_exit.c \
test/mpi/concurrent_spawn.c \
test/mpi/connect.c \
test/mpi/crisscross.c \
test/mpi/delayed_abort.c \
test/mpi/hello_barrier.c \
test/mpi/hello.c \
test/mpi/loop_child.c \
test/mpi/loop_spawn.c \
test/mpi/mpi_barrier.c \
test/mpi/mpi_no_op.c \
test/mpi/mpi_spin.c \
test/mpi/multi_abort.c \
test/mpi/pubsub.c \
test/mpi/sendrecv_blaster.c \
test/mpi/simple_spawn.c \
test/mpi/slave.c \
test/mpi/spawn_multiple.c \
test/mpi/ziatest.c \
test/mpi/ziaprobe.c \
test/mpi/singleton_client_server.c \
test/mpi/spawn_tree.c \
test/mpi/info_spawn.c \
test/mpi/pmix.c \
test/mpi/xlib.c

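The header comment in Makefile.include above notes that this file only feeds Automake's EXTRA_DIST so that "make dist" picks these sources up; the tests themselves are built by a hand-written Makefile kept in the same directory. As a rough sketch only (the mpicc wrapper name, flags, and target list are assumptions, not taken from this commit), that companion Makefile might look like:

# Sketch of the hand-written (non-Automake) Makefile referenced above.
# The wrapper compiler, flags, and program list are assumptions.
CC      = mpicc
CFLAGS  = -g -O2

PROGS   = hello hello_barrier abort simple_spawn loop_spawn loop_child

all: $(PROGS)

# each test is a single C source built directly with the MPI wrapper compiler
%: %.c
	$(CC) $(CFLAGS) -o $@ $<

clean:
	rm -f $(PROGS) *.o

A developer would then build and run an individual test in the usual way, e.g. "make hello" followed by "mpirun -np 4 ./hello".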
44
test/simple/abort.c Normal file

@@ -0,0 +1,44 @@
/* -*- C -*-
*
* $HEADER$
*
* The most basic of MPI applications
*/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include "mpi.h"
int main(int argc, char* argv[])
{
int rank, size;
int errcode;
if (1 < argc) {
errcode = strtol(argv[1], NULL, 10);
} else {
errcode = 2;
}
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
printf("Hello, World, I am %d of %d\n", rank, size);
if (1 == size) {
MPI_Abort(MPI_COMM_WORLD, errcode);
} else {
if (1 == rank) {
MPI_Abort(MPI_COMM_WORLD, errcode);
} else {
errcode = 0;
sleep(99999999);
}
}
MPI_Finalize();
return errcode;
}

44
test/simple/accept.c Normal file

@@ -0,0 +1,44 @@
/* -*- C -*-
*
* $HEADER$
*
* Test of connect/accept - the accept (server) side
*/
#include <stdio.h>
#include "mpi.h"
int main(int argc, char* argv[])
{
int rank, size;
MPI_Info info;
char port[MPI_MAX_PORT_NAME];
MPI_Comm client;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
printf("Hello, World, I am %d of %d\n", rank, size);
fflush(stdout);
MPI_Info_create(&info);
MPI_Info_set(info, "ompi_global_scope", "true");
if (0 == rank) {
MPI_Open_port(MPI_INFO_NULL, port);
MPI_Publish_name("test-pub", info, port);
MPI_Comm_accept(port, MPI_INFO_NULL, 0, MPI_COMM_WORLD, &client);
}
MPI_Barrier(client);
if (0 == rank) {
MPI_Unpublish_name("test-pub", info, port);
MPI_Close_port(port);
}
MPI_Finalize();
return 0;
}

85
test/simple/add_host.c Normal file

@@ -0,0 +1,85 @@
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <unistd.h>
#include <sys/param.h>
#include "opal/runtime/opal.h"
#include <mpi.h>
int main(int argc, char* argv[])
{
int msg, rc;
MPI_Comm parent, child;
int rank, size;
const char *hostname;
pid_t pid;
char *env_rank,*env_nspace;
MPI_Info info;
env_rank = getenv("PMIX_RANK");
env_nspace = getenv("PMIX_NAMESPACE");
pid = getpid();
hostname = opal_gethostname();
printf("[%s:%s pid %ld] starting up on node %s!\n", env_nspace, env_rank, (long)pid, hostname);
MPI_Init(NULL, NULL);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
printf("%d completed MPI_Init\n", rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Comm_get_parent(&parent);
/* If we get COMM_NULL back, then we're the parent */
if (MPI_COMM_NULL == parent) {
pid = getpid();
printf("Parent [pid %ld] about to spawn!\n", (long)pid);
MPI_Info_create(&info);
MPI_Info_set(info, "add-host", "rhc002:24");
if (MPI_SUCCESS != (rc = MPI_Comm_spawn(argv[0], MPI_ARGV_NULL, 3, info,
0, MPI_COMM_WORLD, &child, MPI_ERRCODES_IGNORE))) {
printf("Child failed to spawn\n");
return rc;
}
printf("Parent done with spawn\n");
if (0 == rank) {
msg = 38;
printf("Parent sending message to child\n");
MPI_Send(&msg, 1, MPI_INT, 0, 1, child);
}
MPI_Comm_disconnect(&child);
printf("Parent disconnected\n");
/* do it again */
MPI_Info_set(info, "add-host", "rhc003:24");
if (MPI_SUCCESS != (rc = MPI_Comm_spawn(argv[0], MPI_ARGV_NULL, 3, info,
0, MPI_COMM_WORLD, &child, MPI_ERRCODES_IGNORE))) {
printf("Child failed to spawn\n");
return rc;
}
printf("Parent done with second spawn\n");
if (0 == rank) {
msg = 38;
printf("Parent sending message to second children\n");
MPI_Send(&msg, 1, MPI_INT, 0, 1, child);
}
MPI_Comm_disconnect(&child);
printf("Parent disconnected again\n");
}
/* Otherwise, we're the child */
else {
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
pid = getpid();
printf("Hello from the child %d of %d on host %s pid %ld\n", rank, 3, hostname, (long)pid);
if (0 == rank) {
MPI_Recv(&msg, 1, MPI_INT, 0, 1, parent, MPI_STATUS_IGNORE);
printf("Child %d received msg: %d\n", rank, msg);
}
MPI_Comm_disconnect(&parent);
printf("Child %d disconnected\n", rank);
}
MPI_Finalize();
fprintf(stderr, "%d: exiting\n", pid);
return 0;
}

30
test/simple/attach.c Normal file

@@ -0,0 +1,30 @@
/* -*- C -*-
*
* $HEADER$
*
* The most basic of MPI applications
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
int main(int argc, char* argv[])
{
unsigned char fifo_cmd = 1;
int fd;
if (2 > argc) {
fprintf(stderr, "usage: attach <full-path-to-debugger-fifo-file>\n");
exit(1);
}
fd = open(argv[1], O_WRONLY);
write(fd, &fifo_cmd, sizeof(unsigned char));
close(fd);
return 0;
}

25
test/simple/bad_exit.c Normal file

@@ -0,0 +1,25 @@
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <mpi.h>
#define RANK_DEATH 1
int main(int argc, char **argv)
{
int rank;
MPI_Init(&argc,&argv);
MPI_Comm_rank(MPI_COMM_WORLD,&rank);
sleep(2);
if (rank==RANK_DEATH) {
printf("Rank %d exiting without calling finalize...\n", rank);
exit(1);
}
sleep(2);
printf("Rank %d calling MPI_Finalize\n", rank);
MPI_Finalize();
printf("Rank %d exiting\n", rank);
return 0;
}

28
test/simple/badcoll.c Normal file

@@ -0,0 +1,28 @@
#include <stdio.h>
#include <unistd.h>
#include "mpi.h"
const int count = 1234;
int buffer[1234] = {0};
int main(int argc, char *argv[])
{
int rank, size, i;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
for (i=0; i < 1000; i++) {
fprintf(stderr, "%d: Executing Bcast #%d\n", rank, i);
MPI_Bcast(buffer, count, MPI_INT, 0, MPI_COMM_WORLD);
if (0 != rank) {
sleep(1);
}
}
MPI_Finalize();
return 0;
}

36
test/simple/bcast_loop.c Normal file

@@ -0,0 +1,36 @@
#include <mpi.h>
#include <stdio.h>
int main(int argc, char* argv[])
{
int myid, nprocs, tag;
int i, m, nt;
MPI_Status status;
double workarray1[561], workarray2[561];
const int numm = 50000;
const int numt = 142;
MPI_Init(NULL, NULL);
MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
MPI_Comm_rank(MPI_COMM_WORLD, &myid);
for (m = 0; m < numm; ++m) {
if (0 == (m % 1000)) {
printf("rank %d, m = %d\n", myid, m);
}
for (nt = 0; nt <= numt; ++nt) {
if (0 == myid) {
for (i = 0; i < 561; ++i) {
workarray1[i] = numm * numt * i;
workarray2[i] = numm * numt * (i + 1);
}
}
MPI_Bcast(workarray1, 561, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(workarray2, 561, MPI_DOUBLE, 0, MPI_COMM_WORLD);
}
}
MPI_Finalize();
return 0;
}

63
test/simple/binding.c Normal file

@@ -0,0 +1,63 @@
/* -*- C -*-
*
* $HEADER$
*
* The most basic of MPI applications
*/
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>
#include <sched.h>
#include "opal/mca/hwloc/base/base.h"
#include "opal/runtime/opal.h"
#include "mpi.h"
#include "orte/util/proc_info.h"
int main(int argc, char* argv[])
{
int rank, size, rc;
hwloc_cpuset_t cpus;
char *bindings = NULL;
cpu_set_t *mask;
int nrcpus, c;
size_t csize;
const char *hostname;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
hostname = opal_gethostname();
if (OPAL_SUCCESS == opal_hwloc_base_get_topology()) {
cpus = hwloc_bitmap_alloc();
rc = hwloc_get_cpubind(opal_hwloc_topology, cpus, HWLOC_CPUBIND_PROCESS);
hwloc_bitmap_list_asprintf(&bindings, cpus);
}
printf("[%s;%d] Hello, World, I am %d of %d [%d local peers]: get_cpubind: %d bitmap %s\n",
hostname, (int)getpid(), rank, size, orte_process_info.num_local_peers, rc,
(NULL == bindings) ? "NULL" : bindings);
nrcpus = sysconf(_SC_NPROCESSORS_ONLN);
mask = CPU_ALLOC(nrcpus);
csize = CPU_ALLOC_SIZE(nrcpus);
CPU_ZERO_S(csize, mask);
if ( sched_getaffinity(0, csize, mask) == -1 ) {
perror("sched_getaffinity");
} else {
for ( c = 0; c < nrcpus; c++ ) {
if ( CPU_ISSET_S(c, csize, mask) ) {
printf("[%s:%d] CPU %d is set\n", hostname, (int)getpid(), c);
}
}
}
CPU_FREE(mask);
MPI_Finalize();
return 0;
}

37
test/simple/client.c Normal file

@@ -0,0 +1,37 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "mpi.h"
#define MAX_DATA 100
int main( int argc, char **argv )
{
MPI_Comm server;
double buf[MAX_DATA];
char port_name[MPI_MAX_PORT_NAME];
int done = 0, tag, n, CNT=0;
MPI_Init( &argc, &argv );
strcpy(port_name, argv[1] ); /* assume server's name is cmd-line arg */
MPI_Comm_connect( port_name, MPI_INFO_NULL, 0, MPI_COMM_WORLD, &server );
n = MAX_DATA;
while (!done)
{
tag = 2; /* Action to perform */
if ( CNT == 5 ) { tag = 0; done = 1; }
fprintf(stderr, "Client sending message %d\n", CNT);
MPI_Send( buf, n, MPI_DOUBLE, 0, tag, server );
CNT++;
/* etc */
}
MPI_Comm_disconnect( &server );
MPI_Finalize();
return 0;
}

84
test/simple/concurrent_spawn.c Normal file

@@ -0,0 +1,84 @@
#define _GNU_SOURCE
#include "orte_config.h"
#include "opal/runtime/opal.h"
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>
#include <stdlib.h>
#include <mpi.h>
#define NUM_CHILDREN 5
int main(int argc, char* argv[])
{
int msg;
MPI_Comm parent, children[NUM_CHILDREN];
int rank, size, i;
const char *hostname;
pid_t pid;
char *child_argv[2] = { "", NULL };
MPI_Init(NULL, NULL);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Comm_get_parent(&parent);
/* If we get COMM_NULL back, then we're the parent */
if (MPI_COMM_NULL == parent) {
pid = getpid();
/* First, spawn all the children. Give them an argv
identifying which child they are */
for (i = 0; i < NUM_CHILDREN; ++i) {
printf("Parent [pid %ld] about to spawn child #%d\n",
(long)pid, i);
opal_asprintf(&(child_argv[0]), "%d", i);
MPI_Comm_spawn(argv[0], child_argv, 1, MPI_INFO_NULL,
0, MPI_COMM_WORLD, &children[i],
MPI_ERRCODES_IGNORE);
printf("Parent done with spawn of child %d\n", i);
}
/* Now send each of the children a message */
if (0 == rank) {
for (i = 0; i < NUM_CHILDREN; ++i) {
printf("Parent sending message to child %d\n", i);
MPI_Send(&i, 1, MPI_INT, 0, 1, children[i]);
}
}
/* Now disconnect from each of the children */
for (i = 0; i < NUM_CHILDREN; ++i) {
printf("Parent disconnecting from child %d\n", i);
MPI_Comm_disconnect(&children[i]);
printf("Parent disconnected from child %d\n", i);
}
}
/* Otherwise, we're the child */
else {
hostname = opal_gethostname();
if (argc == 1) {
printf("ERROR: child did not receive exepcted argv!\n");
i = -1;
} else {
i = atoi(argv[1]);
}
pid = getpid();
printf("Hello from the child %d on host %s pid %ld\n", i, hostname, (long)pid);
if (0 == rank) {
MPI_Recv(&msg, 1, MPI_INT, 0, 1, parent, MPI_STATUS_IGNORE);
printf("Child %d received msg: %d\n", i, msg);
if (i != msg) {
printf("ERROR: Child %d got wrong message (got %d, expected %d)\n",
i, msg, i);
}
}
MPI_Comm_disconnect(&parent);
printf("Child %d disconnected\n", i);
}
MPI_Finalize();
return 0;
}

35
test/simple/connect.c Normal file

@@ -0,0 +1,35 @@
/* -*- C -*-
*
* $HEADER$
*
* Test of connect/accept - the connect (client) side
*/
#include <stdio.h>
#include "mpi.h"
int main(int argc, char* argv[])
{
int rank, size;
MPI_Comm server;
MPI_Info info;
char port[MPI_MAX_PORT_NAME];
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
printf("Hello, World, I am %d of %d\n", rank, size);
MPI_Info_create(&info);
MPI_Info_set(info, "ompi_global_scope", "true");
MPI_Lookup_name("test-pub", info, port);
MPI_Comm_connect(port, MPI_INFO_NULL, 0, MPI_COMM_WORLD, &server);
MPI_Barrier(server);
MPI_Finalize();
return 0;
}

273
test/simple/crisscross.c Normal file

@@ -0,0 +1,273 @@
/*
cc -o crisscross crisscross.c -lmpi
*/
#include <stdio.h>
#include <stdlib.h>
#include "mpi.h"
#include <unistd.h>
#include <string.h>
#include <time.h>
#define MAX_RR_NAME 7
int main(int argc, char *argv[])
{
MPI_Status status; /* MPI status */
int mpierr; /* MPI function return code */
int rank; /* Process rank within MPI_COMM_WORLD */
int nproc; /* Total number of MPI processes */
int tag0=41; /* MPI message tag */
int tag1=42; /* MPI message tag */
int tag2=43; /* MPI message tag */
int warmup=1; /* MPI warmup loops */
char process_name[MPI_MAX_PROCESSOR_NAME + 1];
char partner_name[MPI_MAX_PROCESSOR_NAME + 1];
char rr_blank[] = {" "};
char rr_empty[] = {"???????"};
int n_bytes=128*1024*1024;
int n_loops=2;
unsigned char* send_buff;
unsigned char* recv_buff;
int i,j,k,m,count,mismatch;
double et1,et2,mbs;
double avg_mbs=0, sum_avg_mbs=0;
int xfers=0, sum_xfers=0;
double max_mbs=-1.0,min_mbs=999999.9;
double r_max_mbs,r_min_mbs;
time_t curtime;
struct tm *loctime;
if ( argc > 2 )
{
n_loops = atoi(argv[2]);
n_loops = n_loops < 1 ? 10 : n_loops;
}
if ( argc > 1 )
{
n_bytes = atoi(argv[1]);
n_bytes = n_bytes < 1 ? 32768 : n_bytes;
}
send_buff = (unsigned char *) valloc(n_bytes);
recv_buff = (unsigned char *) valloc(n_bytes);
for ( i=0; i<n_bytes; i++ )
{
send_buff[i] = i%128;
}
mpierr = MPI_Init(&argc, &argv);
if (mpierr != MPI_SUCCESS)
{
fprintf(stderr, "MPI Error %d (MPI_Init)\n",mpierr);
fflush(stderr);
MPI_Abort(MPI_COMM_WORLD, -1);
}
MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
mpierr = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
if (mpierr != MPI_SUCCESS || rank < 0)
{
fprintf(stderr, "MPI Error %d (MPI_Comm_rank)\n",mpierr);
fflush(stderr);
MPI_Abort(MPI_COMM_WORLD, -1);
}
if ( rank == 0 )
{
curtime = time (NULL);
loctime = localtime (&curtime);
printf("\n %s\n",asctime (loctime));
}
mpierr = MPI_Comm_size(MPI_COMM_WORLD, &nproc);
if (mpierr != MPI_SUCCESS || nproc < 1 || nproc <= rank)
{
fprintf(stderr, "MPI Error %d (MPI_Comm_size) [%d]\n",mpierr, rank);
fflush(stderr);
MPI_Abort(MPI_COMM_WORLD, -1);
}
mpierr = MPI_Get_processor_name(process_name, &count);
if (mpierr != MPI_SUCCESS)
{
fprintf(stderr,"MPI Error %d (MPI_Get_processor_name) [%d]\n", mpierr, rank);
sprintf(process_name, "%s", rr_empty);
}
else
{
if (count < MAX_RR_NAME) strncat(&process_name[count],rr_blank,MAX_RR_NAME-count);
process_name[MAX_RR_NAME] = '\0';
}
for ( i=0; i<nproc; i++ )
{
mpierr = MPI_Barrier(MPI_COMM_WORLD);
if (mpierr != MPI_SUCCESS)
{
fprintf(stderr, "MPI Error %d (MPI_Barrier) [%d]\n", mpierr, rank);
fflush(stderr);
MPI_Abort(MPI_COMM_WORLD, -1);
}
for ( j=0; j<nproc; j++ )
{
if ( i != j )
{
if (rank == j)
{
mpierr = MPI_Sendrecv(process_name, MPI_MAX_PROCESSOR_NAME + 1, MPI_CHAR, i, tag0,
partner_name, MPI_MAX_PROCESSOR_NAME + 1, MPI_CHAR, i, tag0, MPI_COMM_WORLD, &status);
if (mpierr != MPI_SUCCESS)
{
fprintf(stderr,"MPI Error %d (MPI_Sendrecv) %s [%d,%d]\n",mpierr,process_name,rank,i);
fflush(stderr);
MPI_Abort(MPI_COMM_WORLD, -1);
}
for ( k=0; k<n_bytes; k++ )
{
recv_buff[k] = 0x80;
}
}
if ( rank == i )
{
mpierr = MPI_Sendrecv(process_name, MPI_MAX_PROCESSOR_NAME + 1, MPI_CHAR, j, tag0,
partner_name, MPI_MAX_PROCESSOR_NAME + 1, MPI_CHAR, j, tag0, MPI_COMM_WORLD, &status);
if (mpierr != MPI_SUCCESS)
{
fprintf(stderr,"MPI Error %d (MPI_Sendrecv) %s [%d,%d]\n",mpierr,process_name,i,j);
fflush(stderr);
MPI_Abort(MPI_COMM_WORLD, -1);
}
}
for ( k=0; k<n_loops+warmup; k++ )
{
if ( rank == i )
{
if (k == warmup) et1 = MPI_Wtime();
mpierr = MPI_Send(send_buff, n_bytes, MPI_BYTE, j, tag1, MPI_COMM_WORLD);
if (mpierr != MPI_SUCCESS)
{
fprintf(stderr,"MPI Error %d (MPI_Send) %s [4%d] --> %s [4%d]\n",mpierr,process_name,i,partner_name,j);
fflush(stderr);
MPI_Abort(MPI_COMM_WORLD, -1);
}
}
if ( rank == j )
{
mpierr = MPI_Recv(recv_buff, n_bytes, MPI_BYTE, i, tag1, MPI_COMM_WORLD, &status);
if (mpierr != MPI_SUCCESS)
{
fprintf(stderr,"MPI Error %d (MPI_Recv) %s [4%d] <-- %s [4%d]\n",mpierr,process_name,j,partner_name,i);
fflush(stderr);
MPI_Abort(MPI_COMM_WORLD, -1);
}
if (k == n_loops+warmup-1) et2 = MPI_Wtime();
}
}
if ( rank == i )
{
mpierr = MPI_Send(&et1, 1, MPI_DOUBLE, j, tag1, MPI_COMM_WORLD);
if (mpierr != MPI_SUCCESS)
{
fprintf(stderr,"MPI Error %d (MPI_Send) %s [4%d] --> %s [4%d]\n",mpierr,process_name,i,partner_name,j);
fflush(stderr);
MPI_Abort(MPI_COMM_WORLD, -1);
}
}
if ( rank == j )
{
mpierr = MPI_Recv(&et1, 1, MPI_DOUBLE, i, tag1, MPI_COMM_WORLD, &status);
if (mpierr != MPI_SUCCESS)
{
fprintf(stderr,"MPI Error %d (MPI_Recv) %s [4%d] <-- %s [4%d]\n",mpierr,process_name,j,partner_name,i);
fflush(stderr);
MPI_Abort(MPI_COMM_WORLD, -1);
}
mbs = ((double)n_loops*n_bytes)/(1000000.0*(et2-et1));
if (mbs < 50.0)
{
printf(" %s [%4d] =====>> %s [%4d] %9.1f mbs SLOW!\n",partner_name,i,process_name,j,mbs);
}
else
{
printf(" %s [%4d] =====>> %s [%4d] %9.1f mbs\n",partner_name,i,process_name,j,mbs);
}
min_mbs = (mbs < min_mbs) ? mbs:min_mbs;
max_mbs = (mbs > max_mbs) ? mbs:max_mbs;
avg_mbs += mbs;
xfers++;
mismatch = 0;
for ( k=0; k<n_bytes; k++ )
{
if ( recv_buff[k] != k%128 ) mismatch++;
}
if ( mismatch ) printf(" WARNING! %d data mismatches\n",mismatch);
fflush(stdout);
}
}
}
}
mpierr = MPI_Reduce(&xfers, &sum_xfers, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
if (mpierr != MPI_SUCCESS)
{
fprintf(stderr,"MPI Error %d (MPI_Reduce) %s [%d]\n",mpierr,process_name,rank);
fflush(stderr);
MPI_Abort(MPI_COMM_WORLD, -1);
}
mpierr = MPI_Reduce(&avg_mbs, &sum_avg_mbs, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
if (mpierr != MPI_SUCCESS)
{
fprintf(stderr,"MPI Error %d (MPI_Reduce) %s [%d]\n",mpierr,process_name,rank);
fflush(stderr);
MPI_Abort(MPI_COMM_WORLD, -1);
}
mpierr = MPI_Reduce(&min_mbs, &r_min_mbs, 1, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD);
if (mpierr != MPI_SUCCESS)
{
fprintf(stderr,"MPI Error %d (MPI_Reduce) %s [%d]\n",mpierr,process_name,rank);
fflush(stderr);
MPI_Abort(MPI_COMM_WORLD, -1);
}
mpierr = MPI_Reduce(&max_mbs, &r_max_mbs, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
if (mpierr != MPI_SUCCESS)
{
fprintf(stderr,"MPI Error %d (MPI_Reduce) %s [%d]\n",mpierr,process_name,rank);
fflush(stderr);
MPI_Abort(MPI_COMM_WORLD, -1);
}
mpierr = MPI_Finalize();
if (mpierr != MPI_SUCCESS)
{
fprintf(stderr,"MPI Error %d (MPI_Finalize) %s [%d]\n",mpierr,process_name,rank);
fflush(stderr);
MPI_Abort(MPI_COMM_WORLD, -1);
}
fflush(stdout);
if ( rank == 0 )
{
mbs = sum_avg_mbs/sum_xfers;
printf("\n average tranfer rate for %d transfers: %9.1f mbs\n",sum_xfers, mbs);
printf(" minimum tranfer rate for %d transfers: %9.1f mbs\n",sum_xfers, r_min_mbs);
printf(" maximum tranfer rate for %d transfers: %9.1f mbs\n",sum_xfers, r_max_mbs);
fflush(stdout);
}
return 0;
}

31
test/simple/debugger.c Normal file

@@ -0,0 +1,31 @@
/* -*- C -*-
*
* $HEADER$
*
* A program that just spins - provides mechanism for testing user-driven
* abnormal program termination
*/
#include <stdio.h>
#include <unistd.h>
int main(int argc, char* argv[])
{
int i, rc, j=0;
double pi;
pid_t pid;
pid = getpid();
printf("spin: Pid %ld\n", (long)pid);
i = 0;
while (0 == j) {
i++;
pi = i / 3.14159256;
if (i > 100) i = 0;
}
return 0;
}

41
test/simple/delayed_abort.c Normal file

@@ -0,0 +1,41 @@
/* -*- C -*-
*
* $HEADER$
*
* The most basic of MPI applications
*/
#include "orte_config.h"
#include "opal/runtime/opal.h"
#include <stdio.h>
#include <unistd.h>
#include "mpi.h"
int main(int argc, char* argv[])
{
int rank, size;
const char *hostname;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
hostname = opal_gethostname();
printf("%s: I am %d of %d. pid=%d\n", hostname, rank, size, getpid());
if (rank%3 == 0) {
printf("%s: rank %d aborts\n", hostname, rank);
if (rank == 3) {
printf("%s: rank %d is going to sleep\n", hostname, rank);
sleep(2);
}
MPI_Abort(MPI_COMM_WORLD, 2);
printf("%s: sleeping. You should not see this\n", hostname);
sleep(100);
}
MPI_Finalize();
return 0;
}

46
test/simple/early_abort.c Normal file

@@ -0,0 +1,46 @@
/* -*- C -*-
*
* $HEADER$
*
* The most basic of MPI applications
*/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include "mpi.h"
int main(int argc, char* argv[])
{
int rank, size;
char *rk;
/* get the MPI rank from the environ */
if (NULL == (rk = getenv("OMPI_COMM_WORLD_RANK"))) {
fprintf(stderr, "FAILED TO GET RANK\n");
exit(1);
}
if (1 < argc) {
/* rank 0 exits first */
if (0 == strcmp(rk, "0")) {
exit(1);
} else {
sleep(1);
}
} else {
if (0 == strcmp(rk, "0")) {
sleep(1);
exit(1);
}
}
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
printf("Hello, World, I am %d of %d\n", rank, size);
MPI_Finalize();
return 0;
}

40
test/simple/hello.c Normal file

@@ -0,0 +1,40 @@
/* -*- C -*-
*
* $HEADER$
*
* The most basic of MPI applications
*/
#include <stdio.h>
#include "opal/mca/hwloc/base/base.h"
#include "mpi.h"
#include "orte/util/proc_info.h"
int main(int argc, char* argv[])
{
int rank, size, rc;
hwloc_cpuset_t cpus;
char *bindings = NULL;
pid_t pid;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
pid = getpid();
printf("[%lu] Rank %d: getting topology\n", (unsigned long)pid, rank);
fflush(stdout);
if (OPAL_SUCCESS == opal_hwloc_base_get_topology()) {
cpus = hwloc_bitmap_alloc();
rc = hwloc_get_cpubind(opal_hwloc_topology, cpus, HWLOC_CPUBIND_PROCESS);
hwloc_bitmap_list_asprintf(&bindings, cpus);
}
printf("Hello, World, I am %d of %d [%d local peers]: get_cpubind: %d bitmap %s\n",
rank, size, orte_process_info.num_local_peers, rc,
(NULL == bindings) ? "NULL" : bindings);
MPI_Finalize();
return 0;
}

24
test/simple/hello_barrier.c Normal file

@@ -0,0 +1,24 @@
/* -*- C -*-
*
* $HEADER$
*
* The most basic of MPI applications
*/
#include <stdio.h>
#include "mpi.h"
int main(int argc, char* argv[])
{
int rank, size;
MPI_Init(&argc, &argv);
MPI_Barrier(MPI_COMM_WORLD);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
printf("Hello, World, I am %d of %d\n", rank, size);
MPI_Finalize();
return 0;
}

54
test/simple/hello_nodename.c Normal file

@@ -0,0 +1,54 @@
/* -*- C -*-
*
* $HEADER$
*
* The most basic of MPI applications
*/
#define _GNU_SOURCE
#include "orte_config.h"
#include "opal/runtime/opal.h"
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include "mpi.h"
int main(int argc, char* argv[])
{
int rank, size;
const char *hostname;
void *appnum;
void *univ_size;
char *appstr, *unistr;
int flag;
char *envar;
envar = getenv("OMPI_UNIVERSE_SIZE");
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Comm_get_attr(MPI_COMM_WORLD, MPI_APPNUM, &appnum, &flag);
if (NULL == appnum) {
opal_asprintf(&appstr, "UNDEFINED");
} else {
opal_asprintf(&appstr, "%d", *(int*)appnum);
}
MPI_Comm_get_attr(MPI_COMM_WORLD, MPI_UNIVERSE_SIZE, &univ_size, &flag);
if (NULL == univ_size) {
opal_asprintf(&unistr, "UNDEFINED");
} else {
opal_asprintf(&unistr, "%d", *(int*)univ_size);
}
hostname = opal_gethostname();
printf("Hello, World, I am %d of %d on host %s from app number %s universe size %s universe envar %s\n",
rank, size, hostname, appstr, unistr, (NULL == envar) ? "NULL" : envar);
MPI_Finalize();
return 0;
}

37
test/simple/hello_output.c Normal file

@@ -0,0 +1,37 @@
/* -*- C -*-
*
* $HEADER$
*
* The most basic of MPI applications
*/
#include "opal_config.h"
#include <stdio.h>
#include "mpi.h"
#include "opal/util/output.h"
int main(int argc, char* argv[])
{
int rank, size;
int stream, stream2;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
stream = opal_output_open(NULL);
opal_output(stream, "(stream) Hello, World, I am %d of %d\n", rank, size);
printf("(printf) Hello, World, I am %d of %d\n", rank, size);
opal_output_set_verbosity(stream, 10);
opal_output(stream, "this is an opal_output on the verbose stream");
stream2 = opal_output_open(NULL);
opal_output(stream2, "opal_output stream2");
opal_output_set_verbosity(stream2, 10);
opal_output(stream2, "this is an opal_output on the same verbose stream2");
MPI_Finalize();
return 0;
}

53
test/simple/hello_show_help.c Normal file

@@ -0,0 +1,53 @@
/* -*- C -*-
*
* $HEADER$
*
* The most basic of MPI applications
*/
#include "opal_config.h"
#include <stdio.h>
#include <unistd.h>
#include "mpi.h"
#include "opal/util/output.h"
#include "orte/util/show_help.h"
int main(int argc, char* argv[])
{
int rank, size;
int stream;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
if (0 == rank) {
opal_output(0, "============================================================================");
opal_output(0, "This test ensures that the aggregation functionality of the orte_show_help\nsystem is working properly. It outputs a bogus warning about orte_init(),\nand contains sleep statements to ensure that the timer is firiing properly\nin the HNP and aggregates messages properly. The total sleep time is\n(3 * num_procs). You should see:\n\n - aggregation messages from the HNP every five seconds or so\n - a total of (2 * num_procs) messages");
opal_output(0, "============================================================================");
}
MPI_Barrier(MPI_COMM_WORLD);
orte_show_help("help-orte-runtime.txt",
"orte_init:startup:internal-failure", true,
"Nothing", "ORTE_EVERYTHING_IS_PEACHY", "42");
sleep(rank * 3);
orte_show_help("help-orte-runtime.txt",
"orte_init:startup:internal-failure", true,
"Duplicate orte_show_help detection",
"ORTE_SHOW_HELP_DUPLICATE_FAILED", "99999");
MPI_Barrier(MPI_COMM_WORLD);
if (0 == rank) {
opal_output(0, "============================================================================");
opal_output(0, "The test is now complete. Please verify that the HNP output all the required\nmessages (you may see 1 or 2 more messages from the HNP after this message).");
opal_output(0, "============================================================================");
}
MPI_Finalize();
return 0;
}

33
test/simple/hellocycle.pl Executable file

@@ -0,0 +1,33 @@
#!/usr/bin/env perl
#
use strict;
use warnings;
use Date::Parse;
#
$ENV{OMPI_MCA_btl} = "self";
#
sub prtime {
my $count = shift;
my $str = localtime;
print "$count: $str\n";
}
my $totalcount = 5000;
my $count = $totalcount;
prtime($count);
my $start = time();
while ($count > 0) {
system("./hello > /dev/null 2>&1");
$count--;
if ($count % 1000 == 0) {
prtime($count);
}
}
prtime($count);
my $stop = time();
my $rate = $totalcount / ($stop - $start);
print "Rate: $rate\n";

84
test/simple/info_spawn.c Normal file

@@ -0,0 +1,84 @@
#include "orte_config.h"
#include "opal/runtime/opal.h"
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>
#include <string.h>
#include <stdlib.h>
#include <mpi.h>
int main(int argc, char* argv[])
{
int msg, rc, i;
MPI_Comm parent, child;
int rank, size;
const char *hostname;
pid_t pid;
MPI_Info info;
char *keyval, *tmp;
pid = getpid();
MPI_Init(NULL, NULL);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Comm_get_parent(&parent);
/* If we get COMM_NULL back, then we're the parent */
if (MPI_COMM_NULL == parent) {
if (argc < 2) {
fprintf(stderr, "Usage: info_spawn key:value key:value...\n");
exit(1);
}
pid = getpid();
printf("Parent [pid %ld] about to spawn!\n", (long)pid);
MPI_Info_create(&info);
for (i=1; i < argc; i++) {
tmp = strdup(argv[i]);
keyval = strchr(tmp, ':');
if (NULL == keyval) {
fprintf(stderr, "Missing colon separator in key-value\n");
exit(1);
}
*keyval = '\0';
keyval++;
fprintf(stderr, "SETTING %s TO %s\n", tmp, keyval);
MPI_Info_set(info, tmp, keyval);
free(tmp);
}
if (MPI_SUCCESS != (rc = MPI_Comm_spawn(argv[0], MPI_ARGV_NULL, 3, info,
0, MPI_COMM_WORLD, &child, MPI_ERRCODES_IGNORE))) {
printf("Child failed to spawn\n");
return rc;
}
printf("Parent done with spawn\n");
if (0 == rank) {
msg = 38;
printf("Parent sending message to child\n");
MPI_Send(&msg, 1, MPI_INT, 0, 1, child);
}
MPI_Comm_disconnect(&child);
printf("Parent disconnected\n");
}
/* Otherwise, we're the child */
else {
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
hostname = opal_gethostname();
pid = getpid();
printf("Hello from the child %d of %d on host %s pid %ld\n", rank, 3, hostname, (long)pid);
if (0 == rank) {
MPI_Recv(&msg, 1, MPI_INT, 0, 1, parent, MPI_STATUS_IGNORE);
printf("Child %d received msg: %d\n", rank, msg);
}
MPI_Comm_disconnect(&parent);
printf("Child %d disconnected\n", rank);
}
MPI_Finalize();
fprintf(stderr, "%d: exiting\n", pid);
return 0;
}

13
test/simple/init-exit77.c Normal file

@@ -0,0 +1,13 @@
#include <stdio.h>
#include <stdlib.h>
#include "mpi.h"
int main(int argc, char **argv) {
int debugme = 1;
MPI_Init(&argc, &argv);
printf("init...\n");
fflush(0);
MPI_Finalize();
exit(77);
}

172
test/simple/intercomm_create.c Normal file

@@ -0,0 +1,172 @@
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <unistd.h>
#include <mpi.h>
#define NB_SPAWN 4
static void do_parent(char *argv[], int rank, int count);
static void do_target(char* argv[], MPI_Comm parent);
static char *cmd_argv1 = "b";
static char *cmd_argv2 = "c";
static char *whoami = "a";
static int tag = 201;
void ompitest_warning( char* filename, int lineno, const char* fmt, ... )
{
char* buf = NULL;
va_list va_list;
va_start(va_list, fmt);
opal_vasprintf( &buf, fmt, va_list );
va_end(va_list);
printf( "*warning* %s:%d %s\n", filename, lineno, buf );
free(buf);
}
void ompitest_error( char* filename, int lineno, const char* fmt, ... )
{
char* buf = NULL;
va_list va_list;
va_start(va_list, fmt);
opal_vasprintf( &buf, fmt, va_list );
va_end(va_list);
printf( "*error* %s:%d %s\n", filename, lineno, buf );
free(buf);
}
int
main(int argc, char *argv[])
{
int rank, size;
MPI_Comm parent;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
/* Check to see if we *were* spawned -- because this is a test, we
can only assume the existence of this one executable. Hence, we
both mpirun it and spawn it. */
parent = MPI_COMM_NULL;
MPI_Comm_get_parent(&parent);
if (parent != MPI_COMM_NULL) {
whoami = argv[1];
do_target(argv, parent);
} else {
do_parent(argv, rank, size);
}
/* All done */
MPI_Finalize();
return 0;
}
static int
spawn_and_merge( char* argv[], char* arg, int count,
MPI_Comm* inter, MPI_Comm* intra )
{
int *errcode, err, i;
char *spawn_argv[2];
errcode = malloc(sizeof(int) * count);
if (errcode == NULL)
ompitest_error(__FILE__, __LINE__, "Doh! Rank %d was not able to allocate enough memory. MPI test aborted!\n", 0);
memset(errcode, -1, count * sizeof(int));
/*MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);*/
spawn_argv[0] = arg;
spawn_argv[1] = NULL;
err = MPI_Comm_spawn(argv[0], spawn_argv, count, MPI_INFO_NULL, 0,
MPI_COMM_WORLD, inter, errcode);
for (i = 0; i < count; i++)
if (errcode[i] != MPI_SUCCESS)
ompitest_error(__FILE__, __LINE__,
"ERROR: MPI_Comm_spawn returned errcode[%d] = %d\n",
i, errcode[i]);
if (err != MPI_SUCCESS)
ompitest_error(__FILE__, __LINE__,
"ERROR: MPI_Comm_spawn returned errcode = %d\n", err);
err = MPI_Intercomm_merge( *inter, 0, intra );
free(errcode);
return err;
}
static void
do_parent(char *argv[], int rank, int count)
{
MPI_Comm ab_inter, ab_intra, ac_inter, ac_intra, ab_c_inter, abc_intra;
int err;
err = spawn_and_merge( argv, cmd_argv1, count, &ab_inter, &ab_intra );
err = spawn_and_merge( argv, cmd_argv2, count, &ac_inter, &ac_intra );
printf( "%s: MPI_Intercomm_create( ab_intra, 0, ac_intra, %d, %d, &inter) (%d)\n",
whoami, count, tag, err );
err = MPI_Intercomm_create( ab_intra, 0, ac_intra, count, tag, &ab_c_inter );
printf( "%s: intercomm_create (%d)\n", whoami, err );
printf( "%s: barrier on inter-comm - before\n", whoami );
err = MPI_Barrier(ab_c_inter);
printf( "%s: barrier on inter-comm - after\n", whoami );
err = MPI_Intercomm_merge(ab_c_inter, 0, &abc_intra);
printf( "%s: intercomm_merge(%d) (%d) [rank %d]\n", whoami, 0, err, rank );
err = MPI_Barrier(abc_intra);
printf( "%s: barrier (%d)\n", whoami, err );
MPI_Comm_free(&abc_intra);
MPI_Comm_free(&ab_c_inter);
MPI_Comm_free(&ab_intra);
MPI_Comm_free(&ac_intra);
MPI_Comm_disconnect(&ab_inter);
MPI_Comm_disconnect(&ac_inter);
}
static void
do_target(char* argv[], MPI_Comm parent)
{
int rank, first = 0, err;
MPI_Comm intra, inter, merge1;
if( 0 == strcmp(argv[1], cmd_argv1) ) first = 1;
/*MPI_Comm_set_errhandler(parent, MPI_ERRORS_RETURN);*/
err = MPI_Intercomm_merge( parent, 1, &intra );
MPI_Comm_rank(intra, &rank);
if( first ) {
printf( "%s: MPI_Intercomm_create( intra, 0, intra, MPI_COMM_NULL, %d, &inter) [rank %d]\n", whoami, tag, rank );
err = MPI_Intercomm_create( intra, 0, MPI_COMM_NULL, 0, tag, &inter);
printf( "%s: intercomm_create (%d)\n", whoami, err );
} else {
printf( "%s: MPI_Intercomm_create( MPI_COMM_WORLD, 0, intra, 0, %d, &inter) [rank %d]\n", whoami, tag, rank );
err = MPI_Intercomm_create( MPI_COMM_WORLD, 0, intra, 0, tag, &inter);
printf( "%s: intercomm_create (%d)\n", whoami, err );
}
printf( "%s: barrier on inter-comm - before\n", whoami );
err = MPI_Barrier(inter);
printf( "%s: barrier on inter-comm - after\n", whoami );
err = MPI_Intercomm_merge( inter, 0, &merge1 );
MPI_Comm_rank(merge1, &rank);
printf( "%s: intercomm_merge(%d) (%d) [rank %d]\n", whoami, first, err, rank );
err = MPI_Barrier(merge1);
printf( "%s: barrier (%d)\n", whoami, err );
MPI_Comm_free(&merge1);
MPI_Comm_free(&inter);
MPI_Comm_free(&intra);
MPI_Comm_disconnect(&parent);
}

293
test/simple/interlib.c Normal file

@@ -0,0 +1,293 @@
/* -*- C -*-
*
* $HEADER$
*
* The most basic of MPI applications
*/
#include <stdio.h>
#include <pthread.h>
#include "opal/mca/hwloc/base/base.h"
#include "mpi.h"
#include "orte/util/proc_info.h"
#include "opal/mca/pmix/base/base.h"
static size_t interlibhandler_id = SIZE_MAX;
static opal_pmix_lock_t thread_complete;
static void model_registration_callback(int status,
size_t errhandler_ref,
void *cbdata)
{
opal_pmix_lock_t *lock = (opal_pmix_lock_t*)cbdata;
interlibhandler_id = errhandler_ref;
OPAL_PMIX_WAKEUP_THREAD(lock);
}
static void model_callback(int status,
const opal_process_name_t *source,
opal_list_t *info, opal_list_t *results,
opal_pmix_notification_complete_fn_t cbfunc,
void *cbdata)
{
opal_value_t *val;
/* we can ignore our own callback as we obviously
* know that we are OpenMP */
if (NULL != info) {
OPAL_LIST_FOREACH(val, info, opal_value_t) {
if (0 == strcmp(val->key, OPAL_PMIX_PROGRAMMING_MODEL) &&
0 == strcmp(val->data.string, "OpenMP")) {
goto cback;
}
if (OPAL_STRING == val->type) {
opal_output(0, "Thread Model Callback Key: %s Val %s", val->key, val->data.string);
}
}
}
/* otherwise, do something clever here */
cback:
/* we must NOT tell the event handler state machine that we
* are the last step as that will prevent it from notifying
* anyone else that might be listening for declarations */
if (NULL != cbfunc) {
cbfunc(OPAL_SUCCESS, NULL, NULL, NULL, cbdata);
}
OPAL_PMIX_WAKEUP_THREAD(&thread_complete);
}
static void opcbfunc(int status, void *cbdata)
{
opal_pmix_lock_t *lock = (opal_pmix_lock_t*)cbdata;
OPAL_PMIX_WAKEUP_THREAD(lock);
}
static void infocb(int status,
opal_list_t *info,
void *cbdata,
opal_pmix_release_cbfunc_t release_fn,
void *release_cbdata)
{
opal_pmix_lock_t *lock = (opal_pmix_lock_t*)cbdata;
opal_value_t *kv;
OPAL_LIST_FOREACH(kv, info, opal_value_t) {
opal_output(0, "QUERY DATA KEY: %s VALUE %s", kv->key, kv->data.string);
}
if (NULL != release_fn) {
release_fn(release_cbdata);
}
OPAL_PMIX_WAKEUP_THREAD(lock);
}
static void *mylib(void *ptr)
{
opal_list_t info, directives;
opal_value_t *kv;
int ret;
opal_pmix_lock_t lock;
bool init = false;
opal_pmix_query_t *query;
opal_pmix_pdata_t *pdata;
OPAL_PMIX_CONSTRUCT_LOCK(&thread_complete);
/* declare that we are present and active */
OBJ_CONSTRUCT(&info, opal_list_t);
kv = OBJ_NEW(opal_value_t);
kv->key = strdup(OPAL_PMIX_PROGRAMMING_MODEL);
kv->type = OPAL_STRING;
kv->data.string = strdup("OpenMP");
opal_list_append(&info, &kv->super);
kv = OBJ_NEW(opal_value_t);
kv->key = strdup(OPAL_PMIX_MODEL_LIBRARY_NAME);
kv->type = OPAL_STRING;
kv->data.string = strdup("foobar");
opal_list_append(&info, &kv->super);
kv = OBJ_NEW(opal_value_t);
kv->key = strdup(OPAL_PMIX_MODEL_LIBRARY_VERSION);
kv->type = OPAL_STRING;
kv->data.string = strdup("1.2.3.4");
opal_list_append(&info, &kv->super);
kv = OBJ_NEW(opal_value_t);
kv->key = strdup(OPAL_PMIX_THREADING_MODEL);
kv->type = OPAL_STRING;
kv->data.string = strdup("PTHREAD");
opal_list_append(&info, &kv->super);
/* see if pmix is already initialized */
if (opal_pmix.initialized()) {
/* mark that this isn't to go to any default event handler - pmix_init
* takes care of that for us, but we have to explicitly do it here */
kv = OBJ_NEW(opal_value_t);
kv->key = strdup(OPAL_PMIX_EVENT_NON_DEFAULT);
kv->type = OPAL_BOOL;
kv->data.flag = true;
opal_list_append(&info, &kv->super);
/* it is, so let's just use the event notification
* API to let everyone know we are here */
OPAL_PMIX_CONSTRUCT_LOCK(&lock);
ret = opal_pmix.notify_event(OPAL_ERR_MODEL_DECLARED,
&orte_process_info.my_name,
OPAL_PMIX_RANGE_PROC_LOCAL, &info,
opcbfunc, &lock);
OPAL_PMIX_WAIT_THREAD(&lock);
OPAL_PMIX_DESTRUCT_LOCK(&lock);
OPAL_LIST_DESTRUCT(&info);
} else {
/* call pmix to initialize these values */
ret = opal_pmix.init(&info);
OPAL_LIST_DESTRUCT(&info);
init = true;
}
/* register to receive model callbacks */
/* give it a name so we can distinguish it */
OBJ_CONSTRUCT(&directives, opal_list_t);
kv = OBJ_NEW(opal_value_t);
kv->key = strdup(OPAL_PMIX_EVENT_HDLR_NAME);
kv->type = OPAL_STRING;
kv->data.string = strdup("My-Declarations");
opal_list_append(&directives, &kv->super);
/* specify the event code */
OBJ_CONSTRUCT(&info, opal_list_t);
kv = OBJ_NEW(opal_value_t);
kv->key = strdup("status"); // the key here is irrelevant
kv->type = OPAL_INT;
kv->data.integer = OPAL_ERR_MODEL_DECLARED;
opal_list_append(&info, &kv->super);
/* we could constrain the range to proc_local - technically, this
* isn't required so long as the code that generates
* the event stipulates its range as proc_local. We rely
* on that here */
OPAL_PMIX_CONSTRUCT_LOCK(&lock);
opal_pmix.register_evhandler(&info, &directives, model_callback,
model_registration_callback,
(void*)&lock);
OPAL_PMIX_WAIT_THREAD(&lock);
OPAL_PMIX_DESTRUCT_LOCK(&lock);
OPAL_LIST_DESTRUCT(&info);
OPAL_LIST_DESTRUCT(&directives);
/* wait for the model callback */
OPAL_PMIX_WAIT_THREAD(&thread_complete);
/* let's do a couple of operations just to verify we can,
* starting with a query */
OBJ_CONSTRUCT(&info, opal_list_t);
query = OBJ_NEW(opal_pmix_query_t);
opal_argv_append_nosize(&query->keys, OPAL_PMIX_QUERY_NAMESPACES);
opal_list_append(&info, &query->super);
OPAL_PMIX_CONSTRUCT_LOCK(&lock);
opal_pmix.query(&info, infocb, &lock);
OPAL_PMIX_WAIT_THREAD(&lock);
OPAL_PMIX_DESTRUCT_LOCK(&lock);
OPAL_LIST_DESTRUCT(&info);
/* Get something */
opal_pmix.get(&orte_process_info.my_name,
"WASSUP", NULL, &kv);
if (NULL == kv) {
fprintf(stderr, "ERROR GETTING WASSUP\n");
} else {
fprintf(stderr, "THREAD WASSUP: %s\n", kv->data.string);
OBJ_RELEASE(kv);
}
/* lookup something published by the main thread */
OBJ_CONSTRUCT(&info, opal_list_t);
pdata = OBJ_NEW(opal_pmix_pdata_t);
pdata->proc = orte_process_info.my_name;
pdata->value.key = strdup("SOMETHING");
opal_list_append(&info, &pdata->super);
/* tell the call to wait for the data to be published */
OBJ_CONSTRUCT(&directives, opal_list_t);
kv = OBJ_NEW(opal_value_t);
kv->key = strdup(OPAL_PMIX_WAIT);
kv->type = OPAL_INT;
kv->data.integer = 0; // wait for all
opal_list_append(&directives, &kv->super);
if (OPAL_SUCCESS != opal_pmix.lookup(&info, &directives)) {
fprintf(stderr, "LOOKUP FAILED\n");
} else {
pdata = (opal_pmix_pdata_t*)opal_list_get_first(&info);
fprintf(stderr, "LOOKUP RETURNED %s\n", pdata->value.data.string);
}
OPAL_LIST_DESTRUCT(&info);
OPAL_LIST_DESTRUCT(&directives);
if (init) {
/* need to finalize to maintain refcount */
opal_pmix.finalize();
}
/* done */
return NULL;
}
int main(int argc, char* argv[])
{
int rank, size, rc;
hwloc_cpuset_t cpus;
char *bindings = NULL;
pid_t pid;
pthread_t mythread;
opal_value_t kv, *kptr;
opal_list_t list;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
pid = getpid();
/* push something the thread can recognize */
OBJ_CONSTRUCT(&kv, opal_value_t);
kv.key = strdup("WASSUP");
kv.type = OPAL_STRING;
kv.data.string = strdup("nothing");
opal_pmix.put(OPAL_PMIX_LOCAL, &kv);
OBJ_DESTRUCT(&kv);
/* no need to commit it as this is strictly within ourselves */
/* spin up a thread */
if (pthread_create(&mythread, NULL, mylib, NULL)) {
fprintf(stderr, "Error creating thread\n");
goto done;
}
printf("[%lu] Rank %d: getting topology\n", (unsigned long)pid, rank);
fflush(stdout);
if (OPAL_SUCCESS == opal_hwloc_base_get_topology()) {
cpus = hwloc_bitmap_alloc();
rc = hwloc_get_cpubind(opal_hwloc_topology, cpus, HWLOC_CPUBIND_PROCESS);
hwloc_bitmap_list_asprintf(&bindings, cpus);
}
printf("Hello, World, I am %d of %d [%d local peers]: get_cpubind: %d bitmap %s\n",
rank, size, orte_process_info.num_local_peers, rc,
(NULL == bindings) ? "NULL" : bindings);
/* publish something */
OBJ_CONSTRUCT(&list, opal_list_t);
kptr = OBJ_NEW(opal_value_t);
kptr->key = strdup("SOMETHING");
kptr->type = OPAL_STRING;
kptr->data.string = strdup("SILLY-THING");
opal_list_append(&list, &kptr->super);
opal_pmix.publish(&list);
OPAL_LIST_DESTRUCT(&list);
/* wait for the thread to finish */
if (pthread_join(mythread, NULL)) {
fprintf(stderr, "Error joining thread\n");
}
done:
MPI_Finalize();
return 0;
}

65
test/simple/iof.c Normal file

@@ -0,0 +1,65 @@
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <stdbool.h>
#include <unistd.h>
#include <mpi.h>
#define ORTE_IOF_BASE_MSG_MAX 2048
int main(int argc, char *argv[])
{
int i, rank, size, next, prev, tag = 201;
int pos, msgsize, nbytes;
bool done;
char *msg;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
fprintf(stderr, "Rank %d has cleared MPI_Init\n", rank);
next = (rank + 1) % size;
prev = (rank + size - 1) % size;
msg = malloc(ORTE_IOF_BASE_MSG_MAX);
pos = 0;
nbytes = 0;
if (0 == rank) {
while (0 != (msgsize = read(0, msg, ORTE_IOF_BASE_MSG_MAX))) {
fprintf(stderr, "Rank %d: sending blob %d\n", rank, pos);
if (msgsize > 0) {
MPI_Bcast(msg, ORTE_IOF_BASE_MSG_MAX, MPI_BYTE, 0, MPI_COMM_WORLD);
}
++pos;
nbytes += msgsize;
}
fprintf(stderr, "Rank %d: sending termination blob %d\n", rank, pos);
memset(msg, 0, ORTE_IOF_BASE_MSG_MAX);
MPI_Bcast(msg, ORTE_IOF_BASE_MSG_MAX, MPI_BYTE, 0, MPI_COMM_WORLD);
MPI_Barrier(MPI_COMM_WORLD);
} else {
while (1) {
MPI_Bcast(msg, ORTE_IOF_BASE_MSG_MAX, MPI_BYTE, 0, MPI_COMM_WORLD);
fprintf(stderr, "Rank %d: recvd blob %d\n", rank, pos);
++pos;
done = true;
for (i=0; i < ORTE_IOF_BASE_MSG_MAX; i++) {
if (0 != msg[i]) {
done = false;
break;
}
}
if (done) {
break;
}
}
fprintf(stderr, "Rank %d: recv done\n", rank);
MPI_Barrier(MPI_COMM_WORLD);
}
fprintf(stderr, "Rank %d has completed bcast\n", rank);
MPI_Finalize();
return 0;
}

30
test/simple/loop_child.c Normal file

@@ -0,0 +1,30 @@
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <pthread.h>
#include <semaphore.h>
#include "mpi.h"
int main( int argc, char **argv )
{
MPI_Comm parent;
MPI_Comm merged;
int rank;
int size;
MPI_Init(&argc, &argv);
printf("Child: launch\n");
MPI_Comm_get_parent(&parent);
MPI_Intercomm_merge(parent, 1, &merged);
MPI_Comm_rank(merged, &rank);
MPI_Comm_size(merged, &size);
printf("Child merged rank = %d, size = %d\n", rank, size);
MPI_Comm_free(&merged);
MPI_Comm_disconnect(&parent);
MPI_Finalize();
printf("Child %d: exiting\n", (int)getpid());
return 0;
}

48
test/simple/loop_spawn.c Normal file

@@ -0,0 +1,48 @@
/* loop_spawn.c: parent that repeatedly spawns the child executable (EXE_TEST) */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include "mpi.h"
#include <pthread.h>
#include <signal.h>
#include <sys/time.h>
#include <errno.h>
#define EXE_TEST "./loop_child"
int main(int argc, char **argv)
{
int iter, itermax, err, rank, size;
MPI_Comm comm, merged;
/* MPI environment */
if (2 == argc) {
itermax = atoi(argv[1]);
} else {
itermax = 100;
}
printf("parent*******************************\n");
printf("parent: Launching MPI*\n");
MPI_Init( &argc, &argv);
for (iter = 0; iter < itermax; ++iter) {
MPI_Comm_spawn(EXE_TEST, NULL, 1, MPI_INFO_NULL,
0, MPI_COMM_WORLD, &comm, &err);
printf("parent: MPI_Comm_spawn #%d return : %d\n", iter, err);
MPI_Intercomm_merge(comm, 0, &merged);
MPI_Comm_rank(merged, &rank);
MPI_Comm_size(merged, &size);
printf("parent: MPI_Comm_spawn #%d rank %d, size %d\n",
iter, rank, size);
MPI_Comm_free(&merged);
MPI_Comm_disconnect(&comm);
}
MPI_Finalize();
printf("parent: End .\n" );
return 0;
}

46
test/simple/makedata.pl Executable file

@@ -0,0 +1,46 @@
#!/usr/bin/env perl
# Make a simple data file of an argv[0]-specified size (understands "k",
# "m", and "g" suffixes) consisting of a repeating pattern.
use strict;
my $size_arg = $ARGV[0];
$size_arg =~ m/^(\d+)/;
my $size = $1;
$size_arg =~ m/([mkg])$/i;
my $size_unit = lc($1);
if ($size_unit eq "k") {
$size *= 1024;
} elsif ($size_unit eq "m") {
$size *= 1048576;
} elsif ($size_unit eq "g") {
$size *= 1073741824;
}
print "Generating size $size\n";
my $file = "data-" . lc($size_arg);
unlink($file);
my $line = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!@#$%^&*()_+=-;";
my $line_len = length($line);
$line .= $line;
open(FILE, ">$file") || die "Can't open file $file";
my $count = 0;
my $line_count = 0;
while ($count < $size) {
my $offset = $line_count % $line_len;
my $num_to_print =
($size - $count < $line_len) ? $size - $count : $line_len;
if ($num_to_print > 0) {
my $printable = substr($line, $offset, $num_to_print - 1);
print FILE $printable . "\n";
$count += $num_to_print;
}
++$line_count;
}
close(FILE);
exit(0);

17
test/simple/mpi_barrier.c Normal file

@@ -0,0 +1,17 @@
/* -*- C -*-
*
* $HEADER$
*
* The most basic of MPI applications
*/
#include <stdio.h>
#include "mpi.h"
int main(int argc, char* argv[])
{
MPI_Init(&argc, &argv);
MPI_Barrier(MPI_COMM_WORLD);
MPI_Finalize();
return 0;
}

43
test/simple/mpi_info.c Normal file

@@ -0,0 +1,43 @@
/* -*- C -*-
*
* $HEADER$
*
* The most basic of MPI applications
*/
#include <stdio.h>
#include "mpi.h"
int main(int argc, char* argv[])
{
char value[MPI_MAX_INFO_VAL];
int flag;
char *keys[] = {
"command",
"argv",
"maxprocs",
"soft",
"host",
"arch",
"wdir",
"thread_level",
"ompi_num_apps",
"ompi_first_rank",
"ompi_np",
"ompi_positioned_file_dir"
};
int i, nk;
MPI_Init(&argc, &argv);
nk = sizeof(keys) / sizeof(char*);
for (i=0; i < nk; i++) {
MPI_Info_get(MPI_INFO_ENV, keys[i], MPI_MAX_INFO_VAL,
value, &flag);
fprintf(stderr, "%s: %s\n", keys[i], (flag) ? value : "Not found");
}
MPI_Finalize();
return 0;
}

17
test/simple/mpi_no_op.c Normal file

@@ -0,0 +1,17 @@
/* -*- C -*-
*
* $HEADER$
*
* The most basic of MPI applications
*/
#include <stdio.h>
#include "mpi.h"
int main(int argc, char* argv[])
{
MPI_Init(&argc, &argv);
MPI_Finalize();
return 0;
}

91
test/simple/mpi_spin.c Normal file

@@ -0,0 +1,91 @@
/* -*- C -*-
*
* $HEADER$
*
* A program that just spins - provides mechanism for testing user-driven
* abnormal program termination
*/
#include <stdio.h>
#include "mpi.h"
#include "opal/dss/dss.h"
#include "opal/mca/pmix/pmix.h"
#include "opal/util/output.h"
#include "orte/util/name_fns.h"
#include "orte/constants.h"
static volatile bool register_active = false;
static void _event_fn(int status,
const opal_process_name_t *source,
opal_list_t *info, opal_list_t *results,
opal_pmix_notification_complete_fn_t cbfunc,
void *cbdata)
{
opal_value_t *kv;
orte_process_name_t proc;
/* the name of the terminating proc should be on the info list */
proc.jobid = ORTE_JOBID_INVALID;
proc.vpid = ORTE_VPID_INVALID;
OPAL_LIST_FOREACH(kv, info, opal_value_t) {
if (0 == strcmp(kv->key, OPAL_PMIX_EVENT_AFFECTED_PROC)) {
proc.jobid = kv->data.name.jobid;
proc.vpid = kv->data.name.vpid;
break;
}
}
opal_output(0, "NOTIFIED OF TERMINATION OF PROC %s",
ORTE_NAME_PRINT(&proc));
/* must let the notifier know we are done */
if (NULL != cbfunc) {
cbfunc(ORTE_SUCCESS, NULL, NULL, NULL, cbdata);
}
}
static void _register_fn(int status,
size_t evhandler_ref,
void *cbdata)
{
opal_list_t *codes = (opal_list_t*)cbdata;
OPAL_LIST_RELEASE(codes);
register_active = false;
}
int main(int argc, char* argv[])
{
int i;
double pi;
opal_list_t *codes;
opal_value_t *kv;
MPI_Init(&argc, &argv);
/* register an event handler for the OPAL_ERR_PROC_ABORTED event */
codes = OBJ_NEW(opal_list_t);
kv = OBJ_NEW(opal_value_t);
kv->key = strdup("errorcode");
kv->type = OPAL_INT;
kv->data.integer = OPAL_ERR_PROC_ABORTED;
opal_list_append(codes, &kv->super);
register_active = true;
opal_pmix.register_evhandler(codes, NULL, _event_fn, _register_fn, codes);
i = 0;
while (1) {
i++;
pi = i / 3.14159256;
if (i > 100) i = 0;
}
MPI_Finalize();
return 0;
}

25
test/simple/multi_abort.c Normal file

@@ -0,0 +1,25 @@
/* -*- C -*-
*
* $HEADER$
*
* The most basic of MPI applications
*/
#include <stdio.h>
#include "mpi.h"
int main(int argc, char* argv[])
{
int rank, size;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
printf("Hello, World, I am %d of %d\n", rank, size);
if (0 != rank) MPI_Abort(MPI_COMM_WORLD, rank);
MPI_Finalize();
return 0;
}

3
test/simple/myhello.spec Normal file

@@ -0,0 +1,3 @@
Name: myhello
Exec: ./hello

3
test/simple/myring.spec Normal file

@@ -0,0 +1,3 @@
Name: myring
Exec: ./ring

210
test/simple/no-disconnect.c Normal file

@@ -0,0 +1,210 @@
/* Contributed by Marcia Cristina Cera
<marcia.cristina.cera@gmail.com>,
http://www.open-mpi.org/community/lists/users/2009/12/11540.php */
/* It was decided that the issue highlighted by this test will NOT be
fixed in the 1.3/1.4 series. It is already fixed in the 1.5
series. Hence, if we detect Open MPI < v1.5, return 77/skip. */
/* Turns out the hnp cannot handle concurrent MPI_Comm_spawns
as of Open MPI 1.7. However, we hope this feature will
work in 2.0. with the new state machine based orte. */
#include <unistd.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <sys/utsname.h>
#include <mpi.h>
#define NCHARS 30
const int max_depth = 4;
/*
* Here are some replacements for standard, blocking MPI
* functions. These replacements are "nice" and yield the
* CPU instead of spinning hard. The interfaces are the same.
* Just replace:
* MPI_Recv with nice_recv
* MPI_Send with nice_send
* MPI_Barrier with nice_barrier
*/
static int nice_send(void *buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm) {
/* Assume a standard (presumably short/eager) send suffices. */
return MPI_Send(buf, count, datatype, dest, tag, comm);
}
static int nice_recv(void *buf, int count, MPI_Datatype datatype, int source, int tag, MPI_Comm comm, MPI_Status *status) {
MPI_Request req;
int flag;
struct timespec dt;
/*
* We're only interested in modest levels of oversubscription
* -- e.g., 2-4x more processes than physical processors.
* So, the sleep time only needs to be about 2-4x longer than
* a futile MPI_Test call. For a wide range of processors,
* something less than a millisecond should be sufficient.
* Excessive sleep times (e.g., 1 second) would degrade performance.
*/
dt.tv_sec = 0;
dt.tv_nsec = 100000;
MPI_Irecv(buf, count, datatype, source, tag, comm, &req);
MPI_Test(&req, &flag, status);
while ( ! flag ) {
nanosleep(&dt, NULL);
MPI_Test(&req, &flag, status);
}
return MPI_SUCCESS;
}
static void nice_barrier(MPI_Comm comm) {
int me, np, jump, buf = -1;
MPI_Comm_rank(comm,&me);
MPI_Comm_size(comm,&np);
/* fan in */
for ( jump = 1; jump < np; jump <<= 1 ) {
if ( ( me & jump ) != 0 ) {
nice_send(&buf, 1, MPI_INT, me - jump, 343, comm);
break;
} else if ( me + jump < np ) {
nice_recv(&buf, 1, MPI_INT, me + jump, 343, comm, MPI_STATUS_IGNORE);
}
}
/* fan out */
if ( 0 != me ) {
nice_recv(&buf, 1, MPI_INT, me - jump, 344, comm, MPI_STATUS_IGNORE);
}
jump >>= 1;
for ( ; jump > 0; jump >>= 1 ) {
if ( me + jump < np ) {
nice_send(&buf, 1, MPI_INT, me + jump, 344, comm);
}
}
}
int main (int argc, char **argv)
{
char bufs [NCHARS]; /* send buffer */
char bufr[2][NCHARS]; /* recv buffers */
MPI_Comm parent;
int level = 0, participate = 1;
struct utsname buf;
/* If this is prior to OMPI v2.0, return 77/skip */
#if defined(OPEN_MPI)
if (OMPI_MAJOR_VERSION < 2) {
printf("Skipping, because the orte cannot handle concurrent MPI_Comm_spawns\n");
return 77;
} else {
printf("Verify that this test is truly working because conncurrent MPI_Comm_spawns"
" has not worked before.\n");
}
#endif
uname(&buf);
printf("I AM pid %d with level %d on %s\n", getpid(), (argc < 2)?0:atoi(argv[1]), buf.nodename);
MPI_Init(&argc, &argv);
MPI_Comm_get_parent(&parent);
if (MPI_COMM_NULL != parent) {
/* spawned processes get stuff from parent */
level = atoi(argv[1]);
MPI_Recv(&bufr[0], sizeof(char)*NCHARS, MPI_CHAR, MPI_ANY_SOURCE,
MPI_ANY_TAG, parent, MPI_STATUS_IGNORE);
printf("Parent sent: %s\n", bufr[0]);
} else {
/* original processes have to decide whether to participate */
/* In this test, each process launched by "mpirun -n <np>" spawns a
* binary tree of processes. You end up with <np> * ( 1 << max_depth )
* processes altogether. For max_depth=4, this means 16*<np>. There
* is potential here for heavy oversubscription, especially if in
* testing we launch tests with <np> set to the number of available
* processors. This test tolerates oversubscription somewhat since
* it entails little inter-process synchronization. Nevertheless,
* we try to idle all but <np>/4 of the original processes, using a
* minimum of at least two processes
*/
int me, np;
MPI_Comm_size(MPI_COMM_WORLD,&np);
MPI_Comm_rank(MPI_COMM_WORLD,&me);
if ( np > 4 ) {
/* turn off all but every 4th process */
if ( ( me & 3 ) != 0 ) participate = 0;
} else
if ( np > 2 ) {
/* turn off all but every 2nd process */
if ( ( me & 1 ) != 0 ) participate = 0;
}
}
/* all spawned processes and selected "root" processes participate */
if ( participate ) {
printf("level = %d\n", level);
/* prepare send buffer */
sprintf(bufs,"level %d (pid:%d)", level, getpid());
/* spawn */
if (level < max_depth) {
int i, nspawn = 2, errcodes[1];
MPI_Request req[2];
MPI_Comm comm[2];
char argv1[NCHARS];
char *args[2];
/* level 0 spawns only one process to mimic the original test */
if ( level == 0 ) nspawn = 1;
/* prepare command line arguments */
snprintf(argv1, sizeof(argv1), "%d", level+1);
args[0] = argv1;
args[1] = NULL;
/* spawn, with a message sent to and received from each child */
for ( i = 0; i < nspawn; i++ ) {
MPI_Comm_spawn(argv[0], args, 1, MPI_INFO_NULL, 0, MPI_COMM_SELF,
&comm[i], errcodes);
MPI_Send(&bufs, sizeof(char)*NCHARS, MPI_CHAR, 0, 100, comm[i]);
MPI_Irecv(&bufr[i], sizeof(char)*NCHARS, MPI_CHAR, MPI_ANY_SOURCE,
MPI_ANY_TAG, comm[i], &req[i]);
}
/* wait for messages from children and print them */
MPI_Waitall(nspawn, req, MPI_STATUSES_IGNORE);
for ( i = 0; i < nspawn; i++ )
printf("Child %d sent: %s\n", i, bufr[i]);
}
/* send message back to parent */
if (MPI_COMM_NULL != parent) {
MPI_Send(&bufs, sizeof(char)*NCHARS, MPI_CHAR, 0, 100, parent);
}
}
/* non-participating processes wait at this barrier for their peers */
/* (This barrier won't cost that many CPU cycles.) */
if (MPI_COMM_NULL == parent) {
nice_barrier(MPI_COMM_WORLD);
}
MPI_Finalize();
return 0;
}

23  test/simple/nonzero.c  (regular file)
@@ -0,0 +1,23 @@
#include <stdlib.h>
#include <unistd.h>
#include <mpi.h>
int main(int argc, char **argv)
{
int rank;
if(argc < 2) {
return 0;
}
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD,&rank);
int i = atoi(argv[1]);
MPI_Finalize();
if (i != rank) {
sleep(1);
}
return i;
}

222  test/simple/parallel_r64.c  (regular file)
@@ -0,0 +1,222 @@
/* parallel MPI read from a single file */
#include "mpi.h"
#include <stdio.h>
#include <string.h>
#define D 3 /* dimensions */
#define X 1024 /* global x grid size */
#define Y 1024 /* global y grid size */
#define Z 1024 /* global z grid size */
#define nx 256 /* local x grid size */
#define ny 256 /* local y grid size */
#define nz 256 /* local z grid size */
#define ng (nx*ny*nz) /* local grid (cube) size */
#define npx 4 /* number of PE's in x direction */
#define npy 4 /* number of PE's in y direction */
#define npz 4 /* number of PE's in z direction */
#define np (npx*npy*npz) /* total PE count */
#define LOOP 1
#define MAX_RR_NAME 7
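/* Collective read benchmark: run with exactly np = 4*4*4 = 64 ranks (checked
* below). Each rank reads its own 256^3 block of the 1024^3 integer grid
* through an MPI darray filetype; open/read/close times and MB/s rates are
* reported by rank 0. */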
int
main(int argc, char* argv[])
{
int i, rank, npes, bug=0;
int buf[ng];
MPI_File thefile;
MPI_Status status;
MPI_Datatype filetype;
MPI_Comm new_comm;
MPI_Offset offset=0;
MPI_Info info=MPI_INFO_NULL;
int gsize[D],distrib[D],dargs[D],psize[D];
int dims[D],periods[D],reorder;
double t1,t2,mbs;
double to1,to2,tc1,tc2;
double et,eto,etc;
double max_mbs,min_mbs,avg_mbs;
double max_et,min_et,avg_et;
double max_eto,min_eto,avg_eto;
double max_etc,min_etc,avg_etc;
char process_name[MPI_MAX_PROCESSOR_NAME + 1];
char rr_blank[] = {" "};
char rr_empty[] = {"???????"};
int count;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &npes);
if ( rank == 0 )
{
if ( argc < 2 )
{
printf(" ERROR: no filename given\n");
bug++;
}
if ( npes == np )
{
printf(" file name: %s\n",argv[1]);
printf(" total number of PE's: %3d\n",np);
printf(" number of PE's in x direction: %3d\n",npx);
printf(" number of PE's in y direction: %3d\n",npy);
printf(" number of PE's in z direction: %3d\n",npz);
printf(" global grid size: %dx%dx%d 4 byte integers (total %lu)\n",X,Y,Z,(unsigned long)X*Y*Z);
printf(" local grid size: %dx%dx%d 4 byte integers (total %d)\n",nx,ny,nz,ng);
}
else
{
printf(" ERROR: total number of PE's must be %d\n",np);
printf(" actual number of PE's was %d\n",npes);
bug++;
}
if ( bug )
{
MPI_Abort(MPI_COMM_WORLD,-1);
}
}
if ( MPI_Get_processor_name(process_name, &count) != MPI_SUCCESS)
{
sprintf(process_name, "%s", rr_empty);
}
else
{
if (count < MAX_RR_NAME) strncat(&process_name[count],rr_blank,MAX_RR_NAME-count);
process_name[MAX_RR_NAME] = '\0';
}
MPI_Info_create(&info);
/* allow multiple writers to write to the file concurrently */
/*MPI_Info_set(info,"panfs_concurrent_write","1");*/
/* use data aggregation */
/*MPI_Info_set(info,"romio_cb_write","enable"); */
/*MPI_Info_set(info,"romio_cb_write","disable");*/
/*MPI_Info_set(info,"romio_cb_read","enable"); */
/*MPI_Info_set(info,"romio_cb_read","disable");*/
/* use one aggregator/writer per node */
/*MPI_Info_set(info,"cb_config_list","*:1");*/
/* aggregators/writers per allocation: use this or the above (both work) */
/*i = ((npes-1)/8) + 1;
sprintf(awpa,"%d",i);
MPI_Info_set (info,"cb_nodes",awpa);*/
for ( i=0; i<D; i++ )
{
periods[i] = 1; /* true */
}
reorder = 1; /* true */
dims[0] = npx;
dims[1] = npy;
dims[2] = npz;
MPI_Cart_create(MPI_COMM_WORLD, D, dims, periods, reorder, &new_comm);
for ( i=0; i<D; i++ )
{
distrib[i] = MPI_DISTRIBUTE_BLOCK;
dargs[i] = MPI_DISTRIBUTE_DFLT_DARG;
/* psize[i] = 0; */
}
gsize[0] = X;
gsize[1] = Y;
gsize[2] = Z;
psize[0] = npx;
psize[1] = npy;
psize[2] = npz;
/*
MPI_Dims_create(npes, D, psize);
printf("psize %d %d %d\n",psize[0],psize[1],psize[2]);
*/
MPI_Type_create_darray(npes, rank, D, gsize, distrib, dargs, psize, MPI_ORDER_FORTRAN, MPI_INT, &filetype);
MPI_Type_commit(&filetype);
to1 = MPI_Wtime();
MPI_File_open(new_comm, argv[1], MPI_MODE_RDONLY, info, &thefile);
to2 = MPI_Wtime();
MPI_File_set_view(thefile, offset, MPI_INT, filetype, "native", MPI_INFO_NULL);
t1 = MPI_Wtime();
for ( i=0; i<LOOP; i++ )
{
MPI_File_read_all(thefile, buf, ng, MPI_INT, &status);
}
t2 = MPI_Wtime();
/*MPI_File_sync(thefile); */
tc1 = MPI_Wtime();
MPI_File_close(&thefile);
tc2 = MPI_Wtime();
et = (t2 - t1)/LOOP;
eto = (to2 - to1)/LOOP;
etc = (tc2 - tc1)/LOOP;
mbs = (((double)(LOOP*X*Y*Z)*sizeof(int)))/(1000000.0*(t2-t1));
/*printf(" %s[%3d] ET %5.2f %6.2f %6.2f %5.1f mbs Data %9d %9d \n", process_name, rank, t1, t2, t2-t1, mbs, buf[0], buf[ng-1]);*/
MPI_Barrier(MPI_COMM_WORLD);
MPI_Reduce(&mbs, &avg_mbs, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
MPI_Reduce(&mbs, &min_mbs, 1, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD);
MPI_Reduce(&mbs, &max_mbs, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
MPI_Reduce(&et, &avg_et, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
MPI_Reduce(&et, &min_et, 1, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD);
MPI_Reduce(&et, &max_et, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
MPI_Reduce(&eto, &avg_eto, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
MPI_Reduce(&eto, &min_eto, 1, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD);
MPI_Reduce(&eto, &max_eto, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
MPI_Reduce(&etc, &avg_etc, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
MPI_Reduce(&etc, &min_etc, 1, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD);
MPI_Reduce(&etc, &max_etc, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
fflush(stdout);
if ( rank == 0 )
{
mbs = avg_mbs/npes;
printf("\n average read rate: %9.1f mbs\n", mbs);
printf(" minimum read rate: %9.1f mbs\n", min_mbs);
printf(" maximum read rate: %9.1f mbs\n\n", max_mbs);
avg_eto = avg_eto/npes;
avg_et = avg_et/npes;
avg_etc = avg_etc/npes;
printf(" open time: %9.3f min %9.3f avg %9.3f max\n",min_eto,avg_eto,max_eto);
printf(" read time: %9.3f min %9.3f avg %9.3f max\n",min_et,avg_et,max_et);
printf(" close time: %9.3f min %9.3f avg %9.3f max\n\n",min_etc,avg_etc,max_etc);
fflush(stdout);
}
MPI_Finalize();
return 0;
}

222  test/simple/parallel_r8.c  (regular file)
@@ -0,0 +1,222 @@
/* parallel MPI read from a single file */
#include "mpi.h"
#include <stdio.h>
#include <string.h>
#define D 3 /* dimensions */
#define X 256 /* global x grid size */
#define Y 256 /* global y grid size */
#define Z 256 /* global z grid size */
#define nx 128 /* local x grid size */
#define ny 128 /* local y grid size */
#define nz 128 /* local z grid size */
#define ng (nx*ny*nz) /* local grid (cube) size */
#define npx 2 /* number of PE's in x direction */
#define npy 2 /* number of PE's in y direction */
#define npz 2 /* number of PE's in z direction */
#define np (npx*npy*npz) /* total PE count */
#define LOOP 1
#define MAX_RR_NAME 7
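/* Same collective read benchmark as parallel_r64.c, scaled down: run with
* exactly np = 2*2*2 = 8 ranks, each reading its 128^3 block of the 256^3
* integer grid. */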
int
main(int argc, char* argv[])
{
int i, rank, npes, bug=0;
int buf[ng];
MPI_File thefile;
MPI_Status status;
MPI_Datatype filetype;
MPI_Comm new_comm;
MPI_Offset offset=0;
MPI_Info info=MPI_INFO_NULL;
int gsize[D],distrib[D],dargs[D],psize[D];
int dims[D],periods[D],reorder;
double t1,t2,mbs;
double to1,to2,tc1,tc2;
double et,eto,etc;
double max_mbs,min_mbs,avg_mbs;
double max_et,min_et,avg_et;
double max_eto,min_eto,avg_eto;
double max_etc,min_etc,avg_etc;
char process_name[MPI_MAX_PROCESSOR_NAME + 1];
char rr_blank[] = {" "};
char rr_empty[] = {"???????"};
int count;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &npes);
if ( rank == 0 )
{
if ( argc < 2 )
{
printf(" ERROR: no filename given\n");
bug++;
}
if ( npes == np )
{
printf(" file name: %s\n",argv[1]);
printf(" total number of PE's: %3d\n",np);
printf(" number of PE's in x direction: %3d\n",npx);
printf(" number of PE's in y direction: %3d\n",npy);
printf(" number of PE's in z direction: %3d\n",npz);
printf(" global grid size: %dx%dx%d 4 byte integers (total %lu)\n",X,Y,Z,(unsigned long)X*Y*Z);
printf(" local grid size: %dx%dx%d 4 byte integers (total %d)\n",nx,ny,nz,ng);
}
else
{
printf(" ERROR: total number of PE's must be %d\n",np);
printf(" actual number of PE's was %d\n",npes);
bug++;
}
if ( bug )
{
MPI_Abort(MPI_COMM_WORLD,-1);
}
}
if ( MPI_Get_processor_name(process_name, &count) != MPI_SUCCESS)
{
sprintf(process_name, "%s", rr_empty);
}
else
{
if (count < MAX_RR_NAME) strncat(&process_name[count],rr_blank,MAX_RR_NAME-count);
process_name[MAX_RR_NAME] = '\0';
}
MPI_Info_create(&info);
/* allow multiple writers to write to the file concurrently */
/*MPI_Info_set(info,"panfs_concurrent_write","1");*/
/* use data aggregation */
/*MPI_Info_set(info,"romio_cb_write","enable"); */
/*MPI_Info_set(info,"romio_cb_write","disable");*/
/*MPI_Info_set(info,"romio_cb_read","enable"); */
/*MPI_Info_set(info,"romio_cb_read","disable");*/
/* use one aggregator/writer per node */
/*MPI_Info_set(info,"cb_config_list","*:1");*/
/* aggregators/writers per allocation: use this or the above (both work) */
/*i = ((npes-1)/8) + 1;
sprintf(awpa,"%d",i);
MPI_Info_set (info,"cb_nodes",awpa);*/
for ( i=0; i<D; i++ )
{
periods[i] = 1; /* true */
}
reorder = 1; /* true */
dims[0] = npx;
dims[1] = npy;
dims[2] = npz;
MPI_Cart_create(MPI_COMM_WORLD, D, dims, periods, reorder, &new_comm);
for ( i=0; i<D; i++ )
{
distrib[i] = MPI_DISTRIBUTE_BLOCK;
dargs[i] = MPI_DISTRIBUTE_DFLT_DARG;
/* psize[i] = 0; */
}
gsize[0] = X;
gsize[1] = Y;
gsize[2] = Z;
psize[0] = npx;
psize[1] = npy;
psize[2] = npz;
/*
MPI_Dims_create(npes, D, psize);
printf("psize %d %d %d\n",psize[0],psize[1],psize[2]);
*/
MPI_Type_create_darray(npes, rank, D, gsize, distrib, dargs, psize, MPI_ORDER_FORTRAN, MPI_INT, &filetype);
MPI_Type_commit(&filetype);
to1 = MPI_Wtime();
MPI_File_open(new_comm, argv[1], MPI_MODE_RDONLY, info, &thefile);
to2 = MPI_Wtime();
MPI_File_set_view(thefile, offset, MPI_INT, filetype, "native", MPI_INFO_NULL);
t1 = MPI_Wtime();
for ( i=0; i<LOOP; i++ )
{
MPI_File_read_all(thefile, buf, ng, MPI_INT, &status);
}
t2 = MPI_Wtime();
/*MPI_File_sync(thefile); */
tc1 = MPI_Wtime();
MPI_File_close(&thefile);
tc2 = MPI_Wtime();
et = (t2 - t1)/LOOP;
eto = (to2 - to1)/LOOP;
etc = (tc2 - tc1)/LOOP;
mbs = (((double)(LOOP*X*Y*Z)*sizeof(int)))/(1000000.0*(t2-t1));
/*printf(" %s[%3d] ET %5.2f %6.2f %6.2f %5.1f mbs Data %9d %9d \n", process_name, rank, t1, t2, t2-t1, mbs, buf[0], buf[ng-1]);*/
MPI_Barrier(MPI_COMM_WORLD);
MPI_Reduce(&mbs, &avg_mbs, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
MPI_Reduce(&mbs, &min_mbs, 1, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD);
MPI_Reduce(&mbs, &max_mbs, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
MPI_Reduce(&et, &avg_et, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
MPI_Reduce(&et, &min_et, 1, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD);
MPI_Reduce(&et, &max_et, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
MPI_Reduce(&eto, &avg_eto, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
MPI_Reduce(&eto, &min_eto, 1, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD);
MPI_Reduce(&eto, &max_eto, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
MPI_Reduce(&etc, &avg_etc, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
MPI_Reduce(&etc, &min_etc, 1, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD);
MPI_Reduce(&etc, &max_etc, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
fflush(stdout);
if ( rank == 0 )
{
mbs = avg_mbs/npes;
printf("\n average read rate: %9.1f mbs\n", mbs);
printf(" minimum read rate: %9.1f mbs\n", min_mbs);
printf(" maximum read rate: %9.1f mbs\n\n", max_mbs);
avg_eto = avg_eto/npes;
avg_et = avg_et/npes;
avg_etc = avg_etc/npes;
printf(" open time: %9.3f min %9.3f avg %9.3f max\n",min_eto,avg_eto,max_eto);
printf(" read time: %9.3f min %9.3f avg %9.3f max\n",min_et,avg_et,max_et);
printf(" close time: %9.3f min %9.3f avg %9.3f max\n\n",min_etc,avg_etc,max_etc);
fflush(stdout);
}
MPI_Finalize();
return 0;
}

228  test/simple/parallel_w64.c  (regular file)
@@ -0,0 +1,228 @@
/* parallel MPI write to a single file */
#include "mpi.h"
#include <stdio.h>
#include <string.h>
#define D 3 /* dimensions */
#define X 1024 /* global x grid size */
#define Y 1024 /* global y grid size */
#define Z 1024 /* global z grid size */
#define nx 256 /* local x grid size */
#define ny 256 /* local y grid size */
#define nz 256 /* local z grid size */
#define ng (nx*ny*nz) /* local grid (cube) size */
#define npx 4 /* number of PE's in x direction */
#define npy 4 /* number of PE's in y direction */
#define npz 4 /* number of PE's in z direction */
#define np (npx*npy*npz) /* total PE count */
#define LOOP 1
#define MAX_RR_NAME 7
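/* Collective write benchmark: run with exactly np = 4*4*4 = 64 ranks. Each
* rank writes its own 256^3 block of the 1024^3 integer grid through an MPI
* darray filetype; open/write/close times and MB/s rates are reported by
* rank 0. */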
int
main(int argc, char* argv[])
{
int i, rank, npes, bug=0;
int buf[ng];
MPI_File thefile;
MPI_Status status;
MPI_Datatype filetype;
MPI_Comm new_comm;
MPI_Offset offset=0;
MPI_Info info=MPI_INFO_NULL;
int gsize[D],distrib[D],dargs[D],psize[D];
int dims[D],periods[D],reorder;
double t1,t2,mbs;
double to1,to2,tc1,tc2;
double et,eto,etc;
double max_mbs,min_mbs,avg_mbs;
double max_et,min_et,avg_et;
double max_eto,min_eto,avg_eto;
double max_etc,min_etc,avg_etc;
char process_name[MPI_MAX_PROCESSOR_NAME + 1];
char rr_blank[] = {" "};
char rr_empty[] = {"???????"};
int count;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &npes);
if ( rank == 0 )
{
if ( argc < 2 )
{
printf(" ERROR: no filename given\n");
bug++;
}
if ( npes == np )
{
printf(" file name: %s\n",argv[1]);
printf(" total number of PE's: %3d\n",np);
printf(" number of PE's in x direction: %4d\n",npx);
printf(" number of PE's in y direction: %4d\n",npy);
printf(" number of PE's in z direction: %4d\n",npz);
printf(" global grid size: %dx%dx%d 4 byte integers (total %lu)\n",X,Y,Z,(unsigned long)X*Y*Z);
printf(" local grid size: %dx%dx%d 4 byte integers (total %d)\n",nx,ny,nz,ng);
}
else
{
printf(" ERROR: total number of PE's must be %d\n",np);
printf(" actual number of PE's was %d\n",npes);
bug++;
}
if ( bug )
{
MPI_Abort(MPI_COMM_WORLD,-1);
}
}
if ( MPI_Get_processor_name(process_name, &count) != MPI_SUCCESS)
{
sprintf(process_name, "%s", rr_empty);
}
else
{
if (count < MAX_RR_NAME) strncat(&process_name[count],rr_blank,MAX_RR_NAME-count);
process_name[MAX_RR_NAME] = '\0';
}
MPI_Barrier(MPI_COMM_WORLD);
MPI_Info_create(&info);
/* allow multiple writers to write to the file concurrently */
/*MPI_Info_set(info,"panfs_concurrent_write","1");*/
/* use data aggregation */
/*MPI_Info_set(info,"romio_cb_write","enable"); */
/*MPI_Info_set(info,"romio_cb_write","disable");*/
/*MPI_Info_set(info,"romio_cb_read","enable"); */
/*MPI_Info_set(info,"romio_cb_read","disable");*/
/* use one aggregator/writer per node */
/*MPI_Info_set(info,"cb_config_list","*:1");*/
/* aggregators/writers per allocation: use this or the above (both work) */
/*i = ((npes-1)/8) + 1;
sprintf(awpa,"%d",i);
MPI_Info_set (info,"cb_nodes",awpa);*/
for ( i=0; i<ng; i++ ) buf[i] = rank*10000 + (i+1)%1024;
for ( i=0; i<D; i++ )
{
periods[i] = 1; /* true */
}
reorder = 1; /* true */
dims[0] = npx;
dims[1] = npy;
dims[2] = npz;
MPI_Cart_create(MPI_COMM_WORLD, D, dims, periods, reorder, &new_comm);
for ( i=0; i<D; i++ )
{
distrib[i] = MPI_DISTRIBUTE_BLOCK;
dargs[i] = MPI_DISTRIBUTE_DFLT_DARG;
/* psize[i] = 0; */
}
gsize[0] = X;
gsize[1] = Y;
gsize[2] = Z;
psize[0] = npx;
psize[1] = npy;
psize[2] = npz;
/*
MPI_Dims_create(npes, D, psize);
printf("psize %d %d %d\n",psize[0],psize[1],psize[2]);
*/
MPI_Type_create_darray(npes, rank, D, gsize, distrib, dargs, psize, MPI_ORDER_FORTRAN, MPI_INT, &filetype);
/*MPI_Type_create_darray(npes, rank, D, gsize, distrib, dargs, psize, MPI_ORDER_C, MPI_INT, &filetype); don't do this */
MPI_Type_commit(&filetype);
to1 = MPI_Wtime();
MPI_File_open(new_comm, argv[1], MPI_MODE_WRONLY | MPI_MODE_CREATE, info, &thefile);
to2 = MPI_Wtime();
MPI_File_set_size(thefile, offset);
MPI_File_set_view(thefile, offset, MPI_INT, filetype, "native", MPI_INFO_NULL);
t1 = MPI_Wtime();
for ( i=0; i<LOOP; i++)
{
MPI_File_write_all(thefile, buf, ng, MPI_INT, &status);
}
t2 = MPI_Wtime();
tc1 = MPI_Wtime();
MPI_File_close(&thefile);
tc2 = MPI_Wtime();
et = (t2 - t1)/LOOP;
eto = (to2 - to1)/LOOP;
etc = (tc2 - tc1)/LOOP;
mbs = (((double)(LOOP*X*Y*Z)*sizeof(int)))/(1000000.0*(t2-t1));
/*printf(" %s[%3d] ET %8.2f %8.2f %8.2f %8.1f mbs\n", process_name, rank, t1, t2, t2-t1, mbs);*/
MPI_Barrier(MPI_COMM_WORLD);
MPI_Reduce(&mbs, &avg_mbs, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
MPI_Reduce(&mbs, &min_mbs, 1, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD);
MPI_Reduce(&mbs, &max_mbs, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
MPI_Reduce(&et, &avg_et, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
MPI_Reduce(&et, &min_et, 1, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD);
MPI_Reduce(&et, &max_et, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
MPI_Reduce(&eto, &avg_eto, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
MPI_Reduce(&eto, &min_eto, 1, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD);
MPI_Reduce(&eto, &max_eto, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
MPI_Reduce(&etc, &avg_etc, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
MPI_Reduce(&etc, &min_etc, 1, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD);
MPI_Reduce(&etc, &max_etc, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
fflush(stdout);
if ( rank == 0 )
{
mbs = avg_mbs/npes;
printf("\n average write rate: %9.1f mbs\n", mbs);
printf(" minimum write rate: %9.1f mbs\n", min_mbs);
printf(" maximum write rate: %9.1f mbs\n\n", max_mbs);
avg_eto = avg_eto/npes;
avg_et = avg_et/npes;
avg_etc = avg_etc/npes;
printf(" open time: %9.3f min %9.3f avg %9.3f max\n",min_eto,avg_eto,max_eto);
printf(" write time: %9.3f min %9.3f avg %9.3f max\n",min_et,avg_et,max_et);
printf(" close time: %9.3f min %9.3f avg %9.3f max\n\n",min_etc,avg_etc,max_etc);
fflush(stdout);
}
MPI_Finalize();
return 0;
}

227  test/simple/parallel_w8.c  (regular file)
@@ -0,0 +1,227 @@
/* parallel MPI write to a single file */
#include "mpi.h"
#include <stdio.h>
#include <string.h>
#define D 3 /* dimensions */
#define X 256 /* global x grid size */
#define Y 256 /* global y grid size */
#define Z 256 /* global z grid size */
#define nx 128 /* local x grid size */
#define ny 128 /* local y grid size */
#define nz 128 /* local z grid size */
#define ng (nx*ny*nz) /* local grid (cube) size */
#define npx 2 /* number of PE's in x direction */
#define npy 2 /* number of PE's in y direction */
#define npz 2 /* number of PE's in z direction */
#define np (npx*npy*npz) /* total PE count */
#define LOOP 1
#define MAX_RR_NAME 7
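/* Same collective write benchmark as parallel_w64.c, scaled down: run with
* exactly np = 2*2*2 = 8 ranks, each writing its 128^3 block of the 256^3
* integer grid. */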
int
main(int argc, char* argv[])
{
int i, rank, npes, bug=0;
int buf[ng];
MPI_File thefile;
MPI_Status status;
MPI_Datatype filetype;
MPI_Comm new_comm;
MPI_Offset offset=0;
MPI_Info info=MPI_INFO_NULL;
int gsize[D],distrib[D],dargs[D],psize[D];
int dims[D],periods[D],reorder;
double t1,t2,mbs;
double to1,to2,tc1,tc2;
double et,eto,etc;
double max_mbs,min_mbs,avg_mbs;
double max_et,min_et,avg_et;
double max_eto,min_eto,avg_eto;
double max_etc,min_etc,avg_etc;
char process_name[MPI_MAX_PROCESSOR_NAME + 1];
char rr_blank[] = {" "};
char rr_empty[] = {"???????"};
int count;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &npes);
if ( rank == 0 )
{
if ( argc < 2 )
{
printf(" ERROR: no filename given\n");
bug++;
}
if ( npes == np )
{
printf(" file name: %s\n",argv[1]);
printf(" total number of PE's: %3d\n",np);
printf(" number of PE's in x direction: %4d\n",npx);
printf(" number of PE's in y direction: %4d\n",npy);
printf(" number of PE's in z direction: %4d\n",npz);
printf(" global grid size: %dx%dx%d 4 byte integers (total %lu)\n",X,Y,Z,(unsigned long)X*Y*Z);
printf(" local grid size: %dx%dx%d 4 byte integers (total %d)\n",nx,ny,nz,ng);
}
else
{
printf(" ERROR: total number of PE's must be %d\n",np);
printf(" actual number of PE's was %d\n",npes);
bug++;
}
if ( bug )
{
MPI_Abort(MPI_COMM_WORLD,-1);
}
}
if ( MPI_Get_processor_name(process_name, &count) != MPI_SUCCESS)
{
sprintf(process_name, "%s", rr_empty);
}
else
{
if (count < MAX_RR_NAME) strncat(&process_name[count],rr_blank,MAX_RR_NAME-count);
process_name[MAX_RR_NAME] = '\0';
}
MPI_Barrier(MPI_COMM_WORLD);
MPI_Info_create(&info);
/* allow multiple writers to write to the file concurrently */
/*MPI_Info_set(info,"panfs_concurrent_write","1");*/
/* use data aggregation */
/*MPI_Info_set(info,"romio_cb_write","enable"); */
/*MPI_Info_set(info,"romio_cb_write","disable");*/
/*MPI_Info_set(info,"romio_cb_read","enable"); */
/*MPI_Info_set(info,"romio_cb_read","disable");*/
/* use one aggregator/writer per node */
/*MPI_Info_set(info,"cb_config_list","*:1");*/
/* aggregators/writers per allocation: use this or the above (both work) */
/*i = ((npes-1)/8) + 1;
sprintf(awpa,"%d",i);
MPI_Info_set (info,"cb_nodes",awpa);*/
for ( i=0; i<ng; i++ ) buf[i] = rank*10000 + (i+1)%1024;
for ( i=0; i<D; i++ )
{
periods[i] = 1; /* true */
}
reorder = 1; /* true */
dims[0] = npx;
dims[1] = npy;
dims[2] = npz;
MPI_Cart_create(MPI_COMM_WORLD, D, dims, periods, reorder, &new_comm);
for ( i=0; i<D; i++ )
{
distrib[i] = MPI_DISTRIBUTE_BLOCK;
dargs[i] = MPI_DISTRIBUTE_DFLT_DARG;
/* psize[i] = 0; */
}
gsize[0] = X;
gsize[1] = Y;
gsize[2] = Z;
psize[0] = npx;
psize[1] = npy;
psize[2] = npz;
/*
MPI_Dims_create(npes, D, psize);
printf("psize %d %d %d\n",psize[0],psize[1],psize[2]);
*/
MPI_Type_create_darray(npes, rank, D, gsize, distrib, dargs, psize, MPI_ORDER_FORTRAN, MPI_INT, &filetype);
/*MPI_Type_create_darray(npes, rank, D, gsize, distrib, dargs, psize, MPI_ORDER_C, MPI_INT, &filetype); don't do this */
MPI_Type_commit(&filetype);
to1 = MPI_Wtime();
MPI_File_open(new_comm, argv[1], MPI_MODE_WRONLY | MPI_MODE_CREATE, info, &thefile);
to2 = MPI_Wtime();
MPI_File_set_size(thefile, offset);
MPI_File_set_view(thefile, offset, MPI_INT, filetype, "native", MPI_INFO_NULL);
t1 = MPI_Wtime();
for ( i=0; i<LOOP; i++)
{
MPI_File_write_all(thefile, buf, ng, MPI_INT, &status);
}
t2 = MPI_Wtime();
tc1 = MPI_Wtime();
MPI_File_close(&thefile);
tc2 = MPI_Wtime();
et = (t2 - t1)/LOOP;
eto = (to2 - to1)/LOOP;
etc = (tc2 - tc1)/LOOP;
mbs = (((double)(LOOP*X*Y*Z)*sizeof(int)))/(1000000.0*(t2-t1));
/*printf(" %s[%3d] ET %8.2f %8.2f %8.2f %8.1f mbs\n", process_name, rank, t1, t2, t2-t1, mbs);*/
MPI_Barrier(MPI_COMM_WORLD);
MPI_Reduce(&mbs, &avg_mbs, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
MPI_Reduce(&mbs, &min_mbs, 1, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD);
MPI_Reduce(&mbs, &max_mbs, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
MPI_Reduce(&et, &avg_et, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
MPI_Reduce(&et, &min_et, 1, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD);
MPI_Reduce(&et, &max_et, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
MPI_Reduce(&eto, &avg_eto, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
MPI_Reduce(&eto, &min_eto, 1, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD);
MPI_Reduce(&eto, &max_eto, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
MPI_Reduce(&etc, &avg_etc, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
MPI_Reduce(&etc, &min_etc, 1, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD);
MPI_Reduce(&etc, &max_etc, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
fflush(stdout);
if ( rank == 0 )
{
mbs = avg_mbs/npes;
printf("\n average write rate: %9.1f mbs\n", mbs);
printf(" minimum write rate: %9.1f mbs\n", min_mbs);
printf(" maximum write rate: %9.1f mbs\n\n", max_mbs);
avg_eto = avg_eto/npes;
avg_et = avg_et/npes;
avg_etc = avg_etc/npes;
printf(" open time: %9.3f min %9.3f avg %9.3f max\n",min_eto,avg_eto,max_eto);
printf(" write time: %9.3f min %9.3f avg %9.3f max\n",min_et,avg_et,max_et);
printf(" close time: %9.3f min %9.3f avg %9.3f max\n\n",min_etc,avg_etc,max_etc);
fflush(stdout);
}
MPI_Finalize();
return 0;
}

301  test/simple/pinterlib.c  (regular file)
@@ -0,0 +1,301 @@
/* -*- C -*-
*
* $HEADER$
*
* The most basic of MPI applications
*/
#include <stdio.h>
#include <pthread.h>
#include "mpi.h"
#include "pmix.h"
typedef struct {
pthread_mutex_t mutex;
pthread_cond_t cond;
volatile bool active;
pmix_status_t status;
} mylock_t;
#define MY_CONSTRUCT_LOCK(l) \
do { \
pthread_mutex_init(&(l)->mutex, NULL); \
pthread_cond_init(&(l)->cond, NULL); \
(l)->active = true; \
(l)->status = PMIX_SUCCESS; \
} while(0)
#define MY_DESTRUCT_LOCK(l) \
do { \
pthread_mutex_destroy(&(l)->mutex); \
pthread_cond_destroy(&(l)->cond); \
} while(0)
#define MY_WAIT_THREAD(lck) \
do { \
pthread_mutex_lock(&(lck)->mutex); \
while ((lck)->active) { \
pthread_cond_wait(&(lck)->cond, &(lck)->mutex); \
} \
pthread_mutex_unlock(&(lck)->mutex); \
} while(0)
#define MY_WAKEUP_THREAD(lck) \
do { \
pthread_mutex_lock(&(lck)->mutex); \
(lck)->active = false; \
pthread_cond_broadcast(&(lck)->cond); \
pthread_mutex_unlock(&(lck)->mutex); \
} while(0)
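/* Usage pattern for the macros above: construct a mylock_t, hand it to a
* non-blocking PMIx call as the cbdata, wait on it until the callback fires
* MY_WAKEUP_THREAD, then destruct it. */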
static size_t interlibhandler_id = SIZE_MAX;
static mylock_t thread_complete;
static pmix_proc_t myproc;
static void model_registration_callback(pmix_status_t status,
size_t errhandler_ref,
void *cbdata)
{
mylock_t *lock = (mylock_t*)cbdata;
interlibhandler_id = errhandler_ref;
MY_WAKEUP_THREAD(lock);
}
static void model_callback(size_t evhdlr_registration_id,
pmix_status_t status,
const pmix_proc_t *source,
pmix_info_t info[], size_t ninfo,
pmix_info_t *results, size_t nresults,
pmix_event_notification_cbfunc_fn_t cbfunc,
void *cbdata)
{
size_t n;
/* we can ignore our own callback as we obviously
* know that we are OpenMP */
if (NULL != info) {
for (n=0; n < ninfo; n++) {
if (0 == strcmp(info[n].key, PMIX_PROGRAMMING_MODEL) &&
0 == strcmp(info[n].value.data.string, "OpenMP")) {
goto cback;
}
if (PMIX_STRING == info[n].value.type) {
fprintf(stderr, "Thread Model Callback Key: %s Val %s\n", info[n].key, info[n].value.data.string);
}
}
}
/* otherwise, do something clever here */
cback:
/* we must NOT tell the event handler state machine that we
* are the last step as that will prevent it from notifying
* anyone else that might be listening for declarations */
if (NULL != cbfunc) {
cbfunc(PMIX_SUCCESS, NULL, 0, NULL, NULL, cbdata);
}
MY_WAKEUP_THREAD(&thread_complete);
}
static void opcbfunc(pmix_status_t status, void *cbdata)
{
mylock_t *lock = (mylock_t*)cbdata;
MY_WAKEUP_THREAD(lock);
}
static void infocb(pmix_status_t status,
pmix_info_t *info, size_t ninfo,
void *cbdata,
pmix_release_cbfunc_t release_fn,
void *release_cbdata)
{
mylock_t *lock = (mylock_t*)cbdata;
size_t n;
for (n=0; n < ninfo; n++) {
fprintf(stderr, "QUERY DATA KEY: %s VALUE %s\n", info[n].key, info[n].value.data.string);
}
if (NULL != release_fn) {
release_fn(release_cbdata);
}
MY_WAKEUP_THREAD(lock);
}
static void *mylib(void *ptr)
{
pmix_info_t *info, *directives;
pmix_status_t ret;
mylock_t lock;
bool init = false, flag;
pmix_query_t *query;
pmix_pdata_t *pdata;
pmix_status_t code = PMIX_MODEL_DECLARED;
pmix_value_t *val;
int wait = 0;
MY_CONSTRUCT_LOCK(&thread_complete);
/* declare that we are present and active */
PMIX_INFO_CREATE(info, 5);
PMIX_INFO_LOAD(&info[0], PMIX_PROGRAMMING_MODEL, "OpenMP", PMIX_STRING);
PMIX_INFO_LOAD(&info[1], PMIX_MODEL_LIBRARY_NAME, "foobar", PMIX_STRING);
PMIX_INFO_LOAD(&info[2], PMIX_MODEL_LIBRARY_VERSION, "1.2.3.4", PMIX_STRING);
PMIX_INFO_LOAD(&info[3], PMIX_THREADING_MODEL, "PTHREAD", PMIX_STRING);
/* mark that this isn't to go to any default event handler - pmix_init
* takes care of that for us, but we have to explicitly do it here */
flag = true;
PMIX_INFO_LOAD(&info[4], PMIX_EVENT_NON_DEFAULT, &flag, PMIX_BOOL);
/* see if pmix is already initialized - note that if we
* don't know our process identifier at this point (e.g.,
* we don't store it in some global location), then we
* could always call PMIx_Init anyway as it is just
* reference counted. */
if (PMIx_Initialized()) {
/* it is, so let's just use the event notification
* API to let everyone know we are here */
MY_CONSTRUCT_LOCK(&lock);
ret = PMIx_Notify_event(code, &myproc,
PMIX_RANGE_PROC_LOCAL,
info, 5,
opcbfunc, (void*)&lock);
MY_WAIT_THREAD(&lock);
MY_DESTRUCT_LOCK(&lock);
} else {
/* call pmix to initialize these values */
ret = PMIx_Init(&myproc, info, 5);
init = true;
}
PMIX_INFO_FREE(info, 5);
/* register to receive model callbacks */
PMIX_INFO_CREATE(directives, 1);
/* give the event a name so we can distinguish it */
PMIX_INFO_LOAD(&directives[0], PMIX_EVENT_HDLR_NAME, "My-Declarations", PMIX_STRING);
/* we could constrain the range to proc_local - technically, this
* isn't required so long as the code that generates
* the event stipulates its range as proc_local. We rely
* on that here */
MY_CONSTRUCT_LOCK(&lock);
PMIx_Register_event_handler(&code, 1, directives, 1,
model_callback,
model_registration_callback,
(void*)&lock);
MY_WAIT_THREAD(&lock);
MY_DESTRUCT_LOCK(&lock);
PMIX_INFO_FREE(directives, 1);
/* wait for the model callback */
MY_WAIT_THREAD(&thread_complete);
/* let's do a couple of operations just to verify we can,
* starting with a query */
PMIX_QUERY_CREATE(query, 1);
PMIX_ARGV_APPEND(ret, query->keys, PMIX_QUERY_NAMESPACES);
MY_CONSTRUCT_LOCK(&lock);
PMIx_Query_info_nb(query, 1, infocb, &lock);
MY_WAIT_THREAD(&lock);
MY_DESTRUCT_LOCK(&lock);
PMIX_QUERY_FREE(query, 1);
/* Get something */
val = NULL;
PMIx_Get(&myproc, "WASSUP", NULL, 0, &val);
if (NULL == val) {
fprintf(stderr, "ERROR GETTING WASSUP\n");
} else {
fprintf(stderr, "THREAD WASSUP: %s\n", val->data.string);
PMIX_VALUE_FREE(val, 1);
}
/* lookup something published by the main thread */
PMIX_PDATA_CREATE(pdata, 1);
PMIX_PDATA_LOAD(&pdata[0], &myproc, "SOMETHING", NULL, PMIX_BOOL);
/* tell the call to wait for the data to be published */
PMIX_INFO_CREATE(directives, 1);
PMIX_INFO_LOAD(&directives[0], PMIX_WAIT, &wait, PMIX_INT);
if (PMIX_SUCCESS != PMIx_Lookup(pdata, 1, directives, 1)) {
fprintf(stderr, "LOOKUP FAILED\n");
} else {
fprintf(stderr, "LOOKUP RETURNED %s\n", pdata[0].value.data.string);
}
PMIX_PDATA_FREE(pdata, 1);
PMIX_INFO_FREE(directives, 1);
if (init) {
/* need to finalize to maintain refcount */
PMIx_Finalize(NULL, 0);
}
/* done */
return NULL;
}
int main(int argc, char* argv[])
{
int rank, size, rc;
pid_t pid;
pthread_t mythread;
bool before = false;
pmix_info_t *info;
pmix_value_t value;
char *valstring;
pmix_data_range_t range = PMIX_RANGE_LOCAL;
if (1 < argc) {
if (0 == strcmp(argv[1], "-b") || 0 == strcmp(argv[1], "--before")) {
before = true;
}
}
if (before) {
/* spin up a thread */
if (pthread_create(&mythread, NULL, mylib, NULL)) {
fprintf(stderr, "Error creating thread\n");
goto done;
}
}
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
pid = getpid();
if (!before) {
/* spin up a thread */
if (pthread_create(&mythread, NULL, mylib, NULL)) {
fprintf(stderr, "Error creating thread\n");
goto done;
}
}
/* push something the thread can recognize */
PMIX_VALUE_CONSTRUCT(&value);
value.type = PMIX_STRING;
value.data.string = strdup("nothing");
PMIx_Put(PMIX_LOCAL, "WASSUP", &value);
PMIX_VALUE_DESTRUCT(&value);
/* no need to commit it as this is strictly within ourselves */
printf("Hello, World, I am %d of %d\n", rank, size);
/* publish something */
PMIX_INFO_CREATE(info, 2);
PMIX_INFO_LOAD(&info[0], "SOMETHING", "foobar", PMIX_STRING);
PMIX_INFO_LOAD(&info[1], PMIX_RANGE, &range, PMIX_DATA_RANGE);
PMIx_Publish(info, 2);
PMIX_INFO_FREE(info, 2);
/* wait for the thread to finish */
if (pthread_join(mythread, NULL)) {
fprintf(stderr, "Error joining thread\n");
}
done:
MPI_Finalize();
return 0;
}

130  test/simple/pmix.c  (regular file)
@@ -0,0 +1,130 @@
/*
* Copyright (c) 2015 Mellanox Technologies, Inc.
* All rights reserved.
* Copyright (c) 2016 Intel, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
/*
* To compile test:
* mpicc -I$src_dir -I$src_dir/opal/include -I$src_dir/orte/include -I$src_dir/ompi/include -DOMPI_BUILDING=1 pmix.c -o pmix
* To run test:
* mpirun -np 2 <any mca params> ./pmix
* Test should print "Passed" in case of success and print pmix time intervals at process with rank 0.
* */
#include <stdio.h>
#include <unistd.h>
#include <mpi.h>
#include <time.h>
#include <sys/time.h>
#include "opal/class/opal_list.h"
#include "opal/mca/pmix/pmix.h"
#include "ompi/proc/proc.h"
#define DO_FINALIZE(rc,flag,format,args...) \
do { \
if (flag) { \
fprintf(stderr, format, args); \
} \
MPI_Finalize(); \
return rc; \
} while(0);
static int my_rank;
static volatile bool waiting = true;
static double get_timestamp(void)
{
struct timeval tv;
gettimeofday(&tv, NULL);
return ((tv.tv_sec) + (tv.tv_usec) * 1.0e-6);
}
static void evhandler(int status,
const opal_process_name_t *source,
opal_list_t *info, opal_list_t *results,
opal_pmix_notification_complete_fn_t cbfunc,
void *cbdata)
{
fprintf(stderr, "%d: received notification status %d\n", my_rank, status);
if (NULL != cbfunc) {
cbfunc(OPAL_ERR_HANDLERS_COMPLETE, NULL, NULL, NULL, cbdata);
}
waiting = false;
}
int main(int argc, char* argv[])
{
int rc;
int recv_data;
size_t i, numprocs;
ompi_proc_t **procs, *thisproc;
double t0, t1, t2, t3, t4, t5, t6;
int *ptr;
struct timespec tp;
opal_list_t info;
opal_value_t *kv;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
/* register an event */
OBJ_CONSTRUCT(&info, opal_list_t);
kv = OBJ_NEW(opal_value_t);
kv->key = strdup(OPAL_PMIX_EVENT_ORDER_PREPEND);
opal_list_append(&info, &kv->super);
opal_pmix.register_evhandler(NULL, &info, evhandler, NULL, NULL);
int data = my_rank;
t0 = get_timestamp();
OPAL_MODEX_SEND_VALUE(rc, OPAL_PMIX_GLOBAL, "MY_RANK", &data, OPAL_INT);
t1 = get_timestamp();
if (OPAL_SUCCESS != rc) {
DO_FINALIZE(rc, 1, "[%d] OPAL_MODEX_SEND_VALUE failed.\n", my_rank);
}
t2 = get_timestamp();
opal_pmix.commit();
opal_pmix.fence(NULL, 1);
t3 = get_timestamp();
procs = ompi_proc_world ( &numprocs );
ptr = &recv_data;
t4 = get_timestamp();
for ( i = 0; i < numprocs; i++ ) {
thisproc = procs[i];
OPAL_MODEX_RECV_VALUE(rc, "MY_RANK", &thisproc->super.proc_name, (void**)&ptr, OPAL_INT);
/* check return status and received data */
if (OPAL_SUCCESS != rc || i != recv_data) {
rc = OPAL_ERROR;
DO_FINALIZE(rc, 1, "[%d] OPAL_MODEX_RECV_VALUE failed from rank %d.\n", my_rank, i);
}
}
t5 = get_timestamp();
/* using fence as a barrier */
opal_pmix.fence(NULL, 0);
t6 = get_timestamp();
fprintf(stderr, "[%d] Test passed.\n", my_rank);
fprintf(stderr, "[%d] \"MODEX_SEND\" %f\n", my_rank, t1-t0);
fprintf(stderr, "[%d] \"FENCE\" %f\n", my_rank, t3-t2);
fprintf(stderr, "[%d] \"MODEX_RECV\" %f\n", my_rank, t5-t4);
fprintf(stderr, "[%d] \"BARRIER\" %f\n", my_rank, t6-t5);
fprintf(stderr, "[%d] \"TOTAL\" %f\n", my_rank, t6-t0);
fprintf(stderr, "[%d] Pid %d waiting for notification\n", my_rank, (int)getpid());
/* now wait for notification of someone failing */
tp.tv_sec = 0;
tp.tv_nsec = 100000;
waiting = true;
while (waiting) {
nanosleep(&tp, NULL);
}
free(procs);
DO_FINALIZE(0, 0, 0, 0);
}

64  test/simple/pubsub.c  (regular file)
@@ -0,0 +1,64 @@
/* -*- C -*-
*
* $HEADER$
*
* The most basic of MPI applications
*/
#include <stdio.h>
#include <stdbool.h>
#include <string.h>
#include "mpi.h"
int main(int argc, char* argv[])
{
int rank, size;
MPI_Info info, srch;
char port[MPI_MAX_PORT_NAME];
bool local=false;
if (1 < argc) {
if (0 == strcmp("local", argv[1])) {
local = true;
}
}
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
printf("Hello, World, I am %d of %d\n", rank, size);
MPI_Info_create(&info);
if (local) {
MPI_Info_set(info, "ompi_global_scope", "false");
} else {
MPI_Info_set(info, "ompi_global_scope", "true");
}
if (0 == rank) {
MPI_Open_port(MPI_INFO_NULL, port);
MPI_Publish_name("pubsub-test", info, port);
printf("Rank %d published port %s\n", rank, port);
}
MPI_Barrier(MPI_COMM_WORLD);
MPI_Info_create(&srch);
MPI_Info_set(srch, "ompi_lookup_order", "local,global");
if (rank != 0) {
MPI_Lookup_name("pubsub-test", srch, port);
printf("Rank %d got port %s\n", rank, port);
}
MPI_Barrier(MPI_COMM_WORLD);
if (0 == rank) {
MPI_Unpublish_name("pubsub-test", info, port);
}
MPI_Info_free(&info);
MPI_Info_free(&srch);
MPI_Finalize();
return 0;
}

56  test/simple/read_write.c  (regular file)
@@ -0,0 +1,56 @@
/*
* A simple MPI test that reads lines from standard input and writes them
* to both standard output and a file
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <mpi.h>
int main(int argc, char *argv[])
{
int self;
int size;
int value;
char line[1024];
FILE *file;
unsigned int bytes = 0;
int reader = 0;
char *junk;
if (2 == argc) {
/* a reader was specified */
reader = strtol(argv[1], NULL, 10);
fprintf(stderr, "reading from %d\n", reader);
}
MPI_Init(NULL, NULL);
MPI_Comm_rank(MPI_COMM_WORLD, &self);
MPI_Comm_size(MPI_COMM_WORLD, &size);
printf("Hello from process %d of %d\n", self, size);
MPI_Barrier(MPI_COMM_WORLD);
if (-1 == reader || reader == self) {
opal_asprintf(&junk, "./junk%d", self);
unlink(junk);
file = fopen(junk, "w+");
if (NULL == file) {
fprintf(stderr, "Couldn't open %s!", junk);
free(junk);
MPI_Abort(MPI_COMM_WORLD, 1);
}
while (NULL != fgets(line, sizeof(line), stdin)) {
fprintf(stderr, "%s", line);
fprintf(file, "%s", line);
bytes += strlen(line) + 1;
}
fclose(file);
fprintf(stderr, "\nWrote %d bytes to %s\n", bytes, junk);
free(junk);
}
MPI_Finalize();
return 0;
}

37  test/simple/reduce-hang.c  (regular file)
@@ -0,0 +1,37 @@
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
int main(int argc, char *argv[])
{
long count = 0;
int i = 8, j;
int self;
int do_barrier = 0;
int k;
double pi;
if (getenv("DO_BARRIER")) {
do_barrier = 1;
}
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &self);
while (1) {
#if 0
for (k=0; k < (7-self)*1000; k++) {
pi = 3.14159 * 18.0 / 35.3;
}
#endif
MPI_Reduce(&i, &j, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
if (do_barrier) {
MPI_Barrier(MPI_COMM_WORLD);
}
if (0 == (++count % 10000)) {
fprintf(stderr, "%d still going at %ld\n", self, count);
}
}
MPI_Finalize();
return 0;
}

63  test/simple/ring.c  (regular file)
@@ -0,0 +1,63 @@
#include <stdlib.h>
#include <stdio.h>
#include <mpi.h>
#define SIZE 20
#define POS 10
#define INITIAL_VALUE 10
int main(int argc, char *argv[])
{
int i, rank, size, next, prev, tag = 201;
int array_size = SIZE;
int pos = POS;
int *send_array;
int *recv_array;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
fprintf(stderr, "Rank %d has cleared MPI_Init\n", rank);
next = (rank + 1) % size;
prev = (rank + size - 1) % size;
send_array = malloc(sizeof(int) * SIZE);
recv_array = malloc(sizeof(int) * SIZE);
for (i = 0; i < array_size; ++i) {
send_array[i] = 17;
recv_array[i] = -1;
}
if (0 == rank) {
send_array[pos] = INITIAL_VALUE;
MPI_Send(send_array, array_size, MPI_INT, next, tag,
MPI_COMM_WORLD);
}
while (1) {
recv_array[pos] = -1;
MPI_Recv(recv_array, array_size, MPI_INT, prev, tag,
MPI_COMM_WORLD, MPI_STATUS_IGNORE);
send_array[pos] = recv_array[pos];
if (rank == 0) {
--send_array[pos];
}
MPI_Send(send_array, array_size, MPI_INT, next, tag, MPI_COMM_WORLD);
if (0 == send_array[pos]) {
break;
}
}
if (rank == 0) {
MPI_Recv(recv_array, array_size, MPI_INT, prev, tag,
MPI_COMM_WORLD, MPI_STATUS_IGNORE);
}
fprintf(stderr, "Rank %d has completed ring\n", rank);
MPI_Barrier(MPI_COMM_WORLD);
fprintf(stderr, "Rank %d has completed MPI_Barrier\n", rank);
MPI_Finalize();
return 0;
}

30  test/simple/segv.c  (regular file)
@@ -0,0 +1,30 @@
/* -*- C -*-
*
* $HEADER$
*
* The most basic of MPI applications
*/
#include <stdio.h>
#include <unistd.h>
#include "mpi.h"
int main(int argc, char* argv[])
{
int rank, size;
char *foo=0;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
printf("Hello, World, I am %d of %d\n", rank, size);
if (1 == rank) {
sleep(2);
*foo = 42;
}
MPI_Finalize();
return 0;
}

189  test/simple/sendrecv_blaster.c  (regular file)
@@ -0,0 +1,189 @@
/*
*/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <time.h>
#include <sys/time.h>
#include "mpi.h"
int main(int argc, char *argv[])
{
MPI_Status status; /* MPI status */
int mpierr; /* MPI function return code */
int rank; /* Process rank within MPI_COMM_WORLD */
int size;
int dest, src;
int tag0=41; /* MPI message tag */
int inject;
int report;
int iterations;
int n_bytes;
unsigned char* send_buff;
unsigned char* recv_buff;
char* tmp;
int i, j, count;
float fraction, randval;
struct timeval tp;
if (1 < argc) {
if (0 == strncmp(argv[1], "-h", 2) ||
0 == strncmp(argv[1], "--h", 3)) {
printf("Usage: mpirun --options-- ./sendrecv_blaster <options> where options are:\n"
"\tpattern=[self | pair | ring] where\n"
"\t\tself => sendrecv with self\n"
"\t\tpair => sendrecv with a complementary partner [0 <-> N-1, 1 <-> N-2...]\n"
"\t\tring [default] => sendrecv around a ring [0 recvs from N-1 and sends to 1]\n"
"\tsize=[value < 0 => max message size in kbytes, value > 0 => max message size in Mbytes (default=1MByte)]\n"
"\tinject=[value = #iterations before injecting MPI_Sendrecv to self (default: never)]\n"
"\treport=[value = #iterations/reporting point (default: 1000)\n"
"\titerations=[value = #iterations before stopping (default: 1000000)\n");
return 0;
}
}
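/* Illustrative invocation (hypothetical values), combining the options above:
* mpirun -np 4 ./sendrecv_blaster pattern=ring size=1 report=5000 iterations=100000
*/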
mpierr = MPI_Init(&argc, &argv);
if (mpierr != MPI_SUCCESS)
{
fprintf(stderr, "MPI Error %d (MPI_Init)\n",mpierr);
fflush(stderr);
MPI_Abort(MPI_COMM_WORLD, -1);
}
MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
mpierr = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
if (mpierr != MPI_SUCCESS || rank < 0)
{
fprintf(stderr, "MPI Error %d (MPI_Comm_rank)\n",mpierr);
fflush(stderr);
MPI_Abort(MPI_COMM_WORLD, -1);
}
mpierr = MPI_Comm_size(MPI_COMM_WORLD, &size);
if (mpierr != MPI_SUCCESS || size < 0)
{
fprintf(stderr, "MPI Error %d (MPI_Comm_size)\n",mpierr);
fflush(stderr);
MPI_Abort(MPI_COMM_WORLD, -1);
}
/* setup defaults in lieu of args */
n_bytes = 1024*1024;
inject = -1;
report = 1000;
iterations = 1000000;
/* do a ring */
src = rank - 1;
if (src < 0) {
src = size - 1;
}
dest = rank + 1;
if (dest > size-1) {
dest = 0;
}
for (i=1; i < argc; i++) {
fprintf(stderr, "got %s\n", argv[i]);
if (0 == strncmp(argv[i], "pattern", strlen("pattern"))) {
tmp = strchr(argv[i], '=');
tmp++;
if (0 == strcmp(tmp, "self")) {
/* just do it with myself */
src = rank;
dest = rank;
} else if (0 == strcmp(tmp, "pair")) {
/* do it pair-wise */
src = (size-1) - rank;
dest = src;
} else {
/* do a ring */
src = rank - 1;
if (src < 0) {
src = size - 1;
}
dest = rank + 1;
if (dest > size-1) {
dest = 0;
}
}
} else if (0 == strncmp(argv[i], "size", strlen("size"))) {
tmp = strchr(argv[i], '=');
tmp++;
n_bytes = atoi(tmp);
if (n_bytes < 0) {
n_bytes = -1 * n_bytes * 1024;
} else {
n_bytes = n_bytes * 1024*1024;
}
} else if (0 == strncmp(argv[i], "inject", strlen("inject"))) {
tmp = strchr(argv[i], '=');
tmp++;
inject = atoi(tmp);
} else if (0 == strncmp(argv[i], "report", strlen("report"))) {
tmp = strchr(argv[i], '=');
tmp++;
report = atoi(tmp);
} else if (0 == strncmp(argv[i], "iter", strlen("iter"))) {
tmp = strchr(argv[i], '=');
tmp++;
iterations = atoi(tmp);
}
}
send_buff = (unsigned char *) valloc(n_bytes);
recv_buff = (unsigned char *) valloc(n_bytes);
/* seed the random number generator */
gettimeofday (&tp, NULL);
srand (tp.tv_usec);
for ( i=0; i<n_bytes; i++ )
{
send_buff[i] = i%128;
}
fprintf(stderr, "Rank %d: recving from src %d sending to dest %d with max buff size %dKbytes\n",
rank, src, dest, n_bytes/1024);
i=0;
while (i < iterations)
{
randval = rand();
fraction = randval/RAND_MAX;
count = fraction * n_bytes;
mpierr = MPI_Sendrecv(send_buff, count, MPI_CHAR, dest, tag0,
recv_buff, n_bytes, MPI_CHAR, src, tag0, MPI_COMM_WORLD, &status);
if (mpierr != MPI_SUCCESS)
{
fprintf(stderr,"MPI Error %d (MPI_Sendrecv) [%d,%d] at iteration %d\n",mpierr,src,dest,i);
fflush(stderr);
MPI_Abort(MPI_COMM_WORLD, -1);
}
i++;
if (0 == (i % report)) {
fprintf(stderr, "Rank %d has completed %dk iterations\n", rank, i/1000);
}
if (0 < inject && 0 == (i % inject)) {
mpierr = MPI_Sendrecv(send_buff, count, MPI_CHAR, rank, tag0,
recv_buff, n_bytes, MPI_CHAR, rank, tag0, MPI_COMM_WORLD, &status);
if (mpierr != MPI_SUCCESS)
{
fprintf(stderr,"MPI Error %d (MPI_Sendrecv) [%d,%d] at iteration %d\n",mpierr,rank,rank,i);
fflush(stderr);
MPI_Abort(MPI_COMM_WORLD, -1);
} else {
fprintf(stderr, "Rank %d has completed MPI_Sendrecv with myself\n", rank);
}
}
}
fprintf(stderr, "Rank %d completed test\n", rank);
MPI_Finalize();
}

55  test/simple/server.c  (regular file)
@@ -0,0 +1,55 @@
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include "mpi.h"
#define MAX_DATA 100
int main( int argc, char **argv )
{
MPI_Comm client;
MPI_Status status;
char port_name[MPI_MAX_PORT_NAME];
double buf[MAX_DATA];
int size, again;
MPI_Init( &argc, &argv );
MPI_Comm_size(MPI_COMM_WORLD, &size);
if (size != 1) {
fprintf(stderr, "Server too big - need only 1 rank\n");
exit(1);
}
MPI_Open_port(MPI_INFO_NULL, port_name);
printf("server available at %s\n",port_name);
while (1)
{
MPI_Comm_accept( port_name, MPI_INFO_NULL, 0, MPI_COMM_WORLD, &client );
again = 1;
while (again)
{
fprintf(stderr, "Server loop %d\n", again);
MPI_Recv( buf, MAX_DATA, MPI_DOUBLE, MPI_ANY_SOURCE,
MPI_ANY_TAG, client, &status );
switch (status.MPI_TAG)
{
case 0:
fprintf(stderr, "Server recvd terminate cmd\n");
MPI_Comm_disconnect( &client );
MPI_Close_port(port_name);
MPI_Finalize();
return 0;
case 2: /* do something */
fprintf( stderr, "Do something ...\n" );
break;
default:
/* Unexpected message type */
MPI_Abort( MPI_COMM_WORLD, 1 );
}
++again;
}
}
}

3  test/simple/shell_hello  (executable file)
@@ -0,0 +1,3 @@
#!/bin/sh
echo Hello world! I am $MPI_COMM_WORLD_RANK of $MPI_COMM_WORLD_SIZE.
exit 0

67  test/simple/simple_spawn.c  (regular file)
@@ -0,0 +1,67 @@
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <unistd.h>
#include <sys/param.h>
#include "opal/runtime/opal.h"
#include <mpi.h>
int main(int argc, char* argv[])
{
int msg, rc;
MPI_Comm parent, child;
int rank, size;
const char *hostname;
pid_t pid;
char *env_rank,*env_nspace;
env_rank = getenv("PMIX_RANK");
env_nspace = getenv("PMIX_NAMESPACE");
pid = getpid();
hostname = opal_gethostname();
printf("[%s:%s pid %ld] starting up on node %s!\n", env_nspace, env_rank, (long)pid, hostname);
MPI_Init(NULL, NULL);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
printf("%d completed MPI_Init\n", rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Comm_get_parent(&parent);
/* If we get COMM_NULL back, then we're the parent */
if (MPI_COMM_NULL == parent) {
pid = getpid();
printf("Parent [pid %ld] about to spawn!\n", (long)pid);
if (MPI_SUCCESS != (rc = MPI_Comm_spawn(argv[0], MPI_ARGV_NULL, 3, MPI_INFO_NULL,
0, MPI_COMM_WORLD, &child, MPI_ERRCODES_IGNORE))) {
printf("Child failed to spawn\n");
return rc;
}
printf("Parent done with spawn\n");
if (0 == rank) {
msg = 38;
printf("Parent sending message to child\n");
MPI_Send(&msg, 1, MPI_INT, 0, 1, child);
}
MPI_Comm_disconnect(&child);
printf("Parent disconnected\n");
}
/* Otherwise, we're the child */
else {
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
pid = getpid();
printf("Hello from the child %d of %d on host %s pid %ld\n", rank, 3, hostname, (long)pid);
if (0 == rank) {
MPI_Recv(&msg, 1, MPI_INT, 0, 1, parent, MPI_STATUS_IGNORE);
printf("Child %d received msg: %d\n", rank, msg);
}
MPI_Comm_disconnect(&parent);
printf("Child %d disconnected\n", rank);
}
MPI_Finalize();
fprintf(stderr, "%d: exiting\n", pid);
return 0;
}

212  test/simple/singleton_client_server.c  (regular file)
@@ -0,0 +1,212 @@
#include "orte_config.h"
#include "opal/runtime/opal.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <mpi.h>
/*
LOGIC:
- the 'server' opens a port and write the info to a file
- the 'clients' open the file and connect to the port
- after each accept, the server and client do a merge to
convert the intercomm to an intracomm
DETAIL STEPS:
- server open port
- server does accept
- client #1 does connect
- server and client #1 do merge
- server does accept
- client #2 does connect
- server, client #1 and client #2 do merge
- server does accept
- client #3 does connect
- server, client #1, client #2 and client #3 do merge
*/
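/* Hypothetical example with three clients (see the usage string below): start
* "singleton_client_server 3 1" once as the server, then run
* "singleton_client_server 3 0" three times, one per client. */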
#define TAG 0
#define CHK(code) do \
{ \
int retval = code ; \
if (retval != MPI_SUCCESS) \
{ \
fprintf(stderr, "Error: " #code "\n") ; \
exit(1) ; \
} \
} while(0)
int main(int argc, char *argv[])
{
const char *hostname ;
char buff[255] ;
int role ;
int num_clients ;
int size, rank ;
int temp_errno ;
FILE *fp ;
char server_port_name[MPI_MAX_PORT_NAME] ;
MPI_Comm intercomm, intracomm ;
MPI_Status status ;
int msg_count ;
int i ;
/* sanity check the args */
if(argc != 3)
{
fprintf(stderr, "usage %s <num clients> <1:server | 0:client>\n", argv[0]) ;
exit(1) ;
}
num_clients = atoi(argv[1]) ;
role = atoi(argv[2]) ;
if (num_clients <= 0 || (role != 0 && role != 1))
{
fprintf(stderr, "usage %s <num clients> <1:server | 0:client>\n", argv[0]) ;
exit(1) ;
}
/* initialize MPI */
CHK(MPI_Init(&argc, &argv)) ;
/* get the node name */
/* The opal_gethostname() function is just a wrapper that returns a global
variable value that is set earlier, so we don't check for errors here */
hostname = opal_gethostname();
/* server */
if(role == 1)
{
printf("SERVER: on node '%s'\n", hostname) ;
/* open port to establish connections */
CHK(MPI_Open_port(MPI_INFO_NULL, server_port_name)) ;
printf("SERVER: opened port=%s\n", server_port_name) ;
/* store the port name */
fp = fopen("server_port_name.txt", "w") ;
if(fp == NULL)
{
fprintf(stderr, "fopen failed: %s\n", strerror(errno)) ;
exit(1) ;
}
fprintf(fp, "%s", server_port_name) ;
fclose(fp) ;
/* the server accepts connections from all the clients */
for(i = 0 ; i < num_clients ; i++ )
{
/* accept connections at this port */
CHK(MPI_Comm_accept(server_port_name, MPI_INFO_NULL, 0,
i == 0 ? MPI_COMM_WORLD : intracomm,
&intercomm)) ;
printf("SERVER: accepted connection from client %d\n", i+1) ;
/* merge, to form one intra communicator */
CHK(MPI_Intercomm_merge(intercomm, 0, &intracomm)) ;
printf("SERVER: merged with client %d\n", i+1) ;
CHK(MPI_Comm_size(intracomm, &size)) ;
CHK(MPI_Comm_rank(intracomm, &rank)) ;
printf("SERVER: after merging with client %d: size=%d rank=%d\n", i+1, size, rank) ;
}
} /* end server */
/* client */
if(role == 0)
{
printf("CLIENT: on node '%s'\n", hostname) ;
fp = fopen("server_port_name.txt", "r") ;
if(fp == NULL)
{
fprintf(stderr, "fopen failed: %s\n", strerror(errno)) ;
exit(1) ;
}
fscanf(fp, "%s", server_port_name) ;
fclose(fp) ;
printf("CLIENT: attempting to connect to server on port=%s\n", server_port_name) ;
/* connect to the server */
CHK(MPI_Comm_connect (server_port_name, MPI_INFO_NULL, 0, MPI_COMM_WORLD, &intercomm)) ;
printf("CLIENT: connected to server on port\n") ;
/* merge the server and client to one intra communicator */
CHK(MPI_Intercomm_merge(intercomm, 1, &intracomm)) ;
printf("CLIENT: merged with existing intracomm\n") ;
CHK(MPI_Comm_size(intracomm, &size)) ;
CHK(MPI_Comm_rank(intracomm, &rank)) ;
printf("CLIENT: after merging, new comm: size=%d rank=%d\n", size, rank) ;
for (i = rank ; i < num_clients ; i++)
{
/* client performs a collective accept */
CHK(MPI_Comm_accept(server_port_name, MPI_INFO_NULL, 0, intracomm, &intercomm)) ;
printf("CLIENT: connected to server on port\n") ;
/* merge the two intra comms back to one communicator */
CHK(MPI_Intercomm_merge(intercomm, 0, &intracomm)) ;
printf("CLIENT: merged with existing members\n") ;
CHK(MPI_Comm_size(intracomm, &size)) ;
CHK(MPI_Comm_rank(intracomm, &rank)) ;
printf("CLIENT: new size after merging with existing members: size=%d rank=%d\n", size, rank) ;
}
} /* end client */
CHK(MPI_Comm_size(intracomm, &size)) ;
CHK(MPI_Comm_rank(intracomm, &rank)) ;
printf("After fusion: size=%d rank=%d\n", size, rank) ;
if(rank == 0)
{
msg_count = num_clients ;
while(msg_count)
{
CHK(MPI_Recv(buff, 255, MPI_CHAR, MPI_ANY_SOURCE,
MPI_ANY_TAG, intracomm, &status)) ;
printf("Received hello msg from '%s'\n", buff) ;
msg_count-- ;
}
}
else
{
/* all ranks > 0 */
CHK(MPI_Send(hostname, strlen(hostname) + 1, MPI_CHAR, 0, TAG, intracomm)) ;
}
CHK(MPI_Finalize()) ;
fprintf(stderr, "Rank %d is exiting\n", rank);
return 0 ;
}

23  test/simple/sio.c  (regular file)
@@ -0,0 +1,23 @@
/* -*- C -*-
*
* $HEADER$
*
* The most basic of MPI applications
*/
#include <stdio.h>
#include "mpi.h"
int main(int argc, char* argv[])
{
int i;
MPI_Init(&argc, &argv);
for (i=0; i < 100; i++) {
printf("some output from mpitest to test the xml problem: %d\n", i);
}
MPI_Finalize();
return 0;
}

33  test/simple/slave.c  (regular file)
@@ -0,0 +1,33 @@
/* -*- C -*-
*
* $HEADER$
*
* The most basic of MPI applications
*/
#include <stdio.h>
#include <unistd.h>
#include "mpi.h"
int main(int argc, char* argv[])
{
int rank, size;
MPI_Comm parent;
int msg;
printf("Slave [pid %ld] starting up!\n", (long)getpid());
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Comm_get_parent(&parent);
MPI_Recv(&msg, 1, MPI_INT, 0, 1, parent, MPI_STATUS_IGNORE);
printf("Slave %d received msg: %d\n", rank, msg);
MPI_Comm_disconnect(&parent);
MPI_Finalize();
return 0;
}

52  test/simple/spawn-problem/ch_rec.c  (regular file)
@@ -0,0 +1,52 @@
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <mpi.h>
int main (int argc, char **argv){
char buff[30];
MPI_Status st;
MPI_Comm comm[2], parent;
MPI_Request req[2];
int errcodes[1];
int level;
int x = 6, i, j;
MPI_Init(&argc, &argv);
MPI_Comm_get_parent(&parent);
argv++;
level = atoi(argv[0]);
printf("level = %d\n",level);
MPI_Recv(&buff, sizeof(char)*30, MPI_CHAR, MPI_ANY_SOURCE,
MPI_ANY_TAG, parent, &st);
printf("Parent sent: %s\n", buff);
if(level < x){
sprintf(argv[0], "%d", level+1);
MPI_Comm_spawn("ch_rec", argv, 1, MPI_INFO_NULL, 0, MPI_COMM_SELF,
&comm[0], errcodes);
sprintf(buff,"level %d (pid:%d)", level, getpid());
MPI_Send(&buff, sizeof(char)*30, MPI_CHAR, 0, 100, comm[0]);
MPI_Irecv(&buff, sizeof(char)*30, MPI_CHAR, MPI_ANY_SOURCE,
MPI_ANY_TAG, comm[0], &req[0]);
//sleep(2);
sprintf(argv[0], "%d", (level+1));
MPI_Comm_spawn("ch_rec", argv, 1, MPI_INFO_NULL, 0, MPI_COMM_SELF,
&comm[1], errcodes);
sprintf(buff,"level %d (pid:%d)", level, getpid());
MPI_Send(&buff, sizeof(char)*30, MPI_CHAR, 0, 100, comm[1]);
MPI_Irecv(&buff, sizeof(char)*30, MPI_CHAR, MPI_ANY_SOURCE,
MPI_ANY_TAG, comm[1], &req[1]);
for(i=0; i<2; i++){
MPI_Waitany(2, req, &j, MPI_STATUS_IGNORE);
printf("Child %d sent: %s\n", j, buff);
}
}
sprintf(buff,"level %d (pid:%d)", level, getpid());
MPI_Send(&buff, sizeof(char)*30, MPI_CHAR, 0, 100, parent);
MPI_Finalize();
return 0;
}

28  test/simple/spawn-problem/start.c  (regular file)
@@ -0,0 +1,28 @@
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <mpi.h>
int main (int argc, char **argv){
char buff[30];
MPI_Status st;
MPI_Comm comm;
int errcodes[1];
MPI_Init(&argc, &argv);
int level = 0;
printf("level %d\n", level);
sprintf(argv[0], "%d", level+1);
MPI_Comm_spawn("ch_rec", argv, 1, MPI_INFO_NULL, 0, MPI_COMM_SELF,
&comm, errcodes);
sprintf(buff,"level %d (pid:%d)", level, getpid());
MPI_Send(&buff, sizeof(char)*30, MPI_CHAR, 0, 100, comm);
MPI_Recv(&buff, sizeof(char)*30, MPI_CHAR, MPI_ANY_SOURCE,
MPI_ANY_TAG, comm, &st);
printf("Child sent: %s\n", buff);
MPI_Finalize();
return 0;
}

66  test/simple/spawn_multiple.c  (regular file)
@@ -0,0 +1,66 @@
#include "orte_config.h"
#include "opal/runtime/opal.h"
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>
#include <mpi.h>
int main(int argc, char* argv[])
{
int msg;
MPI_Comm parent, child;
int rank, size;
const char *hostname;
pid_t pid;
int i;
char *cmds[2];
char *argv0[] = { "foo", NULL };
char *argv1[] = { "bar", NULL };
char **spawn_argv[2];
int maxprocs[] = { 1, 1 };
MPI_Info info[] = { MPI_INFO_NULL, MPI_INFO_NULL };
cmds[1] = cmds[0] = argv[0];
spawn_argv[0] = argv0;
spawn_argv[1] = argv1;
MPI_Init(NULL, NULL);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Comm_get_parent(&parent);
/* If we get COMM_NULL back, then we're the parent */
if (MPI_COMM_NULL == parent) {
pid = getpid();
printf("Parent [pid %ld] about to spawn!\n", (long)pid);
MPI_Comm_spawn_multiple(2, cmds, spawn_argv, maxprocs,
info, 0, MPI_COMM_WORLD,
&child, MPI_ERRCODES_IGNORE);
printf("Parent done with spawn\n");
if (0 == rank) {
msg = 38;
printf("Parent sending message to children\n");
MPI_Send(&msg, 1, MPI_INT, 0, 1, child);
MPI_Send(&msg, 1, MPI_INT, 1, 1, child);
}
MPI_Comm_disconnect(&child);
printf("Parent disconnected\n");
}
/* Otherwise, we're the child */
else {
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
hostname = opal_gethostname();
pid = getpid();
printf("Hello from the child %d of %d on host %s pid %ld: argv[1] = %s\n", rank, size, hostname, (long)pid, argv[1]);
MPI_Recv(&msg, 1, MPI_INT, 0, 1, parent, MPI_STATUS_IGNORE);
printf("Child %d received msg: %d\n", rank, msg);
MPI_Comm_disconnect(&parent);
printf("Child %d disconnected\n", rank);
}
MPI_Finalize();
return 0;
}

73
test/simple/spawn_tree.c Normal file

@ -0,0 +1,73 @@
#include "orte_config.h"
#include "opal/runtime/opal.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <unistd.h>
#include <mpi.h>
int main(int argc, char ** argv){
int i;
int rank, size, child_rank;
const char *nomehost;
MPI_Comm parent, intercomm1, intercomm2;
int erro;
int level, curr_level;
if (argc < 2) {
fprintf(stderr, "Usage: spawn_tree <#levels>\n");
exit(1);
}
level = atoi(argv[1]);
MPI_Init(&argc, &argv);
MPI_Comm_get_parent(&parent);
if(parent == MPI_COMM_NULL){
rank=0;
}
else{
MPI_Recv(&rank, 1, MPI_INT, 0, 0, parent, MPI_STATUS_IGNORE);
}
curr_level = (int) log2(rank+1);
printf(" --> rank: %d and curr_level: %d\n", rank, curr_level);
// Node propagation
if(curr_level < level){
// 2^(curr_level+1) - 1 + 2*(rank - (2^curr_level - 1)) = 2*rank + 1
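/* Worked example (added for clarity; not in the original test): rank 4 sits
 * at level 2 since (int)log2(4+1) = 2; level 2 starts at 2^2 - 1 = 3, so
 * rank 4 is slot 1 on that level; level 3 starts at 2^3 - 1 = 7, giving a
 * first child of 7 + 2*1 = 9 = 2*4 + 1. */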
child_rank = 2*rank + 1;
printf("(%d) Before create rank %d\n", rank, child_rank);
MPI_Comm_spawn(argv[0], &argv[1], 1, MPI_INFO_NULL, 0,
MPI_COMM_SELF, &intercomm1, &erro);
printf("(%d) After create rank %d\n", rank, child_rank);
MPI_Send(&child_rank, 1, MPI_INT, 0, 0, intercomm1);
//sleep(1);
child_rank = child_rank + 1;
printf("(%d) Before create rank %d\n", rank, child_rank);
MPI_Comm_spawn(argv[0], &argv[1], 1, MPI_INFO_NULL, 0,
MPI_COMM_SELF, &intercomm2, &erro);
printf("(%d) After create rank %d\n", rank, child_rank);
MPI_Send(&child_rank, 1, MPI_INT, 0, 0, intercomm2);
}
nomehost = opal_gethostname();
printf("(%d) in %s\n", rank, nomehost);
MPI_Finalize();
return(0);
}

11
test/simple/thread_init.c Normal file

@ -0,0 +1,11 @@
#include <mpi.h>
#include <stdio.h>
int main(int argc, const char* argv[]) {
int provided = -1;
printf("Calling MPI_Init_thread...\n");
MPI_Init_thread(NULL, NULL, MPI_THREAD_MULTIPLE, &provided);
printf("MPI_Init_thread returned, provided = %d\n", provided);
MPI_Finalize();
return 0;
}

217
test/simple/xlib.c Normal file

@ -0,0 +1,217 @@
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>     /* needed for usleep() */
#include <mpi.h>
#include <pmix.h>
#define SIZE 20
#define POS 10
#define INITIAL_VALUE 10
static pmix_proc_t myproc;
/* this is the event notification function we pass down below
 * when registering for general events - i.e., the default
* handler. We don't technically need to register one, but it
* is usually good practice to catch any events that occur */
static void notification_fn(size_t evhdlr_registration_id,
pmix_status_t status,
const pmix_proc_t *source,
pmix_info_t info[], size_t ninfo,
pmix_info_t results[], size_t nresults,
pmix_event_notification_cbfunc_fn_t cbfunc,
void *cbdata)
{
/* this example doesn't do anything with default events */
fprintf(stderr, "Default event handler called with status %s\n", PMIx_Error_string(status));
if (NULL != cbfunc) {
cbfunc(PMIX_EVENT_ACTION_COMPLETE, NULL, 0, NULL, NULL, cbdata);
}
}
/* this is an event notification function that we explicitly request
* be called when the PMIX_MODEL_DECLARED notification is issued.
* We could catch it in the general event notification function and test
* the status to see if the status matched, but it often is simpler
* to declare a use-specific notification callback point. In this case,
* we are asking to know whenever a programming model library is
* instantiated */
static void model_callback(size_t evhdlr_registration_id,
pmix_status_t status,
const pmix_proc_t *source,
pmix_info_t info[], size_t ninfo,
pmix_info_t results[], size_t nresults,
pmix_event_notification_cbfunc_fn_t cbfunc,
void *cbdata)
{
size_t n;
fprintf(stderr, "Model event handler called with status %d(%s)\n", status, PMIx_Error_string(status));
/* check to see what model declared itself */
for (n=0; n < ninfo; n++) {
if (PMIX_STRING == info[n].value.type) {
fprintf(stderr, "\t%s:\t%s\n", info[n].key, info[n].value.data.string);
}
}
/* we must NOT tell the event handler state machine that we
* are the last step as that will prevent it from notifying
* anyone else that might be listening for declarations */
if (NULL != cbfunc) {
cbfunc(PMIX_SUCCESS, NULL, 0, NULL, NULL, cbdata);
}
}
/* event handler registration is done asynchronously because it
* may involve the PMIx server registering with the host RM for
* external events. So we provide a callback function that returns
* the status of the request (success or an error), plus a numerical index
* to the registered event. The index is used later on to deregister
* an event handler - if we don't explicitly deregister it, then the
 * PMIx server will do so when it sees us exit */
static void model_registration_callback(pmix_status_t status,
size_t evhandler_ref,
void *cbdata)
{
volatile int *active = (volatile int*)cbdata;
if (PMIX_SUCCESS != status) {
fprintf(stderr, "Client %s:%d EVENT HANDLER REGISTRATION FAILED WITH STATUS %d, ref=%lu\n",
myproc.nspace, myproc.rank, status, (unsigned long)evhandler_ref);
}
*active = status;
}
int main(int argc, char *argv[])
{
int i, rank, size, next, prev, tag = 201;
int array_size = SIZE;
int pos = POS;
int *send_array;
int *recv_array;
pmix_info_t *info;
size_t ninfo;
pmix_status_t code = PMIX_MODEL_DECLARED;
pmix_status_t rc;
volatile int active;
if (1 < argc) {
fprintf(stderr, "Declaring ourselves\n");
/* declare ourselves as a non-MPI library prior to MPI_Init */
ninfo = 4;
PMIX_INFO_CREATE(info, ninfo);
PMIX_INFO_LOAD(&info[0], PMIX_PROGRAMMING_MODEL, "EXAMPLE", PMIX_STRING);
PMIX_INFO_LOAD(&info[1], PMIX_MODEL_LIBRARY_NAME, "FOOL", PMIX_STRING);
PMIX_INFO_LOAD(&info[2], PMIX_MODEL_LIBRARY_VERSION, "1.2.3", PMIX_STRING);
PMIX_INFO_LOAD(&info[3], PMIX_THREADING_MODEL, "NONE", PMIX_STRING);
if (PMIX_SUCCESS != (rc = PMIx_Init(&myproc, info, ninfo))) {
fprintf(stderr, "PMIx Init failed: %s\n", PMIx_Error_string(rc));
exit(1);
}
PMIX_INFO_FREE(info, ninfo);
/* register a handler specifically for when models declare */
active = -1;
ninfo = 1;
PMIX_INFO_CREATE(info, ninfo);
PMIX_INFO_LOAD(&info[0], PMIX_EVENT_HDLR_NAME, "APP-MODEL", PMIX_STRING);
PMIx_Register_event_handler(&code, 1, info, ninfo,
model_callback, model_registration_callback, (void*)&active);
while (-1 == active) {
usleep(10);
}
PMIX_INFO_FREE(info, ninfo);
if (0 != active) {
exit(active);
}
}
/* initialize the MPI library - it will declare itself */
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
if (argc <= 1) {
fprintf(stderr, "Registering handler\n");
/* register a handler specifically for when models declare */
active = -1;
ninfo = 1;
PMIX_INFO_CREATE(info, ninfo);
PMIX_INFO_LOAD(&info[0], PMIX_EVENT_HDLR_NAME, "APP-MODEL", PMIX_STRING);
PMIx_Register_event_handler(&code, 1, info, ninfo,
model_callback, model_registration_callback, (void*)&active);
while (-1 == active) {
usleep(10);
}
PMIX_INFO_FREE(info, ninfo);
if (0 != active) {
exit(active);
}
}
fprintf(stderr, "Rank %d has cleared MPI_Init\n", rank);
next = (rank + 1) % size;
prev = (rank + size - 1) % size;
send_array = malloc(sizeof(int) * SIZE);
recv_array = malloc(sizeof(int) * SIZE);
for (i = 0; i < array_size; ++i) {
send_array[i] = 17;
recv_array[i] = -1;
}
if (0 == rank) {
send_array[pos] = INITIAL_VALUE;
MPI_Send(send_array, array_size, MPI_INT, next, tag,
MPI_COMM_WORLD);
}
/* if we didn't already do it, declare another model now */
if (argc <= 1) {
fprintf(stderr, "Declaring ourselves\n");
/* declare ourselves as a non-MPI library after MPI_Init */
ninfo = 4;
PMIX_INFO_CREATE(info, ninfo);
PMIX_INFO_LOAD(&info[0], PMIX_PROGRAMMING_MODEL, "EXAMPLE", PMIX_STRING);
PMIX_INFO_LOAD(&info[1], PMIX_MODEL_LIBRARY_NAME, "FOOL", PMIX_STRING);
PMIX_INFO_LOAD(&info[2], PMIX_MODEL_LIBRARY_VERSION, "1.2.3", PMIX_STRING);
PMIX_INFO_LOAD(&info[3], PMIX_THREADING_MODEL, "NONE", PMIX_STRING);
if (PMIX_SUCCESS != (rc = PMIx_Init(&myproc, info, ninfo))) {
fprintf(stderr, "PMIx Init failed: %s\n", PMIx_Error_string(rc));
exit(1);
}
PMIX_INFO_FREE(info, ninfo);
}
while (1) {
recv_array[pos] = -1;
MPI_Recv(recv_array, array_size, MPI_INT, prev, tag,
MPI_COMM_WORLD, MPI_STATUS_IGNORE);
send_array[pos] = recv_array[pos];
if (rank == 0) {
--send_array[pos];
}
MPI_Send(send_array, array_size, MPI_INT, next, tag, MPI_COMM_WORLD);
if (0 == send_array[pos]) {
break;
}
}
if (rank == 0) {
MPI_Recv(recv_array, array_size, MPI_INT, prev, tag,
MPI_COMM_WORLD, MPI_STATUS_IGNORE);
}
fprintf(stderr, "Rank %d has completed ring\n", rank);
MPI_Barrier(MPI_COMM_WORLD);
fprintf(stderr, "Rank %d has completed MPI_Barrier\n", rank);
/* decrement the PMIx refcount */
PMIx_Finalize(NULL, 0);
MPI_Finalize();
return 0;
}

198
test/simple/ziaprobe.c Normal file

@ -0,0 +1,198 @@
/* -*- C -*-
*
* Copyright (c) 2008 Los Alamos National Security, LLC. All rights reserved.
*
* Copyright (c) 2015 Cisco Systems, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*
*/
#include <stdio.h>
#include <stdbool.h>
#include <sys/types.h>
#include <unistd.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <mpi.h>
int main(int argc, char* argv[])
{
int msg = 0;    /* payload value is irrelevant; initialize to avoid sending an indeterminate int */
int rank, size, my_twin;
int ppn, my_node;
struct timeval tv;
unsigned long my_timestamp[2];
long *timestamps;
int i, maxrank;
unsigned long maxsec, maxusec, minutes, seconds;
unsigned long start_sec, start_usec;
float fsecs;
int nnodes;
bool odd_nnodes;
bool recvit;
char *ppnstr;
if (argc < 3) {
fprintf(stderr, "start times must be provided\n");
return 1;
}
ppnstr = getenv("OMPI_COMM_WORLD_LOCAL_SIZE");
if (NULL == ppnstr) {
fprintf(stderr, "OMPI_COMM_WORLD_LOCAL_SIZE is not set - this probe must be launched by mpirun\n");
return 1;
}
ppn = strtol(ppnstr, NULL, 10);
start_sec = strtol(argv[1], NULL, 10);
start_usec = strtol(argv[2], NULL, 10);
MPI_Init(NULL, NULL);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
/* this program requires that the size be an integer multiple of ppn */
if (0 != (size % ppn)) {
if (0 == rank) {
fprintf(stderr, "The number of procs must be an integer multiple of the ppn\n"
"Given: num_procs %d ppn %d\n", size, ppn);
MPI_Abort(MPI_COMM_WORLD, 1);
} else {
goto cleanup;
}
}
/* see how many nodes we have */
nnodes = size / ppn;
odd_nnodes = false;
if (0 != (nnodes % 2)) {
/* we have an odd # of nodes */
odd_nnodes = true;
}
/* compute the rank of the rank with which I am to exchange a message.
* Per requirements, this proc must be on another node. To accomplish
* this with max efficiency, we take advantage of knowing that the ppn
* on every node will be the same. We therefore pair up the nodes, and
* pair up the procs on each node, so that only one connection is setup
* for each proc. We also want to ensure that the node pairs are
* "neighboring" - i.e., that they hopefully share a switch so that the
* hop count of sending the messages is minimized.
*/
/* first, determine if my node is odd or even */
my_node = rank / ppn;
if (0 != (my_node % 2)) {
/* compute my twin's rank - as I am an odd numbered node, my
* twin will be on the node below me. Thus, its rank will be
* my rank - ppn
*/
my_twin = rank - ppn;
/* if I am an odd numbered node, then I will receive first */
MPI_Recv(&msg, 1, MPI_INT, my_twin, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
/* receive the return message so that we meet the stated requirement
* that -every- proc send a message
*/
MPI_Send(&msg, 1, MPI_INT, my_twin, 1, MPI_COMM_WORLD);
} else {
/* compute my twin's rank - as I am an even numbered node, my
* twin will be on the node above me. Thus, its rank will be
* my rank + ppn
*/
my_twin = rank + ppn;
/* if we have an odd number of nodes, then the last node will be
* even and will have no one above them. In this case, we wrap around
* and ask that node=0 take the additional connections
*/
recvit = true;
if (my_twin >= size) {
my_twin = my_twin - size;
recvit = false;
}
/* I am an even numbered node, so I send first */
MPI_Send(&msg, 1, MPI_INT, my_twin, 1, MPI_COMM_WORLD);
/* now receive the reply so my twin also meets the requirement - but only
* if we don't have an odd number of nodes. If we have an odd number of
* nodes, then the node=0 procs will already have met their requirement
*/
if (recvit) {
MPI_Recv(&msg, 1, MPI_INT, my_twin, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
}
}
/* if we have an odd number of nodes and I am on node=0, then I have
* to take the extra recv
*/
if (odd_nnodes && 0 == my_node) {
my_twin = size - ppn + rank;
MPI_Recv(&msg, 1, MPI_INT, my_twin, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
}
/* get a completion time stamp */
gettimeofday(&tv, NULL);
my_timestamp[0] = tv.tv_sec;
my_timestamp[1] = tv.tv_usec;
/* THIS COMPLETES THE OFFICIAL TIMING POINT */
/* Gather to get all the timestamps to rank 0 */
timestamps = NULL;
if (0 == rank) {
timestamps = malloc(2 * size * sizeof(unsigned long));
if (NULL == timestamps) {
MPI_Abort(MPI_COMM_WORLD, 1);
}
}
MPI_Gather(&my_timestamp, 2, MPI_LONG,
timestamps, 2, MPI_LONG, 0, MPI_COMM_WORLD);
if (0 == rank) {
/* The "timestamps" array will now have everyone's timestamp
(i.e., rank 0's timestamp will be in pos 0 & 1, rank 1's timestamp
will be in 2 & 3, ...etc. */
/* find the maximum timestamp */
maxsec = start_sec;
maxusec = start_usec;
maxrank = -1;
for (i=0; i < 2*size; i+=2) {
if (timestamps[i] < maxsec) {
continue;
}
if (timestamps[i] == maxsec &&
timestamps[i+1] < maxusec) {
continue;
}
maxsec = timestamps[i];
maxusec = timestamps[i+1];
maxrank = i/2;
}
free(timestamps);
/* subtract starting time to get time in microsecs for test */
maxsec = maxsec - start_sec;
if (maxusec >= start_usec) {
maxusec = maxusec - start_usec;
} else {
maxsec--;
maxusec = 1000000 - start_usec + maxusec;
}
/* pretty-print the result */
seconds = maxsec + (maxusec / 1000000l);
minutes = seconds / 60l;
seconds = seconds % 60l;
if (0 == minutes && 0 == seconds) {
fsecs = ((float)(maxsec)*1000000.0 + (float)maxusec) / 1000.0;
fprintf(stderr, "Time test was completed in %8.2f millisecs\nSlowest rank: %d\n",
fsecs, maxrank);
} else {
fprintf(stderr, "Time test was completed in %3lu:%02lu min:sec\nSlowest rank: %d\n",
minutes, seconds, maxrank);
}
}
cleanup:
/* this completes the test */
MPI_Finalize();
return 0;
}

21
test/simple/ziatest.README Normal file

@ -0,0 +1,21 @@
To run the Zia launch/wireup timing test:
1. make ziatest ziaprobe
2. ./ziatest x
where x is the number of processes per node (ppn) to be tested. The output of the program is the time required to complete the test, plus the rank of the slowest process.
The ziatest consists of the following steps:
1. launches the specified number of processes per node (ppn) on every node in the allocation. To restrict which nodes are used, create a hostfile and add OMPI_MCA_hostfile=your-hostfile-name to your environment
2. each process executes MPI_Init
3. each process computes the rank of its "partner" on another node - the process holding the same local rank on the paired neighboring node. In other words, the test pairs up nodes, and the processes with matching local ranks on the two nodes of a pair exchange a single one-integer message (a condensed sketch of this computation follows this list)
4. each process reports back a timestamp indicating when the send/recv exchange with its partner completed
5. rank 0 of ziaprobe gathers all of the timestamps, finds the latest one, subtracts the start time that ziatest passed on the command line, and prints the elapsed time along with the rank of the slowest process
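For reference, the pairing rule described in step 3 reduces to a few lines of arithmetic. The standalone sketch below is not part of the test suite (partner_of and the ppn/size values are illustrative only); it mirrors the even/odd-node logic in ziaprobe.c for the common case of an even node count, omitting the odd-node-count wraparound that ziaprobe.c also handles:

#include <stdio.h>

/* Ranks are laid out ppn-per-node; nodes are paired (0,1), (2,3), ...,
 * and each rank exchanges a message with the rank holding the same
 * local slot on its partner node. */
static int partner_of(int rank, int ppn)
{
    int my_node = rank / ppn;            /* node index of this rank */
    return (my_node % 2) ? rank - ppn    /* odd node: twin on the node below */
                         : rank + ppn;   /* even node: twin on the node above */
}

int main(void)
{
    int ppn = 4, size = 8, r;            /* e.g. 2 nodes x 4 procs/node */
    for (r = 0; r < size; r++) {
        printf("rank %d <-> rank %d\n", r, partner_of(r, ppn));
    }
    return 0;
}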

53
test/simple/ziatest.c Normal file

@ -0,0 +1,53 @@
/* -*- C -*-
*
* Copyright (c) 2008 Los Alamos National Security, LLC. All rights reserved.
*
* Copyright (c) 2015 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2018 Intel, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*
*/
#include <stdio.h>
#include <stdbool.h>
#include <sys/types.h>
#include <unistd.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <mpi.h>
#include "opal/util/printf.h"   /* declares opal_asprintf() */
int main(int argc, char* argv[])
{
int nppn;
struct timeval tv;
char *cmd;
/* check for proper usage */
if (2 != argc) {    /* require exactly one argument: the ppn to test */
printf("usage: ziatest <#procs/node>\n");
exit(1);
}
nppn = strtol(argv[1], NULL, 10);
/* THIS BEGINS THE OFFICIAL TIMING POINT */
/* get a starting time stamp */
gettimeofday(&tv, NULL);
/* form the command */
opal_asprintf(&cmd, "mpirun -npernode %d ./ziaprobe %ld %ld",
nppn, (long) tv.tv_sec, (long) tv.tv_usec);
/* execute it */
system(cmd);
/* done */
free(cmd);
return 0;
}