diff --git a/Makefile.am b/Makefile.am
index 61a2a14706..0423f3067a 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -10,6 +10,7 @@
 # Copyright (c) 2004-2005 The Regents of the University of California.
 #                         All rights reserved.
 # Copyright (c) 2006-2010 Cisco Systems, Inc.  All rights reserved.
+# Copyright (c) 2012      Los Alamos National Security, Inc.  All rights reserved.
 # $COPYRIGHT$
 #
 # Additional copyrights may follow
@@ -18,7 +19,8 @@
 #
 
 SUBDIRS = config contrib $(MCA_PROJECT_SUBDIRS) test
 
-EXTRA_DIST = README INSTALL VERSION Doxyfile LICENSE autogen.pl autogen.sh CMakeLists.txt README.WINDOWS.txt
+EXTRA_DIST = README INSTALL VERSION Doxyfile LICENSE autogen.pl autogen.sh \
+        CMakeLists.txt README.WINDOWS.txt README.JAVA.txt
 
 include examples/Makefile.include
diff --git a/README.JAVA.txt b/README.JAVA.txt
new file mode 100644
index 0000000000..4e2eb4c7db
--- /dev/null
+++ b/README.JAVA.txt
@@ -0,0 +1,99 @@
+Feb 10, 2012
+---------------
+***************************************************************************
+IMPORTANT NOTE
+
+JAVA BINDINGS ARE PROVIDED ON A "PROVISIONAL" BASIS - I.E., THEY ARE NOT
+PART OF THE CURRENT OR PROPOSED MPI-3 STANDARDS. THUS, INCLUSION OF
+JAVA SUPPORT IS NOT REQUIRED BY THE STANDARD. CONTINUED INCLUSION
+OF THE JAVA BINDINGS IS CONTINGENT UPON ACTIVE USER INTEREST AND
+CONTINUED DEVELOPER SUPPORT.
+***************************************************************************
+
+This version of Open MPI provides support for Java-based
+MPI applications. At the time of this writing, not all MPI functions
+are supported. However, work on extending the Java bindings to
+provide full MPI coverage is underway.
+
+The rest of this document provides step-by-step instructions on
+building OMPI with Java bindings, and on compiling and running
+Java-based MPI applications.
+
+============================================================================
+
+Building Java Bindings
+
+If this software was obtained as a developer-level
+checkout as opposed to a tarball, you will need to start your build by
+running ./autogen.pl. This will also require that you have a fairly
+recent version of autotools on your system - see the HACKING file for
+details.
+
+Java support requires that Open MPI be built at least with shared libraries
+(i.e., --enable-shared) - any additional options are fine and will not
+conflict. Note that this is the default for Open MPI, so you don't
+have to explicitly add the option. The Java bindings will build only
+if --enable-mpi-java is specified, and a JDK is found in a typical
+system default location.
+
+If the JDK is not in a place where we automatically find it, you can
+specify the location. For example, this is required on the Mac
+platform as the JDK headers are located in a non-typical location. Two
+options are available for this purpose:
+
+--with-jdk-bindir=<foo>  - the location of javac and javah
+--with-jdk-headers=<bar> - the directory containing jni.h
+
+For simplicity, typical configurations are provided in platform files
+under contrib/platform/hadoop. These will meet the needs of most
+users, or at least provide a starting point for your own custom
+configuration.
+
+In summary, therefore, you can configure the system using the
+following Java-related options:
+
+./configure --with-platform=contrib/platform/hadoop/<platform> ...
+
+or
+
+./configure --enable-mpi-java --with-jdk-bindir=<foo>
+--with-jdk-headers=<bar> ...
+
+or simply
+
+./configure --enable-mpi-java ...
+
+if the JDK is in a "standard" place that we automatically find.
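+A quick way to verify that the Java bindings actually made it into an
+installation is to query ompi_info (a sketch; the grep string below is
+the same one the examples/Makefile uses to decide whether to build the
+Java examples):
+
+    shell$ ompi_info --parsable | grep bindings:java
+    bindings:java:yes
+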
+
+----------------------------------------------------------------------------
+
+Running Java Applications
+
+For convenience, the "mpijavac" wrapper compiler has been provided for
+compiling Java-based MPI applications. It ensures that all required MPI
+libraries and class paths are defined. You can see the actual command
+line using the --showme option, if you are interested.
+
+Once your application has been compiled, you can run it with the
+standard "mpirun" command line:
+
+mpirun java <your app class>
+
+For convenience, mpirun has been updated to detect the "java" command
+and ensure that the required MPI libraries and class paths are defined
+to support execution. You therefore do NOT need to specify the Java
+library path to the MPI installation, nor the MPI classpath. Any class
+path definitions required for your application should be specified
+either on the command line or via the CLASSPATH environment
+variable. Note that the local directory will be added to the class
+path if nothing is specified.
+
+As always, the "java" executable, all required libraries, and your
+application classes must be available on all nodes.
+
+----------------------------------------------------------------------------
+
+If you have any problems, or find any bugs, please feel free to report
+them to the Open MPI user's mailing list (see
+http://www.open-mpi.org/community/lists/ompi.php).
diff --git a/VERSION b/VERSION
index 87a9f53905..38f5427b37 100644
--- a/VERSION
+++ b/VERSION
@@ -94,6 +94,7 @@ libmpi_f77_so_version=0:0:0
 libmpi_f90_so_version=0:0:0
 libopen_rte_so_version=0:0:0
 libopen_pal_so_version=0:0:0
+libmpi_java_so_version=0:0:0
 
 # "Common" components install standalone libraries that are run-time
 # linked by one or more components.  So they need to be versioned as
diff --git a/configure.ac b/configure.ac
index 9302999576..55f878e786 100644
--- a/configure.ac
+++ b/configure.ac
@@ -123,6 +123,7 @@ m4_ifdef([project_ompi],
         AC_SUBST(libmpi_cxx_so_version)
         AC_SUBST(libmpi_f77_so_version)
         AC_SUBST(libmpi_f90_so_version)
+        AC_SUBST(libmpi_java_so_version)
 
 # It's icky that we have to hard-code the names of the
 # common components here.
:-( This could probably be done # transparently by adding some intelligence in autogen.sh @@ -551,6 +552,15 @@ OPAL_CHECK_ATTRIBUTES OPAL_CHECK_COMPILER_VERSION_ID +################################## +# Java compiler characteristics +################################## + +# We don't need Java unless we're building Open MPI; ORTE and OPAL do +# not use Java at all +m4_ifdef([project_ompi], [OMPI_SETUP_JAVA]) + + ################################## # Assembler Configuration ################################## @@ -594,7 +604,8 @@ AC_CHECK_HEADERS([alloca.h aio.h arpa/inet.h dirent.h \ sys/types.h sys/uio.h net/uio.h sys/utsname.h sys/vfs.h sys/wait.h syslog.h \ time.h termios.h ulimit.h unistd.h util.h utmp.h malloc.h \ ifaddrs.h sys/sysctl.h crt_externs.h regex.h signal.h \ - ioLib.h sockLib.h hostLib.h shlwapi.h sys/synch.h limits.h db.h ndbm.h]) + ioLib.h sockLib.h hostLib.h shlwapi.h sys/synch.h limits.h db.h ndbm.h \ + TargetConditionals.h]) # Needed to work around Darwin requiring sys/socket.h for # net/if.h diff --git a/contrib/Makefile.am b/contrib/Makefile.am index 9fb8c79869..49e3e780a7 100644 --- a/contrib/Makefile.am +++ b/contrib/Makefile.am @@ -128,15 +128,13 @@ EXTRA_DIST = \ platform/cisco/macosx-dynamic.conf \ platform/cisco/linux \ platform/cisco/linux.conf \ - platform/cisco/ebuild/hlfr \ - platform/cisco/ebuild/hlfr.conf \ - platform/cisco/ebuild/ludd \ - platform/cisco/ebuild/ludd.conf \ - platform/cisco/ebuild/native \ - platform/cisco/ebuild/native.conf \ platform/ibm/debug-ppc32-gcc \ platform/ibm/debug-ppc64-gcc \ platform/ibm/optimized-ppc32-gcc \ - platform/ibm/optimized-ppc64-gcc + platform/ibm/optimized-ppc64-gcc \ + platform/hadoop/linux \ + platform/hadoop/linux.conf \ + platform/hadoop/mac \ + platform/hadoop/mac.conf dist_pkgdata_DATA = openmpi-valgrind.supp diff --git a/contrib/platform/hadoop/cisco b/contrib/platform/hadoop/cisco new file mode 100644 index 0000000000..fc5acfee70 --- /dev/null +++ b/contrib/platform/hadoop/cisco @@ -0,0 +1,34 @@ +enable_opal_multi_threads=no +enable_ft_thread=no +enable_mem_debug=no +enable_mem_profile=no +enable_debug_symbols=yes +enable_binaries=yes +enable_heterogeneous=no +enable_picky=yes +enable_debug=yes +enable_shared=yes +enable_static=no +enable_memchecker=no +enable_ipv6=yes +enable_mpi_f77=no +enable_mpi_f90=no +enable_mpi_cxx=no +enable_mpi_cxx_seek=no +enable_cxx_exceptions=no +enable_mpi_java=yes +enable_per_user_config_files=no +enable_script_wrapper_compilers=no +enable_orterun_prefix_by_default=yes +enable_io_romio=no +enable_vt=no +enable_mca_no_build=carto,crs,memchecker,snapc,crcp,paffinity,filem,sstore,compress,rml-ftrm +with_memory_manager=no +with_tm=no +with_devel_headers=yes +with_portals=no +with_valgrind=no +with_slurm=/opt/slurm/2.1.0 +with_openib=no +with_jdk_bindir=/usr/lib/jvm/java-1.6.0/bin +with_jdk_headers=/usr/lib/jvm/java-1.6.0/include diff --git a/contrib/platform/hadoop/cisco.conf b/contrib/platform/hadoop/cisco.conf new file mode 100644 index 0000000000..3af96eeadd --- /dev/null +++ b/contrib/platform/hadoop/cisco.conf @@ -0,0 +1,59 @@ +# +# Copyright (c) 2009 Cisco Systems, Inc. All rights reserved. +# $COPYRIGHT$ +# +# Additional copyrights may follow +# +# $HEADER$ +# + +# This is the default system-wide MCA parameters defaults file. +# Specifically, the MCA parameter "mca_param_files" defaults to a +# value of +# "$HOME/.openmpi/mca-params.conf:$sysconf/openmpi-mca-params.conf" +# (this file is the latter of the two). 
So if the default value of +# mca_param_files is not changed, this file is used to set system-wide +# MCA parameters. This file can therefore be used to set system-wide +# default MCA parameters for all users. Of course, users can override +# these values if they want, but this file is an excellent location +# for setting system-specific MCA parameters for those users who don't +# know / care enough to investigate the proper values for them. + +# Note that this file is only applicable where it is visible (in a +# filesystem sense). Specifically, MPI processes each read this file +# during their startup to determine what default values for MCA +# parameters should be used. mpirun does not bundle up the values in +# this file from the node where it was run and send them to all nodes; +# the default value decisions are effectively distributed. Hence, +# these values are only applicable on nodes that "see" this file. If +# $sysconf is a directory on a local disk, it is likely that changes +# to this file will need to be propagated to other nodes. If $sysconf +# is a directory that is shared via a networked filesystem, changes to +# this file will be visible to all nodes that share this $sysconf. + +# The format is straightforward: one per line, mca_param_name = +# rvalue. Quoting is ignored (so if you use quotes or escape +# characters, they'll be included as part of the value). For example: + +# Disable run-time MPI parameter checking +# mpi_param_check = 0 + +# Note that the value "~/" will be expanded to the current user's home +# directory. For example: + +# Change component loading path +# component_path = /usr/local/lib/openmpi:~/my_openmpi_components + +# See "ompi_info --param all all" for a full listing of Open MPI MCA +# parameters available and their default values. +# + +# Basic behavior to smooth startup +orte_abort_timeout = 10 +opal_set_max_sys_limits = 1 + +## Add the interface for out-of-band communication +## and set it up +#oob_tcp_listen_mode = listen_thread +oob_tcp_sndbuf = 32768 +oob_tcp_rcvbuf = 32768 diff --git a/contrib/platform/hadoop/linux b/contrib/platform/hadoop/linux new file mode 100644 index 0000000000..14e3d27dd9 --- /dev/null +++ b/contrib/platform/hadoop/linux @@ -0,0 +1,27 @@ +enable_opal_multi_threads=no +enable_ft_thread=no +enable_mem_debug=no +enable_mem_profile=no +enable_debug_symbols=yes +enable_binaries=yes +enable_heterogeneous=no +enable_picky=yes +enable_debug=yes +enable_shared=yes +enable_static=no +enable_memchecker=no +enable_ipv6=no +enable_mpi_f77=no +enable_mpi_f90=no +enable_mpi_cxx=no +enable_mpi_cxx_seek=no +enable_cxx_exceptions=no +enable_mpi_java=yes +enable_io_romio=no +enable_vt=no +enable_mca_no_build=carto,crs,memchecker,snapc,crcp,paffinity,filem,sstore,compress,rml-ftrm +with_memory_manager=no +with_tm=no +with_devel_headers=yes +with_portals=no +with_valgrind=no diff --git a/contrib/platform/hadoop/linux.conf b/contrib/platform/hadoop/linux.conf new file mode 100644 index 0000000000..3af96eeadd --- /dev/null +++ b/contrib/platform/hadoop/linux.conf @@ -0,0 +1,59 @@ +# +# Copyright (c) 2009 Cisco Systems, Inc. All rights reserved. +# $COPYRIGHT$ +# +# Additional copyrights may follow +# +# $HEADER$ +# + +# This is the default system-wide MCA parameters defaults file. +# Specifically, the MCA parameter "mca_param_files" defaults to a +# value of +# "$HOME/.openmpi/mca-params.conf:$sysconf/openmpi-mca-params.conf" +# (this file is the latter of the two). 
So if the default value of +# mca_param_files is not changed, this file is used to set system-wide +# MCA parameters. This file can therefore be used to set system-wide +# default MCA parameters for all users. Of course, users can override +# these values if they want, but this file is an excellent location +# for setting system-specific MCA parameters for those users who don't +# know / care enough to investigate the proper values for them. + +# Note that this file is only applicable where it is visible (in a +# filesystem sense). Specifically, MPI processes each read this file +# during their startup to determine what default values for MCA +# parameters should be used. mpirun does not bundle up the values in +# this file from the node where it was run and send them to all nodes; +# the default value decisions are effectively distributed. Hence, +# these values are only applicable on nodes that "see" this file. If +# $sysconf is a directory on a local disk, it is likely that changes +# to this file will need to be propagated to other nodes. If $sysconf +# is a directory that is shared via a networked filesystem, changes to +# this file will be visible to all nodes that share this $sysconf. + +# The format is straightforward: one per line, mca_param_name = +# rvalue. Quoting is ignored (so if you use quotes or escape +# characters, they'll be included as part of the value). For example: + +# Disable run-time MPI parameter checking +# mpi_param_check = 0 + +# Note that the value "~/" will be expanded to the current user's home +# directory. For example: + +# Change component loading path +# component_path = /usr/local/lib/openmpi:~/my_openmpi_components + +# See "ompi_info --param all all" for a full listing of Open MPI MCA +# parameters available and their default values. +# + +# Basic behavior to smooth startup +orte_abort_timeout = 10 +opal_set_max_sys_limits = 1 + +## Add the interface for out-of-band communication +## and set it up +#oob_tcp_listen_mode = listen_thread +oob_tcp_sndbuf = 32768 +oob_tcp_rcvbuf = 32768 diff --git a/contrib/platform/hadoop/mac b/contrib/platform/hadoop/mac new file mode 100644 index 0000000000..b54dbfe08d --- /dev/null +++ b/contrib/platform/hadoop/mac @@ -0,0 +1,25 @@ +enable_mem_debug=yes +enable_mem_profile=no +enable_debug_symbols=yes +enable_binaries=yes +enable_heterogeneous=no +enable_picky=yes +enable_debug=yes +enable_shared=yes +enable_static=no +enable_io_romio=no +enable_ipv6=no +enable_mpi_f77=no +enable_mpi_f90=no +enable_mpi_cxx=no +enable_mpi_cxx_seek=no +enable_mpi_java=yes +enable_memchecker=no +enable_vt=no +enable_mca_no_build=carto,crs,memchecker,snapc,crcp,paffinity,filem,sstore,compress,rml-ftrm,db,notifier +with_memory_manager=no +with_devel_headers=yes +with_xgrid=no +with_slurm=no +with_jdk_bindir=/usr/bin +with_jdk_headers=/System/Library/Frameworks/JavaVM.framework/Versions/Current/Headers diff --git a/contrib/platform/hadoop/mac.conf b/contrib/platform/hadoop/mac.conf new file mode 100644 index 0000000000..1630024e29 --- /dev/null +++ b/contrib/platform/hadoop/mac.conf @@ -0,0 +1,69 @@ +# +# Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana +# University Research and Technology +# Corporation. All rights reserved. +# Copyright (c) 2004-2005 The University of Tennessee and The University +# of Tennessee Research Foundation. All rights +# reserved. +# Copyright (c) 2004-2005 High Performance Computing Center Stuttgart, +# University of Stuttgart. All rights reserved. 
+# Copyright (c) 2004-2005 The Regents of the University of California. +# All rights reserved. +# Copyright (c) 2006-2011 Cisco Systems, Inc. All rights reserved. +# $COPYRIGHT$ +# +# Additional copyrights may follow +# +# $HEADER$ +# + +# This is the default system-wide MCA parameters defaults file. +# Specifically, the MCA parameter "mca_param_files" defaults to a +# value of +# "$HOME/.openmpi/mca-params.conf:$sysconf/openmpi-mca-params.conf" +# (this file is the latter of the two). So if the default value of +# mca_param_files is not changed, this file is used to set system-wide +# MCA parameters. This file can therefore be used to set system-wide +# default MCA parameters for all users. Of course, users can override +# these values if they want, but this file is an excellent location +# for setting system-specific MCA parameters for those users who don't +# know / care enough to investigate the proper values for them. + +# Note that this file is only applicable where it is visible (in a +# filesystem sense). Specifically, MPI processes each read this file +# during their startup to determine what default values for MCA +# parameters should be used. mpirun does not bundle up the values in +# this file from the node where it was run and send them to all nodes; +# the default value decisions are effectively distributed. Hence, +# these values are only applicable on nodes that "see" this file. If +# $sysconf is a directory on a local disk, it is likely that changes +# to this file will need to be propagated to other nodes. If $sysconf +# is a directory that is shared via a networked filesystem, changes to +# this file will be visible to all nodes that share this $sysconf. + +# The format is straightforward: one per line, mca_param_name = +# rvalue. Quoting is ignored (so if you use quotes or escape +# characters, they'll be included as part of the value). For example: + +# Disable run-time MPI parameter checking +# mpi_param_check = 0 + +# Note that the value "~/" will be expanded to the current user's home +# directory. For example: + +# Change component loading path +# component_path = /usr/local/lib/openmpi:~/my_openmpi_components + +# See "ompi_info --param all all" for a full listing of Open MPI MCA +# parameters available and their default values. +# + +# Basic behavior to smooth startup +mca_component_show_load_errors = 0 +orte_abort_timeout = 10 + +## Add the interface for out-of-band communication +## and set it up +oob_tcp_listen_mode = listen_thread +oob_tcp_sndbuf = 32768 +oob_tcp_rcvbuf = 32768 diff --git a/examples/Hello.java b/examples/Hello.java new file mode 100644 index 0000000000..25ac348e5b --- /dev/null +++ b/examples/Hello.java @@ -0,0 +1,39 @@ +/* + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ +/* + * Author of revised version: Franklyn Pinedo + * + * Adapted from Source Code in C of Tutorial/User's Guide for MPI by + * Peter Pacheco. + */ +/* + * Copyright (c) 2011 Cisco Systems, Inc. All rights reserved. 
+ * + */ + +import mpi.*; + +class Hello { + static public void main(String[] args) throws MPIException { + + + MPI.Init(args); + + int myrank = MPI.COMM_WORLD.Rank(); + int size = MPI.COMM_WORLD.Size() ; + System.out.println("Hello world from rank " + myrank + " of " + size); + + MPI.Finalize(); + } +} diff --git a/examples/Makefile b/examples/Makefile index 321fd9a8a7..0fea3460ad 100644 --- a/examples/Makefile +++ b/examples/Makefile @@ -10,6 +10,7 @@ # Copyright (c) 2004-2005 The Regents of the University of California. # All rights reserved. # Copyright (c) 2006-2007 Sun Microsystems, Inc. All rights reserved. +# Copyright (c) 2012 Los Alamos National Security, Inc. All rights reserved. # $COPYRIGHT$ # # Additional copyrights may follow @@ -26,6 +27,7 @@ CXX = mpic++ CCC = mpic++ F77 = mpif77 FC = mpif90 +JAVAC = mpijavac # Using -g is not necessary, but it is helpful for example programs, # especially if users want to examine them with debuggers. Note that @@ -40,8 +42,8 @@ FCFLAGS = -g # Example programs to build -EXAMPLES = hello_c hello_cxx hello_f77 hello_f90 \ - ring_c ring_cxx ring_f77 ring_f90 connectivity_c +EXAMPLES = hello_c hello_cxx hello_f77 hello_f90 Hello.class \ + ring_c ring_cxx ring_f77 ring_f90 connectivity_c Ring.class # Default target. Always build the C example. Only build the others # if Open MPI was build with the relevant language bindings. @@ -56,6 +58,12 @@ all: hello_c ring_c connectivity_c @ if test "`ompi_info --parsable | grep bindings:f90:yes`" != ""; then \ $(MAKE) hello_f90 ring_f90; \ fi + @ if test "`ompi_info --parsable | grep bindings:java:yes`" != ""; then \ + $(MAKE) Hello.class; \ + fi + @ if test "`ompi_info --parsable | grep bindings:java:yes`" != ""; then \ + $(MAKE) Ring.class; \ + fi # The usual "clean" target @@ -63,7 +71,7 @@ all: hello_c ring_c connectivity_c clean: rm -f $(EXAMPLES) *~ *.o -# Don't rely on default rules for the fortran examples +# Don't rely on default rules for the fortran and Java examples hello_f77: hello_f77.f $(F77) $(F77FLAGS) $^ -o $@ @@ -75,3 +83,10 @@ hello_f90: hello_f90.f90 ring_f90: ring_f90.f90 $(FC) $(FCFLAGS) $^ -o $@ +Hello.class: Hello.java + $(JAVAC) Hello.java + +Ring.class: Ring.java + $(JAVAC) Ring.java + + diff --git a/examples/Makefile.include b/examples/Makefile.include index 724ccdf605..b7cb6cc613 100644 --- a/examples/Makefile.include +++ b/examples/Makefile.include @@ -12,6 +12,7 @@ # All rights reserved. # Copyright (c) 2006 Cisco Systems, Inc. All rights reserved. # Copyright (c) 2007 Sun Microsystems, Inc. All rights reserved. +# Copyright (c) 2012 Los Alamos National Security, Inc. All rights reserved. # $COPYRIGHT$ # # Additional copyrights may follow @@ -39,4 +40,6 @@ EXTRA_DIST += \ examples/ring_cxx.cc \ examples/ring_f77.f \ examples/ring_f90.f90 \ - examples/connectivity_c.c + examples/connectivity_c.c \ + examples/Hello.java \ + examples/Ring.java diff --git a/examples/README b/examples/README index db4e36c44c..ac65e608ab 100644 --- a/examples/README +++ b/examples/README @@ -17,19 +17,21 @@ not enough. Excellent MPI tutorials are available here: Get a free account and login; you can then browse to the list of available courses. Look for the ones with "MPI" in the title. 
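+As a rough sketch of how the Java examples are built and run (assuming
+an installed Open MPI with Java bindings, and mpijavac/mpirun on your
+PATH; the process count is arbitrary):
+
+    shell$ mpijavac Hello.java
+    shell$ mpirun -np 4 java Hello
+
+Each rank prints a line of the form "Hello world from rank 0 of 4",
+in arbitrary order.
+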
-There are 2 MPI examples in this directory, each in four languages: +There are 2 MPI examples in this directory, each in five languages: - Hello world - C: hello_c.c - C++: hello_cxx.cc - F77: hello_f77.f - F90: hello_f90.f90 + C: hello_c.c + C++: hello_cxx.cc + F77: hello_f77.f + F90: hello_f90.f90 + Java: Hello.java - Send a trivial message around in a ring - C: ring_c.c - C++: ring_cxx.cc - F77: ring_f77.f - F90: ring_f90.f90 + C: ring_c.c + C++: ring_cxx.cc + F77: ring_f77.f + F90: ring_f90.f90 + Java: Ring.java - Test the connectivity between all processes C: connectivity_c.c diff --git a/examples/Ring.java b/examples/Ring.java new file mode 100644 index 0000000000..68f08b098f --- /dev/null +++ b/examples/Ring.java @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2011 Cisco Systems, Inc. All rights reserved. + * + * Simple ring test program + */ + +import mpi.* ; + +class Ring { + static public void main(String[] args) throws MPIException { + + + MPI.Init(args) ; + + int source; // Rank of sender + int dest; // Rank of receiver + int tag=50; // Tag for messages + int next; + int prev; + int message[] = new int [1]; + + int myrank = MPI.COMM_WORLD.Rank() ; + int size = MPI.COMM_WORLD.Size() ; + + /* Calculate the rank of the next process in the ring. Use the + modulus operator so that the last process "wraps around" to + rank zero. */ + + next = (myrank + 1) % size; + prev = (myrank + size - 1) % size; + + /* If we are the "master" process (i.e., MPI_COMM_WORLD rank 0), + put the number of times to go around the ring in the + message. */ + + if (0 == myrank) { + message[0] = 10; + + System.out.println("Process 0 sending " + message[0] + " to rank " + next + " (" + size + " processes in ring)"); + MPI.COMM_WORLD.Send(message, 0, 1, MPI.INT, next, tag); + } + + /* Pass the message around the ring. The exit mechanism works as + follows: the message (a positive integer) is passed around the + ring. Each time it passes rank 0, it is decremented. When + each processes receives a message containing a 0 value, it + passes the message on to the next process and then quits. By + passing the 0 message first, every process gets the 0 message + and can quit normally. 
*/ + + while (true) { + MPI.COMM_WORLD.Recv(message, 0, 1, MPI.INT, prev, tag); + + if (0 == myrank) { + --message[0]; + System.out.println("Process 0 decremented value: " + message[0]); + } + + MPI.COMM_WORLD.Send(message, 0, 1, MPI.INT, next, tag); + if (0 == message[0]) { + System.out.println("Process " + myrank + " exiting"); + break; + } + } + + /* The last process does one extra send to process 0, which needs + to be received before the program can exit */ + + if (0 == myrank) { + MPI.COMM_WORLD.Recv(message, 0, 1, MPI.INT, prev, tag); + } + + MPI.Finalize(); + } +} diff --git a/ompi/Makefile.am b/ompi/Makefile.am index 25f398e3fd..5db9128ba1 100644 --- a/ompi/Makefile.am +++ b/ompi/Makefile.am @@ -80,6 +80,7 @@ SUBDIRS = \ mpi/cxx \ mpi/f77 \ mpi/f90 \ + mpi/java \ $(MCA_ompi_FRAMEWORK_COMPONENT_DSO_SUBDIRS) \ $(OMPI_CONTRIB_SUBDIRS) @@ -92,6 +93,7 @@ DIST_SUBDIRS = \ mpi/cxx \ mpi/f77 \ mpi/f90 \ + mpi/java \ $(OMPI_MPIEXT_ALL_SUBDIRS) \ $(MCA_ompi_FRAMEWORKS_SUBDIRS) \ $(MCA_ompi_FRAMEWORK_COMPONENT_ALL_SUBDIRS) \ diff --git a/ompi/config/config_files.m4 b/ompi/config/config_files.m4 index 38ad8b1621..802bef3ba1 100644 --- a/ompi/config/config_files.m4 +++ b/ompi/config/config_files.m4 @@ -40,6 +40,7 @@ AC_DEFUN([OMPI_CONFIG_FILES],[ ompi/tools/wrappers/ompi-cxx.pc ompi/tools/wrappers/ompi-f77.pc ompi/tools/wrappers/ompi-f90.pc + ompi/tools/wrappers/mpijavac.pl ompi/tools/ortetools/Makefile ompi/tools/ompi-server/Makefile ]) diff --git a/ompi/config/ompi_setup_java.m4 b/ompi/config/ompi_setup_java.m4 new file mode 100644 index 0000000000..1fd653bbe4 --- /dev/null +++ b/ompi/config/ompi_setup_java.m4 @@ -0,0 +1,276 @@ +dnl -*- shell-script -*- +dnl +dnl Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana +dnl University Research and Technology +dnl Corporation. All rights reserved. +dnl Copyright (c) 2004-2006 The University of Tennessee and The University +dnl of Tennessee Research Foundation. All rights +dnl reserved. +dnl Copyright (c) 2004-2008 High Performance Computing Center Stuttgart, +dnl University of Stuttgart. All rights reserved. +dnl Copyright (c) 2004-2006 The Regents of the University of California. +dnl All rights reserved. +dnl Copyright (c) 2006-2012 Los Alamos National Security, LLC. All rights +dnl reserved. +dnl Copyright (c) 2007-2009 Sun Microsystems, Inc. All rights reserved. +dnl Copyright (c) 2008-2012 Cisco Systems, Inc. All rights reserved. +dnl $COPYRIGHT$ +dnl +dnl Additional copyrights may follow +dnl +dnl $HEADER$ +dnl + +# This macro is necessary to get the title to be displayed first. :-) +AC_DEFUN([OMPI_SETUP_JAVA_BANNER],[ + ompi_show_subtitle "Java compiler" +]) + +# OMPI_SETUP_JAVA() +# ---------------- +# Do everything required to setup the Java compiler. Safe to AC_REQUIRE +# this macro. 
+AC_DEFUN([OMPI_SETUP_JAVA],[ + AC_REQUIRE([OMPI_SETUP_JAVA_BANNER]) + + AC_MSG_CHECKING([if want Java bindings]) + AC_ARG_ENABLE(mpi-java, + AC_HELP_STRING([--enable-mpi-java], + [enable Java MPI bindings (default: enabled)])) + + # Only build the Java bindings if requested + if test "$enable_mpi_java" = "yes"; then + AC_MSG_RESULT([yes]) + WANT_MPI_JAVA_SUPPORT=1 + AC_MSG_CHECKING([if shared libraries are enabled]) + AS_IF([test "$enable_shared" != "yes"], + [AC_MSG_RESULT([no]) + AS_IF([test "$enable_mpi_java" = "yes"], + [AC_MSG_WARN([Java bindings cannot be built without shared libraries]) + AC_MSG_ERROR([Cannot continue])], + [AC_MSG_WARN([Java bindings will not build as they require --enable-shared]) + WANT_MPI_JAVA_SUPPORT=0])], + [AC_MSG_RESULT([yes])]) + else + AC_MSG_RESULT([no]) + WANT_MPI_JAVA_SUPPORT=0 + fi + AC_DEFINE_UNQUOTED([OMPI_WANT_JAVA_BINDINGS], [$WANT_MPI_JAVA_SUPPORT], + [do we want java mpi bindings]) + AM_CONDITIONAL(OMPI_WANT_JAVA_BINDINGS, test "$WANT_MPI_JAVA_SUPPORT" = "1") + + AC_ARG_WITH(jdk-dir, + AC_HELP_STRING([--with-jdk-dir(=DIR)], + [Location of the JDK header directory. If you use this option, do not specify --with-jdk-bindir or --with-jdk-headers.])) + AC_ARG_WITH(jdk-bindir, + AC_HELP_STRING([--with-jdk-bindir(=DIR)], + [Location of the JDK bin directory. If you use this option, you must also use --with-jdk-headers (and you must NOT use --with-jdk-dir)])) + AC_ARG_WITH(jdk-headers, + AC_HELP_STRING([--with-jdk-headers(=DIR)], + [Location of the JDK header directory. If you use this option, you must also use --with-jdk-bindir (and you must NOT use --with-jdk-dir)])) + + # Check for bozo case: ensue a directory was specified + AS_IF([test "$with_jdk_dir" = "yes" -o "$with_jdk_dir" = "no"], + [AC_MSG_WARN([Must specify a directory name for --with-jdk-dir]) + AC_MSG_ERROR([Cannot continue])]) + AS_IF([test "$with_jdk_bindir" = "yes" -o "$with_jdk_bindir" = "no"], + [AC_MSG_WARN([Must specify a directory name for --with-jdk-bindir]) + AC_MSG_ERROR([Cannot continue])]) + AS_IF([test "$with_jdk_headers" = "yes" -o "$with_jdk_headers" = "no"], + [AC_MSG_WARN([Must specify a directory name for --with-jdk-headers]) + AC_MSG_ERROR([Cannot continue])]) + + # Check for bozo case: either specify --with-jdk-dir or + # (--with-jdk-bindir, --with-jdk-headers) -- not both. + bad=0 + AS_IF([test -n "$with_jdk_dir" -a -n "$with_jdk_bindir" -o \ + -n "$with_jdk_dir" -a -n "$with_jdk_headers"],[bad=1]) + AS_IF([test -z "$with_jdk_bindir" -a -n "$with_jdk_headers" -o \ + -n "$with_jdk_bindir" -a -z "$with_jdk_headers"],[bad=1]) + AS_IF([test "$bad" = "1"], + [AC_MSG_WARN([Either specify --with-jdk-dir or both of (--with-jdk_bindir, --with-jdk-headers) -- not both.]) + AC_MSG_ERROR([Cannot continue])]) + + AS_IF([test -n "$with_jdk_dir"], + [with_jdk_bindir=$with_jdk_dir/bin + with_jdk_headers=$with_jdk_dir/include]) + + ################################################################## + # with_jdk_dir can now be ignored; with_jdk_bindir and + # with_jdk_headers will be either empty or have valid values. + ################################################################## + + # Some java installations are in obscure places. So let's + # hard-code a few of the common ones so that users don't have to + # specify --with-java-=LONG_ANNOYING_DIRECTORY. 
+ AS_IF([test "$WANT_MPI_JAVA_SUPPORT" = "1" -a -z "$with_jdk_dir" \ + -a -z "$with_jdk_dir" -a -z "$with_jdk_bindir"], + [ # OS X Snow Leopard and Lion (10.6 and 10.7 -- did not + # check prior versions) + dir=/System/Library/Frameworks/JavaVM.framework/Versions/Current/Headers + AS_IF([test -d $dir], [with_jdk_headers=$dir + with_jdk_bindir=/usr/bin]) + + # Various Linux + dir='/usr/lib/jvm/java-*-openjdk-*/include/' + jnih=`ls $dir/jni.h 2>/dev/null | head -n 1` + AS_IF([test -r "$jnih"], + [with_jdk_headers=`dirname $jnih` + OPAL_WHICH([javac], [with_jdk_bindir]) + AS_IF([test -n "$with_jdk_bindir"], + [with_jdk_bindir=`dirname $with_jdk_bindir`], + [with_jdk_headers=])], + [dir='/usr/lib/jvm/default-java/include/' + jnih=`ls $dir/jni.h 2>/dev/null | head -n 1` + AS_IF([test -r "$jnih"], + [with_jdk_headers=`dirname $jnih` + OPAL_WHICH([javac], [with_jdk_bindir]) + AS_IF([test -n "$with_jdk_bindir"], + [with_jdk_bindir=`dirname $with_jdk_bindir`], + [with_jdk_headers=])])]) + + # If we think we found them, announce + AS_IF([test -n "$with_jdk_headers" -a "$with_jdk_bindir"], + [AC_MSG_NOTICE([guessing that JDK headers are in $with_jdk_headers]) + AC_MSG_NOTICE([guessing that JDK javac is in $with_jdk_bindir])]) + ]) + + # Find javac and jni.h + AS_IF([test "$WANT_MPI_JAVA_SUPPORT" = "1"], + [OMPI_CHECK_WITHDIR([jdk-bindir], [$with_jdk_bindir], [javac]) + OMPI_CHECK_WITHDIR([jdk-headers], [$with_jdk_headers], [jni.h])]) + + # Look for various Java-related programs + ompi_java_happy=no + AS_IF([test "$WANT_MPI_JAVA_SUPPORT" = "1"], + [PATH_save=$PATH + AS_IF([test -n "$with_jdk_bindir" -a "$with_jdk_bindir" != "yes" -a "$with_jdk_bindir" != "no"], + [PATH="$PATH:$with_jdk_bindir"]) + AC_PATH_PROG(JAVAC, javac) + AC_PATH_PROG(JAVAH, javah) + AC_PATH_PROG(JAR, jar) + PATH=$PATH_save + + # Check to see if we have all 3 programs. + AS_IF([test -z "$JAVAC" -o -z "$JAVAH" -o -z "$JAR"], + [ompi_java_happy=no], + [ompi_java_happy=yes]) + ]) + + # Look for jni.h + AS_IF([test "$WANT_MPI_JAVA_SUPPORT" = "1" -a "$ompi_java_happy" = "yes"], + [CPPFLAGS_save=$CPPFLAGS + AS_IF([test -n "$with_jdk_headers" -a "$with_jdk_headers" != "yes" -a "$with_jdk_headers" != "no"], + [OMPI_JDK_CPPFLAGS="-I$with_jdk_headers" + # Some flavors of JDK also require -I/linux. + # See if that's there, and if so, add a -I for that, + # too. Ugh. + AS_IF([test -d "$with_jdk_headers/linux"], + [OMPI_JDK_CPPFLAGS="$OMPI_JDK_CPPFLAGS -I$with_jdk_headers/linux"]) + CPPFLAGS="$CPPFLAGS $OMPI_JDK_CPPFLAGS"]) + AC_CHECK_HEADER([jni.h], [], + [ompi_java_happy=no]) + CPPFLAGS=$CPPFLAGS_save + ]) + AC_SUBST(OMPI_JDK_CPPFLAGS) + + # Check for pinning support + # Uncomment when ready (or delete if we don't want it) + AS_IF([test "$WANT_MPI_JAVA_SUPPORT" = "1" -a "$ompi_java_happy" = "yes"], + [dnl OMPI_JAVA_CHECK_PINNING + echo ======we should check for java pinning support here... + ]) + + # Are we happy? + AS_IF([test "$WANT_MPI_JAVA_SUPPORT" = "1" -a "$ompi_java_happy" = "no"], + [AC_MSG_WARN([Java MPI bindings requested, but unable to find proper support]) + AC_MSG_ERROR([Cannot continue])], + [AC_MSG_WARN([Java MPI bindings are provided on a provisional basis - i.e., they are not]) + AC_MSG_WARN([part of the current or proposed MPI standard. 
Continued inclusion of the]) + AC_MSG_WARN([Java bindings is contingent upon user interest and developer support])]) + + AC_CONFIG_FILES([ + ompi/mpi/java/Makefile + ompi/mpi/java/java/Makefile + ompi/mpi/java/c/Makefile + ]) +]) + +########################################################################### + +AC_DEFUN([OMPI_JAVA_CHECK_PINNING],[ +### +dnl testing if Java GC supports pinning +### +AC_MSG_CHECKING(whether Java garbage collector supports pinning) + +###################### +# JMS This has not been touched yet. It needs to be OMPI-ified. +# Change to AC_DEFINE (instead of the AC_SUBST of DEFPINS at the end) +###################### + +changequote(,) + +cat > conftest.java < conftest.c <GetIntArrayElements(env, a, &isCopy) ; + return isCopy ; +} +END + +# For AIX shared object generation: +cat > conftest.exp </src/scripts' to your path environment + variable. + + Add the directory `/lib/classes' to your CLASSPATH + environment variable. + + Add the directory `/lib' to your LD_LIBRARY_PATH + (Linux, Solaris, etc) or LIBPATH (AIX) environment variable. + +(Some of these variables may be unnecesary if you are using the +`prunjava' script.) + + step 4. Test the installation: + + make check + + +NOTE: Several of the the scripts in this release assume your target +machines share user directories (presumably through NFS or equivalent), +and have compatible system commands *and library files* installed on +all nodes (e.g. in `/usr/lib'). Although it is possible to adapt the +basic mpiJava software to more heterogeneous situations, you will need +to do more work! + + +Using the software +------------------ + +If everything goes well, you can compile and run the test programs by +issuing the command + + make check + +in the mpiJava installation directory. + +An example of how to compile and run a program: + + javac Life.java + prunjava 4 Life + +The `prunjava' script is a wrapper for the various MPI run commands. +The first argument is the number of processors on which the program will be +executed. A list of available host computers may be given in an +MPICH-style `machines' file in the local directory. + +The `prunjava' script is provided mainly for purposes of testing. It is +not very general and in real situations you will often have to modify +this script, or start the program directly using the native MPI run +commands to achieve the effect you need. + +With MPICH on some platforms you may be able to run mpiJava programs by + + mpirun java + +With this approach, you may be responsible for ensuring the remote +environment is set up correctly, e.g. by setting appropriate class +paths and library paths in your `.cshrc', `.bashrc', etc, on the remote +machines (the `prunjava' script adopts a different approach it +dynamically creates a script that sets up the required environment and +invokes the `java' command. This script is run across nodes using +`mpirun'.) + +On SP2 you might run mpiJava by + + poe java + +Some MPI environments (SunHPC 4.0) may require that the native MPI library +be preloaded into the executable command---it may not be possible to +load the native `libmpi' with the Java `System.loadLibrary()' method. +Preloading can be achieved in Solaris or Linux by setting the LD_PRELOAD +environment variable. So for example with SunHPC you may start mpiJava by: + + LD_PRELOAD=/opt/SUNWhpc/lib/libmpi.so + export LD_PRELOAD + mprun java + +(It is best to restrict of the LD_PRELOAD variable scope +by defining it only within a script, like our `prunjava'. 
Otherwise the
+library may get loaded into *every* executable you run!
+For reliable operation you should also add the `libjsig' library, where
+available, to the LD_PRELOAD variable.  See the notes below.  Check the
+source of the `mpiJava/src/scripts/prunjava' script for examples.)
+
+
+API
+---
+
+The API definition is in
+
+    mpiJava/doc/api/mpi/mpiJava-spec.ps
+
+Javadoc documentation for the API is preinstalled at
+
+    mpiJava/doc/api/mpi/package-summary.html
+
+For questions and comments, email us.
+
+
+Recommended MPI configuration options
+-------------------------------------
+
+In many cases mpiJava will work using default MPI options.  But after
+much experimentation the options recommended below have been found to
+eliminate certain failure modes.  See the technical notes below for
+more discussion.
+
+Note all `configure' options specified in this section are for the MPICH
+or LAM `configure' scripts, *not* mpiJava!
+
+
+1) Redhat Linux 7.3 + Sun SDK 1.4.1 + MPICH 1.2.5
+
+Default.
+
+
+2) Redhat Linux 7.3 + Sun SDK 1.4.1 + LAM 6.5.6
+
+Default is recommended.
+
+If, however, problems are encountered, you may try reconfiguring LAM to
+use a different signal, e.g.:
+
+    ./configure ... --with-signal=SIGIO
+
+
+3) Redhat Linux 7.3 + IBM 1.4 Java for Linux + MPICH 1.2.4
+
+MPICH must be configured to use a signal other than the default SIGUSR1,
+e.g.:
+
+    ./configure ... -listener_sig=SIGIO
+
+
+4) Redhat Linux 7.3 + IBM 1.4 Java for Linux + LAM 6.5.8
+
+LAM must be configured to use a signal other than the default SIGUSR2,
+e.g.:
+
+    ./configure ... --with-signal=SIGIO
+
+
+5) SunOS 5.8 + Sun SDK 1.4.1 + SunHPC-MPI 4
+
+Default.
+
+
+6) SunOS 5.8 + Sun SDK 1.4.1 + MPICH 1.2.4
+
+Use:
+
+    ./configure ... -cflags=-D_REENTRANT
+
+(Note: on Solaris mpiJava has been tested with MPICH built using cc.)
+
+
+7) SunOS 5.8 + Sun SDK 1.4.1 + LAM 6.5.6
+
+Use:
+
+    ./configure ... --with-cflags=-D_REENTRANT
+
+
+8) AIX 3.4 + IBM JDK 1.3.0 Java + IBM MPI (SP2/3)
+
+Default.
+
+
+9) AIX 3.4 + IBM JDK 1.3.0 Java + MPICH 1.2.5
+
+Use:
+
+    ./configure ... -cflags=-D_THREAD_SAFE
+
+Note however that certain test cases have been observed to intermittently
+hang on this platform for unknown reasons.  Its use is not recommended.
+(Note: on AIX mpiJava has been tested with MPICH built using cc.)
+
+
+    Technical Notes
+    ===============
+
+The following technical notes and case studies are largely for the benefit
+of people trying to port mpiJava to other platforms, but in some cases
+they also bear on the required configuration of the native MPI...
+
+
+Problems with Signal Handlers (mpiJava 1.2.5)
+---------------------------------------------
+
+A problem in porting mpiJava to different platforms is conflicts in
+uses of OS signal handlers by the Java Virtual Machine (and Java
+libraries) and by the native MPI implementation.
+
+Typical JVMs make use of OS signals and signal-handlers internally.
+Typical MPI implementations override the default signal handlers.
+If suitable measures are not taken, the MPI may blindly override the
+signal-handlers installed by the JVM, leading to failures.
+
+If you are using Sun's Java, we recommend upgrading to JDK 1.4 and
+setting the `LD_PRELOAD' environment variable as described in
+
+    http://java.sun.com/j2se/1.4/docs/guide/vm/signal-chaining.html
+
+For example:
+
+    export LD_PRELOAD=$JAVA_HOME/jre/lib/$JARCH/$VM/libjsig.so
+
+This resolves various intermittent bugs reported with previous versions
+of mpiJava (on many important platforms).
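+To keep the scope of LD_PRELOAD narrow, the recipe above is best put
+inside a small wrapper script rather than exported in your login
+environment.  A minimal sketch of such a prunjava-style wrapper (this
+is illustrative only, not the actual mpiJava/src/scripts/prunjava;
+the script name and the mpirun invocation are assumptions):
+
+    #!/bin/sh
+    # myprunjava <nprocs> <class> [args...]  (hypothetical name)
+    # Scope LD_PRELOAD to this run only, so libjsig is not loaded
+    # into *every* executable you start.
+    np=$1; shift
+    LD_PRELOAD=$JAVA_HOME/jre/lib/$JARCH/$VM/libjsig.so
+    export LD_PRELOAD
+    exec mpirun -np $np java "$@"
+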
+ +In some cases this option is not sufficient or not available. Sometimes +it is nevertheless possible to work around problems by saving the signal +handlers installed by JVM, and restoring them after the MPI has overriden +them. The current release of mpiJava introduces a second native library +for saving and restoring relevant signal handlers. In other cases it may +be possible and/or necessary to reconfigure MPI to use a "safe" signal. + +[In the following notes we have tried to give plausible causes for +observed problems. But appearances can be deceptive and we don't always +have access to sources of the software concerned; even where we do, +it can be very labour intensive to trace intermittent failure modes +in detail. Nevertheless we hope the workarounds we found may suggest +ones that work in other situations.] + + +KNOWN SIGNAL-HANDLING ISSUES for specific platforms, with workarounds: + +The workarounds are configured in automatically for mpiJava 1.2.5 where +appropriate, but in some cases you may have to change your native MPI +configuration to avoid conflicting signals. + + 1) Redhat Linux 7.3 + Sun SDK 1.4.1 + MPICH 1.2.5 + + Hotspot sometimes deliberately throws and catches SIGSEGV and + similar signals. `MPI_Init' overrides the JVM signal handlers + leading to intermittent failures (especially in complex recursive + code, like object serialization). With earlier versions of JDK + many mpiJava programs ran successfully despite this conflict. + + JDK 1.4 signal-chaining using `libjsig' resolves all remaining issues + we are aware of. This is configured automatically into the mpiJava + 1.2.5 `prunjava' script, if mpiJava is built with JDK 1.4. + + + 2) Redhat Linux 7.3 + Sun SDK 1.4.1 + LAM 6.5.6 + + We expect the same issues with SIGSEGV, etc as in MPICH case, which + should be resolved by using `libjsig'. + + Additionally, there is a special problem with SIGUSR2, which causes + frequent, intermittent hanging of mpiJava programs. Just loading + `libjsig' doesn't resolve this problem (the signal handlers don't + seem to chain properly?) We found empirically that restoring the + original JVM signal handler for SIGUSR2 after `MPI_Init' eliminated + problems in all our test cases. This approach is automatically + configured into mpiJava 1.2.5. + + An alternative solution is to configure LAM to use a signal + that Hotspot doesn't use, e.g.: + + ./configure ... --with-signal=SIGIO + + (Note well this is the `configure' script for LAM, *not* mpiJava! + We randomly suggested SIGIO as the alternate signal.) + + + 3) Redhat Linux 7.3 + IBM 1.4 Java for Linux + MPICH 1.2.4 + + The IBM classic JVM uses SIGUSR1, and (we found) may block this signal + during JNI calls. By default MPICH (on the default P4 device) uses + SIGUSR1 as its listener signal. This conflict causes most mpiJava + programs to hang. The only known solution is to to configure MPICH + to use a different signal, e.g: + + ./configure ... -listener_sig=SIGIO + + (Note well this is the `configure' script for MPICH, *not* mpiJava! + We randomly suggested SIGIO rather than the more obvious SIGUSR2. + SIGUSR2 mostly worked, but apparently produced conflicts in GUI-based + example codes.) + + This resolves all problems we are currently aware of. + + + 4) Redhat Linux 7.3 + IBM 1.4 Java for Linux + LAM 6.5.8 + + We had some success. But the `tests/signals/' test case and + `examples/Nozzle', `examples/potts' examples hang on some of our + installations. Configuring LAM to use e.g. 
SIGIO -- see 2), above + -- appeared to help, but we aren't certain this is a complete + solution -- we had conflicting experiences. + + For now this configuration should be considered experimental. + + + 5) SunOS 5.8 + Sun SDK 1.4.1 + SunHPC-MPI 4 + + Comments similar to the Linux MPICH case, 1). No known problems + provide the `libjsig' signal interception library is loaded. + + + 6) SunOS 5.8 + Sun SDK 1.4.1 + MPICH 1.2.5 + + Comments similar to the Linux case, 1) above, except that on Solaris + the 1.4 JVM detects the occurrence of signal chaining it doesn't like, + and insists the java option "-Xusealtsigs" be set. This is configured + automatically into the mpiJava 1.2.5 `prunjava' script. + + SEE ALSO the notes on thread safety issues, below. + + (Note: on Solaris mpiJava has been tested assuming MPICH is built + with cc.) + + + 7) SunOS 5.8 + Sun SDK 1.4.1 + LAM 6.5.6 + + Comments similar to the Linux MPICH case, 1). No known problems. + + SEE ALSO the notes on thread safety issues, below. + + + 8) AIX 3.4 + IBM JDK 1.3.0 Java + IBM MPI (SP2/3) + + The JVM sometimes deliberately throws and catches SIGTRAP signals + (in a pattern similar to SIGSEGV, etc with Hotspot?), and the SP2 + MPI apparently overrides the JVM handler. We know of no `libjsig' + analogue for this platform, but we found empirically that restoring + the original JVM signal handler for SIGTRAP after the + `System.loadLibrary(mpijava)' call eliminated problems in all our + test cases. This solution is automatically configured into mpiJava + 1.2.5. + + + 9) AIX 3.4 + IBM JDK 1.3.0 Java + MPICH 1.2.5 + + Certain test cases have been observed to intermittently hang on this + platform for unknown reasons. It's use is not recommended. + + SEE ALSO the notes on thread safety issues, below. + + (Note: on AIX the mpiJava configure script assumes MPICH is built + with cc, not GNU C.) + + +Issues of Thread Safety (mpiJava 1.2.5) +--------------------------------------- + +Most MPI implementations are not "thread-safe", and of course Java +uses threads in an essential way---even a single-threaded user program +will have system daemon threads running in the background. + +In principle this could be a serious issue for mpiJava. To make +progress we have mainly disregarded the problem, and worked on the +optimistic assumption that provided *MPI* CALLS ARE NEVER MADE +CONCURRENTLY (and, by the way, it is *your* responsibility as the mpiJava +programmer to ensure this!) interference between Java threads should +not cause problems. + +A priori this is not guaranteed. The native MPI implementation might +be making OS system calls to send messages over sockets. Daemon +threads or other user threads could also (through the standard Java +API) be concurrently making system calls (e.g. an AWT program could be +communicating with an X server). If the MPI implementation happens not +to invoke its system calls in a thread-safe way, there could still be +interference effects with the system calls invoked internally by the +other "pure Java" threads. (One example is that the MPICH +implementation relies on the `errno' variable; in principle this +could be modified by other threads.) + +We have not encountered problems that were *provably* attributable to +this kind of effect. But we *have* encountered problems with graphics +codes (e.g. `examples/Nozzle', `example/potts') running on the Solaris ++ MPICH, Solaris + LAM and AIX + MPICH platforms that look suspiciously +like this. 
With the default build of MPICH and LAM, these programs +usually fail on these platforms. + +Experimentally we found that on Solaris these problems could be eliminated by +reconfiguring MPICH to compile with the flag `-D_REENTRANT': + + ./configure ... -cflags=-D_REENTRANT + +and similarly configuring LAM as follows: + + ./configure ... --with-cflags=-D_REENTRANT + +(Note well these are the `configure' scripts for MPICH and LAM, +*not* mpiJava!) + +On AIX the corresponding recipe that worked was: + + ./configure ... -cflags=-D_THREAD_SAFE + +(Note well this is for the `configure' scripts for MPICH, not mpiJava! +Unfortunately we failed to install LAM on AIX. As noted above AIX ++ MPICH has other problems, which are unresolved.) + +We were unable to trace the detailed cause of the observed failures, so +it is not 100% certain whether this is really a thread safety issue. +But in general setting `-D_REENTRANT' on Solaris or `-D_THREAD_SAFE' +on AIX would be expected to improve the thread safety characteristics +of C code. + +Another change in this release related to thread safety is in the +implementation of the `finalize()' methods of the `Datatype', `Group', +`Op' and `Status' classes. In earlier releases of mpiJava these were +native methods that directly called the corresponding `MPI_Free' +functions. Although this wasn't observed to cause problems, in principle +it is not thread safe because the `finalize()' methods may be called in +a separate garbage collector thread. In the current release the calls +to the native methods are deferred, and invoked in the user thread when +the next MPI operation is explicitly called. + + +JVMs and "pinning" (mpiJava 1.2.3) +---------------------------------- + +The garbage collectors associated with early JVMs, such as the +"classic" JVM, supported pinning of Java arrays---fixing the arrays +to a specific physical location while a JNI call was in progress. +Several more modern JVMs (e.g. Hotspot and others) do not support +pinning. Instead JNI calls access elements of Java arrays by first obtaining +a C copy of the Java array. The elements are typically copied back +from the C array to the Java array when the JNI call returns. + +mpiJava 1.2.3 supports two approaches to message buffers, reflecting +these two JNI mechanisms---pinning or copying. If you are using a +JVM which is known to support pinning, you may wish to uncomment the +definition of the macro `GC_DOES_PINNING' in the file `src/C/mpiJava.h'. + +If this macro is left undefined---presumably meaning the garbage +collector does *not* support pinning---mpiJava will copy buffers +from and to Java arrays explicitly using `MPI_Pack' and `MPI_Unpack'. +This works well with MPICH. + +Unfortunately this strategy doesn't always work with IBM MPI, +due to an apparent difference in the semantics of `MPI_Unpack'. +Luckily it turns out that many installations of Java on AIX still use +a variant of the classic JVM, which *does* support pinning. So on AIX +it is probably safest to define the `GC_DOES_PINNING' macro. + +[Note added: the `configure' script now attempts to determine whether +the JVM supports pinning and will define the `GC_DOES_PINNING' macro in +make files, if it thinks it does.] + + +Revision History +---------------- + +Significant changes from version 1.2.4: + +1) Fixes various problems associated with signal handlers + (see discussion above). + +2) README file greatly extended to better document supported platforms and + portability issues. 
+ +3) Fixes a bug related to the behavior of `MPI_Unpack' on certain + MPI platforms. + +4) Fixed some programming errors in the `examples/potts' and + `examples/metropolis' codes. + +5) No longer use custom `jvmlauncher' for SunHPC. Instead use + LD_PRELOAD to preload -lmpi library into standard `java' command. + +6) Moves freeing of native MPI objects out of the garbage collector + thread, into MPI user thread (no particular problems were observed + with the old strategy, but in principle it isn't thread-unsafe). + + +Significant changes from version 1.2.3: + +1) Supports SunHPC version 4.0. Executable `src/bin/jvmlauncher' added. + + +Significant changes from version 1.2.2: + +1) Supports AIX + POE platform. + + +Significant changes from version 1.2.1: + +1) Major reorganization in handling communication buffers, the better to + support current JVMs, whose garbage collectors often don't implement + pinning. + +2) Fix related bug in `Sendrecv', afflicting the `Life.java' example. + +3) Fix bug reported by Jatinder Singh when `MPI.ANY_SOURCE' is used with + and `MPI.OBJECT' datatype. + +Significant changes from version 1.2: + +1) Mainly bug fixes. + + +Significant changes from version 1.1: + +1) Support for the `MPI.OBJECT' basic type (note that this release + uses default JDK serialization, which can be quite inefficient). + +2) Support for Linux platforms. + +3) Inclusion of new demo programs. + +4) Inclusion of `javadoc' documentation. + +5) Other minor changes to the API---see the spec in the `doc' directory. + +6) Bug fixes. + + +Known bugs and omissions +------------------------ + +1) The subclasses of `MPIException' documented in the mpiJava spec are still + not implemented (and in reality mpiJava methods never throw + exceptions---they generally abort the program in case of error). + +2) In general, sanity-checking method arguments is not nearly as thorough + as it should be. + + +mpiJava Directory Structure +--------------------------- + + mpiJava/ + bin/ + This directory contains binaries or installed scripts. + For NT releases, sub-directories contain Win32 Dynamic + Link Libraries (.dll). + + WMPI/ + For NT releases, contains wmpi.dll created by + compiling the JNI C stubs. The directory where the + DLL resides needs to be added to the PATH + environment variable so that it can be found at + run-time by Java. + + mpiJava.dll + + doc/ + + examples/ + metropolis/ + A Monte Carlo program + + Nozzle/ + A CFD program, with GUI + + PingPong/ + A simple benchmark, with C and Java versions + + potts/ + Another Monte Carlo program, with a GUI + + simple/ + A "Game of Life" program; a "Hello World" program. + + lib/ + For UNIX releases this directory contains shared libraries. + Class files are contained in a subdirectory. + + classes/ + The mpiJava class files live here. This directory + should be added to your CLASSPATH enviroment + variable. + + mpiJava.zip + + src/ + C/ + The JNI C stubs for mpiJava. This directory + contains the JNI C wrappers and the header files for + mpiJava. These files are compiled into a shared + (.so in UNIX) or dynamic-load-library (.dll in + Win32) that is loaded at runtime by the JVM + (loadlibary(mpiJava)) when the Java MPI interface is + used. + + Java/ + The Java interface to MPI. This directory includes + a sub-directory (mpi) holding the Java interface to + MPI. These files need to be compiled using a Java + compiler, such as javac. The resulting class files + are copied into the mpiJava/lib/classes directory. 
+ + mpi/ + + scripts/ + Various scripts for configuraing and testing mpiJava + under UNIX. + + wmpi_jni/ + See notes in `NT_INSTALL.TXT' + + release/ + + bin/ + The `jvmlauncher' program + + tests/ + ccl/ + comm/ + dtyp/ + env/ + group/ + pt2pt/ + topo/ + + + + References + ========== + +MPI Home Page: + http://www.mcs.anl.gov/mpi/index.html + +MPICH home page: + http://www.mcs.anl.gov/mpi/mpich + +LAM home page: + http://www.lam-mpi.org/ + +WMPI (an MPI for Windows NT): + http://dsg.dei.uc.pt/w32mpi/ + +Sun J2SE 1.4 download: + http://java.sun.com/j2se/1.4/download.html + +IBM Java Developer Kit for Linux: + http://www.ibm.com/java/jdk/download + + +Contributions +------------- + +From Hiromitsu Takagi: + +I'd like to inform you that we have successfully built and run it on +Digital UNIX V4.0D (OSF JDK1.1.6) / MPICH but a few modifications are +required. + +o add "-I$(JDK)/include/java -I$(JDK)/include/java/alpha" into + INCLUDE of mpiJava-1.1/src/C/Makefile + (jni.h is placed on $(JDK)/include/java/ and jni_md.h is placed on + $(JDK)/include/alpha/.) + +o set LDFLAG of mpiJava-1.1/src/C/Makefile "-shared" + +[...] +-- +Hiromitsu Takagi +Computer Science Division, Electrotechnical Laboratory + +Sep 1, 98 + + ---=+ O +=--- + +Thanks to Rutger Hofman who pointed out a bug in `Request.Waitany', +`Request.Testany' and gave corrections. + +Feb 28, 01 + + ---=+ O +=--- + + The test case in `tests/signals/' is adapted from a bug +report submitted by Sivakumar Venkata Pabolu. + +Jan 10, 03 + + diff --git a/ompi/mpi/java/c/Makefile.am b/ompi/mpi/java/c/Makefile.am new file mode 100644 index 0000000000..db8131cf31 --- /dev/null +++ b/ompi/mpi/java/c/Makefile.am @@ -0,0 +1,41 @@ +# -*- makefile -*- +# +# Copyright (c) 2011 Cisco Systems, Inc. All rights reserved. +# Copyright (c) 2012 Oracle and/or its affiliates. All rights reserved. +# $COPYRIGHT$ +# +# Additional copyrights may follow +# +# $HEADER$ +# + +if OMPI_WANT_JAVA_BINDINGS + +# Get the include files that were generated from the .java source files +AM_CPPFLAGS = -I$(top_builddir)/ompi/mpi/java/java $(OMPI_JDK_CPPFLAGS) $(LTDLINCL) + +headers = \ + mpiJava.h +ompidir = $(includedir)/openmpi/ompi/mpi/java +ompi_HEADERS = \ + $(headers) + +lib_LTLIBRARIES = libmpi_java.la +libmpi_java_la_SOURCES = \ + mpi_Cartcomm.c \ + mpi_Comm.c \ + mpi_Datatype.c \ + mpi_Errhandler.c \ + mpi_Graphcomm.c \ + mpi_Group.c \ + mpi_Intercomm.c \ + mpi_Intracomm.c \ + mpi_MPI.c \ + mpi_Op.c \ + mpi_Request.c \ + mpi_Status.c + +libmpi_java_la_LIBADD = $(top_builddir)/ompi/libmpi.la $(LIBLTDL) +libmpi_java_la_LDFLAGS = -version-info $(libmpi_java_so_version) + +endif diff --git a/ompi/mpi/java/c/mpiJava.h b/ompi/mpi/java/c/mpiJava.h new file mode 100644 index 0000000000..c1ae95ef22 --- /dev/null +++ b/ompi/mpi/java/c/mpiJava.h @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2011 Cisco Systems, Inc. All rights reserved. 
+ * + * $COPYRIGHT$ + * + * Additional copyrights may follow + * + * $HEADER$ + * + */ + + +#include "mpi.h" + +typedef struct { + jfieldID CommhandleID; + jfieldID ErrhandleID; + jfieldID GrouphandleID; + jfieldID DatatypehandleID; + jfieldID DatatypebaseTypeID; + jfieldID DatatypebaseSizeID; + jfieldID OphandleID; + jfieldID stathandleID; + jfieldID sourceID; + jfieldID tagID; + jfieldID indexID; + jfieldID elementsID; + jfieldID reqhandleID; + jfieldID opTagID; + jfieldID bufSaveID; + jfieldID countSaveID; + jfieldID offsetSaveID; + jfieldID baseTypeSaveID; + jfieldID bufbaseSaveID; + jfieldID bufptrSaveID; + jfieldID commSaveID; + jfieldID typeSaveID; + int *dt_sizes; +} ompi_java_globals_t; +extern ompi_java_globals_t ompi_java; + +void ompi_java_clearFreeList(JNIEnv*); + +void ompi_java_init_native_Datatype(void); + +void* ompi_java_getBufPtr(void** bufbase, + JNIEnv *env, jobject buf, + int baseType, int offset); + +void ompi_java_releaseBufPtr(JNIEnv *env, jobject buf, + void* bufbase, int baseType); + +void* ompi_java_getMPIWriteBuf(int* bsize, int count, + MPI_Datatype type, MPI_Comm comm); + +#ifndef GC_DOES_PINNING + +void* ompi_java_getMPIBuf(int* size, JNIEnv *env, jobject buf, int offset, + int count, MPI_Datatype type, MPI_Comm comm, + int baseType); + +void ompi_java_releaseMPIBuf(JNIEnv *env, jobject buf, int offset, + int count, MPI_Datatype type, MPI_Comm comm, + void* bufptr, int size, int baseType); + +void ompi_java_releaseMPIRecvBuf(int* elements, JNIEnv *env, jobject buf, int offset, + int count, MPI_Datatype type, MPI_Comm comm, + void* bufptr, MPI_Status* status, + int baseType); + +void ompi_java_releaseMPIReadBuf(void* bufptr); + +#endif /* GC_DOES_PINNING */ diff --git a/ompi/mpi/java/c/mpi_Cartcomm.c b/ompi/mpi/java/c/mpi_Cartcomm.c new file mode 100644 index 0000000000..c68394adaa --- /dev/null +++ b/ompi/mpi/java/c/mpi_Cartcomm.c @@ -0,0 +1,257 @@ +/* + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/
+/*
+ * File         : mpi_Cartcomm.c
+ * Headerfile   : mpi_Cartcomm.h
+ * Author       : Sung-Hoon Ko, Xinying Li
+ * Created      : Thu Apr  9 12:22:15 1998
+ * Revision     : $Revision: 1.6 $
+ * Updated      : $Date: 2003/01/16 16:39:34 $
+ * Copyright: Northeast Parallel Architectures Center
+ *            at Syracuse University 1998
+ */
+#include "ompi_config.h"
+
+#include <stdlib.h>
+#ifdef HAVE_TARGETCONDITIONALS_H
+#include <TargetConditionals.h>
+#endif
+
+#include "mpi.h"
+#include "mpi_Cartcomm.h"
+#include "mpiJava.h"
+
+/*
+ * Class:     mpi_Cartcomm
+ * Method:    Get
+ * Signature: (I)Lmpi/CartParms;
+ */
+JNIEXPORT jobject JNICALL Java_mpi_Cartcomm_Get(JNIEnv *env, jobject jthis)
+{
+    jintArray dims, coords;
+    jbooleanArray periods;
+    jint *ds, *cs;
+    jboolean *ps;
+    int *ips ;
+    jboolean isCopy1=JNI_TRUE, isCopy2=JNI_TRUE ,isCopy3=JNI_TRUE;
+    int maxdims;
+    int i ;
+
+    jclass cartparms_class=(*env)->FindClass(env,"mpi/CartParms");
+    jfieldID dimsID,periodsID,coordsID;
+    jmethodID handleConstructorID =
+        (*env)->GetMethodID(env, cartparms_class, "<init>", "()V");
+    jobject cartparms =
+        (*env)->NewObject(env,cartparms_class, handleConstructorID);
+
+    ompi_java_clearFreeList(env) ;
+
+    MPI_Cartdim_get((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)),&maxdims);
+
+    dims=(*env)->NewIntArray(env,maxdims);
+    periods=(*env)->NewBooleanArray(env,maxdims);
+    coords=(*env)->NewIntArray(env,maxdims);
+
+    ips = (int*) malloc(sizeof(int) * maxdims) ;
+
+    ds=(*env)->GetIntArrayElements(env,dims,&isCopy1);
+
+    cs=(*env)->GetIntArrayElements(env,coords,&isCopy3);
+
+    MPI_Cart_get((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)),
+                 maxdims, (int*)ds, ips, (int*)cs);
+
+    ps=(*env)->GetBooleanArrayElements(env,periods,&isCopy2);
+
+    for (i = 0 ; i < maxdims ; i++) {
+        ps [i] = ips [i] ? JNI_TRUE : JNI_FALSE ;
+    }
+
+    free(ips) ;
+
+    dimsID=(*env)->GetFieldID(env,cartparms_class,"dims","[I");
+    periodsID=(*env)->GetFieldID(env,cartparms_class,"periods","[Z");
+    coordsID=(*env)->GetFieldID(env,cartparms_class , "coords", "[I");
+
+    (*env)->SetObjectField(env, cartparms, dimsID, dims);
+    (*env)->SetObjectField(env, cartparms, periodsID, periods);
+    (*env)->SetObjectField(env, cartparms, coordsID, coords);
+
+    (*env)->ReleaseIntArrayElements(env,dims,ds,0);
+    (*env)->ReleaseBooleanArrayElements(env,periods,ps,0);
+    (*env)->ReleaseIntArrayElements(env,coords,cs,0);
+
+    return cartparms;
+}
+
+/*
+ * Class:     mpi_Cartcomm
+ * Method:    Shift
+ * Signature: (II)Lmpi/ShiftParms;
+ */
+JNIEXPORT jobject JNICALL Java_mpi_Cartcomm_Shift(JNIEnv *env, jobject jthis,
+                                                  jint direction, jint disp)
+{
+    int sr, dr;
+    jclass shiftparms_class=(*env)->FindClass(env,"mpi/ShiftParms");
+    jfieldID rsID,rdID;
+    jmethodID handleConstructorID = (*env)->GetMethodID(env,
+        shiftparms_class, "<init>", "()V");
+    jobject shiftparms=(*env)->NewObject(env,shiftparms_class,
+                                         handleConstructorID);
+
+    ompi_java_clearFreeList(env) ;
+
+    MPI_Cart_shift((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)),
+                   direction, disp, &sr, &dr);
+    rsID=(*env)->GetFieldID(env,shiftparms_class,"rank_source","I");
+    rdID=(*env)->GetFieldID(env,shiftparms_class,"rank_dest", "I");
+    (*env)->SetIntField(env, shiftparms, rsID, sr);
+    (*env)->SetIntField(env, shiftparms, rdID, dr);
+    /* printf("Shift finished.\n"); */
+    return shiftparms;
+}
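+
+/*
+ * Illustrative sketch (not part of the original bindings): Get and Shift
+ * above both follow the same JNI pattern -- construct the Java result
+ * object from C, then fill its fields by ID.  This hypothetical helper
+ * condenses the pattern for the mpi/ShiftParms case.
+ */
+static jobject sketch_make_shiftparms(JNIEnv *env, int source, int dest)
+{
+    jclass    cls   = (*env)->FindClass(env, "mpi/ShiftParms");
+    jmethodID ctor  = (*env)->GetMethodID(env, cls, "<init>", "()V");
+    jobject   parms = (*env)->NewObject(env, cls, ctor);
+    jfieldID  rsID  = (*env)->GetFieldID(env, cls, "rank_source", "I");
+    jfieldID  rdID  = (*env)->GetFieldID(env, cls, "rank_dest", "I");
+
+    (*env)->SetIntField(env, parms, rsID, source);
+    (*env)->SetIntField(env, parms, rdID, dest);
+    return parms;
+}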
+
+/*
+ * Class:     mpi_Cartcomm
+ * Method:    Coords
+ * Signature: (I)[I
+ */
+JNIEXPORT jintArray JNICALL Java_mpi_Cartcomm_Coords(JNIEnv *env, jobject jthis, jint rank)
+{
+    jint *coords;
+    jboolean isCopy=JNI_TRUE;
+    jintArray jcoords;
+    int maxdims;
+    /*
+      jclass jthis_class=(*env)->FindClass(env,"mpi/Cartcomm");
+      jfieldID maxdimsID=(*env)->GetFieldID(env,jthis_class,"maxdims","I");
+      maxdims=(*env)->GetIntField(env,jthis, maxdimsID);
+    */
+
+    ompi_java_clearFreeList(env) ;
+
+    MPI_Cartdim_get((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)),
+                    &maxdims);
+    jcoords=(*env)->NewIntArray(env,maxdims);
+    coords=(*env)->GetIntArrayElements(env,jcoords,&isCopy);
+    MPI_Cart_coords((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)),
+                    rank,maxdims,(int*)coords);
+    (*env)->ReleaseIntArrayElements(env,jcoords,coords,0);
+    return jcoords;
+}
+
+/*
+ * Class:     mpi_Cartcomm
+ * Method:    Map
+ * Signature: ([I[Z)I
+ */
+JNIEXPORT jint JNICALL Java_mpi_Cartcomm_Map(JNIEnv *env, jobject jthis,
+                                             jintArray dims, jbooleanArray periods)
+{
+    int newrank;
+    jint *ds;
+    jboolean *ps;
+    jboolean isCopy=JNI_TRUE;
+    int ndims;
+    int *int_re_ds=(int*)calloc((*env)->GetArrayLength(env,periods), sizeof(int));
+    int i;
+
+    ompi_java_clearFreeList(env) ;
+
+    ndims=(*env)->GetArrayLength(env,dims);
+    ds=(*env)->GetIntArrayElements(env,dims,&isCopy);
+    ps=(*env)->GetBooleanArrayElements(env,periods,&isCopy);
+
+    for (i=0;i<(*env)->GetArrayLength(env,periods);i++)
+        if(ps[i]==JNI_TRUE)
+            int_re_ds[i]=1;
+        else
+            int_re_ds[i]=0;
+
+    MPI_Cart_map((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)),
+                 ndims,(int*)ds,int_re_ds, &newrank);
+    (*env)->ReleaseIntArrayElements(env,dims,ds,0);
+    (*env)->ReleaseBooleanArrayElements(env,periods,ps,0);
+    free(int_re_ds);
+    return newrank;
+}
+
+/*
+ * Class:     mpi_Cartcomm
+ * Method:    Rank
+ * Signature: ([I)I
+ */
+JNIEXPORT jint JNICALL Java_mpi_Cartcomm_Rank(JNIEnv *env, jobject jthis, jintArray coords)
+{
+    int rank;
+    jint *crds;
+    jboolean isCopy=JNI_TRUE;
+
+    ompi_java_clearFreeList(env) ;
+
+    crds=(*env)->GetIntArrayElements(env,coords,&isCopy);
+    MPI_Cart_rank((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)),
+                  (int*)crds, &rank);
+    (*env)->ReleaseIntArrayElements(env,coords,crds,0);
+    return rank;
+}
+
+/*
+ * Class:     mpi_Cartcomm
+ * Method:    Sub
+ * Signature: ([Z)Lmpi/Cartcomm;
+ */
+JNIEXPORT jlong JNICALL Java_mpi_Cartcomm_sub(JNIEnv *env, jobject jthis,
+                                              jbooleanArray remain_dims)
+{
+    MPI_Comm newcomm;
+    jboolean *re_ds;
+    jboolean isCopy=JNI_TRUE;
+    int *int_re_ds=(int*)calloc((*env)->GetArrayLength(env,remain_dims), sizeof(int));
+    int i;
+
+    ompi_java_clearFreeList(env) ;
+
+    re_ds=(*env)->GetBooleanArrayElements(env,remain_dims,&isCopy);
+    for(i=0;i<(*env)->GetArrayLength(env,remain_dims);i++)
+        if(re_ds[i]==JNI_TRUE)
+            int_re_ds[i]=1;
+        else
+            int_re_ds[i]=0;
+
+    MPI_Cart_sub((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)),
+                 int_re_ds, &newcomm);
+    (*env)->ReleaseBooleanArrayElements(env,remain_dims,re_ds,0);
+
+    free(int_re_ds);
+    return (jlong)newcomm;
+}
+
+/*
+ * Class:     mpi_Cartcomm
+ * Method:    Dims_create
+ * Signature: (I[I)V
+ */
+JNIEXPORT void JNICALL Java_mpi_Cartcomm_Dims_1create(JNIEnv *env, jclass jthis,
+                                                      jint nnodes, jintArray dims )
+{
+    jint *cdims;
+    jboolean isCopy=JNI_TRUE;
+    int ndims = (*env)->GetArrayLength(env,dims) ;
+
+    ompi_java_clearFreeList(env) ;
+
+    cdims=(*env)->GetIntArrayElements(env,dims,&isCopy);
+    MPI_Dims_create(nnodes,ndims,(int*)cdims);
+    (*env)->ReleaseIntArrayElements(env,dims,cdims,0);
+}
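+
+/*
+ * Illustrative sketch (not part of the original bindings): Map and sub
+ * above both expand a Java boolean[] of flags into the int array that
+ * the MPI C interface expects.  This hypothetical helper isolates that
+ * conversion; the bindings inline the logic instead.
+ */
+static int *booleans_to_ints(JNIEnv *env, jbooleanArray flags)
+{
+    jsize     n  = (*env)->GetArrayLength(env, flags);
+    jboolean *js = (*env)->GetBooleanArrayElements(env, flags, NULL);
+    int      *is = (int *) calloc((size_t) n, sizeof(int));
+    jsize     i;
+
+    for (i = 0; i < n; i++)   /* i < n: stay inside the array */
+        is[i] = (js[i] == JNI_TRUE) ? 1 : 0;
+
+    /* JNI_ABORT: the array was only read, so skip the copy-back */
+    (*env)->ReleaseBooleanArrayElements(env, flags, js, JNI_ABORT);
+    return is;   /* caller frees */
+}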
diff --git a/ompi/mpi/java/c/mpi_Comm.c b/ompi/mpi/java/c/mpi_Comm.c
new file mode 100644
index 0000000000..13cabafe46
--- /dev/null
+++ b/ompi/mpi/java/c/mpi_Comm.c
@@ -0,0 +1,1544 @@
+/*
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+*/
+/*
+ * File         : mpi_Comm.c
+ * Headerfile   : mpi_Comm.h
+ * Author       : Sung-Hoon Ko, Xinying Li, Sang Lim, Bryan Carpenter
+ * Created      : Thu Apr  9 12:22:15 1998
+ * Revision     : $Revision: 1.17 $
+ * Updated      : $Date: 2003/01/16 16:39:34 $
+ * Copyright: Northeast Parallel Architectures Center
+ *            at Syracuse University 1998
+ */
+#include "ompi_config.h"
+#include <stdlib.h>
+#ifdef HAVE_TARGETCONDITIONALS_H
+#include <TargetConditionals.h>
+#endif
+
+#include "mpi.h"
+#include "mpi_Comm.h"
+#include "mpiJava.h"  /* must come AFTER the related .h so JNI is included */
+
+/* `getBufPtr' is used in
+   Send, Recv ...
+   for getting pointer from `jobject buf'
+*/
+void* ompi_java_getBufPtr(void** bufbase,
+                          JNIEnv *env, jobject buf,
+                          int baseType, int offset)
+{
+    jboolean isCopy ;
+    void* bufptr = 0 ;
+
+    *bufbase = 0 ;
+
+    switch (baseType) {
+    case 0:  /* NULL */
+        break;
+    case 1: {
+        jbyte* els = (*env)->GetByteArrayElements(env,buf,&isCopy) ;
+        *bufbase = els ;
+        bufptr = els + offset ;
+        break;
+    }
+    case 2: {
+        jchar* els = (*env)->GetCharArrayElements(env,buf,&isCopy) ;
+        *bufbase = els ;
+        bufptr = els + offset ;
+        break;
+    }
+    case 3: {
+        jshort* els = (*env)->GetShortArrayElements(env,buf,&isCopy) ;
+        *bufbase = els ;
+        bufptr = els + offset ;
+        break;
+    }
+    case 4: {
+        jboolean* els = (*env)->GetBooleanArrayElements(env,buf,&isCopy) ;
+        *bufbase = els ;
+        bufptr = els + offset ;
+        break;
+    }
+    case 5: {
+        jint* els = (*env)->GetIntArrayElements(env,buf,&isCopy) ;
+        *bufbase = els ;
+        bufptr = els + offset ;
+        break;
+    }
+    case 6: {
+        jlong* els = (*env)->GetLongArrayElements(env,buf,&isCopy) ;
+        *bufbase = els ;
+        bufptr = els + offset ;
+        break;
+    }
+    case 7: {
+        jfloat* els = (*env)->GetFloatArrayElements(env,buf,&isCopy) ;
+        *bufbase = els ;
+        bufptr = els + offset ;
+        break;
+    }
+    case 8: {
+        jdouble* els = (*env)->GetDoubleArrayElements(env,buf,&isCopy) ;
+        *bufbase = els ;
+        bufptr = els + offset ;
+        break;
+    }
+    case 9: {
+        jbyte* els = (*env)->GetByteArrayElements(env,buf,&isCopy) ;
+        *bufbase = els ;
+        bufptr = els + offset ;
+        break;
+    }
+    case 10:
+        break;
+    case 11:
+        break;
+    case 12: {
+        jbyte* els = (*env)->GetByteArrayElements(env,buf,&isCopy) ;
+        *bufbase = els ;
+        bufptr = els + offset ;
+        break;
+    }
+    default:
+        break;  /* `UNDEFINED' */
+    }
+
+    return bufptr ;
+}
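+
+/*
+ * Illustrative sketch (not part of the original bindings): in the
+ * GC_DOES_PINNING case, a blocking send pairs `ompi_java_getBufPtr'
+ * with `ompi_java_releaseBufPtr' around the MPI call.  Base type 1
+ * (BYTE) is assumed here for concreteness.
+ */
+static void sketch_send_bytes(JNIEnv *env, jobject buf, int offset,
+                              int count, int dest, int tag, MPI_Comm comm)
+{
+    void *bufbase;
+    void *bufptr = ompi_java_getBufPtr(&bufbase, env, buf, 1, offset);
+
+    MPI_Send(bufptr, count, MPI_BYTE, dest, tag, comm);
+    ompi_java_releaseBufPtr(env, buf, bufbase, 1);
+}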
+
+/* `releaseBufPtr' is used in
+   Send, Recv ...
+   to release pointer obtained by `getBufPtr'.
+*/
+void ompi_java_releaseBufPtr(JNIEnv *env, jobject buf,
+                             void* bufbase, int baseType)
+{
+    switch (baseType) {
+    case 0:
+        break;
+    case 1:
+        (*env)->ReleaseByteArrayElements(env,buf,(jbyte*)bufbase,0);
+        break;
+    case 2:
+        (*env)->ReleaseCharArrayElements(env,buf,(jchar*)bufbase,0);
+        break;
+    case 3:
+        (*env)->ReleaseShortArrayElements(env,buf,(jshort*)bufbase,0);
+        break;
+    case 4:
+        (*env)->ReleaseBooleanArrayElements(env,buf,(jboolean*)bufbase,0);
+        break;
+    case 5:
+        (*env)->ReleaseIntArrayElements(env,buf,(jint*)bufbase,0);
+        break;
+    case 6:
+        (*env)->ReleaseLongArrayElements(env,buf,(jlong*)bufbase,0);
+        break;
+    case 7:
+        (*env)->ReleaseFloatArrayElements(env,buf,(jfloat*)bufbase,0);
+        break;
+    case 8:
+        (*env)->ReleaseDoubleArrayElements(env,buf,(jdouble*)bufbase,0);
+        break;
+    case 9:
+        (*env)->ReleaseByteArrayElements(env,buf,(jbyte*)bufbase,0);
+        break;
+    case 10:
+        break;
+    case 11:
+        break;
+    case 12:
+        (*env)->ReleaseByteArrayElements(env,buf,(jbyte*)bufbase,0);
+        break;
+    default:
+        break;
+    }
+}
+
+#ifndef GC_DOES_PINNING
+
+/* `getBufCritical' is used in
+   getMPIBuf, releaseMPIBuf...
+   for getting pointer from `jobject buf'
+*/
+static void* getBufCritical(void** bufbase,
+                            JNIEnv *env, jobject buf,
+                            int baseType, int offset)
+{
+    jboolean isCopy ;
+    void* bufptr = 0 ;
+
+    *bufbase = (jbyte*) (*env)->GetPrimitiveArrayCritical(env,buf,&isCopy) ;
+
+    switch (baseType) {
+    case 0:
+        break;
+    case 1: {
+        bufptr = ((jbyte*) *bufbase) + offset ;
+        break;
+    }
+    case 2: {
+        bufptr = ((jchar*) *bufbase) + offset ;
+        break;
+    }
+    case 3: {
+        bufptr = ((jshort*) *bufbase) + offset ;
+        break;
+    }
+    case 4: {
+        bufptr = ((jboolean*) *bufbase) + offset ;
+        break;
+    }
+    case 5: {
+        bufptr = ((jint*) *bufbase) + offset ;
+        break;
+    }
+    case 6: {
+        bufptr = ((jlong*) *bufbase) + offset ;
+        break;
+    }
+    case 7: {
+        bufptr = ((jfloat*) *bufbase) + offset ;
+        break;
+    }
+    case 8: {
+        bufptr = ((jdouble*) *bufbase) + offset ;
+        break;
+    }
+    case 9: {
+        bufptr = ((jbyte*) *bufbase) + offset ;
+        break;
+    }
+    case 10:
+        break;
+    case 11:
+        break;
+    case 12: {
+        bufptr = ((jbyte*) *bufbase) + offset ;
+        break;
+    }
+    default:
+        break;
+    }
+
+    return bufptr ;
+}
+
+extern MPI_Datatype Dts[] ;
+extern int* dt_sizes ;
+
+#ifndef GC_DOES_PINNING
+/*
+ * If the VM doesn't support pinning of arrays by
+ * native methods, we allocate a private buffer "by hand", and copy
+ * just the required data to that buffer.
+ *
+ * This avoids having the VM copy the whole of the Java array, when
+ * a message may only be a small portion of that array.  It also
+ * avoids potentially erroneous behaviour when we have overlapping
+ * operations on disjoint segments of the same Java array.
+ *
+ * `getMPIBuf' uses `Get/ReleasePrimitiveArrayCritical' to access the
+ * Java array, hopefully without copying, and `MPI_Pack' to copy just
+ * the required segment.
+ *
+ * (Note packed messages are sent and received using `MPI_BYTE',
+ * not `MPI_PACKED'.  In some ways `MPI_PACKED' would be more natural
+ * and flexible, but the specification of how it works is pretty
+ * vague, and the reference implementation seems inconsistent,
+ * and after spending too long trying to make it work I gave up... 
dbc) + */ + +void* ompi_java_getMPIBuf(int* bsize, JNIEnv *env, jobject buf, int offset, + int count, MPI_Datatype type, MPI_Comm comm, + int baseType) +{ + int dt_size ; + + void *bufptr, *javaptr, *bufbase ; + int pos ; + + MPI_Pack_size(count, type, comm, bsize) ; + *bsize += sizeof(int) ; + + bufptr = malloc(*bsize) ; + + MPI_Type_size(type, &dt_size) ; + ((int*) bufptr) [0] = count * dt_size ; + /* Append "elements" count to start of buffer. + * (In practise this is a count of actual number of data bytes, + * excluding packing overheads.) + */ + + pos = sizeof(int) ; + javaptr = getBufCritical(&bufbase, env, buf, baseType, offset); + if (count != 0 && *bsize != pos) /* LAM doesn't like count = 0 */ + MPI_Pack(javaptr, count, type, bufptr, *bsize, &pos, comm) ; + (*env)->ReleasePrimitiveArrayCritical(env, buf, bufbase, 0); + + return bufptr ; +} + +/* + * `releaseMPIBuf' uses `Get/ReleasePrimitiveArrayCritical' to access the + * Java array, hopefully without copying, and `MPI_Unpack' to write just + * the necessary segment. + */ +void ompi_java_releaseMPIBuf(JNIEnv *env, jobject buf, int offset, + int count, MPI_Datatype type, MPI_Comm comm, + void* bufptr, int bsize, int baseType) +{ + void *bufbase ; + int pos = sizeof(int) ; + + void *javaptr = getBufCritical(&bufbase, env, buf, baseType, offset); + MPI_Unpack(bufptr, bsize, &pos, javaptr, count, type, comm) ; + (*env)->ReleasePrimitiveArrayCritical(env, buf, bufbase, 0); + + free(bufptr) ; +} +#endif + +/* + * Optimization of `getMPIBuf' for the case of receive buffers, where + * we don't need to copy the original value. + */ +void* ompi_java_getMPIWriteBuf(int* bsize, int count, + MPI_Datatype type, MPI_Comm comm) +{ + MPI_Pack_size(count, type, comm, bsize) ; + *bsize += sizeof(int) ; + + return malloc(*bsize) ; +} + + +/* + * `releaseMPIRecvBuf' is a variant of `releaseMPIBuf' that retrieves + * the number of elements to be copied back from an `MPI_Status' object. + */ +void ompi_java_releaseMPIRecvBuf(int* elements, JNIEnv *env, jobject buf, int offset, + int count, MPI_Datatype type, MPI_Comm comm, + void* bufptr, MPI_Status* status, + int baseType) +{ + void *bufbase, *javaptr ; + int bsize ; + int pos ; + +#ifdef UNPACK_ALLOWS_SHORT_BUFFER + + MPI_Get_count(status, MPI_BYTE, &bsize) ; + + *elements = ((int*) bufptr) [0] ; + + pos = sizeof(int) ; + + javaptr = getBufCritical(&bufbase, env, buf, baseType, offset); + MPI_Unpack(bufptr, bsize, &pos, javaptr, count, type, comm) ; + (*env)->ReleasePrimitiveArrayCritical(env, buf, bufbase, 0); + +#else + + /* + * See thread "mpiJava on Sun HPC", Sep 2002, on java-mpi mailing list. + * + * MPICH and LAM allow short buffers. Sun HPC and SP2 don't. + * + * As discussed on the mailing list, this solution will fail for + * some (hopefully unusual) cases, but it is the best we can do for now. + */ + + int tsize ; + + MPI_Type_size(type, &tsize) ; + + MPI_Get_count(status, MPI_BYTE, &bsize) ; + + *elements = ((int*) bufptr) [0] ; + + pos = sizeof(int) ; + + javaptr = getBufCritical(&bufbase, env, buf, baseType, offset); + MPI_Unpack(bufptr, bsize, &pos, javaptr, *elements / tsize, type, comm) ; + (*env)->ReleasePrimitiveArrayCritical(env, buf, bufbase, 0); + +#endif /* UNPACK_ALLOWS_SHORT_BUFFER */ + + free(bufptr) ; +} + + +/* + * Optimization of `releaseMPIBuf' for the case of send buffers, where + * we don't need to write back modified data. 
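+ *
+ * For reference: the staging buffer built by `getMPIBuf' above begins
+ * with one int holding count * dt_size (the true payload size in bytes),
+ * followed by the `MPI_Pack'ed data; `releaseMPIRecvBuf' reads that
+ * leading int back as the "elements" count.  In outline (a sketch of
+ * the code above, not additional bindings code):
+ *
+ *     int pos = sizeof(int) ;
+ *     MPI_Pack_size(count, type, comm, &bsize) ;
+ *     bsize += sizeof(int) ;
+ *     bufptr = malloc(bsize) ;
+ *     ((int*) bufptr) [0] = count * dt_size ;
+ *     MPI_Pack(javaptr, count, type, bufptr, bsize, &pos, comm) ;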
+ */ +void ompi_java_releaseMPIReadBuf(void* bufptr) +{ + free(bufptr) ; +} + +#endif /* GC_DOES_PINNING */ + +/* + * Class: mpi_Comm + * Method: init + * Signature: ()V + */ +JNIEXPORT void JNICALL Java_mpi_Comm_init(JNIEnv *env, jclass thisClass) +{ + jfieldID nullHandleID = + (*env)->GetStaticFieldID(env,thisClass,"nullHandle","J"); + (*env)->SetStaticLongField(env,thisClass,nullHandleID,(jlong)MPI_COMM_NULL); + + ompi_java.CommhandleID = (*env)->GetFieldID(env,thisClass,"handle","J"); +} + +/* + * Class: mpi_Comm + * Method: GetComm + * Signature: (I)J + */ +JNIEXPORT void JNICALL Java_mpi_Comm_GetComm(JNIEnv *env, jobject jthis, + jint type) +{ + ompi_java_clearFreeList(env) ; + + switch (type) { + case 0: + (*env)->SetLongField(env,jthis, ompi_java.CommhandleID,(jlong)MPI_COMM_NULL); + break; + case 1: + (*env)->SetLongField(env,jthis, ompi_java.CommhandleID,(jlong)MPI_COMM_SELF); + break; + case 2: + (*env)->SetLongField(env,jthis, ompi_java.CommhandleID,(jlong)MPI_COMM_WORLD); + break; + } +} + + +/* + * Class: mpi_Comm + * Method: Size + * Signature: ()I + */ +JNIEXPORT jint JNICALL Java_mpi_Comm_Size(JNIEnv *env, jobject jthis) +{ + int size; + + ompi_java_clearFreeList(env) ; + + MPI_Comm_size((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)), + &size); + return size; +} + +/* + * Class: mpi_Comm + * Method: Rank + * Signature: ()I + */ +JNIEXPORT jint JNICALL Java_mpi_Comm_Rank(JNIEnv *env, jobject jthis) +{ + int rank; + + ompi_java_clearFreeList(env) ; + + MPI_Comm_rank((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)), + &rank); + return rank; +} + +/* + * Class: mpi_Comm + * Method: Dup + * Signature: ()Lmpi/Comm; + */ +JNIEXPORT jlong JNICALL Java_mpi_Comm_dup(JNIEnv *env, jobject jthis) +{ + MPI_Comm newcomm; + + ompi_java_clearFreeList(env) ; + + MPI_Comm_dup((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)), + &newcomm); + + return (jlong)newcomm; +} + +/* + * Class: mpi_Comm + * Method: Compare + * Signature: (Lmpi/Comm;Lmpi/Comm;)I + */ +JNIEXPORT jint JNICALL Java_mpi_Comm_Compare(JNIEnv *env, jclass jthis, + jobject comm1, jobject comm2) +{ + int result; + + ompi_java_clearFreeList(env) ; + + MPI_Comm_compare((MPI_Comm)((*env)->GetLongField(env,comm1,ompi_java.CommhandleID)), + (MPI_Comm)((*env)->GetLongField(env,comm2,ompi_java.CommhandleID)), + &result); + return result; +} + +/* + * Class: mpi_Comm + * Method: Free + * Signature: ()V + */ +JNIEXPORT void JNICALL Java_mpi_Comm_Free(JNIEnv *env, jobject jthis) +{ + MPI_Comm comm=(MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)); + + ompi_java_clearFreeList(env) ; + + MPI_Comm_free(&comm); + (*env)->SetLongField(env,jthis, ompi_java.CommhandleID, (jlong)MPI_COMM_NULL); +} + +/* + * Class: mpi_Comm + * Method: Is_null + * Signature: ()Z + */ +JNIEXPORT jboolean JNICALL Java_mpi_Comm_Is_1null(JNIEnv *env, jobject jthis) +{ + MPI_Comm comm; + + ompi_java_clearFreeList(env) ; + + comm=(MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)); + if (comm == MPI_COMM_NULL) { + return JNI_TRUE; + } else { + return JNI_FALSE; + } +} + +/* + * Class: mpi_Comm + * Method: group + * Signature: ()J + */ +JNIEXPORT jlong JNICALL Java_mpi_Comm_group(JNIEnv *env, jobject jthis) +{ + MPI_Group group; + + ompi_java_clearFreeList(env) ; + + MPI_Comm_group((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)), + &group); + return (jlong)group; +} + +/* + * Class: mpi_Comm + * Method: Test_inter + * Signature: ()Z + */ +JNIEXPORT jboolean JNICALL 
Java_mpi_Comm_Test_1inter(JNIEnv *env, + jobject jthis) +{ + int flag; + + ompi_java_clearFreeList(env) ; + + MPI_Comm_test_inter((MPI_Comm)((*env)->GetLongField(env,jthis, + ompi_java.CommhandleID)),&flag); + if (flag == 0) { + return JNI_FALSE; + } else { + return JNI_TRUE; + } +} + +/* + * Class: mpi_Comm + * Method: GetIntercomm + * Signature: (Lmpi/Comm;III)J + */ +JNIEXPORT jlong JNICALL Java_mpi_Comm_GetIntercomm(JNIEnv *env, jobject jthis, + jobject local_comm, jint local_leader, + jint remote_leader, jint tag) +{ + MPI_Comm newintercomm; + + ompi_java_clearFreeList(env) ; + + MPI_Intercomm_create((MPI_Comm) + (*env)->GetLongField(env,local_comm, ompi_java.CommhandleID), + local_leader, + (MPI_Comm) + (*env)->GetLongField(env,jthis,ompi_java.CommhandleID), + remote_leader, tag, &newintercomm); + return (jlong)newintercomm; +} + +/* + * Class: mpi_Comm + * Method: send + * Signature: (Ljava/lang/Object;IILmpi/Datatype;II)V + */ +JNIEXPORT void JNICALL Java_mpi_Comm_send(JNIEnv *env, jobject jthis, + jobject buf, jint offset, jint count, + jobject type, jint dest, jint tag) +{ + MPI_Comm mpi_comm = + (MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)) ; + MPI_Datatype mpi_type = + (MPI_Datatype)((*env)->GetLongField(env,type,ompi_java.DatatypehandleID)) ; + + int baseType = (*env)->GetIntField(env, type, ompi_java.DatatypebaseTypeID) ; + + void *bufptr ; + +#ifdef GC_DOES_PINNING + + void *bufbase ; + + ompi_java_clearFreeList(env) ; + + bufptr = ompi_java_getBufPtr(&bufbase, env, buf, baseType, offset) ; + MPI_Send(bufptr, count, mpi_type, dest, tag, mpi_comm) ; + ompi_java_releaseBufPtr(env, buf, bufbase, baseType) ; + +#else + + int size ; + + ompi_java_clearFreeList(env) ; + + bufptr = ompi_java_getMPIBuf(&size, env, buf, offset, + count, mpi_type, mpi_comm, baseType) ; + + MPI_Send(bufptr, size, MPI_BYTE, dest, tag, mpi_comm) ; + ompi_java_releaseMPIReadBuf(bufptr) ; + +#endif /* GC_DOES_PINNING */ + +} + +/* + * Class: mpi_Comm + * Method: Recv + * Signature: +(Ljava/lang/Object;IILmpi/Datatype;IILmpi/Status;)Lmpi/Status; + */ +JNIEXPORT jobject JNICALL Java_mpi_Comm_Recv(JNIEnv *env, jobject jthis, + jobject buf, jint offset, jint count, jobject type, + jint source, jint tag, jobject stat) +{ + MPI_Status *status = + (MPI_Status *)((*env)->GetLongField(env,stat,ompi_java.stathandleID)); + + MPI_Comm mpi_comm = + (MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)) ; + MPI_Datatype mpi_type = + (MPI_Datatype)((*env)->GetLongField(env,type,ompi_java.DatatypehandleID)) ; + + int baseType = (*env)->GetIntField(env, type, ompi_java.DatatypebaseTypeID) ; + + void *bufptr ; + +#ifdef GC_DOES_PINNING + + void *bufbase ; + + ompi_java_clearFreeList(env) ; + + bufptr = ompi_java_getBufPtr(&bufbase, env, buf, baseType, offset) ; + MPI_Recv(bufptr, count, mpi_type, source, tag, mpi_comm, status); + ompi_java_releaseBufPtr(env, buf, bufbase, baseType) ; + +#else + + int size, elements ; + + ompi_java_clearFreeList(env) ; + + bufptr = ompi_java_getMPIWriteBuf(&size, count, mpi_type, mpi_comm) ; + MPI_Recv(bufptr, size, MPI_BYTE, source, tag, mpi_comm, status); + ompi_java_releaseMPIRecvBuf(&elements, env, buf, offset, count, mpi_type, + mpi_comm, bufptr, status, baseType) ; + + (*env)->SetIntField(env, stat, ompi_java.elementsID, elements); + +#endif /* GC_DOES_PINNING */ + + (*env)->SetIntField(env, stat, ompi_java.sourceID, status->MPI_SOURCE); + (*env)->SetIntField(env, stat, ompi_java.tagID, status->MPI_TAG); + + return stat; +} + +/* + * Class: mpi_Comm + * 
Method: Sendrecv + * Signature: +(Ljava/lang/Object;IILmpi/Datatype;IILjava/lang/Object;IILmpi/Datatype;IILmpi/Status;)Lmpi/Status; + */ +JNIEXPORT jobject JNICALL Java_mpi_Comm_Sendrecv(JNIEnv *env, jobject jthis, + jobject sbuf, jint soffset, jint scount, + jobject stype, jint dest, jint stag, + jobject rbuf, jint roffset, jint rcount, + jobject rtype, jint source, jint rtag, jobject stat) +{ + MPI_Status *status = + (MPI_Status *)((*env)->GetLongField(env,stat,ompi_java.stathandleID)); + + MPI_Comm mpi_comm = + (MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)) ; + + MPI_Datatype mpi_stype = + (MPI_Datatype)((*env)->GetLongField(env,stype,ompi_java.DatatypehandleID)) ; + MPI_Datatype mpi_rtype = + (MPI_Datatype)((*env)->GetLongField(env,rtype,ompi_java.DatatypehandleID)) ; + + int sbaseType = (*env)->GetIntField(env, stype, ompi_java.DatatypebaseTypeID) ; + int rbaseType = (*env)->GetIntField(env, rtype, ompi_java.DatatypebaseTypeID) ; + + void *sbufptr, *rbufptr ; + +#ifdef GC_DOES_PINNING + + void *sbufbase, *rbufbase ; + + ompi_java_clearFreeList(env) ; + + rbufptr = ompi_java_getBufPtr(&rbufbase, env, rbuf, rbaseType, roffset) ; + sbufptr = ompi_java_getBufPtr(&sbufbase, env, sbuf, sbaseType, soffset) ; + + MPI_Sendrecv(sbufptr, scount, mpi_stype, dest, stag, + rbufptr, rcount, mpi_rtype, source, rtag, + mpi_comm, status); + + ompi_java_releaseBufPtr(env, sbuf, sbufbase, sbaseType) ; + ompi_java_releaseBufPtr(env, rbuf, rbufbase, rbaseType) ; + + /* Important to release receive buffer after send buffer, + in case actually the GC is not supporting pinning. + If order was reversed and buffers were part of *same* + Java array, copy-back of send buffer would overwrite + modifications made copying back the receive buffer! */ + +#else + + int rsize, ssize, elements ; + + ompi_java_clearFreeList(env) ; + + rbufptr = ompi_java_getMPIWriteBuf(&rsize, rcount, mpi_rtype, mpi_comm) ; + sbufptr = ompi_java_getMPIBuf(&ssize, env, sbuf, soffset, + scount, mpi_stype, mpi_comm, sbaseType) ; + + MPI_Sendrecv(sbufptr, ssize, MPI_BYTE, dest, stag, + rbufptr, rsize, MPI_BYTE, source, rtag, + mpi_comm, status); + + ompi_java_releaseMPIReadBuf(sbufptr) ; + ompi_java_releaseMPIRecvBuf(&elements, env, rbuf, roffset, rcount, mpi_rtype, + mpi_comm, rbufptr, status, rbaseType) ; + + (*env)->SetIntField(env, stat, ompi_java.elementsID, elements); + +#endif /* GC_DOES_PINNING */ + + (*env)->SetIntField(env, stat, ompi_java.sourceID, status->MPI_SOURCE); + (*env)->SetIntField(env,stat, ompi_java.tagID, status->MPI_TAG); + + return stat; +} + +/* + * Class: mpi_Comm + * Method: Sendrecv_replace + * Signature: +(Ljava/lang/Object;IILmpi/Datatype;IIILmpi/Datatype;IILmpi/Status;)Lmpi/Status; + */ +JNIEXPORT jobject JNICALL Java_mpi_Comm_Sendrecv_1replace(JNIEnv *env, + jobject jthis, + jobject buf, jint offset, jint count, jobject type, + jint dest, jint stag, jint source, jint rtag, jobject stat) +{ + MPI_Status *status = + (MPI_Status *)((*env)->GetLongField(env,stat,ompi_java.stathandleID)); + + MPI_Comm mpi_comm = + (MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)) ; + MPI_Datatype mpi_type = + (MPI_Datatype)((*env)->GetLongField(env,type,ompi_java.DatatypehandleID)) ; + + int baseType = (*env)->GetIntField(env, type, ompi_java.DatatypebaseTypeID) ; + + void *bufptr ; + +#ifdef GC_DOES_PINNING + + void *bufbase ; + + ompi_java_clearFreeList(env) ; + + bufptr = ompi_java_getBufPtr(&bufbase, env, buf, baseType, offset) ; + MPI_Sendrecv_replace(bufptr, count, mpi_type, + dest, stag, 
source, rtag, mpi_comm, status); + ompi_java_releaseBufPtr(env, buf, bufbase, baseType) ; + +#else + + int size, elements ; + + ompi_java_clearFreeList(env) ; + + bufptr = ompi_java_getMPIBuf(&size, env, buf, offset, + count, mpi_type, mpi_comm, baseType) ; + + MPI_Sendrecv_replace(bufptr, size, MPI_BYTE, + dest, stag, source, rtag, mpi_comm, status); + + ompi_java_releaseMPIRecvBuf(&elements, env, buf, offset, count, mpi_type, + mpi_comm, bufptr, status, baseType) ; + + (*env)->SetIntField(env, stat, ompi_java.elementsID, elements); + +#endif /* GC_DOES_PINNING */ + + (*env)->SetIntField(env,stat, ompi_java.sourceID, status->MPI_SOURCE); + (*env)->SetIntField(env,stat, ompi_java.tagID, status->MPI_TAG); + + return stat; +} + +/* + * Class: mpi_Comm + * Method: bsend + * Signature: (Ljava/lang/Object;IILmpi/Datatype;II)V + */ +JNIEXPORT void JNICALL Java_mpi_Comm_bsend(JNIEnv *env, jobject jthis, + jobject buf, jint offset, jint count, jobject type, + jint dest, jint tag) +{ + MPI_Comm mpi_comm = + (MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)) ; + MPI_Datatype mpi_type = + (MPI_Datatype)((*env)->GetLongField(env,type,ompi_java.DatatypehandleID)) ; + + int baseType = (*env)->GetIntField(env, type, ompi_java.DatatypebaseTypeID) ; + + void *bufptr ; + +#ifdef GC_DOES_PINNING + + void *bufbase ; + + ompi_java_clearFreeList(env) ; + + bufptr = ompi_java_getBufPtr(&bufbase, env, buf, baseType, offset) ; + MPI_Bsend(bufptr, count, mpi_type, dest, tag, mpi_comm) ; + ompi_java_releaseBufPtr(env, buf, bufbase, baseType) ; + +#else + + int size ; + + ompi_java_clearFreeList(env) ; + + bufptr = ompi_java_getMPIBuf(&size, env, buf, offset, + count, mpi_type, mpi_comm, baseType) ; + MPI_Bsend(bufptr, size, MPI_BYTE, dest, tag, mpi_comm) ; + ompi_java_releaseMPIReadBuf(bufptr) ; + +#endif /* GC_DOES_PINNING */ +} + +/* + * Class: mpi_Comm + * Method: ssend + * Signature: (Ljava/lang/Object;IILmpi/Datatype;II)V + */ +JNIEXPORT void JNICALL Java_mpi_Comm_ssend(JNIEnv *env, jobject jthis, + jobject buf, jint offset, jint count, jobject type, + jint dest, jint tag) +{ + MPI_Comm mpi_comm = + (MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)) ; + MPI_Datatype mpi_type = + (MPI_Datatype)((*env)->GetLongField(env,type,ompi_java.DatatypehandleID)) ; + + int baseType = (*env)->GetIntField(env, type, ompi_java.DatatypebaseTypeID) ; + + void *bufptr ; + +#ifdef GC_DOES_PINNING + + void *bufbase ; + + ompi_java_clearFreeList(env) ; + + bufptr = ompi_java_getBufPtr(&bufbase, env, buf, baseType, offset) ; + MPI_Ssend(bufptr, count, mpi_type, dest, tag, mpi_comm) ; + ompi_java_releaseBufPtr(env, buf, bufbase, baseType) ; + +#else + + int size ; + + ompi_java_clearFreeList(env) ; + + bufptr = ompi_java_getMPIBuf(&size, env, buf, offset, + count, mpi_type, mpi_comm, baseType) ; + MPI_Ssend(bufptr, size, MPI_BYTE, dest, tag, mpi_comm) ; + ompi_java_releaseMPIReadBuf(bufptr) ; + +#endif /* GC_DOES_PINNING */ +} + +/* + * Class: mpi_Comm + * Method: rsend + * Signature: (Ljava/lang/Object;IILmpi/Datatype;II)V + */ +JNIEXPORT void JNICALL Java_mpi_Comm_rsend(JNIEnv *env, jobject jthis, + jobject buf, jint offset, jint count, jobject type, + jint dest, jint tag) +{ + MPI_Comm mpi_comm = + (MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)) ; + MPI_Datatype mpi_type = + (MPI_Datatype)((*env)->GetLongField(env,type,ompi_java.DatatypehandleID)) ; + + int baseType = (*env)->GetIntField(env, type, ompi_java.DatatypebaseTypeID) ; + + void *bufptr ; + +#ifdef GC_DOES_PINNING + + 
void *bufbase ; + + ompi_java_clearFreeList(env) ; + + bufptr = ompi_java_getBufPtr(&bufbase, env, buf, baseType, offset) ; + MPI_Rsend(bufptr, count, mpi_type, dest, tag, mpi_comm) ; + ompi_java_releaseBufPtr(env, buf, bufbase, baseType) ; + +#else + + int size ; + + ompi_java_clearFreeList(env) ; + + bufptr = ompi_java_getMPIBuf(&size, env, buf, offset, + count, mpi_type, mpi_comm, baseType) ; + MPI_Rsend(bufptr, size, MPI_BYTE, dest, tag, mpi_comm) ; + ompi_java_releaseMPIReadBuf(bufptr) ; + +#endif /* GC_DOES_PINNING */ +} + +/* + * Class: mpi_Comm + * Method: Isend + * Signature: (Ljava/lang/Object;IILmpi/Datatype;IILmpi/Request;)Lmpi/Request; + */ +JNIEXPORT jobject JNICALL Java_mpi_Comm_Isend(JNIEnv *env, jobject jthis, + jobject buf, jint offset, jint count, jobject type, + jint dest, jint tag, jobject req) +{ + MPI_Request request; + + MPI_Comm mpi_comm = + (MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)) ; + MPI_Datatype mpi_type = + (MPI_Datatype)((*env)->GetLongField(env,type,ompi_java.DatatypehandleID)) ; + + int baseType = (*env)->GetIntField(env, type, ompi_java.DatatypebaseTypeID) ; + + void *bufptr ; + +#ifdef GC_DOES_PINNING + + void *bufbase ; + + ompi_java_clearFreeList(env) ; + + bufptr = ompi_java_getBufPtr(&bufbase, env, buf, baseType, offset) ; + + MPI_Isend(bufptr, count, mpi_type, dest, tag, mpi_comm, &request); + + /* Cache information needed to release the buffer */ + + (*env)->SetIntField(env, req, ompi_java.opTagID, 0) ; /* Request.OP_SEND */ + + (*env)->SetObjectField(env, req, ompi_java.bufSaveID, buf) ; + (*env)->SetIntField(env, req, ompi_java.baseTypeSaveID, baseType) ; + (*env)->SetLongField(env, req, ompi_java.bufbaseSaveID, (jlong) bufbase) ; + +#else + + int size ; + + ompi_java_clearFreeList(env) ; + + bufptr = ompi_java_getMPIBuf(&size, env, buf, offset, + count, mpi_type, mpi_comm, baseType) ; + + MPI_Isend(bufptr, size, MPI_BYTE, dest, tag, mpi_comm, + &request) ; + + /* Cache information needed to release the buffer */ + + (*env)->SetIntField(env, req, ompi_java.opTagID, 0) ; /* Request.OP_SEND */ + + (*env)->SetLongField(env, req, ompi_java.bufptrSaveID, (jlong) bufptr) ; + +#endif /* GC_DOES_PINNING */ + + (*env)->SetLongField(env,req,ompi_java.reqhandleID,(jlong)request); + return req; +} + +/* + * Class: mpi_Comm + * Method: Ibsend + * Signature: (Ljava/lang/Object;IILmpi/Datatype;IILmpi/Request;)Lmpi/Request; + */ +JNIEXPORT jobject JNICALL Java_mpi_Comm_Ibsend(JNIEnv *env, jobject jthis, + jobject buf, jint offset, jint count, jobject type, + jint dest, jint tag, jobject req) +{ + MPI_Request request; + + MPI_Comm mpi_comm = + (MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)) ; + MPI_Datatype mpi_type = + (MPI_Datatype)((*env)->GetLongField(env,type,ompi_java.DatatypehandleID)) ; + + int baseType = (*env)->GetIntField(env, type, ompi_java.DatatypebaseTypeID) ; + + void *bufptr ; + +#ifdef GC_DOES_PINNING + + void *bufbase ; + + ompi_java_clearFreeList(env) ; + + bufptr = ompi_java_getBufPtr(&bufbase, env, buf, baseType, offset) ; + + MPI_Ibsend(bufptr, count, mpi_type, dest, tag, mpi_comm, &request); + + /* Cache information needed to release the buffer */ + + (*env)->SetIntField(env, req, ompi_java.opTagID, 0) ; /* Request.OP_SEND */ + + (*env)->SetObjectField(env, req, ompi_java.bufSaveID, buf) ; + (*env)->SetIntField(env, req, ompi_java.baseTypeSaveID, baseType) ; + (*env)->SetLongField(env, req, ompi_java.bufbaseSaveID, (jlong) bufbase) ; + +#else + + int size ; + + ompi_java_clearFreeList(env) ; + + 
bufptr = ompi_java_getMPIBuf(&size, env, buf, offset,
+                                 count, mpi_type, mpi_comm, baseType) ;
+
+    MPI_Ibsend(bufptr, size, MPI_BYTE, dest, tag, mpi_comm,
+               &request) ;
+
+    /* Cache information needed to release the buffer */
+
+    (*env)->SetIntField(env, req, ompi_java.opTagID, 0) ;  /* Request.OP_SEND */
+
+    (*env)->SetLongField(env, req, ompi_java.bufptrSaveID, (jlong) bufptr) ;
+
+#endif /* GC_DOES_PINNING */
+
+    (*env)->SetLongField(env,req,ompi_java.reqhandleID,(jlong)request);
+    return req;
+}
+
+/*
+ * Class:     mpi_Comm
+ * Method:    Issend
+ * Signature: (Ljava/lang/Object;IILmpi/Datatype;IILmpi/Request;)Lmpi/Request;
+ */
+JNIEXPORT jobject JNICALL Java_mpi_Comm_Issend(JNIEnv *env, jobject jthis,
+                                               jobject buf, jint offset, jint count, jobject type,
+                                               jint dest, jint tag, jobject req)
+{
+    MPI_Request request;
+
+    MPI_Comm mpi_comm =
+        (MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)) ;
+    MPI_Datatype mpi_type =
+        (MPI_Datatype)((*env)->GetLongField(env,type,ompi_java.DatatypehandleID)) ;
+
+    int baseType = (*env)->GetIntField(env, type, ompi_java.DatatypebaseTypeID) ;
+
+    void *bufptr ;
+
+#ifdef GC_DOES_PINNING
+
+    void *bufbase ;
+
+    ompi_java_clearFreeList(env) ;
+
+    bufptr = ompi_java_getBufPtr(&bufbase, env, buf, baseType, offset) ;
+
+    MPI_Issend(bufptr, count, mpi_type, dest, tag, mpi_comm, &request);
+
+    /* Cache information needed to release the buffer */
+
+    (*env)->SetIntField(env, req, ompi_java.opTagID, 0) ;  /* Request.OP_SEND */
+
+    (*env)->SetObjectField(env, req, ompi_java.bufSaveID, buf) ;
+    (*env)->SetIntField(env, req, ompi_java.baseTypeSaveID, baseType) ;
+    (*env)->SetLongField(env, req, ompi_java.bufbaseSaveID, (jlong) bufbase) ;
+
+#else
+
+    int size ;
+
+    ompi_java_clearFreeList(env) ;
+
+    bufptr = ompi_java_getMPIBuf(&size, env, buf, offset,
+                                 count, mpi_type, mpi_comm, baseType) ;
+
+    MPI_Issend(bufptr, size, MPI_BYTE, dest, tag, mpi_comm,
+               &request) ;
+
+    /* Cache information needed to release the buffer */
+
+    (*env)->SetIntField(env, req, ompi_java.opTagID, 0) ;  /* Request.OP_SEND */
+
+    (*env)->SetLongField(env, req, ompi_java.bufptrSaveID, (jlong) bufptr) ;
+
+#endif /* GC_DOES_PINNING */
+
+    (*env)->SetLongField(env,req,ompi_java.reqhandleID,(jlong)request);
+    return req;
+}
+
+/*
+ * Class:     mpi_Comm
+ * Method:    Irsend
+ * Signature: (Ljava/lang/Object;IILmpi/Datatype;IILmpi/Request;)Lmpi/Request;
+ */
+JNIEXPORT jobject JNICALL Java_mpi_Comm_Irsend(JNIEnv *env, jobject jthis,
+                                               jobject buf, jint offset, jint count, jobject type,
+                                               jint dest, jint tag, jobject req)
+{
+    MPI_Request request;
+
+    MPI_Comm mpi_comm =
+        (MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)) ;
+    MPI_Datatype mpi_type =
+        (MPI_Datatype)((*env)->GetLongField(env,type,ompi_java.DatatypehandleID)) ;
+
+    int baseType = (*env)->GetIntField(env, type, ompi_java.DatatypebaseTypeID) ;
+
+    void *bufptr ;
+
+#ifdef GC_DOES_PINNING
+
+    void *bufbase ;
+
+    ompi_java_clearFreeList(env) ;
+
+    bufptr = ompi_java_getBufPtr(&bufbase, env, buf, baseType, offset) ;
+
+    MPI_Irsend(bufptr, count, mpi_type, dest, tag, mpi_comm, &request);
+
+    /* Cache information needed to release the buffer */
+
+    (*env)->SetIntField(env, req, ompi_java.opTagID, 0) ;  /* Request.OP_SEND */
+
+    (*env)->SetObjectField(env, req, ompi_java.bufSaveID, buf) ;
+    (*env)->SetIntField(env, req, ompi_java.baseTypeSaveID, baseType) ;
+    (*env)->SetLongField(env, req, ompi_java.bufbaseSaveID, (jlong) bufbase) ;
+
+#else
+
+    int size ;
+
+    ompi_java_clearFreeList(env) ;
+
+    bufptr = ompi_java_getMPIBuf(&size, 
env, buf, offset, + count, mpi_type, mpi_comm, baseType) ; + + MPI_Irsend(bufptr, size, MPI_BYTE, dest, tag, mpi_comm, + &request) ; + + /* Cache information needed to release the buffer */ + + (*env)->SetIntField(env, req, ompi_java.opTagID, 0) ; /* Request.OP_SEND */ + + (*env)->SetLongField(env, req, ompi_java.bufptrSaveID, (jlong) bufptr) ; + +#endif /* GC_DOES_PINNING */ + + (*env)->SetLongField(env,req,ompi_java.reqhandleID,(jlong)request); + return req; +} + +/* + * Class: mpi_Comm + * Method: Irecv + * Signature: (Ljava/lang/Object;IILmpi/Datatype;IILmpi/Status;)Lmpi/Status; + */ +JNIEXPORT jobject JNICALL Java_mpi_Comm_Irecv(JNIEnv *env, jobject jthis, + jobject buf, jint offset, jint count, jobject type, + jint source, jint tag, jobject req) +{ + MPI_Request request; + + MPI_Comm mpi_comm = + (MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)) ; + MPI_Datatype mpi_type = + (MPI_Datatype)((*env)->GetLongField(env,type,ompi_java.DatatypehandleID)) ; + + int baseType = (*env)->GetIntField(env, type, ompi_java.DatatypebaseTypeID) ; + + void *bufptr ; + +#ifdef GC_DOES_PINNING + + void *bufbase ; + + ompi_java_clearFreeList(env) ; + + bufptr = ompi_java_getBufPtr(&bufbase, env, buf, baseType, offset) ; + + MPI_Irecv(bufptr, count, mpi_type, source, tag, mpi_comm, &request); + + /* Cache information needed to release the buffer */ + + (*env)->SetIntField(env, req, ompi_java.opTagID, 1) ; /* Request.OP_RECV ; */ + + (*env)->SetObjectField(env, req, ompi_java.bufSaveID, buf) ; + (*env)->SetIntField(env, req, ompi_java.baseTypeSaveID, baseType) ; + + (*env)->SetLongField(env, req, ompi_java.bufbaseSaveID, (jlong) bufbase) ; + +#else + + int size ; + + ompi_java_clearFreeList(env) ; + + bufptr = ompi_java_getMPIWriteBuf(&size, count, mpi_type, mpi_comm) ; + + MPI_Irecv(bufptr, size, MPI_BYTE, source, tag, mpi_comm, + &request) ; + + /* Cache information needed to release the buffer */ + + (*env)->SetIntField(env, req, ompi_java.opTagID, 1) ; /* Request.OP_RECV ; */ + + (*env)->SetObjectField(env, req, ompi_java.bufSaveID, buf) ; + (*env)->SetIntField(env, req, ompi_java.baseTypeSaveID, baseType) ; + + (*env)->SetIntField(env, req, ompi_java.offsetSaveID, offset) ; + (*env)->SetIntField(env, req, ompi_java.countSaveID, count) ; + (*env)->SetLongField(env, req, ompi_java.typeSaveID, (jlong) mpi_type) ; + (*env)->SetLongField(env, req, ompi_java.commSaveID, (jlong) mpi_comm) ; + (*env)->SetLongField(env, req, ompi_java.bufptrSaveID, (jlong) bufptr) ; + +#endif /* GC_DOES_PINNING */ + + (*env)->SetLongField(env,req,ompi_java.reqhandleID,(jlong)request); + return req; +} + + +/* + * Class: mpi_Comm + * Method: pack + * Signature: (Ljava/lang/Object;IILmpi/Datatype;[BI)I + */ +JNIEXPORT jint JNICALL Java_mpi_Comm_pack(JNIEnv *env, jobject jthis, + jobject inbuf, jint offset, jint incount, jobject type, + jbyteArray outbuf, jint position) +{ + jboolean isCopy=JNI_TRUE; + int baseType = (*env)->GetIntField(env, type, ompi_java.DatatypebaseTypeID) ; + + jbyte* obufptr=(*env)->GetByteArrayElements(env,outbuf,&isCopy); + int outsize=(*env)->GetArrayLength(env,outbuf); + + void *ibufbase ; + void *ibufptr = ompi_java_getBufPtr(&ibufbase, env, inbuf, baseType, offset); + + ompi_java_clearFreeList(env) ; + + if(incount != 0 && outsize != (int) position) + /* LAM doesn't like count = 0 */ + MPI_Pack(ibufptr, incount, + (MPI_Datatype)((*env)->GetLongField(env,type,ompi_java.DatatypehandleID)), + obufptr, outsize, (int*)&position, + 
(MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID))); + + ompi_java_releaseBufPtr(env, inbuf, ibufbase, baseType); + + (*env)->ReleaseByteArrayElements(env,outbuf,obufptr,0); + + return position; +} + +/* + * Class: mpi_Comm + * Method: unpack + * Signature: ([BILjava/lang/Object;IILmpi/Datatype;)I + */ +JNIEXPORT jint JNICALL Java_mpi_Comm_unpack(JNIEnv *env, jobject jthis, + jbyteArray inbuf, jint position, jobject outbuf, jint offset, + jint outcount, jobject type) +{ + jboolean isCopy=JNI_TRUE; + int baseType = (*env)->GetIntField(env, type, ompi_java.DatatypebaseTypeID) ; + + void* ibufptr=(*env)->GetByteArrayElements(env,inbuf,&isCopy); + int insize=(*env)->GetArrayLength(env,inbuf); + + void *obufbase ; + void *obufptr = ompi_java_getBufPtr(&obufbase, env, outbuf, baseType, offset); + + ompi_java_clearFreeList(env) ; + + MPI_Unpack(ibufptr, + insize, (int*)&position, + obufptr, outcount, + (MPI_Datatype)((*env)->GetLongField(env,type,ompi_java.DatatypehandleID)), + (MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID))); + + (*env)->ReleaseByteArrayElements(env,inbuf,ibufptr,0); + + ompi_java_releaseBufPtr(env, outbuf, obufbase, baseType); + + return position; +} + +/* + * Class: mpi_Comm + * Method: Pack_size + * Signature: (ILmpi/Datatype;)I + */ +JNIEXPORT jint JNICALL Java_mpi_Comm_Pack_1size(JNIEnv *env, jobject jthis, + jint incount, jobject type) +{ + int size; + + ompi_java_clearFreeList(env) ; + + MPI_Pack_size(incount, + (MPI_Datatype) ((*env)->GetLongField(env,type,ompi_java.DatatypehandleID)), + (MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)), + &size); + + return size; +} + +/* + * Class: mpi_Comm + * Method: Iprobe + * Signature: (II)Lmpi/Status; + */ +JNIEXPORT jobject JNICALL Java_mpi_Comm_Iprobe(JNIEnv *env, jobject jthis, + jint source, jint tag, jobject stat) +{ + int flag; + MPI_Status *status = + (MPI_Status *)((*env)->GetLongField(env,stat,ompi_java.stathandleID)); + + ompi_java_clearFreeList(env) ; + + MPI_Iprobe(source, tag, + (MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)), + &flag, status); + + if(flag != 0) { + (*env)->SetIntField(env,stat, ompi_java.sourceID, status->MPI_SOURCE); + (*env)->SetIntField(env,stat, ompi_java.tagID, status->MPI_TAG); + + (*env)->SetIntField(env, stat, ompi_java.elementsID, -1); + + return stat; + } + else + return NULL; +} + +/* + * Class: mpi_Comm + * Method: Probe + * Signature: (II)Lmpi/Status; + */ +JNIEXPORT jobject JNICALL Java_mpi_Comm_Probe(JNIEnv *env, jobject jthis, + jint source, jint tag,jobject stat) +{ + MPI_Status *status = + (MPI_Status *)((*env)->GetLongField(env,stat,ompi_java.stathandleID)); + + ompi_java_clearFreeList(env) ; + + MPI_Probe(source, tag, + (MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)), + status); + + (*env)->SetIntField(env,stat, ompi_java.sourceID, status->MPI_SOURCE); + (*env)->SetIntField(env,stat, ompi_java.tagID, status->MPI_TAG); + + (*env)->SetIntField(env, stat, ompi_java.elementsID, -1); + + return stat; +} + +/* + * Class: mpi_Comm + * Method: Errhandler_set + * Signature: (Lmpi/Errhandler;)V + */ +JNIEXPORT void JNICALL Java_mpi_Comm_Errhandler_1set(JNIEnv *env, + jobject jthis, jobject errhandler) +{ + ompi_java_clearFreeList(env) ; + + MPI_Errhandler_set((MPI_Comm) + ((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)), + (MPI_Errhandler) + ((*env)->GetLongField(env,errhandler,ompi_java.ErrhandleID))); +} + + +/* + * Class: mpi_Comm + * Method: errorhandler_get + * Signature: ()J + */ 
+JNIEXPORT jlong JNICALL Java_mpi_Comm_errorhandler_1get(JNIEnv *env, + jobject jthis) +{ + MPI_Errhandler errhandler; + + ompi_java_clearFreeList(env) ; + + MPI_Errhandler_get((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)), + &errhandler); + return (jlong)errhandler; +} + +/* + * Class: mpi_Comm + * Method: Abort + * Signature: (I)V + */ +JNIEXPORT void JNICALL Java_mpi_Comm_Abort(JNIEnv *env, jobject jthis, + jint errorcode) +{ + ompi_java_clearFreeList(env) ; + + MPI_Abort((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)), + errorcode); +} + + +/* + * Class: mpi_Comm + * Method: Topo_test + * Signature: ()I + */ +JNIEXPORT jint JNICALL Java_mpi_Comm_Topo_1test(JNIEnv *env, jobject jthis) +{ + int status; + + ompi_java_clearFreeList(env) ; + + MPI_Topo_test((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)), + &status); + return status; +} + +/* + !! Attr_put and Attr_get are dealing with int attribute_val only now. + xli 3/26/98 +*/ + + +/* + * Class: mpi_Comm + * Method: Attr_put + * Signature: (II)V + */ +JNIEXPORT void JNICALL Java_mpi_Comm_Attr_1put(JNIEnv *env, jobject jthis, + jint keyval, jint attribute_val) +{ + ompi_java_clearFreeList(env) ; + + MPI_Attr_put((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)), + keyval, &attribute_val); +} + +/* + * Class: mpi_Comm + * Method: Attr_get + * Signature: (I)I + */ +JNIEXPORT jint JNICALL Java_mpi_Comm_Attr_1get(JNIEnv *env, jobject jthis, + jint keyval) +{ + int *attribute_val; + int flag; + + ompi_java_clearFreeList(env) ; + + MPI_Attr_get((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)), + keyval, &attribute_val, &flag); + + if (flag != 0) + return *attribute_val; + else + /* if we don't find anything, we still have to return something, + * so return zero for now + */ + return 0; +} + +/* + * Class: mpi_Comm + * Method: Attr_delete + * Signature: (I)V + */ +JNIEXPORT void JNICALL Java_mpi_Comm_Attr_1delete(JNIEnv *env, jobject jthis, + jint keyval) +{ + ompi_java_clearFreeList(env) ; + + MPI_Attr_delete((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)), + keyval); +} + +/* + * Things to do: + * + * Handle exceptions!! + */ + diff --git a/ompi/mpi/java/c/mpi_Datatype.c b/ompi/mpi/java/c/mpi_Datatype.c new file mode 100644 index 0000000000..38ae936950 --- /dev/null +++ b/ompi/mpi/java/c/mpi_Datatype.c @@ -0,0 +1,394 @@ +/* + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/
+/*
+ * File         : mpi_Datatype.c
+ * Headerfile   : mpi_Datatype.h
+ * Author       : Sung-Hoon Ko, Xinying Li, Sang Lim, Bryan Carpenter
+ * Created      : Thu Apr  9 12:22:15 1998
+ * Revision     : $Revision: 1.10 $
+ * Updated      : $Date: 2003/01/16 16:39:34 $
+ * Copyright: Northeast Parallel Architectures Center
+ *            at Syracuse University 1998
+ */
+#include "ompi_config.h"
+
+#include <stdlib.h>
+#ifdef HAVE_TARGETCONDITIONALS_H
+#include <TargetConditionals.h>
+#endif
+
+#include "mpi.h"
+#include "mpi_Datatype.h"
+#include "mpiJava.h"
+
+/*
+ * public class Datatype {
+ * private final static int UNDEFINED = -1;
+ * public final static int NULL    = 0;
+ * public final static int BYTE    = 1;
+ * public final static int CHAR    = 2;
+ *
+ * public final static int SHORT   = 3;
+ * public final static int BOOLEAN = 4;
+ * public final static int INT     = 5;
+ *
+ * public final static int LONG    = 6;
+ * public final static int FLOAT   = 7;
+ * public final static int DOUBLE  = 8;
+ *
+ * public final static int PACKED  = 9;
+ * public final static int LB      = 10;
+ * public final static int UB      = 11;
+ *
+ * public final static int OBJECT  = 12;
+ *
+ * ...
+ * }
+ */
+
+
+MPI_Datatype Dts[] = { MPI_DATATYPE_NULL, MPI_BYTE,  MPI_SHORT,
+                       MPI_SHORT,         MPI_BYTE,  MPI_INT,
+                       MPI_LONG_LONG_INT, MPI_FLOAT, MPI_DOUBLE,
+                       MPI_PACKED,        MPI_LB,    MPI_UB,
+                       MPI_BYTE };
+
+void ompi_java_init_native_Datatype(void)
+{
+    /* Initialization that can only be done after MPI_Init() has
+     * been called.  Called from `mpi_MPI.c'.
+     */
+
+    int i ;
+
+    ompi_java.dt_sizes = (int*) malloc(13 * sizeof(int)) ;
+    for (i = 1 ; i < 13 ; i++) {
+        MPI_Type_size(Dts[i], &(ompi_java.dt_sizes[i])) ;
+    }
+}
+
+/*
+ * Class:     mpi_Datatype
+ * Method:    init
+ * Signature: ()V
+ */
+JNIEXPORT void JNICALL Java_mpi_Datatype_init(JNIEnv *env, jclass thisClass)
+{
+    ompi_java.DatatypehandleID = (*env)->GetFieldID(env,thisClass,"handle","J");
+    ompi_java.DatatypebaseTypeID = (*env)->GetFieldID(env,thisClass,"baseType","I");
+    ompi_java.DatatypebaseSizeID = (*env)->GetFieldID(env,thisClass,"baseSize","I");
+}
+
+/*
+ * Class:     mpi_Datatype
+ * Method:    GetDatatype
+ * Signature: (I)J
+ */
+JNIEXPORT void JNICALL Java_mpi_Datatype_GetDatatype(JNIEnv *env, jobject jthis, jint type)
+{
+    (*env)->SetLongField(env,jthis, ompi_java.DatatypehandleID, (jlong)Dts[type]);
+}
+
+/*
+ * Class:     mpi_Datatype
+ * Method:    size
+ * Signature: ()I
+ */
+JNIEXPORT jint JNICALL Java_mpi_Datatype_size(JNIEnv *env, jobject jthis)
+{
+    int result;
+
+    ompi_java_clearFreeList(env) ;
+
+    MPI_Type_size((MPI_Datatype)((*env)->GetLongField(env,jthis,ompi_java.DatatypehandleID)),
+                  &result );
+
+    return result;
+}
+
+/*
+ * Class:     mpi_Datatype
+ * Method:    extent
+ * Signature: ()I
+ */
+JNIEXPORT jint JNICALL Java_mpi_Datatype_extent(JNIEnv *env, jobject jthis)
+{
+    MPI_Aint result;
+
+    ompi_java_clearFreeList(env) ;
+
+    MPI_Type_extent((MPI_Datatype)((*env)->GetLongField(env,jthis,ompi_java.DatatypehandleID)),
+                    &result);
+
+    return result;
+}
+
+/*
+ * Class:     mpi_Datatype
+ * Method:    lB
+ * Signature: ()I
+ */
+JNIEXPORT jint JNICALL Java_mpi_Datatype_lB(JNIEnv *env, jobject jthis)
+{
+    MPI_Aint result;
+
+    ompi_java_clearFreeList(env) ;
+
+    MPI_Type_lb((MPI_Datatype)((*env)->GetLongField(env,jthis,ompi_java.DatatypehandleID)),
+                &result);
+
+    return result;
+}
+
+/*
+ * Class:     mpi_Datatype
+ * Method:    uB
+ * Signature: ()I
+ */
+JNIEXPORT jint JNICALL Java_mpi_Datatype_uB(JNIEnv *env, jobject jthis)
+{
+    MPI_Aint result;
+
+    ompi_java_clearFreeList(env) ;
+
+    MPI_Type_ub((MPI_Datatype)((*env)->GetLongField(env,jthis,ompi_java.DatatypehandleID)),
+ &result); + + return result; +} + +/* + * Class: mpi_Datatype + * Method: commit + * Signature: ()V + */ +JNIEXPORT void JNICALL Java_mpi_Datatype_commit(JNIEnv *env, jobject jthis) +{ + MPI_Datatype type; + + ompi_java_clearFreeList(env) ; + + type=(MPI_Datatype)((*env)->GetLongField(env,jthis,ompi_java.DatatypehandleID)); + MPI_Type_commit(&type); +} + +/* + * Class: mpi_Datatype + * Method: free + * Signature: ()V + */ +JNIEXPORT void JNICALL Java_mpi_Datatype_free(JNIEnv *env, jobject jthis) +{ + MPI_Datatype type; + type=(MPI_Datatype)((*env)->GetLongField(env,jthis,ompi_java.DatatypehandleID)); + if (type != MPI_DATATYPE_NULL) { + MPI_Type_free(&type); + } +} + +/* + * Class: mpi_Datatype + * Method: GetContiguous + * Signature: (I)V + */ +JNIEXPORT void JNICALL Java_mpi_Datatype_GetContiguous(JNIEnv *env, jobject jthis, + jint count,jobject oldtype) +{ + MPI_Datatype type; + + ompi_java_clearFreeList(env) ; + + MPI_Type_contiguous(count, + (MPI_Datatype)((*env)->GetLongField(env,oldtype,ompi_java.DatatypehandleID)), + &type); + + (*env)->SetLongField(env,jthis, ompi_java.DatatypehandleID, (jlong)type); +} + +/* + * Class: mpi_Datatype + * Method: GetVector + * Signature: (III)V + */ +JNIEXPORT void JNICALL Java_mpi_Datatype_GetVector(JNIEnv *env, jobject jthis, + jint count, jint blocklength, jint stride, + jobject oldtype) +{ + MPI_Datatype type; + + ompi_java_clearFreeList(env) ; + + MPI_Type_vector(count, blocklength, stride, + (MPI_Datatype)((*env)->GetLongField(env,oldtype,ompi_java.DatatypehandleID)), + &type); + + (*env)->SetLongField(env,jthis, ompi_java.DatatypehandleID, (jlong)type); +} + +/* + * Class: mpi_Datatype + * Method: GetHvector + * Signature: (III)V + */ +JNIEXPORT void JNICALL Java_mpi_Datatype_GetHvector(JNIEnv *env, jobject jthis, + jint count, jint blocklength, jint stride, + jobject oldtype) +{ + MPI_Datatype type; + jint baseSize = (*env)->GetIntField(env, jthis, ompi_java.DatatypebaseSizeID) ; + + ompi_java_clearFreeList(env) ; + + MPI_Type_hvector(count, blocklength, baseSize * stride, + (MPI_Datatype)((*env)->GetLongField(env,oldtype,ompi_java.DatatypehandleID)), + &type); + + (*env)->SetLongField(env,jthis, ompi_java.DatatypehandleID, (jlong)type); +} + +/* + * Class: mpi_Datatype + * Method: GetIndexed + * Signature: (I[I[I)V + */ +JNIEXPORT void JNICALL Java_mpi_Datatype_GetIndexed(JNIEnv *env, jobject jthis, + jintArray blocklengths, jintArray + displacements, jobject oldtype) +{ + MPI_Datatype type; + int count=(*env)->GetArrayLength(env,blocklengths); + jboolean isCopy=JNI_TRUE; + jint *lengths; jint *disps; + + ompi_java_clearFreeList(env) ; + + lengths=(*env)->GetIntArrayElements(env,blocklengths,&isCopy); + disps = (*env)->GetIntArrayElements(env,displacements,&isCopy); + MPI_Type_indexed(count, (int*)lengths, (int*)disps, + (MPI_Datatype)((*env)->GetLongField(env,oldtype,ompi_java.DatatypehandleID)), &type); + (*env)->ReleaseIntArrayElements(env,blocklengths,lengths,0); + (*env)->ReleaseIntArrayElements(env,displacements,disps,0); + + (*env)->SetLongField(env,jthis, ompi_java.DatatypehandleID, (jlong)type); +} + +/* + * Class: mpi_Datatype + * Method: GetHindexed + * Signature: (I[I[I)V + */ +JNIEXPORT void JNICALL Java_mpi_Datatype_GetHindexed(JNIEnv *env, jobject jthis, + jintArray blocklengths, + jintArray displacements, + jobject oldtype) +{ + MPI_Datatype type ; + int count = (*env)->GetArrayLength(env,blocklengths); + jboolean isCopy ; + jint *lengths; jint *disps; + jint baseSize = (*env)->GetIntField(env, jthis, 
ompi_java.DatatypebaseSizeID) ; + MPI_Aint* cdisps ; + int i ; + + ompi_java_clearFreeList(env) ; + + lengths=(*env)->GetIntArrayElements(env,blocklengths,&isCopy); + disps = (*env)->GetIntArrayElements(env,displacements,&isCopy); + + cdisps = (MPI_Aint*) calloc(count, sizeof(MPI_Aint)) ; + for(i = 0 ; i < count ; i++) + cdisps [i] = baseSize * disps [i] ; + + MPI_Type_hindexed(count, (int*)lengths, cdisps, + (MPI_Datatype)((*env)->GetLongField(env,oldtype,ompi_java.DatatypehandleID)), + &type); + + free(cdisps) ; + + (*env)->ReleaseIntArrayElements(env,blocklengths,lengths,0); + (*env)->ReleaseIntArrayElements(env,displacements,disps,0); + + (*env)->SetLongField(env,jthis, ompi_java.DatatypehandleID, (jlong)type); +} + +/* + * Class: mpi_Datatype + * Method: GetStruct + * Signature: ([I[I[Lmpi/Datatype;ZIZI)V + */ +JNIEXPORT void JNICALL Java_mpi_Datatype_GetStruct(JNIEnv *env, jobject jthis, + jintArray blocklengths, jintArray displacements, + jobjectArray datatypes, + jboolean lbSet, jint lb, jboolean ubSet, jint ub) +{ + MPI_Datatype type; + int count, ptr, i ; + jboolean isCopy ; + jint *lengths, *disps ; + MPI_Datatype *ctypes ; + int *clengths ; + MPI_Aint *cdisps ; + jint baseSize = (*env)->GetIntField(env, jthis, ompi_java.DatatypebaseSizeID) ; + + ompi_java_clearFreeList(env) ; + + count = (*env)->GetArrayLength(env,blocklengths); + + lengths = (*env)->GetIntArrayElements(env,blocklengths,&isCopy); + disps = (*env)->GetIntArrayElements(env,displacements,&isCopy); + + /* Remove components with UNDEFINED base type, but add upper bound + and lower bound markers if required. */ + + ctypes = (MPI_Datatype*) calloc(count + 2, sizeof(MPI_Datatype)) ; + clengths = (int*) calloc(count + 2, sizeof(int)) ; + cdisps = (MPI_Aint*) calloc(count + 2, sizeof(MPI_Aint)) ; + + ptr = 0 ; + for(i = 0 ; i < count ; i++) { + jobject type = (*env)->GetObjectArrayElement(env, datatypes, i) ; + jint baseType = (*env)->GetIntField(env, type, ompi_java.DatatypebaseTypeID) ; + + if(baseType != -1) { + jlong handle = (*env)->GetLongField(env, type, ompi_java.DatatypehandleID) ; + ctypes [ptr] = (MPI_Datatype) handle ; + clengths [ptr] = lengths [i] ; + cdisps [ptr] = baseSize * disps [i] ; + ptr++ ; + } + } + if(lbSet == JNI_TRUE) { + ctypes [ptr] = MPI_LB ; + clengths [ptr] = 1 ; + cdisps [ptr] = baseSize * lb ; + ptr++ ; + } + if(ubSet == JNI_TRUE) { + ctypes [ptr] = MPI_UB ; + clengths [ptr] = 1 ; + cdisps [ptr] = baseSize * ub ; + ptr++ ; + } + + MPI_Type_struct(ptr, clengths, cdisps, ctypes, &type); + + free(cdisps); + free(clengths); + free(ctypes); + + (*env)->ReleaseIntArrayElements(env,blocklengths,lengths,0); + (*env)->ReleaseIntArrayElements(env,displacements,disps,0); + + (*env)->SetLongField(env,jthis, ompi_java.DatatypehandleID, (jlong)type); +} diff --git a/ompi/mpi/java/c/mpi_Errhandler.c b/ompi/mpi/java/c/mpi_Errhandler.c new file mode 100644 index 0000000000..94da190f36 --- /dev/null +++ b/ompi/mpi/java/c/mpi_Errhandler.c @@ -0,0 +1,62 @@ +/* + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/
+
+/*
+ * File : mpi_Errhandler.c
+ * Headerfile : mpi_Errhandler.h
+ * Author : Bryan Carpenter
+ * Created : 1999
+ * Revision : $Revision: 1.2 $
+ * Updated : $Date: 2001/08/07 16:36:15 $
+ * Copyright: Northeast Parallel Architectures Center
+ * at Syracuse University 1998
+ */
+#include "ompi_config.h"
+
+#ifdef HAVE_TARGETCONDITIONALS_H
+#include <TargetConditionals.h>
+#endif
+
+#include "mpi.h"
+#include "mpi_Errhandler.h"
+#include "mpiJava.h"
+
+jfieldID ErrhandleID;
+
+/*
+ * Class: mpi_Errhandler
+ * Method: init
+ * Signature: ()V
+ */
+JNIEXPORT void JNICALL Java_mpi_Errhandler_init(JNIEnv *env, jclass thisClass)
+{
+    ompi_java.ErrhandleID = (*env)->GetFieldID(env,thisClass,"handle","J");
+}
+
+/*
+ * Class: mpi_Errhandler
+ * Method: GetErrhandler
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_mpi_Errhandler_GetErrhandler(JNIEnv *env, jobject jthis, jint type)
+{
+    switch (type) {
+    case 0:
+        (*env)->SetLongField(env,jthis, ompi_java.ErrhandleID, (jlong)MPI_ERRORS_RETURN);
+        break;  /* do not fall through and overwrite with ERRORS_ARE_FATAL */
+    case 1:
+        (*env)->SetLongField(env,jthis, ompi_java.ErrhandleID, (jlong)MPI_ERRORS_ARE_FATAL);
+        break;
+    }
+}
+
+
diff --git a/ompi/mpi/java/c/mpi_Graphcomm.c b/ompi/mpi/java/c/mpi_Graphcomm.c
new file mode 100644
index 0000000000..9ad4450e34
--- /dev/null
+++ b/ompi/mpi/java/c/mpi_Graphcomm.c
@@ -0,0 +1,126 @@
+/*
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+/*
+ * File : mpi_Graphcomm.c
+ * Headerfile : mpi_Graphcomm.h
+ * Author : Xinying Li
+ * Created : Thu Apr 9 12:22:15 1998
+ * Revision : $Revision: 1.2 $
+ * Updated : $Date: 2003/01/16 16:39:34 $
+ * Copyright: Northeast Parallel Architectures Center
+ * at Syracuse University 1998
+ */
+#include "ompi_config.h"
+
+#ifdef HAVE_TARGETCONDITIONALS_H
+#include <TargetConditionals.h>
+#endif
+
+#include "mpi.h"
+#include "mpi_Graphcomm.h"
+#include "mpiJava.h"
+
+
+/*
+ * Class: mpi_Graphcomm
+ * Method: Get
+ * Signature: ()Lmpi/GraphParms;
+ */
+JNIEXPORT jobject JNICALL Java_mpi_Graphcomm_Get(JNIEnv *env, jobject jthis)
+{
+    jintArray index, edges;
+    jint *ind, *edg;
+    jboolean isCopy=JNI_TRUE;
+    int maxind, maxedg;
+
+    jclass graphparms_class=(*env)->FindClass(env,"mpi/GraphParms");
+    jfieldID indexID,edgesID;
+    jmethodID handleConstructorID = (*env)->GetMethodID(env,
+        graphparms_class, "<init>", "()V");
+    jobject graphparms=(*env)->NewObject(env,graphparms_class, handleConstructorID);
+
+    ompi_java_clearFreeList(env) ;
+
+    MPI_Graphdims_get((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)),&maxind,&maxedg);
+    index=(*env)->NewIntArray(env,maxind);
+    edges=(*env)->NewIntArray(env,maxedg);
+    ind=(*env)->GetIntArrayElements(env,index,&isCopy);
+    edg=(*env)->GetIntArrayElements(env,edges,&isCopy);
+
+    MPI_Graph_get((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)),
+        maxind,maxedg, (int*)ind, (int*)edg);
+
+    (*env)->ReleaseIntArrayElements(env,index,ind,0);
+    (*env)->ReleaseIntArrayElements(env,edges,edg,0);
+
+    indexID=(*env)->GetFieldID(env,graphparms_class,"index","[I");
+    edgesID=(*env)->GetFieldID(env,graphparms_class , "edges", "[I");
+
+    (*env)->SetObjectField(env, graphparms, indexID, index);
+    (*env)->SetObjectField(env, graphparms, edgesID, edges);
+
+    /* printf("Graphcomm Get finished.\n"); */
+    return graphparms;
+
+}
+
+/*
+ * Class: mpi_Graphcomm
+ * Method: Neighbours
+ * Signature: (I)[I
+ */
+JNIEXPORT jintArray JNICALL Java_mpi_Graphcomm_Neighbours(JNIEnv *env, jobject jthis, jint rank)
+{
+    jint *neighbors;
+    jboolean isCopy=JNI_TRUE;
+    jintArray jneighbors;
+    int maxns;
+
+    ompi_java_clearFreeList(env) ;
+
+    MPI_Graph_neighbors_count((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)),rank,&maxns);
+    jneighbors=(*env)->NewIntArray(env,maxns);
+    neighbors=(*env)->GetIntArrayElements(env,jneighbors,&isCopy);
+    MPI_Graph_neighbors((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)),
+        rank,maxns,(int*)neighbors);
+    (*env)->ReleaseIntArrayElements(env,jneighbors,neighbors,0);
+    return jneighbors;
+}
+
+/*
+ * Class: mpi_Graphcomm
+ * Method: Map
+ * Signature: ([I[I)I
+ */
+JNIEXPORT jint JNICALL Java_mpi_Graphcomm_Map(JNIEnv *env, jobject jthis, jintArray index, jintArray edges)
+{
+    int newrank;
+    jint *ind, *edg;
+    jboolean isCopy=JNI_TRUE;
+    int nnodes;
+
+    ompi_java_clearFreeList(env) ;
+
+    nnodes=(*env)->GetArrayLength(env,index);
+    ind=(*env)->GetIntArrayElements(env,index,&isCopy);
+    edg=(*env)->GetIntArrayElements(env,edges,&isCopy);
+
+    MPI_Graph_map((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)),
+        nnodes,(int*)ind,(int*)edg, &newrank);  /* pass the extracted elements, not the jintArray handles */
+    (*env)->ReleaseIntArrayElements(env,index,ind,0);
+    (*env)->ReleaseIntArrayElements(env,edges,edg,0);
+    return newrank;
+}
+
+
diff --git a/ompi/mpi/java/c/mpi_Group.c b/ompi/mpi/java/c/mpi_Group.c
new file mode 100644
index 0000000000..c967c98a76
--- /dev/null
+++ b/ompi/mpi/java/c/mpi_Group.c
@@ -0,0 +1,322 @@
+/*
+   Licensed under the Apache License, Version 2.0
(the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ +/* + * File : mpi_Group.c + * Headerfile : mpi_Group.h + * Author : Xinying Li + * Created : Thu Apr 9 12:22:15 1998 + * Revision : $Revision: 1.3 $ + * Updated : $Date: 2003/01/16 16:39:34 $ + * Copyright: Northeast Parallel Architectures Center + * at Syracuse University 1998 + */ + +#include "ompi_config.h" + +#include +#ifdef HAVE_TARGETCONDITIONALS_H +#include +#endif + +#include "mpi.h" +#include "mpi_Group.h" +#include "mpiJava.h" + + +/* + * Class: mpi_Group + * Method: init + * Signature: ()V + */ +JNIEXPORT void JNICALL Java_mpi_Group_init(JNIEnv *env, jclass thisClass) +{ + ompi_java.GrouphandleID = (*env)->GetFieldID(env,thisClass,"handle","J"); +} + +/* + * Class: mpi_Group + * Method: GetGroup + * Signature: (I)V + */ +JNIEXPORT void JNICALL Java_mpi_Group_GetGroup(JNIEnv *env, jobject jthis, jint type) +{ + switch (type) { + case 0: + (*env)->SetLongField(env,jthis, ompi_java.GrouphandleID, (jlong)MPI_GROUP_EMPTY); + break; + default: + break; + } +} + +/* + * Class: mpi_Group + * Method: Size + * Signature: ()I + */ +JNIEXPORT jint JNICALL Java_mpi_Group_Size(JNIEnv *env, jobject jthis) +{ + int size; + + ompi_java_clearFreeList(env) ; + + MPI_Group_size((MPI_Group)((*env)->GetLongField(env,jthis,ompi_java.GrouphandleID)), + &size); + return size; +} + +/* * Class: mpi_Group + * Method: Rank + * Signature: ()I + */ +JNIEXPORT jint JNICALL Java_mpi_Group_Rank(JNIEnv *env, jobject jthis) +{ + int rank; + + ompi_java_clearFreeList(env) ; + + MPI_Group_rank((MPI_Group)((*env)->GetLongField(env,jthis,ompi_java.GrouphandleID)), + &rank); + return rank; +} + +/* + * Class: mpi_Group + * Method: free + * Signature: ()V + */ +JNIEXPORT void JNICALL Java_mpi_Group_free(JNIEnv *env, jobject jthis) +{ + MPI_Group group=(MPI_Group)((*env)->GetLongField(env,jthis,ompi_java.GrouphandleID)); + + MPI_Group_free(&group); + (*env)->SetLongField(env,jthis, ompi_java.GrouphandleID,(jlong)MPI_GROUP_NULL); +} + +/* + * Class: mpi_Group + * Method: Translate_ranks + * Signature: (Lmpi/Group;[ILmpi/Group;)[I + */ +JNIEXPORT jintArray JNICALL Java_mpi_Group_Translate_1ranks(JNIEnv *env, jclass jthis, + jobject group1, jintArray ranks1, + jobject group2) +{ + jboolean isCopy=JNI_TRUE; + int n=(*env)->GetArrayLength(env,ranks1); + jint *rks1,*rks2; + jintArray jranks2; + + ompi_java_clearFreeList(env) ; + + rks1=(*env)->GetIntArrayElements(env,ranks1,&isCopy); + jranks2=(*env)->NewIntArray(env,n); + rks2=(*env)->GetIntArrayElements(env,jranks2,&isCopy); + MPI_Group_translate_ranks((MPI_Group)((*env)->GetLongField(env,group1,ompi_java.GrouphandleID)), + n, (int*)rks1, + (MPI_Group)((*env)->GetLongField(env,group2,ompi_java.GrouphandleID)), + (int*)rks2); + (*env)->ReleaseIntArrayElements(env,ranks1,rks1,0); + (*env)->ReleaseIntArrayElements(env,jranks2,rks2,0); + return jranks2; +} + +/* + * Class: mpi_Group + * Method: Compare + * Signature: (Lmpi/Group;Lmpi/Group;)I + */ +JNIEXPORT jint JNICALL Java_mpi_Group_Compare(JNIEnv *env, jclass jthis, + jobject group1, jobject group2) +{ + int result; + + 
ompi_java_clearFreeList(env) ;
+
+    MPI_Group_compare((MPI_Group)((*env)->GetLongField(env,group1,ompi_java.GrouphandleID)),
+        (MPI_Group)((*env)->GetLongField(env,group2,ompi_java.GrouphandleID)),
+        &result);
+    return result;
+}
+
+/*
+ * Class: mpi_Group
+ * Method: union
+ * Signature: (Lmpi/Group;Lmpi/Group;)J
+ */
+JNIEXPORT jlong JNICALL Java_mpi_Group_union(JNIEnv *env, jclass jthis,
+                                             jobject group1, jobject group2)
+{
+    MPI_Group newgroup;
+
+    ompi_java_clearFreeList(env) ;
+
+    MPI_Group_union((MPI_Group)((*env)->GetLongField(env,group1,ompi_java.GrouphandleID)),
+        (MPI_Group)((*env)->GetLongField(env,group2,ompi_java.GrouphandleID)),
+        &newgroup);
+    return (jlong)newgroup;
+}
+/*
+ * Class: mpi_Group
+ * Method: intersection
+ * Signature: (Lmpi/Group;Lmpi/Group;)J
+ */
+JNIEXPORT jlong JNICALL Java_mpi_Group_intersection(JNIEnv *env, jclass jthis,
+                                                    jobject group1, jobject group2)
+{
+    MPI_Group newgroup;
+
+    ompi_java_clearFreeList(env) ;
+
+    MPI_Group_intersection((MPI_Group)((*env)->GetLongField(env,group1,ompi_java.GrouphandleID)),
+        (MPI_Group)((*env)->GetLongField(env,group2,ompi_java.GrouphandleID)),
+        &newgroup);
+    return (jlong)newgroup;
+}
+
+/*
+ * Class: mpi_Group
+ * Method: difference
+ * Signature: (Lmpi/Group;Lmpi/Group;)J
+ */
+JNIEXPORT jlong JNICALL Java_mpi_Group_difference(JNIEnv *env, jclass jthis,
+                                                  jobject group1, jobject group2)
+{
+    MPI_Group newgroup;
+
+    ompi_java_clearFreeList(env) ;
+
+    MPI_Group_difference((MPI_Group)((*env)->GetLongField(env,group1,ompi_java.GrouphandleID)),
+        (MPI_Group)((*env)->GetLongField(env,group2,ompi_java.GrouphandleID)),
+        &newgroup);
+    return (jlong)newgroup;
+}
+
+/*
+ * Class: mpi_Group
+ * Method: incl
+ * Signature: ([I)J
+ */
+JNIEXPORT jlong JNICALL Java_mpi_Group_incl(JNIEnv *env, jobject jthis, jintArray ranks)
+{
+    int n;
+    jint *rks;
+    jboolean isCopy=JNI_TRUE;
+    MPI_Group newgroup;
+
+    ompi_java_clearFreeList(env) ;
+
+    n=(*env)->GetArrayLength(env,ranks);
+    rks=(*env)->GetIntArrayElements(env,ranks,&isCopy);
+    MPI_Group_incl((MPI_Group)((*env)->GetLongField(env,jthis,ompi_java.GrouphandleID)),
+        n, (int*)rks,
+        &newgroup);
+    (*env)->ReleaseIntArrayElements(env,ranks,rks,0);
+    return (jlong)newgroup;
+}
+
+/*
+ * Class: mpi_Group
+ * Method: excl
+ * Signature: ([I)J
+ */
+JNIEXPORT jlong JNICALL Java_mpi_Group_excl(JNIEnv *env, jobject jthis, jintArray ranks)
+{
+    int n;
+    jint *rks;
+    jboolean isCopy=JNI_TRUE;
+    MPI_Group newgroup;
+
+    ompi_java_clearFreeList(env) ;
+
+    n=(*env)->GetArrayLength(env,ranks);
+    rks=(*env)->GetIntArrayElements(env,ranks,&isCopy);
+    MPI_Group_excl((MPI_Group)((*env)->GetLongField(env,jthis,ompi_java.GrouphandleID)),
+        n, (int*)rks,
+        &newgroup);
+    (*env)->ReleaseIntArrayElements(env,ranks,rks,0);
+    return (jlong)newgroup;
+}
+
+/*
+ * Class: mpi_Group
+ * Method: range_incl
+ * Signature: ([[I)J
+ */
+JNIEXPORT jlong JNICALL Java_mpi_Group_range_1incl(JNIEnv *env, jobject jthis, jobjectArray ranges)
+{
+    int i;
+    int n=(*env)->GetArrayLength(env,ranges);
+    jboolean isCopy=JNI_TRUE;
+    MPI_Group newgroup;
+    /* jint **rngs=(jint**)calloc(n,sizeof(jint[3])); */
+    int (*rngs) [3] =(int (*) [3])calloc(n,sizeof(int[3]));
+    jintArray *jrngs=(jobject*)calloc(n,sizeof(jintArray));
+
+    ompi_java_clearFreeList(env) ;
+
+    for(i=0;i<n;i++) {
+        jint *vec;
+        jrngs[i]=(*env)->GetObjectArrayElement(env,ranges,i);
+        vec=(*env)->GetIntArrayElements(env, jrngs[i],&isCopy);
+        rngs [i] [0] = vec [0] ;
+        rngs [i] [1] = vec [1] ;
+        rngs [i] [2] = vec [2] ;
+        (*env)->ReleaseIntArrayElements(env,jrngs[i],vec,0);
+    }
+
+    MPI_Group_range_incl((MPI_Group)((*env)->GetLongField(env,jthis,ompi_java.GrouphandleID)),
+        n,rngs,&newgroup);
+
+    free(rngs);
+    free(jrngs);
+    return (jlong)newgroup;
+}
+
+/*
+ * Class: mpi_Group
+ * Method: range_excl
+ * Signature: ([[I)J
+ */
+JNIEXPORT jlong JNICALL Java_mpi_Group_range_1excl(JNIEnv *env, jobject jthis, jobjectArray ranges)
+{
+    int i;
+    int n=(*env)->GetArrayLength(env,ranges);
+    jboolean isCopy=JNI_TRUE;
+    MPI_Group newgroup;
+    /* jint **rngs=(jint**)calloc(n,sizeof(jint*)); */
+    int (*rngs) [3] =(int (*) [3])calloc(n,sizeof(int[3]));
+    jintArray *jrngs=(jobject*)calloc(n,sizeof(jintArray));
+
+    ompi_java_clearFreeList(env) ;
+
+    for(i=0;i<n;i++) {
+        jint *vec;
+        jrngs[i]=(*env)->GetObjectArrayElement(env,ranges,i);
+        vec=(*env)->GetIntArrayElements(env,
+            jrngs[i],&isCopy);
+        rngs [i] [0] = vec [0] ;
+        rngs [i] [1] = vec [1] ;
+        rngs [i] [2] = vec [2] ;
+
+        (*env)->ReleaseIntArrayElements(env,jrngs[i],vec,0);
+    }
+    MPI_Group_range_excl((MPI_Group)((*env)->GetLongField(env,jthis,ompi_java.GrouphandleID)),
+        n, rngs,&newgroup);
+
+    free(rngs);
+    free(jrngs);
+    return (jlong)newgroup;
+}
+
diff --git a/ompi/mpi/java/c/mpi_Intercomm.c b/ompi/mpi/java/c/mpi_Intercomm.c
new file mode 100644
index 0000000000..183abfd43d
--- /dev/null
+++ b/ompi/mpi/java/c/mpi_Intercomm.c
@@ -0,0 +1,81 @@
+/*
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+/*
+ * File : mpi_Intercomm.c
+ * Headerfile : mpi_Intercomm.h
+ * Author : Xinying Li
+ * Created : Thu Apr 9 12:22:15 1998
+ * Revision : $Revision: 1.3 $
+ * Updated : $Date: 2003/01/16 16:39:34 $
+ * Copyright: Northeast Parallel Architectures Center
+ * at Syracuse University 1998
+ */
+
+#include "ompi_config.h"
+
+#ifdef HAVE_TARGETCONDITIONALS_H
+#include <TargetConditionals.h>
+#endif
+
+#include "mpi.h"
+#include "mpi_Intercomm.h"
+#include "mpiJava.h"
+
+
+/*
+ * Class: mpi_Intercomm
+ * Method: Remote_size
+ * Signature: ()I
+ */
+JNIEXPORT jint JNICALL Java_mpi_Intercomm_Remote_1size(JNIEnv *env, jobject jthis)
+{
+    int size;
+
+    ompi_java_clearFreeList(env) ;
+
+    MPI_Comm_remote_size((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)),
+        &size);
+    return size;
+}
+/*
+ * Class: mpi_Intercomm
+ * Method: remote_group
+ * Signature: ()J
+ */
+JNIEXPORT jlong JNICALL Java_mpi_Intercomm_remote_1group(JNIEnv *env, jobject jthis)
+{
+    MPI_Group group;
+
+    ompi_java_clearFreeList(env) ;
+
+    MPI_Comm_remote_group((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)),
+        &group);
+    return (jlong)group;
+}
+
+/*
+ * Class: mpi_Intercomm
+ * Method: merge
+ * Signature: (Z)Lmpi/Intracomm;
+ */
+JNIEXPORT jlong JNICALL Java_mpi_Intercomm_merge(JNIEnv *env, jobject jthis, jboolean high)
+{
+    MPI_Comm newintracomm;
+
+    ompi_java_clearFreeList(env) ;
+
+    MPI_Intercomm_merge((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)), high,
+        &newintracomm);
+    return (jlong)newintracomm;
+}
diff --git a/ompi/mpi/java/c/mpi_Intracomm.c b/ompi/mpi/java/c/mpi_Intracomm.c
new file mode 100644
index 0000000000..bd65eebb22
--- /dev/null
+++ b/ompi/mpi/java/c/mpi_Intracomm.c
@@ -0,0 +1,827 @@
+/*
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+/*
+ * File : mpi_Intracomm.c
+ * Headerfile : mpi_Intracomm.h
+ * Author : Xinying Li, Bryan Carpenter
+ * Created : Thu Apr 9 12:22:15 1998
+ * Revision : $Revision: 1.10 $
+ * Updated : $Date: 2003/01/16 16:39:34 $
+ * Copyright: Northeast Parallel Architectures Center
+ * at Syracuse University 1998
+ */
+
+#include "ompi_config.h"
+
+#include <stdlib.h>
+#ifdef HAVE_TARGETCONDITIONALS_H
+#include <TargetConditionals.h>
+#endif
+
+#include "mpi.h"
+#include "mpi_Comm.h"
+#include "mpi_Intracomm.h"
+#include "mpiJava.h"
+
+
+/* Collectives are not particularly amenable to the strategies used
+ * in point-to-point to reduce copying when the GC does not support pinning.
+ *
+ * It's possibly doable, but may be too complex to be worth the effort.
+ * A general problem is that the relation between positions in the
+ * original buffer and positions in a packed buffer is not very
+ * well-defined.
+ *
+ * Collectives that use `Op' have an additional problem that the
+ * `MPI_User_function' prototype expects the actual user-specified
+ * datatype as an argument. Packing, then operating on data transferred
+ * as a more primitive datatype is not generally correct.
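+ *
+ * As an illustration (a sketch only, not part of this change): a
+ * user-defined reduction is invoked through the standard prototype
+ *
+ *     typedef void MPI_User_function(void *invec, void *inoutvec,
+ *                                    int *len, MPI_Datatype *datatype);
+ *
+ * and may legitimately interpret `invec'/`inoutvec' as arrays of the
+ * user-specified datatype. Shipping the data as packed MPI_BYTE and
+ * applying the function to the packed stream would present it with
+ * the wrong element layout.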
+ */
+
+
+extern MPI_Datatype Dts[] ;
+
+
+/*
+ * Class: mpi_Intracomm
+ * Method: split
+ * Signature: (II)J
+ */
+JNIEXPORT jlong JNICALL Java_mpi_Intracomm_split(JNIEnv *env, jobject jthis,
+                                                 jint colour, jint key)
+{
+    MPI_Comm newcomm;
+
+    ompi_java_clearFreeList(env) ;
+
+    MPI_Comm_split((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)),
+        colour, key, &newcomm);
+
+    return (jlong)newcomm;
+}
+
+/*
+ * Class: mpi_Intracomm
+ * Method: creat
+ * Signature: (Lmpi/Group;)J
+ */
+JNIEXPORT jlong JNICALL Java_mpi_Intracomm_creat(JNIEnv *env, jobject jthis,
+                                                 jobject group)
+{
+    MPI_Comm newcomm;
+
+    ompi_java_clearFreeList(env) ;
+
+    MPI_Comm_create((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)),
+        (MPI_Group)((*env)->GetLongField(env,group,ompi_java.GrouphandleID)),
+        &newcomm);
+    return (jlong)newcomm;
+}
+
+/*
+ * Class: mpi_Intracomm
+ * Method: Barrier
+ * Signature: ()V
+ */
+JNIEXPORT void JNICALL Java_mpi_Intracomm_Barrier(JNIEnv *env, jobject jthis)
+{
+    ompi_java_clearFreeList(env) ;
+
+    MPI_Barrier((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)));
+}
+
+/*
+ * Class: mpi_Intracomm
+ * Method: GetCart
+ * Signature: ([I[ZZ)J
+ */
+JNIEXPORT jlong JNICALL Java_mpi_Intracomm_GetCart(JNIEnv *env, jobject jthis,
+                                                   jintArray dims, jbooleanArray periods,
+                                                   jboolean reorder)
+{
+    MPI_Comm cart;
+    int ndims=(*env)->GetArrayLength(env,dims);
+    jboolean isCopy=JNI_TRUE;
+    jint *ds; jboolean *ps;
+    int i;
+    int *int_re_ds=(int*)calloc((*env)->GetArrayLength(env,periods),
+        sizeof(int));
+
+    ompi_java_clearFreeList(env) ;
+
+    ds=(*env)->GetIntArrayElements(env,dims,&isCopy);
+    ps=(*env)->GetBooleanArrayElements(env,periods,&isCopy);
+
+    /* note '<', not '<=': int_re_ds has exactly GetArrayLength(periods) slots */
+    for(i=0;i<(*env)->GetArrayLength(env,periods);i++)
+        if(ps[i]==JNI_TRUE)
+            int_re_ds[i]=1;
+        else
+            int_re_ds[i]=0;
+
+    MPI_Cart_create((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)),
+        ndims, (int*)ds, int_re_ds, reorder, &cart);
+    (*env)->ReleaseIntArrayElements(env,dims,ds,0);
+    (*env)->ReleaseBooleanArrayElements(env,periods,ps,0);
+    free(int_re_ds);
+    return (jlong)cart;
+}
+
+/*
+ * Class: mpi_Intracomm
+ * Method: GetGraph
+ * Signature: ([I[IZ)J
+ */
+JNIEXPORT jlong JNICALL Java_mpi_Intracomm_GetGraph(JNIEnv *env, jobject jthis,
+                                                    jintArray index, jintArray edges,
+                                                    jboolean reorder)
+{
+    MPI_Comm graph;
+    int nnodes=(*env)->GetArrayLength(env,index);
+    jboolean isCopy=JNI_TRUE;
+    jint *ind, *edg;
+
+    ompi_java_clearFreeList(env) ;
+
+    ind=(*env)->GetIntArrayElements(env,index,&isCopy);
+    edg=(*env)->GetIntArrayElements(env,edges,&isCopy);
+    MPI_Graph_create((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)),
+        nnodes, (int*)ind, (int*)edg, reorder, &graph);
+    (*env)->ReleaseIntArrayElements(env,index,ind,0);
+    (*env)->ReleaseIntArrayElements(env,edges,edg,0);
+    return (jlong)graph;
+}
+
+/*
+ * Class: mpi_Intracomm
+ * Method: bcast
+ * Signature: (Ljava/lang/Object;IILmpi/Datatype;I)V
+ */
+JNIEXPORT void JNICALL Java_mpi_Intracomm_bcast(JNIEnv *env, jobject jthis,
+                                                jobject buf, jint offset,
+                                                jint count, jobject type, jint root)
+{
+    MPI_Comm mpi_comm =
+        (MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)) ;
+    MPI_Datatype mpi_type =
+        (MPI_Datatype)((*env)->GetLongField(env,type,ompi_java.DatatypehandleID)) ;
+
+    int baseType = (*env)->GetIntField(env, type, ompi_java.DatatypebaseTypeID) ;
+
+    void *bufptr ;
+
+#ifdef GC_DOES_PINNING
+
+    void *bufbase ;
+
+    ompi_java_clearFreeList(env) ;
+
+    bufptr = ompi_java_getBufPtr(&bufbase, env, buf, baseType, offset) ;
+    MPI_Bcast(bufptr, count, mpi_type, root, mpi_comm) ;
+    ompi_java_releaseBufPtr(env, buf, bufbase, baseType) ;
+
+#else
+
+    int size ;
+
+    ompi_java_clearFreeList(env) ;
+
+    bufptr = ompi_java_getMPIBuf(&size, env, buf, offset,
+        count, mpi_type, mpi_comm, baseType) ;
+
+    MPI_Bcast(bufptr, size, MPI_BYTE, root, mpi_comm) ;
+
+    ompi_java_releaseMPIBuf(env, buf, offset, count, mpi_type, mpi_comm,
+        bufptr, size, baseType) ;
+
+#endif /* GC_DOES_PINNING */
+}
+
+
+/*
+ * Class: mpi_Intracomm
+ * Method: Gather
+ * Signature:
+ (Ljava/lang/Object;IILmpi/Datatype;Ljava/lang/Object;IILmpi/Datatype;I)V
+*/
+JNIEXPORT void JNICALL Java_mpi_Intracomm_gather(JNIEnv *env, jobject jthis,
+                                                 jobject sendbuf, jint sendoffset,
+                                                 jint sendcount, jobject sendtype,
+                                                 jobject recvbuf, jint recvoffset,
+                                                 jint recvcount, jobject recvtype,
+                                                 jint root)
+{
+    int id ;
+
+    MPI_Comm mpi_comm =
+        (MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)) ;
+
+    MPI_Datatype mpi_stype =
+        (MPI_Datatype)((*env)->GetLongField(env,sendtype,ompi_java.DatatypehandleID)) ;
+    MPI_Datatype mpi_rtype = (MPI_Datatype)
+        ((*env)->GetLongField(env, recvtype, ompi_java.DatatypehandleID)) ;
+
+    int sbaseType = (*env)->GetIntField(env, sendtype, ompi_java.DatatypebaseTypeID) ;
+    int rbaseType ;
+
+    void *sendptr, *recvptr = NULL;
+    void *sbufbase, *rbufbase ;
+
+    ompi_java_clearFreeList(env) ;
+
+    MPI_Comm_rank(mpi_comm, &id) ;
+    if(id == root) {
+        /*
+         * In principle need the "id == root" check here and elsewhere for
+         * correctness, in case arguments that are not supposed to be
+         * significant except on root are legitimately passed in as `null',
+         * say. Shouldn't produce null pointer exception.
+         *
+         * (However in this case MPICH complains if `mpi_rtype' is not defined
+         * in all processes, notwithstanding what the spec says.)
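+         *
+         * (Illustrative sketch only: a caller might legitimately write
+         *
+         *     if (rank == root)
+         *         comm.Gather(sendbuf, 0, n, MPI.INT, recvbuf, 0, n, MPI.INT, root);
+         *     else
+         *         comm.Gather(sendbuf, 0, n, MPI.INT, null, 0, 0, MPI.INT, root);
+         *
+         * so `recvbuf' may be null away from root and must not be
+         * dereferenced there -- though per the note above, `recvtype'
+         * still has to be a real datatype everywhere.)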
+ */ + + rbaseType = (*env)->GetIntField(env, recvtype, ompi_java.DatatypebaseTypeID) ; + + recvptr = ompi_java_getBufPtr(&rbufbase, + env, recvbuf, rbaseType, recvoffset) ; + } + + sendptr = ompi_java_getBufPtr(&sbufbase, env, sendbuf, sbaseType, sendoffset) ; + + MPI_Gather(sendptr, sendcount, mpi_stype, + recvptr, recvcount, mpi_rtype, root, mpi_comm) ; + + ompi_java_releaseBufPtr(env, sendbuf, sbufbase, sbaseType) ; + + if (id == root) { + ompi_java_releaseBufPtr(env, recvbuf, rbufbase, rbaseType); + } +} + +/* + * Class: mpi_Intracomm + * Method: Gatherv + * Signature: + (Ljava/lang/Object;IILmpi/Datatype;Ljava/lang/Object;I[I[ILmpi/Datatype;I)V +*/ +JNIEXPORT void JNICALL Java_mpi_Intracomm_gatherv(JNIEnv *env, jobject jthis, + jobject sendbuf, jint sendoffset, + jint sendcount, jobject sendtype, + jobject recvbuf, jint recvoffset, + jintArray recvcounts, jintArray displs, + jobject recvtype, jint root) +{ + int id ; + jint *rcount = NULL, *dps = NULL; + jboolean isCopy ; + + MPI_Comm mpi_comm = + (MPI_Comm) ((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)) ; + + MPI_Datatype mpi_stype = (MPI_Datatype) + ((*env)->GetLongField(env,sendtype,ompi_java.DatatypehandleID)) ; + MPI_Datatype mpi_rtype = mpi_stype; + + int sbaseType = (*env)->GetIntField(env, sendtype, ompi_java.DatatypebaseTypeID) ; + int rbaseType = 0; + + void *sendptr, *recvptr = NULL; + void *sbufbase, *rbufbase ; + + ompi_java_clearFreeList(env) ; + + MPI_Comm_rank(mpi_comm, &id) ; + if(id == root) { + rcount=(*env)->GetIntArrayElements(env,recvcounts,&isCopy); + dps=(*env)->GetIntArrayElements(env,displs,&isCopy); + + mpi_rtype = (MPI_Datatype) + ((*env)->GetLongField(env,recvtype,ompi_java.DatatypehandleID)) ; + + rbaseType = (*env)->GetIntField(env, recvtype, ompi_java.DatatypebaseTypeID) ; + + recvptr = ompi_java_getBufPtr(&rbufbase, + env, recvbuf, rbaseType, recvoffset) ; + } + + sendptr = ompi_java_getBufPtr(&sbufbase, env, sendbuf, sbaseType, sendoffset) ; + + MPI_Gatherv(sendptr, sendcount, mpi_stype, + recvptr, (int*) rcount, (int*) dps, mpi_rtype, + root, mpi_comm) ; + + ompi_java_releaseBufPtr(env, sendbuf, sbufbase, sbaseType) ; + if (id == root) { + ompi_java_releaseBufPtr(env, recvbuf, rbufbase, rbaseType); + } + + if (id == root) { + (*env)->ReleaseIntArrayElements(env,recvcounts,rcount,JNI_ABORT); + (*env)->ReleaseIntArrayElements(env,displs,dps,JNI_ABORT); + } +} + +/* + * Class: mpi_Intracomm + * Method: Scatter + * Signature: + (Ljava/lang/Object;IILmpi/Datatype;Ljava/lang/Object;IILmpi/Datatype;I)V +*/ +JNIEXPORT void JNICALL Java_mpi_Intracomm_scatter(JNIEnv *env, jobject jthis, + jobject sendbuf, jint sendoffset, + jint sendcount, jobject sendtype, + jobject recvbuf, jint recvoffset, + jint recvcount, jobject recvtype, + jint root) +{ + int id ; + MPI_Comm mpi_comm = + (MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)) ; + + MPI_Datatype mpi_stype = + (MPI_Datatype) ((*env)->GetLongField(env,sendtype,ompi_java.DatatypehandleID)) ; + /* MPICH complains if `mpi_stype' is not defined + * in all processes, notwithstanding what the spec says. 
*/ + + MPI_Datatype mpi_rtype = + (MPI_Datatype)((*env)->GetLongField(env,recvtype,ompi_java.DatatypehandleID)) ; + + + int sbaseType ; + int rbaseType = (*env)->GetIntField(env, recvtype, ompi_java.DatatypebaseTypeID) ; + + void *sendptr = NULL, *recvptr ; + void *sbufbase, *rbufbase ; + + ompi_java_clearFreeList(env) ; + + MPI_Comm_rank(mpi_comm, &id) ; + if (id == root) { + sbaseType = (*env)->GetIntField(env, sendtype, ompi_java.DatatypebaseTypeID) ; + } + + recvptr = ompi_java_getBufPtr(&rbufbase, env, recvbuf, rbaseType, recvoffset) ; + + if (id == root) { + sendptr = ompi_java_getBufPtr(&sbufbase, + env, sendbuf, sbaseType, sendoffset); + } + + MPI_Scatter(sendptr, sendcount, mpi_stype, + recvptr, recvcount, mpi_rtype, root, mpi_comm) ; + + if (id == root) { + ompi_java_releaseBufPtr(env, sendbuf, sbufbase, sbaseType); + } + + ompi_java_releaseBufPtr(env, recvbuf, rbufbase, rbaseType); +} + + + +/* + * Class: mpi_Intracomm + * Method: Scatterv + * Signature: + (Ljava/lang/Object;II[ILmpi/Datatype;Ljava/lang/Object;I[ILmpi/Datatype;I)V +*/ +JNIEXPORT void JNICALL Java_mpi_Intracomm_scatterv(JNIEnv *env, jobject jthis, + jobject sendbuf, jint sendoffset, + jintArray sendcount, jintArray displs, + jobject sendtype, + jobject recvbuf, jint recvoffset, + jint recvcount, jobject recvtype, + jint root) +{ + int id ; + jint *scount = NULL, *dps = NULL; + jboolean isCopy ; + + MPI_Comm mpi_comm = + (MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)) ; + + MPI_Datatype mpi_rtype = + (MPI_Datatype)((*env)->GetLongField(env,recvtype,ompi_java.DatatypehandleID)) ; + MPI_Datatype mpi_stype = mpi_rtype; + + int sbaseType ; + int rbaseType = (*env)->GetIntField(env, recvtype, ompi_java.DatatypebaseTypeID) ; + + void *sendptr = NULL, *recvptr ; + void *sbufbase, *rbufbase ; + + ompi_java_clearFreeList(env) ; + + MPI_Comm_rank(mpi_comm, &id) ; + if(id == root) { + mpi_stype = (MPI_Datatype) + ((*env)->GetLongField(env,sendtype,ompi_java.DatatypehandleID)) ; + + sbaseType = (*env)->GetIntField(env, sendtype, ompi_java.DatatypebaseTypeID) ; + + scount = (*env)->GetIntArrayElements(env,sendcount,&isCopy); + dps = (*env)->GetIntArrayElements(env,displs,&isCopy); + } + + recvptr = ompi_java_getBufPtr(&rbufbase, env, recvbuf, rbaseType, recvoffset) ; + + if (id == root) { + sendptr = ompi_java_getBufPtr(&sbufbase, + env, sendbuf, sbaseType, sendoffset); + } + + MPI_Scatterv(sendptr, (int*) scount, (int*) dps, mpi_stype, + recvptr, recvcount, mpi_rtype, + root, mpi_comm) ; + + if (id == root) { + ompi_java_releaseBufPtr(env, sendbuf, sbufbase, sbaseType); + } + + ompi_java_releaseBufPtr(env, recvbuf, rbufbase, rbaseType) ; + + if (id == root) { + (*env)->ReleaseIntArrayElements(env, sendcount, scount, JNI_ABORT); + (*env)->ReleaseIntArrayElements(env, displs, dps, JNI_ABORT); + } +} + +/* + * Class: mpi_Intracomm + * Method: Allgather + * Signature: + (Ljava/lang/Object;IILmpi/Datatype;Ljava/lang/Object;IILmpi/Datatype;)V +*/ +JNIEXPORT void JNICALL Java_mpi_Intracomm_allgather(JNIEnv *env, jobject jthis, + jobject sendbuf, jint sendoffset, + jint sendcount, jobject sendtype, + jobject recvbuf, jint recvoffset, + jint recvcount, jobject recvtype) +{ + MPI_Comm mpi_comm = + (MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)) ; + + MPI_Datatype mpi_stype = (MPI_Datatype) + ((*env)->GetLongField(env,sendtype,ompi_java.DatatypehandleID)) ; + MPI_Datatype mpi_rtype = (MPI_Datatype) + ((*env)->GetLongField(env, recvtype, ompi_java.DatatypehandleID)) ; + + int sbaseType = 
(*env)->GetIntField(env, sendtype, ompi_java.DatatypebaseTypeID) ; + int rbaseType = (*env)->GetIntField(env, recvtype, ompi_java.DatatypebaseTypeID) ; + + void *sendptr, *recvptr ; + void *sbufbase, *rbufbase ; + + ompi_java_clearFreeList(env) ; + + recvptr = ompi_java_getBufPtr(&rbufbase, env, recvbuf, rbaseType, recvoffset) ; + sendptr = ompi_java_getBufPtr(&sbufbase, env, sendbuf, sbaseType, sendoffset) ; + + MPI_Allgather(sendptr, sendcount, mpi_stype, + recvptr, recvcount, mpi_rtype, mpi_comm) ; + + ompi_java_releaseBufPtr(env, sendbuf, sbufbase, sbaseType) ; + ompi_java_releaseBufPtr(env, recvbuf, rbufbase, rbaseType) ; +} + +/* + * Class: mpi_Intracomm + * Method: Allgatherv + * Signature: + (Ljava/lang/Object;IILmpi/Datatype;Ljava/lang/Object;I[I[ILmpi/Datatype;)V +*/ +JNIEXPORT void JNICALL Java_mpi_Intracomm_allgatherv(JNIEnv *env, jobject jthis, + jobject sendbuf, jint sendoffset, + jint sendcount,jobject sendtype, + jobject recvbuf, jint recvoffset, + jintArray recvcount, jintArray displs, + jobject recvtype) +{ + jint *rcount, *dps; + jboolean isCopy ; + + MPI_Comm mpi_comm = + (MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)) ; + + MPI_Datatype mpi_stype = (MPI_Datatype) + ((*env)->GetLongField(env,sendtype,ompi_java.DatatypehandleID)) ; + MPI_Datatype mpi_rtype = (MPI_Datatype) + ((*env)->GetLongField(env, recvtype, ompi_java.DatatypehandleID)) ; + + int sbaseType = (*env)->GetIntField(env, sendtype, ompi_java.DatatypebaseTypeID) ; + int rbaseType = (*env)->GetIntField(env, recvtype, ompi_java.DatatypebaseTypeID) ; + + void *sendptr, *recvptr ; + void *sbufbase, *rbufbase ; + + ompi_java_clearFreeList(env) ; + + rcount = (*env)->GetIntArrayElements(env, recvcount, &isCopy); + dps = (*env)->GetIntArrayElements(env, displs, &isCopy); + + recvptr = ompi_java_getBufPtr(&rbufbase, env, recvbuf, rbaseType, recvoffset) ; + sendptr = ompi_java_getBufPtr(&sbufbase, env, sendbuf, sbaseType, sendoffset) ; + + MPI_Allgatherv(sendptr, sendcount, mpi_stype, + recvptr, (int*) rcount, (int*) dps, mpi_rtype, + mpi_comm) ; + + ompi_java_releaseBufPtr(env, sendbuf, sbufbase, sbaseType) ; + ompi_java_releaseBufPtr(env, recvbuf, rbufbase, rbaseType) ; + + (*env)->ReleaseIntArrayElements(env, recvcount, rcount, JNI_ABORT); + (*env)->ReleaseIntArrayElements(env, displs, dps, JNI_ABORT); +} + +/* + * Class: mpi_Intracomm + * Method: Alltoall + * Signature: + (Ljava/lang/Object;IILmpi/Datatype;Ljava/lang/Object;IILmpi/Datatype;)V +*/ +JNIEXPORT void JNICALL Java_mpi_Intracomm_alltoall(JNIEnv *env, jobject jthis, + jobject sendbuf, jint sendoffset, + jint sendcount, jobject sendtype, + jobject recvbuf, jint recvoffset, + jint recvcount, jobject recvtype) +{ + MPI_Comm mpi_comm = + (MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)) ; + + MPI_Datatype mpi_stype = (MPI_Datatype) + ((*env)->GetLongField(env,sendtype,ompi_java.DatatypehandleID)) ; + MPI_Datatype mpi_rtype = (MPI_Datatype) + ((*env)->GetLongField(env, recvtype, ompi_java.DatatypehandleID)) ; + + int sbaseType = (*env)->GetIntField(env, sendtype, ompi_java.DatatypebaseTypeID) ; + int rbaseType = (*env)->GetIntField(env, recvtype, ompi_java.DatatypebaseTypeID) ; + + void *sendptr, *recvptr ; + void *sbufbase, *rbufbase ; + + ompi_java_clearFreeList(env) ; + + recvptr = ompi_java_getBufPtr(&rbufbase, env, recvbuf, rbaseType, recvoffset) ; + sendptr = ompi_java_getBufPtr(&sbufbase, env, sendbuf, sbaseType, sendoffset) ; + + MPI_Alltoall(sendptr, sendcount, mpi_stype, + recvptr, recvcount, mpi_rtype, mpi_comm) 
; + + ompi_java_releaseBufPtr(env, sendbuf, sbufbase, sbaseType) ; + ompi_java_releaseBufPtr(env, recvbuf, rbufbase, rbaseType) ; +} + +/* + * Class: mpi_Intracomm + * Method: Alltoallv + * Signature: + (Ljava/lang/Object;II[ILmpi/Datatype;Ljava/lang/Object;I[I[ILmpi/Datatype;)V +*/ +JNIEXPORT void JNICALL Java_mpi_Intracomm_alltoallv(JNIEnv *env, jobject jthis, + jobject sendbuf, jint sendoffset, jintArray sendcount, + jintArray sdispls, jobject sendtype, + jobject recvbuf, jint recvoffset, jintArray recvcount, + jintArray rdispls, jobject recvtype) +{ + jint *rcount, *scount, *sdps, *rdps ; + jboolean isCopy ; + + MPI_Comm mpi_comm = + (MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)) ; + + MPI_Datatype mpi_stype = (MPI_Datatype) + ((*env)->GetLongField(env,sendtype,ompi_java.DatatypehandleID)) ; + MPI_Datatype mpi_rtype = (MPI_Datatype) + ((*env)->GetLongField(env, recvtype, ompi_java.DatatypehandleID)) ; + + int sbaseType = (*env)->GetIntField(env, sendtype, ompi_java.DatatypebaseTypeID) ; + int rbaseType = (*env)->GetIntField(env, recvtype, ompi_java.DatatypebaseTypeID) ; + + void *sendptr, *recvptr ; + void *sbufbase, *rbufbase ; + + ompi_java_clearFreeList(env) ; + + scount=(*env)->GetIntArrayElements(env,sendcount,&isCopy); + rcount=(*env)->GetIntArrayElements(env,recvcount,&isCopy); + sdps=(*env)->GetIntArrayElements(env,sdispls,&isCopy); + rdps=(*env)->GetIntArrayElements(env,rdispls,&isCopy); + + recvptr = ompi_java_getBufPtr(&rbufbase, env, recvbuf, rbaseType, recvoffset) ; + sendptr = ompi_java_getBufPtr(&sbufbase, env, sendbuf, sbaseType, sendoffset) ; + + MPI_Alltoallv(sendptr, (int*) scount, (int*) sdps, mpi_stype, + recvptr, (int*) rcount, (int*) rdps, mpi_rtype, + mpi_comm) ; + + ompi_java_releaseBufPtr(env, sendbuf, sbufbase, sbaseType) ; + ompi_java_releaseBufPtr(env, recvbuf, rbufbase, rbaseType) ; + + (*env)->ReleaseIntArrayElements(env,recvcount,rcount,JNI_ABORT); + (*env)->ReleaseIntArrayElements(env,sendcount,scount,JNI_ABORT); + (*env)->ReleaseIntArrayElements(env,sdispls,sdps,JNI_ABORT); + (*env)->ReleaseIntArrayElements(env,rdispls,rdps,JNI_ABORT); +} + +/* + * Class: mpi_Intracomm + * Method: Reduce + * Signature: + (Ljava/lang/Object;ILjava/lang/Object;IILmpi/Datatype;Lmpi/Op;I)V +*/ +JNIEXPORT void JNICALL Java_mpi_Intracomm_reduce(JNIEnv *env, jobject jthis, + jobject sendbuf, jint sendoffset, + jobject recvbuf, jint recvoffset, + jint count, jobject type, jobject op, jint root) +{ + int id ; + + MPI_Comm mpi_comm = + (MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)) ; + + MPI_Datatype mpi_type = + (MPI_Datatype)((*env)->GetLongField(env,type,ompi_java.DatatypehandleID)) ; + + int baseType = (*env)->GetIntField(env, type, ompi_java.DatatypebaseTypeID) ; + + void *sendptr, *recvptr = NULL; + void *sbufbase, *rbufbase ; + + ompi_java_clearFreeList(env) ; + + MPI_Comm_rank(mpi_comm, &id) ; + + if (id == root) { + recvptr = ompi_java_getBufPtr(&rbufbase, + env, recvbuf, baseType, recvoffset); + } + + sendptr = ompi_java_getBufPtr(&sbufbase, env, sendbuf, baseType, sendoffset) ; + + MPI_Reduce(sendptr, recvptr, count, mpi_type, + (MPI_Op)((*env)->GetLongField(env,op,ompi_java.OphandleID)), + root, mpi_comm) ; + + ompi_java_releaseBufPtr(env, sendbuf, sbufbase, baseType) ; + + if (id == root) { + ompi_java_releaseBufPtr(env, recvbuf, rbufbase, baseType); + } +} + +/* + * Class: mpi_Intracomm + * Method: Allreduce + * Signature: + (Ljava/lang/Object;ILjava/lang/Object;IILmpi/Datatype;Lmpi/Op;)V +*/ +JNIEXPORT void JNICALL 
Java_mpi_Intracomm_allreduce(JNIEnv *env, jobject jthis, + jobject sendbuf, jint sendoffset, + jobject recvbuf, jint recvoffset, + jint count, jobject type, jobject op) +{ + MPI_Comm mpi_comm = + (MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)) ; + + MPI_Datatype mpi_type = + (MPI_Datatype)((*env)->GetLongField(env,type,ompi_java.DatatypehandleID)) ; + + int baseType = (*env)->GetIntField(env, type, ompi_java.DatatypebaseTypeID) ; + + void *sendptr, *recvptr ; + void *sbufbase, *rbufbase ; + + ompi_java_clearFreeList(env) ; + + recvptr = ompi_java_getBufPtr(&rbufbase, env, recvbuf, baseType, recvoffset) ; + sendptr = ompi_java_getBufPtr(&sbufbase, env, sendbuf, baseType, sendoffset) ; + + MPI_Allreduce(sendptr, recvptr, count, mpi_type, + (MPI_Op)((*env)->GetLongField(env,op,ompi_java.OphandleID)), + mpi_comm) ; + + ompi_java_releaseBufPtr(env, sendbuf, sbufbase, baseType) ; + ompi_java_releaseBufPtr(env, recvbuf, rbufbase, baseType) ; +} + +/* + * Class: mpi_Intracomm + * Method: Reduce_scatter + * Signature: + (Ljava/lang/Object;ILjava/lang/Object;I[ILmpi/Datatype;Lmpi/Op;)V +*/ +JNIEXPORT void JNICALL Java_mpi_Intracomm_reduce_1scatter(JNIEnv *env, + jobject jthis, + jobject sendbuf, jint sendoffset, + jobject recvbuf, jint recvoffset, + jintArray recvcount, + jobject type, jobject op) +{ + jint *rcount; + jboolean isCopy ; + + MPI_Comm mpi_comm = + (MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)) ; + + MPI_Datatype mpi_type = + (MPI_Datatype)((*env)->GetLongField(env,type,ompi_java.DatatypehandleID)) ; + + int baseType = (*env)->GetIntField(env, type, ompi_java.DatatypebaseTypeID) ; + + void *sendptr, *recvptr ; + void *sbufbase, *rbufbase ; + + ompi_java_clearFreeList(env) ; + + rcount=(*env)->GetIntArrayElements(env,recvcount,&isCopy); + + recvptr = ompi_java_getBufPtr(&rbufbase, env, recvbuf, baseType, recvoffset) ; + sendptr = ompi_java_getBufPtr(&sbufbase, env, sendbuf, baseType, sendoffset) ; + + MPI_Reduce_scatter(sendptr, recvptr, (int*) rcount, mpi_type, + (MPI_Op)((*env)->GetLongField(env,op,ompi_java.OphandleID)), + mpi_comm) ; + + ompi_java_releaseBufPtr(env, sendbuf, sbufbase, baseType) ; + ompi_java_releaseBufPtr(env, recvbuf, rbufbase, baseType) ; + + (*env)->ReleaseIntArrayElements(env,recvcount,rcount,JNI_ABORT); +} + +/* + * Class: mpi_Intracomm + * Method: Reduce_local + * Signature: + (Ljava/lang/Object;ILjava/lang/Object;IILmpi/Datatype;Lmpi/Op;I)V +*/ +JNIEXPORT void JNICALL Java_mpi_Intracomm_reduce_1local(JNIEnv *env, jobject jthis, + jobject inbuf, jobject inoutbuf, + jint count, jobject type, + jobject op) +{ + MPI_Datatype mpi_type = + (MPI_Datatype)((*env)->GetLongField(env,type,ompi_java.DatatypehandleID)) ; + + int baseType = (*env)->GetIntField(env, type, ompi_java.DatatypebaseTypeID) ; + + void *inptr, *inoutptr = NULL; + void *inbase, *inoutbase ; + + ompi_java_clearFreeList(env) ; + + inptr = ompi_java_getBufPtr(&inbase, env, inbuf, baseType, 0) ; + inoutptr = ompi_java_getBufPtr(&inoutbase, env, inoutbuf, baseType, 0) ; + + MPI_Reduce_local(inptr, inoutptr, count, mpi_type, + (MPI_Op)((*env)->GetLongField(env,op,ompi_java.OphandleID))) ; + + ompi_java_releaseBufPtr(env, inbuf, inbase, baseType) ; + ompi_java_releaseBufPtr(env, inoutbuf, inoutbase, baseType) ; +} + +/* + * Class: mpi_Intracomm + * Method: Scan + * Signature: + (Ljava/lang/Object;ILjava/lang/Object;IILmpi/Datatype;Lmpi/Op;)V +*/ +JNIEXPORT void JNICALL Java_mpi_Intracomm_scan(JNIEnv *env, jobject jthis, + jobject sendbuf, jint sendoffset, + jobject 
recvbuf, jint recvoffset,
+                                               jint count, jobject type, jobject op)
+{
+    MPI_Comm mpi_comm =
+        (MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)) ;
+
+    MPI_Datatype mpi_type =
+        (MPI_Datatype)((*env)->GetLongField(env,type,ompi_java.DatatypehandleID)) ;
+
+    int baseType = (*env)->GetIntField(env, type, ompi_java.DatatypebaseTypeID) ;
+
+    void *sendptr, *recvptr ;
+    void *sbufbase, *rbufbase ;
+
+    ompi_java_clearFreeList(env) ;
+
+    recvptr = ompi_java_getBufPtr(&rbufbase, env, recvbuf, baseType, recvoffset) ;
+    sendptr = ompi_java_getBufPtr(&sbufbase, env, sendbuf, baseType, sendoffset) ;
+
+    MPI_Scan(sendptr, recvptr, count, mpi_type,
+        (MPI_Op)((*env)->GetLongField(env,op,ompi_java.OphandleID)),
+        mpi_comm) ;
+
+    ompi_java_releaseBufPtr(env, sendbuf, sbufbase, baseType) ;
+    ompi_java_releaseBufPtr(env, recvbuf, rbufbase, baseType) ;
+}
+
diff --git a/ompi/mpi/java/c/mpi_MPI.c b/ompi/mpi/java/c/mpi_MPI.c
new file mode 100644
index 0000000000..21a9a98fd2
--- /dev/null
+++ b/ompi/mpi/java/c/mpi_MPI.c
@@ -0,0 +1,348 @@
+/*
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+/*
+ * File : mpi_MPI.c
+ * Headerfile : mpi_MPI.h
+ * Author : SungHoon Ko, Xinying Li (contributions from MAEDA Atusi)
+ * Created : Thu Apr 9 12:22:15 1998
+ * Revision : $Revision: 1.17 $
+ * Updated : $Date: 2003/01/17 01:50:37 $
+ * Copyright: Northeast Parallel Architectures Center
+ * at Syracuse University 1998
+ */
+#include "ompi_config.h"
+
+#include <stdio.h>
+#ifdef HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif
+#ifdef HAVE_STDLIB_H
+#include <stdlib.h>
+#endif
+#ifdef HAVE_STRING_H
+#include <string.h>
+#endif
+#ifdef HAVE_TARGETCONDITIONALS_H
+#include <TargetConditionals.h>
+#endif
+#ifdef HAVE_SYS_STAT_H
+#include <sys/stat.h>
+#endif
+
+#if OPAL_WANT_LIBLTDL
+  #ifndef __WINDOWS__
+    #if OPAL_LIBLTDL_INTERNAL
+      #include "opal/libltdl/ltdl.h"
+    #else
+      #include "ltdl.h"
+    #endif
+  #else
+    #include "ltdl.h"
+  #endif
+#endif
+
+#include "opal/util/output.h"
+
+#include "mpi.h"
+#include "mpi_MPI.h"
+#include "mpiJava.h"
+
+ompi_java_globals_t ompi_java;
+
+static int len = 0;
+static char** sargs = 0;
+
+
+/*
+ * Class: mpi_MPI
+ * Method: loadGlobalLibraries
+ *
+ * Java implementations typically default to loading dynamic
+ * libraries strictly to a local namespace. This breaks the
+ * Open MPI model where components reference back up to the
+ * base libraries (e.g., libmpi) as it requires that the
+ * symbols in those base libraries be globally available.
+ *
+ * One option, of course, is to build with --disable-dlopen.
+ * However, this would preclude the ability to pickup 3rd-party
+ * binary plug-ins at time of execution. This is a valuable
+ * capability that would be a negative factor towards use of
+ * the Java bindings.
+ *
+ * The other option is to explicitly dlopen libmpi ourselves
+ * and instruct dlopen to add all those symbols to the global
+ * namespace. This must be done prior to calling any MPI
+ * function (e.g., MPI_Init) or else Java will have already
+ * loaded the library to the local namespace.
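+ *
+ * (Roughly, on the Java side -- a sketch, the actual MPI.java may
+ * differ:
+ *
+ *     static {
+ *         System.loadLibrary("mpi_java");  // JNI glue, local namespace
+ *         loadGlobalLibraries();           // then promote libmpi symbols
+ *     }
+ *
+ * run before any other native MPI entry point.)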
So create a
+ * special JNI entry point that just loads the required libmpi
+ * to the global namespace and call it first (see MPI.java),
+ * thus making all symbols available to subsequent dlopen calls
+ * when opening OMPI components.
+ */
+JNIEXPORT jboolean JNICALL Java_mpi_MPI_loadGlobalLibraries(JNIEnv *env, jclass obj)
+{
+#if OPAL_WANT_LIBLTDL
+    lt_dladvise advise;
+
+    if (lt_dlinit() != 0) {
+        return JNI_FALSE;
+    }
+
+#if OPAL_HAVE_LTDL_ADVISE
+    /* open the library into the global namespace */
+    if (lt_dladvise_init(&advise)) {
+        return JNI_FALSE;
+    }
+
+    if (lt_dladvise_ext(&advise)) {
+        lt_dladvise_destroy(&advise);
+        return JNI_FALSE;
+    }
+
+    if (lt_dladvise_global(&advise)) {
+        lt_dladvise_destroy(&advise);
+        return JNI_FALSE;
+    }
+
+    /* we don't care about the return value
+     * on dlopen - it might return an error
+     * because the lib is already loaded,
+     * depending on the way we were built
+     */
+    lt_dlopenadvise("libmpi", advise);
+    lt_dladvise_destroy(&advise);
+
+    return JNI_TRUE;
+#endif
+    /* need to balance the ltdl inits */
+    lt_dlexit();
+    /* if we don't have advise, then we are hosed */
+    return JNI_FALSE;
+#endif
+    /* if dlopen was disabled, then all symbols
+     * should have been pulled up into the libraries,
+     * so we don't need to do anything as the symbols
+     * are already available
+     */
+    return JNI_TRUE;
+}
+
+/*
+ * Class: mpi_MPI
+ * Method: InitNative
+ * Signature: ([Ljava/lang/String;)[Ljava/lang/String;
+ */
+JNIEXPORT jobjectArray JNICALL Java_mpi_MPI_InitNative(JNIEnv *env, jclass obj, jobjectArray argv)
+{
+    jsize i;
+    jstring jc;
+    jclass string;
+    jobject value;
+
+    len = (*env)->GetArrayLength(env,argv);
+    sargs = (char**)calloc(len+1, sizeof(char*));
+    for (i=0; i<len; i++) {
+        jc = (*env)->GetObjectArrayElement(env,argv,i);
+        sargs[i] = (char*)calloc(strlen((*env)->GetStringUTFChars(env,jc,0)) + 1,
+            sizeof(char));
+        strcpy(sargs[i],(*env)->GetStringUTFChars(env,jc,0));
+    }
+
+    MPI_Init(&len, &sargs);
+
+    string = (*env)->FindClass(env, "java/lang/String");
+    value = (*env)->NewObjectArray(env, len, string, NULL);
+    for (i = 0; i < len; i++) {
+        jc = (*env)->NewStringUTF(env, sargs[i]);
+        (*env)->SetObjectArrayElement(env, value, i, jc);
+    }
+
+    ompi_java_init_native_Datatype() ;
+
+    return value;
+}
+
+/*
+ * Class: mpi_MPI
+ * Method: Finalize
+ * Signature: ()V
+ */
+JNIEXPORT void JNICALL Java_mpi_MPI_Finalize(JNIEnv *env, jclass obj)
+{
+    ompi_java_clearFreeList(env) ;
+
+#if OPAL_WANT_LIBLTDL
+    /* need to balance the ltdl inits */
+    lt_dlexit();
+#endif
+
+    MPI_Finalize();
+}
+
+/*
+ * Class: mpi_MPI
+ * Method: Get_processor_name
+ * Signature: ([B)I
+ */
+JNIEXPORT jint JNICALL Java_mpi_MPI_Get_1processor_1name(JNIEnv *env, jclass obj, jbyteArray buf)
+{
+    int len;
+    jboolean isCopy;
+    jbyte* bufc = (jbyte*)((*env)->GetByteArrayElements(env,buf,&isCopy)) ;
+
+    ompi_java_clearFreeList(env) ;
+
+    MPI_Get_processor_name((char*)bufc, &len);
+    (*env)->ReleaseByteArrayElements(env,buf,bufc,0) ;
+    return len;
+}
+
+/*
+ * Class: mpi_MPI
+ * Method: Wtime
+ * Signature: ()D
+ */
+JNIEXPORT jdouble JNICALL Java_mpi_MPI_Wtime(JNIEnv *env, jclass jthis)
+{
+    ompi_java_clearFreeList(env) ;
+
+    return MPI_Wtime();
+}
+
+/*
+ * Class: mpi_MPI
+ * Method: Wtick
+ * Signature: ()D
+ */
+JNIEXPORT jdouble JNICALL Java_mpi_MPI_Wtick(JNIEnv *env, jclass jthis)
+{
+    ompi_java_clearFreeList(env) ;
+
+    return MPI_Wtick();
+}
+
+/*
+ * Class: mpi_MPI
+ * Method: Initialized
+ * Signature: ()Z
+ */
+JNIEXPORT jboolean JNICALL Java_mpi_MPI_Initialized(JNIEnv *env, jclass jthis)
+{
+    int flag;
+
+ ompi_java_clearFreeList(env) ; + + MPI_Initialized(&flag); + if (flag==0) { + return JNI_FALSE; + } else { + return JNI_TRUE; + } +} + +/* + * Class: mpi_MPI + * Method: Buffer_attach_native + * Signature: ([B)V + */ +JNIEXPORT void JNICALL Java_mpi_MPI_Buffer_1attach_1native(JNIEnv *env, jclass jthis, jbyteArray buf) +{ + jboolean isCopy; + + int size=(*env)->GetArrayLength(env,buf); + jbyte* bufptr = (*env)->GetByteArrayElements(env,buf,&isCopy) ; + + ompi_java_clearFreeList(env) ; + + MPI_Buffer_attach(bufptr,size); +} + +/* + * Class: mpi_MPI + * Method: Buffer_detach_native + * Signature: ([B)V + */ +JNIEXPORT void JNICALL Java_mpi_MPI_Buffer_1detach_1native(JNIEnv *env, jclass jthis, jbyteArray buf) +{ + /*jboolean isCopy;*/ + + int size; + /*char* bufptr ;*/ + jbyte* bufptr ; + + ompi_java_clearFreeList(env) ; + + MPI_Buffer_detach(&bufptr, &size); + + if (buf != NULL) { + (*env)->ReleaseByteArrayElements(env,buf,bufptr,0); + } +} + +/* + * Class: mpi_MPI + * Method: SetConstant + * Signature: ()V + */ +JNIEXPORT void JNICALL Java_mpi_MPI_SetConstant(JNIEnv *env, jclass jthis) +{ + jfieldID anysourceID=(*env)->GetStaticFieldID(env,jthis,"ANY_SOURCE","I"); + jfieldID anytagID=(*env)->GetStaticFieldID(env,jthis,"ANY_TAG","I"); + jfieldID procnullID=(*env)->GetStaticFieldID(env,jthis,"PROC_NULL","I"); + jfieldID graphID=(*env)->GetStaticFieldID(env,jthis,"GRAPH","I"); + jfieldID cartID=(*env)->GetStaticFieldID(env,jthis,"CART","I"); + jfieldID bsendoverID=(*env)->GetStaticFieldID(env,jthis,"BSEND_OVERHEAD","I"); + jfieldID undefinedID=(*env)->GetStaticFieldID(env,jthis,"UNDEFINED","I"); + + jfieldID identID=(*env)->GetStaticFieldID(env,jthis,"IDENT","I"); + jfieldID congruentID=(*env)->GetStaticFieldID(env,jthis,"CONGRUENT","I"); + jfieldID similarID=(*env)->GetStaticFieldID(env,jthis,"SIMILAR","I"); + jfieldID unequalID=(*env)->GetStaticFieldID(env,jthis,"UNEQUAL","I"); + jfieldID tagubID=(*env)->GetStaticFieldID(env,jthis,"TAG_UB","I"); + jfieldID hostID=(*env)->GetStaticFieldID(env,jthis,"HOST","I"); + jfieldID ioID=(*env)->GetStaticFieldID(env,jthis,"IO","I"); + + (*env)->SetStaticIntField(env,jthis,anysourceID,MPI_ANY_SOURCE); + (*env)->SetStaticIntField(env,jthis,anytagID,MPI_ANY_TAG); + (*env)->SetStaticIntField(env,jthis,procnullID,MPI_PROC_NULL); + (*env)->SetStaticIntField(env,jthis,graphID,MPI_GRAPH); + (*env)->SetStaticIntField(env,jthis,cartID,MPI_CART); +#ifdef GC_DOES_PINNING + (*env)->SetStaticIntField(env,jthis,bsendoverID,MPI_BSEND_OVERHEAD); +#else + (*env)->SetStaticIntField(env,jthis,bsendoverID, + MPI_BSEND_OVERHEAD + sizeof(int)); +#endif /* GC_DOES_PINNING */ + + (*env)->SetStaticIntField(env,jthis,undefinedID,MPI_UNDEFINED); + + (*env)->SetStaticIntField(env,jthis,identID,MPI_IDENT); + (*env)->SetStaticIntField(env,jthis,congruentID,MPI_CONGRUENT); + (*env)->SetStaticIntField(env,jthis,similarID,MPI_SIMILAR); + (*env)->SetStaticIntField(env,jthis,unequalID,MPI_UNEQUAL); + (*env)->SetStaticIntField(env,jthis,tagubID,MPI_TAG_UB); + (*env)->SetStaticIntField(env,jthis,hostID,MPI_HOST); + (*env)->SetStaticIntField(env,jthis,ioID,MPI_IO); +} + +void ompi_java_clearFreeList(JNIEnv *env) +{ + jclass mpi ; + jmethodID clearID ; + + mpi = (*env)->FindClass(env, "mpi/MPI"); + clearID = (*env)->GetStaticMethodID(env, mpi, "clearFreeList", "()V"); + (*env)->CallStaticVoidMethod(env, mpi, clearID) ; +} + diff --git a/ompi/mpi/java/c/mpi_Op.c b/ompi/mpi/java/c/mpi_Op.c new file mode 100644 index 0000000000..29da0fbd92 --- /dev/null +++ b/ompi/mpi/java/c/mpi_Op.c @@ -0,0 
+1,72 @@ +/* + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/* + * File : mpi_Op.c + * Headerfile : mpi_Op.h + * Author : Xinying Li, Bryan Carpenter + * Created : Thu Apr 9 12:22:15 1998 + * Revision : $Revision: 1.7 $ + * Updated : $Date: 2003/01/16 16:39:34 $ + * Copyright: Northeast Parallel Architectures Center + * at Syracuse University 1998 + */ +#include "ompi_config.h" + +#ifdef HAVE_TARGETCONDITIONALS_H +#include +#endif + +#include "mpi.h" +#include "mpi_Op.h" +#include "mpiJava.h" + +/* + * Class: mpi_Op + * Method: init + * Signature: ()V + */ +JNIEXPORT void JNICALL Java_mpi_Op_init(JNIEnv *env, jclass thisClass) +{ + ompi_java.OphandleID=(*env)->GetFieldID(env,thisClass,"handle","J"); +} + +/* + * Class: mpi_Op + * Method: GetOp + * Signature: (I)J + */ +JNIEXPORT void JNICALL Java_mpi_Op_GetOp(JNIEnv *env, jobject jthis, jint type) +{ + static MPI_Op Ops[] = { + MPI_OP_NULL, MPI_MAX, MPI_MIN, MPI_SUM, + MPI_PROD, MPI_LAND, MPI_BAND, MPI_LOR, MPI_BOR, MPI_LXOR, + MPI_BXOR, MPI_MINLOC, MPI_MAXLOC + }; + (*env)->SetLongField(env,jthis, ompi_java.OphandleID, (jlong)Ops[type]); +} + +/* + * Class: mpi_Op + * Method: free + * Signature: ()V + */ +JNIEXPORT void JNICALL Java_mpi_Op_free(JNIEnv *env, jobject jthis) +{ + MPI_Op op; + op=(MPI_Op)((*env)->GetLongField(env,jthis,ompi_java.OphandleID)); + if(op != MPI_OP_NULL) + MPI_Op_free(&op); +} + diff --git a/ompi/mpi/java/c/mpi_Request.c b/ompi/mpi/java/c/mpi_Request.c new file mode 100644 index 0000000000..8cbad6a787 --- /dev/null +++ b/ompi/mpi/java/c/mpi_Request.c @@ -0,0 +1,682 @@ +/* + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ +/* + * File : mpi_Request.c + * Headerfile : mpi_Request.h + * Author : Sung-Hoon Ko, Xinying Li, Bryan Carpenter + * Created : Thu Apr 9 12:22:15 1998 + * Revision : $Revision: 1.11 $ + * Updated : $Date: 2003/01/16 16:39:34 $ + * Copyright: Northeast Parallel Architectures Center + * at Syracuse University 1998 + */ + +#include "ompi_config.h" + +#include +#ifdef HAVE_TARGETCONDITIONALS_H +#include +#endif + +#include "mpi.h" +#include "mpi_Request.h" +#include "mpiJava.h" + + +#ifndef GC_DOES_PINNING + +extern MPI_Datatype Dts[] ; + +#endif /* GC_DOES_PINNING */ + + +static void releaseBuf(int* elements, JNIEnv *env, + jobject req, MPI_Status* status) +{ + int opTag = (*env)->GetIntField(env, req, ompi_java.opTagID) ; + + switch(opTag) { + case 0 : { /* Request.OP_SEND */ + +#ifdef GC_DOES_PINNING + + jobject buf = (*env)->GetObjectField(env, req, ompi_java.bufSaveID) ; + int baseType = (*env)->GetIntField(env, req, ompi_java.baseTypeSaveID) ; + void* bufbase = + (void*) (*env)->GetLongField(env, req, ompi_java.bufbaseSaveID) ; + + ompi_java_releaseBufPtr(env, buf, bufbase, baseType) ; + + /* Try not to create too many local references... */ + (*env)->DeleteLocalRef(env, buf) ; + +#else + + void* bufptr = (void*) (*env)->GetLongField(env, req, ompi_java.bufptrSaveID) ; + + ompi_java_releaseMPIReadBuf(bufptr) ; + +#endif /* GC_DOES_PINNING */ + + break ; + } + case 1 : { /* Request.OP_RECV */ + + jobject buf = (*env)->GetObjectField(env, req, ompi_java.bufSaveID) ; + int baseType = (*env)->GetIntField(env, req, ompi_java.baseTypeSaveID) ; + +#ifdef GC_DOES_PINNING + + void* bufbase = + (void*) (*env)->GetLongField(env, req, ompi_java.bufbaseSaveID) ; + + ompi_java_releaseBufPtr(env, buf, bufbase, baseType) ; + +#else + + int offset = (*env)->GetIntField(env, req, ompi_java.offsetSaveID) ; + int count = (*env)->GetIntField(env, req, ompi_java.countSaveID) ; + + MPI_Comm mpi_comm = + (MPI_Comm) (*env)->GetLongField(env, req, ompi_java.commSaveID) ; + MPI_Datatype mpi_type = + (MPI_Datatype) (*env)->GetLongField(env, req, ompi_java.typeSaveID) ; + void* bufptr = + (void*) (*env)->GetLongField(env, req, ompi_java.bufptrSaveID) ; + + ompi_java_releaseMPIRecvBuf(elements, env, buf, offset, count, mpi_type, + mpi_comm, bufptr, status, baseType) ; + +#endif /* GC_DOES_PINNING */ + + /* Try not to create too many local references... 
*/ + (*env)->DeleteLocalRef(env, buf) ; + } + } +} + + +/* + * Class: mpi_Request + * Method: init + * Signature: ()V + */ +JNIEXPORT void JNICALL Java_mpi_Request_init(JNIEnv *env, jclass thisClass) +{ + ompi_java.reqhandleID = (*env)->GetFieldID(env, thisClass, "handle", "J") ; + + ompi_java.opTagID = (*env)->GetFieldID(env, thisClass, "opTag", "I") ; + + ompi_java.bufSaveID = (*env)->GetFieldID(env, thisClass, "bufSave", "Ljava/lang/Object;") ; + ompi_java.countSaveID = (*env)->GetFieldID(env, thisClass, "countSave", "I") ; + ompi_java.offsetSaveID = (*env)->GetFieldID(env, thisClass, "offsetSave", "I") ; + + ompi_java.baseTypeSaveID = (*env)->GetFieldID(env, thisClass, "baseTypeSave", "I") ; + ompi_java.bufbaseSaveID = (*env)->GetFieldID(env, thisClass, "bufbaseSave", "J") ; + ompi_java.bufptrSaveID = (*env)->GetFieldID(env, thisClass, "bufptrSave", "J") ; + ompi_java.commSaveID = (*env)->GetFieldID(env, thisClass, "commSave", "J") ; + ompi_java.typeSaveID = (*env)->GetFieldID(env, thisClass, "typeSave", "J") ; +} + +/* + * Class: mpi_Request + * Method: GetReq + * Signature: (I)V + */ +JNIEXPORT void JNICALL Java_mpi_Request_GetReq(JNIEnv *env, jobject jthis, jint type) +{ + switch (type) { + case 0: + (*env)->SetLongField(env,jthis,ompi_java.reqhandleID,(jlong)MPI_REQUEST_NULL); + break; + default: + break; + } +} + +/* + * Class: mpi_Request + * Method: Cancel + * Signature: ()V + */ +JNIEXPORT void JNICALL Java_mpi_Request_Cancel(JNIEnv *env, jobject jthis) +{ + MPI_Request req; + + ompi_java_clearFreeList(env) ; + + req=(MPI_Request)((*env)->GetLongField(env,jthis,ompi_java.reqhandleID)); + MPI_Cancel(&req); +} + +/* + * Class: mpi_Request + * Method: Free + * Signature: ()V + */ +JNIEXPORT void JNICALL Java_mpi_Request_Free(JNIEnv *env, jobject jthis) +{ + MPI_Request req; + + ompi_java_clearFreeList(env) ; + + req=(MPI_Request)((*env)->GetLongField(env,jthis,ompi_java.reqhandleID)); + MPI_Request_free(&req); +} + +/* + * Class: mpi_Request + * Method: Is_null + * Signature: ()Z + */ +JNIEXPORT jboolean JNICALL Java_mpi_Request_Is_1null(JNIEnv *env, jobject jthis) +{ + MPI_Request req; + + ompi_java_clearFreeList(env) ; + + req=(MPI_Request)((*env)->GetLongField(env,jthis,ompi_java.reqhandleID)); + if(req==MPI_REQUEST_NULL) + return JNI_TRUE; + else + return JNI_FALSE; +} + +/* + * Class: mpi_Request + * Method: Wait + * Signature: (Lmpi/Status;)Lmpi/Status; + */ +JNIEXPORT jobject JNICALL Java_mpi_Request_Wait(JNIEnv *env, jobject jthis, jobject stat) +{ + int elements ; + + MPI_Request req = + (MPI_Request)((*env)->GetLongField(env,jthis,ompi_java.reqhandleID)) ; + + MPI_Status *status = + (MPI_Status *)((*env)->GetLongField(env,stat,ompi_java.stathandleID)); + + MPI_Wait(&req, status); + + ompi_java_clearFreeList(env) ; + + releaseBuf(&elements, env, jthis, status) ; + + (*env)->SetLongField(env,jthis,ompi_java.reqhandleID,(jlong)req); + + (*env)->SetIntField(env,stat, ompi_java.sourceID, status->MPI_SOURCE); + (*env)->SetIntField(env,stat, ompi_java.tagID, status->MPI_TAG); + + (*env)->SetIntField(env, stat, ompi_java.elementsID, elements); + + return stat; +} + +/* + * Class: mpi_Request + * Method: Test + * Signature: (Lmpi/Status;)Lmpi/Status; + */ +JNIEXPORT jobject JNICALL Java_mpi_Request_Test(JNIEnv *env, jobject jthis, jobject stat) +{ + int flag; + MPI_Request req = (MPI_Request)((*env)->GetLongField(env,jthis,ompi_java.reqhandleID)); + MPI_Status *status = + (MPI_Status *)((*env)->GetLongField(env,stat,ompi_java.stathandleID)); + + ompi_java_clearFreeList(env) ; + + 
MPI_Test(&req, &flag, status); + + (*env)->SetLongField(env,jthis,ompi_java.reqhandleID,(jlong)req); + + if(flag) { + int elements ; + + releaseBuf(&elements, env, jthis, status) ; + + (*env)->SetIntField(env,stat, ompi_java.sourceID, status->MPI_SOURCE); + (*env)->SetIntField(env,stat, ompi_java.tagID, status->MPI_TAG); + + (*env)->SetIntField(env, stat, ompi_java.elementsID, elements); + + return stat; + } + else + return NULL; +} + +/* + * Class: mpi_Request + * Method: Waitany + * Signature: ([Lmpi/Request;Lmpi/Status;)Lmpi/Status; + */ +JNIEXPORT jobject JNICALL Java_mpi_Request_Waitany(JNIEnv *env, jclass jthis, + jobjectArray array_of_request, + jobject stat) +{ + int i, index, elements ; + int count=(*env)->GetArrayLength(env,array_of_request); + MPI_Request *reqs=(MPI_Request*)calloc(count, sizeof(MPI_Request)); + + jobject req ; + + MPI_Status *status = + (MPI_Status *)((*env)->GetLongField(env,stat,ompi_java.stathandleID)); + + ompi_java_clearFreeList(env) ; + + for(i=0; iGetLongField(env, + (*env)->GetObjectArrayElement(env,array_of_request,i), + ompi_java.reqhandleID)) ; + + MPI_Waitany(count, reqs, &index, status); + + for(i=0; iGetObjectArrayElement(env,array_of_request,i) ; + (*env)->SetLongField(env, reqi, ompi_java.reqhandleID, (jlong) reqs[i]) ; + if(i == index) req = reqi ; + } + + releaseBuf(&elements, env, req, status) ; + + (*env)->SetIntField(env,stat, ompi_java.sourceID, status->MPI_SOURCE); + (*env)->SetIntField(env,stat, ompi_java.tagID, status->MPI_TAG); + + (*env)->SetIntField(env,stat, ompi_java.indexID, index); + (*env)->SetIntField(env, stat, ompi_java.elementsID, elements); + + free(reqs); + + return stat; +} + +/* + * Class: mpi_Request + * Method: Testany + * Signature: ([Lmpi/Request;Lmpi/Status;)Lmpi/Status; + */ +JNIEXPORT jobject JNICALL Java_mpi_Request_Testany(JNIEnv *env, jclass jthis, + jobjectArray array_of_request, jobject stat) +{ + int i,flag,index; + int count=(*env)->GetArrayLength(env,array_of_request); + MPI_Request *reqs=(MPI_Request*)calloc(count, sizeof(MPI_Request)); + MPI_Status *status = + (MPI_Status *)((*env)->GetLongField(env,stat,ompi_java.stathandleID)); + + ompi_java_clearFreeList(env) ; + + for(i=0; iGetLongField(env, + (*env)->GetObjectArrayElement(env,array_of_request,i), + ompi_java.reqhandleID)); + + MPI_Testany(count, reqs, &index,&flag, status); + + for(i=0; iSetLongField(env, + (*env)->GetObjectArrayElement(env,array_of_request,i), + ompi_java.reqhandleID, (jlong) reqs[i]); + + free(reqs); + + if(flag) { + int elements ; + + jobject req = (*env)->GetObjectArrayElement(env, array_of_request, index) ; + + releaseBuf(&elements, env, req, status) ; + + (*env)->SetIntField(env,stat, ompi_java.sourceID, status->MPI_SOURCE); + (*env)->SetIntField(env,stat, ompi_java.tagID, status->MPI_TAG); + + (*env)->SetIntField(env,stat, ompi_java.indexID, index); + (*env)->SetIntField(env, stat, ompi_java.elementsID, elements); + + return stat; + } + else + return NULL; +} + +/* + * Class: mpi_Request + * Method: waitall + * Signature: ([Lmpi/Request;)[Lmpi/Status; + */ +JNIEXPORT jobjectArray JNICALL Java_mpi_Request_waitall(JNIEnv *env, jclass jthis, + jobjectArray array_of_request) +{ + int i; + int count=(*env)->GetArrayLength(env,array_of_request); + MPI_Request *reqs=(MPI_Request*)calloc(2 * count, sizeof(MPI_Request)); + MPI_Request *reqs_ini = reqs + count ; + MPI_Status *stas=(MPI_Status*)calloc(count, sizeof(MPI_Status)); + + jclass status_class = (*env)->FindClass(env,"mpi/Status"); + jobjectArray array_of_status = + 
(*env)->NewObjectArray(env,count,status_class,NULL); + + jmethodID handleConstructorID = + (*env)->GetMethodID(env, status_class, "", "()V"); + + ompi_java_clearFreeList(env) ; + + /* Copy initial native requests in Java `array_of_request' to `reqs'. */ + + for(i=0; iGetLongField(env, + (*env)->GetObjectArrayElement(env,array_of_request,i), + ompi_java.reqhandleID)); + reqs_ini [i] = reqs [i] ; + } + + MPI_Waitall(count, reqs, stas); + + for(i=0; iGetObjectArrayElement(env,array_of_request,i) ; + + jobject jstas = (*env)->NewObject(env,status_class,handleConstructorID); + + MPI_Status *status = + (MPI_Status *)((*env)->GetLongField(env,jstas,ompi_java.stathandleID)); + + /* Copy final native request to `array_of_request'. */ + (*env)->SetLongField(env, req, ompi_java.reqhandleID, (jlong) reqs[i]) ; + + /* Copy final native status to Java `array_of_status'... */ + *status = stas [i] ; + + releaseBuf(&elements, env, req, status) ; + + (*env)->SetIntField(env,jstas,ompi_java.sourceID,status->MPI_SOURCE); + (*env)->SetIntField(env,jstas,ompi_java.tagID,status->MPI_TAG); + + (*env)->SetIntField(env, jstas, ompi_java.elementsID, elements); + + (*env)->SetObjectArrayElement(env,array_of_status,i,jstas); + + /* Try not to create too many local references... */ + (*env)->DeleteLocalRef(env, req) ; + (*env)->DeleteLocalRef(env, jstas) ; + } + + free(reqs); + free(stas); + + return array_of_status; +} + + +/* + * Class: mpi_Request + * Method: testall + * Signature: ([Lmpi/Request;)[Lmpi/Status; + */ +JNIEXPORT jobjectArray JNICALL Java_mpi_Request_testall(JNIEnv *env, jclass jthis, + jobjectArray array_of_request) +{ + int i,flag; + int count=(*env)->GetArrayLength(env,array_of_request); + MPI_Request *reqs=(MPI_Request*)calloc(2 * count, sizeof(MPI_Request)); + MPI_Request *reqs_ini = reqs + count ; + MPI_Status *stas=(MPI_Status*)calloc(count, sizeof(MPI_Status)); + + jclass status_class = (*env)->FindClass(env,"mpi/Status"); + jobjectArray array_of_status = + (*env)->NewObjectArray(env,count,status_class,NULL); + + jmethodID handleConstructorID = + (*env)->GetMethodID(env, status_class, "", "()V"); + + ompi_java_clearFreeList(env) ; + + /* Copy initial native requests in Java `array_of_request' to `reqs'. */ + + for(i=0; iGetLongField(env, + (*env)->GetObjectArrayElement(env,array_of_request,i), + ompi_java.reqhandleID)) ; + reqs_ini [i] = reqs [i] ; + } + + MPI_Testall(count, reqs, &flag, stas); + + if(flag) + for(i=0; iGetObjectArrayElement(env,array_of_request,i) ; + + jobject jstas = (*env)->NewObject(env,status_class, + handleConstructorID); + MPI_Status *status = + (MPI_Status *)((*env)->GetLongField(env,jstas,ompi_java.stathandleID)); + + /* Copy final native request to `array_of_request'. */ + + (*env)->SetLongField(env, req, ompi_java.reqhandleID, (jlong) reqs[i]) ; + + /* Copy final native status to Java `array_of_status'... */ + + *status = stas [i] ; + + releaseBuf(&elements, env, req, status) ; + + (*env)->SetIntField(env,jstas,ompi_java.sourceID,status->MPI_SOURCE); + (*env)->SetIntField(env,jstas,ompi_java.tagID,status->MPI_TAG); + + (*env)->SetIntField(env, jstas, ompi_java.elementsID, elements); + + (*env)->SetObjectArrayElement(env,array_of_status,i,jstas); + + /* Try not to create too many local references... 
*/ + + (*env)->DeleteLocalRef(env, req) ; + (*env)->DeleteLocalRef(env, jstas) ; + } + + free(reqs); + free(stas); + + if(flag) + return array_of_status ; + else + return NULL; +} + +/* + * Class: mpi_Request + * Method: waitsome + * Signature: ([Lmpi/Request;)[Lmpi/Status; + */ +JNIEXPORT jobjectArray JNICALL Java_mpi_Request_waitsome(JNIEnv *env, jclass jthis, + jobjectArray array_of_request) +{ + int i; + int incount=(*env)->GetArrayLength(env,array_of_request); + MPI_Request *reqs=(MPI_Request*)calloc(incount, sizeof(MPI_Request)); + MPI_Status *stas=(MPI_Status*)calloc(incount, sizeof(MPI_Status)); + int *array_of_indices=(int*)calloc(incount,sizeof(int)); + int outcount; + + jclass status_class = (*env)->FindClass(env,"mpi/Status"); + jobjectArray array_of_status; + + jmethodID handleConstructorID = + (*env)->GetMethodID(env, status_class, "", "()V"); + + ompi_java_clearFreeList(env) ; + + /* Copy initial native requests in Java `array_of_request' to `reqs'. */ + + for(i=0; iGetLongField(env, + (*env)->GetObjectArrayElement(env,array_of_request,i), + ompi_java.reqhandleID)); + + MPI_Waitsome(incount, reqs, &outcount, array_of_indices, stas); + + if(outcount!=MPI_UNDEFINED) { + array_of_status=(*env)->NewObjectArray(env,outcount,status_class,NULL); + + for(i=0; iGetObjectArrayElement(env,array_of_request, + index) ; + + jobject jstas = (*env)->NewObject(env,status_class,handleConstructorID); + MPI_Status *status = + (MPI_Status *)((*env)->GetLongField(env,jstas,ompi_java.stathandleID)); + + /* Copy final native request to `array_of_request'. */ + + (*env)->SetLongField(env, req, ompi_java.reqhandleID, (jlong) reqs[index]) ; + + /* Copy final native status to Java `array_of_status'... */ + + *status = stas [i] ; + + releaseBuf(&elements, env, req, status) ; + + (*env)->SetIntField(env,jstas,ompi_java.sourceID,status->MPI_SOURCE); + (*env)->SetIntField(env,jstas,ompi_java.tagID,status->MPI_TAG); + + (*env)->SetIntField(env,jstas,ompi_java.indexID, index); + (*env)->SetIntField(env, jstas, ompi_java.elementsID, elements); + + + (*env)->SetObjectArrayElement(env,array_of_status,i,jstas); + + /* Try not to create too many local references... 
*/ + + (*env)->DeleteLocalRef(env, req) ; + (*env)->DeleteLocalRef(env, jstas) ; + } + } + + free(reqs); + free(stas); + free(array_of_indices); + + if(outcount==MPI_UNDEFINED) + return NULL; + else + return array_of_status; +} + +/* + * Class: mpi_Request + * Method: testsome + * Signature: ([Lmpi/Request;)[Lmpi/Status; + */ +JNIEXPORT jobjectArray JNICALL Java_mpi_Request_testsome(JNIEnv *env, jclass jthis, + jobjectArray array_of_request) +{ + int i; + int incount=(*env)->GetArrayLength(env,array_of_request); + MPI_Request *reqs=(MPI_Request*)calloc(incount, sizeof(MPI_Request)); + MPI_Status *stas=(MPI_Status*)calloc(incount, sizeof(MPI_Status)); + int *array_of_indices=(int*)calloc(incount,sizeof(int)); + int outcount; + + jclass status_class = (*env)->FindClass(env,"mpi/Status"); + jobjectArray array_of_status; + + jmethodID handleConstructorID = + (*env)->GetMethodID(env, status_class, "", "()V"); + + ompi_java_clearFreeList(env) ; + + for(i=0; iGetLongField(env, + (*env)->GetObjectArrayElement(env,array_of_request,i), + ompi_java.reqhandleID) ); + } + + MPI_Testsome(incount,reqs,&outcount,array_of_indices, stas); + + if(outcount!=MPI_UNDEFINED) { + array_of_status=(*env)->NewObjectArray(env,outcount,status_class,NULL); + + for(i=0; iGetObjectArrayElement(env,array_of_request, + index) ; + + jobject jstas = (*env)->NewObject(env,status_class,handleConstructorID); + MPI_Status *status = + (MPI_Status *)((*env)->GetLongField(env,jstas,ompi_java.stathandleID)); + + /* Copy final native request to `array_of_request'. + Release buffer elements... */ + + (*env)->SetLongField(env, req, ompi_java.reqhandleID, (jlong) reqs[index]) ; + + /* Copy final native status to Java `array_of_status'... */ + + *status = stas [i] ; + + releaseBuf(&elements, env, req, status) ; + + (*env)->SetIntField(env,jstas,ompi_java.sourceID,status->MPI_SOURCE); + (*env)->SetIntField(env,jstas,ompi_java.tagID,status->MPI_TAG); + + (*env)->SetIntField(env,jstas,ompi_java.indexID, index); + (*env)->SetIntField(env, jstas, ompi_java.elementsID, elements); + + (*env)->SetObjectArrayElement(env,array_of_status,i,jstas); + + /* Try not to create too many local references... */ + + (*env)->DeleteLocalRef(env, req) ; + (*env)->DeleteLocalRef(env, jstas) ; + } + } + + free(reqs); + free(stas); + free(array_of_indices); + + if(outcount==MPI_UNDEFINED) + return NULL; + else + return array_of_status; +} + +/* + * Things to do: + * + * `Free' should release the buffer, if an operation was in progress? + * + * Should be able to cache a global reference to `status_class'. + * Doesn't work for some reason. Why? + * + * Should be able to cache handleConstructorID in a static variable. + * Doesn't work with Linux IBM-JDK1.1.6. Why? + * + * `bufptr' currently unused---may be deleted. + */ + diff --git a/ompi/mpi/java/c/mpi_Status.c b/ompi/mpi/java/c/mpi_Status.c new file mode 100644 index 0000000000..180e62932e --- /dev/null +++ b/ompi/mpi/java/c/mpi_Status.c @@ -0,0 +1,258 @@ +/* + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ +/* + * File : mpi_Status.c + * Headerfile : mpi_Status.h + * Author : Sung-Hoon Ko, Xinying Li + * Created : Thu Apr 9 12:22:15 1998 + * Revision : $Revision: 1.9 $ + * Updated : $Date: 2003/01/16 16:39:34 $ + * Copyright: Northeast Parallel Architectures Center + * at Syracuse University 1998 + */ + +#include "ompi_config.h" + +#include +#ifdef HAVE_TARGETCONDITIONALS_H +#include +#endif + +#include "mpi.h" +#include "mpi_Status.h" +#include "mpiJava.h" + + +/*jmethodID handleConstructorID ;*/ + +/* jclass status_class ; */ + +/* + * Class: mpi_Status + * Method: init + * Signature: ()V + */ +JNIEXPORT void JNICALL Java_mpi_Status_init(JNIEnv *env, jclass jthis) +{ + ompi_java.stathandleID = (*env)->GetFieldID(env,jthis,"handle","J"); + + ompi_java.sourceID = (*env)->GetFieldID(env,jthis,"source","I"); + ompi_java.tagID = (*env)->GetFieldID(env,jthis,"tag","I"); + ompi_java.indexID = (*env)->GetFieldID(env,jthis,"index","I"); + ompi_java.elementsID = (*env)->GetFieldID(env,jthis,"elements","I"); + + /* handleConstructorID = (*env)->GetMethodID(env, jthis, "", "()V");*/ + + /* status_class = (*env)->NewGlobalRef(env, jthis) ; */ +} + +/* + * Class: mpi_Status + * Method: alloc + * Signature: ()V + */ +JNIEXPORT void JNICALL Java_mpi_Status_alloc(JNIEnv *env, jobject jthis) +{ + MPI_Status *status = (MPI_Status*) malloc(sizeof(MPI_Status)); + + (*env)->SetLongField(env, jthis, ompi_java.stathandleID, (jlong)status); +} + +/* + * Class: mpi_Status + * Method: free + * Signature: ()V + */ +JNIEXPORT void JNICALL Java_mpi_Status_free(JNIEnv *env, jobject jthis) +{ + MPI_Status *status = + (MPI_Status *)((*env)->GetLongField(env,jthis,ompi_java.stathandleID)); + free(status) ; +} + +/* + * Class: mpi_Status + * Method: get_count + * Signature: (Lmpi/Datatype;)I + */ +JNIEXPORT jint JNICALL Java_mpi_Status_get_1count(JNIEnv *env, jobject jthis, + jobject type) +{ + int count; + + MPI_Datatype datatype = + (MPI_Datatype)((*env)->GetLongField(env,type,ompi_java.DatatypehandleID)) ; + + MPI_Status *stat = + (MPI_Status*)((*env)->GetLongField(env,jthis,ompi_java.stathandleID)); + +#ifdef GC_DOES_PINNING + + ompi_java_clearFreeList(env) ; + + MPI_Get_count(stat, datatype, &count) ; + return count; + +#else + + int elements = (*env)->GetIntField(env, jthis, ompi_java.elementsID) ; + + int dt_size; + + ompi_java_clearFreeList(env) ; + + MPI_Type_size(datatype, &dt_size) ; + + if (elements != -1) { + count = elements / dt_size ; /* Cached at start of send buffer. */ + + if (count * dt_size == elements) { + return count ; + } else { + return MPI_UNDEFINED; + } + } + else { + /* Status object returned by IPROBE or PROBE. + * + * Didn't have access to data buffer to find `elements' value, + * so only way to find `count' is to invert `MPI_PACK_SIZE'. + */ + + int bsize, bsizeTrial ; + MPI_Get_count(stat, MPI_BYTE, &bsize) ; + + bsize -= sizeof(int) ; + + count = bsize / dt_size ; + MPI_Pack_size(count, datatype, MPI_COMM_WORLD, &bsizeTrial) ; + /* Strictly, we should use the communicator the message was + * received on, but I'm too lazy to cache it. 
+ */ + + while(bsizeTrial > bsize) { + count-- ; + MPI_Pack_size(count, datatype, MPI_COMM_WORLD, &bsizeTrial) ; + } + + if (bsizeTrial == bsize) { + return count ; + } else { + return MPI_UNDEFINED; + } + } + +#endif /* GC_DOES_PINNING */ +} + +/* + * Class: mpi_Status + * Method: Test_cancelled + * Signature: ()Z + */ +JNIEXPORT jboolean JNICALL Java_mpi_Status_Test_1cancelled(JNIEnv *env, jobject jthis) +{ + int flag; + MPI_Status *stat; /*shko*/ + + ompi_java_clearFreeList(env) ; + + stat=(MPI_Status *)((*env)->GetLongField(env,jthis,ompi_java.stathandleID));/*shko*/ + + MPI_Test_cancelled(stat, &flag); + if (flag==0) { + return JNI_FALSE; + } else { + return JNI_TRUE; + } +} + +/* + * Class: mpi_Status + * Method: get_elements + * Signature: (Lmpi/Datatype;)I + */ +JNIEXPORT jint JNICALL Java_mpi_Status_get_1elements(JNIEnv *env, + jobject jthis, jobject type) +{ + int count; + + MPI_Datatype datatype = + (MPI_Datatype)((*env)->GetLongField(env,type,ompi_java.DatatypehandleID)) ; + + MPI_Status *stat = + (MPI_Status*)((*env)->GetLongField(env,jthis,ompi_java.stathandleID)); + +#ifdef GC_DOES_PINNING + + ompi_java_clearFreeList(env) ; + + MPI_Get_elements(stat, datatype, &count) ; + return count; + +#else + + int elements = (*env)->GetIntField(env, jthis, ompi_java.elementsID) ; + int baseType = (*env)->GetIntField(env, type, ompi_java.DatatypebaseTypeID) ; + + int dt_size = ompi_java.dt_sizes[baseType] ; + + ompi_java_clearFreeList(env) ; + + if(elements != -1) { + count = elements / dt_size ; + + if(count * dt_size == elements) + return count ; + else + return MPI_UNDEFINED ; + /* Can only happen if illegal base type mismatch between + * sender and receiver? + */ + } + else { + /* Status object returned by IPROBE or PROBE. + * + * Didn't have access to data buffer to find `elements' value, + * so only way to find `count' is to invert `MPI_PACK_SIZE'. + */ + + int bsize, bsizeTrial ; + MPI_Get_count(stat, MPI_BYTE, &bsize) ; + + bsize -= sizeof(int) ; + + count = bsize / dt_size ; + MPI_Pack_size(count, datatype, MPI_COMM_WORLD, &bsizeTrial) ; + /* Strictly, we should use the communicator the message was + * received on, but I'm too lazy to cache it. + */ + + while(bsizeTrial > bsize) { + count-- ; + MPI_Pack_size(count, datatype, MPI_COMM_WORLD, &bsizeTrial) ; + } + + if(bsizeTrial == bsize) + return count ; + else + return MPI_UNDEFINED ; + /* Can only happen if illegal base type mismatch between + * sender and receiver? 
+ */ + } + +#endif /* GC_DOES_PINNING */ +} + diff --git a/ompi/mpi/java/c/mpijava.exp b/ompi/mpi/java/c/mpijava.exp new file mode 100644 index 0000000000..6e5249a32a --- /dev/null +++ b/ompi/mpi/java/c/mpijava.exp @@ -0,0 +1,125 @@ +Java_mpi_Cartcomm_Get +Java_mpi_Cartcomm_Rank +Java_mpi_Cartcomm_Coords +Java_mpi_Cartcomm_Shift +Java_mpi_Cartcomm_sub +Java_mpi_Cartcomm_Map +Java_mpi_Cartcomm_Dims_1create +Java_mpi_Comm_GetComm +Java_mpi_Comm_dup +Java_mpi_Comm_Size +Java_mpi_Comm_Rank +Java_mpi_Comm_Compare +Java_mpi_Comm_Free +Java_mpi_Comm_Is_1null +Java_mpi_Comm_group +Java_mpi_Comm_Test_1inter +Java_mpi_Comm_GetIntercomm +Java_mpi_Comm_send +Java_mpi_Comm_Recv +Java_mpi_Comm_Sendrecv +Java_mpi_Comm_Sendrecv_1replace +Java_mpi_Comm_bsend +Java_mpi_Comm_ssend +Java_mpi_Comm_rsend +Java_mpi_Comm_Isend +Java_mpi_Comm_Ibsend +Java_mpi_Comm_Issend +Java_mpi_Comm_Irsend +Java_mpi_Comm_Irecv +Java_mpi_Comm_pack +Java_mpi_Comm_unpack +Java_mpi_Comm_Pack_1size +Java_mpi_Comm_Iprobe +Java_mpi_Comm_Probe +Java_mpi_Comm_Attr_1get +Java_mpi_Comm_Topo_1test +Java_mpi_Comm_Abort +Java_mpi_Comm_Errhandler_1set +Java_mpi_Comm_errorhandler_1get +Java_mpi_Comm_init +Java_mpi_Datatype_init +Java_mpi_Datatype_GetDatatype +Java_mpi_Datatype_GetContiguous +Java_mpi_Datatype_GetVector +Java_mpi_Datatype_GetHvector +Java_mpi_Datatype_GetIndexed +Java_mpi_Datatype_GetHindexed +Java_mpi_Datatype_GetStruct +Java_mpi_Datatype_extent +Java_mpi_Datatype_size +Java_mpi_Datatype_lB +Java_mpi_Datatype_uB +Java_mpi_Datatype_commit +Java_mpi_Datatype_free +Java_mpi_Errhandler_init +Java_mpi_Errhandler_GetErrhandler +Java_mpi_Graphcomm_Get +Java_mpi_Graphcomm_Neighbours +Java_mpi_Graphcomm_Map +Java_mpi_Group_init +Java_mpi_Group_GetGroup +Java_mpi_Group_Size +Java_mpi_Group_Rank +Java_mpi_Group_free +Java_mpi_Group_Translate_1ranks +Java_mpi_Group_Compare +Java_mpi_Group_union +Java_mpi_Group_intersection +Java_mpi_Group_difference +Java_mpi_Group_incl +Java_mpi_Group_excl +Java_mpi_Group_range_1incl +Java_mpi_Group_range_1excl +Java_mpi_Intercomm_Remote_1size +Java_mpi_Intercomm_remote_1group +Java_mpi_Intercomm_merge +Java_mpi_Intracomm_split +Java_mpi_Intracomm_creat +Java_mpi_Intracomm_Barrier +Java_mpi_Intracomm_bcast +Java_mpi_Intracomm_gather +Java_mpi_Intracomm_gatherv +Java_mpi_Intracomm_scatter +Java_mpi_Intracomm_scatterv +Java_mpi_Intracomm_allgather +Java_mpi_Intracomm_allgatherv +Java_mpi_Intracomm_alltoall +Java_mpi_Intracomm_alltoallv +Java_mpi_Intracomm_reduce +Java_mpi_Intracomm_allreduce +Java_mpi_Intracomm_reduce_1scatter +Java_mpi_Intracomm_scan +Java_mpi_Intracomm_GetCart +Java_mpi_Intracomm_GetGraph +Java_mpi_MPI_InitNative +Java_mpi_MPI_SetConstant +Java_mpi_MPI_Finalize +Java_mpi_MPI_Wtime +Java_mpi_MPI_Wtick +Java_mpi_MPI_Get_1processor_1name +Java_mpi_MPI_Initialized +Java_mpi_MPI_Buffer_1attach_1native +Java_mpi_MPI_Buffer_1detach_1native +Java_mpi_Op_init +Java_mpi_Op_GetOp +Java_mpi_Op_free +Java_mpi_Request_init +Java_mpi_Request_GetReq +Java_mpi_Request_Free +Java_mpi_Request_Cancel +Java_mpi_Request_Is_1null +Java_mpi_Request_Wait +Java_mpi_Request_Test +Java_mpi_Request_Waitany +Java_mpi_Request_Testany +Java_mpi_Request_waitall +Java_mpi_Request_testall +Java_mpi_Request_waitsome +Java_mpi_Request_testsome +Java_mpi_Status_alloc +Java_mpi_Status_free +Java_mpi_Status_get_1count +Java_mpi_Status_Test_1cancelled +Java_mpi_Status_get_1elements +Java_mpi_Status_init diff --git a/ompi/mpi/java/c/savesignals.exp b/ompi/mpi/java/c/savesignals.exp new file mode 100644 index 
0000000000..fbf5f367db --- /dev/null +++ b/ompi/mpi/java/c/savesignals.exp @@ -0,0 +1,2 @@ +Java_mpi_MPI_saveSignalHandlers +Java_mpi_MPI_restoreSignalHandlers \ No newline at end of file diff --git a/ompi/mpi/java/java/CartParms.java b/ompi/mpi/java/java/CartParms.java new file mode 100644 index 0000000000..1ef790bc7b --- /dev/null +++ b/ompi/mpi/java/java/CartParms.java @@ -0,0 +1,31 @@ +/* + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ +/* + * File : CartParms.java + * Author : Xinying Li + * Created : Thu Apr 9 12:22:15 1998 + * Revision : $Revision: 1.1 $ + * Updated : $Date: 1998/08/26 18:49:50 $ + * Copyright: Northeast Parallel Architectures Center + * at Syracuse University 1998 + */ + +package mpi; + +public class CartParms { + public int [] dims; + public boolean [] periods; + public int [] coords; +} + diff --git a/ompi/mpi/java/java/Cartcomm.java b/ompi/mpi/java/java/Cartcomm.java new file mode 100644 index 0000000000..ffe504437c --- /dev/null +++ b/ompi/mpi/java/java/Cartcomm.java @@ -0,0 +1,160 @@ +/* + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ +/* + * File : Cartcomm.java + * Author : Xinying Li + * Created : Thu Apr 9 12:22:15 1998 + * Revision : $Revision: 1.7 $ + * Updated : $Date: 2001/10/22 21:07:55 $ + * Copyright: Northeast Parallel Architectures Center + * at Syracuse University 1998 + */ + +package mpi; + +public class Cartcomm extends Intracomm { + + protected Cartcomm(long handle) throws MPIException { + super(handle) ; + } + + public Object clone() { + try { + return new Cartcomm(super.dup()) ; + } + catch (MPIException e) { + throw new RuntimeException(e.getMessage()) ; + } + } + + /** + * Returns Cartesian topology information. + *

+ * returns: object containing dimensions, periods and local coordinates
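+ *
+ * A minimal usage sketch (hypothetical variable names; assumes a Cartcomm
+ * cart was obtained earlier, e.g. via Intracomm.Create_cart, and that MPI
+ * has been initialized):
+ *
+ *   CartParms parms = cart.Get() ;
+ *   int ndims = parms.dims.length ;    // number of grid dimensions
+ *   int[] myCoords = parms.coords ;    // this process's grid coordinates
+ *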

+ * Java binding of the MPI operations MPI_CARTDIM_GET and + * MPI_CART_GET. + *

+ * The number of dimensions can be obtained from the size of (e.g.) the + * dims field of the returned object. + */ + + public native CartParms Get() throws MPIException ; + + /** + * Translate logical process coordinates to process rank. + *

+ * coords: Cartesian coordinates of a process
+ * returns: rank of the specified process

+ * Java binding of the MPI operation MPI_CART_RANK. + */ + + public native int Rank(int[] coords) throws MPIException ; + + /** + * Translate process rank to logical process coordinates. + *

+ * rank: rank of a process
+ * returns: Cartesian coordinates of the specified process

+ * Java binding of the MPI operation MPI_CART_COORDS. + */ + + public native int [] Coords(int rank) throws MPIException ; + + /** + * Compute source and destination ranks for ``shift'' communication. + *

+ * direction: coordinate dimension of shift
+ * disp: displacement
+ * returns: object containing ranks of source and destination processes
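+ *
+ * A minimal sketch (assumes ShiftParms exposes the rank_source and
+ * rank_dest fields of the mpiJava API):
+ *
+ *   ShiftParms sp = cart.Shift(0, 1) ;  // displacement 1 along dimension 0
+ *   // exchange halo data with sp.rank_source and sp.rank_dest
+ *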

+ * Java binding of the MPI operation MPI_CART_SHIFT. + */ + + public native ShiftParms Shift(int direction, int disp) throws MPIException ; + + /** + * Partition Cartesian communicator into subgroups of lower dimension. + *

+ * remain_dims: by dimension, true if dimension is to be kept, false otherwise
+ * returns: communicator containing subgrid including this process

+ * Java binding of the MPI operation MPI_CART_SUB. + */ + + public Cartcomm Sub(boolean [] remain_dims) throws MPIException { + return new Cartcomm(sub(remain_dims)) ; + } + + private native long sub(boolean [] remain_dims); + + /** + * Compute an optimal placement. + *

+ * dims: the number of processes in each dimension
+ * periods: true if grid is periodic, false if not, in each dimension
+ * returns: reordered rank of calling process

+ * Java binding of the MPI operation MPI_CART_MAP. + *

+ * The number of dimensions is taken to be the size of the dims argument. + */ + + public native int Map(int [] dims, boolean [] periods) throws MPIException ; + + /** + * Select a balanced distribution of processes per coordinate direction. + *

+ * nnodes: number of nodes in a grid
+ * ndims: number of dimensions of grid
+ * dims: array specifying the number of nodes in each dimension
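+ *
+ * A minimal sketch; entries of dims left as zero are filled in by the
+ * library, while non-zero entries are taken as fixed:
+ *
+ *   int[] dims = new int[] {0, 0} ;
+ *   Cartcomm.Dims_create(6, dims) ;    // e.g. dims becomes {3, 2}
+ *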

+ * Java binding of the MPI operation MPI_DIMS_CREATE. + *

+ * Size dims should be ndims. Note that + * dims is an inout parameter. + */ + + static public native void Dims_create(int nnodes, int[] dims) + throws MPIException ; +} + diff --git a/ompi/mpi/java/java/Comm.java b/ompi/mpi/java/java/Comm.java new file mode 100644 index 0000000000..9485b05f2a --- /dev/null +++ b/ompi/mpi/java/java/Comm.java @@ -0,0 +1,1379 @@ +/* + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ +/* + * File : Comm.java + * Author : Sang Lim, Sung-Hoon Ko, Xinying Li, Bryan Carpenter + * Created : Thu Apr 9 12:22:15 1998 + * Revision : $Revision: 1.20 $ + * Updated : $Date: 2001/08/07 16:36:25 $ + * Copyright: Northeast Parallel Architectures Center + * at Syracuse University 1998 + */ + +package mpi; +import java.io.*; +import java.lang.*; + +public class Comm { + protected final static int SELF = 1; + protected final static int WORLD = 2; + + protected static long nullHandle ; + + Comm() {} + + void setType(int Type) { + GetComm(Type); + } + + private native void GetComm(int Type); + + protected Comm(long handle) { + this.handle = handle; + } + + /** + * Duplicate this communicator. + *

+ * returns: copy of this communicator

+ * Java binding of the MPI operation MPI_COMM_DUP. + *

+ * The new communicator is ``congruent'' to the old one, but has a + * different context. + */ + + public Object clone() { + return new Comm(dup()); + } + + protected native long dup(); + + /** + * Size of group of this communicator. + *

+ * returns: number of processes in the group of this communicator

+ * Java binding of the MPI operation MPI_COMM_SIZE. + */ + + public native int Size() throws MPIException ; + + /** + * Rank of this process in group of this communicator. + *

+ * returns: rank of the calling process in the group of this communicator
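+ *
+ * A minimal sketch (assumes MPI.Init has already been called):
+ *
+ *   int size = MPI.COMM_WORLD.Size() ;
+ *   int rank = MPI.COMM_WORLD.Rank() ;  // ranks run from 0 to size-1
+ *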

+ * Java binding of the MPI operation MPI_COMM_RANK. + */ + + public native int Rank() throws MPIException ; + + /** + * Compare two communicators. + *

+ * comm1: first communicator
+ * comm2: second communicator
+ * returns: result

+ * Java binding of the MPI operation MPI_COMM_COMPARE. + *

+ * MPI.IDENT results if the comm1 and comm2 + * are references to the same object (ie, if comm1 == comm2). + * MPI.CONGRUENT results if the underlying groups are identical + * but the communicators differ by context. + * MPI.SIMILAR results if the underlying groups are similar + * but the communicators differ by context. + * MPI.UNEQUAL results otherwise. + */ + + public static native int Compare(Comm comm1, Comm comm2) throws MPIException ; + + /** + * Destroy this communicator. + *

+ * Java binding of the MPI operation MPI_COMM_FREE. + */ + + public native void Free() throws MPIException ; + + /** + * Test if communicator object is void (has been freed). + *

+ * returns: true if the comm object is void, false otherwise
+ */ + + public native boolean Is_null(); + + /** + * Return group associated with a communicator. + *

+ * returns: group corresponding to this communicator

+ * Java binding of the MPI operation MPI_COMM_GROUP. + */ + + public Group Group() throws MPIException { + return new Group(group()); + } + + private native long group(); + + // Inter-communication + + /** + * Test if this communicator is an inter-communicator. + *

+ * returns: true if this is an inter-communicator, false otherwise

+ * Java binding of the MPI operation MPI_COMM_TEST_INTER. + */ + + public native boolean Test_inter() throws MPIException ; + + /** + * Create an inter-communicator. + *

+ * local_comm: local intra-communicator
+ * local_leader: rank of local group leader in local_comm
+ * remote_leader: rank of remote group leader in this communicator
+ * tag: ``safe'' tag
+ * returns: new inter-communicator

+ * Java binding of the MPI operation MPI_INTERCOMM_CREATE. + *

+ * (This operation is defined as a method on the ``peer communicator'', + * making it analogous to a send or recv communication + * with the remote group leader.) + */ + + public Intercomm Create_intercomm(Comm local_comm, + int local_leader, + int remote_leader, + int tag) throws MPIException { + return new Intercomm(GetIntercomm(local_comm, local_leader, + remote_leader, tag)) ; + } + + public native long GetIntercomm(Comm local_comm, + int local_leader, + int remote_leader, + int tag) ; + + + // Object serialization support + + public byte[] Object_Serialize(Object buf, + int offset, + int count, + Datatype type) throws MPIException { + if(type.Size() != 0) { + byte[] byte_buf ; + Object buf_els [] = (Object[])buf; + try { + ByteArrayOutputStream o = new ByteArrayOutputStream(); + ObjectOutputStream out = new ObjectOutputStream(o); + int base; + for (int i = 0; i < count; i++){ + base = type.Extent() * i; + for (int j = 0 ; j < type.displacements.length ; j++) + out.writeObject(buf_els[base + offset + + type.displacements[j]]); + } + + out.flush(); + out.close(); + byte_buf = o.toByteArray(); + + } catch(Exception ex){ + ex.printStackTrace(); + byte_buf = null ; + } + return byte_buf ; + } + else return new byte[0]; + } + + public void Object_Deserialize(Object buf, + byte[] byte_buf, + int offset, + int count, + Datatype type) throws MPIException { + + if(type.Size() != 0) { + Object buf_els [] = (Object[])buf; + + try { + ByteArrayInputStream in = new ByteArrayInputStream(byte_buf); + ObjectInputStream s = new ObjectInputStream(in); + int base; + for (int i = 0; i < count; i++){ + base = type.Extent() * i; + for (int j = 0 ; j < type.displacements.length ; j++) + buf_els[base + offset + type.displacements[j]]=s.readObject(); + } + s.close(); + }catch(Exception ex){ex.printStackTrace();} + } + } + + + // Blocking Send and Recv + + /** + * Blocking send operation. + *

+ * buf: send buffer array
+ * offset: initial offset in send buffer
+ * count: number of items to send
+ * datatype: datatype of each item in send buffer
+ * dest: rank of destination
+ * tag: message tag
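+ *
+ * A minimal two-process sketch (rank 1 would post the matching Recv;
+ * see the example under Recv below):
+ *
+ *   int[] data = new int[10] ;
+ *   if (MPI.COMM_WORLD.Rank() == 0)
+ *     MPI.COMM_WORLD.Send(data, 0, 10, MPI.INT, 1, 99) ;
+ *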

+ * Java binding of the MPI operation MPI_SEND. + *

+ * The actual argument associated with buf must be a + * one-dimensional array. The value offset is a subscript in + * this array, defining the position of the first item of the message. + *

+ * If the datatype argument represents an MPI basic type, its + * value must agree with the element type of buf---either + * a primitive type or a reference (object) type. If the + * datatype argument represents an MPI derived type, its + * base type must agree with the element type of buf + */ + + public void Send(Object buf, + int offset, + int count, + Datatype type, + int dest, + int tag) throws MPIException { + + if (type.isObject()){ + byte[] byte_buf = Object_Serialize(buf,offset,count,type); + + int[] length_buf = {byte_buf.length, count} ; + + send(length_buf, 0, 2, MPI.INT, dest, tag); // header + + send(byte_buf, 0, byte_buf.length, MPI.BYTE,dest, tag) ; + } + else { + send(buf, offset, count, type, dest, tag); + } + } + + private native void send(Object buf, + int offset, + int count, + Datatype type, + int dest, + int tag); + + /** + * Blocking receive operation. + *

+ * buf: receive buffer array
+ * offset: initial offset in receive buffer
+ * count: number of items in receive buffer
+ * datatype: datatype of each item in receive buffer
+ * source: rank of source
+ * tag: message tag
+ * returns: status object
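+ *
+ * A minimal sketch matching the Send example above (Get_count is assumed
+ * to follow the mpiJava Status API):
+ *
+ *   int[] data = new int[10] ;
+ *   if (MPI.COMM_WORLD.Rank() == 1) {
+ *     Status s = MPI.COMM_WORLD.Recv(data, 0, 10, MPI.INT, 0, 99) ;
+ *     int received = s.Get_count(MPI.INT) ;
+ *   }
+ *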

+ * Java binding of the MPI operation MPI_RECV. + *

+ * The actual argument associated with buf must be a + * one-dimensional array. The value offset is a subscript in + * this array, defining the position into which the first item of the + * incoming message will be copied. + *

+ * If the datatype argument represents an MPI basic type, its + * value must agree with the element type of buf---either + * a primitive type or a reference (object) type. If the + * datatype argument represents an MPI derived type, its + * base type must agree with the element type of buf + */ + + public Status Recv(Object buf, + int offset, + int count, + Datatype type, + int source, + int tag) throws MPIException { + + if (type.isObject()){ + Status status = new Status(); + + int[] length_buf= new int[2]; + Recv(length_buf,0,2, MPI.INT, source, tag, status); + + byte[] byte_buf = new byte[length_buf[0]]; + Recv(byte_buf,0,length_buf[0], MPI.BYTE, status.source, tag, status); + + Object_Deserialize(buf,byte_buf,offset,length_buf[1],type); + + status.object_count = length_buf[1]; + return status; + } + else + return Recv(buf, offset, count, type, source, tag, new Status()); + } + + private native Status Recv(Object buf, + int offset, + int count, + Datatype type, + int source, + int tag, + Status stat); + + // Send-Recv + + /** + * Execute a blocking send and receive operation. + *

+ * sendbuf: send buffer array
+ * sendoffset: initial offset in send buffer
+ * sendcount: number of items to send
+ * sendtype: datatype of each item in send buffer
+ * dest: rank of destination
+ * sendtag: send tag
+ * recvbuf: receive buffer array
+ * recvoffset: initial offset in receive buffer
+ * recvcount: number of items in receive buffer
+ * recvtype: datatype of each item in receive buffer
+ * source: rank of source
+ * recvtag: receive tag
+ * returns: status object
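+ *
+ * A minimal ring-exchange sketch (out and in are illustrative one-element
+ * int arrays):
+ *
+ *   int me = comm.Rank(), np = comm.Size() ;
+ *   comm.Sendrecv(out, 0, 1, MPI.INT, (me + 1) % np, 0,
+ *                 in,  0, 1, MPI.INT, (me + np - 1) % np, 0) ;
+ *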

+ * Java binding of the MPI operation MPI_SENDRECV. + *

+ * Further comments as for Send and Recv. + */ + + public Status Sendrecv(Object sendbuf, + int sendoffset, + int sendcount, + Datatype sendtype, + int dest, + int sendtag, + Object recvbuf, + int recvoffset, + int recvcount, + Datatype recvtype, + int source, + int recvtag) throws MPIException { + + if(sendtype.isObject() || recvtype.isObject()) { + Request reqs [] = {Isend(sendbuf, sendoffset, sendcount, sendtype, + dest, sendtag), + Irecv(recvbuf, recvoffset, recvcount, recvtype, + source, recvtag)} ; + + Status stas [] = Request.Waitall(reqs) ; + + return stas [1] ; + } + else return Sendrecv(sendbuf, sendoffset, + sendcount, sendtype, + dest, sendtag, + recvbuf, recvoffset, + recvcount, recvtype, + source, recvtag, + new Status()); + } + + private native Status Sendrecv(Object sbuf, + int soffset, + int scount, + Datatype stype, + int dest, + int stag, + Object rbuf, + int roffset, + int rcount, + Datatype rtype, + int source, + int rtag, + Status stat); + + /** + * Execute a blocking send and receive operation, receiving message + * into send buffer. + *

+ * buf: buffer array
+ * offset: initial offset in buffer
+ * count: number of items to send
+ * type: datatype of each item in buffer
+ * dest: rank of destination
+ * sendtag: send tag
+ * source: rank of source
+ * recvtag: receive tag
+ * returns: status object

+ * Java binding of the MPI operation MPI_SENDRECV_REPLACE. + *

+ * Further comments as for Send and Recv. + */ + + public Status Sendrecv_replace(Object buf, + int offset, + int count, + Datatype type, + int dest, + int sendtag, + int source, + int recvtag) throws MPIException { + + if(type.isObject()) { + + // Might as well do this natively, to avoid allocation of one more + // buffer. + + Status status = new Status() ; + + byte[] sendbytes = Object_Serialize(buf,offset,count,type); + + int[] length_buf = {sendbytes.length, count} ; + + Sendrecv_replace(length_buf, 0, 2, MPI.INT, + dest, sendtag, source, recvtag, + status) ; + + byte [] recvbytes = new byte [length_buf[0]] ; + + Sendrecv(sendbytes, 0, sendbytes.length, MPI.BYTE, dest, sendtag, + recvbytes, 0, recvbytes.length, MPI.BYTE, status.source, recvtag, + status) ; + + Object_Deserialize(buf,recvbytes,offset,length_buf[1],type); + + status.object_count = length_buf[1] ; + return status; + } + else + return Sendrecv_replace(buf, offset, count, type, + dest, sendtag, source, recvtag, new Status()); + } + + private native Status Sendrecv_replace(Object buf, + int offset, + int count, + Datatype type, + int dest, + int stag, + int source, + int rtag, + Status stat); + + // Communication Modes + + /** + * Send in buffered mode. + *

+ * buf: send buffer array
+ * offset: initial offset in send buffer
+ * count: number of items to send
+ * datatype: datatype of each item in send buffer
+ * dest: rank of destination
+ * tag: message tag

+ * Java binding of the MPI operation MPI_BSEND. + *

+ * Further comments as for Send. + */ + + public void Bsend(Object buf, + int offset, + int count, + Datatype type, + int dest, + int tag) throws MPIException { + + if (type.isObject()){ + byte[] byte_buf = Object_Serialize(buf,offset,count,type); + + int[] length_buf = {byte_buf.length, count} ; + + bsend(length_buf, 0, 2, MPI.INT, dest, tag); + bsend(byte_buf, 0, length_buf[0], MPI.BYTE, dest, tag); + } + else bsend(buf, offset, count, type, dest, tag); + } + + private native void bsend(Object buf, + int offset, + int count, + Datatype type, + int dest, + int tag) ; + + /** + * Send in synchronous mode. + *

+ * buf: send buffer array
+ * offset: initial offset in send buffer
+ * count: number of items to send
+ * datatype: datatype of each item in send buffer
+ * dest: rank of destination
+ * tag: message tag

+ * Java binding of the MPI operation MPI_SSEND. + *

+ * Further comments as for Send. + */ + + public void Ssend(Object buf, + int offset, + int count, + Datatype type, + int dest, + int tag) throws MPIException { + + if (type.isObject()){ + byte[] byte_buf = Object_Serialize(buf,offset,count,type); + + int[] length_buf = {byte_buf.length, count} ; + + send(length_buf, 0, 2, MPI.INT, dest, tag); + ssend(byte_buf , 0, byte_buf.length, MPI.BYTE, dest, tag); + } + else ssend(buf, offset, count, type, dest, tag); + } + + private native void ssend(Object buf, + int offset, + int count, + Datatype type, + int dest, + int tag); + + /** + * Send in ready mode. + *

+ * buf: send buffer array
+ * offset: initial offset in send buffer
+ * count: number of items to send
+ * datatype: datatype of each item in send buffer
+ * dest: rank of destination
+ * tag: message tag

+ * Java binding of the MPI operation MPI_RSEND. + *

+ * Further comments as for Send. + */ + + public void Rsend(Object buf, + int offset, + int count, + Datatype type, + int dest, + int tag) throws MPIException { + + if (type.isObject()){ + byte[] byte_buf = Object_Serialize(buf,offset,count,type); + + int[] length_buf = {byte_buf.length, count} ; + + rsend(length_buf, 0, 2, MPI.INT, dest, tag); + rsend(byte_buf , 0, byte_buf.length, MPI.BYTE, dest, tag); + } + else rsend(buf, offset, count, type, dest, tag); + } + + private native void rsend(Object buf, + int offset, + int count, + Datatype type, + int dest, + int tag) ; + + // Nonblocking communication + + /** + * Start a standard mode, nonblocking send. + *

+ * buf: send buffer array
+ * offset: initial offset in send buffer
+ * count: number of items to send
+ * datatype: datatype of each item in send buffer
+ * dest: rank of destination
+ * tag: message tag
+ * returns: communication request
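+ *
+ * A minimal sketch of a nonblocking exchange completed with
+ * Request.Waitall, the same pattern Sendrecv uses internally for object
+ * datatypes (left and right are illustrative neighbour ranks):
+ *
+ *   Request[] reqs = {
+ *     comm.Isend(out, 0, n, MPI.INT, right, 0),
+ *     comm.Irecv(in,  0, n, MPI.INT, left,  0)
+ *   } ;
+ *   Status[] stats = Request.Waitall(reqs) ;
+ *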

+ * Java binding of the MPI operation MPI_ISEND. + *

+ * Further comments as for Send. + */ + + public Request Isend(Object buf, + int offset, + int count, + Datatype type, + int dest, + int tag) throws MPIException { + if (type.isObject()) { + byte[] byte_buf = Object_Serialize(buf,offset,count,type); + + int[] length_buf = {byte_buf.length, count} ; + + Request hdrReq = Isend(length_buf, 0, 2, MPI.INT, dest, tag, + new Request()); + Request req = new Request(hdrReq) ; + + Isend(byte_buf, 0, byte_buf.length, MPI.BYTE, dest, tag, req); + + return req; + } + else + return Isend(buf, offset, count, type, dest, tag, new Request()); + } + + /** + * Protected member used internally by Prequest.Start + */ + + protected native Request Isend(Object buf, + int offset, + int count, + Datatype type, + int dest, + int tag, + Request req); + + /** + * Start a buffered mode, nonblocking send. + *

+ * buf: send buffer array
+ * offset: initial offset in send buffer
+ * count: number of items to send
+ * datatype: datatype of each item in send buffer
+ * dest: rank of destination
+ * tag: message tag
+ * returns: communication request

+ * Java binding of the MPI operation MPI_IBSEND. + *

+ * Further comments as for Send. + */ + + public Request Ibsend(Object buf, + int offset, + int count, + Datatype type, + int dest, + int tag) throws MPIException { + + if (type.isObject()){ + byte[] byte_buf = Object_Serialize(buf,offset,count,type); + + int[] length_buf = {byte_buf.length, count} ; + + Request hdrReq = Ibsend(length_buf, 0, 2, MPI.INT, dest, tag, + new Request()); + Request req = new Request(hdrReq) ; + + Ibsend(byte_buf, 0, byte_buf.length, MPI.BYTE, dest, tag, req); + + return req; + } + else + return Ibsend(buf, offset, count, type, dest, tag, new Request()); + } + + /** + * Protected member used internally by Prequest.Start + */ + + protected native Request Ibsend(Object buf, + int offset, + int count, + Datatype type, + int dest, + int tag, + Request req); + + /** + * Start a synchronous mode, nonblocking send. + *

+ * buf: send buffer array
+ * offset: initial offset in send buffer
+ * count: number of items to send
+ * datatype: datatype of each item in send buffer
+ * dest: rank of destination
+ * tag: message tag
+ * returns: communication request

+ * Java binding of the MPI operation MPI_ISSEND. + *

+ * Further comments as for Send. + */ + + public Request Issend(Object buf, + int offset, + int count, + Datatype type, + int dest, + int tag) throws MPIException { + if (type.isObject()){ + byte[] byte_buf = Object_Serialize(buf,offset,count,type); + + int[] length_buf = {byte_buf.length, count} ; + + Request hdrReq = Issend(length_buf, 0, 2, MPI.INT, dest, tag, + new Request()); + Request req = new Request(hdrReq) ; + + Isend(byte_buf, 0, byte_buf.length, MPI.BYTE, dest, tag, req); + + return req; + } + else + return Issend(buf, offset, count, type, dest, tag, new Request()); + } + + /** + * Protected member used internally by Prequest.Start + */ + + protected native Request Issend(Object buf, + int offset, + int count, + Datatype type, + int dest, + int tag, + Request req); + + /** + * Start a ready mode, nonblocking send. + *

+ * buf: send buffer array
+ * offset: initial offset in send buffer
+ * count: number of items to send
+ * datatype: datatype of each item in send buffer
+ * dest: rank of destination
+ * tag: message tag
+ * returns: communication request

+ * Java binding of the MPI operation MPI_IRSEND. + *

+ * Further comments as for Send. + */ + + public Request Irsend(Object buf, + int offset, + int count, + Datatype type, + int dest, + int tag) throws MPIException { + + if (type.isObject()){ + byte[] byte_buf = Object_Serialize(buf,offset,count,type); + + int[] length_buf = {byte_buf.length, count} ; + + Request hdrReq = Irsend(length_buf, 0, 2, MPI.INT, dest, tag, + new Request()); + Request req = new Request(hdrReq) ; + + Isend(byte_buf, 0, byte_buf.length, MPI.BYTE, dest, tag, req); + + return req; + } + else + return Irsend(buf, offset, count, type, dest, tag, new Request()); + } + + /** + * Protected member used internally by Prequest.Start + */ + + protected native Request Irsend(Object buf, + int offset, + int count, + Datatype type, + int dest, + int tag, + Request req); + + /** + * Start a nonblocking receive. + *

+ * buf: receive buffer array
+ * offset: initial offset in receive buffer
+ * count: number of items in receive buffer
+ * datatype: datatype of each item in receive buffer
+ * source: rank of source
+ * tag: message tag
+ * returns: communication request

+ * Java binding of the MPI operation MPI_IRECV. + *

+ * Further comments as for Recv. + */ + + public Request Irecv(Object buf, + int offset, + int count, + Datatype type, + int source, + int tag) throws MPIException { + + if (type.isObject()){ + int[] length_buf= new int[2]; + + Request req = new Request(buf, offset, count, type, + tag, this, length_buf) ; + + Irecv(length_buf, 0, 2, MPI.INT, source, tag, req); + + return req; + } + else + return Irecv(buf, offset, count, type, source, tag, new Request()); + } + + /** + * Protected member used internally by Prequest.Start + */ + + protected native Request Irecv(Object buf, + int offset, + int count, + Datatype type, + int source, + int tag, + Request req); + + + // Persistent communication requests + + /** + * Creates a persistent communication request for a standard mode send. + *

+ * buf: send buffer array
+ * offset: initial offset in send buffer
+ * count: number of items to send
+ * datatype: datatype of each item in send buffer
+ * dest: rank of destination
+ * tag: message tag
+ * returns: persistent communication request
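+ *
+ * A minimal sketch (Prequest.Start and the no-argument Request.Wait are
+ * assumed to follow the mpiJava API):
+ *
+ *   Prequest pr = comm.Send_init(buf, 0, n, MPI.INT, dest, 0) ;
+ *   pr.Start() ;   // start one instance of the persistent send
+ *   pr.Wait() ;    // complete it; pr can be started again afterwards
+ *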

+ * Java binding of the MPI operation MPI_SEND_INIT. + *

+ * Further comments as for Send. + */ + + public Prequest Send_init(Object buf, + int offset, + int count, + Datatype type, + int dest, + int tag) throws MPIException { + + return new Prequest(Prequest.MODE_STANDARD, buf, offset, count, type, + dest, tag, this) ; + } + + /** + * Creates a persistent communication request for a buffered mode send. + *

+ * buf: send buffer array
+ * offset: initial offset in send buffer
+ * count: number of items to send
+ * datatype: datatype of each item in send buffer
+ * dest: rank of destination
+ * tag: message tag
+ * returns: persistent communication request

+ * Java binding of the MPI operation MPI_BSEND_INIT. + *

+ * Further comments as for Send. + */ + + public Prequest Bsend_init(Object buf, + int offset, + int count, + Datatype type, + int dest, + int tag) throws MPIException { + + return new Prequest(Prequest.MODE_BUFFERED, buf, offset, count, type, + dest, tag, this) ; + } + + /** + * Creates a persistent communication request for a synchronous mode send. + *

+ * buf: send buffer array
+ * offset: initial offset in send buffer
+ * count: number of items to send
+ * datatype: datatype of each item in send buffer
+ * dest: rank of destination
+ * tag: message tag
+ * returns: persistent communication request

+ * Java binding of the MPI operation MPI_SSEND_INIT. + *

+ * Further comments as for Send. + */ + + public Prequest Ssend_init(Object buf, + int offset, + int count, + Datatype type, + int dest, + int tag) throws MPIException { + + return new Prequest(Prequest.MODE_SYNCHRONOUS, buf, offset, count, type, + dest, tag, this) ; + } + + /** + * Creates a persistent communication request for a ready mode send. + *

+ * buf: send buffer array
+ * offset: initial offset in send buffer
+ * count: number of items to send
+ * datatype: datatype of each item in send buffer
+ * dest: rank of destination
+ * tag: message tag
+ * returns: persistent communication request

+ * Java binding of the MPI operation MPI_RSEND_INIT. + *

+ * Further comments as for Send. + */ + + public Prequest Rsend_init(Object buf, + int offset, + int count, + Datatype type, + int dest, + int tag) throws MPIException { + + return new Prequest(Prequest.MODE_READY, buf, offset, count, type, + dest, tag, this) ; + } + + + /** + * Creates a persistent communication request for a receive operation. + *

+ * buf: receive buffer array
+ * offset: initial offset in receive buffer
+ * count: number of items in receive buffer
+ * datatype: datatype of each item in receive buffer
+ * source: rank of source
+ * tag: message tag
+ * returns: communication request

+ * Java binding of the MPI operation MPI_RECV_INIT. + *

+ * Further comments as for Recv. + */ + + public Prequest Recv_init(Object buf, + int offset, + int count, + Datatype type, + int source, + int tag) throws MPIException { + + return new Prequest(buf, offset, count, type, source, tag, this) ; + } + + + // Pack and Unpack + + /** + * Packs message in send buffer inbuf into space specified in + * outbuf. + *

+ * inbuf      input buffer array
+ * offset     initial offset in input buffer
+ * incount    number of items in input buffer
+ * datatype   datatype of each item in input buffer
+ * outbuf     output buffer
+ * position   initial position in output buffer
+ * returns:   final position in output buffer

+ * Java binding of the MPI operation MPI_PACK. + *

+ * The return value is the output value of position - the
+ * initial value incremented by the number of bytes written.
+ */
+
+  public int Pack(Object inbuf,
+                  int offset,
+                  int incount,
+                  Datatype datatype,
+                  byte[] outbuf,
+                  int position) throws MPIException {
+
+    if (datatype.isObject()){
+      byte[] byte_buf = Object_Serialize(inbuf,offset,incount,datatype);
+
+      System.arraycopy(byte_buf,0,outbuf,position,byte_buf.length);
+
+      return (position + byte_buf.length);
+    }
+    else
+      return pack(inbuf, offset, incount, datatype, outbuf, position);
+  }
+
+  private native int pack(Object inbuf,
+                          int offset,
+                          int incount,
+                          Datatype data,
+                          byte[] outbuf,
+                          int position);
+
+ /**
+ * Unpacks message in receive buffer inbuf into space specified in
+ * outbuf.
+ *

+ * inbuf      input buffer
+ * position   initial position in input buffer
+ * outbuf     output buffer array
+ * offset     initial offset in output buffer
+ * outcount   number of items in output buffer
+ * datatype   datatype of each item in output buffer
+ * returns:   final position in input buffer

+ * Java binding of the MPI operation MPI_UNPACK. + *

+ * The return value is the output value of position - the
+ * initial value incremented by the number of bytes read.
+ */
+
+  public int Unpack(byte[] inbuf,
+                    int position,
+                    Object outbuf,
+                    int offset,
+                    int outcount,
+                    Datatype datatype) throws MPIException {
+
+    if (datatype.isObject()){
+
+      Object buf_els [] = (Object[])outbuf;
+      int ava=0;
+
+      try {
+        ByteArrayInputStream in =
+          new ByteArrayInputStream(inbuf,position,
+                                   inbuf.length-position);
+
+        ObjectInputStream s = new ObjectInputStream(in);
+
+        int base;
+        for (int i = 0; i < outcount; i++){
+          base = datatype.Extent() * i;
+          for (int j = 0 ; j < datatype.displacements.length ; j++)
+            buf_els[base + offset + datatype.displacements[j]]=s.readObject();
+        }
+        ava= in.available();
+        s.close();
+      }catch(Exception ex){ex.printStackTrace();}
+
+      return inbuf.length- ava;
+    }
+    else
+      return unpack(inbuf, position, outbuf, offset, outcount, datatype);
+  }
+
+  private native int unpack(byte[] inbuf,
+                            int position,
+                            Object outbuf,
+                            int offset,
+                            int outcount,
+                            Datatype type);
+
+ /**
+ * Returns an upper bound on the increment of position effected
+ * by pack.
+ *

+ * incount    number of items in input buffer
+ * datatype   datatype of each item in input buffer
+ * returns:   upper bound on size of packed message

+ * Java binding of the MPI operation MPI_PACK_SIZE. + *
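+ * For example (a sketch only), a buffer for four ints can be sized,
+ * packed and unpacked as follows:
+ *
+ *   int[] data = {1, 2, 3, 4};
+ *   byte[] scratch = new byte[MPI.COMM_WORLD.Pack_size(4, MPI.INT)];
+ *   int pos = MPI.COMM_WORLD.Pack(data, 0, 4, MPI.INT, scratch, 0);
+ *   int[] copy = new int[4];
+ *   MPI.COMM_WORLD.Unpack(scratch, 0, copy, 0, 4, MPI.INT);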

+ * It is an error to call this function if the base type of + * datatype is MPI.OBJECT. + */ + + public native int Pack_size(int incount, Datatype datatype) + throws MPIException ; + + // Probe and Cancel + + /** + * Check if there is an incoming message matching the pattern specified. + *

+ * source     rank of source
+ * tag        message tag
+ * returns:   status object or null handle

+ * Java binding of the MPI operation MPI_IPROBE. + *

+ * If such a message is currently available, a status object + * similar to the return value of a matching Recv operation + * is returned. + * Otherwise a null handle is returned. + */ + + public Status Iprobe(int source, int tag) throws MPIException { + return Iprobe(source,tag,new Status()); + } + + private native Status Iprobe(int source, int tag,Status stat) + throws MPIException ; + + /** + * Wait until there is an incoming message matching the pattern specified. + *

+ * source     rank of source
+ * tag        message tag
+ * returns:   status object

+ * Java binding of the MPI operation MPI_PROBE. + *
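+ * A typical pattern (a sketch; MPI.ANY_SOURCE and MPI.ANY_TAG are the
+ * usual wildcard values, and the Status fields are as defined by this
+ * binding):
+ *
+ *   Status st = MPI.COMM_WORLD.Probe(MPI.ANY_SOURCE, MPI.ANY_TAG);
+ *   int[] msg = new int[st.Get_count(MPI.INT)];
+ *   MPI.COMM_WORLD.Recv(msg, 0, msg.length, MPI.INT, st.source, st.tag);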

+ * Returns a status object similar to the return value of a matching + * Recv operation. + */ + + public Status Probe(int source, int tag) throws MPIException { + return Probe(source,tag,new Status()); + } + + private native Status Probe(int source, int tag,Status stat) + throws MPIException ; + + // Caching + + /** + * Retrieves attribute value by key. + *

+ * keyval     one of the key values predefined by the implementation
+ * returns:   attribute value

+ * Java binding of the MPI operation MPI_ATTR_GET. + */ + + public native int Attr_get(int keyval) throws MPIException ; + + public native void Attr_delete(int keyval); + + public native void Attr_put(int keyval, int value); + + // Process Topologies + + /** + * Returns the type of topology associated with the communicator. + *

+ * returns:   topology type of communicator

+ * Java binding of the MPI operation MPI_TOPO_TEST. + *

+ * The return value will be one of MPI.GRAPH, MPI.CART
+ * or MPI.UNDEFINED.
+ */
+
+  public native int Topo_test() throws MPIException ;
+
+  // Environmental Management
+
+ /**
+ * Abort MPI.
+ *

+ * errorcode  error code for Unix or POSIX environments

+ * Java binding of the MPI operation MPI_ABORT. + */ + + public native void Abort(int errorcode) throws MPIException ; + + // Error handler + + /** + * Associates a new error handler with communicator at the calling process. + *

+ * errhandler  new MPI error handler for communicator

+ * Java binding of the MPI operation MPI_ERRORHANDLER_SET. + */ + + public native void Errhandler_set(Errhandler errhandler) throws MPIException ; + + /** + * Returns the error handler currently associated with the communicator. + *

+ * returns:   MPI error handler currently associated with communicator

+ * Java binding of the MPI operation MPI_ERRORHANDLER_GET. + */ + + public Errhandler Errorhandler_get() throws MPIException { + return new Errhandler(errorhandler_get()) ; + } + + private native long errorhandler_get(); + + protected long handle; + + static { + init(); + } + + private static native void init(); +} + +// Things to do: +// +// Should `Object_Serialize', `Object_Deserialize' really be instance +// methods on `Comm'? +// +// Mystery: Why does `startO' test hang on the `Ssend_init' case if +// header is sent standard-mode and data is sent synchronous? +// +// Selection of constructors on the basis of `long' vs `int' argument +// is bad. Too easy to get the wrong one. + diff --git a/ompi/mpi/java/java/Datatype.java b/ompi/mpi/java/java/Datatype.java new file mode 100644 index 0000000000..c43c778623 --- /dev/null +++ b/ompi/mpi/java/java/Datatype.java @@ -0,0 +1,845 @@ +/* + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ +/* + * File : Datatype.java + * Author : Sang Lim, Sung-Hoon Ko, Xinying Li, Bryan Carpenter + * Created : Thu Apr 9 12:22:15 1998 + * Revision : $Revision: 1.14 $ + * Updated : $Date: 2003/01/16 16:39:34 $ + * Copyright: Northeast Parallel Architectures Center + * at Syracuse University 1998 + */ + +package mpi; + +public class Datatype extends Freeable { + + private final static int UNDEFINED = -1 ; + private final static int NULL = 0 ; + private final static int BYTE = 1 ; + private final static int CHAR = 2 ; + private final static int SHORT = 3 ; + private final static int BOOLEAN = 4 ; + private final static int INT = 5 ; + private final static int LONG = 6 ; + private final static int FLOAT = 7 ; + private final static int DOUBLE = 8 ; + private final static int PACKED = 9 ; + private final static int LB = 10 ; + private final static int UB = 11 ; + private final static int OBJECT = 12 ; + + private static native void init(); + + + /* + * Constructor used in static initializer of `MPI'. + * + * (Called before MPI.Init(), so cannot make any native MPI calls.) + */ + + Datatype() {} + //public Datatype() {} // debug + + /* + * Constructor for basic datatypes. + * + * (Initialization done in separate `setBasic', so can create + * datatype objects for `BYTE', etc in static initializers invoked before + * MPI.Init(), then initialize objects after MPI initialized.) + */ + + Datatype(int Type) { + setBasic(Type) ; + } + + void setBasic (int Type) { + switch(Type) { + case OBJECT : + baseType = OBJECT ; + displacements = new int [1] ; + lb = 0 ; + ub = 1 ; + lbSet = false ; + ubSet = false ; + + break ; + + case LB : + baseType = UNDEFINED ; + displacements = new int [0] ; + lb = 0 ; + ub = 0 ; + lbSet = true ; + ubSet = false ; + + break ; + + case UB : + baseType = UNDEFINED ; + displacements = new int [0] ; + lb = 0 ; + ub = 0 ; + lbSet = false ; + ubSet = true ; + + break ; + + default : // Native case + + baseType = Type ; // what about PACKED? 
+ GetDatatype(Type); + + baseSize = size() ; + } + } + + private native void GetDatatype(int Type); + + /* + * Constructor used by `Contiguous' + * + * (Initialization done in separate `setContiguous', so can create + * datatype objects for `SHORT2', etc in static initializers invoked before + * MPI.Init(), then initialize objects after MPI initialized.) + */ + + private Datatype(int count, Datatype oldtype) throws MPIException { + setContiguous(count, oldtype) ; + } + + void setContiguous(int count, Datatype oldtype) throws MPIException { + + baseType = oldtype.baseType ; + + if(baseType == OBJECT || baseType == UNDEFINED) { + + int oldSize = oldtype.Size() ; + boolean oldUbSet = oldtype.ubSet ; + boolean oldLbSet = oldtype.lbSet ; + + displacements = new int [count * oldSize] ; + + ubSet = count > 0 && oldUbSet ; + lbSet = count > 0 && oldLbSet ; + + lb = Integer.MAX_VALUE ; + ub = Integer.MIN_VALUE ; + + if(oldSize != 0 || oldLbSet || oldUbSet) { + + // `oldType.ub', `oldType.lb', `oldType.Extent()' all well-defined. + + int oldExtent = oldtype.Extent() ; + + if(count > 0) { + + // Compose proper displacements... + + int ptr = 0 ; + for (int i = 0 ; i < count ; i++) { + int startElement = i * oldExtent ; + + for (int l = 0; l < oldSize; l++, ptr++) + displacements [ptr] = startElement + oldtype.displacements[l] ; + } + + // Now maximize/minimize upper/lower bounds + + int maxStartElement = oldExtent > 0 ? (count - 1) * oldExtent : 0 ; + int max_ub = maxStartElement + oldtype.ub ; + if (max_ub > ub) + ub = max_ub ; + + int minStartElement = oldExtent > 0 ? 0 : (count - 1) * oldExtent ; + int min_lb = minStartElement + oldtype.lb ; + if (min_lb < lb) + lb = min_lb ; + } + } + else { + + // `oldType.ub', `oldType.lb' and `oldType.Extent()' are undefined. + // Can ignore unless... + + if(count > 1) { + System.out.println("Datatype.Contiguous: repeat-count specified " + + "for component with undefined extent"); + MPI.COMM_WORLD.Abort(1); + } + } + } + else { + baseSize = oldtype.baseSize ; + + GetContiguous(count, oldtype) ; + } + } + + private native void GetContiguous(int count, Datatype oldtype); + + + /* + * Constructor used by `Vector', `Hvector' + */ + + private Datatype(int count, int blocklength, int stride, Datatype oldtype, + boolean unitsOfOldExtent) throws MPIException { + + baseType = oldtype.baseType ; + + if(baseType == OBJECT || baseType == UNDEFINED) { + + int oldSize = oldtype.Size() ; + boolean oldUbSet = oldtype.ubSet ; + boolean oldLbSet = oldtype.lbSet ; + + int repetitions = count * blocklength ; + + displacements = new int [repetitions * oldSize] ; + + ubSet = repetitions > 0 && oldUbSet ; + lbSet = repetitions > 0 && oldLbSet ; + + lb = Integer.MAX_VALUE ; + ub = Integer.MIN_VALUE ; + + if(repetitions > 0) { + if(oldSize != 0 || oldLbSet || oldUbSet) { + + // `oldType.ub', `oldType.lb', `oldType.Extent()' all well-defined. + + int oldExtent = oldtype.Extent() ; + + int ptr = 0 ; + for (int i = 0 ; i < count ; i++) { + + int startBlock = stride * i ; + if(unitsOfOldExtent) startBlock *= oldExtent ; + + // Compose proper displacements... + + for (int j = 0; j < blocklength ; j++) { + int startElement = startBlock + j * oldExtent ; + + for (int l = 0; l < oldSize; l++, ptr++) + displacements [ptr] = startElement + oldtype.displacements[l] ; + } + + // Now maximize/minimize upper/lower bounds + + int maxStartElement = + oldExtent > 0 ? 
startBlock + (blocklength - 1) * oldExtent + : startBlock ; + int max_ub = maxStartElement + oldtype.ub ; + if (max_ub > ub) + ub = max_ub ; + + int minStartElement = + oldExtent > 0 ? startBlock + : startBlock + (blocklength - 1) * oldExtent ; + int min_lb = minStartElement + oldtype.lb ; + if (min_lb < lb) + lb = min_lb ; + } + } + else { + + // `oldType.ub', `oldType.lb' and `oldType.Extent()' are undefined. + + if(unitsOfOldExtent) { + System.out.println("Datatype.Vector: " + + "old type has undefined extent"); + MPI.COMM_WORLD.Abort(1); + } + else { + + // For `Hvector' can ignore unless... + + if(blocklength > 1) { + System.out.println("Datatype.Hvector: repeat-count specified " + + "for component with undefined extent"); + MPI.COMM_WORLD.Abort(1); + } + } + } + } + } + else { + baseSize = oldtype.baseSize ; + + if(unitsOfOldExtent) + GetVector(count, blocklength, stride, oldtype) ; + else + GetHvector(count, blocklength, stride, oldtype) ; + } + } + + private native void GetVector(int count, int blocklength, int stride, + Datatype oldtype); + + private native void GetHvector(int count, int blocklength, int stride, + Datatype oldtype) ; + + + /* + * Constructor used by `Indexed', `Hindexed' + */ + + private Datatype(int[] array_of_blocklengths, int[] array_of_displacements, + Datatype oldtype, boolean unitsOfOldExtent) + throws MPIException { + + baseType = oldtype.baseType ; + + if(baseType == OBJECT || baseType == UNDEFINED) { + + int oldSize = oldtype.Size() ; + boolean oldUbSet = oldtype.ubSet ; + boolean oldLbSet = oldtype.lbSet ; + + int count = 0 ; + for (int i = 0; i < array_of_blocklengths.length; i++) + count += array_of_blocklengths[i] ; + + displacements = new int [count * oldSize] ; + + ubSet = count > 0 && oldUbSet ; + lbSet = count > 0 && oldLbSet ; + + lb = Integer.MAX_VALUE ; + ub = Integer.MIN_VALUE ; + + if(oldSize != 0 || oldLbSet || oldUbSet) { + + // `oldType.ub', `oldType.lb', `oldType.Extent()' all well-defined. + + int oldExtent = oldtype.Extent() ; + + int ptr = 0 ; + for (int i = 0; i < array_of_blocklengths.length; i++) { + int blockLen = array_of_blocklengths [i] ; + if(blockLen > 0) { + + int startBlock = array_of_displacements [i] ; + if(unitsOfOldExtent) startBlock *= oldExtent ; + + // Compose proper displacements... + + for (int j = 0; j < blockLen ; j++) { + int startElement = startBlock + j * oldExtent ; + + for (int l = 0; l < oldSize; l++, ptr++) + displacements [ptr] = startElement + oldtype.displacements[l] ; + } + + // Now maximize/minimize upper/lower bounds + + int maxStartElement = + oldExtent > 0 ? startBlock + (blockLen - 1) * oldExtent + : startBlock ; + int max_ub = maxStartElement + oldtype.ub ; + if (max_ub > ub) + ub = max_ub ; + + int minStartElement = + oldExtent > 0 ? startBlock + : startBlock + (blockLen - 1) * oldExtent ; + int min_lb = minStartElement + oldtype.lb ; + if (min_lb < lb) + lb = min_lb ; + } + } + } + else { + + // `oldType.ub', `oldType.lb' and `oldType.Extent()' are undefined. + + if(unitsOfOldExtent) { + System.out.println("Datatype.Indexed: old type has undefined extent"); + MPI.COMM_WORLD.Abort(1); + } + else { + // Can ignore unless... 
+ + for (int i = 0; i < array_of_blocklengths.length; i++) + if(array_of_blocklengths [i] > 1) { + System.out.println("Datatype.Hindexed: repeat-count specified " + + "for component with undefined extent"); + MPI.COMM_WORLD.Abort(1); + } + } + } + } + else { + baseSize = oldtype.baseSize ; + + if(unitsOfOldExtent) + GetIndexed(array_of_blocklengths, array_of_displacements, oldtype) ; + else + GetHindexed(array_of_blocklengths, array_of_displacements, oldtype) ; + } + } + + private native void GetIndexed(int[] array_of_blocklengths, + int[] array_of_displacements, + Datatype oldtype) ; + + private native void GetHindexed(int[] array_of_blocklengths, + int[] array_of_displacements, + Datatype oldtype) ; + + + /* + * Constructor used by `Struct' + */ + + private Datatype(int[] array_of_blocklengths, int[] array_of_displacements, + Datatype[] array_of_types) throws MPIException { + + // Compute new base type + + baseType = UNDEFINED; + for (int i = 0; i < array_of_types.length; i++) { + int oldBaseType = array_of_types[i].baseType ; + if(oldBaseType != baseType) { + if(baseType == UNDEFINED) { + baseType = oldBaseType ; + + if(baseType != OBJECT) + baseSize = array_of_types[i].baseSize ; + } + else if(oldBaseType != UNDEFINED) { + System.out.println("Datatype.Struct: All base types must agree..."); + MPI.COMM_WORLD.Abort(1); + } + } + } + + // Allocate `displacements' if required + + if(baseType == OBJECT || baseType == UNDEFINED) { + int size = 0 ; + for (int i = 0; i < array_of_blocklengths.length; i++) + size += array_of_blocklengths[i] * array_of_types[i].Size(); + + displacements = new int [size] ; + } + + ubSet = false ; + lbSet = false ; + + lb = Integer.MAX_VALUE ; + ub = Integer.MIN_VALUE ; + + int ptr = 0 ; + for (int i = 0; i < array_of_blocklengths.length; i++) { + int blockLen = array_of_blocklengths [i] ; + if(blockLen > 0) { + Datatype oldtype = array_of_types [i] ; + int oldBaseType = oldtype.baseType ; + + if(oldBaseType == OBJECT || oldBaseType == UNDEFINED) { + + int oldSize = oldtype.Size() ; + boolean oldUbSet = oldtype.ubSet ; + boolean oldLbSet = oldtype.lbSet ; + + if(oldSize != 0 || oldLbSet || oldUbSet) { + + // `oldType.ub', `oldType.lb', `oldType.Extent()' all well-defined. + + int oldExtent = oldtype.Extent() ; + + int startBlock = array_of_displacements [i] ; + + // Compose normal displacements... + + for (int j = 0; j < blockLen ; j++) { + int startElement = startBlock + j * oldExtent ; + + for (int l = 0; l < oldSize; l++, ptr++) + displacements [ptr] = startElement + oldtype.displacements[l] ; + } + + // Now maximize/minimize upper/lower bounds + + // `ubSet' acts like a most significant positive bit in + // the maximization operation. + + if (oldUbSet == ubSet) { + int maxStartElement = + oldExtent > 0 ? startBlock + (blockLen - 1) * oldExtent + : startBlock ; + int max_ub = maxStartElement + oldtype.ub ; + if (max_ub > ub) + ub = max_ub ; + } + else if(oldUbSet) { + int maxStartElement = + oldExtent > 0 ? startBlock + (blockLen - 1) * oldExtent + : startBlock ; + ub = maxStartElement + oldtype.ub ; + ubSet = true ; + } + + // `lbSet' acts like a most significant negative bit in + // the minimization operation. + + if (oldLbSet == lbSet) { + int minStartElement = + oldExtent > 0 ? startBlock + : startBlock + (blockLen - 1) * oldExtent ; + int min_lb = minStartElement + oldtype.lb ; + if (min_lb < lb) + lb = min_lb ; + } + else if(oldLbSet) { + int minStartElement = + oldExtent > 0 ? 
startBlock + : startBlock + (blockLen - 1) * oldExtent ; + lb = minStartElement + oldtype.lb ; + lbSet = true ; + } + } + else { + + // `oldType.ub', `oldType.lb' and `oldType.Extent()' are undefined. + // Can ignore unless... + + if(blockLen > 1) { + System.out.println("Datatype.Struct: repeat-count specified " + + "for component with undefined extent"); + MPI.COMM_WORLD.Abort(1); + } + } + } + } + } + + if(baseType != OBJECT && baseType != UNDEFINED) + GetStruct(array_of_blocklengths, array_of_displacements, array_of_types, + lbSet, lb, ubSet, ub) ; + } + + + private native void GetStruct(int[] array_of_blocklengths, + int[] array_of_displacements, + Datatype[] array_of_types, + boolean lbSet, int lb, boolean ubSet, int ub) ; + + + protected boolean isObject() { + return baseType == OBJECT || baseType == UNDEFINED ; + } + + /** + * Returns the extent of a datatype - the difference between + * upper and lower bound. + *

+ * returns:   datatype extent

+ * Java binding of the MPI operation MPI_TYPE_EXTENT. + */ + + public int Extent() throws MPIException { + if(baseType == OBJECT || baseType == UNDEFINED) + return ub - lb ; + else + return extent() / baseSize ; + } + + private native int extent(); + + /** + * Returns the total size of a datatype - the number of buffer + * elements it represents. + *

+ * returns:   datatype size

+ * Java binding of the MPI operation MPI_TYPE_SIZE. + */ + + public int Size() throws MPIException { + if(baseType == OBJECT || baseType == UNDEFINED) + return displacements.length; + else + return size() / baseSize ; + } + + private native int size(); + + /** + * Find the lower bound of a datatype - the least value + * in its displacement sequence. + *

+ * returns:   displacement of lower bound from origin

+ * Java binding of the MPI operation MPI_TYPE_LB. + */ + + public int Lb() throws MPIException { + if(baseType == OBJECT || baseType == UNDEFINED) + return lb; + else + return lB() / baseSize ; + } + + private native int lB(); + + /** + * Find the upper bound of a datatype - the greatest value + * in its displacement sequence. + *

+ * returns:   displacement of upper bound from origin

+ * Java binding of the MPI operation MPI_TYPE_UB. + */ + + public int Ub() throws MPIException { + if(baseType == OBJECT || baseType == UNDEFINED) + return ub; + else + return uB() / baseSize ; + } + + private native int uB(); + + /** + * Commit a derived datatype. + * Java binding of the MPI operation MPI_TYPE_COMMIT. + */ + + public void Commit() throws MPIException { + if (baseType != OBJECT && baseType != UNDEFINED) + commit() ; + } + + private native void commit(); + + @SuppressWarnings("unchecked") + public void finalize() throws MPIException { + synchronized(MPI.class) { + MPI.freeList.addFirst(this) ; + } + } + + native void free() ; + + /** + * Construct new datatype representing replication of old datatype into + * contiguous locations. + *

+ * count      replication count
+ * oldtype    old datatype
+ * returns:   new datatype

+ * Java binding of the MPI operation MPI_TYPE_CONTIGUOUS. + *
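+ * For instance (a sketch):
+ *
+ *   Datatype pair = Datatype.Contiguous(2, MPI.DOUBLE);
+ *   pair.Commit();
+ *   // a count of n items of type pair now describes 2*n doubles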

+ * The base type of the new datatype is the same as the base type of + * oldtype. + */ + + public static Datatype Contiguous(int count, + Datatype oldtype) throws MPIException { + + return new Datatype(count, oldtype) ; + } + + /** + * Construct new datatype representing replication of old datatype into + * locations that consist of equally spaced blocks. + *

+ * count        number of blocks
+ * blocklength  number of elements in each block
+ * stride       number of elements between start of each block
+ * oldtype      old datatype
+ * returns:     new datatype

+ * Java binding of the MPI operation MPI_TYPE_VECTOR. + *
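+ * For instance (a sketch), one column of a 4x4 double matrix stored
+ * row-major in a flat buffer:
+ *
+ *   Datatype column = Datatype.Vector(4, 1, 4, MPI.DOUBLE);
+ *   column.Commit();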

+ * The base type of the new datatype is the same as the base type of + * oldtype. + */ + + public static Datatype Vector(int count, + int blocklength, + int stride, + Datatype oldtype) throws MPIException { + + return new Datatype(count, blocklength, stride, oldtype, true) ; + } + + /** + * Identical to vector except that the stride is expressed + * directly in terms of the buffer index, rather than the units of + * the old type. + *

+ * count        number of blocks
+ * blocklength  number of elements in each block
+ * stride       number of elements between start of each block
+ * oldtype      old datatype
+ * returns:     new datatype

+ * Java binding of the MPI operation MPI_TYPE_HVECTOR. + *

+ * Unlike other language bindings, the value of stride + * is not measured in bytes. + */ + + public static Datatype Hvector(int count, + int blocklength, + int stride, + Datatype oldtype) throws MPIException { + + return new Datatype(count, blocklength, stride, oldtype, false) ; + } + + /** + * Construct new datatype representing replication of old datatype into + * a sequence of blocks where each block can contain a different number + * of copies and have a different displacement. + *

+ * array_of_blocklengths   number of elements per block
+ * array_of_displacements  displacement of each block in units of old type
+ * oldtype                 old datatype
+ * returns:                new datatype

+ * Java binding of the MPI operation MPI_TYPE_INDEXED. + *
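+ * For instance (a sketch), the upper triangle of a 3x3 double matrix
+ * stored row-major:
+ *
+ *   int[] blens = {3, 2, 1};
+ *   int[] disps = {0, 4, 8};
+ *   Datatype upper = Datatype.Indexed(blens, disps, MPI.DOUBLE);
+ *   upper.Commit();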

+ * The number of blocks is taken to be size of the + * array_of_blocklengths argument. The second argument, + * array_of_displacements, should be the same size. + * The base type of the new datatype is the same as the base type of + * oldtype. + */ + + public static Datatype Indexed(int[] array_of_blocklengths, + int[] array_of_displacements, + Datatype oldtype) throws MPIException { + + return new Datatype(array_of_blocklengths, array_of_displacements, + oldtype, true) ; + } + + /** + * Identical to indexed except that the displacements are + * expressed directly in terms of the buffer index, rather than the + * units of the old type. + *

+ * array_of_blocklengths   number of elements per block
+ * array_of_displacements  displacement in buffer for each block
+ * oldtype                 old datatype
+ * returns:                new datatype

+ * Java binding of the MPI operation MPI_TYPE_HINDEXED. + *

+ * Unlike other language bindings, the values in + * array_of_displacements are not measured in bytes. + */ + + public static Datatype Hindexed(int[] array_of_blocklengths, + int[] array_of_displacements, + Datatype oldtype) throws MPIException { + return new Datatype(array_of_blocklengths, array_of_displacements, + oldtype, false) ; + } + + /** + * The most general type constructor. + *

+ * array_of_blocklengths   number of elements per block
+ * array_of_displacements  displacement in buffer for each block
+ * array_of_types          type of elements in each block
+ * returns:                new datatype

+ * Java binding of the MPI operation MPI_TYPE_STRUCT. + *
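+ * For instance (a sketch; note that in this binding all component types
+ * with definite base types must share one base type, so padding is
+ * expressed here with the pseudo-type MPI.UB):
+ *
+ *   int[]      blens = {2, 1};
+ *   int[]      disps = {0, 4};
+ *   Datatype[] types = {MPI.DOUBLE, MPI.UB};
+ *   Datatype padded = Datatype.Struct(blens, disps, types);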

+ * The number of blocks is taken to be size of the + * array_of_blocklengths argument. The second and third + * arguments, array_of_displacements, and array_of_types, + * should be the same size. + * Unlike other language bindings, the values in + * array_of_displacements are not measured in bytes. + * All elements of array_of_types with definite base types + * must have the same base type: this will be the base + * type of new datatype. + */ + + public static Datatype Struct(int[] array_of_blocklengths, + int[] array_of_displacements, + Datatype[] array_of_types) throws MPIException { + return new Datatype(array_of_blocklengths, array_of_displacements, + array_of_types) ; + } + + protected long handle; + protected int baseType ; + protected int baseSize ; // or private + + protected int displacements[] ; + + protected int lb, ub ; + + protected boolean ubSet, lbSet ; + // Flags set if MPI.UB, MPI.LB respectively appears as a component type. + + static { + init(); + } + +} + +// Things to do: +// +// Initialization and use of `baseSize' should probably be done entirely +// on JNI side. +// +// `baseType' could just take values from {UNDEFINED, OBJECT, NATIVE}? +// (But in future may want to add runtime checks using exact value.) + diff --git a/ompi/mpi/java/java/Errhandler.java b/ompi/mpi/java/java/Errhandler.java new file mode 100644 index 0000000000..c45a173091 --- /dev/null +++ b/ompi/mpi/java/java/Errhandler.java @@ -0,0 +1,45 @@ +/* + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ +/* + * File : Errhandler.java + * Author : Xinying Li + * Created : Thu Apr 9 12:22:15 1998 + * Revision : $Revision: 1.3 $ + * Updated : $Date: 2001/08/07 16:36:25 $ + * Copyright: Northeast Parallel Architectures Center + * at Syracuse University 1998 + */ + +package mpi; +//import mpi.*; + +public class Errhandler{ + public final static int FATAL = 1; + public final static int RETURN = 0; + + private static native void init(); + + //public Errhandler() {} + public Errhandler(int Type) { GetErrhandler(Type);} + public Errhandler(long _handle) { handle = _handle;} + + protected native void GetErrhandler(int Type); + + protected long handle; + + static { + init(); + } + +} diff --git a/ompi/mpi/java/java/Freeable.java b/ompi/mpi/java/java/Freeable.java new file mode 100644 index 0000000000..839ff972d9 --- /dev/null +++ b/ompi/mpi/java/java/Freeable.java @@ -0,0 +1,27 @@ +/* + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ +/* + * File : Freeable.java + * Author : Bryan Carpenter + * Created : Wed Jan 15 23:14:43 EST 2003 + * Revision : $Revision: 1.1 $ + * Updated : $Date: 2003/01/16 16:39:34 $ + */ + +package mpi; + +abstract class Freeable { + abstract void free() ; +} + diff --git a/ompi/mpi/java/java/GraphParms.java b/ompi/mpi/java/java/GraphParms.java new file mode 100644 index 0000000000..61d43ce934 --- /dev/null +++ b/ompi/mpi/java/java/GraphParms.java @@ -0,0 +1,30 @@ +/* + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ +/* + * File : GraphParms.java + * Author : Xinying Li + * Created : Thu Apr 9 12:22:15 1998 + * Revision : $Revision: 1.1 $ + * Updated : $Date: 1998/08/26 18:49:55 $ + * Copyright: Northeast Parallel Architectures Center + * at Syracuse University 1998 + */ + +package mpi; + +public class GraphParms { + public int [] index; + public int [] edges; +} + diff --git a/ompi/mpi/java/java/Graphcomm.java b/ompi/mpi/java/java/Graphcomm.java new file mode 100644 index 0000000000..d7869f33a1 --- /dev/null +++ b/ompi/mpi/java/java/Graphcomm.java @@ -0,0 +1,94 @@ +/* + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ +/* + * File : Graphcomm.java + * Author : Xinying Li + * Created : Thu Apr 9 12:22:15 1998 + * Revision : $Revision: 1.5 $ + * Updated : $Date: 2001/10/22 21:07:55 $ + * Copyright: Northeast Parallel Architectures Center + * at Syracuse University 1998 + */ + +package mpi; + +public class Graphcomm extends Intracomm { + + protected Graphcomm(long handle) throws MPIException { + super(handle) ; + } + + public Object clone() { + try { + return new Graphcomm(super.dup()) ; + } + catch (MPIException e) { + throw new RuntimeException(e.getMessage()) ; + } + } + + /** + * Returns graph topology information. + *

+ * returns:   object defining node degrees and edges of graph

+ * Java binding of the MPI operation MPI_GRAPHDIMS_GET. + *

+ * The number of nodes and number of edges can be extracted + * from the sizes of the index and edges fields + * of the returned object. + */ + + public native GraphParms Get() throws MPIException ; + + /** + * Provides adjacency information for general graph topology. + *

+ * rank       rank of a process in the group of this communicator
+ * returns:   array of ranks of neighbouring processes to one specified

+ * Java binding of the MPI operations MPI_GRAPH_NEIGHBORS_COUNT
+ * and MPI_GRAPH_NEIGHBORS.
+ *

+ * The number of neighbours can be extracted from the size of the result. + */ + + public native int [] Neighbours(int rank) throws MPIException ; + + /** + * Compute an optimal placement. + *

+ * index      node degrees
+ * edges      graph edges
+ * returns:   reordered rank of calling process

+ * Java binding of the MPI operation MPI_GRAPH_MAP. + *

+ * The number of nodes is taken to be size of the index argument. + */ + + public native int Map(int [] index, int [] edges) throws MPIException ; + +} + diff --git a/ompi/mpi/java/java/Group.java b/ompi/mpi/java/java/Group.java new file mode 100644 index 0000000000..930b127946 --- /dev/null +++ b/ompi/mpi/java/java/Group.java @@ -0,0 +1,266 @@ +/* + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ +/* + * File : Group.java + * Author : Xinying Li, Bryan Carpenter + * Created : Thu Apr 9 12:22:15 1998 + * Revision : $Revision: 1.8 $ + * Updated : $Date: 2003/01/16 16:39:34 $ + * Copyright: Northeast Parallel Architectures Center + * at Syracuse University 1998 + */ + +package mpi; +//import mpi.*; + +public class Group extends Freeable { + protected final static int EMPTY = 0; + + private static native void init(); + protected long handle; + + //public Group() {} + protected Group(int Type) { GetGroup(Type); } + protected Group(long _handle) { handle = _handle;} + private native void GetGroup(int Type); + + /** + * Size of group. + *

+ * returns:   number of processes in the group

+ * Java binding of the MPI operation MPI_GROUP_SIZE. + */ + + public native int Size() throws MPIException ; + + /** + * Rank of this process in group. + *

+ * returns:   rank of the calling process in the group

+ * Java binding of the MPI operation MPI_GROUP_RANK. + * + * Result value is MPI.UNDEFINED if this process is not + * a member of the group. + */ + + public native int Rank() throws MPIException ; + + /** + * Destructor. + *

+ * Java binding of the MPI operation MPI_GROUP_FREE. + */ + + @SuppressWarnings("unchecked") + public void finalize() throws MPIException { + synchronized(MPI.class) { + MPI.freeList.addFirst(this) ; + } + } + + native void free() ; + + /** + * Translate ranks within one group to ranks within another. + *

+ * group1     a group
+ * ranks1     array of valid ranks in group1
+ * group2     another group
+ * returns:   array of corresponding ranks in group2

+ * Java binding of the MPI operation MPI_GROUP_TRANSLATE_RANKS. + *

+ * Result elements are MPI.UNDEFINED where no correspondence + * exists. + */ + + public static native int [] Translate_ranks(Group group1,int [] ranks1, + Group group2) + throws MPIException ; + + /** + * Compare two groups. + *

+ * group1     first group
+ * group2     second group
+ * returns:   result

+ * Java binding of the MPI operation MPI_GROUP_COMPARE. + *

+ * MPI.IDENT results if the group members and group order are + * exactly the same in both groups. MPI.SIMILAR results if + * the group members are the same but the order is different. + * MPI.UNEQUAL results otherwise. + */ + + public static native int Compare(Group group1, Group group2) + throws MPIException ; + + /** + * Set union of two groups. + *

+ * group1     first group
+ * group2     second group
+ * returns:   union group

+ * Java binding of the MPI operation MPI_GROUP_UNION. + */ + + public static Group Union(Group group1, Group group2) throws MPIException { + return new Group(union(group1, group2)) ; + } + + private static native long union(Group group1, Group group2); + + /** + * Set intersection of two groups. + *

+ * group1     first group
+ * group2     second group
+ * returns:   intersection group

+ * Java binding of the MPI operation MPI_GROUP_INTERSECTION. + */ + + public static Group Intersection(Group group1,Group group2) + throws MPIException { + return new Group(intersection(group1, group2)) ; + } + + private static native long intersection(Group group1, Group group2); + + /** + * Result contains all elements of the first group that are not in the + * second group. + *

+ * group1     first group
+ * group2     second group
+ * returns:   difference group

+ * Java binding of the MPI operation MPI_GROUP_DIFFERENCE. + */ + + public static Group Difference(Group group1, Group group2) + throws MPIException { + return new Group(difference(group1, group2)) ; + } + + private static native long difference(Group group1, Group group2) ; + + /** + * Create a subset group including specified processes. + *

+ * ranks      ranks from this group to appear in new group
+ * returns:   new group

+ * Java binding of the MPI operation MPI_GROUP_INCL. + */ + + public Group Incl(int [] ranks) throws MPIException { + return new Group(incl(ranks)) ; + } + + private native long incl(int [] ranks); + + /** + * Create a subset group excluding specified processes. + *

+ * ranks      ranks from this group not to appear in new group
+ * returns:   new group

+ * Java binding of the MPI operation MPI_GROUP_EXCL. + */ + + public Group Excl(int [] ranks) throws MPIException { + return new Group(excl(ranks)) ; + } + + private native long excl(int [] ranks) ; + + /** + * Create a subset group including processes specified + * by strided intervals of ranks. + *

+ * ranges     array of integer triplets
+ * returns:   new group

+ * Java binding of the MPI operation MPI_GROUP_RANGE_INCL. + *
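+ * For instance (a sketch), ranks 0, 2, 4 and 6 of a group g, using one
+ * (first rank, last rank, stride) triplet as described below:
+ *
+ *   Group evens = g.Range_incl(new int[][] {{0, 6, 2}});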

+ * The triplets are of the form (first rank, last rank, stride) + * indicating ranks in this group to be included in the new group. + * The size of the first dimension of ranges is the number + * of triplets. The size of the second dimension is 3. + */ + + public Group Range_incl(int [][] ranges) throws MPIException { + return new Group(range_incl(ranges)) ; + } + + private native long range_incl(int [][] ranges) ; + + /** + * Create a subset group excluding processes specified + * by strided intervals of ranks. + *

+ * ranges     array of integer triplets
+ * returns:   new group

+ * Java binding of the MPI operation MPI_GROUP_RANGE_EXCL. + *

+ * Triplet array is defined as for Range_incl, the ranges + * indicating ranks in this group to be excluded from the new group. + */ + + public Group Range_excl(int [][] ranges) throws MPIException { + return new Group(range_excl(ranges)) ; + } + + private native long range_excl(int [][] ranges) ; + + static { + init(); + } + +} + diff --git a/ompi/mpi/java/java/Intercomm.java b/ompi/mpi/java/java/Intercomm.java new file mode 100644 index 0000000000..03ccf51233 --- /dev/null +++ b/ompi/mpi/java/java/Intercomm.java @@ -0,0 +1,85 @@ +/* + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ +/* + * File : Intercomm.java + * Author : Xinying Li + * Created : Thu Apr 9 12:22:15 1998 + * Revision : $Revision: 1.5 $ + * Updated : $Date: 1999/09/14 20:50:11 $ + * Copyright: Northeast Parallel Architectures Center + * at Syracuse University 1998 + */ + +package mpi; +//import mpi.*; + +public class Intercomm extends Comm { + + protected Intercomm(long handle) {super(handle) ;} + + public Object clone() { + return new Intercomm(super.dup()); + } + + // Inter-Communication + + /** + * Size of remote group. + *

+ * returns:   number of processes in remote group of this communicator

+ * Java binding of the MPI operation MPI_COMM_REMOTE_SIZE. + */ + + public native int Remote_size() throws MPIException ; + + /** + * Return the remote group. + *

+ * returns:   remote group of this communicator

+ * Java binding of the MPI operation MPI_COMM_REMOTE_GROUP.
+ */
+
+  public Group Remote_group() throws MPIException {
+    return new Group(remote_group());
+  }
+
+  private native long remote_group();
+
+ /**
+ * Create an intra-communicator by merging the local and remote groups
+ * of this inter-communicator.
+ *

+ * high       true if the local group has higher ranks in combined group
+ * returns:   new intra-communicator

+ * Java binding of the MPI operation MPI_INTERCOMM_MERGE. + */ + + public Intracomm Merge(boolean high) throws MPIException { + return new Intracomm(merge(high)) ; + } + + private native long merge(boolean high); +} + diff --git a/ompi/mpi/java/java/Intracomm.java b/ompi/mpi/java/java/Intracomm.java new file mode 100644 index 0000000000..63bd6aad31 --- /dev/null +++ b/ompi/mpi/java/java/Intracomm.java @@ -0,0 +1,992 @@ +/* + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ +/* + * File : Intracommm.java + * Author : Sang Lim, Xinying Li, Bryan Carpenter + * Created : Thu Apr 9 12:22:15 1998 + * Revision : $Revision: 1.14 $ + * Updated : $Date: 2002/12/16 15:25:13 $ + * Copyright: Northeast Parallel Architectures Center + * at Syracuse University 1998 + */ + +package mpi; + +public class Intracomm extends Comm { + + Intracomm() {} + + void setType(int type) { + super.setType(type) ; + + shadow = new Comm(dup()) ; + } + + protected Intracomm(long handle) throws MPIException { + super(handle) ; + + shadow = new Comm(dup()) ; + } + + public Object clone() { + try { + return new Intracomm(dup()) ; + } + catch (MPIException e) { + throw new RuntimeException(e.getMessage()) ; + } + } + + /** + * Partition the group associated with this communicator and create + * a new communicator within each subgroup. + *

+ * color      control of subset assignment
+ * key        control of rank assignment
+ * returns:   new communicator

+ * Java binding of the MPI operation MPI_COMM_SPLIT. + */ + + public Intracomm Split(int colour, int key) throws MPIException { + long splitHandle = split(colour,key) ; + if(splitHandle == nullHandle) + return null ; + else + return new Intracomm(splitHandle) ; + } + + private native long split(int colour, int key); + + /** + * Create a new communicator. + *

+ * group      group which is a subset of the group of this communicator
+ * returns:   new communicator

+ * Java binding of the MPI operation MPI_COMM_CREATE.
+ */
+
+  public Intracomm Creat(Group group) throws MPIException {
+    long creatHandle = creat(group) ;
+    if(creatHandle == nullHandle)
+      return null ;
+    else
+      return new Intracomm(creatHandle) ;
+  }
+
+  private native long creat(Group group);
+
+  // Collective Communication
+
+ /**
+ * A call to Barrier blocks the caller until all processes
+ * in the group have called it.
+ *

+ * Java binding of the MPI operation MPI_BARRIER. + */ + + public native void Barrier() throws MPIException ; + + /* + * The type signature of `incount * intype' must be equal to the type + * signature of `outcount * outtype' (ie they must represent the same + * number of basic elements of the same type). + */ + + private void copyBuffer(Object inbuf, + int inoffset, int incount, Datatype intype, + Object outbuf, + int outoffset, int outcount, Datatype outtype) + throws MPIException { + if(intype.isObject()) { + Object [] inbufArray = (Object[])inbuf; + Object [] outbufArray = (Object[])outbuf; + + int outbase = outoffset, inbase = inoffset ; + int kout = 0 ; + for (int j = 0 ; j < incount ; j++) { + for (int k = 0 ; k < intype.displacements.length ; k++) + outbufArray [outbase + outtype.displacements [kout]] = + inbufArray [inbase + intype.displacements [k]] ; + + inbase += intype.Extent() ; + + kout++; + if (kout == outtype.displacements.length){ + kout = 0; + outbase += outtype.Extent() ; + } + } + } + else { + byte [] tmpbuf = new byte [Pack_size(incount, intype)] ; + Pack(inbuf, inoffset, incount, intype, tmpbuf, 0) ; + Unpack(tmpbuf, 0, outbuf, outoffset, outcount, outtype) ; + } + } + + private Object newBuffer(Object template) { + if(template instanceof Object[]) + return new Object [((Object[]) template).length] ; + + if(template instanceof byte[]) + return new byte [((byte[]) template).length] ; + + if(template instanceof char[]) + return new char [((char[]) template).length] ; + + if(template instanceof short[]) + return new short [((short[]) template).length] ; + + if(template instanceof boolean[]) + return new boolean [((boolean[]) template).length] ; + + if(template instanceof int[]) + return new int [((int[]) template).length] ; + + if(template instanceof long[]) + return new long [((long[]) template).length] ; + + if(template instanceof float[]) + return new float [((float[]) template).length] ; + + if(template instanceof double[]) + return new double [((double[]) template).length] ; + + return null ; + } + + /** + * Broadcast a message from the process with rank root + * to all processes of the group. + *
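+ * For instance (a sketch, broadcasting four ints from rank 0):
+ *
+ *   int[] params = new int[4];
+ *   if (MPI.COMM_WORLD.Rank() == 0)
+ *     params[0] = 42;   // root fills the buffer
+ *   MPI.COMM_WORLD.Bcast(params, 0, 4, MPI.INT, 0);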

+ * buf        buffer array
+ * offset     initial offset in buffer
+ * count      number of items in buffer
+ * datatype   datatype of each item in buffer
+ * root       rank of broadcast root

+ * Java binding of the MPI operation MPI_BCAST.
+ */
+
+  public void Bcast(Object buf,
+                    int offset,
+                    int count,
+                    Datatype type,
+                    int root) throws MPIException {
+
+    if (type.isObject()){
+      if (Rank() == root){
+        for (int dst = 0; dst < Size(); dst++)
+          if (dst != root)
+            shadow.Send(buf, offset, count, type, dst, 0);
+      }
+      else
+        shadow.Recv(buf, offset, count, type, root, 0);
+    }
+    else
+      bcast(buf, offset*type.Size(), count, type, root);
+  }
+
+  private native void bcast(Object buf,
+                            int offset,
+                            int count,
+                            Datatype type,
+                            int root);
+
+ /**
+ * Each process sends the contents of its send buffer to the
+ * root process.
+ *
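+ * For instance (a sketch, gathering one int from every rank to rank 0):
+ *
+ *   int[] mine = { MPI.COMM_WORLD.Rank() };
+ *   int[] all  = new int[MPI.COMM_WORLD.Size()];
+ *   MPI.COMM_WORLD.Gather(mine, 0, 1, MPI.INT, all, 0, 1, MPI.INT, 0);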

+ * sendbuf     send buffer array
+ * sendoffset  initial offset in send buffer
+ * sendcount   number of items to send
+ * sendtype    datatype of each item in send buffer
+ * recvbuf     receive buffer array
+ * recvoffset  initial offset in receive buffer
+ * recvcount   number of items to receive
+ * recvtype    datatype of each item in receive buffer
+ * root        rank of receiving process

+ * Java binding of the MPI operation MPI_GATHER. + */ + + public void Gather(Object sendbuf, + int sendoffset, + int sendcount, + Datatype sendtype, + Object recvbuf, + int recvoffset, + int recvcount, + Datatype recvtype, + int root) throws MPIException { + + if (sendtype.isObject()) { + if (Rank() == root) { + for (int src = 0; src < Size(); src++) { + int dstOffset = recvoffset + recvcount * recvtype.Extent() * src ; + if (src == root) + copyBuffer(sendbuf, sendoffset, sendcount, sendtype, + recvbuf, dstOffset, recvcount, recvtype) ; + else + shadow.Recv(recvbuf, dstOffset, recvcount, recvtype, src, 0); + } + } + else + shadow.Send(sendbuf, sendoffset, sendcount, sendtype, root, 0); + } + else + gather(sendbuf, sendoffset*sendtype.Size(), sendcount,sendtype, + recvbuf, recvoffset*recvtype.Size(), recvcount,recvtype, + root); + } + + private native void gather(Object sendbuf, + int sendoffset, + int sendcount, + Datatype sendtype, + Object recvbuf, + int recvoffset, + int recvcount, + Datatype recvtype, + int root); + + /** + * Extends functionality of Gather by allowing varying + * counts of data from each process. + *

+ * sendbuf     send buffer array
+ * sendoffset  initial offset in send buffer
+ * sendcount   number of items to send
+ * sendtype    datatype of each item in send buffer
+ * recvbuf     receive buffer array
+ * recvoffset  initial offset in receive buffer
+ * recvcounts  number of elements received from each process
+ * displs      displacements at which to place incoming data
+ * recvtype    datatype of each item in receive buffer
+ * root        rank of receiving process

+ * Java binding of the MPI operation MPI_GATHERV. + *

+ * The sizes of arrays recvcounts and displs should be the + * size of the group. Entry i of displs specifies the + * displacement relative to element recvoffset of recvbuf + * at which to place incoming data. + */ + + public void Gatherv(Object sendbuf, + int sendoffset, + int sendcount, + Datatype sendtype, + Object recvbuf, + int recvoffset, + int [] recvcount, + int [] displs, + Datatype recvtype, + int root) throws MPIException { + + if (sendtype.isObject()){ + if (Rank() == root){ + for (int src = 0; src < Size(); src++){ + int dstOffset = recvoffset + sendtype.Extent() * displs[src] ; + if (src == root) + copyBuffer(sendbuf, sendoffset, sendcount,sendtype, + recvbuf, dstOffset, recvcount[src], recvtype); + else + shadow.Recv(recvbuf, dstOffset, recvcount[src], recvtype, src, 0); + } + } + else + shadow.Send(sendbuf, sendoffset, sendcount, sendtype, root, 0); + } + else + gatherv(sendbuf , sendoffset*sendtype.Size(), + sendcount, sendtype, + recvbuf , recvoffset*recvtype.Size(), + recvcount, displs, + recvtype , root); + } + + private native void gatherv(Object sendbuf, + int sendoffset, + int sendcount, + Datatype sendtype, + Object recvbuf, + int recvoffset, + int [] recvcount, + int [] displs, + Datatype recvtype, + int root); + + /** + * Inverse of the operation Gather. + *

+ * sendbuf     send buffer array
+ * sendoffset  initial offset in send buffer
+ * sendcount   number of items to send
+ * sendtype    datatype of each item in send buffer
+ * recvbuf     receive buffer array
+ * recvoffset  initial offset in receive buffer
+ * recvcount   number of items to receive
+ * recvtype    datatype of each item in receive buffer
+ * root        rank of sending process

+ * Java binding of the MPI operation MPI_SCATTER. + */ + + public void Scatter(Object sendbuf, + int sendoffset, + int sendcount, + Datatype sendtype, + Object recvbuf, + int recvoffset, + int recvcount, + Datatype recvtype, + int root) throws MPIException { + + if (sendtype.isObject()){ + if (Rank() == root){ + for (int dst = 0; dst < Size() ; dst++){ + int srcOffset = sendoffset + sendcount * sendtype.Extent() * dst ; + if (dst == root) + copyBuffer(sendbuf, srcOffset, sendcount, sendtype, + recvbuf, recvoffset, recvcount, recvtype); + else + shadow.Send(sendbuf, srcOffset, sendcount, sendtype, dst, 0); + } + } + else + shadow.Recv(recvbuf, recvoffset, recvcount, recvtype, root, 0); + } + else + scatter(sendbuf, sendoffset*sendtype.Size(), sendcount, sendtype, + recvbuf, recvoffset*recvtype.Size(), recvcount, recvtype, + root); + } + + private native void scatter(Object sendbuf, + int sendoffset, + int sendcount, + Datatype sendtype, + Object recvbuf, + int recvoffset, + int recvcount, + Datatype recvtype, + int root); + + /** + * Inverse of the operation Gatherv. + *

+ * sendbuf     send buffer array
+ * sendoffset  initial offset in send buffer
+ * sendcounts  number of items sent to each process
+ * displs      displacements from which to take outgoing data
+ * sendtype    datatype of each item in send buffer
+ * recvbuf     receive buffer array
+ * recvoffset  initial offset in receive buffer
+ * recvcount   number of items to receive
+ * recvtype    datatype of each item in receive buffer
+ * root        rank of sending process

+ * Java binding of the MPI operation MPI_SCATTERV. + */ + + public void Scatterv(Object sendbuf, + int sendoffset, + int [] sendcount, + int [] displs, + Datatype sendtype, + Object recvbuf, + int recvoffset, + int recvcount, + Datatype recvtype, + int root) throws MPIException { + + if (sendtype.isObject()){ + if (Rank() == root){ + for (int dst = 0 ; dst < Size() ; dst++){ + int srcOffset = sendoffset + sendtype.Extent() * displs[dst] ; + if (dst == root) + copyBuffer(sendbuf, srcOffset, sendcount[dst], sendtype, + recvbuf, recvoffset, recvcount, recvtype); + else + shadow.Send(sendbuf, srcOffset, sendcount[dst], sendtype, dst, 0); + } + } + else + shadow.Recv(recvbuf, recvoffset, recvcount, recvtype, root, 0); + } + else + scatterv(sendbuf, sendoffset * sendtype.Size(), sendcount, + displs, sendtype, + recvbuf, recvoffset * recvtype.Size(), recvcount, recvtype, + root); + } + + private native void scatterv(Object sendbuf, + int sendoffset, + int [] sendcount, + int [] displs, + Datatype sendtype, + Object recvbuf, + int recvoffset, + int recvcount, + Datatype recvtype, + int root); + + /** + * Similar to Gather, but all processes receive the result. + *

+ * sendbuf     send buffer array
+ * sendoffset  initial offset in send buffer
+ * sendcount   number of items to send
+ * sendtype    datatype of each item in send buffer
+ * recvbuf     receive buffer array
+ * recvoffset  initial offset in receive buffer
+ * recvcount   number of items to receive
+ * recvtype    datatype of each item in receive buffer

+ * Java binding of the MPI operation MPI_ALLGATHER. + */ + + public void Allgather(Object sendbuf, + int sendoffset, + int sendcount, + Datatype sendtype, + Object recvbuf, + int recvoffset, + int recvcount, + Datatype recvtype) throws MPIException { + + if (sendtype.isObject()){ + Gather(sendbuf, sendoffset, sendcount, sendtype, + recvbuf, recvoffset, recvcount, recvtype, 0); + Bcast(recvbuf, recvoffset, Size() * recvcount, recvtype, 0); + } + else + allgather(sendbuf, sendoffset*sendtype.Size(), + sendcount, sendtype, + recvbuf, recvoffset*recvtype.Size(), + recvcount, recvtype); + } + + private native void allgather(Object sendbuf, + int sendoffset, + int sendcount, + Datatype sendtype, + Object recvbuf, + int recvoffset, + int recvcount, + Datatype recvtype); + + + /** + * Similar to Gatherv, but all processes receive the result. + *

+ * sendbuf     send buffer array
+ * sendoffset  initial offset in send buffer
+ * sendcount   number of items to send
+ * sendtype    datatype of each item in send buffer
+ * recvbuf     receive buffer array
+ * recvoffset  initial offset in receive buffer
+ * recvcounts  number of elements received from each process
+ * displs      displacements at which to place incoming data
+ * recvtype    datatype of each item in receive buffer

+ * Java binding of the MPI operation MPI_ALLGATHERV. + */ + + public void Allgatherv(Object sendbuf, + int sendoffset, + int sendcount, + Datatype sendtype, + Object recvbuf, + int recvoffset, + int [] recvcount, + int [] displs, + Datatype recvtype) throws MPIException { + + if (sendtype.isObject()){ + Gatherv(sendbuf, sendoffset, sendcount, sendtype, + recvbuf, recvoffset, recvcount, displs, recvtype, 0); + + for (int src = 0; src < Size(); src++){ + int dstOffset = recvoffset + sendtype.Extent() * displs[src] ; + Bcast(recvbuf, dstOffset, recvcount[src], recvtype, 0); + } + } + else + allgatherv(sendbuf , sendoffset*sendtype.Size(), + sendcount, sendtype, + recvbuf , recvoffset*recvtype.Size(), + recvcount, displs, + recvtype); + } + + private native void allgatherv(Object sendbuf, + int sendoffset, + int sendcount, + Datatype sendtype, + Object recvbuf, + int recvoffset, + int [] recvcount, + int [] displs, + Datatype recvtype); + + /** + * Extension of Allgather to the case where each process sends + * distinct data to each of the receivers. + *

+ * sendbuf     send buffer array
+ * sendoffset  initial offset in send buffer
+ * sendcount   number of items sent to each process
+ * sendtype    datatype of send buffer items
+ * recvbuf     receive buffer array
+ * recvoffset  initial offset in receive buffer
+ * recvcount   number of items received from any process
+ * recvtype    datatype of receive buffer items

+ * Java binding of the MPI operation MPI_ALLTOALL. + */ + + public void Alltoall(Object sendbuf, + int sendoffset, + int sendcount, + Datatype sendtype, + Object recvbuf, + int recvoffset, + int recvcount, + Datatype recvtype) throws MPIException { + + if (sendtype.isObject()) + for (int dst = 0; dst < Size(); dst++) { + int srcOffset = sendoffset + sendcount * sendtype.Extent() * dst ; + Gather(sendbuf, srcOffset, sendcount, sendtype, + recvbuf, recvoffset, recvcount, recvtype, dst); + } + else + alltoall(sendbuf, sendoffset*sendtype.Size(), + sendcount, sendtype, + recvbuf, recvoffset*recvtype.Size(), + recvcount, recvtype); + } + + private native void alltoall(Object sendbuf, + int sendoffset, + int sendcount, + Datatype sendtype, + Object recvbuf, + int recvoffset, + int recvcount, + Datatype recvtype); + + /** + * Adds flexibility to Alltoall: location of data for send is + * specified by sdispls and location to place data on receive + * side is specified by rdispls. + *

+ * <table>
+ * <tr><td><tt> sendbuf    </tt></td><td> send buffer array </tr>
+ * <tr><td><tt> sendoffset </tt></td><td> initial offset in send buffer </tr>
+ * <tr><td><tt> sendcounts </tt></td><td> number of items sent to each process </tr>
+ * <tr><td><tt> sdispls    </tt></td><td> displacements from which to take outgoing data </tr>
+ * <tr><td><tt> sendtype   </tt></td><td> datatype of send buffer items </tr>
+ * <tr><td><tt> recvbuf    </tt></td><td> receive buffer array </tr>
+ * <tr><td><tt> recvoffset </tt></td><td> initial offset in receive buffer </tr>
+ * <tr><td><tt> recvcounts </tt></td><td> number of elements received from each process </tr>
+ * <tr><td><tt> rdispls    </tt></td><td> displacements at which to place incoming data </tr>
+ * <tr><td><tt> recvtype   </tt></td><td> datatype of each item in receive buffer </tr>
+ * </table>
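+ * <p>
+ * A minimal sketch (names illustrative; <tt>sbuf</tt>/<tt>rbuf</tt> are
+ * <tt>double []</tt> arrays sized to match the counts). Displacements are
+ * running sums of the corresponding counts:
+ * <pre>
+ * int size = MPI.COMM_WORLD.Size() ;
+ * int [] scounts = new int [size], sdispls = new int [size] ;
+ * int [] rcounts = new int [size], rdispls = new int [size] ;
+ * // ... fill scounts/rcounts, then prefix-sum them into sdispls/rdispls ...
+ * MPI.COMM_WORLD.Alltoallv(sbuf, 0, scounts, sdispls, MPI.DOUBLE,
+ *                          rbuf, 0, rcounts, rdispls, MPI.DOUBLE) ;
+ * </pre>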

+ * Java binding of the MPI operation MPI_ALLTOALLV. + */ + + public void Alltoallv(Object sendbuf, + int sendoffset, + int [] sendcount, + int [] sdispls, + Datatype sendtype, + Object recvbuf, + int recvoffset, + int [] recvcount, + int [] rdispls, + Datatype recvtype) throws MPIException { + + if (sendtype.isObject()) + for (int dst = 0; dst < Size(); dst++) { + int srcOffset = sendoffset + sendtype.Extent() * sdispls[dst] ; + Gatherv(sendbuf, srcOffset, sendcount[dst], sendtype, + recvbuf, recvoffset, recvcount, rdispls, recvtype, dst); + } + else + alltoallv(sendbuf, sendoffset*sendtype.Size(), + sendcount, sdispls, sendtype, + recvbuf, recvoffset*recvtype.Size(), + recvcount, rdispls, recvtype); + } + + private native void alltoallv(Object sendbuf, + int sendoffset, + int [] sendcount, + int [] sdispls, + Datatype sendtype, + Object recvbuf, + int recvoffset, + int [] recvcount, + int [] displs, + Datatype recvtype); + + /** + * Combine elements in input buffer of each process using the reduce + * operation, and return the combined value in the output buffer of the + * root process. + *

+ * <table>
+ * <tr><td><tt> sendbuf    </tt></td><td> send buffer array </tr>
+ * <tr><td><tt> sendoffset </tt></td><td> initial offset in send buffer </tr>
+ * <tr><td><tt> recvbuf    </tt></td><td> receive buffer array </tr>
+ * <tr><td><tt> recvoffset </tt></td><td> initial offset in receive buffer </tr>
+ * <tr><td><tt> count      </tt></td><td> number of items in send buffer </tr>
+ * <tr><td><tt> datatype   </tt></td><td> data type of each item in send buffer </tr>
+ * <tr><td><tt> op         </tt></td><td> reduce operation </tr>
+ * <tr><td><tt> root       </tt></td><td> rank of root process </tr>
+ * </table>
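+ * <p>
+ * A minimal sketch, summing one double from every rank into rank 0
+ * (<tt>localValue</tt> is illustrative):
+ * <pre>
+ * double [] part  = { localValue } ;
+ * double [] total = new double [1] ;
+ * MPI.COMM_WORLD.Reduce(part, 0, total, 0, 1, MPI.DOUBLE, MPI.SUM, 0) ;
+ * </pre>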

+ * Java binding of the MPI operation MPI_REDUCE. + *

+ * The predefined operations are available in Java as MPI.MAX,
+ * MPI.MIN, MPI.SUM, MPI.PROD, MPI.LAND,
+ * MPI.BAND, MPI.LOR, MPI.BOR, MPI.LXOR,
+ * MPI.BXOR, MPI.MINLOC and MPI.MAXLOC.
+ */
+
+  public void Reduce(Object sendbuf, int sendoffset,
+                     Object recvbuf, int recvoffset, int count,
+                     Datatype datatype, Op op, int root) throws MPIException {
+
+    if (op.isUser()) {
+      if (Rank() == root) {
+        copyBuffer(sendbuf,sendoffset,count,datatype,
+                   recvbuf,recvoffset,count,datatype);
+
+        Object tempbuf = newBuffer(recvbuf) ;
+        for (int src = 0; src < Size(); src++)
+          if(src != root) {
+            shadow.Recv(tempbuf, 0, count, datatype, src, 0);
+            op.Call(tempbuf, 0, recvbuf, recvoffset, count, datatype);
+          }
+      }
+      else
+        shadow.Send(sendbuf, sendoffset, count, datatype, root, 0);
+    }
+    else
+      reduce(sendbuf, sendoffset, recvbuf, recvoffset, count,
+             datatype, op, root) ;
+  }
+
+  private native void reduce(Object sendbuf, int sendoffset,
+                             Object recvbuf, int recvoffset, int count,
+                             Datatype datatype, Op op, int root);
+
+  /**
+   * Same as reduce except that the result appears in the receive
+   * buffer of all processes in the group.
+   *

+ * <table>
+ * <tr><td><tt> sendbuf    </tt></td><td> send buffer array </tr>
+ * <tr><td><tt> sendoffset </tt></td><td> initial offset in send buffer </tr>
+ * <tr><td><tt> recvbuf    </tt></td><td> receive buffer array </tr>
+ * <tr><td><tt> recvoffset </tt></td><td> initial offset in receive buffer </tr>
+ * <tr><td><tt> count      </tt></td><td> number of items in send buffer </tr>
+ * <tr><td><tt> datatype   </tt></td><td> data type of each item in send buffer </tr>
+ * <tr><td><tt> op         </tt></td><td> reduce operation </tr>
+ * </table>
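+ * <p>
+ * A minimal sketch, giving every rank the global maximum of a local
+ * value (<tt>myMax</tt> is illustrative):
+ * <pre>
+ * int [] localMax  = { myMax } ;
+ * int [] globalMax = new int [1] ;
+ * MPI.COMM_WORLD.Allreduce(localMax, 0, globalMax, 0, 1, MPI.INT, MPI.MAX) ;
+ * </pre>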

+ * Java binding of the MPI operation MPI_ALLREDUCE. + */ + + public void Allreduce(Object sendbuf, int sendoffset, + Object recvbuf, int recvoffset, int count, + Datatype datatype, Op op) throws MPIException { + + if (op.isUser()){ + Reduce(sendbuf, sendoffset, + recvbuf, recvoffset, count, datatype, op, 0); + + Bcast(recvbuf, recvoffset, count, datatype, 0); + } + else { + allreduce(sendbuf, sendoffset, recvbuf, recvoffset, count, + datatype, op) ; + } + } + + private native void allreduce(Object sendbuf, int sendoffset, + Object recvbuf, int recvoffset, int count, + Datatype datatype, Op op) ; + + /** + * Combine elements in input buffer of each process using the reduce + * operation, and scatter the combined values over the output buffers + * of the processes. + *

+ * <table>
+ * <tr><td><tt> sendbuf    </tt></td><td> send buffer array </tr>
+ * <tr><td><tt> sendoffset </tt></td><td> initial offset in send buffer </tr>
+ * <tr><td><tt> recvbuf    </tt></td><td> receive buffer array </tr>
+ * <tr><td><tt> recvoffset </tt></td><td> initial offset in receive buffer </tr>
+ * <tr><td><tt> recvcounts </tt></td><td> numbers of result elements distributed to each process </tr>
+ * <tr><td><tt> datatype   </tt></td><td> data type of each item in send buffer </tr>
+ * <tr><td><tt> op         </tt></td><td> reduce operation </tr>
+ * </table>

+ * Java binding of the MPI operation MPI_REDUCE_SCATTER.
+ */
+
+  public void Reduce_scatter(Object sendbuf, int sendoffset,
+                             Object recvbuf, int recvoffset, int [] recvcounts,
+                             Datatype datatype, Op op) throws MPIException {
+
+    if (op.isUser()) {
+      int [] displs = new int [recvcounts.length] ;
+      int count = 0 ;
+      for (int i = 0; i < recvcounts.length; i++) {
+        displs [i] = count ;
+        count += recvcounts [i] ;
+      }
+
+      // Reduce into a scratch buffer on the root, then scatter the
+      // combined result according to recvcounts/displs.
+      Object tempbuf = newBuffer(sendbuf) ;
+
+      Reduce(sendbuf, sendoffset, tempbuf, sendoffset, count,
+             datatype, op, 0);
+
+      Scatterv(tempbuf, sendoffset, recvcounts, displs, datatype,
+               recvbuf, recvoffset, recvcounts[Rank()], datatype, 0);
+    }
+    else
+      reduce_scatter(sendbuf, sendoffset, recvbuf, recvoffset, recvcounts,
+                     datatype, op) ;
+  }
+
+  private native void reduce_scatter(Object sendbuf, int sendoffset,
+                                     Object recvbuf, int recvoffset,
+                                     int [] recvcounts,
+                                     Datatype datatype, Op op) ;
+
+  /**
+   * Apply the reduce operation element-wise to the elements of
+   * inbuf and inoutbuf, storing the combined result in
+   * inoutbuf. Unlike the other reductions, this is a purely
+   * local operation.
+   *

+ * <table>
+ * <tr><td><tt> inbuf    </tt></td><td> input buffer array </tr>
+ * <tr><td><tt> inoutbuf </tt></td><td> input buffer array, will contain combined output </tr>
+ * <tr><td><tt> count    </tt></td><td> number of elements </tr>
+ * <tr><td><tt> datatype </tt></td><td> data type of each item </tr>
+ * <tr><td><tt> op       </tt></td><td> reduce operation </tr>
+ * </table>

+ * Java binding of the MPI operation MPI_REDUCE_LOCAL. + */ + public void Reduce_local(Object inbuf, Object inoutbuf, int count, + Datatype datatype, Op op) throws MPIException { + + reduce_local(inbuf, inoutbuf, count, datatype, op) ; + } + private native void reduce_local(Object inbuf, Object inoutbuf, int count, + Datatype datatype, Op op); + + /** + * Perform a prefix reduction on data distributed across the group. + *

+ * <table>
+ * <tr><td><tt> sendbuf    </tt></td><td> send buffer array </tr>
+ * <tr><td><tt> sendoffset </tt></td><td> initial offset in send buffer </tr>
+ * <tr><td><tt> recvbuf    </tt></td><td> receive buffer array </tr>
+ * <tr><td><tt> recvoffset </tt></td><td> initial offset in receive buffer </tr>
+ * <tr><td><tt> count      </tt></td><td> number of items in input buffer </tr>
+ * <tr><td><tt> datatype   </tt></td><td> data type of each item in input buffer </tr>
+ * <tr><td><tt> op         </tt></td><td> reduce operation </tr>
+ * </table>
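+ * <p>
+ * A minimal sketch: after the call, rank r holds the sum of the values
+ * contributed by ranks 0..r:
+ * <pre>
+ * int [] val    = { MPI.COMM_WORLD.Rank() + 1 } ;
+ * int [] prefix = new int [1] ;
+ * MPI.COMM_WORLD.Scan(val, 0, prefix, 0, 1, MPI.INT, MPI.SUM) ;
+ * </pre>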

+ * Java binding of the MPI operation MPI_SCAN. + */ + + public void Scan(Object sendbuf, int sendoffset, + Object recvbuf, int recvoffset, int count, + Datatype datatype, Op op) throws MPIException { + + if (op.isUser()){ + if (Rank() == 0) + copyBuffer(sendbuf,sendoffset,count,datatype, + recvbuf,recvoffset,count,datatype); + else{ + shadow.Recv(recvbuf, recvoffset, count, datatype, Rank() - 1, 0); + + op.Call(sendbuf, sendoffset, recvbuf, recvoffset, count, datatype); + } + if (Rank() < Size() - 1) + shadow.Send(recvbuf, recvoffset, count, datatype, Rank() + 1, 0); + } + else + scan(sendbuf, sendoffset, recvbuf, recvoffset, count, datatype, op); + } + + private native void scan(Object sendbuf, int sendoffset, + Object recvbuf, int recvoffset, int count, + Datatype datatype, Op op) ; + + // Topology Constructors + + /** + * Create a Cartesian topology communicator whose group is a subset + * of the group of this communicator. + *

+ * <table>
+ * <tr><td><tt> dims     </tt></td><td> the number of processes in each dimension </tr>
+ * <tr><td><tt> periods  </tt></td><td> true if grid is periodic, false if not, in each dimension </tr>
+ * <tr><td><tt> reorder  </tt></td><td> true if ranking may be reordered, false if not </tr>
+ * <tr><td><em> returns: </em></td><td> new Cartesian topology communicator </tr>
+ * </table>
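+ * <p>
+ * A minimal sketch: a 2x3 grid, periodic in the first dimension only
+ * (requires at least 6 processes):
+ * <pre>
+ * int []     dims    = { 2, 3 } ;
+ * boolean [] periods = { true, false } ;
+ * Cartcomm cart = MPI.COMM_WORLD.Create_cart(dims, periods, false) ;
+ * </pre>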

+ * Java binding of the MPI operation MPI_CART_CREATE. + *

+ * The number of dimensions of the Cartesian grid is taken to be the size + * of the dims argument. The array periods must be the + * same size. + */ + + public Cartcomm Create_cart(int [] dims, boolean [] periods, + boolean reorder) throws MPIException { + long cartHandle = GetCart(dims, periods, reorder) ; + if(cartHandle == nullHandle) + return null ; + else + return new Cartcomm(cartHandle) ; + } + + private native long GetCart(int [] dims, boolean [] periods, + boolean reorder) ; + + /** + * Create a graph topology communicator whose group is a subset + * of the group of this communicator. + *

+ * <table>
+ * <tr><td><tt> index    </tt></td><td> node degrees </tr>
+ * <tr><td><tt> edges    </tt></td><td> graph edges </tr>
+ * <tr><td><tt> reorder  </tt></td><td> true if ranking may be reordered, false if not </tr>
+ * <tr><td><em> returns: </em></td><td> new graph topology communicator </tr>
+ * </table>
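+ * <p>
+ * A minimal sketch for a three-node ring: index holds the running sum of
+ * the node degrees, edges lists each node's neighbours in turn:
+ * <pre>
+ * int [] index = { 2, 4, 6 } ;            // nodes 0, 1, 2 each have degree 2
+ * int [] edges = { 1, 2, 0, 2, 0, 1 } ;   // neighbours of nodes 0, 1, 2
+ * Graphcomm ring = MPI.COMM_WORLD.Create_graph(index, edges, false) ;
+ * </pre>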

+ * Java binding of the MPI operation MPI_GRAPH_CREATE. + *

+ * The number of nodes in the graph, nnodes, is taken
+ * to be the size of the index argument. The size of array
+ * edges must be index [nnodes - 1].
+ */
+
+  public Graphcomm Create_graph(int [] index, int [] edges, boolean reorder)
+    throws MPIException {
+    long graphHandle = GetGraph(index,edges,reorder) ;
+    if(graphHandle == nullHandle)
+      return null ;
+    else
+      return new Graphcomm(graphHandle) ;
+  }
+
+  private native long GetGraph(int [] index,int [] edges, boolean reorder);
+
+  private Comm shadow ; // Used by non-native collectives.
+}
+
+
+// Things to do
+//
+
diff --git a/ompi/mpi/java/java/MPI.java b/ompi/mpi/java/java/MPI.java
new file mode 100644
index 0000000000..e2060e2c59
--- /dev/null
+++ b/ompi/mpi/java/java/MPI.java
@@ -0,0 +1,556 @@
+/*
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+/*
+ * File      : MPI.java
+ * Author    : Sang Lim, Sung-Hoon Ko, Xinying Li, Bryan Carpenter
+ *             (contributions from MAEDA Atusi)
+ * Created   : Thu Apr 9 12:22:15 1998
+ * Revision  : $Revision: 1.18 $
+ * Updated   : $Date: 2003/01/16 16:39:34 $
+ * Copyright: Northeast Parallel Architectures Center
+ *            at Syracuse University 1998
+ */
+
+package mpi;
+
+import java.util.LinkedList ;
+
+public class MPI {
+
+  static int MAX_PROCESSOR_NAME = 256;
+
+  static public Intracomm COMM_WORLD;
+  static public Comm COMM_SELF;
+
+  static public int GRAPH, CART;
+  static public int ANY_SOURCE, ANY_TAG;
+
+  static public Op MAX, MIN, SUM, PROD, LAND, BAND,
+    LOR, BOR, LXOR, BXOR, MINLOC, MAXLOC;
+
+  static public Datatype BYTE, CHAR, SHORT, BOOLEAN,
+    INT, LONG, FLOAT, DOUBLE, PACKED, LB, UB, OBJECT;
+  static public Datatype SHORT2, INT2, LONG2, FLOAT2, DOUBLE2;
+
+  static public Request REQUEST_NULL;
+  static public Group GROUP_EMPTY;
+
+  static public int PROC_NULL;
+  static public int BSEND_OVERHEAD;
+  static public int UNDEFINED;
+  static public int IDENT, CONGRUENT, SIMILAR, UNEQUAL;
+  static public int TAG_UB, HOST, IO;
+
+  static Errhandler ERRORS_ARE_FATAL, ERRORS_RETURN;
+
+  static {
+
+    // System.loadLibrary("savesignals");
+    // Actually only needed for JVMs that don't provide
+    // JDK 1.4-like signal chaining, but doesn't do any harm.
+
+    //saveSignalHandlers();
+
+    System.loadLibrary("mpi_java");
+    if (!loadGlobalLibraries()) {
+      System.out.println("JAVA BINDINGS FAILED TO LOAD REQUIRED LIBRARIES");
+      System.exit(1);
+    }
+
+
+    //restoreSignalHandlers();
+    // On SP2, JVM signal handlers overridden during loadLibrary().
+ + try { + BYTE = new Datatype(); + CHAR = new Datatype(); + SHORT = new Datatype(); + BOOLEAN = new Datatype(); + INT = new Datatype(); + LONG = new Datatype(); + FLOAT = new Datatype(); + DOUBLE = new Datatype(); + PACKED = new Datatype(); + LB = new Datatype(); + UB = new Datatype(); + OBJECT = new Datatype(); + + SHORT2 = new Datatype() ; + INT2 = new Datatype() ; + LONG2 = new Datatype() ; + FLOAT2 = new Datatype() ; + DOUBLE2 = new Datatype() ; + + MAX = new Op(1); + MIN = new Op(2); + SUM = new Op(3); + PROD = new Op(4); + LAND = new Op(5); + BAND = new Op(6); + LOR = new Op(7); + BOR = new Op(8); + LXOR = new Op(9); + BXOR = new Op(10); + + MINLOC = new Op(new Minloc(), true); + MAXLOC = new Op(new Maxloc(), true); + + GROUP_EMPTY = new Group(Group.EMPTY); + REQUEST_NULL = new Request(Request.NULL); + + // Constant + SetConstant(); + + ERRORS_ARE_FATAL = new Errhandler(Errhandler.FATAL); + ERRORS_RETURN = new Errhandler(Errhandler.RETURN); + + COMM_WORLD = new Intracomm() ; + } + catch (MPIException e) { + System.out.println(e.getMessage()) ; + System.exit(1) ; + } + } + + static private native boolean loadGlobalLibraries(); + static private native void saveSignalHandlers(); + // static private native void restoreSignalHandlers(); + + /** + * Initialize MPI. + *

+ * <table>
+ * <tr><td><tt> args </tt></td><td> arguments to main method. </tr>
+ * </table>
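+ * <p>
+ * A minimal application skeleton:
+ * <pre>
+ * public static void main(String [] args) throws MPIException {
+ *   args = MPI.Init(args) ;
+ *   int rank = MPI.COMM_WORLD.Rank() ;
+ *   int size = MPI.COMM_WORLD.Size() ;
+ *   // ... communicate ...
+ *   MPI.Finalize() ;
+ * }
+ * </pre>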

+ * Java binding of the MPI operation MPI_INIT. + */ + + static public String [] Init(String[] args) throws MPIException { + + String [] newArgs = InitNative(args); + + // restoreSignalHandlers(); + // On MPICH, etc, JVM signal handlers overridden during MPI_Init(). + + + BYTE.setBasic(1); + CHAR.setBasic(2); + SHORT.setBasic(3); + BOOLEAN.setBasic(4); + INT.setBasic(5); + LONG.setBasic(6); + FLOAT.setBasic(7); + DOUBLE.setBasic(8); + PACKED.setBasic(9); + LB.setBasic(10); + UB.setBasic(11); + OBJECT.setBasic(12); + + SHORT2.setContiguous(2, MPI.SHORT); + INT2.setContiguous(2, MPI.INT); + LONG2.setContiguous(2, MPI.LONG); + FLOAT2.setContiguous(2, MPI.FLOAT); + DOUBLE2.setContiguous(2, MPI.DOUBLE); + + SHORT2.Commit(); + INT2.Commit(); + LONG2.Commit(); + FLOAT2.Commit(); + DOUBLE2.Commit(); + + COMM_WORLD.setType(Intracomm.WORLD); + + return newArgs ; + } + + static private native String [] InitNative(String[] args); + static private native void SetConstant(); + + /** + * Finalize MPI. + *

+ * Java binding of the MPI operation MPI_FINALIZE. + */ + + static public native void Finalize() throws MPIException ; + + /** + * Returns wallclock time. + *

+ * <table>
+ * <tr><td><em> returns: </em></td><td> elapsed wallclock time in seconds since some time in the past </tr>
+ * </table>
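+ * <p>
+ * Typical timing pattern:
+ * <pre>
+ * double t0 = MPI.Wtime() ;
+ * // ... region being timed ...
+ * double elapsed = MPI.Wtime() - t0 ;
+ * </pre>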

+ * Java binding of the MPI operation MPI_WTIME. + */ + + static public native double Wtime(); + + /** + * Returns resolution of timer. + *

+ * <table>
+ * <tr><td><em> returns: </em></td><td> resolution of wtime in seconds. </tr>
+ * </table>

+ * Java binding of the MPI operation MPI_WTICK. + */ + + static public native double Wtick(); + + /** + * Returns the name of the processor on which it is called. + *

+ * <table>
+ * <tr><td><em> returns: </em></td><td> A unique specifier for the actual node. </tr>
+ * </table>

+ * Java binding of the MPI operation MPI_GET_PROCESSOR_NAME.
+ */
+
+  static public String Get_processor_name() throws MPIException {
+    byte[] buf = new byte[MAX_PROCESSOR_NAME] ;
+    int length = Get_processor_name(buf) ;
+    return new String(buf,0,length) ;
+  }
+
+  static private native int Get_processor_name(byte[] buf) ;
+
+  /**
+   * Test if MPI has been initialized.
+   *

+ * <table>
+ * <tr><td><em> returns: </em></td><td> true if Init has been called, false otherwise. </tr>
+ * </table>

+ * Java binding of the MPI operation MPI_INITIALIZED. + */ + + static public native boolean Initialized() throws MPIException ; + + // Buffer allocation + + private static byte [] buffer = null ; + + /** + * Provides to MPI a buffer in user's memory to be used for buffering + * outgoing messages. + * Java binding of the MPI operation MPI_BUFFER_ATTACH. + */ + + static public void Buffer_attach(byte[] buffer) throws MPIException { + MPI.buffer = buffer ; + Buffer_attach_native(buffer); + } + + static private native void Buffer_attach_native(byte[] buffer); + + + /** + * Detach the buffer currently associated with MPI. + * Java binding of the MPI operation MPI_BUFFER_DETACH. + */ + + static public byte[] Buffer_detach() throws MPIException { + Buffer_detach_native(buffer); + byte [] result = MPI.buffer ; + MPI.buffer = null ; + return result ; + } + + static private native void Buffer_detach_native(byte[] buffer); + + static LinkedList freeList = new LinkedList() ; + + synchronized static void clearFreeList() { + + while(!freeList.isEmpty()) + ((Freeable) freeList.removeFirst()).free() ; + } +} + + +// Minloc and Maxloc +// BC Note: moved to separate source files +/* +class Maxloc extends User_function{ + public void Call(Object invec, int inoffset, Object outvec, int outoffset, + int count, Datatype datatype){ + + // *** should work also for derived datatypes with following as + // as bases ? *** + + if(datatype == MPI.SHORT2) { + short [] in_array = (short[])invec; + short [] out_array = (short[])outvec; + + int indisp = inoffset ; + int outdisp = outoffset ; + for (int i = 0; i < count; i++, indisp += 2, outdisp += 2) { + short inval = in_array [indisp] ; + short outval = out_array [outdisp] ; + + if(inval > outval) { + out_array [outdisp ] = inval ; + out_array [outdisp + 1] = in_array [outdisp + 1] ; + } + else if(inval == outval) { + short inloc = in_array [indisp + 1] ; + + if(inloc < out_array [outdisp + 1]) + out_array [outdisp + 1] = inloc ; + } + } + } + else if(datatype == MPI.INT2) { + int [] in_array = (int[])invec; + int [] out_array = (int[])outvec; + + int indisp = inoffset ; + int outdisp = outoffset ; + for (int i = 0; i < count; i++, indisp += 2, outdisp += 2){ + + int inval = in_array [indisp] ; + int outval = out_array [outdisp] ; + + if(inval > outval) { + out_array [outdisp ] = inval ; + out_array [outdisp + 1] = in_array [outdisp + 1] ; + } + else if(inval == outval) { + int inloc = in_array [indisp + 1] ; + + if(inloc < out_array [outdisp + 1]) + out_array [outdisp + 1] = inloc ; + } + } + } + else if(datatype == MPI.LONG2) { + long [] in_array = (long[])invec; + long [] out_array = (long[])outvec; + + int indisp = inoffset ; + int outdisp = outoffset ; + for (int i = 0; i < count; i++, indisp += 2, outdisp += 2){ + + long inval = in_array [indisp] ; + long outval = out_array [outdisp] ; + + if(inval > outval) { + out_array [outdisp ] = inval ; + out_array [outdisp + 1] = in_array [outdisp + 1] ; + } + else if(inval == outval) { + long inloc = in_array [indisp + 1] ; + + if(inloc < out_array [outdisp + 1]) + out_array [outdisp + 1] = inloc ; + } + } + } + else if(datatype == MPI.FLOAT2) { + float [] in_array = (float[])invec; + float [] out_array = (float[])outvec; + + int indisp = inoffset ; + int outdisp = outoffset ; + for (int i = 0; i < count; i++, indisp += 2, outdisp += 2){ + + float inval = in_array [indisp] ; + float outval = out_array [outdisp] ; + + if(inval > outval) { + out_array [outdisp ] = inval ; + out_array [outdisp + 1] = in_array [outdisp + 1] ; 
+ } + else if(inval == outval) { + float inloc = in_array [indisp + 1] ; + + if(inloc < out_array [outdisp + 1]) + out_array [outdisp + 1] = inloc ; + } + } + } + else if(datatype == MPI.DOUBLE2) { + double [] in_array = (double[])invec; + double [] out_array = (double[])outvec; + + int indisp = inoffset ; + int outdisp = outoffset ; + for (int i = 0; i < count; i++, indisp += 2, outdisp += 2){ + + double inval = in_array [indisp] ; + double outval = out_array [outdisp] ; + + if(inval > outval) { + out_array [outdisp ] = inval ; + out_array [outdisp + 1] = in_array [outdisp + 1] ; + } + else if(inval == outval) { + double inloc = in_array [indisp + 1] ; + + if(inloc < out_array [outdisp + 1]) + out_array [outdisp + 1] = inloc ; + } + } + } + else { + System.out.println("MPI.MAXLOC: invalid datatype") ; + try { + MPI.COMM_WORLD.Abort(1); + } + catch(MPIException e) {} + } + } +} + +class Minloc extends User_function{ + public void Call(Object invec, int inoffset, Object outvec, int outoffset, + int count, Datatype datatype){ + if(datatype == MPI.SHORT2) { + short [] in_array = (short[])invec; + short [] out_array = (short[])outvec; + + int indisp = inoffset ; + int outdisp = outoffset ; + for (int i = 0; i < count; i++, indisp += 2, outdisp += 2){ + + short inval = in_array [indisp] ; + short outval = out_array [outdisp] ; + + if(inval < outval) { + out_array [outdisp ] = inval ; + out_array [outdisp + 1] = in_array [outdisp + 1] ; + } + else if(inval == outval) { + short inloc = in_array [indisp + 1] ; + + if(inloc < out_array [outdisp + 1]) + out_array [outdisp + 1] = inloc ; + } + } + } + else if(datatype == MPI.INT2) { + int [] in_array = (int[])invec; + int [] out_array = (int[])outvec; + + int indisp = inoffset ; + int outdisp = outoffset ; + for (int i = 0; i < count; i++, indisp += 2, outdisp += 2){ + + int inval = in_array [indisp] ; + int outval = out_array [outdisp] ; + + if(inval < outval) { + out_array [outdisp ] = inval ; + out_array [outdisp + 1] = in_array [outdisp + 1] ; + } + else if(inval == outval) { + int inloc = in_array [indisp + 1] ; + + if(inloc < out_array [outdisp + 1]) + out_array [outdisp + 1] = inloc ; + } + } + } + else if(datatype == MPI.LONG2) { + long [] in_array = (long[])invec; + long [] out_array = (long[])outvec; + + int indisp = inoffset ; + int outdisp = outoffset ; + for (int i = 0; i < count; i++, indisp += 2, outdisp += 2){ + + long inval = in_array [indisp] ; + long outval = out_array [outdisp] ; + + if(inval < outval) { + out_array [outdisp ] = inval ; + out_array [outdisp + 1] = in_array [outdisp + 1] ; + } + else if(inval == outval) { + long inloc = in_array [indisp + 1] ; + + if(inloc < out_array [outdisp + 1]) + out_array [outdisp + 1] = inloc ; + } + } + } + else if(datatype == MPI.FLOAT2) { + float [] in_array = (float[])invec; + float [] out_array = (float[])outvec; + + int indisp = inoffset ; + int outdisp = outoffset ; + for (int i = 0; i < count; i++, indisp += 2, outdisp += 2){ + + float inval = in_array [indisp] ; + float outval = out_array [outdisp] ; + + if(inval < outval) { + out_array [outdisp ] = inval ; + out_array [outdisp + 1] = in_array [outdisp + 1] ; + } + else if(inval == outval) { + float inloc = in_array [indisp + 1] ; + + if(inloc < out_array [outdisp + 1]) + out_array [outdisp + 1] = inloc ; + } + } + } + else if(datatype == MPI.DOUBLE2) { + double [] in_array = (double[])invec; + double [] out_array = (double[])outvec; + + int indisp = inoffset ; + int outdisp = outoffset ; + for (int i = 0; i < count; i++, indisp += 
2, outdisp += 2){ + + double inval = in_array [indisp] ; + double outval = out_array [outdisp] ; + + if(inval < outval) { + out_array [outdisp ] = inval ; + out_array [outdisp + 1] = in_array [outdisp + 1] ; + } + else if(inval == outval) { + double inloc = in_array [indisp + 1] ; + + if(inloc < out_array [outdisp + 1]) + out_array [outdisp + 1] = inloc ; + } + } + } + else { + System.out.println("MPI.MINLOC: invalid datatype") ; + try { + MPI.COMM_WORLD.Abort(1); + } + catch(MPIException e) {} + } + } +} +*/ + +// Things to do: +// +// Check if `Maxloc'/`Minloc' should work with derived types. + diff --git a/ompi/mpi/java/java/MPIException.java b/ompi/mpi/java/java/MPIException.java new file mode 100644 index 0000000000..2e8f5706eb --- /dev/null +++ b/ompi/mpi/java/java/MPIException.java @@ -0,0 +1,30 @@ +/* + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ +/* + * File : MPIException.java + * Author : Bryan Carpenter + * Created : Tue Sep 14 13:03:57 EDT 1999 + * Revision : $Revision: 1.1 $ + * Updated : $Date: 1999/09/14 22:01:52 $ + * Copyright: Northeast Parallel Architectures Center + * at Syracuse University 1999 + */ + +package mpi; + +public class MPIException extends Exception { + public MPIException() {super() ;} + public MPIException(String message) {super(message) ;} +} + diff --git a/ompi/mpi/java/java/Makefile.am b/ompi/mpi/java/java/Makefile.am new file mode 100644 index 0000000000..0c3447d461 --- /dev/null +++ b/ompi/mpi/java/java/Makefile.am @@ -0,0 +1,145 @@ +# -*- makefile -*- +# +# Copyright (c) 2011 Cisco Systems, Inc. All rights reserved. +# $COPYRIGHT$ +# +# Additional copyrights may follow +# +# $HEADER$ +# + +# These are the source files. However, Automake doesn't directly know +# about them, and we compile them via *.java below (ick!). So we just +# list them here in EXTRA_DIST so that they get picked up by "make +# dist". +JAVA_SRC_FILES = \ + Cartcomm.java \ + CartParms.java \ + Comm.java \ + Datatype.java \ + Errhandler.java \ + Freeable.java \ + Graphcomm.java \ + GraphParms.java \ + Group.java \ + Intercomm.java \ + Intracomm.java \ + Maxloc.java \ + Minloc.java \ + MPI.java \ + MPIException.java \ + Op.java \ + Prequest.java \ + Request.java \ + ShiftParms.java \ + Status.java \ + User_function.java + +JAVA_CLASS_FILES = $(JAVA_SRC_FILES:%.java=mpi/%.class) +EXTRA_DIST = $(JAVA_SRC_FILES) + +# Only do this stuff if we want the Java bindings +if OMPI_WANT_JAVA_BINDINGS + +# These files get generated. They have a 1:1 correspondence to .java +# files, but there is not a .h file for every .java file. That's why +# we have a specific list of files here, as opposed to deriving them +# from JAVA_SRC_FILES. 
+JAVA_H = \
+	mpi_MPI.h \
+	mpi_CartParms.h \
+	mpi_Cartcomm.h \
+	mpi_Comm.h \
+	mpi_Datatype.h \
+	mpi_Errhandler.h \
+	mpi_GraphParms.h \
+	mpi_Graphcomm.h \
+	mpi_Group.h \
+	mpi_Intercomm.h \
+	mpi_Intracomm.h \
+	mpi_Op.h \
+	mpi_Prequest.h \
+	mpi_Request.h \
+	mpi_ShiftParms.h \
+	mpi_Status.h \
+	mpi_User_function.h
+
+# A little verbosity magic; "make" will show the terse output.  "make
+# V=1" will show the actual commands used (just like the other
+# Automake-generated compilation/linker rules).
+OMPI_V_JAVAC = $(ompi__v_JAVAC_$(V))
+ompi__v_JAVAC_ = $(ompi__v_JAVAC_$(AM_DEFAULT_VERBOSITY))
+ompi__v_JAVAC_0 = @echo "  JAVAC " `basename $@`;
+
+OMPI_V_JAVAH = $(ompi__v_JAVAH_$(V))
+ompi__v_JAVAH_ = $(ompi__v_JAVAH_$(AM_DEFAULT_VERBOSITY))
+ompi__v_JAVAH_0 = @echo "  JAVAH " `basename $@`;
+
+OMPI_V_JAR = $(ompi__v_JAR_$(V))
+ompi__v_JAR_ = $(ompi__v_JAR_$(AM_DEFAULT_VERBOSITY))
+ompi__v_JAR_0 = @echo "  JAR   " `basename $@`;
+
+OMPI_V_MKDIR = $(ompi__v_MKDIR_$(V))
+ompi__v_MKDIR_ = $(ompi__v_MKDIR_$(AM_DEFAULT_VERBOSITY))
+ompi__v_MKDIR_0 = @echo "  MKDIR " $@;
+
+# All the .java files seem to have circular references, such that I
+# can't figure out a linear order in which to compile them
+# sequentially that does not generate dependency errors.  Hence, the
+# only way I can figure out how to compile them is via *.java -- this
+# could well be due to my own misunderstanding of Java or the
+# compiler.  Shrug.
+#
+# So instead of listing all the .class files, just use mpi/MPI.class
+# as a token class file for both the rule and all the dependencies
+# below.
+#
+# Note too, that all of them will be recompiled if any of them change,
+# since Automake doesn't know how to automatically generate
+# dependencies for Java source files.  So I made the token
+# mpi/MPI.class file dependent upon *all* the .java source files.
+mpi/MPI.class: $(JAVA_SRC_FILES)
+	$(OMPI_V_JAVAC) CLASSPATH=. ; \
+	export CLASSPATH ; \
+	$(JAVAC) -d . $(top_srcdir)/ompi/mpi/java/java/*.java
+
+# Similar to above, all the generated .h files are dependent upon the
+# token mpi/MPI.class file.  Hence, all the classes will be generated
+# first, then we'll individually generate each of the .h files.
+$(JAVA_H): mpi/MPI.class
+	$(OMPI_V_JAVAH) sourcename=mpi.`echo $@ | sed -e s/^mpi_// -e s/.h$$//`; \
+	CLASSPATH=. ; \
+	export CLASSPATH ; \
+	$(JAVAH) -d . -jni $$sourcename
+
+# Generate the .jar file from all the class files.  Use the token
+# mpi/MPI.class file (see explanation above).  If we try to use
+# JAVA_CLASS_FILES in the dependency here, make will complain that it
+# doesn't know how to build all the files.
+mpi.jar: mpi/MPI.class
+	$(OMPI_V_JAR) $(JAR) cf mpi.jar $(JAVA_CLASS_FILES)
+
+# Install the jar file into libdir.  Use the DATA Automake primary,
+# because Automake will complain if you try to use LIBRARIES with a
+# filename that doesn't fit the lib.* format.  Also use an
+# indirection to get to the libdir -- Automake does not allow putting
+# libdir for the DATA primary.
+javadir = $(libdir)
+java_DATA = mpi.jar
+
+# List all the header files in BUILT_SOURCES so that Automake's "all"
+# target will build them.  This will also force the building of the
+# JAVA_CLASS_FILES.
+BUILT_SOURCES = $(JAVA_H)
+
+# Clean up all the things that this Makefile.am generates.  The
+# generated .class files are all within the "mpi" subdirectory.
+CLEANFILES += -rf mpi/* $(JAVA_H) + +# Conditionally install the header files +if WANT_INSTALL_HEADERS +ompihdir = $(includedir)/openmpi/$(subdir) +nobase_ompih_HEADERS = $(JAVA_H) +endif + +endif diff --git a/ompi/mpi/java/java/Maxloc.java b/ompi/mpi/java/java/Maxloc.java new file mode 100644 index 0000000000..fc9ffbbccc --- /dev/null +++ b/ompi/mpi/java/java/Maxloc.java @@ -0,0 +1,145 @@ +/* + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ +package mpi; + +class Maxloc extends User_function{ + public void Call(Object invec, int inoffset, Object outvec, int outoffset, + int count, Datatype datatype){ + + // *** should work also for derived datatypes with following as + // as bases ? *** + + if(datatype == MPI.SHORT2) { + short [] in_array = (short[])invec; + short [] out_array = (short[])outvec; + + int indisp = inoffset ; + int outdisp = outoffset ; + for (int i = 0; i < count; i++, indisp += 2, outdisp += 2) { + short inval = in_array [indisp] ; + short outval = out_array [outdisp] ; + + if(inval > outval) { + out_array [outdisp ] = inval ; + out_array [outdisp + 1] = in_array [outdisp + 1] ; + } + else if(inval == outval) { + short inloc = in_array [indisp + 1] ; + + if(inloc < out_array [outdisp + 1]) + out_array [outdisp + 1] = inloc ; + } + } + } + else if(datatype == MPI.INT2) { + int [] in_array = (int[])invec; + int [] out_array = (int[])outvec; + + int indisp = inoffset ; + int outdisp = outoffset ; + for (int i = 0; i < count; i++, indisp += 2, outdisp += 2){ + + int inval = in_array [indisp] ; + int outval = out_array [outdisp] ; + + if(inval > outval) { + out_array [outdisp ] = inval ; + out_array [outdisp + 1] = in_array [outdisp + 1] ; + } + else if(inval == outval) { + int inloc = in_array [indisp + 1] ; + + if(inloc < out_array [outdisp + 1]) + out_array [outdisp + 1] = inloc ; + } + } + } + else if(datatype == MPI.LONG2) { + long [] in_array = (long[])invec; + long [] out_array = (long[])outvec; + + int indisp = inoffset ; + int outdisp = outoffset ; + for (int i = 0; i < count; i++, indisp += 2, outdisp += 2){ + + long inval = in_array [indisp] ; + long outval = out_array [outdisp] ; + + if(inval > outval) { + out_array [outdisp ] = inval ; + out_array [outdisp + 1] = in_array [outdisp + 1] ; + } + else if(inval == outval) { + long inloc = in_array [indisp + 1] ; + + if(inloc < out_array [outdisp + 1]) + out_array [outdisp + 1] = inloc ; + } + } + } + else if(datatype == MPI.FLOAT2) { + float [] in_array = (float[])invec; + float [] out_array = (float[])outvec; + + int indisp = inoffset ; + int outdisp = outoffset ; + for (int i = 0; i < count; i++, indisp += 2, outdisp += 2){ + + float inval = in_array [indisp] ; + float outval = out_array [outdisp] ; + + if(inval > outval) { + out_array [outdisp ] = inval ; + out_array [outdisp + 1] = in_array [outdisp + 1] ; + } + else if(inval == outval) { + float inloc = in_array [indisp + 1] ; + + if(inloc < out_array [outdisp + 1]) + out_array [outdisp + 1] = inloc ; + } + } + } + else if(datatype == MPI.DOUBLE2) { + double [] in_array = 
(double[])invec; + double [] out_array = (double[])outvec; + + int indisp = inoffset ; + int outdisp = outoffset ; + for (int i = 0; i < count; i++, indisp += 2, outdisp += 2){ + + double inval = in_array [indisp] ; + double outval = out_array [outdisp] ; + + if(inval > outval) { + out_array [outdisp ] = inval ; + out_array [outdisp + 1] = in_array [outdisp + 1] ; + } + else if(inval == outval) { + double inloc = in_array [indisp + 1] ; + + if(inloc < out_array [outdisp + 1]) + out_array [outdisp + 1] = inloc ; + } + } + } + else { + System.out.println("MPI.MAXLOC: invalid datatype") ; + try { + MPI.COMM_WORLD.Abort(1); + } + catch(MPIException e) {} + } + } +} diff --git a/ompi/mpi/java/java/Minloc.java b/ompi/mpi/java/java/Minloc.java new file mode 100644 index 0000000000..639d274158 --- /dev/null +++ b/ompi/mpi/java/java/Minloc.java @@ -0,0 +1,142 @@ +/* + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ +package mpi; + +class Minloc extends User_function{ + public void Call(Object invec, int inoffset, Object outvec, int outoffset, + int count, Datatype datatype){ + if(datatype == MPI.SHORT2) { + short [] in_array = (short[])invec; + short [] out_array = (short[])outvec; + + int indisp = inoffset ; + int outdisp = outoffset ; + for (int i = 0; i < count; i++, indisp += 2, outdisp += 2){ + + short inval = in_array [indisp] ; + short outval = out_array [outdisp] ; + + if(inval < outval) { + out_array [outdisp ] = inval ; + out_array [outdisp + 1] = in_array [outdisp + 1] ; + } + else if(inval == outval) { + short inloc = in_array [indisp + 1] ; + + if(inloc < out_array [outdisp + 1]) + out_array [outdisp + 1] = inloc ; + } + } + } + else if(datatype == MPI.INT2) { + int [] in_array = (int[])invec; + int [] out_array = (int[])outvec; + + int indisp = inoffset ; + int outdisp = outoffset ; + for (int i = 0; i < count; i++, indisp += 2, outdisp += 2){ + + int inval = in_array [indisp] ; + int outval = out_array [outdisp] ; + + if(inval < outval) { + out_array [outdisp ] = inval ; + out_array [outdisp + 1] = in_array [outdisp + 1] ; + } + else if(inval == outval) { + int inloc = in_array [indisp + 1] ; + + if(inloc < out_array [outdisp + 1]) + out_array [outdisp + 1] = inloc ; + } + } + } + else if(datatype == MPI.LONG2) { + long [] in_array = (long[])invec; + long [] out_array = (long[])outvec; + + int indisp = inoffset ; + int outdisp = outoffset ; + for (int i = 0; i < count; i++, indisp += 2, outdisp += 2){ + + long inval = in_array [indisp] ; + long outval = out_array [outdisp] ; + + if(inval < outval) { + out_array [outdisp ] = inval ; + out_array [outdisp + 1] = in_array [outdisp + 1] ; + } + else if(inval == outval) { + long inloc = in_array [indisp + 1] ; + + if(inloc < out_array [outdisp + 1]) + out_array [outdisp + 1] = inloc ; + } + } + } + else if(datatype == MPI.FLOAT2) { + float [] in_array = (float[])invec; + float [] out_array = (float[])outvec; + + int indisp = inoffset ; + int outdisp = outoffset ; + for (int i = 0; i < count; i++, indisp += 2, outdisp += 2){ + + float inval = in_array 
[indisp] ; + float outval = out_array [outdisp] ; + + if(inval < outval) { + out_array [outdisp ] = inval ; + out_array [outdisp + 1] = in_array [outdisp + 1] ; + } + else if(inval == outval) { + float inloc = in_array [indisp + 1] ; + + if(inloc < out_array [outdisp + 1]) + out_array [outdisp + 1] = inloc ; + } + } + } + else if(datatype == MPI.DOUBLE2) { + double [] in_array = (double[])invec; + double [] out_array = (double[])outvec; + + int indisp = inoffset ; + int outdisp = outoffset ; + for (int i = 0; i < count; i++, indisp += 2, outdisp += 2){ + + double inval = in_array [indisp] ; + double outval = out_array [outdisp] ; + + if(inval < outval) { + out_array [outdisp ] = inval ; + out_array [outdisp + 1] = in_array [outdisp + 1] ; + } + else if(inval == outval) { + double inloc = in_array [indisp + 1] ; + + if(inloc < out_array [outdisp + 1]) + out_array [outdisp + 1] = inloc ; + } + } + } + else { + System.out.println("MPI.MINLOC: invalid datatype") ; + try { + MPI.COMM_WORLD.Abort(1); + } + catch(MPIException e) {} + } + } +} diff --git a/ompi/mpi/java/java/Op.java b/ompi/mpi/java/java/Op.java new file mode 100644 index 0000000000..fcd87acb2a --- /dev/null +++ b/ompi/mpi/java/java/Op.java @@ -0,0 +1,91 @@ +/* + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ +/* + * File : Op.java + * Author : Xinying Li, Sang LIm + * Created : Thu Apr 9 12:22:15 1998 + * Revision : $Revision: 1.11 $ + * Updated : $Date: 2003/01/16 16:39:34 $ + * Copyright: Northeast Parallel Architectures Center + * at Syracuse University 1998 + */ + +package mpi; +//import mpi.*; + +public class Op extends Freeable { + private final static int NULL = 0; + private final static int MAX = 1; + private final static int MIN = 2; + private final static int SUM = 3; + private final static int PROD = 4; + private final static int LAND = 5; + private final static int BAND = 6; + private final static int LOR = 7; + private final static int BOR = 8; + private final static int LXOR = 9; + private final static int BXOR =10; + private final static int MINLOC=11; + private final static int MAXLOC=12; + + private static native void init(); + + private User_function uf = null ; + + protected Op(int Type) { GetOp(Type);} + + /** + * Bind a user-defined global reduction operation to an Op object. + *

+ * <table>
+ * <tr><td><tt> function </tt></td><td> user defined function </tr>
+ * <tr><td><tt> commute  </tt></td><td> true if commutative, false otherwise </tr>
+ * </table>

+ * Java binding of the MPI operation MPI_OP_CREATE. + */ + + public Op(User_function function, boolean commute) throws MPIException { + uf = function; + } + + protected boolean isUser() { + return uf != null ; + } + + public final void Call(Object invec, int inoffset, + Object outvec, int outoffset, + int count, Datatype datatype) { + uf.Call(invec, inoffset, outvec, outoffset, count, datatype); + } + + private native void GetOp(int Type); + + protected long handle ; + + @SuppressWarnings("unchecked") + public void finalize() throws MPIException { + synchronized(MPI.class) { + MPI.freeList.addFirst(this) ; + } + } + + native void free() ; + + static { + init(); + } +} + diff --git a/ompi/mpi/java/java/Prequest.java b/ompi/mpi/java/java/Prequest.java new file mode 100644 index 0000000000..76af1849c4 --- /dev/null +++ b/ompi/mpi/java/java/Prequest.java @@ -0,0 +1,217 @@ +/* + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ +/* + * File : Prequest.java + * Author : Sang Lim, Xinying Li, Bryan Carpenter + * Created : Thu Apr 9 12:22:15 1998 + * Revision : $Revision: 1.11 $ + * Updated : $Date: 2001/10/22 21:07:55 $ + * Copyright: Northeast Parallel Architectures Center + * at Syracuse University 1998 + */ + + +/* + * Note: in the end there is no sensible way to use the native + * persistent requests here. For every `start'/`wait' cycle you need + * to do `get...Elements', `release...Elements', otherwise the behaviour + * is wrong if pinning isn't supported (because then get/release ops + * need active copying to move values between C and Java). + * + * (Even if pinning is supported, the arrays would have to be pinned + * almost permanently, which presumably isn't a good thing.) + */ + + +package mpi; + +public class Prequest extends Request { + + protected final static int MODE_STANDARD = 0 ; + protected final static int MODE_BUFFERED = 1 ; + protected final static int MODE_SYNCHRONOUS = 2 ; + protected final static int MODE_READY = 3 ; + + private int src ; + + /** + * Constructor used by `Send_init', etc. + */ + + protected Prequest(int mode, + Object buf, int offset, int count, Datatype type, + int dest, int tag, Comm comm) { + + opTag = Request.OP_SEND ; + + this.mode = mode ; + + this.buf = buf; + this.offset = offset; + this.count = count; + this.type = type; + this.dest = dest; + this.tag = tag; + this.comm = comm ; + + if(type.isObject()) { + typeTag = Request.TYPE_OBJECT ; + + length_buf = new int [2] ; + hdrReq = new Request() ; + } + else + typeTag = Request.TYPE_NORMAL ; + } + + /** + * Constructor used by `Recv_init'. 
+ */ + + protected Prequest(Object buf, int offset, int count, Datatype type, + int source, int tag, Comm comm) { + + opTag = Request.OP_RECV ; + + this.buf = buf; + this.offset = offset; + this.count = count; + this.type = type; + this.src = source; + this.tag = tag; + this.comm = comm; + + if(type.isObject()) { + typeTag = Request.TYPE_OBJECT ; + + length_buf = new int [2] ; + } + else + typeTag = Request.TYPE_NORMAL ; + } + + + /** + * Activate a persistent communication request. + * Java binding of the MPI operation MPI_START. + * The communication is completed by using the request in + * one of the wait or test operations. + * On successful completion the request becomes inactive again. + * It can be reactivated by a further call to Start. + */ + + public void Start() throws MPIException { + switch(typeTag) { + case TYPE_NORMAL : + switch(opTag) { + case OP_SEND : + + switch(mode) { + case MODE_STANDARD : + comm.Isend(buf, offset, count, type, dest, tag, this); + + break; + case MODE_BUFFERED : + comm.Ibsend(buf, offset, count, type, dest, tag, this); + + break; + case MODE_SYNCHRONOUS : + comm.Issend(buf, offset, count, type, dest, tag, this); + + break; + case MODE_READY : + comm.Irsend(buf, offset, count, type, dest, tag, this); + + break; + } + + break ; + case OP_RECV : + comm.Irecv(buf, offset, count, type, src, tag, this) ; + + break ; + } + + break ; + case TYPE_OBJECT : + switch(opTag) { + case OP_SEND : + + byte [] byte_buf = comm.Object_Serialize(buf,offset,count,type); + + length_buf[0] = byte_buf.length; + length_buf[1] = count ; + + switch(mode) { + case MODE_STANDARD : + comm.Isend(length_buf, 0, 2, MPI.INT, dest, tag, hdrReq) ; + comm.Isend(byte_buf, 0, byte_buf.length, + MPI.BYTE, dest, tag, this); + + break; + case MODE_BUFFERED : + comm.Ibsend(length_buf, 0, 2, MPI.INT, dest, tag, hdrReq) ; + comm.Ibsend(byte_buf, 0, byte_buf.length, + MPI.BYTE, dest, tag, this); + + break; + case MODE_SYNCHRONOUS : + comm.Issend(length_buf, 0, 2, MPI.INT, dest, tag, hdrReq) ; + comm.Isend(byte_buf, 0, byte_buf.length, + MPI.BYTE, dest, tag, this); + + break; + case MODE_READY : + comm.Irsend(length_buf, 0, 2, MPI.INT, dest, tag, hdrReq) ; + comm.Isend(byte_buf, 0, byte_buf.length, + MPI.BYTE, dest, tag, this); + + break; + } + + break ; + case OP_RECV : + comm.Irecv(length_buf, 0, 2, MPI.INT, src, tag, this) ; + + break ; + } + + break ; + } + } + + //private native void start(); + + /** + * Activate a list of communication requests. + *

+ * <table>
+ * <tr><td><tt> array_of_requests </tt></td><td> array of requests </tr>
+ * </table>
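+ * <p>
+ * A sketch of the intended persistent-request cycle, assuming the
+ * Send_init/Recv_init factory methods mentioned with the constructors
+ * above, an existing communicator <tt>comm</tt>, and illustrative
+ * buffers and partner ranks:
+ * <pre>
+ * Prequest [] reqs = new Prequest [2] ;
+ * reqs[0] = comm.Send_init(sbuf, 0, n, MPI.INT, right, 0) ;
+ * reqs[1] = comm.Recv_init(rbuf, 0, n, MPI.INT, left, 0) ;
+ * for (int step = 0 ; step < nSteps ; step++) {
+ *   Prequest.Startall(reqs) ;
+ *   Request.Waitall(reqs) ;
+ * }
+ * </pre>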

+ * Java binding of the MPI operation MPI_STARTALL.
+ */
+
+  public static void Startall(Prequest [] array_of_request)
+    throws MPIException {
+    int req_length = array_of_request.length ;
+
+    for (int i = 0 ; i < req_length ; i++)
+      array_of_request [i].Start() ;
+  }
+}
diff --git a/ompi/mpi/java/java/Request.java b/ompi/mpi/java/java/Request.java
new file mode 100644
--- /dev/null
+++ b/ompi/mpi/java/java/Request.java
[...]
+  /**
+   * Constructor used by Isend, etc.
+   */
+
+  protected Request(Request hdrReq) {
+
+    typeTag = Request.TYPE_OBJECT ;
+    opTag = Request.OP_SEND ;
+
+    this.hdrReq = hdrReq ;
+  }
+
+  /**
+   * Constructor used by Irecv.
+   */
+
+  protected Request(Object buf, int offset, int count, Datatype type,
+                    int tag, Comm comm,
+                    int [] length_buf) {
+
+    typeTag = Request.TYPE_OBJECT ;
+    opTag = Request.OP_RECV ;
+
+    this.buf = buf;
+    this.offset = offset;
+    this.count = count;
+    this.type = type;
+    this.tag = tag;
+    this.comm = comm;
+
+    this.length_buf = length_buf;
+  }
+
+
+  /**
+   * Set the request object to be void.
+   * Java binding of the MPI operation MPI_REQUEST_FREE.
+   */
+
+  public native void Free() throws MPIException ;
+
+  /**
+   * Mark a pending nonblocking communication for cancellation.
+   * Java binding of the MPI operation MPI_CANCEL.
+   */
+
+  public native void Cancel() throws MPIException ;
+
+  /**
+   * Test if request object is void.
+   *

+ * <table>
+ * <tr><td><em> returns: </em></td><td> true if the request object is void, false otherwise </tr>
+ * </table>
+ */ + + public native boolean Is_null(); + + /* + * After initial wait succeeds with some status, complete as necessary. + */ + + private Status complete(Status status) throws MPIException { + switch(typeTag) { + case TYPE_NORMAL : + + break; + case TYPE_OBJECT : + switch(opTag) { + case OP_SEND : + hdrReq.Wait(new Status()) ; // Data has already gone, but must + // still do `wait' on header send. + break; + case OP_RECV : + + int index = status.index ; + + // Header has arrived, now read the actual data. + + byte[] byte_buf = new byte[length_buf[0]]; + status = comm.Recv(byte_buf, 0, length_buf[0], MPI.BYTE, + status.source, tag) ; + comm.Object_Deserialize(buf, byte_buf, offset, length_buf[1], + type); + + status.object_count = length_buf[1]; + status.index = index ; + + break; + } + + break ; + } + return status ; + } + + /** + * Blocks until the operation identified by the request is complete. + *

+ * <table>
+ * <tr><td><em> returns: </em></td><td> status object </tr>
+ * </table>

+ * Java binding of the MPI operation MPI_WAIT. + *

+ * After the call returns, the request object becomes inactive. + */ + + public Status Wait() throws MPIException { + Status result = new Status(); + Wait(result); + + return complete(result) ; + } + + private native Status Wait(Status stat); + + /** + * Returns a status object if the operation identified by the request + * is complete, or a null reference otherwise. + *

+ * <table>
+ * <tr><td><em> returns: </em></td><td> status object or null reference </tr>
+ * </table>

+ * Java binding of the MPI operation MPI_TEST. + *

+ * After the call, if the operation is complete (i.e., if the return value
+ * is non-null), the request object becomes inactive.
+ */
+
+  public Status Test() throws MPIException {
+
+    Status result = new Status();
+    if (Test(result) == null)
+      return null;
+    else
+      return complete(result) ;
+  }
+
+  private native Status Test(Status stat);
+
+  /**
+   * Blocks until one of the operations associated with the active
+   * requests in the array has completed.
+   *

+ * <table>
+ * <tr><td><tt> array_of_requests </tt></td><td> array of requests </tr>
+ * <tr><td><em> returns: </em></td><td> status object </tr>
+ * </table>

+ * Java binding of the MPI operation MPI_WAITANY. + *

+ * The index in array_of_requests for the request that completed + * can be obtained from the returned status object through the + * Status.index field. The corresponding element + * of array_of_requests becomes inactive. + */ + + public static Status Waitany(Request [] array_of_request) + throws MPIException { + Status result = new Status(); + Waitany(array_of_request, result); + + if(result == null) + return null; + else + return array_of_request[result.index].complete(result) ; + } + + private static native Status Waitany(Request [] array_of_request, + Status stat); + + /** + * Tests for completion of either one or none of the operations associated + * with active requests. + *

+ * <table>
+ * <tr><td><tt> array_of_requests </tt></td><td> array of requests </tr>
+ * <tr><td><em> returns: </em></td><td> status object or null reference </tr>
+ * </table>

+ * Java binding of the MPI operation MPI_TESTANY. + *

+ * If some request completed, the index in array_of_requests
+ * for that request can be obtained from the returned status object
+ * through the Status.index field. The corresponding element
+ * of array_of_requests becomes inactive.
+ * If no request completed, Testany returns a null reference.
+ */
+
+  public static Status Testany(Request [] array_of_request)
+    throws MPIException {
+
+    Status result = new Status();
+    result = Testany(array_of_request, result);
+
+    if(result == null)
+      return null;
+    else
+      return array_of_request[result.index].complete(result) ;
+  }
+
+  private static native Status Testany(Request [] array_of_request,
+                                       Status stat);
+
+  /**
+   * Blocks until all of the operations associated with the active
+   * requests in the array have completed.
+   *

+ * <table>
+ * <tr><td><tt> array_of_requests </tt></td><td> array of requests </tr>
+ * <tr><td><em> returns: </em></td><td> array of status objects </tr>
+ * </table>
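+ * <p>
+ * A minimal sketch, overlapping a nonblocking send and receive
+ * (assuming the Isend/Irecv variants that return a new Request;
+ * buffers and ranks are illustrative):
+ * <pre>
+ * Request [] reqs = new Request [2] ;
+ * reqs[0] = comm.Isend(sbuf, 0, n, MPI.INT, dest, tag) ;
+ * reqs[1] = comm.Irecv(rbuf, 0, n, MPI.INT, src, tag) ;
+ * Status [] stats = Request.Waitall(reqs) ;
+ * </pre>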

+ * Java binding of the MPI operation MPI_WAITALL. + *

+ * The result array will be the same size as array_of_requests.
+ * On exit, requests become inactive. If the input value of
+ * array_of_requests contains inactive requests, corresponding
+ * elements of the result array will contain null status references.
+ */
+
+  public static Status[] Waitall (Request [] array_of_request)
+    throws MPIException {
+    Status result[] = waitall(array_of_request);
+
+    for (int i = 0 ; i < array_of_request.length ; i++)
+      result [i] = array_of_request [i].complete(result [i]) ;
+
+    return result;
+  }
+
+  private static native Status[] waitall(Request [] array_of_request);
+
+  /**
+   * Tests for completion of all of the operations associated
+   * with active requests.
+   *

+ * <table>
+ * <tr><td><tt> array_of_requests </tt></td><td> array of requests </tr>
+ * <tr><td><em> returns: </em></td><td> array of status objects </tr>
+ * </table>

+ * Java binding of the MPI operation MPI_TESTALL. + *

+ * If all operations have completed, the exit value of the argument array + * and the result array are as for Waitall. If any + * operation has not completed, the result value is null and no + * element of the argument array is modified. + */ + + public static Status[] Testall(Request [] array_of_request) + throws MPIException { + Status result[] = testall(array_of_request); + + if (result == null) + return null; + else { + for (int i = 0 ; i < array_of_request.length ; i++) + result [i] = array_of_request [i].complete(result [i]) ; + + return result; + } + } + + private static native Status[] testall(Request [] array_of_request); + + /** + * Blocks until at least one of the operations associated with the active + * requests in the array has completed. + *

+ * <table>
+ * <tr><td><tt> array_of_requests </tt></td><td> array of requests </tr>
+ * <tr><td><em> returns: </em></td><td> array of status objects </tr>
+ * </table>

+ * Java binding of the MPI operation MPI_WAITSOME. + *

+ * The size of the result array will be the number of operations that + * completed. The index in array_of_requests for each request that + * completed can be obtained from the returned status objects through the + * Status.index field. The corresponding element in + * array_of_requests becomes inactive. + */ + + public static Status[] Waitsome(Request [] array_of_request) + throws MPIException { + Status result[] = waitsome(array_of_request); + + for (int i = 0 ; i < result.length ; i++) + result [i] = array_of_request [result [i].index].complete(result [i]) ; + + return result; + } + + private static native Status[] waitsome(Request [] array_of_request); + + /** + * Behaves like Waitsome, except that it returns immediately. + *

+ * <table>
+ * <tr><td><tt> array_of_requests </tt></td><td> array of requests </tr>
+ * <tr><td><em> returns: </em></td><td> array of status objects </tr>
+ * </table>

+ * Java binding of the MPI operation MPI_TESTSOME. + *

+ * If no operation has completed, TestSome returns an array of + * length zero and elements of array_of_requests are unchanged. + * Otherwise, arguments and return value are as for Waitsome. + */ + + public static Status[] Testsome(Request [] array_of_request) + throws MPIException { + Status result[] = testsome(array_of_request); + + if (result == null) + return null; + else { + for (int i = 0 ; i < result.length ; i++) + result [i] = array_of_request [result [i].index].complete(result [i]) ; + + return result; + } + } + + private static native Status[] testsome(Request [] array_of_request); + + // Fields manipulated only by native methods... + + protected long handle; + + // `bufSave', etc, not generally the same as `buf', etc. + // In `MPJ.OBJECT' receive case `buf', etc, refer to the array of objects, + // `bufSave', etc refer to header buffer. + + protected Object bufSave ; + protected int countSave, offsetSave ; + + protected long bufbaseSave, bufptrSave ; + protected int baseTypeSave ; + protected long commSave, typeSave ; + + static { + init(); + } +} + +// Things to do +// +// What happens to `Cancel' in the object case? + diff --git a/ompi/mpi/java/java/ShiftParms.java b/ompi/mpi/java/java/ShiftParms.java new file mode 100644 index 0000000000..88c33243fb --- /dev/null +++ b/ompi/mpi/java/java/ShiftParms.java @@ -0,0 +1,29 @@ +/* + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ +/* + * File : ShiftParms.java + * Author : Xinying Li + * Created : Thu Apr 9 12:22:15 1998 + * Revision : $Revision: 1.1 $ + * Updated : $Date: 1998/08/26 18:50:05 $ + * Copyright: Northeast Parallel Architectures Center + * at Syracuse University 1998 + */ + +package mpi; + +public class ShiftParms { + public int rank_source; + public int rank_dest; +} diff --git a/ompi/mpi/java/java/Status.java b/ompi/mpi/java/java/Status.java new file mode 100644 index 0000000000..332f7a7adc --- /dev/null +++ b/ompi/mpi/java/java/Status.java @@ -0,0 +1,121 @@ +/* + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ +/* + * File : Status.java + * Author : Sang Lim, Sung-Hoon Ko, Xinying Li, Bryan Carpenter + * Created : Thu Apr 9 12:22:15 1998 + * Revision : $Revision: 1.15 $ + * Updated : $Date: 2003/01/16 16:39:34 $ + * Copyright: Northeast Parallel Architectures Center + * at Syracuse University 1998 + */ + +package mpi; + +public class Status extends Freeable { + + public int index; + public int source; + public int tag; + + int elements; + + //protected int count; + protected int object_count; + +// protected Status(long _handle) { handle = _handle;} + + public Status() {alloc() ;} + private native void alloc() ; + + @SuppressWarnings("unchecked") + public void finalize() throws MPIException { + synchronized(MPI.class) { + MPI.freeList.addFirst(this) ; + } + } + + native void free() ; + + /** + * Get the number of received entries. + *

+ * <table> + * <tr><td><tt> datatype </tt></td><td> datatype of each item in receive + * buffer </tr> + * <tr><td><em> returns: </em></td><td> number of received entries </tr> + * </table> + * <p>
+ * Java binding of the MPI operation MPI_GET_COUNT. + */ + + public int Get_count(Datatype datatype) throws MPIException { + + if (datatype.isObject()) + return object_count; // Is this correct? + else + return get_count(datatype); + } + + private native int get_count(Datatype datatype); + + /** + * Test if communication was cancelled. + *

+ * <table> + * <tr><td><em> returns: </em></td><td> true if the operation was + * successfully cancelled, + * false otherwise </tr> + * </table> + * <p>
+ * Java binding of the MPI operation MPI_TEST_CANCELLED. + */ + + public native boolean Test_cancelled() throws MPIException ; + + /** + * Retrieve number of basic elements from status. + *

+ * <table> + * <tr><td><tt> datatype </tt></td><td> datatype used by receive + * operation </tr> + * <tr><td><em> returns: </em></td><td> number of received basic + * elements </tr> + * </table> + * <p>
+ * Java binding of the MPI operation MPI_GET_ELEMENTS. + */ + + public int Get_elements(Datatype datatype) throws MPIException { + if(datatype.isObject()) + return MPI.UNDEFINED; // Is this correct? + else + return get_elements(datatype) ; + } + + private native int get_elements(Datatype datatype); + + private static native void init(); + + protected long handle; + + static { + init(); + } + +} + +// Things to do +// + diff --git a/ompi/mpi/java/java/User_function.java b/ompi/mpi/java/java/User_function.java new file mode 100644 index 0000000000..179f666342 --- /dev/null +++ b/ompi/mpi/java/java/User_function.java @@ -0,0 +1,51 @@ +/* + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ +/* + * File : User_function.java + * Author : Xinying Li + * Created : Thu Apr 9 12:22:15 1998 + * Revision : $Revision: 1.4 $ + * Updated : $Date: 1999/09/13 16:14:30 $ + * Copyright: Northeast Parallel Architectures Center + * at Syracuse University 1998 + */ + +package mpi; + +public abstract class User_function{ + + /** + * User-defined function for a new Op. + *

+ * <table> + * <tr><td><tt> invec </tt></td><td> array of values to combine with + * inoutvec elements </tr> + * <tr><td><tt> inoffset </tt></td><td> initial offset in + * invec </tr> + * <tr><td><tt> inoutvec </tt></td><td> in-out array of accumulator + * locations </tr> + * <tr><td><tt> inoutoffset </tt></td><td> initial offset in + * inoutvec </tr> + * <tr><td><tt> count </tt></td><td> number of items in arrays </tr> + * <tr><td><tt> datatype </tt></td><td> type of each item </tr> + * </table> + * <p>
+ * Java equivalent of the MPI USER_FUNCTION. + */ + + public abstract void Call(Object invec, int inoffset, + Object inoutvec, int inoutoffset, + int count, Datatype datatype) ; +} + diff --git a/ompi/mpi/man/man3/MPI_Abort.3in b/ompi/mpi/man/man3/MPI_Abort.3in index 5bdfb9c64b..d219b6f9ba 100644 --- a/ompi/mpi/man/man3/MPI_Abort.3in +++ b/ompi/mpi/man/man3/MPI_Abort.3in @@ -26,6 +26,12 @@ MPI_ABORT(\fICOMM\fP, \fIERRORCODE\fP, \fIIERROR\fP) #include void Comm::Abort(int \fIerrorcode\fP) +.fi +.SH Java Syntax +.nf +import mpi.*; +void MPI.Abort(int \fIerrorcode\fP) + .fi .SH INPUT PARAMETERS .ft R @@ -50,12 +56,12 @@ action with the error code. However, a UNIX or POSIX environment should handle this as a return errorcode from the main program or an abort (errorcode). .sp -The Open MPI implementation terminates all processes in all tasks that contain a process in \fIcomm\fP, and the error code is not returned to the invoking environment. +The long-term goal of the Open MPI implementation is to terminate all processes in all tasks that contain a process in \fIcomm\fP, and the error code is not returned to the invoking environment. At the moment, this is not yet fully implemented and MPI_Abort will terminate the entire job. The Java implementation currently does not allow specification of a communicator and aborts the entire job - this will be updated to support the long-term implementation at a later date. .sp Note: All associated processes are sent a SIGTERM. .SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. C++ functions do not return errors. If the default error handler is set to MPI::ERRORS_THROW_EXCEPTIONS, then on error the C++ exception mechanism will be used to throw an MPI::Exception object. +Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. C++ and Java functions do not return errors. If the default error handler is set to MPI::ERRORS_THROW_EXCEPTIONS, then on error the C++ exception mechanism will be used to throw an MPI::Exception object; the Java bindings behave similarly, throwing an MPIException. .sp Before the error value is returned, the current MPI error handler is called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler diff --git a/ompi/mpi/man/man3/MPI_Allgather.3in b/ompi/mpi/man/man3/MPI_Allgather.3in index 024e02f1bf..988a7ef736 100644 --- a/ompi/mpi/man/man3/MPI_Allgather.3in +++ b/ompi/mpi/man/man3/MPI_Allgather.3in @@ -32,6 +32,13 @@ void MPI::Comm::Allgather(const void* \fIsendbuf\fP, int \fIsendcount\fP, const MPI::Datatype& \fIsendtype\fP, void* \fIrecvbuf\fP, int \fIrecvcount\fP, const MPI::Datatype& \fIrecvtype\fP) const = 0 +.fi +.SH Java Syntax +.nf +import mpi.*; +void MPI.COMM_WORLD.Allgather(Object \fIsendbuf\fP, int \fIsendoffset\fP, int \fIsendcount\fP, MPI.Datatype \fIsendtype\fP, + Object \fIrecvbuf\fP, int \fIrecvoffset\fP, int \fIrecvcount\fP, MPI.Datatype \fIrecvtype\fP) + .fi .SH INPUT PARAMETERS .ft R @@ -39,12 +46,21 @@ void MPI::Comm::Allgather(const void* \fIsendbuf\fP, int \fIsendcount\fP, const sendbuf Starting address of send buffer (choice). .TP 1i +sendoffset +Number of elements to skip at beginning of buffer (integer, Java-only). +.TP 1i sendcount Number of elements in send buffer (integer). .TP 1i sendtype Datatype of send buffer elements (handle). .TP 1i +recvbuf +Starting address of recv buffer (choice).
+.TP 1i +recvoffset +Number of elements to skip at beginning of buffer (integer, Java-only). +.TP 1i recvcount Number of elements received from any process (integer). .TP 1i diff --git a/ompi/mpi/man/man3/MPI_Allgatherv.3in b/ompi/mpi/man/man3/MPI_Allgatherv.3in index 852ab01e5a..1a6ccbfbe9 100644 --- a/ompi/mpi/man/man3/MPI_Allgatherv.3in +++ b/ompi/mpi/man/man3/MPI_Allgatherv.3in @@ -33,6 +33,14 @@ void MPI::Comm::Allgatherv(const void* \fIsendbuf\fP, int \fIsendcount\fP, const int \fIrecvcounts\fP[], const int \fIdispls\fP[], const MPI::Datatype& \fIrecvtype\fP) const = 0 +.fi +.SH Java Syntax +.nf +import mpi.*; +void MPI.COMM_WORLD.Allgatherv(Object \fIsendbuf\fP, int \fIsendoffset\fP, int \fIsendcount\fP, MPI.Datatype \fIsendtype\fP, + Object \fIrecvbuf\fP, int \fIrecvoffset\fP, int \fIrecvcount\fP[], int \fIdispls\fP[], + MPI.Datatype \fIrecvtype\fP) + .fi .SH INPUT PARAMETERS .ft R @@ -40,12 +48,18 @@ void MPI::Comm::Allgatherv(const void* \fIsendbuf\fP, int \fIsendcount\fP, sendbuf Starting address of send buffer (choice). .TP 1i +sendoffset +Number of elements to skip at beginning of buffer (integer, Java-only). +.TP 1i sendcount Number of elements in send buffer (integer). .TP 1i sendtype Datatype of send buffer elements (handle). .TP 1i +recvoffset +Number of elements to skip at beginning of buffer (integer, Java-only). +.TP 1i recvcount Integer array (of length group size) containing the number of elements that are received from each process. .TP 1i diff --git a/ompi/mpi/man/man3/MPI_Alloc_mem.3in b/ompi/mpi/man/man3/MPI_Alloc_mem.3in index 74926d61e8..2be075c5cb 100644 --- a/ompi/mpi/man/man3/MPI_Alloc_mem.3in +++ b/ompi/mpi/man/man3/MPI_Alloc_mem.3in @@ -84,6 +84,9 @@ For example, CALL MPI_FREE_MEM(A, IERR) .fi +.SH JAVA NOTES +.ft R +There is no Java syntax for using MPI_Alloc_mem. .SH ERRORS Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. C++ functions do not return errors. If the default error handler is set to MPI::ERRORS_THROW_EXCEPTIONS, then on error the C++ exception mechanism will be used to throw an MPI::Exception object. diff --git a/ompi/mpi/man/man3/MPI_Allreduce.3in b/ompi/mpi/man/man3/MPI_Allreduce.3in index af233b76b8..f6cbd28e28 100644 --- a/ompi/mpi/man/man3/MPI_Allreduce.3in +++ b/ompi/mpi/man/man3/MPI_Allreduce.3in @@ -31,15 +31,29 @@ void MPI::Comm::Allreduce(const void* \fIsendbuf\fP, void* \fIrecvbuf\fP, MPI::Op& \fIop\fP) const=0 .fi +.SH Java Syntax +.nf +import mpi.*; +void MPI.COMM_WORLD.Allreduce(Object \fIsendbuf\fP, int \fIsendoffset\fP, + Object \fIrecvbuf\fP, int \fIrecvoffset\fP, + int \fIcount\fP, MPI.Datatype \fIdatatype\fP, + MPI.Op \fIop\fP) +.fi .SH INPUT PARAMETERS .ft R .TP 1i sendbuf Starting address of send buffer (choice). .TP 1i +sendoffset +Number of elements to skip at beginning of buffer (integer, Java-only). +.TP 1i count Number of elements in send buffer (integer). .TP 1i +recvoffset +Number of elements to skip at beginning of buffer (integer, Java-only). +.TP 1i datatype Datatype of elements of send buffer (handle).
.TP 1i diff --git a/ompi/mpi/man/man3/MPI_Alltoall.3in b/ompi/mpi/man/man3/MPI_Alltoall.3in index bddfd57bf0..1ae8fc065b 100644 --- a/ompi/mpi/man/man3/MPI_Alltoall.3in +++ b/ompi/mpi/man/man3/MPI_Alltoall.3in @@ -36,18 +36,30 @@ void MPI::Comm::Alltoall(const void* \fIsendbuf\fP, int \fIsendcount\fP, int \fIrecvcount\fP, const MPI::Datatype& \fIrecvtype\fP) .fi +.SH Java Syntax +.nf +import mpi.*; +void MPI.COMM_WORLD.Alltoall(Object \fIsendbuf\fP, int \fIsendoffset\fP, int \fIsendcount\fP, MPI.Datatype \fIsendtype\fP, + Object \fIrecvbuf\fP, int \fIrecvoffset\fP, int \fIrecvcount\fP, MPI.Datatype \fIrecvtype\fP) +.fi .SH INPUT PARAMETERS .ft R .TP 1.2i sendbuf Starting address of send buffer (choice). .TP 1.2i +sendoffset +Number of elements to skip at beginning of buffer (integer, Java-only). +.TP 1.2i sendcount Number of elements to send to each process (integer). .TP 1.2i sendtype Datatype of send buffer elements (handle). .TP 1.2i +recvoffset +Number of elements to skip at beginning of buffer (integer, Java-only). +.TP 1.2i recvcount Number of elements to receive from each process (integer). .TP 1.2i diff --git a/ompi/mpi/man/man3/MPI_Alltoallv.3in b/ompi/mpi/man/man3/MPI_Alltoallv.3in index a2bed14731..8d5d381e4b 100644 --- a/ompi/mpi/man/man3/MPI_Alltoallv.3in +++ b/ompi/mpi/man/man3/MPI_Alltoallv.3in @@ -40,12 +40,23 @@ void MPI::Comm::Alltoallv(const void* \fIsendbuf\fP, const MPI::Datatype& \fIrecvtype\fP) .fi +.SH Java Syntax +.nf +import mpi.*; +void MPI.COMM_WORLD.Alltoallv(Object \fIsendbuf\fP, int \fIsendoffset\fP, int \fIsendcount\fP[], + int \fIsdispls\fP[], MPI.Datatype \fIsendtype\fP, + Object \fIrecvbuf\fP, int \fIrecvoffset\fP, int \fIrecvcount\fP[], + int \fIrdispls\fP[], MPI.Datatype \fIrecvtype\fP) +.fi .SH INPUT PARAMETERS .ft R .TP 1.2i sendbuf Starting address of send buffer. .TP 1.2i +sendoffset +Initial number of elements to skip at beginning of buffer (integer, Java-only). +.TP 1.2i sendcounts Integer array, where entry i specifies the number of elements to send to rank i. @@ -58,6 +69,9 @@ rank i. sendtype Datatype of send buffer elements. .TP 1.2i +recvoffset +Initial number of elements to skip at beginning of buffer (integer, Java-only). +.TP 1.2i recvcounts Integer array, where entry j specifies the number of elements to receive from rank j. 
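The Java collective bindings documented above all share one convention: every buffer argument is paired with an explicit element offset, since Java offers no pointer arithmetic into arrays. A minimal sketch of an Allreduce call using that convention follows; it assumes the mpiJava-derived API this patch adds (MPI.Init/MPI.Finalize, the Rank() accessor, and the MPI.INT and MPI.SUM constants), and the class name SumRanks is purely illustrative:

import mpi.*;

public class SumRanks {
    public static void main(String[] args) throws MPIException {
        MPI.Init(args);
        int rank = MPI.COMM_WORLD.Rank();

        // Each rank contributes its own rank number
        int[] sendbuf = new int[] { rank };
        int[] recvbuf = new int[1];

        // Both offsets are 0: operate on the buffers from their first element
        MPI.COMM_WORLD.Allreduce(sendbuf, 0, recvbuf, 0, 1, MPI.INT, MPI.SUM);

        System.out.println("rank " + rank + ": sum of ranks = " + recvbuf[0]);
        MPI.Finalize();
    }
}

Compiled with mpijavac and launched via mpirun, every rank should print the same total.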
diff --git a/ompi/mpi/man/man3/MPI_Attr_delete.3in b/ompi/mpi/man/man3/MPI_Attr_delete.3in index 37b09cf85c..0d3684994e 100644 --- a/ompi/mpi/man/man3/MPI_Attr_delete.3in +++ b/ompi/mpi/man/man3/MPI_Attr_delete.3in @@ -20,6 +20,12 @@ MPI_ATTR_DELETE(\fICOMM\fP,\fI KEYVAL\fP, \fIIERROR\fP) INTEGER \fICOMM\fP,\fI KEYVAL\fP,\fI IERROR\fP +.fi +.SH Java Syntax +.nf +import mpi.*; +int MPI.COMM_WORLD.Attr_delete(MPI.\fIkeyval\fP) + .fi .SH INPUT PARAMETERS .ft R diff --git a/ompi/mpi/man/man3/MPI_Attr_get.3in b/ompi/mpi/man/man3/MPI_Attr_get.3in index 21d942f13c..d4b8a6cfcd 100644 --- a/ompi/mpi/man/man3/MPI_Attr_get.3in +++ b/ompi/mpi/man/man3/MPI_Attr_get.3in @@ -22,6 +22,12 @@ MPI_ATTR_GET(\fICOMM\fP,\fI KEYVAL\fP, \fIATTRIBUTE_VAL\fP,\fI FLAG\fP,\fI IERRO LOGICAL \fIFLAG\fP +.fi +.SH Java Syntax +.nf +import mpi.*; +int MPI.COMM_WORLD.Attr_get(MPI.\fIkeyval\fP) + .fi .SH INPUT PARAMETERS .ft R diff --git a/ompi/mpi/man/man3/MPI_Attr_put.3in b/ompi/mpi/man/man3/MPI_Attr_put.3in index 36ba4fd1e0..aa6d4fda09 100644 --- a/ompi/mpi/man/man3/MPI_Attr_put.3in +++ b/ompi/mpi/man/man3/MPI_Attr_put.3in @@ -20,6 +20,12 @@ MPI_ATTR_PUT(\fICOMM\fP,\fI KEYVAL\fP, \fIATTRIBUTE_VAL\fP,\fI IERROR\fP) INTEGER \fICOMM\fP,\fI KEYVAL\fP,\fI ATTRIBUTE_VAL\fP,\fI IERROR +.fi +.SH Java Syntax +.nf +import mpi.*; +int MPI.COMM_WORLD.Attr_put(MPI.\fIkeyval\fP, int \fIattribute_val\fP) + .fi .SH INPUT PARAMETERS .ft R diff --git a/ompi/mpi/man/man3/MPI_Barrier.3in b/ompi/mpi/man/man3/MPI_Barrier.3in index 1a766bfdec..2e9ad24cc5 100644 --- a/ompi/mpi/man/man3/MPI_Barrier.3in +++ b/ompi/mpi/man/man3/MPI_Barrier.3in @@ -22,6 +22,12 @@ MPI_BARRIER(\fICOMM\fP,\fI IERROR\fP) .fi .SH C++ Syntax .nf #include void MPI::Comm::Barrier() const = 0 +.fi +.SH Java Syntax +.nf +import mpi.*; +void MPI.COMM_WORLD.Barrier() + diff --git a/ompi/mpi/man/man3/MPI_Bcast.3in b/ompi/mpi/man/man3/MPI_Bcast.3in index 9400be19f7..5da87dade3 100644 --- a/ompi/mpi/man/man3/MPI_Bcast.3in +++ b/ompi/mpi/man/man3/MPI_Bcast.3in @@ -28,6 +28,13 @@ MPI_BCAST(\fIBUFFER\fP,\fI COUNT\fP, \fIDATATYPE\fP,\fI ROOT\fP,\fI COMM\fP,\fI void MPI::Comm::Bcast(void* \fIbuffer\fP, int \fIcount\fP, const MPI::Datatype& \fIdatatype\fP, int \fIroot\fP) const = 0 +.fi +.SH Java Syntax +.nf +import mpi.*; +void MPI.COMM_WORLD.Bcast(Object \fIbuffer\fP, int \fIoffset\fP, int \fIcount\fP, + MPI.Datatype \fIdatatype\fP, int \fIroot\fP) + .fi .SH INPUT/OUTPUT PARAMETERS .ft R @@ -35,6 +42,9 @@ void MPI::Comm::Bcast(void* \fIbuffer\fP, int \fIcount\fP, buffer Starting address of buffer (choice). .TP 1i +offset +Offset of starting point in buffer (Java-only). +.TP 1i count Number of entries in buffer (integer). .TP 1i diff --git a/ompi/mpi/man/man3/MPI_Bsend.3in b/ompi/mpi/man/man3/MPI_Bsend.3in index 119e770c01..ac74d0c1f3 100644 --- a/ompi/mpi/man/man3/MPI_Bsend.3in +++ b/ompi/mpi/man/man3/MPI_Bsend.3in @@ -28,6 +28,13 @@ MPI_BSEND(\fIBUF\fP,\fI COUNT\fP,\fIDATATYPE\fP,\fI DEST\fP,\fI TAG\fP,\fI COMM\ void Comm::Bsend(const void* \fIbuf\fP, int \fIcount\fP, const Datatype& \fIdatatype\fP, int \fIdest\fP, int \fItag\fP) const +.fi +.SH Java Syntax +.nf +import mpi.*; +void MPI.COMM_WORLD.Bsend(Object \fIbuf\fP, int \fIoffset\fP, int \fIcount\fP, + MPI.Datatype \fIdatatype\fP, int \fIdest\fP, int \fItag\fP) + .fi .SH INPUT PARAMETERS .ft R @@ -35,6 +42,9 @@ void Comm::Bsend(const void* \fIbuf\fP, int \fIcount\fP, const buf Initial address of send buffer (choice). .TP 1i +offset +Offset of starting point in buffer (Java-only).
+.TP 1i count Number of entries in send buffer (nonnegative integer). .TP 1i diff --git a/ompi/mpi/man/man3/MPI_Bsend_init.3in b/ompi/mpi/man/man3/MPI_Bsend_init.3in index fa9edf4589..6656635da5 100644 --- a/ompi/mpi/man/man3/MPI_Bsend_init.3in +++ b/ompi/mpi/man/man3/MPI_Bsend_init.3in @@ -26,10 +26,17 @@ MPI_BSEND_INIT(\fIBUF\fP,\fI COUNT\fP, \fIDATATYPE\fP,\fI DEST\fP,\fI TAG\fP,\fI .fi .SH C++ Syntax .nf #include Prequest Comm::Bsend_init(const void* \fIbuf\fP, int \fIcount\fP, const Datatype& \fIdatatype\fP, int \fIdest\fP, int \fItag\fP) const +.fi +.SH Java Syntax +.nf +import mpi.*; +Prequest MPI.COMM_WORLD.Bsend_init(Object \fIbuf\fP, int \fIoffset\fP, int \fIcount\fP, + MPI.Datatype \fIdatatype\fP, int \fIdest\fP, int \fItag\fP) + .fi .SH INPUT PARAMETERS .ft R @@ -37,6 +44,9 @@ Prequest Comm::Bsend_init(const void* \fIbuf\fP, int \fIcount\fP, const buf Initial address of send buffer (choice). .TP 1i +offset +Offset of starting point in buffer (Java-only). +.TP 1i count Number of elements sent (integer). .TP 1i diff --git a/ompi/mpi/man/man3/MPI_Buffer_attach.3in b/ompi/mpi/man/man3/MPI_Buffer_attach.3in index b290e1380a..acc6f38c5e 100644 --- a/ompi/mpi/man/man3/MPI_Buffer_attach.3in +++ b/ompi/mpi/man/man3/MPI_Buffer_attach.3in @@ -26,6 +26,12 @@ MPI_BUFFER_ATTACH(\fIBUF\fP,\fI SIZE\fP, \fIIERROR\fP) #include void Attach_buffer(void* \fIbuffer\fP, int \fIsize\fP) +.fi +.SH Java Syntax +.nf +import mpi.*; +void MPI.Buffer_attach(byte[] \fIbuffer\fP) + .fi .SH INPUT PARAMETERS .ft R diff --git a/ompi/mpi/man/man3/MPI_Buffer_detach.3in b/ompi/mpi/man/man3/MPI_Buffer_detach.3in index 32fc21e19e..f0a9a63cf4 100644 --- a/ompi/mpi/man/man3/MPI_Buffer_detach.3in +++ b/ompi/mpi/man/man3/MPI_Buffer_detach.3in @@ -26,6 +26,12 @@ MPI_BUFFER_DETACH(\fIBUF\fP,\fI SIZE\fP, \fIIERROR\fP) #include int Detach_buffer(void*& \fIbuffer\fP) +.fi +.SH Java Syntax +.nf +import mpi.*; +byte[] MPI.Buffer_detach() + .fi .SH OUTPUT PARAMETERS .ft R diff --git a/ompi/mpi/man/man3/MPI_Cancel.3in b/ompi/mpi/man/man3/MPI_Cancel.3in index 240ab1b067..664e08a3a9 100644 --- a/ompi/mpi/man/man3/MPI_Cancel.3in +++ b/ompi/mpi/man/man3/MPI_Cancel.3in @@ -25,6 +25,12 @@ MPI_CANCEL(\fIREQUEST\fP, \fIIERROR\fP) #include void Request::Cancel() const +.fi +.SH Java Syntax +.nf +import mpi.*; +void MPI.Request.Cancel() + .fi .SH INPUT PARAMETER .ft R diff --git a/ompi/mpi/man/man3/MPI_Cart_coords.3in b/ompi/mpi/man/man3/MPI_Cart_coords.3in index f06bb5aa2d..2c724df3fe 100644 --- a/ompi/mpi/man/man3/MPI_Cart_coords.3in +++ b/ompi/mpi/man/man3/MPI_Cart_coords.3in @@ -27,6 +27,12 @@ MPI_CART_COORDS(\fICOMM\fP,\fI RANK\fP,\fI MAXDIMS\fP,\fI COORDS\fP, \fIIERROR\f void Cartcomm::Get_coords(int \fIrank\fP, int \fImaxdims\fP, int \fIcoords\fP[]) const +.fi +.SH Java Syntax +.nf +import mpi.*; +int[] Cartcomm.Coords(int \fIrank\fP) + .fi .SH INPUT PARAMETERS .ft R diff --git a/ompi/mpi/man/man3/MPI_Cart_create.3in b/ompi/mpi/man/man3/MPI_Cart_create.3in index 42d59300d5..8857f4e732 100644 --- a/ompi/mpi/man/man3/MPI_Cart_create.3in +++ b/ompi/mpi/man/man3/MPI_Cart_create.3in @@ -26,9 +26,15 @@ MPI_CART_CREATE(\fICOMM_OLD\fP,\fI NDIMS\fP,\fI DIMS\fP,\fI PERIODS\fP,\fI REORD .SH C++ Syntax .nf #include Cartcomm Intracomm::Create_cart(int \fIndims\fP, const int \fIdims\fP[], const bool \fIperiods\fP[], bool \fIreorder\fP) const +.fi +.SH Java Syntax +.nf +import mpi.*; +Cartcomm
Intracomm.Create_cart(int \fIdims\fP[], boolean \fIperiods\fP[], boolean \fIreorder\fP) + .fi .SH INPUT PARAMETERS .ft R diff --git a/ompi/mpi/man/man3/MPI_Cart_get.3in b/ompi/mpi/man/man3/MPI_Cart_get.3in index 130e1c65ca..e6018320eb 100644 --- a/ompi/mpi/man/man3/MPI_Cart_get.3in +++ b/ompi/mpi/man/man3/MPI_Cart_get.3in @@ -28,6 +28,13 @@ MPI_CART_GET(\fICOMM\fP, \fIMAXDIMS\fP, \fIDIMS\fP, \fIPERIODS\fP, \fICOORDS\fP, void Cartcomm::Get_topo(int \fImaxdims\fP, int \fIdims\fP[], bool \fIperiods\fP[], int \fIcoords\fP[]) const +.fi +.SH Java Syntax +.nf +import mpi.*; +void Cartcomm.Get_topo(int \fImaxdims\fP, int \fIdims\fP[], + boolean \fIperiods\fP[], int \fIcoords\fP[]) + .fi .SH INPUT PARAMETERS .ft R diff --git a/ompi/tools/ompi_info/param.c b/ompi/tools/ompi_info/param.c index 125046ff4a..f1fdd8d5c8 100644 --- a/ompi/tools/ompi_info/param.c +++ b/ompi/tools/ompi_info/param.c @@ -496,6 +496,7 @@ void ompi_info_do_config(bool want_all) char *f77; char *f90; char *f90_size; + char *java; char *heterogeneous; char *memprofile; char *memdebug; @@ -545,6 +546,7 @@ void ompi_info_do_config(bool want_all) cxx = OMPI_WANT_CXX_BINDINGS ? "yes" : "no"; f90 = OMPI_WANT_F90_BINDINGS ? "yes" : "no"; f90_size = OMPI_F90_BUILD_SIZE; + java = OMPI_WANT_JAVA_BINDINGS ? "yes" : "no"; heterogeneous = OPAL_ENABLE_HETEROGENEOUS_SUPPORT ? "yes" : "no"; memprofile = OPAL_ENABLE_MEM_PROFILE ? "yes" : "no"; memdebug = OPAL_ENABLE_MEM_DEBUG ? "yes" : "no"; @@ -607,7 +609,8 @@ void ompi_info_do_config(bool want_all) ompi_info_out("Fortran90 bindings", "bindings:f90", f90); ompi_info_out("Fortran90 bindings size", "bindings:f90:size", OMPI_WANT_F90_BINDINGS ? f90_size : "na"); - + ompi_info_out("Java bindings", "bindings:java", java); + ompi_info_out("C compiler", "compiler:c:command", OPAL_CC); ompi_info_out("C compiler absolute", "compiler:c:absolute", OPAL_CC_ABSOLUTE); ompi_info_out("C compiler family name", "compiler:c:familyname", _STRINGIFY(OPAL_BUILD_PLATFORM_COMPILER_FAMILYNAME)); diff --git a/ompi/tools/wrappers/Makefile.am b/ompi/tools/wrappers/Makefile.am index 92f930db9e..deeb760dd6 100644 --- a/ompi/tools/wrappers/Makefile.am +++ b/ompi/tools/wrappers/Makefile.am @@ -10,6 +10,7 @@ # Copyright (c) 2004-2005 The Regents of the University of California. # All rights reserved. # Copyright (c) 2006-2010 Cisco Systems, Inc. All rights reserved. +# Copyright (c) 2012 Los Alamos National Security, LLC. All rights reserved.
# $COPYRIGHT$ # # Additional copyrights may follow @@ -17,7 +18,7 @@ # $HEADER$ # -man_pages = mpicc.1 mpic++.1 mpicxx.1 mpif77.1 mpif90.1 +man_pages = mpicc.1 mpic++.1 mpicxx.1 mpif77.1 mpif90.1 mpijavac.1 if OPAL_WANT_SCRIPT_WRAPPER_COMPILERS @@ -26,17 +27,22 @@ CLEANFILES += $(bin_SCRIPTS) install-exec-hook-always: test -z "$(bindir)" || $(mkdir_p) "$(DESTDIR)$(bindir)" + (cp mpijavac.pl $(DESTDIR)$(bindir)) (cd $(DESTDIR)$(bindir); rm -f mpicc; $(LN_S) ompi_wrapper_script mpicc) (cd $(DESTDIR)$(bindir); rm -f mpic++; $(LN_S) ompi_wrapper_script mpic++) (cd $(DESTDIR)$(bindir); rm -f mpicxx; $(LN_S) ompi_wrapper_script mpicxx) (cd $(DESTDIR)$(bindir); rm -f mpif77; $(LN_S) ompi_wrapper_script mpif77) (cd $(DESTDIR)$(bindir); rm -f mpif90; $(LN_S) ompi_wrapper_script mpif90) + (cd $(DESTDIR)$(bindir); chmod +x mpijavac.pl; rm -f mpijavac; $(LN_S) mpijavac.pl mpijavac) + uninstall-local-always: rm -f $(DESTDIR)$(bindir)/mpicc \ $(DESTDIR)$(bindir)/mpic++ \ $(DESTDIR)$(bindir)/mpicxx \ $(DESTDIR)$(bindir)/mpif77 \ - $(DESTDIR)$(bindir)/mpif90 + $(DESTDIR)$(bindir)/mpif90 \ + $(DESTDIR)$(bindir)/mpijavac + if CASE_SENSITIVE_FS install-exec-hook: install-exec-hook-always (cd $(DESTDIR)$(bindir); rm -f mpiCC; $(LN_S) ompi_wrapper_script mpiCC) @@ -64,11 +70,13 @@ dist_pkgdata_DATA = \ install-exec-hook-always: test -z "$(bindir)" || $(mkdir_p) "$(DESTDIR)$(bindir)" + (cp mpijavac.pl $(DESTDIR)$(bindir)) (cd $(DESTDIR)$(bindir); rm -f mpicc$(EXEEXT); $(LN_S) opal_wrapper mpicc) (cd $(DESTDIR)$(bindir); rm -f mpic++$(EXEEXT); $(LN_S) opal_wrapper mpic++) (cd $(DESTDIR)$(bindir); rm -f mpicxx$(EXEEXT); $(LN_S) opal_wrapper mpicxx) (cd $(DESTDIR)$(bindir); rm -f mpif77$(EXEEXT); $(LN_S) opal_wrapper mpif77) (cd $(DESTDIR)$(bindir); rm -f mpif90$(EXEEXT); $(LN_S) opal_wrapper mpif90) + (cd $(DESTDIR)$(bindir); chmod +x mpijavac.pl; rm -f mpijavac; $(LN_S) mpijavac.pl mpijavac) install-data-hook-always: (cd $(DESTDIR)$(pkgdatadir); rm -f mpicxx-wrapper-data.txt; $(LN_S) mpic++-wrapper-data.txt mpicxx-wrapper-data.txt) @@ -79,6 +87,7 @@ uninstall-local-always: $(DESTDIR)$(bindir)/mpicxx$(EXEEXT) \ $(DESTDIR)$(bindir)/mpif77$(EXEEXT) \ $(DESTDIR)$(bindir)/mpif90$(EXEEXT) \ + $(DESTDIR)$(bindir)/mpijavac \ $(DESTDIR)$(pkgdatadir)/mpicxx-wrapper-data.txt if CASE_SENSITIVE_FS diff --git a/ompi/tools/wrappers/mpijavac.1 b/ompi/tools/wrappers/mpijavac.1 new file mode 100644 index 0000000000..39c35c405f --- /dev/null +++ b/ompi/tools/wrappers/mpijavac.1 @@ -0,0 +1,146 @@ +.\" Copyright (c) 2012 Los Alamos National Security, LLC. All rights reserved. +.TH mpijavac 1 "Unreleased developer copy" "1.7a1r25839M" "Open MPI" +. +.SH NAME +mpijavac -- Open MPI Java wrapper compiler +. +.SH SYNTAX +mpijavac [-showme|-showme:compile|-showme:link] ... +. +.SH OPTIONS +.TP +--showme +This option comes in several different variants (see below). None of +the variants invokes the underlying compiler; they all provide +information on how the underlying compiler would have been invoked had +.I --showme +not been used. +The basic +.I --showme +option outputs the command line that would be executed to compile the +program. \fBNOTE:\fR If a non-filename argument is passed on the +command line, the \fI-showme\fR option will \fInot\fR display any +additional flags. For example, both "mpijavac --showme" and +"mpijavac --showme my_source.java" will show all the wrapper-supplied +flags. But "mpijavac --showme -v" will only show the underlying +compiler name and "-v".
+.TP +--showme:compile +Output the compiler flags that would have been supplied to the +java compiler. +.TP +--showme:link +Output the linker flags that would have been supplied to the +java compiler. +.TP +--showme:command +Outputs the underlying java compiler command (which may be one +or more tokens). +.TP +--showme:incdirs +Outputs a space-delimited (but otherwise undecorated) list of +directories that the wrapper compiler would have provided to the +underlying java compiler to indicate where relevant header files +are located. +.TP +--showme:libdirs +Outputs a space-delimited (but otherwise undecorated) list of +directories that the wrapper compiler would have provided to the +underlying linker to indicate where relevant libraries are located. +.TP +--showme:libs +Outputs a space-delimited (but otherwise undecorated) list of library +names that the wrapper compiler would have used to link an +application. For example: "mpi open-rte open-pal util". +.TP +--showme:version +Outputs the version number of Open MPI. +.PP +See the man page for your underlying java compiler for other +options that can be passed through mpijavac. +. +. +.SH DESCRIPTION +.PP +Conceptually, the role of these commands is quite simple: +transparently add relevant compiler and linker flags to the user's +command line that are necessary to compile / link Open MPI +programs, and then invoke the underlying compiler to actually perform +the command. +. +.PP +As such, these commands are frequently referred to as "wrapper" +compilers because they do not actually compile or link applications +themselves; they only add in command line flags and invoke the +back-end compiler. +. +. +.SS Overview +\fImpijavac\fR is a convenience wrapper for the underlying +java compiler. Translation of an Open MPI program requires the +linkage of the Open MPI-specific libraries which may not reside in +one of the standard search directories of ld(1). It also often +requires the inclusion of header files that may also not be found in a +standard location. +. +.PP +\fImpijavac\fR passes its arguments to the underlying java +compiler along with the class path settings required by Open MPI +programs. +. +.PP +The Open MPI Team \fIstrongly\fR encourages using the wrapper +compilers instead of attempting to link to the Open MPI libraries +manually. This allows the specific implementation of Open MPI to +change without forcing changes to linker directives in users' +Makefiles. Indeed, the specific set of flags and libraries used by +the wrapper compilers depends on how Open MPI was configured and +built; the values can change between different installations of the +same version of Open MPI. +. +.PP +Indeed, since the wrappers are simply thin shells on top of an +underlying compiler, there are very, very few compelling reasons +\fInot\fR to use \fImpijavac\fR. When it is not possible to use the +wrappers directly, the \fI-showme:compile\fR and \fI-showme:link\fR +options should be used to determine what flags the wrappers would have +used. +. +. +.SH NOTES +.PP +It is possible to make the wrapper compilers multi-lib aware. That +is, the libraries and includes specified may differ based on the +compiler flags specified (for example, with the GNU compilers on +Linux, a different library path may be used if -m32 is seen versus +-m64 being seen). This is not the default behavior in a standard +build, but can be activated (for example, in a binary package +providing both 32 and 64 bit support).
More information can be found +at: +.PP + https://svn.open-mpi.org/trac/ompi/wiki/compilerwrapper3264 +. +. +.SH FILES +.PP +The flags that the wrapper compilers insert into the command line +before invoking the underlying compiler are stored in a text file +created by Open MPI and installed to +\fI$pkgdata/mpijava-wrapper-data.txt\fR, where \fI$pkgdata\fR +is typically \fI$prefix/share/openmpi\fR, and \fI$prefix\fP is the top +installation directory of Open MPI. +. +.PP +It is rarely necessary to edit this file, but it can be examined to +gain insight into what flags the wrappers are placing on the command +line. +. +. +.SH ENVIRONMENT VARIABLES +.PP +By default, the wrappers use the compilers that were selected when +Open MPI was configured. These compilers were either found +automatically by Open MPI's "configure" script, or were selected by +the user in the CC, CXX, F77, JAVAC, and/or FC environment variables +before "configure" was invoked. Additionally, other arguments +specific to the compiler may have been selected by configure. diff --git a/ompi/tools/wrappers/mpijavac.pl.in b/ompi/tools/wrappers/mpijavac.pl.in new file mode 100644 index 0000000000..cf4e70c7ca --- /dev/null +++ b/ompi/tools/wrappers/mpijavac.pl.in @@ -0,0 +1,146 @@ +#!/usr/bin/env perl + +# WARNING: DO NOT EDIT THE mpijavac.pl FILE AS IT IS GENERATED! +# MAKE ALL CHANGES IN mpijavac.pl.in + +# Copyright (c) 2011 Cisco Systems, Inc. All rights reserved. + +use strict; + +# The main purpose of this wrapper compiler is to check for +# and adjust the Java class path to include the OMPI classes +# in mpi.jar. The user may have specified a class path on +# our cmd line, or it may be in the environment, so we have +# to check for both. We also need to be careful not to +# just override the class path as it probably includes classes +# they need for their application! It also may already include +# the path to mpi.jar, and while it doesn't hurt anything, we +# don't want to include our class path more than once to avoid +# user astonishment + +# Let the build system provide us with some critical values +my $my_compiler = "@JAVAC@"; +my $ompi_classpath = "@OMPI_WRAPPER_LIBDIR@/mpi.jar"; + +# globals +my $showme_arg = 0; +my $verbose = 0; +my $my_arg; + +# Cannot use the usual GetOpts library as the user might +# be passing -options to the Java compiler! So we have to +# parse the options ourselves to look for help and showme. +# Collect the pass-through arguments into a separate list +# rather than calling shift() inside the loop, since shift() +# removes the first element of @ARGV, not the flag being +# examined. +my @pass_args = (); +foreach $my_arg (@ARGV) { + if ($my_arg eq "-h" || + $my_arg eq "--h" || + $my_arg eq "-help" || + $my_arg eq "--help") { + print "Options: + --showme Show the wrapper compiler command without executing it + --help | -h This help list\n"; + exit; + } elsif ($my_arg eq "-showme" || + $my_arg eq "--showme") { + $showme_arg = 1; + } elsif ($my_arg eq "-verbose" || + $my_arg eq "-v" || + $my_arg eq "--verbose") { + $verbose = 1; + } else { + push(@pass_args, $my_arg); + } +} +@ARGV = @pass_args; + +# Create a place to save our argv array so we can edit any +# provided class path option +my @arguments = (); + +# Check the command line for a class path +my $where; +my $where2; +my $cp_found = 0; +my $my_cp; +foreach $my_arg (@ARGV) { + if (1 == $cp_found) { + $where = index($my_arg, "mpi.jar"); + if ($where < 0) { + # not found, so we add our path + $where = rindex($my_arg, ":"); + if ($where == length($my_arg)-1) { + # already have a colon at the end + $my_cp = $my_arg . $ompi_classpath; + } else { + # need to add the colon between paths + $my_cp = $my_arg . ":" .
$ompi_classpath; + } + push(@arguments, $my_cp); + } else { + # it was found, so just leave it alone + push(@arguments, $my_arg); + } + $cp_found = 2; + } else { + $where = index($my_arg, "-cp"); + if ($where < 0) { + # check for -- variant + $where = index($my_arg, "--cp"); + } + $where2 = index($my_arg, "-classpath"); + if ($where2 < 0) { + # check for -- variant + $where2 = index($my_arg, "--classpath"); + } + if (0 <= $where || 0 <= $where2) { + # the next argument will contain the class path + $cp_found = 1; + } + push(@arguments, $my_arg); + } +} + +# If the class path wasn't found on the cmd line, then +# we next check the class path in the environment, if it exists +if (2 != $cp_found && exists $ENV{'CLASSPATH'} && length($ENV{'CLASSPATH'}) > 0) { + $where = index($ENV{'CLASSPATH'}, "mpi.jar"); + if (0 <= $where) { + # their environ classpath already points to mpi.jar + unshift(@arguments, $ENV{'CLASSPATH'}); + unshift(@arguments, "-cp"); + } else { + # java will default to using this envar unless + # we provide an override on the javac cmd line. + # however, we are about to do just that so we + # can add the path to the mpi classes! thus, + # we want to grab the class path since they + # have it set and append our path to it + $where = rindex($ENV{'CLASSPATH'}, ":"); + if ($where == length($ENV{'CLASSPATH'})-1) { + # already has a colon at the end + $my_cp = $ENV{'CLASSPATH'} . $ompi_classpath; + } else { + # need to add colon between paths + $my_cp = $ENV{'CLASSPATH'} . ":" . $ompi_classpath; + } + unshift(@arguments, $my_cp); + unshift(@arguments, "-cp"); + } + # ensure we mark that we "found" the class path + $cp_found = 1; +} + +# If the class path wasn't found in either location, then +# we have to insert it as the first argument +if (0 == $cp_found) { + unshift(@arguments, $ompi_classpath); + unshift(@arguments, "-cp"); +} + +# Construct the command +if ($showme_arg) { + print "$my_compiler @arguments\n"; +} else { + if ($verbose) { + print "$my_compiler @arguments\n"; + } + system $my_compiler, @arguments; +} diff --git a/opal/util/argv.c b/opal/util/argv.c index 1121d69ad3..b110144c8a 100644 --- a/opal/util/argv.c +++ b/opal/util/argv.c @@ -10,6 +10,7 @@ * Copyright (c) 2004-2005 The Regents of the University of California. * All rights reserved. * Copyright (c) 2007 Voltaire. All rights reserved. + * Copyright (c) 2012 Los Alamos National Security, LLC. All rights reserved.
* * $COPYRIGHT$ * @@ -542,3 +543,42 @@ int opal_argv_insert(char ***target, int start, char **source) return OPAL_SUCCESS; } + +int opal_argv_insert_element(char ***target, int location, char *source) +{ + int i, target_count; + int suffix_count; + + /* Check for the bozo cases */ + + if (NULL == target || NULL == *target || location < 0) { + return OPAL_ERR_BAD_PARAM; + } else if (NULL == source) { + return OPAL_SUCCESS; + } + + /* Easy case: appending to the end */ + target_count = opal_argv_count(*target); + if (location > target_count) { + opal_argv_append(&target_count, target, source); + return OPAL_SUCCESS; + } + + /* Alloc out new space */ + *target = (char**) realloc(*target, + sizeof(char*) * (target_count + 2)); + + /* Move suffix items down to the end */ + suffix_count = target_count - location; + for (i = suffix_count - 1; i >= 0; --i) { + (*target)[location + 1 + i] = + (*target)[location + i]; + } + (*target)[location + suffix_count + 1] = NULL; + + /* Strdup in the source */ + (*target)[location] = strdup(source); + + /* All done */ + return OPAL_SUCCESS; +} diff --git a/opal/util/argv.h b/opal/util/argv.h index 5c75ada822..8c6973447a 100644 --- a/opal/util/argv.h +++ b/opal/util/argv.h @@ -12,6 +12,7 @@ * Copyright (c) 2007 Los Alamos National Security, LLC. * All rights reserved. * Copyright (c) 2007 Voltaire. All rights reserved. + * Copyright (c) 2012 Los Alamos National Security, LLC. All rights reserved. * * $COPYRIGHT$ * @@ -270,6 +271,29 @@ OPAL_DECLSPEC int opal_argv_delete(int *argc, char ***argv, */ OPAL_DECLSPEC int opal_argv_insert(char ***target, int start, char **source); +/** + * Insert one argv element in front of a specific position in an array + * + * @param target The argv to insert the token into + * @param location Index where the token will be placed in target + * @param source The token to be inserted + * + * @retval OPAL_SUCCESS upon success + * @retval OPAL_ERR_BAD_PARAM if any parameters are nonsensical + * + * This function takes one token and inserts it into the middle of + * another argv array. The token is inserted at the specified index + * in the target argv; all subsequent tokens are shifted down. + * Similar to opal_argv_append(), the target may be realloc()'ed + * to accommodate the new storage requirements. + * + * The source token is left unaffected -- its contents are copied + * by value over to the target array (i.e., the string that + * source points to is strdup'ed into the new location in + * target). + */ +OPAL_DECLSPEC int opal_argv_insert_element(char ***target, int location, char *source); + END_C_DECLS #endif /* OPAL_ARGV_H */ diff --git a/orte/tools/orterun/orterun.c b/orte/tools/orterun/orterun.c index fadaa5fdb7..2bb906c3a0 100644 --- a/orte/tools/orterun/orterun.c +++ b/orte/tools/orterun/orterun.c @@ -12,7 +12,7 @@ * All rights reserved. * Copyright (c) 2006-2011 Cisco Systems, Inc. All rights reserved. * Copyright (c) 2007-2009 Sun Microsystems, Inc. All rights reserved. - * Copyright (c) 2007-2011 Los Alamos National Security, LLC. All rights + * Copyright (c) 2007-2012 Los Alamos National Security, LLC. All rights * reserved. * $COPYRIGHT$ * @@ -1383,7 +1383,8 @@ static int create_app(int argc, char* argv[], char *param, *value, *value2; orte_app_context_t *app = NULL; bool cmd_line_made = false; - + bool found = false; + *made_app = false; /* Pre-process the command line if we are going to parse an appfile later.
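To make the new helper's semantics concrete before the hunk that uses it: given an argv of {"java", "MyApp", NULL}, a call such as opal_argv_insert_element(&argv, 1, "-cp") (values hypothetical) yields {"java", "-cp", "MyApp", NULL}; the source string is strdup'ed into the array, so the caller retains ownership of its own copy.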
@@ -1707,6 +1708,105 @@ static int create_app(int argc, char* argv[], goto cleanup; } + /* if this is a Java application, we have a bit more work to do. Such + * applications actually need to be run under the Java virtual machine + * and the "java" command will start the "executable". So we need to ensure + * that all the proper java-specific paths are provided + */ + if (0 == strcmp(app->app, "java")) { + /* see if we were given a library path */ + found = false; + for (i=0; NULL != app->argv[i]; i++) { + if (NULL != strstr(app->argv[i], "java.library.path")) { + /* yep - but does it include the path to the mpi libs? */ + found = true; + if (NULL == strstr(app->argv[i], opal_install_dirs.libdir)) { + /* doesn't appear to - add it to be safe */ + if (':' == app->argv[i][strlen(app->argv[i]) - 1]) { + asprintf(&value, "%s%s", app->argv[i], opal_install_dirs.libdir); + } else { + asprintf(&value, "%s:%s", app->argv[i], opal_install_dirs.libdir); + } + free(app->argv[i]); + app->argv[i] = value; + } + } + } + if (!found) { + /* need to add it right after the java command */ + asprintf(&value, "-Djava.library.path=%s", opal_install_dirs.libdir); + opal_argv_insert_element(&app->argv, 1, value); + free(value); + } + + /* see if we were given a class path */ + found = false; + for (i=0; NULL != app->argv[i]; i++) { + if (NULL != strstr(app->argv[i], "cp") || + NULL != strstr(app->argv[i], "classpath")) { + /* yep - but does it include the path to the mpi libs? */ + found = true; + if (NULL == strstr(app->argv[i+1], "mpi.jar")) { + /* nope - need to add it */ + if (':' == app->argv[i+1][strlen(app->argv[i+1]) - 1]) { + asprintf(&value, "%s%s/mpi.jar", app->argv[i+1], opal_install_dirs.libdir); + } else { + asprintf(&value, "%s:%s/mpi.jar", app->argv[i+1], opal_install_dirs.libdir); + } + free(app->argv[i+1]); + app->argv[i+1] = value; + } + break; + } + } + if (!found) { + /* check to see if CLASSPATH is in the environment */ + for (i=0; NULL != environ[i]; i++) { + if (0 == strncmp(environ[i], "CLASSPATH", strlen("CLASSPATH"))) { + /* check if mpi.jar is present */ + if (NULL != strstr(environ[i], "mpi.jar")) { + /* yes - just add the envar to the argv in the + * right format + */ + value = strchr(environ[i], '='); + ++value; /* step over the = */ + opal_argv_insert_element(&app->argv, 1, value); + opal_argv_insert_element(&app->argv, 1, "-cp"); + } else { + /* need to add it */ + value = strchr(environ[i], '='); + ++value; /* step over the = */ + if (':' == value[strlen(value) - 1]) { + asprintf(&param, "%s%s/mpi.jar", value, opal_install_dirs.libdir); + } else { + asprintf(&param, "%s:%s/mpi.jar", value, opal_install_dirs.libdir); + } + opal_argv_insert_element(&app->argv, 1, param); + opal_argv_insert_element(&app->argv, 1, "-cp"); + free(param); + } + found = true; + break; + } + } + if (!found) { + /* need to add it right after the java command - have + * to include the current directory and trust that + * the user set cwd if necessary + */ + asprintf(&value, ".:%s/mpi.jar", opal_install_dirs.libdir); + opal_argv_insert_element(&app->argv, 1, value); + free(value); + opal_argv_insert_element(&app->argv, 1, "-cp"); + } + } + } + if (orterun_globals.verbose) { + value = opal_argv_join(app->argv, ' '); + opal_output(0, "DONE PARSING APP: %s", value); + free(value); + } + *app_ptr = app; app = NULL; *made_app = true;
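The net effect of the logic above, assuming no -cp/-classpath argument, no CLASSPATH in the environment, and a hypothetical installation libdir of /opt/openmpi/lib: a user command line of

    mpirun -np 4 java MyApp

is launched as though it had been written

    mpirun -np 4 java -cp .:/opt/openmpi/lib/mpi.jar -Djava.library.path=/opt/openmpi/lib MyApp

since create_app() inserts the library path immediately after the "java" token and then pushes the class path arguments in front of it.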