
Roll in Java bindings per telecon discussion. Man pages still under revision

This commit was SVN r25973.
Ralph Castain 2012-02-20 22:12:43 +00:00
parent bcd2c88274
commit 47c64ec837
87 changed files with 13506 additions and 32 deletions


@ -10,6 +10,7 @@
# Copyright (c) 2004-2005 The Regents of the University of California.
# All rights reserved.
# Copyright (c) 2006-2010 Cisco Systems, Inc. All rights reserved.
# Copyright (c) 2012 Los Alamos National Security, Inc. All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
@ -18,7 +19,8 @@
#
SUBDIRS = config contrib $(MCA_PROJECT_SUBDIRS) test
EXTRA_DIST = README INSTALL VERSION Doxyfile LICENSE autogen.pl autogen.sh CMakeLists.txt README.WINDOWS.txt
EXTRA_DIST = README INSTALL VERSION Doxyfile LICENSE autogen.pl autogen.sh \
CMakeLists.txt README.WINDOWS.txt README.JAVA.txt
include examples/Makefile.include

README.JAVA.txt (new file, 99 lines)

@ -0,0 +1,99 @@
Feb 10, 2012
---------------
***************************************************************************
IMPORTANT NOTE
JAVA BINDINGS ARE PROVIDED ON A "PROVISIONAL" BASIS - I.E., THEY ARE NOT
PART OF THE CURRENT OR PROPOSED MPI-3 STANDARDS. THUS, INCLUSION OF
JAVA SUPPORT IS NOT REQUIRED BY THE STANDARD. CONTINUED INCLUSION
OF THE JAVA BINDINGS IS CONTINGENT UPON ACTIVE USER INTEREST AND
CONTINUED DEVELOPER SUPPORT.
***************************************************************************
This version of Open MPI provides support for Java-based
MPI applications. At the time of this writing, not all MPI functions
are supported. However, work on extending the Java bindings to
provide full MPI coverage is underway.
The rest of this document provides step-by-step instructions on
building OMPI with Java bindings, and compiling and running
Java-based MPI applications.
============================================================================
Building Java Bindings
If this software was obtained as a developer-level
checkout as opposed to a tarball, you will need to start your build by
running ./autogen.pl. This will also require that you have a fairly
recent version of autotools on your system - see the HACKING file for
details.
Java support requires that Open MPI be built with shared libraries
(i.e., --enable-shared); any additional options are fine and will not
conflict. Note that this is the default for Open MPI, so you don't
have to explicitly add the option. The Java bindings will build only
if --enable-mpi-java is specified, and a JDK is found in a typical
system default location.
If the JDK is not in a place where we automatically find it, you can
specify the location. For example, this is required on the Mac
platform, as the JDK headers are located in a non-standard location. Two
options are available for this purpose:
--with-jdk-bindir=<foo> - the location of javac and javah
--with-jdk-headers=<bar> - the directory containing jni.h
For simplicity, typical configurations are provided in platform files
under contrib/platform/hadoop. These will meet the needs of most
users, or at least provide a starting point for your own custom
configuration.
In summary, therefore, you can configure the system using the
following Java-related options:
./configure --with-platform=contrib/platform/hadoop/<your-platform>
...
or
./configure --enable-mpi-java --with-jdk-bindir=<foo>
--with-jdk-headers=<bar> ...
or simply
./configure --enable-mpi-java ...
if the JDK is in a "standard" place that we can automatically find.
----------------------------------------------------------------------------
Running Java Applications
For convenience, the "mpijavac" wrapper compiler has been provided for
compiling Java-based MPI applications. It ensures that all required MPI
libraries and class paths are defined. You can see the actual command
line using the --showme option, if you are interested.
Once your application has been compiled, you can run it with the
standard "mpirun" command line:
mpirun <options> java <your-java-options> <my-app>
For convenience, mpirun has been updated to detect the "java" command
and ensure that the required MPI libraries and class paths are defined
to support execution. You therefore do NOT need to specify the Java
library path to the MPI installation, nor the MPI classpath. Any class
path definitions required for your application should be specified
either on the command line or via the CLASSPATH environment
variable. Note that the local directory will be added to the class
path if nothing is specified.
As always, the "java" executable, all required libraries, and your application classes
must be available on all nodes.
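For example, to compile and run the Hello.java program from the examples/
directory on two processes (a minimal sketch; adjust the process count and
class name for your own application):

mpijavac Hello.java
mpirun -np 2 java Hello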
----------------------------------------------------------------------------
If you have any problems, or find any bugs, please feel free to report
them to the Open MPI users mailing list (see
http://www.open-mpi.org/community/lists/ompi.php).


@ -94,6 +94,7 @@ libmpi_f77_so_version=0:0:0
libmpi_f90_so_version=0:0:0
libopen_rte_so_version=0:0:0
libopen_pal_so_version=0:0:0
libmpi_java_so_version=0:0:0
# "Common" components install standalone libraries that are run-time
# linked by one or more components. So they need to be versioned as


@ -123,6 +123,7 @@ m4_ifdef([project_ompi],
AC_SUBST(libmpi_cxx_so_version)
AC_SUBST(libmpi_f77_so_version)
AC_SUBST(libmpi_f90_so_version)
AC_SUBST(libmpi_java_so_version)
# It's icky that we have to hard-code the names of the
# common components here. :-( This could probably be done
# transparently by adding some intelligence in autogen.sh
@ -551,6 +552,15 @@ OPAL_CHECK_ATTRIBUTES
OPAL_CHECK_COMPILER_VERSION_ID
##################################
# Java compiler characteristics
##################################
# We don't need Java unless we're building Open MPI; ORTE and OPAL do
# not use Java at all
m4_ifdef([project_ompi], [OMPI_SETUP_JAVA])
##################################
# Assembler Configuration
##################################
@ -594,7 +604,8 @@ AC_CHECK_HEADERS([alloca.h aio.h arpa/inet.h dirent.h \
sys/types.h sys/uio.h net/uio.h sys/utsname.h sys/vfs.h sys/wait.h syslog.h \
time.h termios.h ulimit.h unistd.h util.h utmp.h malloc.h \
ifaddrs.h sys/sysctl.h crt_externs.h regex.h signal.h \
ioLib.h sockLib.h hostLib.h shlwapi.h sys/synch.h limits.h db.h ndbm.h])
ioLib.h sockLib.h hostLib.h shlwapi.h sys/synch.h limits.h db.h ndbm.h \
TargetConditionals.h])
# Needed to work around Darwin requiring sys/socket.h for
# net/if.h


@ -128,15 +128,13 @@ EXTRA_DIST = \
platform/cisco/macosx-dynamic.conf \
platform/cisco/linux \
platform/cisco/linux.conf \
platform/cisco/ebuild/hlfr \
platform/cisco/ebuild/hlfr.conf \
platform/cisco/ebuild/ludd \
platform/cisco/ebuild/ludd.conf \
platform/cisco/ebuild/native \
platform/cisco/ebuild/native.conf \
platform/ibm/debug-ppc32-gcc \
platform/ibm/debug-ppc64-gcc \
platform/ibm/optimized-ppc32-gcc \
platform/ibm/optimized-ppc64-gcc
platform/ibm/optimized-ppc64-gcc \
platform/hadoop/linux \
platform/hadoop/linux.conf \
platform/hadoop/mac \
platform/hadoop/mac.conf
dist_pkgdata_DATA = openmpi-valgrind.supp


@ -0,0 +1,34 @@
enable_opal_multi_threads=no
enable_ft_thread=no
enable_mem_debug=no
enable_mem_profile=no
enable_debug_symbols=yes
enable_binaries=yes
enable_heterogeneous=no
enable_picky=yes
enable_debug=yes
enable_shared=yes
enable_static=no
enable_memchecker=no
enable_ipv6=yes
enable_mpi_f77=no
enable_mpi_f90=no
enable_mpi_cxx=no
enable_mpi_cxx_seek=no
enable_cxx_exceptions=no
enable_mpi_java=yes
enable_per_user_config_files=no
enable_script_wrapper_compilers=no
enable_orterun_prefix_by_default=yes
enable_io_romio=no
enable_vt=no
enable_mca_no_build=carto,crs,memchecker,snapc,crcp,paffinity,filem,sstore,compress,rml-ftrm
with_memory_manager=no
with_tm=no
with_devel_headers=yes
with_portals=no
with_valgrind=no
with_slurm=/opt/slurm/2.1.0
with_openib=no
with_jdk_bindir=/usr/lib/jvm/java-1.6.0/bin
with_jdk_headers=/usr/lib/jvm/java-1.6.0/include


@ -0,0 +1,59 @@
#
# Copyright (c) 2009 Cisco Systems, Inc. All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
# This is the default system-wide MCA parameters defaults file.
# Specifically, the MCA parameter "mca_param_files" defaults to a
# value of
# "$HOME/.openmpi/mca-params.conf:$sysconf/openmpi-mca-params.conf"
# (this file is the latter of the two). So if the default value of
# mca_param_files is not changed, this file is used to set system-wide
# MCA parameters. This file can therefore be used to set system-wide
# default MCA parameters for all users. Of course, users can override
# these values if they want, but this file is an excellent location
# for setting system-specific MCA parameters for those users who don't
# know / care enough to investigate the proper values for them.
# Note that this file is only applicable where it is visible (in a
# filesystem sense). Specifically, MPI processes each read this file
# during their startup to determine what default values for MCA
# parameters should be used. mpirun does not bundle up the values in
# this file from the node where it was run and send them to all nodes;
# the default value decisions are effectively distributed. Hence,
# these values are only applicable on nodes that "see" this file. If
# $sysconf is a directory on a local disk, it is likely that changes
# to this file will need to be propagated to other nodes. If $sysconf
# is a directory that is shared via a networked filesystem, changes to
# this file will be visible to all nodes that share this $sysconf.
# The format is straightforward: one per line, mca_param_name =
# rvalue. Quoting is ignored (so if you use quotes or escape
# characters, they'll be included as part of the value). For example:
# Disable run-time MPI parameter checking
# mpi_param_check = 0
# Note that the value "~/" will be expanded to the current user's home
# directory. For example:
# Change component loading path
# component_path = /usr/local/lib/openmpi:~/my_openmpi_components
# See "ompi_info --param all all" for a full listing of Open MPI MCA
# parameters available and their default values.
#
# Basic behavior to smooth startup
orte_abort_timeout = 10
opal_set_max_sys_limits = 1
## Add the interface for out-of-band communication
## and set it up
#oob_tcp_listen_mode = listen_thread
oob_tcp_sndbuf = 32768
oob_tcp_rcvbuf = 32768


@ -0,0 +1,27 @@
enable_opal_multi_threads=no
enable_ft_thread=no
enable_mem_debug=no
enable_mem_profile=no
enable_debug_symbols=yes
enable_binaries=yes
enable_heterogeneous=no
enable_picky=yes
enable_debug=yes
enable_shared=yes
enable_static=no
enable_memchecker=no
enable_ipv6=no
enable_mpi_f77=no
enable_mpi_f90=no
enable_mpi_cxx=no
enable_mpi_cxx_seek=no
enable_cxx_exceptions=no
enable_mpi_java=yes
enable_io_romio=no
enable_vt=no
enable_mca_no_build=carto,crs,memchecker,snapc,crcp,paffinity,filem,sstore,compress,rml-ftrm
with_memory_manager=no
with_tm=no
with_devel_headers=yes
with_portals=no
with_valgrind=no


@ -0,0 +1,59 @@
#
# Copyright (c) 2009 Cisco Systems, Inc. All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
# This is the default system-wide MCA parameters defaults file.
# Specifically, the MCA parameter "mca_param_files" defaults to a
# value of
# "$HOME/.openmpi/mca-params.conf:$sysconf/openmpi-mca-params.conf"
# (this file is the latter of the two). So if the default value of
# mca_param_files is not changed, this file is used to set system-wide
# MCA parameters. This file can therefore be used to set system-wide
# default MCA parameters for all users. Of course, users can override
# these values if they want, but this file is an excellent location
# for setting system-specific MCA parameters for those users who don't
# know / care enough to investigate the proper values for them.
# Note that this file is only applicable where it is visible (in a
# filesystem sense). Specifically, MPI processes each read this file
# during their startup to determine what default values for MCA
# parameters should be used. mpirun does not bundle up the values in
# this file from the node where it was run and send them to all nodes;
# the default value decisions are effectively distributed. Hence,
# these values are only applicable on nodes that "see" this file. If
# $sysconf is a directory on a local disk, it is likely that changes
# to this file will need to be propagated to other nodes. If $sysconf
# is a directory that is shared via a networked filesystem, changes to
# this file will be visible to all nodes that share this $sysconf.
# The format is straightforward: one per line, mca_param_name =
# rvalue. Quoting is ignored (so if you use quotes or escape
# characters, they'll be included as part of the value). For example:
# Disable run-time MPI parameter checking
# mpi_param_check = 0
# Note that the value "~/" will be expanded to the current user's home
# directory. For example:
# Change component loading path
# component_path = /usr/local/lib/openmpi:~/my_openmpi_components
# See "ompi_info --param all all" for a full listing of Open MPI MCA
# parameters available and their default values.
#
# Basic behavior to smooth startup
orte_abort_timeout = 10
opal_set_max_sys_limits = 1
## Add the interface for out-of-band communication
## and set it up
#oob_tcp_listen_mode = listen_thread
oob_tcp_sndbuf = 32768
oob_tcp_rcvbuf = 32768


@ -0,0 +1,25 @@
enable_mem_debug=yes
enable_mem_profile=no
enable_debug_symbols=yes
enable_binaries=yes
enable_heterogeneous=no
enable_picky=yes
enable_debug=yes
enable_shared=yes
enable_static=no
enable_io_romio=no
enable_ipv6=no
enable_mpi_f77=no
enable_mpi_f90=no
enable_mpi_cxx=no
enable_mpi_cxx_seek=no
enable_mpi_java=yes
enable_memchecker=no
enable_vt=no
enable_mca_no_build=carto,crs,memchecker,snapc,crcp,paffinity,filem,sstore,compress,rml-ftrm,db,notifier
with_memory_manager=no
with_devel_headers=yes
with_xgrid=no
with_slurm=no
with_jdk_bindir=/usr/bin
with_jdk_headers=/System/Library/Frameworks/JavaVM.framework/Versions/Current/Headers


@ -0,0 +1,69 @@
#
# Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
# University Research and Technology
# Corporation. All rights reserved.
# Copyright (c) 2004-2005 The University of Tennessee and The University
# of Tennessee Research Foundation. All rights
# reserved.
# Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
# University of Stuttgart. All rights reserved.
# Copyright (c) 2004-2005 The Regents of the University of California.
# All rights reserved.
# Copyright (c) 2006-2011 Cisco Systems, Inc. All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
# This is the default system-wide MCA parameters defaults file.
# Specifically, the MCA parameter "mca_param_files" defaults to a
# value of
# "$HOME/.openmpi/mca-params.conf:$sysconf/openmpi-mca-params.conf"
# (this file is the latter of the two). So if the default value of
# mca_param_files is not changed, this file is used to set system-wide
# MCA parameters. This file can therefore be used to set system-wide
# default MCA parameters for all users. Of course, users can override
# these values if they want, but this file is an excellent location
# for setting system-specific MCA parameters for those users who don't
# know / care enough to investigate the proper values for them.
# Note that this file is only applicable where it is visible (in a
# filesystem sense). Specifically, MPI processes each read this file
# during their startup to determine what default values for MCA
# parameters should be used. mpirun does not bundle up the values in
# this file from the node where it was run and send them to all nodes;
# the default value decisions are effectively distributed. Hence,
# these values are only applicable on nodes that "see" this file. If
# $sysconf is a directory on a local disk, it is likely that changes
# to this file will need to be propagated to other nodes. If $sysconf
# is a directory that is shared via a networked filesystem, changes to
# this file will be visible to all nodes that share this $sysconf.
# The format is straightforward: one per line, mca_param_name =
# rvalue. Quoting is ignored (so if you use quotes or escape
# characters, they'll be included as part of the value). For example:
# Disable run-time MPI parameter checking
# mpi_param_check = 0
# Note that the value "~/" will be expanded to the current user's home
# directory. For example:
# Change component loading path
# component_path = /usr/local/lib/openmpi:~/my_openmpi_components
# See "ompi_info --param all all" for a full listing of Open MPI MCA
# parameters available and their default values.
#
# Basic behavior to smooth startup
mca_component_show_load_errors = 0
orte_abort_timeout = 10
## Add the interface for out-of-band communication
## and set it up
oob_tcp_listen_mode = listen_thread
oob_tcp_sndbuf = 32768
oob_tcp_rcvbuf = 32768

examples/Hello.java (new file, 39 lines)

@ -0,0 +1,39 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
* Author of revised version: Franklyn Pinedo
*
* Adapted from Source Code in C of Tutorial/User's Guide for MPI by
* Peter Pacheco.
*/
/*
* Copyright (c) 2011 Cisco Systems, Inc. All rights reserved.
*
*/
import mpi.*;
class Hello {
static public void main(String[] args) throws MPIException {
MPI.Init(args);
int myrank = MPI.COMM_WORLD.Rank();
int size = MPI.COMM_WORLD.Size() ;
System.out.println("Hello world from rank " + myrank + " of " + size);
MPI.Finalize();
}
}


@ -10,6 +10,7 @@
# Copyright (c) 2004-2005 The Regents of the University of California.
# All rights reserved.
# Copyright (c) 2006-2007 Sun Microsystems, Inc. All rights reserved.
# Copyright (c) 2012 Los Alamos National Security, Inc. All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
@ -26,6 +27,7 @@ CXX = mpic++
CCC = mpic++
F77 = mpif77
FC = mpif90
JAVAC = mpijavac
# Using -g is not necessary, but it is helpful for example programs,
# especially if users want to examine them with debuggers. Note that
@ -40,8 +42,8 @@ FCFLAGS = -g
# Example programs to build
EXAMPLES = hello_c hello_cxx hello_f77 hello_f90 \
ring_c ring_cxx ring_f77 ring_f90 connectivity_c
EXAMPLES = hello_c hello_cxx hello_f77 hello_f90 Hello.class \
ring_c ring_cxx ring_f77 ring_f90 connectivity_c Ring.class
# Default target. Always build the C example. Only build the others
# if Open MPI was build with the relevant language bindings.
@ -56,6 +58,12 @@ all: hello_c ring_c connectivity_c
@ if test "`ompi_info --parsable | grep bindings:f90:yes`" != ""; then \
$(MAKE) hello_f90 ring_f90; \
fi
@ if test "`ompi_info --parsable | grep bindings:java:yes`" != ""; then \
$(MAKE) Hello.class; \
fi
@ if test "`ompi_info --parsable | grep bindings:java:yes`" != ""; then \
$(MAKE) Ring.class; \
fi
# The usual "clean" target
@ -63,7 +71,7 @@ all: hello_c ring_c connectivity_c
clean:
rm -f $(EXAMPLES) *~ *.o
# Don't rely on default rules for the fortran examples
# Don't rely on default rules for the fortran and Java examples
hello_f77: hello_f77.f
$(F77) $(F77FLAGS) $^ -o $@
@ -75,3 +83,10 @@ hello_f90: hello_f90.f90
ring_f90: ring_f90.f90
$(FC) $(FCFLAGS) $^ -o $@
Hello.class: Hello.java
$(JAVAC) Hello.java
Ring.class: Ring.java
$(JAVAC) Ring.java


@ -12,6 +12,7 @@
# All rights reserved.
# Copyright (c) 2006 Cisco Systems, Inc. All rights reserved.
# Copyright (c) 2007 Sun Microsystems, Inc. All rights reserved.
# Copyright (c) 2012 Los Alamos National Security, Inc. All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
@ -39,4 +40,6 @@ EXTRA_DIST += \
examples/ring_cxx.cc \
examples/ring_f77.f \
examples/ring_f90.f90 \
examples/connectivity_c.c
examples/connectivity_c.c \
examples/Hello.java \
examples/Ring.java


@ -17,19 +17,21 @@ not enough. Excellent MPI tutorials are available here:
Get a free account and login; you can then browse to the list of
available courses. Look for the ones with "MPI" in the title.
There are 2 MPI examples in this directory, each in four languages:
There are 2 MPI examples in this directory, each in five languages:
- Hello world
C: hello_c.c
C++: hello_cxx.cc
F77: hello_f77.f
F90: hello_f90.f90
C: hello_c.c
C++: hello_cxx.cc
F77: hello_f77.f
F90: hello_f90.f90
Java: Hello.java
- Send a trivial message around in a ring
C: ring_c.c
C++: ring_cxx.cc
F77: ring_f77.f
F90: ring_f90.f90
C: ring_c.c
C++: ring_cxx.cc
F77: ring_f77.f
F90: ring_f90.f90
Java: Ring.java
- Test the connectivity between all processes
C: connectivity_c.c

examples/Ring.java (new file, 75 lines)

@ -0,0 +1,75 @@
/*
* Copyright (c) 2011 Cisco Systems, Inc. All rights reserved.
*
* Simple ring test program
*/
import mpi.* ;
class Ring {
static public void main(String[] args) throws MPIException {
MPI.Init(args) ;
int source; // Rank of sender
int dest; // Rank of receiver
int tag=50; // Tag for messages
int next;
int prev;
int message[] = new int [1];
int myrank = MPI.COMM_WORLD.Rank() ;
int size = MPI.COMM_WORLD.Size() ;
/* Calculate the rank of the next process in the ring. Use the
modulus operator so that the last process "wraps around" to
rank zero. */
next = (myrank + 1) % size;
prev = (myrank + size - 1) % size;
/* If we are the "master" process (i.e., MPI_COMM_WORLD rank 0),
put the number of times to go around the ring in the
message. */
if (0 == myrank) {
message[0] = 10;
System.out.println("Process 0 sending " + message[0] + " to rank " + next + " (" + size + " processes in ring)");
MPI.COMM_WORLD.Send(message, 0, 1, MPI.INT, next, tag);
}
/* Pass the message around the ring. The exit mechanism works as
follows: the message (a positive integer) is passed around the
ring. Each time it passes rank 0, it is decremented. When
each process receives a message containing a 0 value, it
passes the message on to the next process and then quits. By
passing the 0 message first, every process gets the 0 message
and can quit normally. */
while (true) {
MPI.COMM_WORLD.Recv(message, 0, 1, MPI.INT, prev, tag);
if (0 == myrank) {
--message[0];
System.out.println("Process 0 decremented value: " + message[0]);
}
MPI.COMM_WORLD.Send(message, 0, 1, MPI.INT, next, tag);
if (0 == message[0]) {
System.out.println("Process " + myrank + " exiting");
break;
}
}
/* The last process does one extra send to process 0, which needs
to be received before the program can exit */
if (0 == myrank) {
MPI.COMM_WORLD.Recv(message, 0, 1, MPI.INT, prev, tag);
}
MPI.Finalize();
}
}


@ -80,6 +80,7 @@ SUBDIRS = \
mpi/cxx \
mpi/f77 \
mpi/f90 \
mpi/java \
$(MCA_ompi_FRAMEWORK_COMPONENT_DSO_SUBDIRS) \
$(OMPI_CONTRIB_SUBDIRS)
@ -92,6 +93,7 @@ DIST_SUBDIRS = \
mpi/cxx \
mpi/f77 \
mpi/f90 \
mpi/java \
$(OMPI_MPIEXT_ALL_SUBDIRS) \
$(MCA_ompi_FRAMEWORKS_SUBDIRS) \
$(MCA_ompi_FRAMEWORK_COMPONENT_ALL_SUBDIRS) \


@ -40,6 +40,7 @@ AC_DEFUN([OMPI_CONFIG_FILES],[
ompi/tools/wrappers/ompi-cxx.pc
ompi/tools/wrappers/ompi-f77.pc
ompi/tools/wrappers/ompi-f90.pc
ompi/tools/wrappers/mpijavac.pl
ompi/tools/ortetools/Makefile
ompi/tools/ompi-server/Makefile
])


@ -0,0 +1,276 @@
dnl -*- shell-script -*-
dnl
dnl Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
dnl University Research and Technology
dnl Corporation. All rights reserved.
dnl Copyright (c) 2004-2006 The University of Tennessee and The University
dnl of Tennessee Research Foundation. All rights
dnl reserved.
dnl Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
dnl University of Stuttgart. All rights reserved.
dnl Copyright (c) 2004-2006 The Regents of the University of California.
dnl All rights reserved.
dnl Copyright (c) 2006-2012 Los Alamos National Security, LLC. All rights
dnl reserved.
dnl Copyright (c) 2007-2009 Sun Microsystems, Inc. All rights reserved.
dnl Copyright (c) 2008-2012 Cisco Systems, Inc. All rights reserved.
dnl $COPYRIGHT$
dnl
dnl Additional copyrights may follow
dnl
dnl $HEADER$
dnl
# This macro is necessary to get the title to be displayed first. :-)
AC_DEFUN([OMPI_SETUP_JAVA_BANNER],[
ompi_show_subtitle "Java compiler"
])
# OMPI_SETUP_JAVA()
# ----------------
# Do everything required to set up the Java compiler. Safe to AC_REQUIRE
# this macro.
AC_DEFUN([OMPI_SETUP_JAVA],[
AC_REQUIRE([OMPI_SETUP_JAVA_BANNER])
AC_MSG_CHECKING([if want Java bindings])
AC_ARG_ENABLE(mpi-java,
AC_HELP_STRING([--enable-mpi-java],
[enable Java MPI bindings (default: enabled)]))
# Only build the Java bindings if requested
if test "$enable_mpi_java" = "yes"; then
AC_MSG_RESULT([yes])
WANT_MPI_JAVA_SUPPORT=1
AC_MSG_CHECKING([if shared libraries are enabled])
AS_IF([test "$enable_shared" != "yes"],
[AC_MSG_RESULT([no])
AS_IF([test "$enable_mpi_java" = "yes"],
[AC_MSG_WARN([Java bindings cannot be built without shared libraries])
AC_MSG_ERROR([Cannot continue])],
[AC_MSG_WARN([Java bindings will not build as they require --enable-shared])
WANT_MPI_JAVA_SUPPORT=0])],
[AC_MSG_RESULT([yes])])
else
AC_MSG_RESULT([no])
WANT_MPI_JAVA_SUPPORT=0
fi
AC_DEFINE_UNQUOTED([OMPI_WANT_JAVA_BINDINGS], [$WANT_MPI_JAVA_SUPPORT],
[do we want java mpi bindings])
AM_CONDITIONAL(OMPI_WANT_JAVA_BINDINGS, test "$WANT_MPI_JAVA_SUPPORT" = "1")
AC_ARG_WITH(jdk-dir,
AC_HELP_STRING([--with-jdk-dir(=DIR)],
[Location of the JDK header directory. If you use this option, do not specify --with-jdk-bindir or --with-jdk-headers.]))
AC_ARG_WITH(jdk-bindir,
AC_HELP_STRING([--with-jdk-bindir(=DIR)],
[Location of the JDK bin directory. If you use this option, you must also use --with-jdk-headers (and you must NOT use --with-jdk-dir)]))
AC_ARG_WITH(jdk-headers,
AC_HELP_STRING([--with-jdk-headers(=DIR)],
[Location of the JDK header directory. If you use this option, you must also use --with-jdk-bindir (and you must NOT use --with-jdk-dir)]))
# Check for bozo case: ensure a directory was specified
AS_IF([test "$with_jdk_dir" = "yes" -o "$with_jdk_dir" = "no"],
[AC_MSG_WARN([Must specify a directory name for --with-jdk-dir])
AC_MSG_ERROR([Cannot continue])])
AS_IF([test "$with_jdk_bindir" = "yes" -o "$with_jdk_bindir" = "no"],
[AC_MSG_WARN([Must specify a directory name for --with-jdk-bindir])
AC_MSG_ERROR([Cannot continue])])
AS_IF([test "$with_jdk_headers" = "yes" -o "$with_jdk_headers" = "no"],
[AC_MSG_WARN([Must specify a directory name for --with-jdk-headers])
AC_MSG_ERROR([Cannot continue])])
# Check for bozo case: either specify --with-jdk-dir or
# (--with-jdk-bindir, --with-jdk-headers) -- not both.
bad=0
AS_IF([test -n "$with_jdk_dir" -a -n "$with_jdk_bindir" -o \
-n "$with_jdk_dir" -a -n "$with_jdk_headers"],[bad=1])
AS_IF([test -z "$with_jdk_bindir" -a -n "$with_jdk_headers" -o \
-n "$with_jdk_bindir" -a -z "$with_jdk_headers"],[bad=1])
AS_IF([test "$bad" = "1"],
[AC_MSG_WARN([Specify either --with-jdk-dir alone, or both --with-jdk-bindir and --with-jdk-headers -- not a mixture of the two forms.])
AC_MSG_ERROR([Cannot continue])])
AS_IF([test -n "$with_jdk_dir"],
[with_jdk_bindir=$with_jdk_dir/bin
with_jdk_headers=$with_jdk_dir/include])
##################################################################
# with_jdk_dir can now be ignored; with_jdk_bindir and
# with_jdk_headers will be either empty or have valid values.
##################################################################
# Some java installations are in obscure places. So let's
# hard-code a few of the common ones so that users don't have to
# specify --with-java-<foo>=LONG_ANNOYING_DIRECTORY.
AS_IF([test "$WANT_MPI_JAVA_SUPPORT" = "1" -a -z "$with_jdk_dir" \
-a -z "$with_jdk_dir" -a -z "$with_jdk_bindir"],
[ # OS X Snow Leopard and Lion (10.6 and 10.7 -- did not
# check prior versions)
dir=/System/Library/Frameworks/JavaVM.framework/Versions/Current/Headers
AS_IF([test -d $dir], [with_jdk_headers=$dir
with_jdk_bindir=/usr/bin])
# Various Linux
dir='/usr/lib/jvm/java-*-openjdk-*/include/'
jnih=`ls $dir/jni.h 2>/dev/null | head -n 1`
AS_IF([test -r "$jnih"],
[with_jdk_headers=`dirname $jnih`
OPAL_WHICH([javac], [with_jdk_bindir])
AS_IF([test -n "$with_jdk_bindir"],
[with_jdk_bindir=`dirname $with_jdk_bindir`],
[with_jdk_headers=])],
[dir='/usr/lib/jvm/default-java/include/'
jnih=`ls $dir/jni.h 2>/dev/null | head -n 1`
AS_IF([test -r "$jnih"],
[with_jdk_headers=`dirname $jnih`
OPAL_WHICH([javac], [with_jdk_bindir])
AS_IF([test -n "$with_jdk_bindir"],
[with_jdk_bindir=`dirname $with_jdk_bindir`],
[with_jdk_headers=])])])
# If we think we found them, announce
AS_IF([test -n "$with_jdk_headers" -a "$with_jdk_bindir"],
[AC_MSG_NOTICE([guessing that JDK headers are in $with_jdk_headers])
AC_MSG_NOTICE([guessing that JDK javac is in $with_jdk_bindir])])
])
# Find javac and jni.h
AS_IF([test "$WANT_MPI_JAVA_SUPPORT" = "1"],
[OMPI_CHECK_WITHDIR([jdk-bindir], [$with_jdk_bindir], [javac])
OMPI_CHECK_WITHDIR([jdk-headers], [$with_jdk_headers], [jni.h])])
# Look for various Java-related programs
ompi_java_happy=no
AS_IF([test "$WANT_MPI_JAVA_SUPPORT" = "1"],
[PATH_save=$PATH
AS_IF([test -n "$with_jdk_bindir" -a "$with_jdk_bindir" != "yes" -a "$with_jdk_bindir" != "no"],
[PATH="$PATH:$with_jdk_bindir"])
AC_PATH_PROG(JAVAC, javac)
AC_PATH_PROG(JAVAH, javah)
AC_PATH_PROG(JAR, jar)
PATH=$PATH_save
# Check to see if we have all 3 programs.
AS_IF([test -z "$JAVAC" -o -z "$JAVAH" -o -z "$JAR"],
[ompi_java_happy=no],
[ompi_java_happy=yes])
])
# Look for jni.h
AS_IF([test "$WANT_MPI_JAVA_SUPPORT" = "1" -a "$ompi_java_happy" = "yes"],
[CPPFLAGS_save=$CPPFLAGS
AS_IF([test -n "$with_jdk_headers" -a "$with_jdk_headers" != "yes" -a "$with_jdk_headers" != "no"],
[OMPI_JDK_CPPFLAGS="-I$with_jdk_headers"
# Some flavors of JDK also require -I<blah>/linux.
# See if that's there, and if so, add a -I for that,
# too. Ugh.
AS_IF([test -d "$with_jdk_headers/linux"],
[OMPI_JDK_CPPFLAGS="$OMPI_JDK_CPPFLAGS -I$with_jdk_headers/linux"])
CPPFLAGS="$CPPFLAGS $OMPI_JDK_CPPFLAGS"])
AC_CHECK_HEADER([jni.h], [],
[ompi_java_happy=no])
CPPFLAGS=$CPPFLAGS_save
])
AC_SUBST(OMPI_JDK_CPPFLAGS)
# Check for pinning support
# Uncomment when ready (or delete if we don't want it)
AS_IF([test "$WANT_MPI_JAVA_SUPPORT" = "1" -a "$ompi_java_happy" = "yes"],
[dnl OMPI_JAVA_CHECK_PINNING
echo ======we should check for java pinning support here...
])
# Are we happy?
AS_IF([test "$WANT_MPI_JAVA_SUPPORT" = "1" -a "$ompi_java_happy" = "no"],
[AC_MSG_WARN([Java MPI bindings requested, but unable to find proper support])
AC_MSG_ERROR([Cannot continue])],
[AC_MSG_WARN([Java MPI bindings are provided on a provisional basis - i.e., they are not])
AC_MSG_WARN([part of the current or proposed MPI standard. Continued inclusion of the])
AC_MSG_WARN([Java bindings is contingent upon user interest and developer support])])
AC_CONFIG_FILES([
ompi/mpi/java/Makefile
ompi/mpi/java/java/Makefile
ompi/mpi/java/c/Makefile
])
])
###########################################################################
AC_DEFUN([OMPI_JAVA_CHECK_PINNING],[
###
dnl testing if Java GC supports pinning
###
AC_MSG_CHECKING(whether Java garbage collector supports pinning)
######################
# JMS This has not been touched yet. It needs to be OMPI-ified.
# Change to AC_DEFINE (instead of the AC_SUBST of DEFPINS at the end)
######################
changequote(,)
cat > conftest.java <<END
public class conftest {
public static void main(String [] args) {
System.loadLibrary("conftest") ;
int a [] = new int [100] ;
System.exit(isCopy(a) ? 1 : 0) ;
}
static native boolean isCopy(int [] a) ;
}
END
cat > conftest.c <<END
#include "conftest.h"
int p_xargc ; char **p_xargv ; /* Stop AIX linker complaining */
jboolean JNICALL Java_conftest_isCopy(JNIEnv* env, jclass cls, jintArray a) {
jboolean isCopy ;
(*env)->GetIntArrayElements(env, a, &isCopy) ;
return isCopy ;
}
END
# For AIX shared object generation:
cat > conftest.exp <<END
Java_conftest_isCopy
END
changequote([,])
$JAVA/bin/javac -classpath . conftest.java
$JAVA/bin/javah -classpath . -jni conftest
# Following are hacks... should find cc, etc by autoconf mechanisms
cc -I$JAVA/include -I$JAVA/include/$JOS -c conftest.c
case $target in
*aix* )
cc -G -bE:conftest.exp -o libconftest.so conftest.o
;;
*)
cc $LDFLAG -o libconftest.so conftest.o
;;
esac
if $JAVA/bin/java -cp "." -Djava.library.path="." conftest
then
GC_SUPPORTS_PINNING=yes
else
GC_SUPPORTS_PINNING=no
fi
AC_MSG_RESULT($GC_SUPPORTS_PINNING)
rm -f conftest.* libconftest.so
if test "$GC_SUPPORTS_PINNING" = "yes"
then
DEFPINS=-DGC_DOES_PINNING
fi
AC_SUBST(DEFPINS)
])

ompi/mpi/java/License.txt (new file, 30 lines)

@ -0,0 +1,30 @@
mpiJava - A Java Interface to MPI
---------------------------------
Copyright 2003
Bryan Carpenter, Sung Hoon Ko, Sang Boem Lim
Pervasive Technology Labs, Indiana University
email {shko,slim,dbc}@grids.ucs.indiana.edu
Xinying Li
Syracuse University
Mark Baker
CSM, University of Portsmouth
email mark.baker@computer.org
(Bugfixes/Additions, CMake based configure/build)
Blasius Czink
HLRS, University of Stuttgart
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

ompi/mpi/java/Makefile.am (new file, 13 lines)

@ -0,0 +1,13 @@
# -*- makefile -*-
#
# Copyright (c) 2011 Cisco Systems, Inc. All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
SUBDIRS = java c
EXTRA_DIST = README.txt License.txt

ompi/mpi/java/README.txt (new file, 873 lines)

@ -0,0 +1,873 @@
This set of Java bindings was originally derived from mpiJava v1.2.7:
http://sourceforge.net/projects/mpijava/
The 1.2.7 tarball was uploaded to SourceForge on April 8, 2011
(although the README in the 1.2.7 tarball claims that it is version
1.2.5, and is dated January 2003).
There are home pages on the internet for "mpiJava" that might well be
ancestors of this project:
http://www.hpjava.org/mpiJava.html
http://aspen.ucs.indiana.edu/pss/HPJava/mpiJava.html
The source code and configure/build system have been heavily modified
to be in Open MPI.
The README file from the original 1.2.7 tarball is included below.
The License.txt file from the 1.2.7 tarball is included in this directory.
=========================================================================
mpiJava - A Java Interface to MPI
---------------------------------
Version 1.2.5, January 2003
Bryan Carpenter, Sung Hoon Ko, Sang Boem Lim
Pervasive Technology Labs, Indiana University
email {shko,slim,dbc}@grids.ucs.indiana.edu
Xinying Li
Syracuse University
Mark Baker
CSM, University of Portsmouth
email mark.baker@computer.org
This package provides an object-oriented Java interface to the Message
Passing Interface (MPI) standard, for use on parallel or distributed
computing platforms. The release includes the Java Native Interface
(JNI) C stubs that bind the Java interface to an underlying native MPI
C interface (which must be obtained and installed independently). The
release also includes a comprehensive test suite for the Java
interface, created by translating the IBM MPI test suite to Java.
It includes some simple examples and demos.
The Java API is defined in the document "mpiJava 1.2: API specification"
in the `doc/' directory.
Platforms
---------
We have tested this release on the platforms listed below.
For the marked configurations, please note remarks in later sections
of this README. Some platforms need special configuration options for
the native MPI. All test cases and examples in this release have been run
on all platforms. Except for a few occasions where programs completed but
terminated awkwardly, no failures were observed, provided the recommended
configuration was followed.
Operating System Java Native MPI
Redhat Linux 7.3 Sun SDK 1.4.1 MPICH 1.2.5
Redhat Linux 7.3 Sun SDK 1.4.1 LAM 6.5.8
Redhat Linux 7.3 IBM JDK 1.4.0 MPICH 1.2.4 (*)
Redhat Linux 7.3 IBM JDK 1.4.0 LAM 6.5.8 (*)
SunOS 5.8 Sun SDK 1.4.1 SunHPC-MPI 4
SunOS 5.8 Sun SDK 1.4.1 MPICH 1.2.5 (*)
SunOS 5.8 Sun SDK 1.4.1 LAM 6.5.8 (*)
AIX 3.4 IBM JDK 1.3.0 IBM MPI (SP2/3)
The software was also tested on the following platform, but occasional
intermittent failures were observed:
AIX 3.4 IBM JDK 1.3.0 MPICH 1.2.5 (*)
See the file TEST_REPORTS for more information.
(*): Note the remarks in the section below on MPI configuration options.
Of course it is possible to build mpiJava on other platforms, but expect
to do some trouble-shooting.
At various times we have successfully tested versions of this software
using combinations of systems including:
1. Operating systems:
Sun Machines running SunOS 5.4 (Solaris2.5.1)
Redhat Linux 7.3
AIX
WinTel NT 4 (SP3)
SGI Challenge Machines running IRIX 6.2
2. Java Development environments:
Sun SDK 1.4(Linux, Solaris)
IBM Developer Kit for Linux, J2RE 1.4.0
AIX JDK 1.3
Java JDK 1.1.x(SGI)
3. Native MPI installations:
MPICH 1.2.5
SunHPC-MPI(4.0)
IBM POE
WMPI 1.1
Users have reported ports to other platforms (for Alpha processors, see
the message at the end of this file).
Updates and further information about mpiJava can be found on the home
page, `www.hpjava.org/mpiJava.html'.
If you find bugs, have comments, or want further information about
mpiJava or the team of developers, email `dbcarpen@indiana.edu' or
`sblim@indiana.edu'.
Installation
------------
The following instructions apply to UNIX.
[Some earlier releases of mpiJava have been tested on Windows NT. This
release has *not*. For old instructions on installation under Windows NT,
see the file `NT_INSTALL.TXT'. Because recent releases of mpiJava haven't
been tested on that platform, those instructions will certainly need
updating---we leave this option available for "experts" only.]
1. Install your preferred Java programming environment.
For Solaris and Linux, we strongly recommend using JDK 1.4 or later.
This release provides a signal-chaining feature that can be used to
avoid some unpleasant non-deterministic bugs encountered with previous
releases of mpiJava.
After Java JDK is installed successfully, you should add the Java JDK
`bin' directory to your path setting, so that the `mpiJava/configure'
script can find the `java', `javac', and `javah' commands.
2. Install your preferred MPI software.
Add the MPI `bin' directory to your path setting. Test the MPI
installation before attempting to install mpiJava!
(See the "Recommended MPI configuration options" sections below,
for some recommended options when installing MPI.)
3. Now, you are ready to install the mpiJava interface.
step 1. Unpack the software, eg
gunzip -c mpiJava-x.x.x.tar.gz | tar -xvf -
A subdirectory `mpiJava/' is created.
step 2. Go to the `mpiJava/' directory. Configure the software for
your platform:
./configure
You may specify various standard options to the configure
process.
Try
./configure --help
for various options.
The default MPI is MPICH. Use
./configure --with-MPI=lam
for LAM. Use
./configure --with-MPI=sunhpc
for SunHPC. Use
./configure --with-MPI=sp2
for AIX + POE.
step 3. Build (compile) the software:
make
After successful compilation, the makefile will put the generated class
files in directory `lib/classes/mpi/', and also place a native dynamic
library in directory `lib/'. Now:
Add the directory `<mpiJava-pathname>/src/scripts' to your path environment
variable.
Add the directory `<mpiJava-pathname>/lib/classes' to your CLASSPATH
environment variable.
Add the directory `<mpiJava-pathname>/lib' to your LD_LIBRARY_PATH
(Linux, Solaris, etc) or LIBPATH (AIX) environment variable.
(Some of these variables may be unnecessary if you are using the
`prunjava' script.)
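For example, with a Bourne-style shell (the installation pathname here
is hypothetical):

export PATH=$PATH:$HOME/mpiJava/src/scripts
export CLASSPATH=$CLASSPATH:$HOME/mpiJava/lib/classes
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$HOME/mpiJava/lib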
step 4. Test the installation:
make check
NOTE: Several of the scripts in this release assume your target
machines share user directories (presumably through NFS or equivalent),
and have compatible system commands *and library files* installed on
all nodes (e.g. in `/usr/lib'). Although it is possible to adapt the
basic mpiJava software to more heterogeneous situations, you will need
to do more work!
Using the software
------------------
If everything goes well, you can compile and run the test programs by
issuing the command
make check
in the mpiJava installation directory.
An example of how to compile and run a program:
javac Life.java
prunjava 4 Life
The `prunjava' script is a wrapper for the various MPI run commands.
The first argument is the number of processors on which the program will be
executed. A list of available host computers may be given in an
MPICH-style `machines' file in the local directory.
The `prunjava' script is provided mainly for purposes of testing. It is
not very general and in real situations you will often have to modify
this script, or start the program directly using the native MPI run
commands to achieve the effect you need.
With MPICH on some platforms you may be able to run mpiJava programs by
mpirun <mpirun options> java <java command arguments>
With this approach, you may be responsible for ensuring the remote
environment is set up correctly, e.g. by setting appropriate class
paths and library paths in your `.cshrc', `.bashrc', etc, on the remote
machines (the `prunjava' script adopts a different approach: it
dynamically creates a script that sets up the required environment and
invokes the `java' command. This script is run across nodes using
`mpirun'.)
On SP2 you might run mpiJava by
poe java <java command arguments> <poe options>
Some MPI environments (SunHPC 4.0) may require that the native MPI library
be preloaded into the executable command---it may not be possible to
load the native `libmpi' with the Java `System.loadLibrary()' method.
Preloading can be achieved in Solaris or Linux by setting the LD_PRELOAD
environment variable. So for example with SunHPC you may start mpiJava by:
LD_PRELOAD=/opt/SUNWhpc/lib/libmpi.so
export LD_PRELOAD
mprun <mprun options> java <java command arguments>
(It is best to restrict the scope of the LD_PRELOAD variable
by defining it only within a script, like our `prunjava'. Otherwise the
library may get loaded into *every* executable you run!
For reliable operation you should also add the `libjsig' library, where
available, to the LD_PRELOAD variable. See the notes below. Check the
source of the `mpiJava/src/scripts/prunjava' script for examples.)
API
---
The API definition is in
mpiJava/doc/api/mpi/mpiJava-spec.ps
Javadoc documentation for the API is preinstalled at
mpiJava/doc/api/mpi/package-summary.html
For questions and comments, email us.
Recommended MPI configuration options
-------------------------------------
In many case mpiJava will work using default MPI options. But after
much experimentation the options recommended below have been found to
eliminate certain failure modes. See the technical notes below for
more discussion.
Note all `configure' options specified in this section are for MPICH
or LAM `configure' scripts, *not* mpiJava!
1) Redhat Linux 7.3 + Sun SDK 1.4.1 + MPICH 1.2.5
Default
2) Redhat Linux 7.3 + Sun SDK 1.4.1 + LAM 6.5.6
Default is recommended.
If, however, problems are encountered, you may try reconfiguring LAM to
use a different signal, e.g.:
./configure ... --with-signal=SIGIO
3) Redhat Linux 7.3 + IBM 1.4 Java for Linux + MPICH 1.2.4
MPICH must be configured to use a signal other than the default SIGUSR1,
e.g.:
./configure ... -listener_sig=SIGIO
4) Redhat Linux 7.3 + IBM 1.4 Java for Linux + LAM 6.5.8
LAM must be configured to use a signal other than the default SIGUSR2,
e.g.:
./configure ... --with-signal=SIGIO
5) SunOS 5.8 + Sun SDK 1.4.1 + SunHPC-MPI 4
Default.
6) SunOS 5.8 + Sun SDK 1.4.1 + MPICH 1.2.4
Use:
./configure ... -cflags=-D_REENTRANT
(Note: on Solaris mpiJava has been tested with MPICH built using cc.)
7) SunOS 5.8 + Sun SDK 1.4.1 + LAM 6.5.6
Use:
./configure ... --with-cflags=-D_REENTRANT
8) AIX 3.4 + IBM JDK 1.3.0 Java + IBM MPI (SP2/3)
Default
9) AIX 3.4 + IBM JDK 1.3.0 Java + MPICH 1.2.5
Use:
./configure ... -cflags=-D_THREAD_SAFE
Note however that certain test cases have been observed to intermittently
hang on this platform for unknown reasons. Its use is not recommended.
(Note: on AIX mpiJava has been tested with MPICH built using cc.)
Technical Notes
===============
The following technical notes and case studies are largely for the benefit
of people trying to port mpiJava to other platforms, but in some cases
they also bear on the required configuration of the native MPI...
Problems with Signal Handlers (mpiJava 1.2.5)
---------------------------------------------
A problem in porting mpiJava to different platforms is conflicts in
uses of OS signal handlers by the Java Virtual Machine (and Java
libraries) and by the native MPI implementation.
Typical JVMs make use of OS signals and signal-handlers internally.
Typical MPI implementations override the default signal handlers.
If suitable measures are not taken, the MPI may blindly override the
signal-handlers installed by the JVM, leading to failures.
If you are using Sun's Java, we recommended to upgrade to JDK 1.4,
and set the environment variable `LD_PRELOAD' described in
http://java.sun.com/j2se/1.4/docs/guide/vm/signal-chaining.html
For example:
export LD_PRELOAD=$JAVA_HOME/jre/lib/$JARCH/$VM/libjsig.so
This resolves various intermittent bugs reported with previous versions
of mpiJava (on many important platforms).
In some cases this option is not sufficient or not available. Sometimes
it is nevertheless possible to work around problems by saving the signal
handlers installed by the JVM, and restoring them after the MPI has overridden
them. The current release of mpiJava introduces a second native library
for saving and restoring relevant signal handlers. In other cases it may
be possible and/or necessary to reconfigure MPI to use a "safe" signal.
[In the following notes we have tried to give plausible causes for
observed problems. But appearances can be deceptive and we don't always
have access to sources of the software concerned; even where we do,
it can be very labour intensive to trace intermittent failure modes
in detail. Nevertheless we hope the workarounds we found may suggest
ones that work in other situations.]
KNOWN SIGNAL-HANDLING ISSUES for specific platforms, with workarounds:
The workarounds are configured in automatically for mpiJava 1.2.5 where
appropriate, but in some cases you may have to change your native MPI
configuration to avoid conflicting signals.
1) Redhat Linux 7.3 + Sun SDK 1.4.1 + MPICH 1.2.5
Hotspot sometimes deliberately throws and catches SIGSEGV and
similar signals. `MPI_Init' overrides the JVM signal handlers
leading to intermittent failures (especially in complex recursive
code, like object serialization). With earlier versions of JDK
many mpiJava programs ran successfully despite this conflict.
JDK 1.4 signal-chaining using `libjsig' resolves all remaining issues
we are aware of. This is configured automatically into the mpiJava
1.2.5 `prunjava' script, if mpiJava is built with JDK 1.4.
2) Redhat Linux 7.3 + Sun SDK 1.4.1 + LAM 6.5.6
We expect the same issues with SIGSEGV, etc as in MPICH case, which
should be resolved by using `libjsig'.
Additionally, there is a special problem with SIGUSR2, which causes
frequent, intermittent hanging of mpiJava programs. Just loading
`libjsig' doesn't resolve this problem (the signal handlers don't
seem to chain properly?) We found empirically that restoring the
original JVM signal handler for SIGUSR2 after `MPI_Init' eliminated
problems in all our test cases. This approach is automatically
configured into mpiJava 1.2.5.
An alternative solution is to configure LAM to use a signal
that Hotspot doesn't use, e.g.:
./configure ... --with-signal=SIGIO
(Note well this is the `configure' script for LAM, *not* mpiJava!
We randomly suggested SIGIO as the alternate signal.)
3) Redhat Linux 7.3 + IBM 1.4 Java for Linux + MPICH 1.2.4
The IBM classic JVM uses SIGUSR1, and (we found) may block this signal
during JNI calls. By default MPICH (on the default P4 device) uses
SIGUSR1 as its listener signal. This conflict causes most mpiJava
programs to hang. The only known solution is to to configure MPICH
to use a different signal, e.g:
./configure ... -listener_sig=SIGIO
(Note well this is the `configure' script for MPICH, *not* mpiJava!
We randomly suggested SIGIO rather than the more obvious SIGUSR2.
SIGUSR2 mostly worked, but apparently produced conflicts in GUI-based
example codes.)
This resolves all problems we are currently aware of.
4) Redhat Linux 7.3 + IBM 1.4 Java for Linux + LAM 6.5.8
We had some success. But the `tests/signals/' test case and
`examples/Nozzle', `examples/potts' examples hang on some of our
installations. Configuring LAM to use e.g. SIGIO -- see 2), above
-- appeared to help, but we aren't certain this is a complete
solution -- we had conflicting experiences.
For now this configuration should be considered experimental.
5) SunOS 5.8 + Sun SDK 1.4.1 + SunHPC-MPI 4
Comments similar to the Linux MPICH case, 1). No known problems
provided the `libjsig' signal interception library is loaded.
6) SunOS 5.8 + Sun SDK 1.4.1 + MPICH 1.2.5
Comments similar to the Linux case, 1) above, except that on Solaris
the 1.4 JVM detects the occurrence of signal chaining it doesn't like,
and insists the java option "-Xusealtsigs" be set. This is configured
automatically into the mpiJava 1.2.5 `prunjava' script.
SEE ALSO the notes on thread safety issues, below.
(Note: on Solaris mpiJava has been tested assuming MPICH is built
with cc.)
7) SunOS 5.8 + Sun SDK 1.4.1 + LAM 6.5.6
Comments similar to the Linux MPICH case, 1). No known problems.
SEE ALSO the notes on thread safety issues, below.
8) AIX 3.4 + IBM JDK 1.3.0 Java + IBM MPI (SP2/3)
The JVM sometimes deliberately throws and catches SIGTRAP signals
(in a pattern similar to SIGSEGV, etc with Hotspot?), and the SP2
MPI apparently overrides the JVM handler. We know of no `libjsig'
analogue for this platform, but we found empirically that restoring
the original JVM signal handler for SIGTRAP after the
`System.loadLibrary(mpijava)' call eliminated problems in all our
test cases. This solution is automatically configured into mpiJava
1.2.5.
9) AIX 3.4 + IBM JDK 1.3.0 Java + MPICH 1.2.5
Certain test cases have been observed to intermittently hang on this
platform for unknown reasons. Its use is not recommended.
SEE ALSO the notes on thread safety issues, below.
(Note: on AIX the mpiJava configure script assumes MPICH is built
with cc, not GNU C.)
Issues of Thread Safety (mpiJava 1.2.5)
---------------------------------------
Most MPI implementations are not "thread-safe", and of course Java
uses threads in an essential way---even a single-threaded user program
will have system daemon threads running in the background.
In principle this could be a serious issue for mpiJava. To make
progress we have mainly disregarded the problem, and worked on the
optimistic assumption that provided *MPI* CALLS ARE NEVER MADE
CONCURRENTLY (and, by the way, it is *your* responsibility as the mpiJava
programmer to ensure this!) interference between Java threads should
not cause problems.
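One simple discipline for ensuring that MPI calls are never made
concurrently is to funnel every MPI call through a single lock. A minimal
sketch in Java (the class and lock names are hypothetical, not part of
mpiJava):

import mpi.*;

class SerializedSend {
    // Hypothetical guard object; any shared monitor will do.
    static final Object mpiLock = new Object();

    static void send(int[] msg, int dest, int tag) throws MPIException {
        synchronized (mpiLock) {  // MPI calls never overlap across threads
            MPI.COMM_WORLD.Send(msg, 0, 1, MPI.INT, dest, tag);
        }
    }
}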
A priori this is not guaranteed. The native MPI implementation might
be making OS system calls to send messages over sockets. Daemon
threads or other user threads could also (through the standard Java
API) be concurrently making system calls (e.g. an AWT program could be
communicating with an X server). If the MPI implementation happens not
to invoke its system calls in a thread-safe way, there could still be
interference effects with the system calls invoked internally by the
other "pure Java" threads. (One example is that the MPICH
implementation relies on the `errno' variable; in principle this
could be modified by other threads.)
We have not encountered problems that were *provably* attributable to
this kind of effect. But we *have* encountered problems with graphics
codes (e.g. `examples/Nozzle', `examples/potts') running on the Solaris
+ MPICH, Solaris + LAM and AIX + MPICH platforms that look suspiciously
like this. With the default build of MPICH and LAM, these programs
usually fail on these platforms.
Experimentally we found that on Solaris these problems could be eliminated by
reconfiguring MPICH to compile with the flag `-D_REENTRANT':
./configure ... -cflags=-D_REENTRANT
and similarly configuring LAM as follows:
./configure ... --with-cflags=-D_REENTRANT
(Note well these are the `configure' scripts for MPICH and LAM,
*not* mpiJava!)
On AIX the corresponding recipe that worked was:
./configure ... -cflags=-D_THREAD_SAFE
(Note well this is for the `configure' scripts for MPICH, not mpiJava!
Unfortunately we failed to install LAM on AIX. As noted above AIX
+ MPICH has other problems, which are unresolved.)
We were unable to trace the detailed cause of the observed failures, so
it is not 100% certain whether this is really a thread safety issue.
But in general setting `-D_REENTRANT' on Solaris or `-D_THREAD_SAFE'
on AIX would be expected to improve the thread safety characteristics
of C code.
Another change in this release related to thread safety is in the
implementation of the `finalize()' methods of the `Datatype', `Group',
`Op' and `Status' classes. In earlier releases of mpiJava these were
native methods that directly called the corresponding `MPI_Free'
functions. Although this wasn't observed to cause problems, in principle
it is not thread safe because the `finalize()' methods may be called in
a separate garbage collector thread. In the current release the calls
to the native methods are deferred, and invoked in the user thread when
the next MPI operation is explicitly called.
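In outline, such a deferred-free scheme might look like the following
sketch (all names are hypothetical; the actual mpiJava sources differ):

import java.util.LinkedList;

class NativeFreeQueue {
    // Native object handles queued by finalize() in the GC thread.
    private static final LinkedList pending = new LinkedList();

    // Safe to call from the garbage-collector thread: only records the handle.
    static synchronized void deferFree(long handle) {
        pending.add(new Long(handle));
    }

    // Called in the user thread at the start of the next explicit MPI
    // operation, where invoking the native MPI_Free functions is safe.
    static synchronized void drain() {
        while (!pending.isEmpty()) {
            nativeFree(((Long) pending.removeFirst()).longValue());
        }
    }

    private static native void nativeFree(long handle); // hypothetical JNI stub
}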
JVMs and "pinning" (mpiJava 1.2.3)
----------------------------------
The garbage collectors associated with early JVMs, such as the
"classic" JVM, supported pinning of Java arrays---fixing the arrays
to a specific physical location while a JNI call was in progress.
Several more modern JVMs (e.g. Hotspot and others) do not support
pinning. Instead JNI calls access elements of Java arrays by first obtaining
a C copy of the Java array. The elements are typically copied back
from the C array to the Java array when the JNI call returns.
mpiJava 1.2.3 supports two approaches to message buffers, reflecting
these two JNI mechanisms---pinning or copying. If you are using a
JVM which is known to support pinning, you may wish to uncomment the
definition of the macro `GC_DOES_PINNING' in the file `src/C/mpiJava.h'.
If this macro is left undefined---presumably meaning the garbage
collector does *not* support pinning---mpiJava will copy buffers
from and to Java arrays explicitly using `MPI_Pack' and `MPI_Unpack'.
This works well with MPICH.
Unfortunately this strategy doesn't always work with IBM MPI,
due to an apparent difference in the semantics of `MPI_Unpack'.
Luckily it turns out that many installations of Java on AIX still use
a variant of the classic JVM, which *does* support pinning. So on AIX
it is probably safest to define the `GC_DOES_PINNING' macro.
[Note added: the `configure' script now attempts to determine whether
the JVM supports pinning and will define the `GC_DOES_PINNING' macro in
make files, if it thinks it does.]
Revision History
----------------
Significant changes from version 1.2.4:
1) Fixes various problems associated with signal handlers
(see discussion above).
2) README file greatly extended to better document supported platforms and
portability issues.
3) Fixes a bug related to the behavior of `MPI_Unpack' on certain
MPI platforms.
4) Fixed some programming errors in the `examples/potts' and
`examples/metropolis' codes.
5) No longer use custom `jvmlauncher' for SunHPC. Instead use
LD_PRELOAD to preload -lmpi library into standard `java' command.
6) Moves freeing of native MPI objects out of the garbage collector
thread, into the MPI user thread (no particular problems were observed
with the old strategy, but in principle it wasn't thread-safe).
Significant changes from version 1.2.3:
1) Supports SunHPC version 4.0. Executable `src/bin/jvmlauncher' added.
Significant changes from version 1.2.2:
1) Supports AIX + POE platform.
Significant changes from version 1.2.1:
1) Major reorganization in handling communication buffers, the better to
support current JVMs, whose garbage collectors often don't implement
pinning.
2) Fix related bug in `Sendrecv', afflicting the `Life.java' example.
3) Fix bug reported by Jatinder Singh when `MPI.ANY_SOURCE' is used with
an `MPI.OBJECT' datatype.
Significant changes from version 1.2:
1) Mainly bug fixes.
Significant changes from version 1.1:
1) Support for the `MPI.OBJECT' basic type (note that this release
uses default JDK serialization, which can be quite inefficient).
2) Support for Linux platforms.
3) Inclusion of new demo programs.
4) Inclusion of `javadoc' documentation.
5) Other minor changes to the API---see the spec in the `doc' directory.
6) Bug fixes.
Known bugs and omissions
------------------------
1) The subclasses of `MPIException' documented in the mpiJava spec are still
not implemented (and in reality mpiJava methods never throw
exceptions---they generally abort the program in case of error).
2) In general, sanity-checking method arguments is not nearly as thorough
as it should be.
mpiJava Directory Structure
---------------------------
mpiJava/
bin/
This directory contains binaries or installed scripts.
For NT releases, sub-directories contain Win32 Dynamic
Link Libraries (.dll).
WMPI/
For NT releases, contains wmpi.dll created by
compiling the JNI C stubs. The directory where the
DLL resides needs to be added to the PATH
environment variable so that it can be found at
run-time by Java.
mpiJava.dll
doc/
examples/
metropolis/
A Monte Carlo program
Nozzle/
A CFD program, with GUI
PingPong/
A simple benchmark, with C and Java versions
potts/
Another Monte Carlo program, with a GUI
simple/
A "Game of Life" program; a "Hello World" program.
lib/
For UNIX releases this directory contains shared libraries.
Class files are contained in a subdirectory.
classes/
The mpiJava class files live here. This directory
should be added to your CLASSPATH environment
variable.
mpiJava.zip
src/
C/
The JNI C stubs for mpiJava. This directory
contains the JNI C wrappers and the header files for
mpiJava. These files are compiled into a shared
library (.so on UNIX) or dynamic link library
(.dll on Win32) that is loaded at runtime by the
JVM (via System.loadLibrary("mpiJava")) when the
Java MPI interface is used.
Java/
The Java interface to MPI. This directory includes
a sub-directory (mpi) holding the Java interface to
MPI. These files need to be compiled using a Java
compiler, such as javac. The resulting class files
are copied into the mpiJava/lib/classes directory.
mpi/
scripts/
Various scripts for configuring and testing mpiJava
under UNIX.
wmpi_jni/
See notes in `NT_INSTALL.TXT'
release/
bin/
The `jvmlauncher' program
tests/
ccl/
comm/
dtyp/
env/
group/
pt2pt/
topo/
References
==========
MPI Home Page:
http://www.mcs.anl.gov/mpi/index.html
MPICH home page:
http://www.mcs.anl.gov/mpi/mpich
LAM home page:
http://www.lam-mpi.org/
WMPI (an MPI for Windows NT):
http://dsg.dei.uc.pt/w32mpi/
Sun J2SE 1.4 download:
http://java.sun.com/j2se/1.4/download.html
IBM Java Developer Kit for Linux:
http://www.ibm.com/java/jdk/download
Contributions
-------------
From Hiromitsu Takagi:
I'd like to inform you that we have successfully built and run it on
Digital UNIX V4.0D (OSF JDK1.1.6) / MPICH but a few modifications are
required.
o add "-I$(JDK)/include/java -I$(JDK)/include/java/alpha" into
INCLUDE of mpiJava-1.1/src/C/Makefile
(jni.h is placed on $(JDK)/include/java/ and jni_md.h is placed on
$(JDK)/include/alpha/.)
o set LDFLAG of mpiJava-1.1/src/C/Makefile "-shared"
[...]
--
Hiromitsu Takagi
Computer Science Division, Electrotechnical Laboratory
Sep 1, 98
---=+ O +=---
Thanks to Rutger Hofman who pointed out a bug in `Request.Waitany',
`Request.Testany' and gave corrections.
Feb 28, 01
---=+ O +=---
The test case in `tests/signals/' is adapted from a bug
report submitted by Sivakumar Venkata Pabolu.
Jan 10, 03
ompi/mpi/java/c/Makefile.am Normal file
@@ -0,0 +1,41 @@
# -*- makefile -*-
#
# Copyright (c) 2011 Cisco Systems, Inc. All rights reserved.
# Copyright (c) 2012 Oracle and/or its affiliates. All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
if OMPI_WANT_JAVA_BINDINGS
# Get the include files that were generated from the .java source files
AM_CPPFLAGS = -I$(top_builddir)/ompi/mpi/java/java $(OMPI_JDK_CPPFLAGS) $(LTDLINCL)
headers = \
mpiJava.h
ompidir = $(includedir)/openmpi/ompi/mpi/java
ompi_HEADERS = \
$(headers)
lib_LTLIBRARIES = libmpi_java.la
libmpi_java_la_SOURCES = \
mpi_Cartcomm.c \
mpi_Comm.c \
mpi_Datatype.c \
mpi_Errhandler.c \
mpi_Graphcomm.c \
mpi_Group.c \
mpi_Intercomm.c \
mpi_Intracomm.c \
mpi_MPI.c \
mpi_Op.c \
mpi_Request.c \
mpi_Status.c
libmpi_java_la_LIBADD = $(top_builddir)/ompi/libmpi.la $(LIBLTDL)
libmpi_java_la_LDFLAGS = -version-info $(libmpi_java_so_version)
endif
ompi/mpi/java/c/mpiJava.h Normal file
@@ -0,0 +1,73 @@
/*
* Copyright (c) 2011 Cisco Systems, Inc. All rights reserved.
*
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*
*/
#include "mpi.h"
typedef struct {
jfieldID CommhandleID;
jfieldID ErrhandleID;
jfieldID GrouphandleID;
jfieldID DatatypehandleID;
jfieldID DatatypebaseTypeID;
jfieldID DatatypebaseSizeID;
jfieldID OphandleID;
jfieldID stathandleID;
jfieldID sourceID;
jfieldID tagID;
jfieldID indexID;
jfieldID elementsID;
jfieldID reqhandleID;
jfieldID opTagID;
jfieldID bufSaveID;
jfieldID countSaveID;
jfieldID offsetSaveID;
jfieldID baseTypeSaveID;
jfieldID bufbaseSaveID;
jfieldID bufptrSaveID;
jfieldID commSaveID;
jfieldID typeSaveID;
int *dt_sizes;
} ompi_java_globals_t;
extern ompi_java_globals_t ompi_java;
void ompi_java_clearFreeList(JNIEnv*);
void ompi_java_init_native_Datatype(void);
void* ompi_java_getBufPtr(void** bufbase,
JNIEnv *env, jobject buf,
int baseType, int offset);
void ompi_java_releaseBufPtr(JNIEnv *env, jobject buf,
void* bufbase, int baseType);
void* ompi_java_getMPIWriteBuf(int* bsize, int count,
MPI_Datatype type, MPI_Comm comm);
#ifndef GC_DOES_PINNING
void* ompi_java_getMPIBuf(int* size, JNIEnv *env, jobject buf, int offset,
int count, MPI_Datatype type, MPI_Comm comm,
int baseType);
void ompi_java_releaseMPIBuf(JNIEnv *env, jobject buf, int offset,
int count, MPI_Datatype type, MPI_Comm comm,
void* bufptr, int size, int baseType);
void ompi_java_releaseMPIRecvBuf(int* elements, JNIEnv *env, jobject buf, int offset,
int count, MPI_Datatype type, MPI_Comm comm,
void* bufptr, MPI_Status* status,
int baseType);
void ompi_java_releaseMPIReadBuf(void* bufptr);
#endif /* GC_DOES_PINNING */
ompi/mpi/java/c/mpi_Cartcomm.c Normal file
@@ -0,0 +1,257 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
* File : mpi_Cartcomm.c
* Headerfile : mpi_Cartcomm.h
* Author : Sung-Hoon Ko, Xinying Li
* Created : Thu Apr 9 12:22:15 1998
* Revision : $Revision: 1.6 $
* Updated : $Date: 2003/01/16 16:39:34 $
* Copyright: Northeast Parallel Architectures Center
* at Syracuse University 1998
*/
#include "ompi_config.h"
#include <stdlib.h>
#ifdef HAVE_TARGETCONDITIONALS_H
#include <TargetConditionals.h>
#endif
#include "mpi.h"
#include "mpi_Cartcomm.h"
#include "mpiJava.h"
/*
* Class: mpi_Cartcomm
* Method: Get
* Signature: (I)Lmpi/CartParms;
*/
JNIEXPORT jobject JNICALL Java_mpi_Cartcomm_Get(JNIEnv *env, jobject jthis)
{
jintArray dims, coords;
jbooleanArray periods;
jint *ds, *cs;
jboolean *ps;
int *ips ;
jboolean isCopy1=JNI_TRUE, isCopy2=JNI_TRUE ,isCopy3=JNI_TRUE;
int maxdims;
int i ;
jclass cartparms_class=(*env)->FindClass(env,"mpi/CartParms");
jfieldID dimsID,periodsID,coordsID;
jmethodID handleConstructorID =
(*env)->GetMethodID(env, cartparms_class, "<init>", "()V");
jobject cartparms =
(*env)->NewObject(env,cartparms_class, handleConstructorID);
ompi_java_clearFreeList(env) ;
MPI_Cartdim_get((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)),&maxdims);
dims=(*env)->NewIntArray(env,maxdims);
periods=(*env)->NewBooleanArray(env,maxdims);
coords=(*env)->NewIntArray(env,maxdims);
ips = (int*) malloc(sizeof(int) * maxdims) ;
ds=(*env)->GetIntArrayElements(env,dims,&isCopy1);
cs=(*env)->GetIntArrayElements(env,coords,&isCopy3);
MPI_Cart_get((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)),
maxdims, (int*)ds, ips, (int*)cs);
ps=(*env)->GetBooleanArrayElements(env,periods,&isCopy2);
for (i = 0 ; i < maxdims ; i++) {
ps [i] = ips [i] ? JNI_TRUE : JNI_FALSE ;
}
dimsID=(*env)->GetFieldID(env,cartparms_class,"dims","[I");
periodsID=(*env)->GetFieldID(env,cartparms_class,"periods","[Z");
coordsID=(*env)->GetFieldID(env,cartparms_class , "coords", "[I");
(*env)->SetObjectField(env, cartparms, dimsID, dims);
(*env)->SetObjectField(env, cartparms, periodsID, periods);
(*env)->SetObjectField(env, cartparms, coordsID, coords);
(*env)->ReleaseIntArrayElements(env,dims,ds,0);
(*env)->ReleaseBooleanArrayElements(env,periods,ps,0);
(*env)->ReleaseIntArrayElements(env,coords,cs,0);
return cartparms;
}
/*
* Class: mpi_Cartcomm
* Method: Shift
* Signature: (II)Lmpi/ShiftParms;
*/
JNIEXPORT jobject JNICALL Java_mpi_Cartcomm_Shift(JNIEnv *env, jobject jthis,
jint direction, jint disp)
{
int sr, dr;
jclass shiftparms_class=(*env)->FindClass(env,"mpi/ShiftParms");
jfieldID rsID,rdID;
jmethodID handleConstructorID = (*env)->GetMethodID(env,
shiftparms_class, "<init>", "()V");
jobject shiftparms=(*env)->NewObject(env,shiftparms_class,
handleConstructorID);
ompi_java_clearFreeList(env) ;
MPI_Cart_shift((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)),
direction, disp, &sr, &dr);
rsID=(*env)->GetFieldID(env,shiftparms_class,"rank_source","I");
rdID=(*env)->GetFieldID(env,shiftparms_class,"rank_dest", "I");
(*env)->SetIntField(env, shiftparms, rsID, sr);
(*env)->SetIntField(env, shiftparms, rdID, dr);
/* printf("Shift finished.\n"); */
return shiftparms;
}
/*
* Class: mpi_Cartcomm
* Method: Coords
* Signature: (I)[I
*/
JNIEXPORT jintArray JNICALL Java_mpi_Cartcomm_Coords(JNIEnv *env, jobject jthis, jint rank)
{
jint *coords;
jboolean isCopy=JNI_TRUE;
jintArray jcoords;
int maxdims;
/*
jclass jthis_class=(*env)->FindClass(env,"mpi/Cartcomm");
jfieldID maxdimsID=(*env)->GetFieldID(env,jthis_class,"maxdims","I");
maxdims=(*env)->GetIntField(env,jthis, maxdimsID);
*/
ompi_java_clearFreeList(env) ;
MPI_Cartdim_get((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)),
&maxdims);
jcoords=(*env)->NewIntArray(env,maxdims);
coords=(*env)->GetIntArrayElements(env,jcoords,&isCopy);
MPI_Cart_coords((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)),
rank,maxdims,(int*)coords);
(*env)->ReleaseIntArrayElements(env,jcoords,coords,0);
return jcoords;
}
/*
* Class: mpi_Cartcomm
* Method: Map
* Signature: ([I[Z)I
*/
JNIEXPORT jint JNICALL Java_mpi_Cartcomm_Map(JNIEnv *env, jobject jthis,
jintArray dims, jbooleanArray periods)
{
int newrank;
jint *ds;
jboolean *ps;
jboolean isCopy=JNI_TRUE;
int ndims;
int *int_re_ds=(int*)calloc((*env)->GetArrayLength(env,periods), sizeof(int));
int i;
ompi_java_clearFreeList(env) ;
ndims=(*env)->GetArrayLength(env,dims);
ds=(*env)->GetIntArrayElements(env,dims,&isCopy);
ps=(*env)->GetBooleanArrayElements(env,periods,&isCopy);
for (i=0;i<(*env)->GetArrayLength(env,periods);i++)
if(ps[i]==JNI_TRUE)
int_re_ds[i]=1;
else
int_re_ds[i]=0;
MPI_Cart_map((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)),
ndims,(int*)ds,int_re_ds, &newrank);
(*env)->ReleaseIntArrayElements(env,dims,ds,0);
(*env)->ReleaseBooleanArrayElements(env,periods,ps,0);
free(int_re_ds);
return newrank;
}
/*
* Class: mpi_Cartcomm
* Method: Rank
* Signature: ([I)I
*/
JNIEXPORT jint JNICALL Java_mpi_Cartcomm_Rank(JNIEnv *env, jobject jthis, jintArray coords)
{
int rank;
jint *crds;
jboolean isCopy=JNI_TRUE;
ompi_java_clearFreeList(env) ;
crds=(*env)->GetIntArrayElements(env,coords,&isCopy);
MPI_Cart_rank((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)),
(int*)crds, &rank);
(*env)->ReleaseIntArrayElements(env,coords,crds,0);
return rank;
}
/*
* Class: mpi_Cartcomm
* Method: Sub
* Signature: ([Z)Lmpi/Cartcomm;
*/
JNIEXPORT jlong JNICALL Java_mpi_Cartcomm_sub(JNIEnv *env, jobject jthis,
jbooleanArray remain_dims)
{
MPI_Comm newcomm;
jboolean *re_ds;
jboolean isCopy=JNI_TRUE;
int *int_re_ds=(int*)calloc((*env)->GetArrayLength(env,remain_dims), sizeof(int));
int i;
ompi_java_clearFreeList(env) ;
re_ds=(*env)->GetBooleanArrayElements(env,remain_dims,&isCopy);
for(i=0;i<=(*env)->GetArrayLength(env,remain_dims);i++)
if(re_ds[i]==JNI_TRUE)
int_re_ds[i]=1;
else
int_re_ds[i]=0;
MPI_Cart_sub((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)),
int_re_ds, &newcomm);
(*env)->ReleaseBooleanArrayElements(env,remain_dims,re_ds,0);
free(int_re_ds);
return (jlong)newcomm;
}
/*
* Class: mpi_Cartcomm
* Method: Dims_create
* Signature: (I[I)V
*/
JNIEXPORT void JNICALL Java_mpi_Cartcomm_Dims_1create(JNIEnv *env, jclass jthis,
jint nnodes, jintArray dims )
{
jint *cdims;
jboolean isCopy=JNI_TRUE;
int ndims = (*env)->GetArrayLength(env,dims) ;
ompi_java_clearFreeList(env) ;
cdims=(*env)->GetIntArrayElements(env,dims,&isCopy);
MPI_Dims_create(nnodes,ndims,(int*)cdims);
(*env)->ReleaseIntArrayElements(env,dims,cdims,0);
}
ompi/mpi/java/c/mpi_Comm.c Normal file (1544 lines)
File diff suppressed because it is too large
ompi/mpi/java/c/mpi_Datatype.c Normal file
@@ -0,0 +1,394 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
* File : mpi_Datatype.c
* Headerfile : mpi_Datatype.h
* Author : Sung-Hoon Ko, Xinying Li, Sang Lim, Bryan Carpenter
* Created : Thu Apr 9 12:22:15 1998
* Revision : $Revision: 1.10 $
* Updated : $Date: 2003/01/16 16:39:34 $
* Copyright: Northeast Parallel Architectures Center
* at Syracuse University 1998
*/
#include "ompi_config.h"
#include <stdlib.h>
#ifdef HAVE_TARGETCONDITIONALS_H
#include <TargetConditionals.h>
#endif
#include "mpi.h"
#include "mpi_Datatype.h"
#include "mpiJava.h"
/*
* public class Datatype {
* private final static int UNDEFINED = -1;
* public final static int NULL = 0;
* public final static int BYTE = 1;
* public final static int CHAR = 2;
*
* public final static int SHORT = 3;
* public final static int BOOLEAN = 4;
* public final static int INT = 5;
*
* public final static int LONG = 6;
* public final static int FLOAT = 7;
* public final static int DOUBLE = 8;
*
* public final static int PACKED = 9;
* public final static int LB =10;
* public final static int UB =11;
*
* public final static int OBJECT =12;
*
* ...
* }
*/
MPI_Datatype Dts[] = { MPI_DATATYPE_NULL, MPI_BYTE, MPI_SHORT,
MPI_SHORT, MPI_BYTE, MPI_INT,
MPI_LONG_INT, MPI_FLOAT, MPI_DOUBLE,
MPI_PACKED, MPI_LB, MPI_UB,
MPI_BYTE };
void ompi_java_init_native_Datatype(void)
{
/* Initialization that can only be done after MPI_Init() has
* been called. Called from `mpi_MPI.c'.
*/
int i ;
ompi_java.dt_sizes = (int*) malloc(13 * sizeof(int)) ;
for (i = 1 ; i < 13 ; i++) {
MPI_Type_size(Dts[i], &(ompi_java.dt_sizes[i])) ;
}
}
/*
* Class: mpi_Datatype
* Method: init
* Signature: ()V
*/
JNIEXPORT void JNICALL Java_mpi_Datatype_init(JNIEnv *env, jclass thisClass)
{
ompi_java.DatatypehandleID = (*env)->GetFieldID(env,thisClass,"handle","J");
ompi_java.DatatypebaseTypeID = (*env)->GetFieldID(env,thisClass,"baseType","I");
ompi_java.DatatypebaseSizeID = (*env)->GetFieldID(env,thisClass,"baseSize","I");
}
/*
* Class: mpi_Datatype
* Method: GetDatatype
* Signature: (I)J
*/
JNIEXPORT void JNICALL Java_mpi_Datatype_GetDatatype(JNIEnv *env, jobject jthis, jint type)
{
(*env)->SetLongField(env,jthis, ompi_java.DatatypehandleID, (jlong)Dts[type]);
}
/*
* Class: mpi_Datatype
* Method: size
* Signature: ()I
*/
JNIEXPORT jint JNICALL Java_mpi_Datatype_size(JNIEnv *env, jobject jthis)
{
int result;
ompi_java_clearFreeList(env) ;
MPI_Type_size((MPI_Datatype)((*env)->GetLongField(env,jthis,ompi_java.DatatypehandleID)),
&result );
return result;
}
/*
* Class: mpi_Datatype
* Method: extent
* Signature: ()I
*/
JNIEXPORT jint JNICALL Java_mpi_Datatype_extent(JNIEnv *env, jobject jthis)
{
MPI_Aint result;
ompi_java_clearFreeList(env) ;
MPI_Type_extent((MPI_Datatype)((*env)->GetLongField(env,jthis,ompi_java.DatatypehandleID)),
&result);
return result;
}
/*
* Class: mpi_Datatype
* Method: lB
* Signature: ()I
*/
JNIEXPORT jint JNICALL Java_mpi_Datatype_lB(JNIEnv *env, jobject jthis)
{
MPI_Aint result;
ompi_java_clearFreeList(env) ;
MPI_Type_lb((MPI_Datatype)((*env)->GetLongField(env,jthis,ompi_java.DatatypehandleID)),
&result);
return result;
}
/*
* Class: mpi_Datatype
* Method: uB
* Signature: ()I
*/
JNIEXPORT jint JNICALL Java_mpi_Datatype_uB(JNIEnv *env, jobject jthis)
{
MPI_Aint result;
ompi_java_clearFreeList(env) ;
MPI_Type_ub((MPI_Datatype)((*env)->GetLongField(env,jthis,ompi_java.DatatypehandleID)),
&result);
return result;
}
/*
* Class: mpi_Datatype
* Method: commit
* Signature: ()V
*/
JNIEXPORT void JNICALL Java_mpi_Datatype_commit(JNIEnv *env, jobject jthis)
{
MPI_Datatype type;
ompi_java_clearFreeList(env) ;
type=(MPI_Datatype)((*env)->GetLongField(env,jthis,ompi_java.DatatypehandleID));
MPI_Type_commit(&type);
}
/*
* Class: mpi_Datatype
* Method: free
* Signature: ()V
*/
JNIEXPORT void JNICALL Java_mpi_Datatype_free(JNIEnv *env, jobject jthis)
{
MPI_Datatype type;
type=(MPI_Datatype)((*env)->GetLongField(env,jthis,ompi_java.DatatypehandleID));
if (type != MPI_DATATYPE_NULL) {
MPI_Type_free(&type);
}
}
/*
* Class: mpi_Datatype
* Method: GetContiguous
* Signature: (I)V
*/
JNIEXPORT void JNICALL Java_mpi_Datatype_GetContiguous(JNIEnv *env, jobject jthis,
jint count,jobject oldtype)
{
MPI_Datatype type;
ompi_java_clearFreeList(env) ;
MPI_Type_contiguous(count,
(MPI_Datatype)((*env)->GetLongField(env,oldtype,ompi_java.DatatypehandleID)),
&type);
(*env)->SetLongField(env,jthis, ompi_java.DatatypehandleID, (jlong)type);
}
/*
* Class: mpi_Datatype
* Method: GetVector
* Signature: (III)V
*/
JNIEXPORT void JNICALL Java_mpi_Datatype_GetVector(JNIEnv *env, jobject jthis,
jint count, jint blocklength, jint stride,
jobject oldtype)
{
MPI_Datatype type;
ompi_java_clearFreeList(env) ;
MPI_Type_vector(count, blocklength, stride,
(MPI_Datatype)((*env)->GetLongField(env,oldtype,ompi_java.DatatypehandleID)),
&type);
(*env)->SetLongField(env,jthis, ompi_java.DatatypehandleID, (jlong)type);
}
/*
* Class: mpi_Datatype
* Method: GetHvector
* Signature: (III)V
*/
JNIEXPORT void JNICALL Java_mpi_Datatype_GetHvector(JNIEnv *env, jobject jthis,
jint count, jint blocklength, jint stride,
jobject oldtype)
{
MPI_Datatype type;
jint baseSize = (*env)->GetIntField(env, jthis, ompi_java.DatatypebaseSizeID) ;
ompi_java_clearFreeList(env) ;
MPI_Type_hvector(count, blocklength, baseSize * stride,
(MPI_Datatype)((*env)->GetLongField(env,oldtype,ompi_java.DatatypehandleID)),
&type);
(*env)->SetLongField(env,jthis, ompi_java.DatatypehandleID, (jlong)type);
}
/*
* Class: mpi_Datatype
* Method: GetIndexed
* Signature: (I[I[I)V
*/
JNIEXPORT void JNICALL Java_mpi_Datatype_GetIndexed(JNIEnv *env, jobject jthis,
jintArray blocklengths, jintArray
displacements, jobject oldtype)
{
MPI_Datatype type;
int count=(*env)->GetArrayLength(env,blocklengths);
jboolean isCopy=JNI_TRUE;
jint *lengths; jint *disps;
ompi_java_clearFreeList(env) ;
lengths=(*env)->GetIntArrayElements(env,blocklengths,&isCopy);
disps = (*env)->GetIntArrayElements(env,displacements,&isCopy);
MPI_Type_indexed(count, (int*)lengths, (int*)disps,
(MPI_Datatype)((*env)->GetLongField(env,oldtype,ompi_java.DatatypehandleID)), &type);
(*env)->ReleaseIntArrayElements(env,blocklengths,lengths,0);
(*env)->ReleaseIntArrayElements(env,displacements,disps,0);
(*env)->SetLongField(env,jthis, ompi_java.DatatypehandleID, (jlong)type);
}
/*
* Class: mpi_Datatype
* Method: GetHindexed
* Signature: (I[I[I)V
*/
JNIEXPORT void JNICALL Java_mpi_Datatype_GetHindexed(JNIEnv *env, jobject jthis,
jintArray blocklengths,
jintArray displacements,
jobject oldtype)
{
MPI_Datatype type ;
int count = (*env)->GetArrayLength(env,blocklengths);
jboolean isCopy ;
jint *lengths; jint *disps;
jint baseSize = (*env)->GetIntField(env, jthis, ompi_java.DatatypebaseSizeID) ;
MPI_Aint* cdisps ;
int i ;
ompi_java_clearFreeList(env) ;
lengths=(*env)->GetIntArrayElements(env,blocklengths,&isCopy);
disps = (*env)->GetIntArrayElements(env,displacements,&isCopy);
cdisps = (MPI_Aint*) calloc(count, sizeof(MPI_Aint)) ;
for(i = 0 ; i < count ; i++)
cdisps [i] = baseSize * disps [i] ;
MPI_Type_hindexed(count, (int*)lengths, cdisps,
(MPI_Datatype)((*env)->GetLongField(env,oldtype,ompi_java.DatatypehandleID)),
&type);
free(cdisps) ;
(*env)->ReleaseIntArrayElements(env,blocklengths,lengths,0);
(*env)->ReleaseIntArrayElements(env,displacements,disps,0);
(*env)->SetLongField(env,jthis, ompi_java.DatatypehandleID, (jlong)type);
}
/*
* Class: mpi_Datatype
* Method: GetStruct
* Signature: ([I[I[Lmpi/Datatype;ZIZI)V
*/
JNIEXPORT void JNICALL Java_mpi_Datatype_GetStruct(JNIEnv *env, jobject jthis,
jintArray blocklengths, jintArray displacements,
jobjectArray datatypes,
jboolean lbSet, jint lb, jboolean ubSet, jint ub)
{
MPI_Datatype type;
int count, ptr, i ;
jboolean isCopy ;
jint *lengths, *disps ;
MPI_Datatype *ctypes ;
int *clengths ;
MPI_Aint *cdisps ;
jint baseSize = (*env)->GetIntField(env, jthis, ompi_java.DatatypebaseSizeID) ;
ompi_java_clearFreeList(env) ;
count = (*env)->GetArrayLength(env,blocklengths);
lengths = (*env)->GetIntArrayElements(env,blocklengths,&isCopy);
disps = (*env)->GetIntArrayElements(env,displacements,&isCopy);
/* Remove components with UNDEFINED base type, but add upper bound
and lower bound markers if required. */
ctypes = (MPI_Datatype*) calloc(count + 2, sizeof(MPI_Datatype)) ;
clengths = (int*) calloc(count + 2, sizeof(int)) ;
cdisps = (MPI_Aint*) calloc(count + 2, sizeof(MPI_Aint)) ;
ptr = 0 ;
for(i = 0 ; i < count ; i++) {
jobject type = (*env)->GetObjectArrayElement(env, datatypes, i) ;
jint baseType = (*env)->GetIntField(env, type, ompi_java.DatatypebaseTypeID) ;
if(baseType != -1) {
jlong handle = (*env)->GetLongField(env, type, ompi_java.DatatypehandleID) ;
ctypes [ptr] = (MPI_Datatype) handle ;
clengths [ptr] = lengths [i] ;
cdisps [ptr] = baseSize * disps [i] ;
ptr++ ;
}
}
if(lbSet == JNI_TRUE) {
ctypes [ptr] = MPI_LB ;
clengths [ptr] = 1 ;
cdisps [ptr] = baseSize * lb ;
ptr++ ;
}
if(ubSet == JNI_TRUE) {
ctypes [ptr] = MPI_UB ;
clengths [ptr] = 1 ;
cdisps [ptr] = baseSize * ub ;
ptr++ ;
}
MPI_Type_struct(ptr, clengths, cdisps, ctypes, &type);
free(cdisps);
free(clengths);
free(ctypes);
(*env)->ReleaseIntArrayElements(env,blocklengths,lengths,0);
(*env)->ReleaseIntArrayElements(env,displacements,disps,0);
(*env)->SetLongField(env,jthis, ompi_java.DatatypehandleID, (jlong)type);
}
ompi/mpi/java/c/mpi_Errhandler.c Normal file
@@ -0,0 +1,62 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
* File : mpi_Errhandler.c
* Headerfile : mpi_Errhandler.h
* Author : Bryan Carpenter
* Created : 1999
* Revision : $Revision: 1.2 $
* Updated : $Date: 2001/08/07 16:36:15 $
* Copyright: Northeast Parallel Architectures Center
* at Syracuse University 1998
*/
#include "ompi_config.h"
#ifdef HAVE_TARGETCONDITIONALS_H
#include <TargetConditionals.h>
#endif
#include <mpi.h>
#include "mpi_Errhandler.h"
#include "mpiJava.h"
jfieldID ErrhandleID;
/*
* Class: mpi_Errhandler
* Method: init
* Signature: ()V
*/
JNIEXPORT void JNICALL Java_mpi_Errhandler_init(JNIEnv *env, jclass thisClass)
{
ompi_java.ErrhandleID = (*env)->GetFieldID(env,thisClass,"handle","J");
}
/*
* Class: mpi_Errhandler
* Method: GetErrhandler
* Signature: (I)V
*/
JNIEXPORT void JNICALL Java_mpi_Errhandler_GetErrhandler(JNIEnv *env, jobject jthis, jint type)
{
switch (type) {
case 0:
    (*env)->SetLongField(env,jthis, ompi_java.ErrhandleID, (jlong)MPI_ERRORS_RETURN);
    break;
case 1:
    (*env)->SetLongField(env,jthis, ompi_java.ErrhandleID, (jlong)MPI_ERRORS_ARE_FATAL);
    break;
}
}
ompi/mpi/java/c/mpi_Graphcomm.c Normal file
@@ -0,0 +1,126 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
* File : mpi_Graphcomm.c
* Headerfile : mpi_Graphcomm.h
* Author : Xinying Li
* Created : Thu Apr 9 12:22:15 1998
* Revision : $Revision: 1.2 $
* Updated : $Date: 2003/01/16 16:39:34 $
* Copyright: Northeast Parallel Architectures Center
* at Syracuse University 1998
*/
#include "ompi_config.h"
#ifdef HAVE_TARGETCONDITIONALS_H
#include <TargetConditionals.h>
#endif
#include "mpi.h"
#include "mpi_Graphcomm.h"
#include "mpiJava.h"
/*
* Class: mpi_Graphcomm
* Method: Get
* Signature: ()Lmpi/GraphParms;
*/
JNIEXPORT jobject JNICALL Java_mpi_Graphcomm_Get(JNIEnv *env, jobject jthis)
{
jintArray index, edges;
jint *ind, *edg;
jboolean isCopy=JNI_TRUE;
int maxind, maxedg;
jclass graphparms_class=(*env)->FindClass(env,"mpi/GraphParms");
jfieldID indexID,edgesID;
jmethodID handleConstructorID = (*env)->GetMethodID(env,
graphparms_class, "<init>", "()V");
jobject graphparms=(*env)->NewObject(env,graphparms_class, handleConstructorID);
ompi_java_clearFreeList(env) ;
MPI_Graphdims_get((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)),&maxind,&maxedg);
index=(*env)->NewIntArray(env,maxind);
edges=(*env)->NewIntArray(env,maxedg);
ind=(*env)->GetIntArrayElements(env,index,&isCopy);
edg=(*env)->GetIntArrayElements(env,edges,&isCopy);
MPI_Graph_get((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)),
maxind,maxedg, (int*)ind, (int*)edg);
(*env)->ReleaseIntArrayElements(env,index,ind,0);
(*env)->ReleaseIntArrayElements(env,edges,edg,0);
indexID=(*env)->GetFieldID(env,graphparms_class,"index","[I");
edgesID=(*env)->GetFieldID(env,graphparms_class , "edges", "[I");
(*env)->SetObjectField(env, graphparms, indexID, index);
(*env)->SetObjectField(env, graphparms, edgesID, edges);
/* printf("Graphcomm Get finished.\n"); */
return graphparms;
}
/*
* Class: mpi_Graphcomm
* Method: Neighbours
* Signature: (I)[I
*/
JNIEXPORT jintArray JNICALL Java_mpi_Graphcomm_Neighbours(JNIEnv *env, jobject jthis, jint rank)
{
jint *neighbors;
jboolean isCopy=JNI_TRUE;
jintArray jneighbors;
int maxns;
ompi_java_clearFreeList(env) ;
MPI_Graph_neighbors_count((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)),rank,&maxns);
jneighbors=(*env)->NewIntArray(env,maxns);
neighbors=(*env)->GetIntArrayElements(env,jneighbors,&isCopy);
MPI_Graph_neighbors((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)),
rank,maxns,(int*)neighbors);
(*env)->ReleaseIntArrayElements(env,jneighbors,neighbors,0);
return jneighbors;
}
/*
* Class: mpi_Graphcomm
* Method: Map
* Signature: ([I[I)I
*/
JNIEXPORT jint JNICALL Java_mpi_Graphcomm_Map(JNIEnv *env, jobject jthis, jintArray index, jintArray edges)
{
int newrank;
jint *ind, *edg;
jboolean isCopy=JNI_TRUE;
int nnodes;
ompi_java_clearFreeList(env) ;
nnodes=(*env)->GetArrayLength(env,index);
ind=(*env)->GetIntArrayElements(env,index,&isCopy);
edg=(*env)->GetIntArrayElements(env,edges,&isCopy);
MPI_Graph_map((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)),
nnodes,(int*)ind,(int*)edg, &newrank);
(*env)->ReleaseIntArrayElements(env,index,ind,0);
(*env)->ReleaseIntArrayElements(env,edges,edg,0);
return newrank;
}
ompi/mpi/java/c/mpi_Group.c Normal file
@@ -0,0 +1,322 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
* File : mpi_Group.c
* Headerfile : mpi_Group.h
* Author : Xinying Li
* Created : Thu Apr 9 12:22:15 1998
* Revision : $Revision: 1.3 $
* Updated : $Date: 2003/01/16 16:39:34 $
* Copyright: Northeast Parallel Architectures Center
* at Syracuse University 1998
*/
#include "ompi_config.h"
#include <stdlib.h>
#ifdef HAVE_TARGETCONDITIONALS_H
#include <TargetConditionals.h>
#endif
#include "mpi.h"
#include "mpi_Group.h"
#include "mpiJava.h"
/*
* Class: mpi_Group
* Method: init
* Signature: ()V
*/
JNIEXPORT void JNICALL Java_mpi_Group_init(JNIEnv *env, jclass thisClass)
{
ompi_java.GrouphandleID = (*env)->GetFieldID(env,thisClass,"handle","J");
}
/*
* Class: mpi_Group
* Method: GetGroup
* Signature: (I)V
*/
JNIEXPORT void JNICALL Java_mpi_Group_GetGroup(JNIEnv *env, jobject jthis, jint type)
{
switch (type) {
case 0:
(*env)->SetLongField(env,jthis, ompi_java.GrouphandleID, (jlong)MPI_GROUP_EMPTY);
break;
default:
break;
}
}
/*
* Class: mpi_Group
* Method: Size
* Signature: ()I
*/
JNIEXPORT jint JNICALL Java_mpi_Group_Size(JNIEnv *env, jobject jthis)
{
int size;
ompi_java_clearFreeList(env) ;
MPI_Group_size((MPI_Group)((*env)->GetLongField(env,jthis,ompi_java.GrouphandleID)),
&size);
return size;
}
/*
 * Class: mpi_Group
* Method: Rank
* Signature: ()I
*/
JNIEXPORT jint JNICALL Java_mpi_Group_Rank(JNIEnv *env, jobject jthis)
{
int rank;
ompi_java_clearFreeList(env) ;
MPI_Group_rank((MPI_Group)((*env)->GetLongField(env,jthis,ompi_java.GrouphandleID)),
&rank);
return rank;
}
/*
* Class: mpi_Group
* Method: free
* Signature: ()V
*/
JNIEXPORT void JNICALL Java_mpi_Group_free(JNIEnv *env, jobject jthis)
{
MPI_Group group=(MPI_Group)((*env)->GetLongField(env,jthis,ompi_java.GrouphandleID));
MPI_Group_free(&group);
(*env)->SetLongField(env,jthis, ompi_java.GrouphandleID,(jlong)MPI_GROUP_NULL);
}
/*
* Class: mpi_Group
* Method: Translate_ranks
* Signature: (Lmpi/Group;[ILmpi/Group;)[I
*/
JNIEXPORT jintArray JNICALL Java_mpi_Group_Translate_1ranks(JNIEnv *env, jclass jthis,
jobject group1, jintArray ranks1,
jobject group2)
{
jboolean isCopy=JNI_TRUE;
int n=(*env)->GetArrayLength(env,ranks1);
jint *rks1,*rks2;
jintArray jranks2;
ompi_java_clearFreeList(env) ;
rks1=(*env)->GetIntArrayElements(env,ranks1,&isCopy);
jranks2=(*env)->NewIntArray(env,n);
rks2=(*env)->GetIntArrayElements(env,jranks2,&isCopy);
MPI_Group_translate_ranks((MPI_Group)((*env)->GetLongField(env,group1,ompi_java.GrouphandleID)),
n, (int*)rks1,
(MPI_Group)((*env)->GetLongField(env,group2,ompi_java.GrouphandleID)),
(int*)rks2);
(*env)->ReleaseIntArrayElements(env,ranks1,rks1,0);
(*env)->ReleaseIntArrayElements(env,jranks2,rks2,0);
return jranks2;
}
/*
* Class: mpi_Group
* Method: Compare
* Signature: (Lmpi/Group;Lmpi/Group;)I
*/
JNIEXPORT jint JNICALL Java_mpi_Group_Compare(JNIEnv *env, jclass jthis,
jobject group1, jobject group2)
{
int result;
ompi_java_clearFreeList(env) ;
MPI_Group_compare((MPI_Group)((*env)->GetLongField(env,group1,ompi_java.GrouphandleID)),
(MPI_Group)((*env)->GetLongField(env,group2,ompi_java.GrouphandleID)),
&result);
return result;
}
/*
* Class: mpi_Group
* Method: union
* Signature: (Lmpi/Group;Lmpi/Group;)J
*/
JNIEXPORT jlong JNICALL Java_mpi_Group_union(JNIEnv *env, jclass jthis,
jobject group1, jobject group2)
{
MPI_Group newgroup;
ompi_java_clearFreeList(env) ;
MPI_Group_union((MPI_Group)((*env)->GetLongField(env,group1,ompi_java.GrouphandleID)),
(MPI_Group)((*env)->GetLongField(env,group2,ompi_java.GrouphandleID)),
&newgroup);
return (jlong)newgroup;
}
/*
* Class: mpi_Group
* Method: intersection
* Signature: (Lmpi/Group;Lmpi/Group;)J
*/
JNIEXPORT jlong JNICALL Java_mpi_Group_intersection(JNIEnv *env, jclass jthis,
jobject group1, jobject group2)
{
MPI_Group newgroup;
ompi_java_clearFreeList(env) ;
MPI_Group_intersection((MPI_Group)((*env)->GetLongField(env,group1,ompi_java.GrouphandleID)),
(MPI_Group)((*env)->GetLongField(env,group2,ompi_java.GrouphandleID)),
&newgroup);
return (jlong)newgroup;
}
/*
* Class: mpi_Group
* Method: difference
* Signature: (Lmpi/Group;Lmpi/Group;)J
*/
JNIEXPORT jlong JNICALL Java_mpi_Group_difference(JNIEnv *env, jclass jthis,
jobject group1, jobject group2)
{
MPI_Group newgroup;
ompi_java_clearFreeList(env) ;
MPI_Group_difference((MPI_Group)((*env)->GetLongField(env,group1,ompi_java.GrouphandleID)),
(MPI_Group)((*env)->GetLongField(env,group2,ompi_java.GrouphandleID)),
&newgroup);
return (jlong)newgroup;
}
/*
* Class: mpi_Group
* Method: incl
* Signature: ([I)J
*/
JNIEXPORT jlong JNICALL Java_mpi_Group_incl(JNIEnv *env, jobject jthis, jintArray ranks)
{
int n;
jint *rks;
jboolean isCopy=JNI_TRUE;
MPI_Group newgroup;
ompi_java_clearFreeList(env) ;
n=(*env)->GetArrayLength(env,ranks);
rks=(*env)->GetIntArrayElements(env,ranks,&isCopy);
MPI_Group_incl((MPI_Group)((*env)->GetLongField(env,jthis,ompi_java.GrouphandleID)),
n, (int*)rks,
&newgroup);
(*env)->ReleaseIntArrayElements(env,ranks,rks,0);
return (jlong)newgroup;
}
/*
* Class: mpi_Group
* Method: excl
* Signature: ([I)J
*/
JNIEXPORT jlong JNICALL Java_mpi_Group_excl(JNIEnv *env, jobject jthis, jintArray ranks)
{
int n;
jint *rks;
jboolean isCopy=JNI_TRUE;
MPI_Group newgroup;
ompi_java_clearFreeList(env) ;
n=(*env)->GetArrayLength(env,ranks);
rks=(*env)->GetIntArrayElements(env,ranks,&isCopy);
MPI_Group_excl((MPI_Group)((*env)->GetLongField(env,jthis,ompi_java.GrouphandleID)),
n, (int*)rks,
&newgroup);
(*env)->ReleaseIntArrayElements(env,ranks,rks,0);
return (jlong)newgroup;
}
/*
* Class: mpi_Group
* Method: range_incl
* Signature: ([[I)J
*/
JNIEXPORT jlong JNICALL Java_mpi_Group_range_1incl(JNIEnv *env, jobject jthis, jobjectArray ranges)
{
int i;
int n=(*env)->GetArrayLength(env,ranges);
jboolean isCopy=JNI_TRUE;
MPI_Group newgroup;
/* jint **rngs=(jint**)calloc(n,sizeof(jint[3])); */
int (*rngs) [3] =(int (*) [3])calloc(n,sizeof(int[3]));
jintArray *jrngs=(jobject*)calloc(n,sizeof(jintArray));
ompi_java_clearFreeList(env) ;
for(i=0;i<n;i++) {
jint *vec ;
jrngs[i]=(*env)->GetObjectArrayElement(env,ranges,i);
vec=(*env)->GetIntArrayElements(env, jrngs[i],&isCopy);
rngs [i] [0] = vec [0] ;
rngs [i] [1] = vec [1] ;
rngs [i] [2] = vec [2] ;
(*env)->ReleaseIntArrayElements(env,jrngs[i],vec,0);
}
MPI_Group_range_incl((MPI_Group)((*env)->GetLongField(env,jthis,ompi_java.GrouphandleID)),
n,rngs,&newgroup);
free(rngs);
free(jrngs);
return (jlong)newgroup;
}
/*
* Class: mpi_Group
* Method: range_excl
* Signature: ([[I)J
*/
JNIEXPORT jlong JNICALL Java_mpi_Group_range_1excl(JNIEnv *env, jobject jthis, jobjectArray ranges)
{
int i;
int n=(*env)->GetArrayLength(env,ranges);
jboolean isCopy=JNI_TRUE;
MPI_Group newgroup;
/* jint **rngs=(jint**)calloc(n,sizeof(jint*)); */
int (*rngs) [3] =(int (*) [3])calloc(n,sizeof(int[3]));
jintArray *jrngs=(jobject*)calloc(n,sizeof(jintArray));
ompi_java_clearFreeList(env) ;
for(i=0;i<n;i++) {
jint* vec;
jrngs[i]=(*env)->GetObjectArrayElement(env,ranges,i);
vec=(*env)->GetIntArrayElements(env,
jrngs[i],&isCopy);
rngs [i] [0] = vec [0] ;
rngs [i] [1] = vec [1] ;
rngs [i] [2] = vec [2] ;
(*env)->ReleaseIntArrayElements(env,jrngs[i],vec,0);
}
MPI_Group_range_excl((MPI_Group)((*env)->GetLongField(env,jthis,ompi_java.GrouphandleID)),
n, rngs,&newgroup);
free(rngs);
free(jrngs);
return (jlong)newgroup;
}
ompi/mpi/java/c/mpi_Intercomm.c Normal file
@@ -0,0 +1,81 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
* File : mpi_Intercomm.c
* Headerfile : mpi_Intercomm.h
* Author : Xinying Li
* Created : Thu Apr 9 12:22:15 1998
* Revision : $Revision: 1.3 $
* Updated : $Date: 2003/01/16 16:39:34 $
* Copyright: Northeast Parallel Architectures Center
* at Syracuse University 1998
*/
#include "ompi_config.h"
#ifdef HAVE_TARGETCONDITIONALS_H
#include <TargetConditionals.h>
#endif
#include "mpi.h"
#include "mpi_Intercomm.h"
#include "mpiJava.h"
/*
* Class: mpi_Intercomm
* Method: Remote_size
* Signature: ()I
*/
JNIEXPORT jint JNICALL Java_mpi_Intercomm_Remote_1size(JNIEnv *env, jobject jthis)
{
int size;
ompi_java_clearFreeList(env) ;
MPI_Comm_remote_size((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)),
&size);
return size;
}
/*
* Class: mpi_Intercomm
* Method: remote_group
* Signature: ()J
*/
JNIEXPORT jlong JNICALL Java_mpi_Intercomm_remote_1group(JNIEnv *env, jobject jthis)
{
MPI_Group group;
ompi_java_clearFreeList(env) ;
MPI_Comm_remote_group((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)),
&group);
return (jlong)group;
}
/*
* Class: mpi_Intercomm
* Method: merge
* Signature: (Z)Lmpi/Intracomm;
*/
JNIEXPORT jlong JNICALL Java_mpi_Intercomm_merge(JNIEnv *env, jobject jthis, jboolean high)
{
MPI_Comm newintracomm;
ompi_java_clearFreeList(env) ;
MPI_Intercomm_merge((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)), high,
&newintracomm);
return (jlong)newintracomm;
}
ompi/mpi/java/c/mpi_Intracomm.c Normal file
@@ -0,0 +1,827 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
* File : mpi_Intracomm.c
* Headerfile : mpi_Intracomm.h
* Author : Xinying Li, Bryan Carpenter
* Created : Thu Apr 9 12:22:15 1998
* Revision : $Revision: 1.10 $
* Updated : $Date: 2003/01/16 16:39:34 $
* Copyright: Northeast Parallel Architectures Center
* at Syracuse University 1998
*/
#include "ompi_config.h"
#include <stdlib.h>
#ifdef HAVE_TARGETCONDITIONALS_H
#include <TargetConditionals.h>
#endif
#include "mpi.h"
#include "mpi_Comm.h"
#include "mpi_Intracomm.h"
#include "mpiJava.h"
/* Collectives are not particularly amenable to the strategies used
* in point-to-point to reduce copying when the GC does not support pinning.
*
* It's possibly doable, but may be too complex to be worth the effort.
* A general problem is that the relation between positions in the
* original buffer and positions in a packed buffer is not very
* well-defined.
*
* Collectives that use `Op' have an additional problem that
* `MPI_User_function' prototype expects the actual user-specified
* datatype as an argument. Packing, then operating on data transferred
* as a more primitive datatype is not generally correct.
*/
extern MPI_Datatype Dts[] ;
/*
* Class: mpi_Intracomm
* Method: split
* Signature: (II)J
*/
JNIEXPORT jlong JNICALL Java_mpi_Intracomm_split(JNIEnv *env, jobject jthis,
jint colour, jint key)
{
MPI_Comm newcomm;
ompi_java_clearFreeList(env) ;
MPI_Comm_split((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)),
colour, key, &newcomm);
return (jlong)newcomm;
}
/*
* Class: mpi_Intracomm
* Method: creat
* Signature: (Lmpi/Group;)J
*/
JNIEXPORT jlong JNICALL Java_mpi_Intracomm_creat(JNIEnv *env, jobject jthis,
jobject group)
{
MPI_Comm newcomm;
ompi_java_clearFreeList(env) ;
MPI_Comm_create((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)),
(MPI_Group)((*env)->GetLongField(env,group,ompi_java.GrouphandleID)),
&newcomm);
return (jlong)newcomm;
}
/*
* Class: mpi_Intracomm
* Method: Barrier
* Signature: ()V
*/
JNIEXPORT void JNICALL Java_mpi_Intracomm_Barrier(JNIEnv *env, jobject jthis)
{
ompi_java_clearFreeList(env) ;
MPI_Barrier((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)));
}
/*
* Class: mpi_Intracomm
* Method: GetCart
* Signature: ([I[ZZ)J
*/
JNIEXPORT jlong JNICALL Java_mpi_Intracomm_GetCart(JNIEnv *env, jobject jthis,
jintArray dims, jbooleanArray periods,
jboolean reorder)
{
MPI_Comm cart;
int ndims=(*env)->GetArrayLength(env,dims);
jboolean isCopy=JNI_TRUE;
jint *ds; jboolean *ps;
int i;
int *int_re_ds=(int*)calloc((*env)->GetArrayLength(env,periods),
sizeof(int));
ompi_java_clearFreeList(env) ;
ds=(*env)->GetIntArrayElements(env,dims,&isCopy);
ps=(*env)->GetBooleanArrayElements(env,periods,&isCopy);
for(i=0;i<(*env)->GetArrayLength(env,periods);i++)
if(ps[i]==JNI_TRUE)
int_re_ds[i]=1;
else
int_re_ds[i]=0;
MPI_Cart_create((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)),
ndims, (int*)ds, int_re_ds, reorder, &cart);
(*env)->ReleaseIntArrayElements(env,dims,ds,0);
(*env)->ReleaseBooleanArrayElements(env,periods,ps,0);
free(int_re_ds);
return (jlong)cart;
}
/*
* Class: mpi_Intracomm
* Method: GetGraph
* Signature: ([I[IZ)J
*/
JNIEXPORT jlong JNICALL Java_mpi_Intracomm_GetGraph(JNIEnv *env, jobject jthis,
jintArray index, jintArray edges,
jboolean reorder)
{
MPI_Comm graph;
int nnodes=(*env)->GetArrayLength(env,index);
jboolean isCopy=JNI_TRUE;
jint *ind, *edg;
ompi_java_clearFreeList(env) ;
ind=(*env)->GetIntArrayElements(env,index,&isCopy);
edg=(*env)->GetIntArrayElements(env,edges,&isCopy);
MPI_Graph_create((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)),
nnodes, (int*)ind, (int*)edg, reorder, &graph);
(*env)->ReleaseIntArrayElements(env,index,ind,0);
(*env)->ReleaseIntArrayElements(env,edges,edg,0);
return (jlong)graph;
}
/*
* Class: mpi_Intracomm
* Method: bcast
* Signature: (Ljava/lang/Object;IILmpi/Datatype;I)V
*/
JNIEXPORT void JNICALL Java_mpi_Intracomm_bcast(JNIEnv *env, jobject jthis,
jobject buf, jint offset,
jint count, jobject type, jint root)
{
MPI_Comm mpi_comm =
(MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)) ;
MPI_Datatype mpi_type =
(MPI_Datatype)((*env)->GetLongField(env,type,ompi_java.DatatypehandleID)) ;
int baseType = (*env)->GetIntField(env, type, ompi_java.DatatypebaseTypeID) ;
void *bufptr ;
#ifdef GC_DOES_PINNING
void *bufbase ;
ompi_java_clearFreeList(env) ;
bufptr = ompi_java_getBufPtr(&bufbase, env, buf, baseType, offset) ;
MPI_Bcast(bufptr, count, mpi_type, root, mpi_comm) ;
ompi_java_releaseBufPtr(env, buf, bufbase, baseType) ;
#else
int size ;
ompi_java_clearFreeList(env) ;
bufptr = ompi_java_getMPIBuf(&size, env, buf, offset,
count, mpi_type, mpi_comm, baseType) ;
MPI_Bcast(bufptr, size, MPI_BYTE, root, mpi_comm) ;
ompi_java_releaseMPIBuf(env, buf, offset, count, mpi_type, mpi_comm,
bufptr, size, baseType) ;
#endif /* GC_DOES_PINNING */
}
/*
* Class: mpi_Intracomm
* Method: Gather
* Signature:
(Ljava/lang/Object;IILmpi/Datatype;Ljava/lang/Object;IILmpi/Datatype;I)V
*/
JNIEXPORT void JNICALL Java_mpi_Intracomm_gather(JNIEnv *env, jobject jthis,
jobject sendbuf, jint sendoffset,
jint sendcount, jobject sendtype,
jobject recvbuf, jint recvoffset,
jint recvcount, jobject recvtype,
jint root)
{
int id ;
MPI_Comm mpi_comm =
(MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)) ;
MPI_Datatype mpi_stype =
(MPI_Datatype)((*env)->GetLongField(env,sendtype,ompi_java.DatatypehandleID)) ;
MPI_Datatype mpi_rtype = (MPI_Datatype)
((*env)->GetLongField(env, recvtype, ompi_java.DatatypehandleID)) ;
int sbaseType = (*env)->GetIntField(env, sendtype, ompi_java.DatatypebaseTypeID) ;
int rbaseType ;
void *sendptr, *recvptr = NULL;
void *sbufbase, *rbufbase ;
ompi_java_clearFreeList(env) ;
MPI_Comm_rank(mpi_comm, &id) ;
if(id == root) {
/*
* In principle need the "id == root" check here and elsewhere for
* correctness, in case arguments that are not supposed to be
* significant except on root are legitimately passed in as `null',
* say. Shouldn't produce null pointer exception.
*
* (However in this case MPICH complains if `mpi_rtype' is not defined
* in all processes, notwithstanding what the spec says.)
*/
rbaseType = (*env)->GetIntField(env, recvtype, ompi_java.DatatypebaseTypeID) ;
recvptr = ompi_java_getBufPtr(&rbufbase,
env, recvbuf, rbaseType, recvoffset) ;
}
sendptr = ompi_java_getBufPtr(&sbufbase, env, sendbuf, sbaseType, sendoffset) ;
MPI_Gather(sendptr, sendcount, mpi_stype,
recvptr, recvcount, mpi_rtype, root, mpi_comm) ;
ompi_java_releaseBufPtr(env, sendbuf, sbufbase, sbaseType) ;
if (id == root) {
ompi_java_releaseBufPtr(env, recvbuf, rbufbase, rbaseType);
}
}
/*
* Class: mpi_Intracomm
* Method: Gatherv
* Signature:
(Ljava/lang/Object;IILmpi/Datatype;Ljava/lang/Object;I[I[ILmpi/Datatype;I)V
*/
JNIEXPORT void JNICALL Java_mpi_Intracomm_gatherv(JNIEnv *env, jobject jthis,
jobject sendbuf, jint sendoffset,
jint sendcount, jobject sendtype,
jobject recvbuf, jint recvoffset,
jintArray recvcounts, jintArray displs,
jobject recvtype, jint root)
{
int id ;
jint *rcount = NULL, *dps = NULL;
jboolean isCopy ;
MPI_Comm mpi_comm =
(MPI_Comm) ((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)) ;
MPI_Datatype mpi_stype = (MPI_Datatype)
((*env)->GetLongField(env,sendtype,ompi_java.DatatypehandleID)) ;
MPI_Datatype mpi_rtype = mpi_stype;
int sbaseType = (*env)->GetIntField(env, sendtype, ompi_java.DatatypebaseTypeID) ;
int rbaseType = 0;
void *sendptr, *recvptr = NULL;
void *sbufbase, *rbufbase ;
ompi_java_clearFreeList(env) ;
MPI_Comm_rank(mpi_comm, &id) ;
if(id == root) {
rcount=(*env)->GetIntArrayElements(env,recvcounts,&isCopy);
dps=(*env)->GetIntArrayElements(env,displs,&isCopy);
mpi_rtype = (MPI_Datatype)
((*env)->GetLongField(env,recvtype,ompi_java.DatatypehandleID)) ;
rbaseType = (*env)->GetIntField(env, recvtype, ompi_java.DatatypebaseTypeID) ;
recvptr = ompi_java_getBufPtr(&rbufbase,
env, recvbuf, rbaseType, recvoffset) ;
}
sendptr = ompi_java_getBufPtr(&sbufbase, env, sendbuf, sbaseType, sendoffset) ;
MPI_Gatherv(sendptr, sendcount, mpi_stype,
recvptr, (int*) rcount, (int*) dps, mpi_rtype,
root, mpi_comm) ;
ompi_java_releaseBufPtr(env, sendbuf, sbufbase, sbaseType) ;
if (id == root) {
ompi_java_releaseBufPtr(env, recvbuf, rbufbase, rbaseType);
}
if (id == root) {
(*env)->ReleaseIntArrayElements(env,recvcounts,rcount,JNI_ABORT);
(*env)->ReleaseIntArrayElements(env,displs,dps,JNI_ABORT);
}
}
/*
* Class: mpi_Intracomm
* Method: Scatter
* Signature:
(Ljava/lang/Object;IILmpi/Datatype;Ljava/lang/Object;IILmpi/Datatype;I)V
*/
JNIEXPORT void JNICALL Java_mpi_Intracomm_scatter(JNIEnv *env, jobject jthis,
jobject sendbuf, jint sendoffset,
jint sendcount, jobject sendtype,
jobject recvbuf, jint recvoffset,
jint recvcount, jobject recvtype,
jint root)
{
int id ;
MPI_Comm mpi_comm =
(MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)) ;
MPI_Datatype mpi_stype =
(MPI_Datatype) ((*env)->GetLongField(env,sendtype,ompi_java.DatatypehandleID)) ;
/* MPICH complains if `mpi_stype' is not defined
* in all processes, notwithstanding what the spec says. */
MPI_Datatype mpi_rtype =
(MPI_Datatype)((*env)->GetLongField(env,recvtype,ompi_java.DatatypehandleID)) ;
int sbaseType ;
int rbaseType = (*env)->GetIntField(env, recvtype, ompi_java.DatatypebaseTypeID) ;
void *sendptr = NULL, *recvptr ;
void *sbufbase, *rbufbase ;
ompi_java_clearFreeList(env) ;
MPI_Comm_rank(mpi_comm, &id) ;
if (id == root) {
sbaseType = (*env)->GetIntField(env, sendtype, ompi_java.DatatypebaseTypeID) ;
}
recvptr = ompi_java_getBufPtr(&rbufbase, env, recvbuf, rbaseType, recvoffset) ;
if (id == root) {
sendptr = ompi_java_getBufPtr(&sbufbase,
env, sendbuf, sbaseType, sendoffset);
}
MPI_Scatter(sendptr, sendcount, mpi_stype,
recvptr, recvcount, mpi_rtype, root, mpi_comm) ;
if (id == root) {
ompi_java_releaseBufPtr(env, sendbuf, sbufbase, sbaseType);
}
ompi_java_releaseBufPtr(env, recvbuf, rbufbase, rbaseType);
}
/*
* Class: mpi_Intracomm
* Method: Scatterv
* Signature:
(Ljava/lang/Object;II[ILmpi/Datatype;Ljava/lang/Object;I[ILmpi/Datatype;I)V
*/
JNIEXPORT void JNICALL Java_mpi_Intracomm_scatterv(JNIEnv *env, jobject jthis,
jobject sendbuf, jint sendoffset,
jintArray sendcount, jintArray displs,
jobject sendtype,
jobject recvbuf, jint recvoffset,
jint recvcount, jobject recvtype,
jint root)
{
int id ;
jint *scount = NULL, *dps = NULL;
jboolean isCopy ;
MPI_Comm mpi_comm =
(MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)) ;
MPI_Datatype mpi_rtype =
(MPI_Datatype)((*env)->GetLongField(env,recvtype,ompi_java.DatatypehandleID)) ;
MPI_Datatype mpi_stype = mpi_rtype;
int sbaseType ;
int rbaseType = (*env)->GetIntField(env, recvtype, ompi_java.DatatypebaseTypeID) ;
void *sendptr = NULL, *recvptr ;
void *sbufbase, *rbufbase ;
ompi_java_clearFreeList(env) ;
MPI_Comm_rank(mpi_comm, &id) ;
if(id == root) {
mpi_stype = (MPI_Datatype)
((*env)->GetLongField(env,sendtype,ompi_java.DatatypehandleID)) ;
sbaseType = (*env)->GetIntField(env, sendtype, ompi_java.DatatypebaseTypeID) ;
scount = (*env)->GetIntArrayElements(env,sendcount,&isCopy);
dps = (*env)->GetIntArrayElements(env,displs,&isCopy);
}
recvptr = ompi_java_getBufPtr(&rbufbase, env, recvbuf, rbaseType, recvoffset) ;
if (id == root) {
sendptr = ompi_java_getBufPtr(&sbufbase,
env, sendbuf, sbaseType, sendoffset);
}
MPI_Scatterv(sendptr, (int*) scount, (int*) dps, mpi_stype,
recvptr, recvcount, mpi_rtype,
root, mpi_comm) ;
if (id == root) {
ompi_java_releaseBufPtr(env, sendbuf, sbufbase, sbaseType);
}
ompi_java_releaseBufPtr(env, recvbuf, rbufbase, rbaseType) ;
if (id == root) {
(*env)->ReleaseIntArrayElements(env, sendcount, scount, JNI_ABORT);
(*env)->ReleaseIntArrayElements(env, displs, dps, JNI_ABORT);
}
}
/*
* Class: mpi_Intracomm
* Method: Allgather
* Signature:
(Ljava/lang/Object;IILmpi/Datatype;Ljava/lang/Object;IILmpi/Datatype;)V
*/
JNIEXPORT void JNICALL Java_mpi_Intracomm_allgather(JNIEnv *env, jobject jthis,
jobject sendbuf, jint sendoffset,
jint sendcount, jobject sendtype,
jobject recvbuf, jint recvoffset,
jint recvcount, jobject recvtype)
{
MPI_Comm mpi_comm =
(MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)) ;
MPI_Datatype mpi_stype = (MPI_Datatype)
((*env)->GetLongField(env,sendtype,ompi_java.DatatypehandleID)) ;
MPI_Datatype mpi_rtype = (MPI_Datatype)
((*env)->GetLongField(env, recvtype, ompi_java.DatatypehandleID)) ;
int sbaseType = (*env)->GetIntField(env, sendtype, ompi_java.DatatypebaseTypeID) ;
int rbaseType = (*env)->GetIntField(env, recvtype, ompi_java.DatatypebaseTypeID) ;
void *sendptr, *recvptr ;
void *sbufbase, *rbufbase ;
ompi_java_clearFreeList(env) ;
recvptr = ompi_java_getBufPtr(&rbufbase, env, recvbuf, rbaseType, recvoffset) ;
sendptr = ompi_java_getBufPtr(&sbufbase, env, sendbuf, sbaseType, sendoffset) ;
MPI_Allgather(sendptr, sendcount, mpi_stype,
recvptr, recvcount, mpi_rtype, mpi_comm) ;
ompi_java_releaseBufPtr(env, sendbuf, sbufbase, sbaseType) ;
ompi_java_releaseBufPtr(env, recvbuf, rbufbase, rbaseType) ;
}
/*
* Class: mpi_Intracomm
* Method: Allgatherv
* Signature:
(Ljava/lang/Object;IILmpi/Datatype;Ljava/lang/Object;I[I[ILmpi/Datatype;)V
*/
JNIEXPORT void JNICALL Java_mpi_Intracomm_allgatherv(JNIEnv *env, jobject jthis,
jobject sendbuf, jint sendoffset,
jint sendcount,jobject sendtype,
jobject recvbuf, jint recvoffset,
jintArray recvcount, jintArray displs,
jobject recvtype)
{
jint *rcount, *dps;
jboolean isCopy ;
MPI_Comm mpi_comm =
(MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)) ;
MPI_Datatype mpi_stype = (MPI_Datatype)
((*env)->GetLongField(env,sendtype,ompi_java.DatatypehandleID)) ;
MPI_Datatype mpi_rtype = (MPI_Datatype)
((*env)->GetLongField(env, recvtype, ompi_java.DatatypehandleID)) ;
int sbaseType = (*env)->GetIntField(env, sendtype, ompi_java.DatatypebaseTypeID) ;
int rbaseType = (*env)->GetIntField(env, recvtype, ompi_java.DatatypebaseTypeID) ;
void *sendptr, *recvptr ;
void *sbufbase, *rbufbase ;
ompi_java_clearFreeList(env) ;
rcount = (*env)->GetIntArrayElements(env, recvcount, &isCopy);
dps = (*env)->GetIntArrayElements(env, displs, &isCopy);
recvptr = ompi_java_getBufPtr(&rbufbase, env, recvbuf, rbaseType, recvoffset) ;
sendptr = ompi_java_getBufPtr(&sbufbase, env, sendbuf, sbaseType, sendoffset) ;
MPI_Allgatherv(sendptr, sendcount, mpi_stype,
recvptr, (int*) rcount, (int*) dps, mpi_rtype,
mpi_comm) ;
ompi_java_releaseBufPtr(env, sendbuf, sbufbase, sbaseType) ;
ompi_java_releaseBufPtr(env, recvbuf, rbufbase, rbaseType) ;
(*env)->ReleaseIntArrayElements(env, recvcount, rcount, JNI_ABORT);
(*env)->ReleaseIntArrayElements(env, displs, dps, JNI_ABORT);
}
/*
* Class: mpi_Intracomm
* Method: Alltoall
* Signature:
(Ljava/lang/Object;IILmpi/Datatype;Ljava/lang/Object;IILmpi/Datatype;)V
*/
JNIEXPORT void JNICALL Java_mpi_Intracomm_alltoall(JNIEnv *env, jobject jthis,
jobject sendbuf, jint sendoffset,
jint sendcount, jobject sendtype,
jobject recvbuf, jint recvoffset,
jint recvcount, jobject recvtype)
{
MPI_Comm mpi_comm =
(MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)) ;
MPI_Datatype mpi_stype = (MPI_Datatype)
((*env)->GetLongField(env,sendtype,ompi_java.DatatypehandleID)) ;
MPI_Datatype mpi_rtype = (MPI_Datatype)
((*env)->GetLongField(env, recvtype, ompi_java.DatatypehandleID)) ;
int sbaseType = (*env)->GetIntField(env, sendtype, ompi_java.DatatypebaseTypeID) ;
int rbaseType = (*env)->GetIntField(env, recvtype, ompi_java.DatatypebaseTypeID) ;
void *sendptr, *recvptr ;
void *sbufbase, *rbufbase ;
ompi_java_clearFreeList(env) ;
recvptr = ompi_java_getBufPtr(&rbufbase, env, recvbuf, rbaseType, recvoffset) ;
sendptr = ompi_java_getBufPtr(&sbufbase, env, sendbuf, sbaseType, sendoffset) ;
MPI_Alltoall(sendptr, sendcount, mpi_stype,
recvptr, recvcount, mpi_rtype, mpi_comm) ;
ompi_java_releaseBufPtr(env, sendbuf, sbufbase, sbaseType) ;
ompi_java_releaseBufPtr(env, recvbuf, rbufbase, rbaseType) ;
}
/*
* Class: mpi_Intracomm
* Method: Alltoallv
* Signature:
(Ljava/lang/Object;II[ILmpi/Datatype;Ljava/lang/Object;I[I[ILmpi/Datatype;)V
*/
JNIEXPORT void JNICALL Java_mpi_Intracomm_alltoallv(JNIEnv *env, jobject jthis,
jobject sendbuf, jint sendoffset, jintArray sendcount,
jintArray sdispls, jobject sendtype,
jobject recvbuf, jint recvoffset, jintArray recvcount,
jintArray rdispls, jobject recvtype)
{
jint *rcount, *scount, *sdps, *rdps ;
jboolean isCopy ;
MPI_Comm mpi_comm =
(MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)) ;
MPI_Datatype mpi_stype = (MPI_Datatype)
((*env)->GetLongField(env,sendtype,ompi_java.DatatypehandleID)) ;
MPI_Datatype mpi_rtype = (MPI_Datatype)
((*env)->GetLongField(env, recvtype, ompi_java.DatatypehandleID)) ;
int sbaseType = (*env)->GetIntField(env, sendtype, ompi_java.DatatypebaseTypeID) ;
int rbaseType = (*env)->GetIntField(env, recvtype, ompi_java.DatatypebaseTypeID) ;
void *sendptr, *recvptr ;
void *sbufbase, *rbufbase ;
ompi_java_clearFreeList(env) ;
scount=(*env)->GetIntArrayElements(env,sendcount,&isCopy);
rcount=(*env)->GetIntArrayElements(env,recvcount,&isCopy);
sdps=(*env)->GetIntArrayElements(env,sdispls,&isCopy);
rdps=(*env)->GetIntArrayElements(env,rdispls,&isCopy);
recvptr = ompi_java_getBufPtr(&rbufbase, env, recvbuf, rbaseType, recvoffset) ;
sendptr = ompi_java_getBufPtr(&sbufbase, env, sendbuf, sbaseType, sendoffset) ;
MPI_Alltoallv(sendptr, (int*) scount, (int*) sdps, mpi_stype,
recvptr, (int*) rcount, (int*) rdps, mpi_rtype,
mpi_comm) ;
ompi_java_releaseBufPtr(env, sendbuf, sbufbase, sbaseType) ;
ompi_java_releaseBufPtr(env, recvbuf, rbufbase, rbaseType) ;
(*env)->ReleaseIntArrayElements(env,recvcount,rcount,JNI_ABORT);
(*env)->ReleaseIntArrayElements(env,sendcount,scount,JNI_ABORT);
(*env)->ReleaseIntArrayElements(env,sdispls,sdps,JNI_ABORT);
(*env)->ReleaseIntArrayElements(env,rdispls,rdps,JNI_ABORT);
}
/*
* Class: mpi_Intracomm
* Method: Reduce
* Signature:
(Ljava/lang/Object;ILjava/lang/Object;IILmpi/Datatype;Lmpi/Op;I)V
*/
JNIEXPORT void JNICALL Java_mpi_Intracomm_reduce(JNIEnv *env, jobject jthis,
jobject sendbuf, jint sendoffset,
jobject recvbuf, jint recvoffset,
jint count, jobject type, jobject op, jint root)
{
int id ;
MPI_Comm mpi_comm =
(MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)) ;
MPI_Datatype mpi_type =
(MPI_Datatype)((*env)->GetLongField(env,type,ompi_java.DatatypehandleID)) ;
int baseType = (*env)->GetIntField(env, type, ompi_java.DatatypebaseTypeID) ;
void *sendptr, *recvptr = NULL;
void *sbufbase, *rbufbase ;
ompi_java_clearFreeList(env) ;
MPI_Comm_rank(mpi_comm, &id) ;
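    /* Only the root needs a receive buffer: MPI_Reduce ignores recvbuf
     * on all non-root ranks. */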
if (id == root) {
recvptr = ompi_java_getBufPtr(&rbufbase,
env, recvbuf, baseType, recvoffset);
}
sendptr = ompi_java_getBufPtr(&sbufbase, env, sendbuf, baseType, sendoffset) ;
MPI_Reduce(sendptr, recvptr, count, mpi_type,
(MPI_Op)((*env)->GetLongField(env,op,ompi_java.OphandleID)),
root, mpi_comm) ;
ompi_java_releaseBufPtr(env, sendbuf, sbufbase, baseType) ;
if (id == root) {
ompi_java_releaseBufPtr(env, recvbuf, rbufbase, baseType);
}
}
/*
* Class: mpi_Intracomm
* Method: Allreduce
* Signature:
(Ljava/lang/Object;ILjava/lang/Object;IILmpi/Datatype;Lmpi/Op;)V
*/
JNIEXPORT void JNICALL Java_mpi_Intracomm_allreduce(JNIEnv *env, jobject jthis,
jobject sendbuf, jint sendoffset,
jobject recvbuf, jint recvoffset,
jint count, jobject type, jobject op)
{
MPI_Comm mpi_comm =
(MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)) ;
MPI_Datatype mpi_type =
(MPI_Datatype)((*env)->GetLongField(env,type,ompi_java.DatatypehandleID)) ;
int baseType = (*env)->GetIntField(env, type, ompi_java.DatatypebaseTypeID) ;
void *sendptr, *recvptr ;
void *sbufbase, *rbufbase ;
ompi_java_clearFreeList(env) ;
recvptr = ompi_java_getBufPtr(&rbufbase, env, recvbuf, baseType, recvoffset) ;
sendptr = ompi_java_getBufPtr(&sbufbase, env, sendbuf, baseType, sendoffset) ;
MPI_Allreduce(sendptr, recvptr, count, mpi_type,
(MPI_Op)((*env)->GetLongField(env,op,ompi_java.OphandleID)),
mpi_comm) ;
ompi_java_releaseBufPtr(env, sendbuf, sbufbase, baseType) ;
ompi_java_releaseBufPtr(env, recvbuf, rbufbase, baseType) ;
}
/*
* Class: mpi_Intracomm
* Method: Reduce_scatter
* Signature:
(Ljava/lang/Object;ILjava/lang/Object;I[ILmpi/Datatype;Lmpi/Op;)V
*/
JNIEXPORT void JNICALL Java_mpi_Intracomm_reduce_1scatter(JNIEnv *env,
jobject jthis,
jobject sendbuf, jint sendoffset,
jobject recvbuf, jint recvoffset,
jintArray recvcount,
jobject type, jobject op)
{
jint *rcount;
jboolean isCopy ;
MPI_Comm mpi_comm =
(MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)) ;
MPI_Datatype mpi_type =
(MPI_Datatype)((*env)->GetLongField(env,type,ompi_java.DatatypehandleID)) ;
int baseType = (*env)->GetIntField(env, type, ompi_java.DatatypebaseTypeID) ;
void *sendptr, *recvptr ;
void *sbufbase, *rbufbase ;
ompi_java_clearFreeList(env) ;
rcount=(*env)->GetIntArrayElements(env,recvcount,&isCopy);
recvptr = ompi_java_getBufPtr(&rbufbase, env, recvbuf, baseType, recvoffset) ;
sendptr = ompi_java_getBufPtr(&sbufbase, env, sendbuf, baseType, sendoffset) ;
MPI_Reduce_scatter(sendptr, recvptr, (int*) rcount, mpi_type,
(MPI_Op)((*env)->GetLongField(env,op,ompi_java.OphandleID)),
mpi_comm) ;
ompi_java_releaseBufPtr(env, sendbuf, sbufbase, baseType) ;
ompi_java_releaseBufPtr(env, recvbuf, rbufbase, baseType) ;
(*env)->ReleaseIntArrayElements(env,recvcount,rcount,JNI_ABORT);
}
/*
* Class: mpi_Intracomm
* Method: Reduce_local
* Signature:
 (Ljava/lang/Object;Ljava/lang/Object;ILmpi/Datatype;Lmpi/Op;)V
*/
JNIEXPORT void JNICALL Java_mpi_Intracomm_reduce_1local(JNIEnv *env, jobject jthis,
jobject inbuf, jobject inoutbuf,
jint count, jobject type,
jobject op)
{
MPI_Datatype mpi_type =
(MPI_Datatype)((*env)->GetLongField(env,type,ompi_java.DatatypehandleID)) ;
int baseType = (*env)->GetIntField(env, type, ompi_java.DatatypebaseTypeID) ;
void *inptr, *inoutptr = NULL;
void *inbase, *inoutbase ;
ompi_java_clearFreeList(env) ;
inptr = ompi_java_getBufPtr(&inbase, env, inbuf, baseType, 0) ;
inoutptr = ompi_java_getBufPtr(&inoutbase, env, inoutbuf, baseType, 0) ;
MPI_Reduce_local(inptr, inoutptr, count, mpi_type,
(MPI_Op)((*env)->GetLongField(env,op,ompi_java.OphandleID))) ;
ompi_java_releaseBufPtr(env, inbuf, inbase, baseType) ;
ompi_java_releaseBufPtr(env, inoutbuf, inoutbase, baseType) ;
}
/*
* Class: mpi_Intracomm
* Method: Scan
* Signature:
(Ljava/lang/Object;ILjava/lang/Object;IILmpi/Datatype;Lmpi/Op;)V
*/
JNIEXPORT void JNICALL Java_mpi_Intracomm_scan(JNIEnv *env, jobject jthis,
jobject sendbuf, jint sendoffset,
jobject recvbuf, jint recvoffset,
jint count, jobject type, jobject op)
{
MPI_Comm mpi_comm =
(MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)) ;
MPI_Datatype mpi_type =
(MPI_Datatype)((*env)->GetLongField(env,type,ompi_java.DatatypehandleID)) ;
int baseType = (*env)->GetIntField(env, type, ompi_java.DatatypebaseTypeID) ;
void *sendptr, *recvptr ;
void *sbufbase, *rbufbase ;
ompi_java_clearFreeList(env) ;
recvptr = ompi_java_getBufPtr(&rbufbase, env, recvbuf, baseType, recvoffset) ;
sendptr = ompi_java_getBufPtr(&sbufbase, env, sendbuf, baseType, sendoffset) ;
MPI_Scan(sendptr, recvptr, count, mpi_type,
(MPI_Op)((*env)->GetLongField(env,op,ompi_java.OphandleID)),
mpi_comm) ;
ompi_java_releaseBufPtr(env, sendbuf, sbufbase, baseType) ;
ompi_java_releaseBufPtr(env, recvbuf, rbufbase, baseType) ;
}
ompi/mpi/java/c/mpi_MPI.c Normal file
@ -0,0 +1,348 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
* File : mpi_MPI.c
* Headerfile : mpi_MPI.h
* Author : SungHoon Ko, Xinying Li (contributions from MAEDA Atusi)
* Created : Thu Apr 9 12:22:15 1998
* Revision : $Revision: 1.17 $
* Updated : $Date: 2003/01/17 01:50:37 $
* Copyright: Northeast Parallel Architectures Center
* at Syracuse University 1998
*/
#include "ompi_config.h"
#include <stdio.h>
#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#ifdef HAVE_STDLIB_H
#include <stdlib.h>
#endif
#ifdef HAVE_STRING_H
#include <string.h>
#endif
#ifdef HAVE_TARGETCONDITIONALS_H
#include <TargetConditionals.h>
#endif
#ifdef HAVE_SYS_STAT_H
#include <sys/stat.h>
#endif
#if OPAL_WANT_LIBLTDL
#ifndef __WINDOWS__
#if OPAL_LIBLTDL_INTERNAL
#include "opal/libltdl/ltdl.h"
#else
#include "ltdl.h"
#endif
#else
#include "ltdl.h"
#endif
#endif
#include "opal/util/output.h"
#include "mpi.h"
#include "mpi_MPI.h"
#include "mpiJava.h"
ompi_java_globals_t ompi_java;
static int len = 0;
static char** sargs = 0;
/*
* Class: mpi_MPI
* Method: loadGlobalLibraries
*
* Java implementations typically default to loading dynamic
* libraries strictly to a local namespace. This breaks the
* Open MPI model where components reference back up to the
* base libraries (e.g., libmpi) as it requires that the
* symbols in those base libraries be globally available.
*
* One option, of course, is to build with --disable-dlopen.
 * However, this would preclude the ability to pick up 3rd-party
 * binary plug-ins at execution time. That is a valuable capability,
 * and losing it would weigh against use of the Java bindings.
*
* The other option is to explicitly dlopen libmpi ourselves
* and instruct dlopen to add all those symbols to the global
* namespace. This must be done prior to calling any MPI
* function (e.g., MPI_Init) or else Java will have already
* loaded the library to the local namespace. So create a
* special JNI entry point that just loads the required libmpi
* to the global namespace and call it first (see MPI.java),
* thus making all symbols available to subsequent dlopen calls
* when opening OMPI components.
*/
JNIEXPORT jboolean JNICALL Java_mpi_MPI_loadGlobalLibraries(JNIEnv *env, jclass obj)
{
#if OPAL_WANT_LIBLTDL
lt_dladvise advise;
if (lt_dlinit() != 0) {
return JNI_FALSE;
}
#if OPAL_HAVE_LTDL_ADVISE
/* open the library into the global namespace */
if (lt_dladvise_init(&advise)) {
return JNI_FALSE;
}
if (lt_dladvise_ext(&advise)) {
lt_dladvise_destroy(&advise);
return JNI_FALSE;
}
if (lt_dladvise_global(&advise)) {
lt_dladvise_destroy(&advise);
return JNI_FALSE;
}
/* we don't care about the return value
* on dlopen - it might return an error
* because the lib is already loaded,
* depending on the way we were built
*/
lt_dlopenadvise("libmpi", advise);
lt_dladvise_destroy(&advise);
return JNI_TRUE;
#endif
/* need to balance the ltdl inits */
lt_dlexit();
/* if we don't have advise, then we are hosed */
return JNI_FALSE;
#endif
/* if dlopen was disabled, then all symbols
* should have been pulled up into the libraries,
* so we don't need to do anything as the symbols
* are already available
*/
return JNI_TRUE;
}
/*
* Class: mpi_MPI
* Method: InitNative
* Signature: ([Ljava/lang/String;)[Ljava/lang/String;
*/
JNIEXPORT jobjectArray JNICALL Java_mpi_MPI_InitNative(JNIEnv *env, jclass obj, jobjectArray argv)
{
    jsize i;
    jstring jc;
    jclass string;
    jobject value;
    const char* utf;
    len = (*env)->GetArrayLength(env,argv);
    /* len+1 and calloc keep the argument vector NULL-terminated. */
    sargs = (char**)calloc(len+1, sizeof(char*));
    for (i=0; i<len; i++) {
        jc = (jstring)(*env)->GetObjectArrayElement(env,argv,i);
        utf = (*env)->GetStringUTFChars(env,jc,0);
        sargs[i] = (char*)calloc(strlen(utf) + 1, sizeof(char));
        strcpy(sargs[i], utf);
        /* Release the JNI UTF chars; sargs keeps its own copy. */
        (*env)->ReleaseStringUTFChars(env, jc, utf);
    }
MPI_Init(&len, &sargs);
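    /* MPI_Init may strip recognized arguments and shrink len; rebuild the
     * Java argv below from the resulting native vector. */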
string = (*env)->FindClass(env, "java/lang/String");
value = (*env)->NewObjectArray(env, len, string, NULL);
for (i = 0; i < len; i++) {
jc = (*env)->NewStringUTF(env, sargs[i]);
(*env)->SetObjectArrayElement(env, value, i, jc);
}
ompi_java_init_native_Datatype() ;
return value;
}
/*
* Class: mpi_MPI
* Method: Finalize
* Signature: ()V
*/
JNIEXPORT void JNICALL Java_mpi_MPI_Finalize(JNIEnv *env, jclass obj)
{
ompi_java_clearFreeList(env) ;
#if OPAL_WANT_LIBLTDL
/* need to balance the ltdl inits */
lt_dlexit();
#endif
MPI_Finalize();
}
/*
* Class: mpi_MPI
* Method: Get_processor_name
* Signature: ([B)I
*/
JNIEXPORT jint JNICALL Java_mpi_MPI_Get_1processor_1name(JNIEnv *env, jclass obj, jbyteArray buf)
{
int len;
jboolean isCopy;
jbyte* bufc = (jbyte*)((*env)->GetByteArrayElements(env,buf,&isCopy)) ;
ompi_java_clearFreeList(env) ;
MPI_Get_processor_name((char*)bufc, &len);
(*env)->ReleaseByteArrayElements(env,buf,bufc,0) ;
return len;
}
/*
* Class: mpi_MPI
* Method: Wtime
* Signature: ()D
*/
JNIEXPORT jdouble JNICALL Java_mpi_MPI_Wtime(JNIEnv *env, jclass jthis)
{
ompi_java_clearFreeList(env) ;
return MPI_Wtime();
}
/*
* Class: mpi_MPI
* Method: Wtick
* Signature: ()D
*/
JNIEXPORT jdouble JNICALL Java_mpi_MPI_Wtick(JNIEnv *env, jclass jthis)
{
ompi_java_clearFreeList(env) ;
return MPI_Wtick();
}
/*
* Class: mpi_MPI
* Method: Initialized
* Signature: ()Z
*/
JNIEXPORT jboolean JNICALL Java_mpi_MPI_Initialized(JNIEnv *env, jclass jthis)
{
int flag;
ompi_java_clearFreeList(env) ;
MPI_Initialized(&flag);
if (flag==0) {
return JNI_FALSE;
} else {
return JNI_TRUE;
}
}
/*
* Class: mpi_MPI
* Method: Buffer_attach_native
* Signature: ([B)V
*/
JNIEXPORT void JNICALL Java_mpi_MPI_Buffer_1attach_1native(JNIEnv *env, jclass jthis, jbyteArray buf)
{
jboolean isCopy;
int size=(*env)->GetArrayLength(env,buf);
jbyte* bufptr = (*env)->GetByteArrayElements(env,buf,&isCopy) ;
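    /* The elements are deliberately not released here: the matching
     * ReleaseByteArrayElements is issued by Buffer_detach_native. */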
ompi_java_clearFreeList(env) ;
MPI_Buffer_attach(bufptr,size);
}
/*
* Class: mpi_MPI
* Method: Buffer_detach_native
* Signature: ([B)V
*/
JNIEXPORT void JNICALL Java_mpi_MPI_Buffer_1detach_1native(JNIEnv *env, jclass jthis, jbyteArray buf)
{
    int size;
    jbyte* bufptr ;
ompi_java_clearFreeList(env) ;
MPI_Buffer_detach(&bufptr, &size);
if (buf != NULL) {
(*env)->ReleaseByteArrayElements(env,buf,bufptr,0);
}
}
/*
* Class: mpi_MPI
* Method: SetConstant
* Signature: ()V
*/
JNIEXPORT void JNICALL Java_mpi_MPI_SetConstant(JNIEnv *env, jclass jthis)
{
jfieldID anysourceID=(*env)->GetStaticFieldID(env,jthis,"ANY_SOURCE","I");
jfieldID anytagID=(*env)->GetStaticFieldID(env,jthis,"ANY_TAG","I");
jfieldID procnullID=(*env)->GetStaticFieldID(env,jthis,"PROC_NULL","I");
jfieldID graphID=(*env)->GetStaticFieldID(env,jthis,"GRAPH","I");
jfieldID cartID=(*env)->GetStaticFieldID(env,jthis,"CART","I");
jfieldID bsendoverID=(*env)->GetStaticFieldID(env,jthis,"BSEND_OVERHEAD","I");
jfieldID undefinedID=(*env)->GetStaticFieldID(env,jthis,"UNDEFINED","I");
jfieldID identID=(*env)->GetStaticFieldID(env,jthis,"IDENT","I");
jfieldID congruentID=(*env)->GetStaticFieldID(env,jthis,"CONGRUENT","I");
jfieldID similarID=(*env)->GetStaticFieldID(env,jthis,"SIMILAR","I");
jfieldID unequalID=(*env)->GetStaticFieldID(env,jthis,"UNEQUAL","I");
jfieldID tagubID=(*env)->GetStaticFieldID(env,jthis,"TAG_UB","I");
jfieldID hostID=(*env)->GetStaticFieldID(env,jthis,"HOST","I");
jfieldID ioID=(*env)->GetStaticFieldID(env,jthis,"IO","I");
(*env)->SetStaticIntField(env,jthis,anysourceID,MPI_ANY_SOURCE);
(*env)->SetStaticIntField(env,jthis,anytagID,MPI_ANY_TAG);
(*env)->SetStaticIntField(env,jthis,procnullID,MPI_PROC_NULL);
(*env)->SetStaticIntField(env,jthis,graphID,MPI_GRAPH);
(*env)->SetStaticIntField(env,jthis,cartID,MPI_CART);
#ifdef GC_DOES_PINNING
(*env)->SetStaticIntField(env,jthis,bsendoverID,MPI_BSEND_OVERHEAD);
#else
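  /* Without GC pinning, buffered sends carry an extra int header (the
   * cached `elements' count), so a larger overhead is advertised. */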
(*env)->SetStaticIntField(env,jthis,bsendoverID,
MPI_BSEND_OVERHEAD + sizeof(int));
#endif /* GC_DOES_PINNING */
(*env)->SetStaticIntField(env,jthis,undefinedID,MPI_UNDEFINED);
(*env)->SetStaticIntField(env,jthis,identID,MPI_IDENT);
(*env)->SetStaticIntField(env,jthis,congruentID,MPI_CONGRUENT);
(*env)->SetStaticIntField(env,jthis,similarID,MPI_SIMILAR);
(*env)->SetStaticIntField(env,jthis,unequalID,MPI_UNEQUAL);
(*env)->SetStaticIntField(env,jthis,tagubID,MPI_TAG_UB);
(*env)->SetStaticIntField(env,jthis,hostID,MPI_HOST);
(*env)->SetStaticIntField(env,jthis,ioID,MPI_IO);
}
void ompi_java_clearFreeList(JNIEnv *env)
{
jclass mpi ;
jmethodID clearID ;
mpi = (*env)->FindClass(env, "mpi/MPI");
clearID = (*env)->GetStaticMethodID(env, mpi, "clearFreeList", "()V");
(*env)->CallStaticVoidMethod(env, mpi, clearID) ;
}
ompi/mpi/java/c/mpi_Op.c Normal file
@ -0,0 +1,72 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
* File : mpi_Op.c
* Headerfile : mpi_Op.h
* Author : Xinying Li, Bryan Carpenter
* Created : Thu Apr 9 12:22:15 1998
* Revision : $Revision: 1.7 $
* Updated : $Date: 2003/01/16 16:39:34 $
* Copyright: Northeast Parallel Architectures Center
* at Syracuse University 1998
*/
#include "ompi_config.h"
#ifdef HAVE_TARGETCONDITIONALS_H
#include <TargetConditionals.h>
#endif
#include "mpi.h"
#include "mpi_Op.h"
#include "mpiJava.h"
/*
* Class: mpi_Op
* Method: init
* Signature: ()V
*/
JNIEXPORT void JNICALL Java_mpi_Op_init(JNIEnv *env, jclass thisClass)
{
ompi_java.OphandleID=(*env)->GetFieldID(env,thisClass,"handle","J");
}
/*
* Class: mpi_Op
* Method: GetOp
* Signature: (I)J
*/
JNIEXPORT void JNICALL Java_mpi_Op_GetOp(JNIEnv *env, jobject jthis, jint type)
{
static MPI_Op Ops[] = {
MPI_OP_NULL, MPI_MAX, MPI_MIN, MPI_SUM,
MPI_PROD, MPI_LAND, MPI_BAND, MPI_LOR, MPI_BOR, MPI_LXOR,
MPI_BXOR, MPI_MINLOC, MPI_MAXLOC
};
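    /* `type' is the op code passed from the Java side; it must be a valid
     * index into the table above (no bounds check is performed here). */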
(*env)->SetLongField(env,jthis, ompi_java.OphandleID, (jlong)Ops[type]);
}
/*
* Class: mpi_Op
* Method: free
* Signature: ()V
*/
JNIEXPORT void JNICALL Java_mpi_Op_free(JNIEnv *env, jobject jthis)
{
MPI_Op op;
op=(MPI_Op)((*env)->GetLongField(env,jthis,ompi_java.OphandleID));
    if (op != MPI_OP_NULL) {
        MPI_Op_free(&op);
        /* MPI_Op_free nulls the handle; keep the Java-side field consistent. */
        (*env)->SetLongField(env,jthis,ompi_java.OphandleID,(jlong)op);
    }
}
ompi/mpi/java/c/mpi_Request.c Normal file
@ -0,0 +1,682 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
* File : mpi_Request.c
* Headerfile : mpi_Request.h
* Author : Sung-Hoon Ko, Xinying Li, Bryan Carpenter
* Created : Thu Apr 9 12:22:15 1998
* Revision : $Revision: 1.11 $
* Updated : $Date: 2003/01/16 16:39:34 $
* Copyright: Northeast Parallel Architectures Center
* at Syracuse University 1998
*/
#include "ompi_config.h"
#include <stdlib.h>
#ifdef HAVE_TARGETCONDITIONALS_H
#include <TargetConditionals.h>
#endif
#include "mpi.h"
#include "mpi_Request.h"
#include "mpiJava.h"
#ifndef GC_DOES_PINNING
extern MPI_Datatype Dts[] ;
#endif /* GC_DOES_PINNING */
static void releaseBuf(int* elements, JNIEnv *env,
jobject req, MPI_Status* status)
{
int opTag = (*env)->GetIntField(env, req, ompi_java.opTagID) ;
switch(opTag) {
case 0 : { /* Request.OP_SEND */
#ifdef GC_DOES_PINNING
jobject buf = (*env)->GetObjectField(env, req, ompi_java.bufSaveID) ;
int baseType = (*env)->GetIntField(env, req, ompi_java.baseTypeSaveID) ;
void* bufbase =
(void*) (*env)->GetLongField(env, req, ompi_java.bufbaseSaveID) ;
ompi_java_releaseBufPtr(env, buf, bufbase, baseType) ;
/* Try not to create too many local references... */
(*env)->DeleteLocalRef(env, buf) ;
#else
void* bufptr = (void*) (*env)->GetLongField(env, req, ompi_java.bufptrSaveID) ;
ompi_java_releaseMPIReadBuf(bufptr) ;
#endif /* GC_DOES_PINNING */
break ;
}
case 1 : { /* Request.OP_RECV */
jobject buf = (*env)->GetObjectField(env, req, ompi_java.bufSaveID) ;
int baseType = (*env)->GetIntField(env, req, ompi_java.baseTypeSaveID) ;
#ifdef GC_DOES_PINNING
void* bufbase =
(void*) (*env)->GetLongField(env, req, ompi_java.bufbaseSaveID) ;
ompi_java_releaseBufPtr(env, buf, bufbase, baseType) ;
#else
int offset = (*env)->GetIntField(env, req, ompi_java.offsetSaveID) ;
int count = (*env)->GetIntField(env, req, ompi_java.countSaveID) ;
MPI_Comm mpi_comm =
(MPI_Comm) (*env)->GetLongField(env, req, ompi_java.commSaveID) ;
MPI_Datatype mpi_type =
(MPI_Datatype) (*env)->GetLongField(env, req, ompi_java.typeSaveID) ;
void* bufptr =
(void*) (*env)->GetLongField(env, req, ompi_java.bufptrSaveID) ;
ompi_java_releaseMPIRecvBuf(elements, env, buf, offset, count, mpi_type,
mpi_comm, bufptr, status, baseType) ;
#endif /* GC_DOES_PINNING */
/* Try not to create too many local references... */
(*env)->DeleteLocalRef(env, buf) ;
}
}
}
/*
* Class: mpi_Request
* Method: init
* Signature: ()V
*/
JNIEXPORT void JNICALL Java_mpi_Request_init(JNIEnv *env, jclass thisClass)
{
ompi_java.reqhandleID = (*env)->GetFieldID(env, thisClass, "handle", "J") ;
ompi_java.opTagID = (*env)->GetFieldID(env, thisClass, "opTag", "I") ;
ompi_java.bufSaveID = (*env)->GetFieldID(env, thisClass, "bufSave", "Ljava/lang/Object;") ;
ompi_java.countSaveID = (*env)->GetFieldID(env, thisClass, "countSave", "I") ;
ompi_java.offsetSaveID = (*env)->GetFieldID(env, thisClass, "offsetSave", "I") ;
ompi_java.baseTypeSaveID = (*env)->GetFieldID(env, thisClass, "baseTypeSave", "I") ;
ompi_java.bufbaseSaveID = (*env)->GetFieldID(env, thisClass, "bufbaseSave", "J") ;
ompi_java.bufptrSaveID = (*env)->GetFieldID(env, thisClass, "bufptrSave", "J") ;
ompi_java.commSaveID = (*env)->GetFieldID(env, thisClass, "commSave", "J") ;
ompi_java.typeSaveID = (*env)->GetFieldID(env, thisClass, "typeSave", "J") ;
}
/*
* Class: mpi_Request
* Method: GetReq
* Signature: (I)V
*/
JNIEXPORT void JNICALL Java_mpi_Request_GetReq(JNIEnv *env, jobject jthis, jint type)
{
switch (type) {
case 0:
(*env)->SetLongField(env,jthis,ompi_java.reqhandleID,(jlong)MPI_REQUEST_NULL);
break;
default:
break;
}
}
/*
* Class: mpi_Request
* Method: Cancel
* Signature: ()V
*/
JNIEXPORT void JNICALL Java_mpi_Request_Cancel(JNIEnv *env, jobject jthis)
{
MPI_Request req;
ompi_java_clearFreeList(env) ;
req=(MPI_Request)((*env)->GetLongField(env,jthis,ompi_java.reqhandleID));
MPI_Cancel(&req);
}
/*
* Class: mpi_Request
* Method: Free
* Signature: ()V
*/
JNIEXPORT void JNICALL Java_mpi_Request_Free(JNIEnv *env, jobject jthis)
{
MPI_Request req;
ompi_java_clearFreeList(env) ;
req=(MPI_Request)((*env)->GetLongField(env,jthis,ompi_java.reqhandleID));
    MPI_Request_free(&req);
    /* MPI_Request_free nulls the native handle; mirror that on the Java
     * side so Is_null reports correctly after Free. */
    (*env)->SetLongField(env,jthis,ompi_java.reqhandleID,(jlong)req);
}
/*
* Class: mpi_Request
* Method: Is_null
* Signature: ()Z
*/
JNIEXPORT jboolean JNICALL Java_mpi_Request_Is_1null(JNIEnv *env, jobject jthis)
{
MPI_Request req;
ompi_java_clearFreeList(env) ;
req=(MPI_Request)((*env)->GetLongField(env,jthis,ompi_java.reqhandleID));
if(req==MPI_REQUEST_NULL)
return JNI_TRUE;
else
return JNI_FALSE;
}
/*
* Class: mpi_Request
* Method: Wait
* Signature: (Lmpi/Status;)Lmpi/Status;
*/
JNIEXPORT jobject JNICALL Java_mpi_Request_Wait(JNIEnv *env, jobject jthis, jobject stat)
{
int elements ;
MPI_Request req =
(MPI_Request)((*env)->GetLongField(env,jthis,ompi_java.reqhandleID)) ;
MPI_Status *status =
(MPI_Status *)((*env)->GetLongField(env,stat,ompi_java.stathandleID));
MPI_Wait(&req, status);
ompi_java_clearFreeList(env) ;
releaseBuf(&elements, env, jthis, status) ;
(*env)->SetLongField(env,jthis,ompi_java.reqhandleID,(jlong)req);
(*env)->SetIntField(env,stat, ompi_java.sourceID, status->MPI_SOURCE);
(*env)->SetIntField(env,stat, ompi_java.tagID, status->MPI_TAG);
(*env)->SetIntField(env, stat, ompi_java.elementsID, elements);
return stat;
}
/*
* Class: mpi_Request
* Method: Test
* Signature: (Lmpi/Status;)Lmpi/Status;
*/
JNIEXPORT jobject JNICALL Java_mpi_Request_Test(JNIEnv *env, jobject jthis, jobject stat)
{
int flag;
MPI_Request req = (MPI_Request)((*env)->GetLongField(env,jthis,ompi_java.reqhandleID));
MPI_Status *status =
(MPI_Status *)((*env)->GetLongField(env,stat,ompi_java.stathandleID));
ompi_java_clearFreeList(env) ;
MPI_Test(&req, &flag, status);
(*env)->SetLongField(env,jthis,ompi_java.reqhandleID,(jlong)req);
if(flag) {
int elements ;
releaseBuf(&elements, env, jthis, status) ;
(*env)->SetIntField(env,stat, ompi_java.sourceID, status->MPI_SOURCE);
(*env)->SetIntField(env,stat, ompi_java.tagID, status->MPI_TAG);
(*env)->SetIntField(env, stat, ompi_java.elementsID, elements);
return stat;
}
else
return NULL;
}
/*
* Class: mpi_Request
* Method: Waitany
* Signature: ([Lmpi/Request;Lmpi/Status;)Lmpi/Status;
*/
JNIEXPORT jobject JNICALL Java_mpi_Request_Waitany(JNIEnv *env, jclass jthis,
jobjectArray array_of_request,
jobject stat)
{
    int i, index, elements = 0 ;
    int count=(*env)->GetArrayLength(env,array_of_request);
    MPI_Request *reqs=(MPI_Request*)calloc(count, sizeof(MPI_Request));
    jobject req = NULL ;
MPI_Status *status =
(MPI_Status *)((*env)->GetLongField(env,stat,ompi_java.stathandleID));
ompi_java_clearFreeList(env) ;
for(i=0; i<count; i++)
reqs[i]=(MPI_Request)((*env)->GetLongField(env,
(*env)->GetObjectArrayElement(env,array_of_request,i),
ompi_java.reqhandleID)) ;
MPI_Waitany(count, reqs, &index, status);
for(i=0; i<count; i++) {
jobject reqi = (*env)->GetObjectArrayElement(env,array_of_request,i) ;
(*env)->SetLongField(env, reqi, ompi_java.reqhandleID, (jlong) reqs[i]) ;
if(i == index) req = reqi ;
}
    /* MPI_Waitany sets index to MPI_UNDEFINED when no requests are active;
     * in that case there is no completed request whose buffer needs release. */
    if (req != NULL) {
        releaseBuf(&elements, env, req, status) ;
    }
(*env)->SetIntField(env,stat, ompi_java.sourceID, status->MPI_SOURCE);
(*env)->SetIntField(env,stat, ompi_java.tagID, status->MPI_TAG);
(*env)->SetIntField(env,stat, ompi_java.indexID, index);
(*env)->SetIntField(env, stat, ompi_java.elementsID, elements);
free(reqs);
return stat;
}
/*
* Class: mpi_Request
* Method: Testany
* Signature: ([Lmpi/Request;Lmpi/Status;)Lmpi/Status;
*/
JNIEXPORT jobject JNICALL Java_mpi_Request_Testany(JNIEnv *env, jclass jthis,
jobjectArray array_of_request, jobject stat)
{
int i,flag,index;
int count=(*env)->GetArrayLength(env,array_of_request);
MPI_Request *reqs=(MPI_Request*)calloc(count, sizeof(MPI_Request));
MPI_Status *status =
(MPI_Status *)((*env)->GetLongField(env,stat,ompi_java.stathandleID));
ompi_java_clearFreeList(env) ;
for(i=0; i<count; i++)
reqs[i]=(MPI_Request)((*env)->GetLongField(env,
(*env)->GetObjectArrayElement(env,array_of_request,i),
ompi_java.reqhandleID));
MPI_Testany(count, reqs, &index,&flag, status);
for(i=0; i<count; i++)
(*env)->SetLongField(env,
(*env)->GetObjectArrayElement(env,array_of_request,i),
ompi_java.reqhandleID, (jlong) reqs[i]);
free(reqs);
    /* MPI_Testany sets index to MPI_UNDEFINED when there are no active
     * requests; guard before using it as an array index. */
    if(flag && index != MPI_UNDEFINED) {
int elements ;
jobject req = (*env)->GetObjectArrayElement(env, array_of_request, index) ;
releaseBuf(&elements, env, req, status) ;
(*env)->SetIntField(env,stat, ompi_java.sourceID, status->MPI_SOURCE);
(*env)->SetIntField(env,stat, ompi_java.tagID, status->MPI_TAG);
(*env)->SetIntField(env,stat, ompi_java.indexID, index);
(*env)->SetIntField(env, stat, ompi_java.elementsID, elements);
return stat;
}
else
return NULL;
}
/*
* Class: mpi_Request
* Method: waitall
* Signature: ([Lmpi/Request;)[Lmpi/Status;
*/
JNIEXPORT jobjectArray JNICALL Java_mpi_Request_waitall(JNIEnv *env, jclass jthis,
jobjectArray array_of_request)
{
int i;
int count=(*env)->GetArrayLength(env,array_of_request);
MPI_Request *reqs=(MPI_Request*)calloc(2 * count, sizeof(MPI_Request));
MPI_Request *reqs_ini = reqs + count ;
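    /* The upper half of the allocation (reqs_ini) snapshots the initial
     * handles so that, after MPI_Waitall nulls completed entries, requests
     * that were active beforehand can still be identified. */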
MPI_Status *stas=(MPI_Status*)calloc(count, sizeof(MPI_Status));
jclass status_class = (*env)->FindClass(env,"mpi/Status");
jobjectArray array_of_status =
(*env)->NewObjectArray(env,count,status_class,NULL);
jmethodID handleConstructorID =
(*env)->GetMethodID(env, status_class, "<init>", "()V");
ompi_java_clearFreeList(env) ;
/* Copy initial native requests in Java `array_of_request' to `reqs'. */
for(i=0; i<count; i++) {
reqs [i] = (MPI_Request)((*env)->GetLongField(env,
(*env)->GetObjectArrayElement(env,array_of_request,i),
ompi_java.reqhandleID));
reqs_ini [i] = reqs [i] ;
}
MPI_Waitall(count, reqs, stas);
for(i=0; i<count; i++)
if(reqs_ini [i] != MPI_REQUEST_NULL) {
int elements ;
jobject req = (*env)->GetObjectArrayElement(env,array_of_request,i) ;
jobject jstas = (*env)->NewObject(env,status_class,handleConstructorID);
MPI_Status *status =
(MPI_Status *)((*env)->GetLongField(env,jstas,ompi_java.stathandleID));
/* Copy final native request to `array_of_request'. */
(*env)->SetLongField(env, req, ompi_java.reqhandleID, (jlong) reqs[i]) ;
/* Copy final native status to Java `array_of_status'... */
*status = stas [i] ;
releaseBuf(&elements, env, req, status) ;
(*env)->SetIntField(env,jstas,ompi_java.sourceID,status->MPI_SOURCE);
(*env)->SetIntField(env,jstas,ompi_java.tagID,status->MPI_TAG);
(*env)->SetIntField(env, jstas, ompi_java.elementsID, elements);
(*env)->SetObjectArrayElement(env,array_of_status,i,jstas);
/* Try not to create too many local references... */
(*env)->DeleteLocalRef(env, req) ;
(*env)->DeleteLocalRef(env, jstas) ;
}
free(reqs);
free(stas);
return array_of_status;
}
/*
* Class: mpi_Request
* Method: testall
* Signature: ([Lmpi/Request;)[Lmpi/Status;
*/
JNIEXPORT jobjectArray JNICALL Java_mpi_Request_testall(JNIEnv *env, jclass jthis,
jobjectArray array_of_request)
{
int i,flag;
int count=(*env)->GetArrayLength(env,array_of_request);
MPI_Request *reqs=(MPI_Request*)calloc(2 * count, sizeof(MPI_Request));
MPI_Request *reqs_ini = reqs + count ;
MPI_Status *stas=(MPI_Status*)calloc(count, sizeof(MPI_Status));
jclass status_class = (*env)->FindClass(env,"mpi/Status");
jobjectArray array_of_status =
(*env)->NewObjectArray(env,count,status_class,NULL);
jmethodID handleConstructorID =
(*env)->GetMethodID(env, status_class, "<init>", "()V");
ompi_java_clearFreeList(env) ;
/* Copy initial native requests in Java `array_of_request' to `reqs'. */
for(i=0; i<count; i++) {
reqs [i] =(MPI_Request)((*env)->GetLongField(env,
(*env)->GetObjectArrayElement(env,array_of_request,i),
ompi_java.reqhandleID)) ;
reqs_ini [i] = reqs [i] ;
}
MPI_Testall(count, reqs, &flag, stas);
if(flag)
for(i=0; i<count; i++)
if(reqs_ini [i] != MPI_REQUEST_NULL) {
int elements ;
jobject req = (*env)->GetObjectArrayElement(env,array_of_request,i) ;
jobject jstas = (*env)->NewObject(env,status_class,
handleConstructorID);
MPI_Status *status =
(MPI_Status *)((*env)->GetLongField(env,jstas,ompi_java.stathandleID));
/* Copy final native request to `array_of_request'. */
(*env)->SetLongField(env, req, ompi_java.reqhandleID, (jlong) reqs[i]) ;
/* Copy final native status to Java `array_of_status'... */
*status = stas [i] ;
releaseBuf(&elements, env, req, status) ;
(*env)->SetIntField(env,jstas,ompi_java.sourceID,status->MPI_SOURCE);
(*env)->SetIntField(env,jstas,ompi_java.tagID,status->MPI_TAG);
(*env)->SetIntField(env, jstas, ompi_java.elementsID, elements);
(*env)->SetObjectArrayElement(env,array_of_status,i,jstas);
/* Try not to create too many local references... */
(*env)->DeleteLocalRef(env, req) ;
(*env)->DeleteLocalRef(env, jstas) ;
}
free(reqs);
free(stas);
if(flag)
return array_of_status ;
else
return NULL;
}
/*
* Class: mpi_Request
* Method: waitsome
* Signature: ([Lmpi/Request;)[Lmpi/Status;
*/
JNIEXPORT jobjectArray JNICALL Java_mpi_Request_waitsome(JNIEnv *env, jclass jthis,
jobjectArray array_of_request)
{
int i;
int incount=(*env)->GetArrayLength(env,array_of_request);
MPI_Request *reqs=(MPI_Request*)calloc(incount, sizeof(MPI_Request));
MPI_Status *stas=(MPI_Status*)calloc(incount, sizeof(MPI_Status));
int *array_of_indices=(int*)calloc(incount,sizeof(int));
int outcount;
jclass status_class = (*env)->FindClass(env,"mpi/Status");
jobjectArray array_of_status;
jmethodID handleConstructorID =
(*env)->GetMethodID(env, status_class, "<init>", "()V");
ompi_java_clearFreeList(env) ;
/* Copy initial native requests in Java `array_of_request' to `reqs'. */
for(i=0; i<incount; i++)
reqs[i] = (MPI_Request)((*env)->GetLongField(env,
(*env)->GetObjectArrayElement(env,array_of_request,i),
ompi_java.reqhandleID));
MPI_Waitsome(incount, reqs, &outcount, array_of_indices, stas);
if(outcount!=MPI_UNDEFINED) {
array_of_status=(*env)->NewObjectArray(env,outcount,status_class,NULL);
for(i=0; i<outcount; i++) {
int elements ;
int index = array_of_indices[i] ;
jobject req = (*env)->GetObjectArrayElement(env,array_of_request,
index) ;
jobject jstas = (*env)->NewObject(env,status_class,handleConstructorID);
MPI_Status *status =
(MPI_Status *)((*env)->GetLongField(env,jstas,ompi_java.stathandleID));
/* Copy final native request to `array_of_request'. */
(*env)->SetLongField(env, req, ompi_java.reqhandleID, (jlong) reqs[index]) ;
/* Copy final native status to Java `array_of_status'... */
*status = stas [i] ;
releaseBuf(&elements, env, req, status) ;
(*env)->SetIntField(env,jstas,ompi_java.sourceID,status->MPI_SOURCE);
(*env)->SetIntField(env,jstas,ompi_java.tagID,status->MPI_TAG);
(*env)->SetIntField(env,jstas,ompi_java.indexID, index);
(*env)->SetIntField(env, jstas, ompi_java.elementsID, elements);
(*env)->SetObjectArrayElement(env,array_of_status,i,jstas);
/* Try not to create too many local references... */
(*env)->DeleteLocalRef(env, req) ;
(*env)->DeleteLocalRef(env, jstas) ;
}
}
free(reqs);
free(stas);
free(array_of_indices);
if(outcount==MPI_UNDEFINED)
return NULL;
else
return array_of_status;
}
/*
* Class: mpi_Request
* Method: testsome
* Signature: ([Lmpi/Request;)[Lmpi/Status;
*/
JNIEXPORT jobjectArray JNICALL Java_mpi_Request_testsome(JNIEnv *env, jclass jthis,
jobjectArray array_of_request)
{
int i;
int incount=(*env)->GetArrayLength(env,array_of_request);
MPI_Request *reqs=(MPI_Request*)calloc(incount, sizeof(MPI_Request));
MPI_Status *stas=(MPI_Status*)calloc(incount, sizeof(MPI_Status));
int *array_of_indices=(int*)calloc(incount,sizeof(int));
int outcount;
jclass status_class = (*env)->FindClass(env,"mpi/Status");
jobjectArray array_of_status;
jmethodID handleConstructorID =
(*env)->GetMethodID(env, status_class, "<init>", "()V");
ompi_java_clearFreeList(env) ;
for(i=0; i<incount; i++) {
reqs[i]=(MPI_Request)( (*env)->GetLongField(env,
(*env)->GetObjectArrayElement(env,array_of_request,i),
ompi_java.reqhandleID) );
}
MPI_Testsome(incount,reqs,&outcount,array_of_indices, stas);
if(outcount!=MPI_UNDEFINED) {
array_of_status=(*env)->NewObjectArray(env,outcount,status_class,NULL);
for(i=0; i<outcount; i++) {
int elements ;
int index = array_of_indices[i] ;
jobject req = (*env)->GetObjectArrayElement(env,array_of_request,
index) ;
jobject jstas = (*env)->NewObject(env,status_class,handleConstructorID);
MPI_Status *status =
(MPI_Status *)((*env)->GetLongField(env,jstas,ompi_java.stathandleID));
/* Copy final native request to `array_of_request'.
Release buffer elements... */
(*env)->SetLongField(env, req, ompi_java.reqhandleID, (jlong) reqs[index]) ;
/* Copy final native status to Java `array_of_status'... */
*status = stas [i] ;
releaseBuf(&elements, env, req, status) ;
(*env)->SetIntField(env,jstas,ompi_java.sourceID,status->MPI_SOURCE);
(*env)->SetIntField(env,jstas,ompi_java.tagID,status->MPI_TAG);
(*env)->SetIntField(env,jstas,ompi_java.indexID, index);
(*env)->SetIntField(env, jstas, ompi_java.elementsID, elements);
(*env)->SetObjectArrayElement(env,array_of_status,i,jstas);
/* Try not to create too many local references... */
(*env)->DeleteLocalRef(env, req) ;
(*env)->DeleteLocalRef(env, jstas) ;
}
}
free(reqs);
free(stas);
free(array_of_indices);
if(outcount==MPI_UNDEFINED)
return NULL;
else
return array_of_status;
}
/*
* Things to do:
*
 * Should `Free' release the buffer if an operation was in progress?
*
* Should be able to cache a global reference to `status_class'.
* Doesn't work for some reason. Why?
*
* Should be able to cache handleConstructorID in a static variable.
* Doesn't work with Linux IBM-JDK1.1.6. Why?
*
* `bufptr' currently unused---may be deleted.
 */
ompi/mpi/java/c/mpi_Status.c Normal file
@ -0,0 +1,258 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
* File : mpi_Status.c
* Headerfile : mpi_Status.h
* Author : Sung-Hoon Ko, Xinying Li
* Created : Thu Apr 9 12:22:15 1998
* Revision : $Revision: 1.9 $
* Updated : $Date: 2003/01/16 16:39:34 $
* Copyright: Northeast Parallel Architectures Center
* at Syracuse University 1998
*/
#include "ompi_config.h"
#include <stdlib.h>
#ifdef HAVE_TARGETCONDITIONALS_H
#include <TargetConditionals.h>
#endif
#include "mpi.h"
#include "mpi_Status.h"
#include "mpiJava.h"
/*jmethodID handleConstructorID ;*/
/* jclass status_class ; */
/*
* Class: mpi_Status
* Method: init
* Signature: ()V
*/
JNIEXPORT void JNICALL Java_mpi_Status_init(JNIEnv *env, jclass jthis)
{
ompi_java.stathandleID = (*env)->GetFieldID(env,jthis,"handle","J");
ompi_java.sourceID = (*env)->GetFieldID(env,jthis,"source","I");
ompi_java.tagID = (*env)->GetFieldID(env,jthis,"tag","I");
ompi_java.indexID = (*env)->GetFieldID(env,jthis,"index","I");
ompi_java.elementsID = (*env)->GetFieldID(env,jthis,"elements","I");
/* handleConstructorID = (*env)->GetMethodID(env, jthis, "<init>", "()V");*/
/* status_class = (*env)->NewGlobalRef(env, jthis) ; */
}
/*
* Class: mpi_Status
* Method: alloc
* Signature: ()V
*/
JNIEXPORT void JNICALL Java_mpi_Status_alloc(JNIEnv *env, jobject jthis)
{
MPI_Status *status = (MPI_Status*) malloc(sizeof(MPI_Status));
(*env)->SetLongField(env, jthis, ompi_java.stathandleID, (jlong)status);
}
/*
* Class: mpi_Status
* Method: free
* Signature: ()V
*/
JNIEXPORT void JNICALL Java_mpi_Status_free(JNIEnv *env, jobject jthis)
{
MPI_Status *status =
(MPI_Status *)((*env)->GetLongField(env,jthis,ompi_java.stathandleID));
free(status) ;
}
/*
* Class: mpi_Status
* Method: get_count
* Signature: (Lmpi/Datatype;)I
*/
JNIEXPORT jint JNICALL Java_mpi_Status_get_1count(JNIEnv *env, jobject jthis,
jobject type)
{
int count;
MPI_Datatype datatype =
(MPI_Datatype)((*env)->GetLongField(env,type,ompi_java.DatatypehandleID)) ;
MPI_Status *stat =
(MPI_Status*)((*env)->GetLongField(env,jthis,ompi_java.stathandleID));
#ifdef GC_DOES_PINNING
ompi_java_clearFreeList(env) ;
MPI_Get_count(stat, datatype, &count) ;
return count;
#else
int elements = (*env)->GetIntField(env, jthis, ompi_java.elementsID) ;
int dt_size;
ompi_java_clearFreeList(env) ;
MPI_Type_size(datatype, &dt_size) ;
if (elements != -1) {
count = elements / dt_size ; /* Cached at start of send buffer. */
if (count * dt_size == elements) {
return count ;
} else {
return MPI_UNDEFINED;
}
}
else {
/* Status object returned by IPROBE or PROBE.
*
* Didn't have access to data buffer to find `elements' value,
* so only way to find `count' is to invert `MPI_PACK_SIZE'.
*/
int bsize, bsizeTrial ;
MPI_Get_count(stat, MPI_BYTE, &bsize) ;
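        /* The first sizeof(int) bytes are the `elements' header that send
         * operations cache at the start of the buffer; discount them. */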
bsize -= sizeof(int) ;
count = bsize / dt_size ;
MPI_Pack_size(count, datatype, MPI_COMM_WORLD, &bsizeTrial) ;
/* Strictly, we should use the communicator the message was
* received on, but I'm too lazy to cache it.
*/
while(bsizeTrial > bsize) {
count-- ;
MPI_Pack_size(count, datatype, MPI_COMM_WORLD, &bsizeTrial) ;
}
if (bsizeTrial == bsize) {
return count ;
} else {
return MPI_UNDEFINED;
}
}
#endif /* GC_DOES_PINNING */
}
/*
* Class: mpi_Status
* Method: Test_cancelled
* Signature: ()Z
*/
JNIEXPORT jboolean JNICALL Java_mpi_Status_Test_1cancelled(JNIEnv *env, jobject jthis)
{
int flag;
MPI_Status *stat; /*shko*/
ompi_java_clearFreeList(env) ;
stat=(MPI_Status *)((*env)->GetLongField(env,jthis,ompi_java.stathandleID));/*shko*/
MPI_Test_cancelled(stat, &flag);
if (flag==0) {
return JNI_FALSE;
} else {
return JNI_TRUE;
}
}
/*
* Class: mpi_Status
* Method: get_elements
* Signature: (Lmpi/Datatype;)I
*/
JNIEXPORT jint JNICALL Java_mpi_Status_get_1elements(JNIEnv *env,
jobject jthis, jobject type)
{
int count;
MPI_Datatype datatype =
(MPI_Datatype)((*env)->GetLongField(env,type,ompi_java.DatatypehandleID)) ;
MPI_Status *stat =
(MPI_Status*)((*env)->GetLongField(env,jthis,ompi_java.stathandleID));
#ifdef GC_DOES_PINNING
ompi_java_clearFreeList(env) ;
MPI_Get_elements(stat, datatype, &count) ;
return count;
#else
int elements = (*env)->GetIntField(env, jthis, ompi_java.elementsID) ;
int baseType = (*env)->GetIntField(env, type, ompi_java.DatatypebaseTypeID) ;
int dt_size = ompi_java.dt_sizes[baseType] ;
ompi_java_clearFreeList(env) ;
if(elements != -1) {
count = elements / dt_size ;
if(count * dt_size == elements)
return count ;
else
return MPI_UNDEFINED ;
/* Can only happen if illegal base type mismatch between
* sender and receiver?
*/
}
else {
/* Status object returned by IPROBE or PROBE.
*
* Didn't have access to data buffer to find `elements' value,
* so only way to find `count' is to invert `MPI_PACK_SIZE'.
*/
int bsize, bsizeTrial ;
MPI_Get_count(stat, MPI_BYTE, &bsize) ;
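        /* As in get_count: discount the cached `elements' header before
         * inverting MPI_Pack_size. */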
bsize -= sizeof(int) ;
count = bsize / dt_size ;
MPI_Pack_size(count, datatype, MPI_COMM_WORLD, &bsizeTrial) ;
/* Strictly, we should use the communicator the message was
* received on, but I'm too lazy to cache it.
*/
while(bsizeTrial > bsize) {
count-- ;
MPI_Pack_size(count, datatype, MPI_COMM_WORLD, &bsizeTrial) ;
}
if(bsizeTrial == bsize)
return count ;
else
return MPI_UNDEFINED ;
/* Can only happen if illegal base type mismatch between
* sender and receiver?
*/
}
#endif /* GC_DOES_PINNING */
}
ompi/mpi/java/c/mpijava.exp Normal file
@ -0,0 +1,125 @@
Java_mpi_Cartcomm_Get
Java_mpi_Cartcomm_Rank
Java_mpi_Cartcomm_Coords
Java_mpi_Cartcomm_Shift
Java_mpi_Cartcomm_sub
Java_mpi_Cartcomm_Map
Java_mpi_Cartcomm_Dims_1create
Java_mpi_Comm_GetComm
Java_mpi_Comm_dup
Java_mpi_Comm_Size
Java_mpi_Comm_Rank
Java_mpi_Comm_Compare
Java_mpi_Comm_Free
Java_mpi_Comm_Is_1null
Java_mpi_Comm_group
Java_mpi_Comm_Test_1inter
Java_mpi_Comm_GetIntercomm
Java_mpi_Comm_send
Java_mpi_Comm_Recv
Java_mpi_Comm_Sendrecv
Java_mpi_Comm_Sendrecv_1replace
Java_mpi_Comm_bsend
Java_mpi_Comm_ssend
Java_mpi_Comm_rsend
Java_mpi_Comm_Isend
Java_mpi_Comm_Ibsend
Java_mpi_Comm_Issend
Java_mpi_Comm_Irsend
Java_mpi_Comm_Irecv
Java_mpi_Comm_pack
Java_mpi_Comm_unpack
Java_mpi_Comm_Pack_1size
Java_mpi_Comm_Iprobe
Java_mpi_Comm_Probe
Java_mpi_Comm_Attr_1get
Java_mpi_Comm_Topo_1test
Java_mpi_Comm_Abort
Java_mpi_Comm_Errhandler_1set
Java_mpi_Comm_errorhandler_1get
Java_mpi_Comm_init
Java_mpi_Datatype_init
Java_mpi_Datatype_GetDatatype
Java_mpi_Datatype_GetContiguous
Java_mpi_Datatype_GetVector
Java_mpi_Datatype_GetHvector
Java_mpi_Datatype_GetIndexed
Java_mpi_Datatype_GetHindexed
Java_mpi_Datatype_GetStruct
Java_mpi_Datatype_extent
Java_mpi_Datatype_size
Java_mpi_Datatype_lB
Java_mpi_Datatype_uB
Java_mpi_Datatype_commit
Java_mpi_Datatype_free
Java_mpi_Errhandler_init
Java_mpi_Errhandler_GetErrhandler
Java_mpi_Graphcomm_Get
Java_mpi_Graphcomm_Neighbours
Java_mpi_Graphcomm_Map
Java_mpi_Group_init
Java_mpi_Group_GetGroup
Java_mpi_Group_Size
Java_mpi_Group_Rank
Java_mpi_Group_free
Java_mpi_Group_Translate_1ranks
Java_mpi_Group_Compare
Java_mpi_Group_union
Java_mpi_Group_intersection
Java_mpi_Group_difference
Java_mpi_Group_incl
Java_mpi_Group_excl
Java_mpi_Group_range_1incl
Java_mpi_Group_range_1excl
Java_mpi_Intercomm_Remote_1size
Java_mpi_Intercomm_remote_1group
Java_mpi_Intercomm_merge
Java_mpi_Intracomm_split
Java_mpi_Intracomm_creat
Java_mpi_Intracomm_Barrier
Java_mpi_Intracomm_bcast
Java_mpi_Intracomm_gather
Java_mpi_Intracomm_gatherv
Java_mpi_Intracomm_scatter
Java_mpi_Intracomm_scatterv
Java_mpi_Intracomm_allgather
Java_mpi_Intracomm_allgatherv
Java_mpi_Intracomm_alltoall
Java_mpi_Intracomm_alltoallv
Java_mpi_Intracomm_reduce
Java_mpi_Intracomm_allreduce
Java_mpi_Intracomm_reduce_1scatter
Java_mpi_Intracomm_scan
Java_mpi_Intracomm_GetCart
Java_mpi_Intracomm_GetGraph
Java_mpi_MPI_InitNative
Java_mpi_MPI_SetConstant
Java_mpi_MPI_Finalize
Java_mpi_MPI_Wtime
Java_mpi_MPI_Wtick
Java_mpi_MPI_Get_1processor_1name
Java_mpi_MPI_Initialized
Java_mpi_MPI_Buffer_1attach_1native
Java_mpi_MPI_Buffer_1detach_1native
Java_mpi_Op_init
Java_mpi_Op_GetOp
Java_mpi_Op_free
Java_mpi_Request_init
Java_mpi_Request_GetReq
Java_mpi_Request_Free
Java_mpi_Request_Cancel
Java_mpi_Request_Is_1null
Java_mpi_Request_Wait
Java_mpi_Request_Test
Java_mpi_Request_Waitany
Java_mpi_Request_Testany
Java_mpi_Request_waitall
Java_mpi_Request_testall
Java_mpi_Request_waitsome
Java_mpi_Request_testsome
Java_mpi_Status_alloc
Java_mpi_Status_free
Java_mpi_Status_get_1count
Java_mpi_Status_Test_1cancelled
Java_mpi_Status_get_1elements
Java_mpi_Status_init
@ -0,0 +1,2 @@
Java_mpi_MPI_saveSignalHandlers
Java_mpi_MPI_restoreSignalHandlers
ompi/mpi/java/java/CartParms.java Normal file
@ -0,0 +1,31 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
* File : CartParms.java
* Author : Xinying Li
* Created : Thu Apr 9 12:22:15 1998
* Revision : $Revision: 1.1 $
* Updated : $Date: 1998/08/26 18:49:50 $
* Copyright: Northeast Parallel Architectures Center
* at Syracuse University 1998
*/
package mpi;
public class CartParms {
  public int [] dims;        // number of processes in each dimension
  public boolean [] periods; // periodicity of each dimension
  public int [] coords;      // Cartesian coordinates of the calling process
}
ompi/mpi/java/java/Cartcomm.java Normal file
@ -0,0 +1,160 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
* File : Cartcomm.java
* Author : Xinying Li
* Created : Thu Apr 9 12:22:15 1998
* Revision : $Revision: 1.7 $
* Updated : $Date: 2001/10/22 21:07:55 $
* Copyright: Northeast Parallel Architectures Center
* at Syracuse University 1998
*/
package mpi;
public class Cartcomm extends Intracomm {
protected Cartcomm(long handle) throws MPIException {
super(handle) ;
}
public Object clone() {
try {
return new Cartcomm(super.dup()) ;
}
catch (MPIException e) {
throw new RuntimeException(e.getMessage()) ;
}
}
/**
* Returns Cartesian topology information.
* <p>
* <table>
* <tr><td><em> returns: </em></td><td> object containing dimensions,
* periods and local coordinates </tr>
* </table>
* <p>
* Java binding of the MPI operations <tt>MPI_CARTDIM_GET</tt> and
* <tt>MPI_CART_GET</tt>.
* <p>
* The number of dimensions can be obtained from the size of (eg)
* <tt>dims</tt> field of the returned object.
*/
public native CartParms Get() throws MPIException ;
/**
* Translate logical process coordinates to process rank.
* <p>
* <table>
* <tr><td><tt> coords </tt></td><td> Cartesian coordinates of a
* process </tr>
* <tr><td><em> returns: </em></td><td> rank of the specified process </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_CART_RANK</tt>.
*/
public native int Rank(int[] coords) throws MPIException ;
/**
* Translate process rank to logical process coordinates.
* <p>
* <table>
* <tr><td><tt> rank </tt></td><td> rank of a process </tr>
* <tr><td><em> returns: </em></td><td> Cartesian coordinates of the
* specified process </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_CART_COORDS</tt>.
*/
public native int [] Coords(int rank) throws MPIException ;
/**
* Compute source and destination ranks for ``shift'' communication.
* <p>
* <table>
* <tr><td><tt> direction </tt></td><td> coordinate dimension of shift </tr>
* <tr><td><tt> disp </tt></td><td> displacement </tr>
* <tr><td><em> returns: </em></td><td> object containing ranks of source
* and destination processes </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_CART_SHIFT</tt>.
*/
public native ShiftParms Shift(int direction, int disp) throws MPIException ;
/**
* Partition Cartesian communicator into subgroups of lower dimension.
* <p>
* <table>
* <tr><td><tt> remain_dims </tt></td><td> by dimension, <tt>true</tt> if
* dimension is to be kept,
* <tt>false</tt> otherwise </tr>
* <tr><td><em> returns: </em></td><td> communicator containing subgrid
* including this process </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_CART_SUB</tt>.
*/
public Cartcomm Sub(boolean [] remain_dims) throws MPIException {
return new Cartcomm(sub(remain_dims)) ;
}
private native long sub(boolean [] remain_dims);
/**
* Compute an optimal placement.
* <p>
* <table>
* <tr><td><tt> dims </tt></td><td> the number of processes in each
* dimension </tr>
* <tr><td><tt> periods </tt></td><td> <tt>true</tt> if grid is periodic,
* <tt>false</tt> if not, in each
* dimension </tr>
* <tr><td><em> returns: </em></td><td> reordered rank of calling
* process </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_CART_MAP</tt>.
* <p>
* The number of dimensions is taken to be size of the <tt>dims</tt> argument.
*/
public native int Map(int [] dims, boolean [] periods) throws MPIException ;
/**
* Select a balanced distribution of processes per coordinate direction.
* <p>
* <table>
* <tr><td><tt> nnodes </tt></td><td> number of nodes in a grid </tr>
* <tr><td><tt> ndims </tt></td><td> number of dimensions of grid </tr>
* <tr><td><tt> dims </tt></td><td> array specifying the number of nodes
* in each dimension </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_DIMS_CREATE</tt>.
* <p>
* Size <tt>dims</tt> should be <tt>ndims</tt>. Note that
* <tt>dims</tt> is an <em>inout</em> parameter.
*/
static public native void Dims_create(int nnodes, int[] dims)
throws MPIException ;
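  // Illustrative sketch (the `size' variable is hypothetical): request a
  // balanced 2-D factorization of `size' processes, with both dimensions
  // left free (zero entries are assigned by MPI):
  //
  //   int[] dims = new int[] {0, 0};
  //   Cartcomm.Dims_create(size, dims);  // e.g. size == 6 gives {3, 2}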
}
ompi/mpi/java/java/Comm.java Normal file
File diff suppressed because it is too large
ompi/mpi/java/java/Datatype.java Normal file
@ -0,0 +1,845 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
* File : Datatype.java
* Author : Sang Lim, Sung-Hoon Ko, Xinying Li, Bryan Carpenter
* Created : Thu Apr 9 12:22:15 1998
* Revision : $Revision: 1.14 $
* Updated : $Date: 2003/01/16 16:39:34 $
* Copyright: Northeast Parallel Architectures Center
* at Syracuse University 1998
*/
package mpi;
public class Datatype extends Freeable {
private final static int UNDEFINED = -1 ;
private final static int NULL = 0 ;
private final static int BYTE = 1 ;
private final static int CHAR = 2 ;
private final static int SHORT = 3 ;
private final static int BOOLEAN = 4 ;
private final static int INT = 5 ;
private final static int LONG = 6 ;
private final static int FLOAT = 7 ;
private final static int DOUBLE = 8 ;
private final static int PACKED = 9 ;
private final static int LB = 10 ;
private final static int UB = 11 ;
private final static int OBJECT = 12 ;
private static native void init();
/*
* Constructor used in static initializer of `MPI'.
*
* (Called before MPI.Init(), so cannot make any native MPI calls.)
*/
Datatype() {}
//public Datatype() {} // debug
/*
* Constructor for basic datatypes.
*
* (Initialization done in separate `setBasic', so can create
* datatype objects for `BYTE', etc in static initializers invoked before
* MPI.Init(), then initialize objects after MPI initialized.)
*/
Datatype(int Type) {
setBasic(Type) ;
}
void setBasic (int Type) {
switch(Type) {
case OBJECT :
baseType = OBJECT ;
displacements = new int [1] ;
lb = 0 ;
ub = 1 ;
lbSet = false ;
ubSet = false ;
break ;
case LB :
baseType = UNDEFINED ;
displacements = new int [0] ;
lb = 0 ;
ub = 0 ;
lbSet = true ;
ubSet = false ;
break ;
case UB :
baseType = UNDEFINED ;
displacements = new int [0] ;
lb = 0 ;
ub = 0 ;
lbSet = false ;
ubSet = true ;
break ;
default : // Native case
baseType = Type ; // what about PACKED?
GetDatatype(Type);
baseSize = size() ;
}
}
private native void GetDatatype(int Type);
/*
* Constructor used by `Contiguous'
*
* (Initialization done in separate `setContiguous', so can create
* datatype objects for `SHORT2', etc in static initializers invoked before
* MPI.Init(), then initialize objects after MPI initialized.)
*/
private Datatype(int count, Datatype oldtype) throws MPIException {
setContiguous(count, oldtype) ;
}
void setContiguous(int count, Datatype oldtype) throws MPIException {
baseType = oldtype.baseType ;
if(baseType == OBJECT || baseType == UNDEFINED) {
int oldSize = oldtype.Size() ;
boolean oldUbSet = oldtype.ubSet ;
boolean oldLbSet = oldtype.lbSet ;
displacements = new int [count * oldSize] ;
ubSet = count > 0 && oldUbSet ;
lbSet = count > 0 && oldLbSet ;
lb = Integer.MAX_VALUE ;
ub = Integer.MIN_VALUE ;
if(oldSize != 0 || oldLbSet || oldUbSet) {
// `oldType.ub', `oldType.lb', `oldType.Extent()' all well-defined.
int oldExtent = oldtype.Extent() ;
if(count > 0) {
// Compose proper displacements...
int ptr = 0 ;
for (int i = 0 ; i < count ; i++) {
int startElement = i * oldExtent ;
for (int l = 0; l < oldSize; l++, ptr++)
displacements [ptr] = startElement + oldtype.displacements[l] ;
}
// Now maximize/minimize upper/lower bounds
int maxStartElement = oldExtent > 0 ? (count - 1) * oldExtent : 0 ;
int max_ub = maxStartElement + oldtype.ub ;
if (max_ub > ub)
ub = max_ub ;
int minStartElement = oldExtent > 0 ? 0 : (count - 1) * oldExtent ;
int min_lb = minStartElement + oldtype.lb ;
if (min_lb < lb)
lb = min_lb ;
}
}
else {
// `oldType.ub', `oldType.lb' and `oldType.Extent()' are undefined.
// Can ignore unless...
if(count > 1) {
System.out.println("Datatype.Contiguous: repeat-count specified " +
"for component with undefined extent");
MPI.COMM_WORLD.Abort(1);
}
}
}
else {
baseSize = oldtype.baseSize ;
GetContiguous(count, oldtype) ;
}
}
private native void GetContiguous(int count, Datatype oldtype);
/*
* Constructor used by `Vector', `Hvector'
*/
private Datatype(int count, int blocklength, int stride, Datatype oldtype,
boolean unitsOfOldExtent) throws MPIException {
baseType = oldtype.baseType ;
if(baseType == OBJECT || baseType == UNDEFINED) {
int oldSize = oldtype.Size() ;
boolean oldUbSet = oldtype.ubSet ;
boolean oldLbSet = oldtype.lbSet ;
int repetitions = count * blocklength ;
displacements = new int [repetitions * oldSize] ;
ubSet = repetitions > 0 && oldUbSet ;
lbSet = repetitions > 0 && oldLbSet ;
lb = Integer.MAX_VALUE ;
ub = Integer.MIN_VALUE ;
if(repetitions > 0) {
if(oldSize != 0 || oldLbSet || oldUbSet) {
// `oldType.ub', `oldType.lb', `oldType.Extent()' all well-defined.
int oldExtent = oldtype.Extent() ;
int ptr = 0 ;
for (int i = 0 ; i < count ; i++) {
int startBlock = stride * i ;
if(unitsOfOldExtent) startBlock *= oldExtent ;
// Compose proper displacements...
for (int j = 0; j < blocklength ; j++) {
int startElement = startBlock + j * oldExtent ;
for (int l = 0; l < oldSize; l++, ptr++)
displacements [ptr] = startElement + oldtype.displacements[l] ;
}
// Now maximize/minimize upper/lower bounds
int maxStartElement =
oldExtent > 0 ? startBlock + (blocklength - 1) * oldExtent
: startBlock ;
int max_ub = maxStartElement + oldtype.ub ;
if (max_ub > ub)
ub = max_ub ;
int minStartElement =
oldExtent > 0 ? startBlock
: startBlock + (blocklength - 1) * oldExtent ;
int min_lb = minStartElement + oldtype.lb ;
if (min_lb < lb)
lb = min_lb ;
}
}
else {
// `oldType.ub', `oldType.lb' and `oldType.Extent()' are undefined.
if(unitsOfOldExtent) {
System.out.println("Datatype.Vector: " +
"old type has undefined extent");
MPI.COMM_WORLD.Abort(1);
}
else {
// For `Hvector' can ignore unless...
if(blocklength > 1) {
System.out.println("Datatype.Hvector: repeat-count specified " +
"for component with undefined extent");
MPI.COMM_WORLD.Abort(1);
}
}
}
}
}
else {
baseSize = oldtype.baseSize ;
if(unitsOfOldExtent)
GetVector(count, blocklength, stride, oldtype) ;
else
GetHvector(count, blocklength, stride, oldtype) ;
}
}
private native void GetVector(int count, int blocklength, int stride,
Datatype oldtype);
private native void GetHvector(int count, int blocklength, int stride,
Datatype oldtype) ;
/*
* Constructor used by `Indexed', `Hindexed'
*/
private Datatype(int[] array_of_blocklengths, int[] array_of_displacements,
Datatype oldtype, boolean unitsOfOldExtent)
throws MPIException {
baseType = oldtype.baseType ;
if(baseType == OBJECT || baseType == UNDEFINED) {
int oldSize = oldtype.Size() ;
boolean oldUbSet = oldtype.ubSet ;
boolean oldLbSet = oldtype.lbSet ;
int count = 0 ;
for (int i = 0; i < array_of_blocklengths.length; i++)
count += array_of_blocklengths[i] ;
displacements = new int [count * oldSize] ;
ubSet = count > 0 && oldUbSet ;
lbSet = count > 0 && oldLbSet ;
lb = Integer.MAX_VALUE ;
ub = Integer.MIN_VALUE ;
if(oldSize != 0 || oldLbSet || oldUbSet) {
// `oldType.ub', `oldType.lb', `oldType.Extent()' all well-defined.
int oldExtent = oldtype.Extent() ;
int ptr = 0 ;
for (int i = 0; i < array_of_blocklengths.length; i++) {
int blockLen = array_of_blocklengths [i] ;
if(blockLen > 0) {
int startBlock = array_of_displacements [i] ;
if(unitsOfOldExtent) startBlock *= oldExtent ;
// Compose proper displacements...
for (int j = 0; j < blockLen ; j++) {
int startElement = startBlock + j * oldExtent ;
for (int l = 0; l < oldSize; l++, ptr++)
displacements [ptr] = startElement + oldtype.displacements[l] ;
}
// Now maximize/minimize upper/lower bounds
int maxStartElement =
oldExtent > 0 ? startBlock + (blockLen - 1) * oldExtent
: startBlock ;
int max_ub = maxStartElement + oldtype.ub ;
if (max_ub > ub)
ub = max_ub ;
int minStartElement =
oldExtent > 0 ? startBlock
: startBlock + (blockLen - 1) * oldExtent ;
int min_lb = minStartElement + oldtype.lb ;
if (min_lb < lb)
lb = min_lb ;
}
}
}
else {
// `oldType.ub', `oldType.lb' and `oldType.Extent()' are undefined.
if(unitsOfOldExtent) {
System.out.println("Datatype.Indexed: old type has undefined extent");
MPI.COMM_WORLD.Abort(1);
}
else {
// Can ignore unless...
for (int i = 0; i < array_of_blocklengths.length; i++)
if(array_of_blocklengths [i] > 1) {
System.out.println("Datatype.Hindexed: repeat-count specified " +
"for component with undefined extent");
MPI.COMM_WORLD.Abort(1);
}
}
}
}
else {
baseSize = oldtype.baseSize ;
if(unitsOfOldExtent)
GetIndexed(array_of_blocklengths, array_of_displacements, oldtype) ;
else
GetHindexed(array_of_blocklengths, array_of_displacements, oldtype) ;
}
}
private native void GetIndexed(int[] array_of_blocklengths,
int[] array_of_displacements,
Datatype oldtype) ;
private native void GetHindexed(int[] array_of_blocklengths,
int[] array_of_displacements,
Datatype oldtype) ;
/*
* Constructor used by `Struct'
*/
private Datatype(int[] array_of_blocklengths, int[] array_of_displacements,
Datatype[] array_of_types) throws MPIException {
// Compute new base type
baseType = UNDEFINED;
for (int i = 0; i < array_of_types.length; i++) {
int oldBaseType = array_of_types[i].baseType ;
if(oldBaseType != baseType) {
if(baseType == UNDEFINED) {
baseType = oldBaseType ;
if(baseType != OBJECT)
baseSize = array_of_types[i].baseSize ;
}
else if(oldBaseType != UNDEFINED) {
System.out.println("Datatype.Struct: All base types must agree...");
MPI.COMM_WORLD.Abort(1);
}
}
}
// Allocate `displacements' if required
if(baseType == OBJECT || baseType == UNDEFINED) {
int size = 0 ;
for (int i = 0; i < array_of_blocklengths.length; i++)
size += array_of_blocklengths[i] * array_of_types[i].Size();
displacements = new int [size] ;
}
ubSet = false ;
lbSet = false ;
lb = Integer.MAX_VALUE ;
ub = Integer.MIN_VALUE ;
int ptr = 0 ;
for (int i = 0; i < array_of_blocklengths.length; i++) {
int blockLen = array_of_blocklengths [i] ;
if(blockLen > 0) {
Datatype oldtype = array_of_types [i] ;
int oldBaseType = oldtype.baseType ;
if(oldBaseType == OBJECT || oldBaseType == UNDEFINED) {
int oldSize = oldtype.Size() ;
boolean oldUbSet = oldtype.ubSet ;
boolean oldLbSet = oldtype.lbSet ;
if(oldSize != 0 || oldLbSet || oldUbSet) {
// `oldType.ub', `oldType.lb', `oldType.Extent()' all well-defined.
int oldExtent = oldtype.Extent() ;
int startBlock = array_of_displacements [i] ;
// Compose normal displacements...
for (int j = 0; j < blockLen ; j++) {
int startElement = startBlock + j * oldExtent ;
for (int l = 0; l < oldSize; l++, ptr++)
displacements [ptr] = startElement + oldtype.displacements[l] ;
}
// Now maximize/minimize upper/lower bounds
// `ubSet' acts like a most significant positive bit in
// the maximization operation.
if (oldUbSet == ubSet) {
int maxStartElement =
oldExtent > 0 ? startBlock + (blockLen - 1) * oldExtent
: startBlock ;
int max_ub = maxStartElement + oldtype.ub ;
if (max_ub > ub)
ub = max_ub ;
}
else if(oldUbSet) {
int maxStartElement =
oldExtent > 0 ? startBlock + (blockLen - 1) * oldExtent
: startBlock ;
ub = maxStartElement + oldtype.ub ;
ubSet = true ;
}
// `lbSet' acts like a most significant negative bit in
// the minimization operation.
if (oldLbSet == lbSet) {
int minStartElement =
oldExtent > 0 ? startBlock
: startBlock + (blockLen - 1) * oldExtent ;
int min_lb = minStartElement + oldtype.lb ;
if (min_lb < lb)
lb = min_lb ;
}
else if(oldLbSet) {
int minStartElement =
oldExtent > 0 ? startBlock
: startBlock + (blockLen - 1) * oldExtent ;
lb = minStartElement + oldtype.lb ;
lbSet = true ;
}
}
else {
// `oldType.ub', `oldType.lb' and `oldType.Extent()' are undefined.
// Can ignore unless...
if(blockLen > 1) {
System.out.println("Datatype.Struct: repeat-count specified " +
"for component with undefined extent");
MPI.COMM_WORLD.Abort(1);
}
}
}
}
}
if(baseType != OBJECT && baseType != UNDEFINED)
GetStruct(array_of_blocklengths, array_of_displacements, array_of_types,
lbSet, lb, ubSet, ub) ;
}
private native void GetStruct(int[] array_of_blocklengths,
int[] array_of_displacements,
Datatype[] array_of_types,
boolean lbSet, int lb, boolean ubSet, int ub) ;
protected boolean isObject() {
return baseType == OBJECT || baseType == UNDEFINED ;
}
/**
* Returns the extent of a datatype - the difference between its
* upper and lower bounds.
* <p>
* <table>
* <tr><td><em> returns: </em></td><td> datatype extent </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_TYPE_EXTENT</tt>.
*/
public int Extent() throws MPIException {
if(baseType == OBJECT || baseType == UNDEFINED)
return ub - lb ;
else
return extent() / baseSize ;
}
private native int extent();
/**
* Returns the total size of a datatype - the number of buffer
* elements it represents.
* <p>
* <table>
* <tr><td><em> returns: </em></td><td> datatype size </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_TYPE_SIZE</tt>.
*/
public int Size() throws MPIException {
if(baseType == OBJECT || baseType == UNDEFINED)
return displacements.length;
else
return size() / baseSize ;
}
private native int size();
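// Usage sketch contrasting Size and Extent (illustrative; assumes MPI
// has been initialized):
//
//   Datatype v = Datatype.Vector(2, 1, 3, MPI.INT) ;
//   v.Size() ;    // 2 : elements actually represented
//   v.Extent() ;  // 4 : span covered, (count - 1) * stride + blocklength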
/**
* Find the lower bound of a datatype - the least value
* in its displacement sequence.
* <p>
* <table>
* <tr><td><em> returns: </em></td><td> displacement of lower bound
* from origin </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_TYPE_LB</tt>.
*/
public int Lb() throws MPIException {
if(baseType == OBJECT || baseType == UNDEFINED)
return lb;
else
return lB() / baseSize ;
}
private native int lB();
/**
* Find the upper bound of a datatype - the greatest value
* in its displacement sequence.
* <p>
* <table>
* <tr><td><em> returns: </em></td><td> displacement of upper bound
* from origin </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_TYPE_UB</tt>.
*/
public int Ub() throws MPIException {
if(baseType == OBJECT || baseType == UNDEFINED)
return ub;
else
return uB() / baseSize ;
}
private native int uB();
/**
* Commit a derived datatype.
* Java binding of the MPI operation <tt>MPI_TYPE_COMMIT</tt>.
*/
public void Commit() throws MPIException {
if (baseType != OBJECT && baseType != UNDEFINED)
commit() ;
}
private native void commit();
@SuppressWarnings("unchecked")
public void finalize() throws MPIException {
synchronized(MPI.class) {
MPI.freeList.addFirst(this) ;
}
}
native void free() ;
/**
* Construct new datatype representing replication of old datatype into
* contiguous locations.
* <p>
* <table>
* <tr><td><tt> count </tt></td><td> replication count </tr>
* <tr><td><tt> oldtype </tt></td><td> old datatype </tr>
* <tr><td><em> returns: </em></td><td> new datatype </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_TYPE_CONTIGUOUS</tt>.
* <p>
* The base type of the new datatype is the same as the base type of
* <tt>oldtype</tt>.
*/
public static Datatype Contiguous(int count,
Datatype oldtype) throws MPIException {
return new Datatype(count, oldtype) ;
}
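// Usage sketch (illustrative; assumes MPI has been initialized, `buf'
// is an int array, and `dest' and `tag' are hypothetical values):
//
//   Datatype triple = Datatype.Contiguous(3, MPI.INT) ;
//   triple.Commit() ;
//   MPI.COMM_WORLD.Send(buf, 0, 1, triple, dest, tag) ;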
/**
* Construct new datatype representing replication of old datatype into
* locations that consist of equally spaced blocks.
* <p>
* <table>
* <tr><td><tt> count </tt></td><td> number of blocks </tr>
* <tr><td><tt> blocklength </tt></td><td> number of elements in each
* block </tr>
* <tr><td><tt> stride </tt></td><td> number of elements between
* start of each block </tr>
* <tr><td><tt> oldtype </tt></td><td> old datatype </tr>
* <tr><td><em> returns: </em></td><td> new datatype </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_TYPE_VECTOR</tt>.
* <p>
* The base type of the new datatype is the same as the base type of
* <tt>oldtype</tt>.
*/
public static Datatype Vector(int count,
int blocklength,
int stride,
Datatype oldtype) throws MPIException {
return new Datatype(count, blocklength, stride, oldtype, true) ;
}
/**
* Identical to <tt>Vector</tt> except that the stride is expressed
* directly in terms of the buffer index, rather than the units of
* the old type.
* <p>
* <table>
* <tr><td><tt> count </tt></td><td> number of blocks </tr>
* <tr><td><tt> blocklength </tt></td><td> number of elements in each
* block </tr>
* <tr><td><tt> stride </tt></td><td> number of elements between
* start of each block </tr>
* <tr><td><tt> oldtype </tt></td><td> old datatype </tr>
* <tr><td><em> returns: </em></td><td> new datatype </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_TYPE_HVECTOR</tt>.
* <p>
* <em>Unlike other language bindings</em>, the value of <tt>stride</tt>
* is <em>not</em> measured in bytes.
*/
public static Datatype Hvector(int count,
int blocklength,
int stride,
Datatype oldtype) throws MPIException {
return new Datatype(count, blocklength, stride, oldtype, false) ;
}
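// Usage sketch (illustrative; `rows' and `cols' are hypothetical
// dimensions): extracting one column of a double matrix stored
// row-major in a flat array:
//
//   Datatype column = Datatype.Vector(rows, 1, cols, MPI.DOUBLE) ;
//
// Hvector(rows, 1, cols, MPI.DOUBLE) describes the same layout here,
// because a basic type has extent one buffer element; the two
// constructors differ only when oldtype is a derived type whose
// extent exceeds one.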
/**
* Construct new datatype representing replication of old datatype into
* a sequence of blocks where each block can contain a different number
* of copies and have a different displacement.
* <p>
* <table>
* <tr><td><tt> array_of_blocklengths </tt></td><td> number of elements per
* block </tr>
* <tr><td><tt> array_of_displacements </tt></td><td> displacement of each
* block in units of
* old type </tr>
* <tr><td><tt> oldtype </tt></td><td> old datatype </tr>
* <tr><td><em> returns: </em></td><td> new datatype </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_TYPE_INDEXED</tt>.
* <p>
* The number of blocks is taken to be the size of the
* <tt>array_of_blocklengths</tt> argument. The second argument,
* <tt>array_of_displacements</tt>, should be the same size.
* The base type of the new datatype is the same as the base type of
* <tt>oldtype</tt>.
*/
public static Datatype Indexed(int[] array_of_blocklengths,
int[] array_of_displacements,
Datatype oldtype) throws MPIException {
return new Datatype(array_of_blocklengths, array_of_displacements,
oldtype, true) ;
}
/**
* Identical to <tt>Indexed</tt> except that the displacements are
* expressed directly in terms of the buffer index, rather than the
* units of the old type.
* <p>
* <table>
* <tr><td><tt> array_of_blocklengths </tt></td><td> number of elements per
* block </tr>
* <tr><td><tt> array_of_displacements </tt></td><td> displacement in buffer
* for each block </tr>
* <tr><td><tt> oldtype </tt></td><td> old datatype </tr>
* <tr><td><em> returns: </em></td><td> new datatype </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_TYPE_HINDEXED</tt>.
* <p>
* <em>Unlike other language bindings</em>, the values in
* <tt>array_of_displacements</tt> are <em>not</em> measured in bytes.
*/
public static Datatype Hindexed(int[] array_of_blocklengths,
int[] array_of_displacements,
Datatype oldtype) throws MPIException {
return new Datatype(array_of_blocklengths, array_of_displacements,
oldtype, false) ;
}
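// Usage sketch (illustrative): the upper triangle of a 3 x 3 int matrix
// stored row-major in a 9-element buffer:
//
//   int [] blens = { 3, 2, 1 } ;  // shrinking row segments
//   int [] disps = { 0, 4, 8 } ;  // segment offsets, in elements
//   Datatype upper = Datatype.Indexed(blens, disps, MPI.INT) ;
//   upper.Commit() ;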
/**
* The most general type constructor.
* <p>
* <table>
* <tr><td><tt> array_of_blocklengths </tt></td><td> number of elements per
* block </tr>
* <tr><td><tt> array_of_displacements </tt></td><td> displacement in buffer
* for each block </tr>
* <tr><td><tt> array_of_types </tt></td><td> type of elements in
* each block </tr>
* <tr><td><em> returns: </em></td><td> new datatype </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_TYPE_STRUCT</tt>.
* <p>
* The number of blocks is taken to be the size of the
* <tt>array_of_blocklengths</tt> argument. The second and third
* arguments, <tt>array_of_displacements</tt> and <tt>array_of_types</tt>,
* should be the same size.
* <em>Unlike other language bindings</em>, the values in
* <tt>array_of_displacements</tt> are <em>not</em> measured in bytes.
* All elements of <tt>array_of_types</tt> with definite base types
* must have the <em>same</em> base type: this will be the base
* type of the new datatype.
*/
public static Datatype Struct(int[] array_of_blocklengths,
int[] array_of_displacements,
Datatype[] array_of_types) throws MPIException {
return new Datatype(array_of_blocklengths, array_of_displacements,
array_of_types) ;
}
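// Usage sketch (illustrative): a double padded to an extent of four
// buffer elements by an explicit MPI.UB marker -- note that, per the
// caveat above, displacements count elements rather than bytes:
//
//   int []      blens = { 1, 1 } ;
//   int []      disps = { 0, 4 } ;
//   Datatype [] types = { MPI.DOUBLE, MPI.UB } ;
//   Datatype padded = Datatype.Struct(blens, disps, types) ;
//   padded.Extent() ;  // 4, since the upper bound was pinned at 4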
protected long handle;
protected int baseType ;
protected int baseSize ; // or private
protected int displacements[] ;
protected int lb, ub ;
protected boolean ubSet, lbSet ;
// Flags set if MPI.UB, MPI.LB respectively appears as a component type.
static {
init();
}
}
// Things to do:
//
// Initialization and use of `baseSize' should probably be done entirely
// on JNI side.
//
// `baseType' could just take values from {UNDEFINED, OBJECT, NATIVE}?
// (But in future may want to add runtime checks using exact value.)

View File

@ -0,0 +1,45 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
* File : Errhandler.java
* Author : Xinying Li
* Created : Thu Apr 9 12:22:15 1998
* Revision : $Revision: 1.3 $
* Updated : $Date: 2001/08/07 16:36:25 $
* Copyright: Northeast Parallel Architectures Center
* at Syracuse University 1998
*/
package mpi;
//import mpi.*;
public class Errhandler{
public final static int FATAL = 1;
public final static int RETURN = 0;
private static native void init();
//public Errhandler() {}
public Errhandler(int Type) { GetErrhandler(Type);}
public Errhandler(long _handle) { handle = _handle;}
protected native void GetErrhandler(int Type);
protected long handle;
static {
init();
}
}

View File

@ -0,0 +1,27 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
* File : Freeable.java
* Author : Bryan Carpenter
* Created : Wed Jan 15 23:14:43 EST 2003
* Revision : $Revision: 1.1 $
* Updated : $Date: 2003/01/16 16:39:34 $
*/
package mpi;
abstract class Freeable {
abstract void free() ;
}

View File

@ -0,0 +1,30 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
* File : GraphParms.java
* Author : Xinying Li
* Created : Thu Apr 9 12:22:15 1998
* Revision : $Revision: 1.1 $
* Updated : $Date: 1998/08/26 18:49:55 $
* Copyright: Northeast Parallel Architectures Center
* at Syracuse University 1998
*/
package mpi;
public class GraphParms {
public int [] index;
public int [] edges;
}

View File

@ -0,0 +1,94 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
* File : Graphcomm.java
* Author : Xinying Li
* Created : Thu Apr 9 12:22:15 1998
* Revision : $Revision: 1.5 $
* Updated : $Date: 2001/10/22 21:07:55 $
* Copyright: Northeast Parallel Architectures Center
* at Syracuse University 1998
*/
package mpi;
public class Graphcomm extends Intracomm {
protected Graphcomm(long handle) throws MPIException {
super(handle) ;
}
public Object clone() {
try {
return new Graphcomm(super.dup()) ;
}
catch (MPIException e) {
throw new RuntimeException(e.getMessage()) ;
}
}
/**
* Returns graph topology information.
* <p>
* <table>
* <tr><td><em> returns: </em></td><td> object defining node degrees and
* edges of graph </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_GRAPHDIMS_GET</tt>.
* <p>
* The number of nodes and number of edges can be extracted
* from the sizes of the <tt>index</tt> and <tt>edges</tt> fields
* of the returned object.
*/
public native GraphParms Get() throws MPIException ;
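// Worked example (illustrative): for a star graph in which node 0 is
// joined to nodes 1, 2 and 3, the degrees are {3, 1, 1, 1}, so
//
//   index = { 3, 4, 5, 6 }        // cumulative degree counts
//   edges = { 1, 2, 3, 0, 0, 0 }  // neighbours of node 0, then 1, 2, 3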
/**
* Provides adjacency information for general graph topology.
* <p>
* <table>
* <tr><td><tt> rank </tt></td><td> rank of a process in the group
* of this communicator </tr>
* <tr><td><em> returns: </em></td><td> array of ranks of the processes
* neighbouring the specified one </tr>
* </table>
* <p>
* Java binding of the MPI operations <tt>MPI_GRAPH_NEIGHBORS_COUNT</tt>
* and <tt>MPI_GRAPH_NEIGHBORS</tt>.
* <p>
* The number of neighbours can be extracted from the size of the result.
*/
public native int [] Neighbours(int rank) throws MPIException ;
/**
* Compute an optimal placement.
* <p>
* <table>
* <tr><td><tt> index </tt></td><td> node degrees </tr>
* <tr><td><tt> edges </tt></td><td> graph edges </tr>
* <tr><td><em> returns: </em></td><td> reordered rank of calling
* process </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_GRAPH_MAP</tt>.
* <p>
* The number of nodes is taken to be the size of the <tt>index</tt> argument.
*/
public native int Map(int [] index, int [] edges) throws MPIException ;
}

View File

@ -0,0 +1,266 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
* File : Group.java
* Author : Xinying Li, Bryan Carpenter
* Created : Thu Apr 9 12:22:15 1998
* Revision : $Revision: 1.8 $
* Updated : $Date: 2003/01/16 16:39:34 $
* Copyright: Northeast Parallel Architectures Center
* at Syracuse University 1998
*/
package mpi;
//import mpi.*;
public class Group extends Freeable {
protected final static int EMPTY = 0;
private static native void init();
protected long handle;
//public Group() {}
protected Group(int Type) { GetGroup(Type); }
protected Group(long _handle) { handle = _handle;}
private native void GetGroup(int Type);
/**
* Size of group.
* <p>
* <table>
* <tr><td><em> returns: </em></td><td> number of processes in the
* group </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_GROUP_SIZE</tt>.
*/
public native int Size() throws MPIException ;
/**
* Rank of this process in group.
* <p>
* <table>
* <tr><td><em> returns: </em></td><td> rank of the calling process in
* the group </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_GROUP_RANK</tt>.
*
* Result value is <tt>MPI.UNDEFINED</tt> if this process is not
* a member of the group.
*/
public native int Rank() throws MPIException ;
/**
* Destructor.
* <p>
* Java binding of the MPI operation <tt>MPI_GROUP_FREE</tt>.
*/
@SuppressWarnings("unchecked")
public void finalize() throws MPIException {
synchronized(MPI.class) {
MPI.freeList.addFirst(this) ;
}
}
native void free() ;
/**
* Translate ranks within one group to ranks within another.
* <p>
* <table>
* <tr><td><tt> group1 </tt></td><td> a group </tr>
* <tr><td><tt> ranks1 </tt></td><td> array of valid ranks in
* <tt>group1</tt> </tr>
* <tr><td><tt> group2 </tt></td><td> another group </tr>
* <tr><td><em> returns: </em></td><td> array of corresponding ranks in
* <tt>group2</tt> </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_GROUP_TRANSLATE_RANKS</tt>.
* <p>
* Result elements are <tt>MPI.UNDEFINED</tt> where no correspondence
* exists.
*/
public static native int [] Translate_ranks(Group group1,int [] ranks1,
Group group2)
throws MPIException ;
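// Usage sketch (illustrative; `worldGroup' and `subGroup' are
// hypothetical Group objects):
//
//   int [] ranks1 = { 0, 1, 2 } ;
//   int [] ranks2 = Group.Translate_ranks(worldGroup, ranks1, subGroup) ;
//   // ranks2[i] is MPI.UNDEFINED where ranks1[i] has no counterpart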
/**
* Compare two groups.
* <p>
* <table>
* <tr><td><tt> group1 </tt></td><td> first group </tr>
* <tr><td><tt> group2 </tt></td><td> second group </tr>
* <tr><td><em> returns: </em></td><td> result </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_GROUP_COMPARE</tt>.
* <p>
* <tt>MPI.IDENT</tt> results if the group members and group order are
* exactly the same in both groups. <tt>MPI.SIMILAR</tt> results if
* the group members are the same but the order is different.
* <tt>MPI.UNEQUAL</tt> results otherwise.
*/
public static native int Compare(Group group1, Group group2)
throws MPIException ;
/**
* Set union of two groups.
* <p>
* <table>
* <tr><td><tt> group1 </tt></td><td> first group </tr>
* <tr><td><tt> group2 </tt></td><td> second group </tr>
* <tr><td><em> returns: </em></td><td> union group </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_GROUP_UNION</tt>.
*/
public static Group Union(Group group1, Group group2) throws MPIException {
return new Group(union(group1, group2)) ;
}
private static native long union(Group group1, Group group2);
/**
* Set intersection of two groups.
* <p>
* <table>
* <tr><td><tt> group1 </tt></td><td> first group </tr>
* <tr><td><tt> group2 </tt></td><td> second group </tr>
* <tr><td><em> returns: </em></td><td> intersection group </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_GROUP_INTERSECTION</tt>.
*/
public static Group Intersection(Group group1,Group group2)
throws MPIException {
return new Group(intersection(group1, group2)) ;
}
private static native long intersection(Group group1, Group group2);
/**
* Result contains all elements of the first group that are not in the
* second group.
* <p>
* <table>
* <tr><td><tt> group1 </tt></td><td> first group </tr>
* <tr><td><tt> group2 </tt></td><td> second group </tr>
* <tr><td><em> returns: </em></td><td> difference group </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_GROUP_DIFFERENCE</tt>.
*/
public static Group Difference(Group group1, Group group2)
throws MPIException {
return new Group(difference(group1, group2)) ;
}
private static native long difference(Group group1, Group group2) ;
/**
* Create a subset group including specified processes.
* <p>
* <table>
* <tr><td><tt> ranks </tt></td><td> ranks from this group to appear in
* new group </tr>
* <tr><td><em> returns: </em></td><td> new group </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_GROUP_INCL</tt>.
*/
public Group Incl(int [] ranks) throws MPIException {
return new Group(incl(ranks)) ;
}
private native long incl(int [] ranks);
/**
* Create a subset group excluding specified processes.
* <p>
* <table>
* <tr><td><tt> ranks </tt></td><td> ranks from this group <em>not</em>
* to appear in new group </tr>
* <tr><td><em> returns: </em></td><td> new group </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_GROUP_EXCL</tt>.
*/
public Group Excl(int [] ranks) throws MPIException {
return new Group(excl(ranks)) ;
}
private native long excl(int [] ranks) ;
/**
* Create a subset group including processes specified
* by strided intervals of ranks.
* <p>
* <table>
* <tr><td><tt> ranges </tt></td><td> array of integer triplets </tr>
* <tr><td><em> returns: </em></td><td> new group </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_GROUP_RANGE_INCL</tt>.
* <p>
* The triplets are of the form (first rank, last rank, stride)
* indicating ranks in this group to be included in the new group.
* The size of the first dimension of <tt>ranges</tt> is the number
* of triplets. The size of the second dimension is 3.
*/
public Group Range_incl(int [][] ranges) throws MPIException {
return new Group(range_incl(ranges)) ;
}
private native long range_incl(int [][] ranges) ;
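// Usage sketch (illustrative; `group' is a hypothetical Group): select
// the even ranks 0, 2, 4, 6, 8 with one (first, last, stride) triplet:
//
//   int [][] ranges = { { 0, 8, 2 } } ;
//   Group evens = group.Range_incl(ranges) ;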
/**
* Create a subset group excluding processes specified
* by strided intervals of ranks.
* <p>
* <table>
* <tr><td><tt> ranges </tt></td><td> array of integer triplets </tr>
* <tr><td><em> returns: </em></td><td> new group </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_GROUP_RANGE_EXCL</tt>.
* <p>
* Triplet array is defined as for <tt>Range_incl</tt>, the ranges
* indicating ranks in this group to be excluded from the new group.
*/
public Group Range_excl(int [][] ranges) throws MPIException {
return new Group(range_excl(ranges)) ;
}
private native long range_excl(int [][] ranges) ;
static {
init();
}
}

View File

@ -0,0 +1,85 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
* File : Intercomm.java
* Author : Xinying Li
* Created : Thu Apr 9 12:22:15 1998
* Revision : $Revision: 1.5 $
* Updated : $Date: 1999/09/14 20:50:11 $
* Copyright: Northeast Parallel Architectures Center
* at Syracuse University 1998
*/
package mpi;
//import mpi.*;
public class Intercomm extends Comm {
protected Intercomm(long handle) {super(handle) ;}
public Object clone() {
return new Intercomm(super.dup());
}
// Inter-Communication
/**
* Size of remote group.
* <p>
* <table>
* <tr><td><em> returns: </em></td><td> number of processes in the remote group
* of this communicator </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_COMM_REMOTE_SIZE</tt>.
*/
public native int Remote_size() throws MPIException ;
/**
* Return the remote group.
* <p>
* <table>
* <tr><td><em> returns: </em></td><td> remote group of this
* communicator </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_COMM_REMOTE_GROUP</tt>.
*/
public Group Remote_group() throws MPIException {
return new Group(remote_group());
}
private native long remote_group();
/**
* Create an inter-communicator.
* <p>
* <table>
* <tr><td><tt> high </tt></td><td> true if the local group has higher
* ranks in combined group </tr>
* <tr><td><em> returns: </em></td><td> new intra-communicator </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_INTERCOMM_MERGE</tt>.
*/
public Intracomm Merge(boolean high) throws MPIException {
return new Intracomm(merge(high)) ;
}
private native long merge(boolean high);
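// Usage sketch (illustrative; `inter' is a hypothetical Intercomm):
//
//   Intracomm merged = inter.Merge(false) ;  // local group takes low ranks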
}

View File

@ -0,0 +1,992 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
* File : Intracomm.java
* Author : Sang Lim, Xinying Li, Bryan Carpenter
* Created : Thu Apr 9 12:22:15 1998
* Revision : $Revision: 1.14 $
* Updated : $Date: 2002/12/16 15:25:13 $
* Copyright: Northeast Parallel Architectures Center
* at Syracuse University 1998
*/
package mpi;
public class Intracomm extends Comm {
Intracomm() {}
void setType(int type) {
super.setType(type) ;
shadow = new Comm(dup()) ;
}
protected Intracomm(long handle) throws MPIException {
super(handle) ;
shadow = new Comm(dup()) ;
}
public Object clone() {
try {
return new Intracomm(dup()) ;
}
catch (MPIException e) {
throw new RuntimeException(e.getMessage()) ;
}
}
/**
* Partition the group associated with this communicator and create
* a new communicator within each subgroup.
* <p>
* <table>
* <tr><td><tt> colour </tt></td><td> control of subset assignment </tr>
* <tr><td><tt> key </tt></td><td> control of rank assignment </tr>
* <tr><td><em> returns: </em></td><td> new communicator </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_COMM_SPLIT</tt>.
*/
public Intracomm Split(int colour, int key) throws MPIException {
long splitHandle = split(colour,key) ;
if(splitHandle == nullHandle)
return null ;
else
return new Intracomm(splitHandle) ;
}
private native long split(int colour, int key);
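// Usage sketch (illustrative): divide MPI.COMM_WORLD into odd- and
// even-ranked halves, preserving relative order within each half:
//
//   int rank = MPI.COMM_WORLD.Rank() ;
//   Intracomm half = MPI.COMM_WORLD.Split(rank % 2, rank) ;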
/**
* Create a new communicator.
* <p>
* <table>
* <tr><td><tt> group </tt></td><td> group which is a subset of the
* group of this communicator </tr>
* <tr><td><em> returns: </em></td><td> new communicator </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_COMM_CREATE</tt>.
*/
public Intracomm Creat(Group group) throws MPIException {
long creatHandle = creat(group) ;
if(creatHandle == nullHandle)
return null ;
else
return new Intracomm(creatHandle) ;
}
private native long creat(Group group);
// Collective Communication
/**
* A call to <tt>Barrier</tt> blocks the caller until all processes
* in the group have called it.
* <p>
* Java binding of the MPI operation <tt>MPI_BARRIER</tt>.
*/
public native void Barrier() throws MPIException ;
/*
* The type signature of `incount * intype' must be equal to the type
* signature of `outcount * outtype' (i.e. they must represent the same
* number of basic elements of the same type).
*/
private void copyBuffer(Object inbuf,
int inoffset, int incount, Datatype intype,
Object outbuf,
int outoffset, int outcount, Datatype outtype)
throws MPIException {
if(intype.isObject()) {
Object [] inbufArray = (Object[])inbuf;
Object [] outbufArray = (Object[])outbuf;
int outbase = outoffset, inbase = inoffset ;
int kout = 0 ;
for (int j = 0 ; j < incount ; j++) {
for (int k = 0 ; k < intype.displacements.length ; k++) {
outbufArray [outbase + outtype.displacements [kout]] =
inbufArray [inbase + intype.displacements [k]] ;
// Advance the output position element by element; once a whole
// output item has been filled, move on to the next one.
kout++;
if (kout == outtype.displacements.length){
kout = 0;
outbase += outtype.Extent() ;
}
}
inbase += intype.Extent() ;
}
}
else {
byte [] tmpbuf = new byte [Pack_size(incount, intype)] ;
Pack(inbuf, inoffset, incount, intype, tmpbuf, 0) ;
Unpack(tmpbuf, 0, outbuf, outoffset, outcount, outtype) ;
}
}
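// Allocate a fresh array of the same element type and length as
// `template'; returns null if `template' is not a recognized array type.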
private Object newBuffer(Object template) {
if(template instanceof Object[])
return new Object [((Object[]) template).length] ;
if(template instanceof byte[])
return new byte [((byte[]) template).length] ;
if(template instanceof char[])
return new char [((char[]) template).length] ;
if(template instanceof short[])
return new short [((short[]) template).length] ;
if(template instanceof boolean[])
return new boolean [((boolean[]) template).length] ;
if(template instanceof int[])
return new int [((int[]) template).length] ;
if(template instanceof long[])
return new long [((long[]) template).length] ;
if(template instanceof float[])
return new float [((float[]) template).length] ;
if(template instanceof double[])
return new double [((double[]) template).length] ;
return null ;
}
/**
* Broadcast a message from the<