
Next generation of MPI Java bindings.

Includes all MPI functions supported by Open MPI, including MPI-3
functions (as of about 2 weeks ago).  Many changes compared to the
prior generation of Java bindings; not much is left from the prior
generation, actually.  The changes include (but are not limited to):

 * Add support for more than just a subset of MPI-1 functions
 * Use typical Java case for symbol names
 * Support Java Direct buffers (giving darn-near "native C"
   performance)
 * Support "type struct" better than the prior generation
 * Make more of an effort for the Java bindings to be a thin layer
   over the back-end C bindings
 * ...and more

A proper README with more information about what is supported, how to
use these bindings, etc. will be committed shortly.
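
To make the "typical Java case" point concrete, here is a minimal hello-world against the new bindings (a hedged sketch of user code only; see the forthcoming README for the actual supported surface):

    import mpi.*;

    public class Hello {
        public static void main(String[] args) throws MPIException {
            MPI.Init(args);
            // Accessors are lowerCamelCase (getRank/getSize), per the
            // "typical Java case" change listed above.
            int rank = MPI.COMM_WORLD.getRank();
            int size = MPI.COMM_WORLD.getSize();
            System.out.println("Hello from rank " + rank + " of " + size);
            MPI.Finalize();
        }
    }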

This commit was SVN r29263.
This commit is contained in:
Jeff Squyres 2013-09-26 21:44:39 +00:00
parent 6c53711ac8
commit e4e3e411fc
62 changed files with 15879 additions and 9347 deletions


ompi/mpi/java/c/Makefile.am

@@ -12,7 +12,7 @@
 if OMPI_WANT_JAVA_BINDINGS
 # Get the include files that were generated from the .java source files
-AM_CPPFLAGS = -I$(top_builddir)/ompi/mpi/java/java $(ORTE_JDK_CPPFLAGS) $(LTDLINCL)
+AM_CPPFLAGS = -I$(top_builddir)/ompi/mpi/java/java $(ORTE_JDK_CPPFLAGS)
 headers = \
         mpiJava.h
@@ -22,20 +22,26 @@ ompi_HEADERS = \
 lib_LTLIBRARIES = libmpi_java.la
 libmpi_java_la_SOURCES = \
-        mpi_Cartcomm.c \
+        mpi_CartComm.c \
         mpi_Comm.c \
+        mpi_Constant.c \
         mpi_Datatype.c \
         mpi_Errhandler.c \
-        mpi_Graphcomm.c \
+        mpi_File.c \
+        mpi_GraphComm.c \
         mpi_Group.c \
+        mpi_Info.c \
         mpi_Intercomm.c \
         mpi_Intracomm.c \
+        mpi_Message.c \
         mpi_MPI.c \
         mpi_Op.c \
+        mpi_Prequest.c \
         mpi_Request.c \
-        mpi_Status.c
+        mpi_Status.c \
+        mpi_Win.c
-libmpi_java_la_LIBADD = $(top_builddir)/ompi/libmpi.la $(LIBLTDL)
+libmpi_java_la_LIBADD = $(top_builddir)/ompi/libmpi.la
 libmpi_java_la_LDFLAGS = -version-info $(libmpi_java_so_version)
 endif


ompi/mpi/java/c/mpiJava.h

@@ -1,73 +1,111 @@
 /*
- * Copyright (c) 2011 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2011-2013 Cisco Systems, Inc. All rights reserved.
  *
  * $COPYRIGHT$
  *
  * Additional copyrights may follow
  *
  * $HEADER$
  */
+#ifndef _MPIJAVA_H_
+#define _MPIJAVA_H_
 #include "mpi.h"

 typedef struct {
-    jfieldID CommhandleID;
-    jfieldID ErrhandleID;
-    jfieldID GrouphandleID;
-    jfieldID DatatypehandleID;
-    jfieldID DatatypebaseTypeID;
-    jfieldID DatatypebaseSizeID;
-    jfieldID OphandleID;
-    jfieldID stathandleID;
-    jfieldID sourceID;
-    jfieldID tagID;
-    jfieldID indexID;
-    jfieldID elementsID;
-    jfieldID reqhandleID;
-    jfieldID opTagID;
-    jfieldID bufSaveID;
-    jfieldID countSaveID;
-    jfieldID offsetSaveID;
-    jfieldID baseTypeSaveID;
-    jfieldID bufbaseSaveID;
-    jfieldID bufptrSaveID;
-    jfieldID commSaveID;
-    jfieldID typeSaveID;
-    int *dt_sizes;
+    jfieldID  CommHandle;
+    jfieldID  ErrHandle;
+    jfieldID  GroupHandle;
+    jclass    CartParmsClass;
+    jmethodID CartParmsInit;
+    jclass    ShiftParmsClass;
+    jmethodID ShiftParmsInit;
+    jclass    GraphParmsClass;
+    jmethodID GraphParmsInit;
+    jclass    DistGraphNeighborsClass;
+    jmethodID DistGraphNeighborsInit;
+    jfieldID  DatatypeHandle;
+    jfieldID  DatatypeBaseType;
+    jfieldID  DatatypeBaseSize;
+    jfieldID  MessageHandle;
+    jfieldID  OpHandle;
+    jfieldID  OpCommute;
+    jmethodID OpCall;
+    jclass    StatusClass;
+    jmethodID StatusInit;
+    jfieldID  StSource;
+    jfieldID  StTag;
+    jfieldID  StError;
+    jfieldID  St_cancelled;
+    jfieldID  St_ucount;
+    jfieldID  StIndex;
+    jfieldID  StElements;
+    jfieldID  StUsingBuffer;
+    jfieldID  ReqHandle;
+    jclass    ExceptionClass;
+    jmethodID ExceptionInit;
+    jclass    IntegerClass;
+    jmethodID IntegerValueOf;
+    jclass    LongClass;
+    jmethodID LongValueOf;
+    jclass    ByteBufferClass;
+    jmethodID ByteBufferArray;
+    jmethodID ByteBufferArrayOffset;
+    int       dtSizes[12];
 } ompi_java_globals_t;

 extern ompi_java_globals_t ompi_java;

-void ompi_java_clearFreeList(JNIEnv*);
-void ompi_java_init_native_Datatype(void);
-void* ompi_java_getBufPtr(void** bufbase,
-                          JNIEnv *env, jobject buf,
-                          int baseType, int offset);
-void ompi_java_releaseBufPtr(JNIEnv *env, jobject buf,
-                             void* bufbase, int baseType);
-void* ompi_java_getMPIWriteBuf(int* bsize, int count,
-                               MPI_Datatype type, MPI_Comm comm);
-#ifndef GC_DOES_PINNING
-void* ompi_java_getMPIBuf(int* size, JNIEnv *env, jobject buf, int offset,
-                          int count, MPI_Datatype type, MPI_Comm comm,
-                          int baseType);
-void ompi_java_releaseMPIBuf(JNIEnv *env, jobject buf, int offset,
-                             int count, MPI_Datatype type, MPI_Comm comm,
-                             void* bufptr, int size, int baseType);
-void ompi_java_releaseMPIRecvBuf(int* elements, JNIEnv *env, jobject buf, int offset,
-                                 int count, MPI_Datatype type, MPI_Comm comm,
-                                 void* bufptr, MPI_Status* status,
-                                 int baseType);
-void ompi_java_releaseMPIReadBuf(void* bufptr);
-#endif /* GC_DOES_PINNING */
+void ompi_java_init_native_Datatype(JNIEnv *env);
+void* ompi_java_getBufPtr(
+        void** bufBase, JNIEnv *env, jobject buf, int baseType, int offset);
+void ompi_java_releaseBufPtr(
+        JNIEnv *env, jobject buf, void* bufBase, int baseType);
+void ompi_java_releaseReadBufPtr(
+        JNIEnv *env, jobject buf, void *bufBase, int baseType);
+void ompi_java_setStaticLongField(JNIEnv *env, jclass c,
+                                  char *field, jlong value);
+void ompi_java_setIntField(JNIEnv *env, jclass c, jobject obj,
+                           char *field, jint value);
+void ompi_java_findClasses(JNIEnv *env);
+jclass ompi_java_findClass(JNIEnv *env, const char *className);
+void ompi_java_deleteClasses(JNIEnv *env);
+jobject ompi_java_Integer_valueOf(JNIEnv *env, jint i);
+jobject ompi_java_Long_valueOf(JNIEnv *env, jlong i);
+void ompi_java_getIntArray(
+        JNIEnv *env, jintArray array, jint **jptr, int **cptr);
+void ompi_java_releaseIntArray(
+        JNIEnv *env, jintArray array, jint *jptr, int *cptr);
+void ompi_java_forgetIntArray(
+        JNIEnv *env, jintArray array, jint *jptr, int *cptr);
+void ompi_java_getBooleanArray(
+        JNIEnv *env, jbooleanArray array, jboolean **jptr, int **cptr);
+void ompi_java_releaseBooleanArray(
+        JNIEnv *env, jbooleanArray array, jboolean *jptr, int *cptr);
+void ompi_java_forgetBooleanArray(
+        JNIEnv *env, jbooleanArray array, jboolean *jptr, int *cptr);
+jboolean ompi_java_exceptionCheck(JNIEnv *env, int rc);
+void* ompi_java_attrSet(JNIEnv *env, jbyteArray jval);
+jbyteArray ompi_java_attrGet(JNIEnv *env, void *cval);
+int ompi_java_attrCopy(void *attrValIn, void *attrValOut, int *flag);
+int ompi_java_attrDelete(void *attrVal);
+MPI_Op ompi_java_op_getHandle(JNIEnv *env, jobject jthis, int baseType);
+void ompi_java_status_get(MPI_Status *status, JNIEnv *env, jobject obj);
+void ompi_java_status_set(MPI_Status *status, JNIEnv *env, jobject obj);
+jobject ompi_java_status_new(MPI_Status *status, JNIEnv *env);
+#endif /* _MPIJAVA_H_ */

ompi/mpi/java/c/mpi_CartComm.c (new file, 172 lines)

@@ -0,0 +1,172 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
* File : mpi_CartComm.c
* Headerfile : mpi_CartComm.h
* Author : Sung-Hoon Ko, Xinying Li
* Created : Thu Apr 9 12:22:15 1998
* Revision : $Revision: 1.6 $
* Updated : $Date: 2003/01/16 16:39:34 $
* Copyright: Northeast Parallel Architectures Center
* at Syracuse University 1998
*/
#include "ompi_config.h"
#include <stdlib.h>
#ifdef HAVE_TARGETCONDITIONALS_H
#include <TargetConditionals.h>
#endif
#include "mpi.h"
#include "mpi_CartComm.h"
#include "mpiJava.h"
JNIEXPORT void JNICALL Java_mpi_CartComm_init(JNIEnv *env, jclass clazz)
{
ompi_java.CartParmsInit = (*env)->GetMethodID(env,
ompi_java.CartParmsClass, "<init>", "([I[Z[I)V");
ompi_java.ShiftParmsInit = (*env)->GetMethodID(env,
ompi_java.ShiftParmsClass, "<init>", "(II)V");
}
JNIEXPORT jobject JNICALL Java_mpi_CartComm_getTopo(
JNIEnv *env, jobject jthis, jlong comm)
{
int maxdims;
int rc = MPI_Cartdim_get((MPI_Comm)comm, &maxdims);
if(ompi_java_exceptionCheck(env, rc))
return NULL;
jintArray dims = (*env)->NewIntArray(env, maxdims);
jbooleanArray periods = (*env)->NewBooleanArray(env, maxdims);
jintArray coords = (*env)->NewIntArray(env, maxdims);
if(maxdims != 0)
{
jint *jDims, *jCoords;
jboolean *jPeriods;
int *cDims, *cCoords, *cPeriods;
ompi_java_getIntArray(env, dims, &jDims, &cDims);
ompi_java_getIntArray(env, coords, &jCoords, &cCoords);
ompi_java_getBooleanArray(env, periods, &jPeriods, &cPeriods);
rc = MPI_Cart_get((MPI_Comm)comm, maxdims, cDims, cPeriods, cCoords);
ompi_java_exceptionCheck(env, rc);
ompi_java_releaseIntArray(env, dims, jDims, cDims);
ompi_java_releaseIntArray(env, coords, jCoords, cCoords);
ompi_java_releaseBooleanArray(env, periods, jPeriods, cPeriods);
}
return (*env)->NewObject(env, ompi_java.CartParmsClass,
ompi_java.CartParmsInit, dims, periods, coords);
}
JNIEXPORT jobject JNICALL Java_mpi_CartComm_shift(
JNIEnv *env, jobject jthis, jlong comm, jint direction, jint disp)
{
int sr, dr;
int rc = MPI_Cart_shift((MPI_Comm)comm, direction, disp, &sr, &dr);
ompi_java_exceptionCheck(env, rc);
return (*env)->NewObject(env, ompi_java.ShiftParmsClass,
ompi_java.ShiftParmsInit, sr, dr);
}
JNIEXPORT jintArray JNICALL Java_mpi_CartComm_getCoords(
JNIEnv *env, jobject jthis, jlong comm, jint rank)
{
int maxdims;
int rc = MPI_Cartdim_get((MPI_Comm)comm, &maxdims);
if(ompi_java_exceptionCheck(env, rc))
return NULL;
jintArray coords = (*env)->NewIntArray(env, maxdims);
jint *jCoords;
int *cCoords;
ompi_java_getIntArray(env, coords, &jCoords, &cCoords);
rc = MPI_Cart_coords((MPI_Comm)comm, rank, maxdims, cCoords);
ompi_java_exceptionCheck(env, rc);
ompi_java_releaseIntArray(env, coords, jCoords, cCoords);
return coords;
}
JNIEXPORT jint JNICALL Java_mpi_CartComm_map(
JNIEnv *env, jobject jthis, jlong comm,
jintArray dims, jbooleanArray periods)
{
int nDims = (*env)->GetArrayLength(env, dims);
jint *jDims;
jboolean *jPeriods;
int *cDims, *cPeriods;
ompi_java_getIntArray(env, dims, &jDims, &cDims);
ompi_java_getBooleanArray(env, periods, &jPeriods, &cPeriods);
int newrank;
int rc = MPI_Cart_map((MPI_Comm)comm, nDims, cDims, cPeriods, &newrank);
ompi_java_exceptionCheck(env, rc);
ompi_java_forgetIntArray(env, dims, jDims, cDims);
ompi_java_forgetBooleanArray(env, periods, jPeriods, cPeriods);
return newrank;
}
JNIEXPORT jint JNICALL Java_mpi_CartComm_getRank(
JNIEnv *env, jobject jthis, jlong comm, jintArray coords)
{
jint *jCoords;
int *cCoords;
ompi_java_getIntArray(env, coords, &jCoords, &cCoords);
int rank;
int rc = MPI_Cart_rank((MPI_Comm)comm, cCoords, &rank);
ompi_java_exceptionCheck(env, rc);
ompi_java_forgetIntArray(env, coords, jCoords, cCoords);
return rank;
}
JNIEXPORT jlong JNICALL Java_mpi_CartComm_sub(
JNIEnv *env, jobject jthis, jlong comm, jbooleanArray remainDims)
{
jboolean *jRemainDims;
int *cRemainDims;
ompi_java_getBooleanArray(env, remainDims, &jRemainDims, &cRemainDims);
MPI_Comm newcomm;
int rc = MPI_Cart_sub((MPI_Comm)comm, cRemainDims, &newcomm);
ompi_java_exceptionCheck(env, rc);
ompi_java_forgetBooleanArray(env, remainDims, jRemainDims, cRemainDims);
return (jlong)newcomm;
}
JNIEXPORT void JNICALL Java_mpi_CartComm_createDims_1jni(
JNIEnv *env, jclass jthis, jint nNodes, jintArray dims)
{
int nDims = (*env)->GetArrayLength(env, dims);
jint *jDims;
int *cDims;
ompi_java_getIntArray(env, dims, &jDims, &cDims);
int rc = MPI_Dims_create(nNodes, nDims, cDims);
ompi_java_exceptionCheck(env, rc);
ompi_java_releaseIntArray(env, dims, jDims, cDims);
}


ompi/mpi/java/c/mpi_Cartcomm.c (deleted file)

@@ -1,257 +0,0 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
* File : mpi_Cartcomm.c
* Headerfile : mpi_Cartcomm.h
* Author : Sung-Hoon Ko, Xinying Li
* Created : Thu Apr 9 12:22:15 1998
* Revision : $Revision: 1.6 $
* Updated : $Date: 2003/01/16 16:39:34 $
* Copyright: Northeast Parallel Architectures Center
* at Syracuse University 1998
*/
#include "ompi_config.h"
#include <stdlib.h>
#ifdef HAVE_TARGETCONDITIONALS_H
#include <TargetConditionals.h>
#endif
#include "mpi.h"
#include "mpi_Cartcomm.h"
#include "mpiJava.h"
/*
* Class: mpi_Cartcomm
* Method: Get
* Signature: (I)Lmpi/CartParms;
*/
JNIEXPORT jobject JNICALL Java_mpi_Cartcomm_Get(JNIEnv *env, jobject jthis)
{
jintArray dims, coords;
jbooleanArray periods;
jint *ds, *cs;
jboolean *ps;
int *ips ;
jboolean isCopy1=JNI_TRUE, isCopy2=JNI_TRUE ,isCopy3=JNI_TRUE;
int maxdims;
int i ;
jclass cartparms_class=(*env)->FindClass(env,"mpi/CartParms");
jfieldID dimsID,periodsID,coordsID;
jmethodID handleConstructorID =
(*env)->GetMethodID(env, cartparms_class, "<init>", "()V");
jobject cartparms =
(*env)->NewObject(env,cartparms_class, handleConstructorID);
ompi_java_clearFreeList(env) ;
MPI_Cartdim_get((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)),&maxdims);
dims=(*env)->NewIntArray(env,maxdims);
periods=(*env)->NewBooleanArray(env,maxdims);
coords=(*env)->NewIntArray(env,maxdims);
ips = (int*) malloc(sizeof(int) * maxdims) ;
ds=(*env)->GetIntArrayElements(env,dims,&isCopy1);
cs=(*env)->GetIntArrayElements(env,coords,&isCopy3);
MPI_Cart_get((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)),
maxdims, (int*)ds, ips, (int*)cs);
ps=(*env)->GetBooleanArrayElements(env,periods,&isCopy2);
for (i = 0 ; i < maxdims ; i++) {
ps [i] = ips [i] ? JNI_TRUE : JNI_FALSE ;
}
dimsID=(*env)->GetFieldID(env,cartparms_class,"dims","[I");
periodsID=(*env)->GetFieldID(env,cartparms_class,"periods","[Z");
coordsID=(*env)->GetFieldID(env,cartparms_class , "coords", "[I");
(*env)->SetObjectField(env, cartparms, dimsID, dims);
(*env)->SetObjectField(env, cartparms, periodsID, periods);
(*env)->SetObjectField(env, cartparms, coordsID, coords);
(*env)->ReleaseIntArrayElements(env,dims,ds,0);
(*env)->ReleaseBooleanArrayElements(env,periods,ps,0);
(*env)->ReleaseIntArrayElements(env,coords,cs,0);
return cartparms;
}
/*
* Class: mpi_Cartcomm
* Method: Shift
* Signature: (II)Lmpi/ShiftParms;
*/
JNIEXPORT jobject JNICALL Java_mpi_Cartcomm_Shift(JNIEnv *env, jobject jthis,
jint direction, jint disp)
{
int sr, dr;
jclass shiftparms_class=(*env)->FindClass(env,"mpi/ShiftParms");
jfieldID rsID,rdID;
jmethodID handleConstructorID = (*env)->GetMethodID(env,
shiftparms_class, "<init>", "()V");
jobject shiftparms=(*env)->NewObject(env,shiftparms_class,
handleConstructorID);
ompi_java_clearFreeList(env) ;
MPI_Cart_shift((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)),
direction, disp, &sr, &dr);
rsID=(*env)->GetFieldID(env,shiftparms_class,"rank_source","I");
rdID=(*env)->GetFieldID(env,shiftparms_class,"rank_dest", "I");
(*env)->SetIntField(env, shiftparms, rsID, sr);
(*env)->SetIntField(env, shiftparms, rdID, dr);
/* printf("Shift finished.\n"); */
return shiftparms;
}
/*
* Class: mpi_Cartcomm
* Method: Coords
* Signature: (I)[I
*/
JNIEXPORT jintArray JNICALL Java_mpi_Cartcomm_Coords(JNIEnv *env, jobject jthis, jint rank)
{
jint *coords;
jboolean isCopy=JNI_TRUE;
jintArray jcoords;
int maxdims;
/*
jclass jthis_class=(*env)->FindClass(env,"mpi/Cartcomm");
jfieldID maxdimsID=(*env)->GetFieldID(env,jthis_class,"maxdims","I");
maxdims=(*env)->GetIntField(env,jthis, maxdimsID);
*/
ompi_java_clearFreeList(env) ;
MPI_Cartdim_get((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)),
&maxdims);
jcoords=(*env)->NewIntArray(env,maxdims);
coords=(*env)->GetIntArrayElements(env,jcoords,&isCopy);
MPI_Cart_coords((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)),
rank,maxdims,(int*)coords);
(*env)->ReleaseIntArrayElements(env,jcoords,coords,0);
return jcoords;
}
/*
* Class: mpi_Cartcomm
* Method: Map
* Signature: ([I[Z)I
*/
JNIEXPORT jint JNICALL Java_mpi_Cartcomm_Map(JNIEnv *env, jobject jthis,
jintArray dims, jbooleanArray periods)
{
int newrank;
jint *ds;
jboolean *ps;
jboolean isCopy=JNI_TRUE;
int ndims;
int *int_re_ds=(int*)calloc((*env)->GetArrayLength(env,periods), sizeof(int));
int i;
ompi_java_clearFreeList(env) ;
ndims=(*env)->GetArrayLength(env,dims);
ds=(*env)->GetIntArrayElements(env,dims,&isCopy);
ps=(*env)->GetBooleanArrayElements(env,periods,&isCopy);
for (i=0;i<=(*env)->GetArrayLength(env,periods);i++)
if(ps[i]==JNI_TRUE)
int_re_ds[i]=1;
else
int_re_ds[i]=0;
MPI_Cart_map((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)),
ndims,(int*)ds,int_re_ds, &newrank);
(*env)->ReleaseIntArrayElements(env,dims,ds,0);
(*env)->ReleaseBooleanArrayElements(env,periods,ps,0);
free(int_re_ds);
return newrank;
}
/*
* Class: mpi_Cartcomm
* Method: Rank
* Signature: ([I)I
*/
JNIEXPORT jint JNICALL Java_mpi_Cartcomm_Rank(JNIEnv *env, jobject jthis, jintArray coords)
{
int rank;
jint *crds;
jboolean isCopy=JNI_TRUE;
ompi_java_clearFreeList(env) ;
crds=(*env)->GetIntArrayElements(env,coords,&isCopy);
MPI_Cart_rank((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)),
(int*)crds, &rank);
(*env)->ReleaseIntArrayElements(env,coords,crds,0);
return rank;
}
/*
* Class: mpi_Cartcomm
* Method: Sub
* Signature: ([Z)Lmpi/Cartcomm;
*/
JNIEXPORT jlong JNICALL Java_mpi_Cartcomm_sub(JNIEnv *env, jobject jthis,
jbooleanArray remain_dims)
{
MPI_Comm newcomm;
jboolean *re_ds;
jboolean isCopy=JNI_TRUE;
int *int_re_ds=(int*)calloc((*env)->GetArrayLength(env,remain_dims), sizeof(int));
int i;
ompi_java_clearFreeList(env) ;
re_ds=(*env)->GetBooleanArrayElements(env,remain_dims,&isCopy);
for(i=0;i<=(*env)->GetArrayLength(env,remain_dims);i++)
if(re_ds[i]==JNI_TRUE)
int_re_ds[i]=1;
else
int_re_ds[i]=0;
MPI_Cart_sub((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)),
int_re_ds, &newcomm);
(*env)->ReleaseBooleanArrayElements(env,remain_dims,re_ds,0);
free(int_re_ds);
return (jlong)newcomm;
}
/*
* Class: mpi_Cartcomm
* Method: Dims_create
* Signature: (I[I)V
*/
JNIEXPORT void JNICALL Java_mpi_Cartcomm_Dims_1create(JNIEnv *env, jclass jthis,
jint nnodes, jintArray dims )
{
jint *cdims;
jboolean isCopy=JNI_TRUE;
int ndims = (*env)->GetArrayLength(env,dims) ;
ompi_java_clearFreeList(env) ;
cdims=(*env)->GetIntArrayElements(env,dims,&isCopy);
MPI_Dims_create(nnodes,ndims,(int*)cdims);
(*env)->ReleaseIntArrayElements(env,dims,cdims,0);
}

(Diff for one file not shown because of its large size.)

ompi/mpi/java/c/mpi_Constant.c (new file, 175 lines)

@@ -0,0 +1,175 @@
/*
* Copyright (c) 2013 Cisco Systems, Inc. All rights reserved.
*
* $COPYRIGHT$
*
* Additional copyrights may follow.
*/
#include "ompi_config.h"
#ifdef HAVE_TARGETCONDITIONALS_H
#include <TargetConditionals.h>
#endif
#include "mpi_Constant.h"
#include "mpiJava.h"
void ompi_java_setStaticLongField(JNIEnv *env, jclass c,
char *field, jlong value)
{
jfieldID id = (*env)->GetStaticFieldID(env, c, field, "J");
(*env)->SetStaticLongField(env, c, id, value);
}
void ompi_java_setIntField(JNIEnv *env, jclass c, jobject obj,
char *field, jint value)
{
jfieldID id = (*env)->GetFieldID(env, c, field, "I");
(*env)->SetIntField(env, obj, id, value);
}
/*
* Class: mpi_Constant
* Method: setConstant
* Signature: ()V
*/
JNIEXPORT void JNICALL Java_mpi_Constant_setConstant(JNIEnv *env, jobject obj)
{
jclass c = (*env)->GetObjectClass(env, obj);
ompi_java_setIntField(env, c, obj, "THREAD_SINGLE", MPI_THREAD_SINGLE);
ompi_java_setIntField(env, c, obj, "THREAD_FUNNELED", MPI_THREAD_FUNNELED);
ompi_java_setIntField(env, c, obj, "THREAD_SERIALIZED", MPI_THREAD_SERIALIZED);
ompi_java_setIntField(env, c, obj, "THREAD_MULTIPLE", MPI_THREAD_MULTIPLE);
ompi_java_setIntField(env, c, obj, "ANY_SOURCE", MPI_ANY_SOURCE);
ompi_java_setIntField(env, c, obj, "ANY_TAG", MPI_ANY_TAG);
ompi_java_setIntField(env, c, obj, "PROC_NULL", MPI_PROC_NULL);
ompi_java_setIntField(env, c, obj, "GRAPH", MPI_GRAPH);
ompi_java_setIntField(env, c, obj, "DIST_GRAPH", MPI_DIST_GRAPH);
ompi_java_setIntField(env, c, obj, "CART", MPI_CART);
ompi_java_setIntField(env, c, obj, "UNDEFINED", MPI_UNDEFINED);
ompi_java_setIntField(env, c, obj, "IDENT", MPI_IDENT);
ompi_java_setIntField(env, c, obj, "CONGRUENT", MPI_CONGRUENT);
ompi_java_setIntField(env, c, obj, "SIMILAR", MPI_SIMILAR);
ompi_java_setIntField(env, c, obj, "UNEQUAL", MPI_UNEQUAL);
ompi_java_setIntField(env, c, obj, "TAG_UB", MPI_TAG_UB);
ompi_java_setIntField(env, c, obj, "HOST", MPI_HOST);
ompi_java_setIntField(env, c, obj, "IO", MPI_IO);
ompi_java_setIntField(env, c, obj, "WTIME_IS_GLOBAL", MPI_WTIME_IS_GLOBAL);
ompi_java_setIntField(env, c, obj, "APPNUM", MPI_APPNUM);
ompi_java_setIntField(env, c, obj, "LASTUSEDCODE", MPI_LASTUSEDCODE);
ompi_java_setIntField(env, c, obj, "UNIVERSE_SIZE", MPI_UNIVERSE_SIZE);
ompi_java_setIntField(env, c, obj, "WIN_BASE", MPI_WIN_BASE);
ompi_java_setIntField(env, c, obj, "WIN_SIZE", MPI_WIN_SIZE);
ompi_java_setIntField(env, c, obj, "WIN_DISP_UNIT", MPI_WIN_DISP_UNIT);
ompi_java_setIntField(env, c, obj, "VERSION", MPI_VERSION);
ompi_java_setIntField(env, c, obj, "SUBVERSION", MPI_SUBVERSION);
ompi_java_setIntField(env, c, obj, "ROOT", MPI_ROOT);
ompi_java_setIntField(env, c, obj, "KEYVAL_INVALID", MPI_KEYVAL_INVALID);
#ifdef GC_DOES_PINNING
ompi_java_setIntField(env, c, obj, "BSEND_OVERHEAD", MPI_BSEND_OVERHEAD);
#else
ompi_java_setIntField(env, c, obj, "BSEND_OVERHEAD",
MPI_BSEND_OVERHEAD + sizeof(int));
#endif /* GC_DOES_PINNING */
ompi_java_setIntField(env, c, obj, "MAX_OBJECT_NAME", MPI_MAX_OBJECT_NAME);
ompi_java_setIntField(env, c, obj, "MAX_PORT_NAME", MPI_MAX_PORT_NAME);
ompi_java_setIntField(env, c, obj, "MAX_DATAREP_STRING", MPI_MAX_DATAREP_STRING);
ompi_java_setIntField(env, c, obj, "MAX_INFO_KEY", MPI_MAX_INFO_KEY);
ompi_java_setIntField(env, c, obj, "MAX_INFO_VAL", MPI_MAX_INFO_VAL);
ompi_java_setIntField(env, c, obj, "ORDER_C", MPI_ORDER_C);
ompi_java_setIntField(env, c, obj, "ORDER_FORTRAN", MPI_ORDER_FORTRAN);
ompi_java_setIntField(env, c, obj, "DISTRIBUTE_BLOCK", MPI_DISTRIBUTE_BLOCK);
ompi_java_setIntField(env, c, obj, "DISTRIBUTE_CYCLIC", MPI_DISTRIBUTE_CYCLIC);
ompi_java_setIntField(env, c, obj, "DISTRIBUTE_NONE", MPI_DISTRIBUTE_NONE);
ompi_java_setIntField(env, c, obj, "DISTRIBUTE_DFLT_DARG", MPI_DISTRIBUTE_DFLT_DARG);
ompi_java_setIntField(env, c, obj, "MODE_CREATE", MPI_MODE_CREATE);
ompi_java_setIntField(env, c, obj, "MODE_RDONLY", MPI_MODE_RDONLY);
ompi_java_setIntField(env, c, obj, "MODE_WRONLY", MPI_MODE_WRONLY);
ompi_java_setIntField(env, c, obj, "MODE_RDWR", MPI_MODE_RDWR);
ompi_java_setIntField(env, c, obj, "MODE_DELETE_ON_CLOSE", MPI_MODE_DELETE_ON_CLOSE);
ompi_java_setIntField(env, c, obj, "MODE_UNIQUE_OPEN", MPI_MODE_UNIQUE_OPEN);
ompi_java_setIntField(env, c, obj, "MODE_EXCL", MPI_MODE_EXCL);
ompi_java_setIntField(env, c, obj, "MODE_APPEND", MPI_MODE_APPEND);
ompi_java_setIntField(env, c, obj, "MODE_SEQUENTIAL", MPI_MODE_SEQUENTIAL);
ompi_java_setIntField(env, c, obj, "DISPLACEMENT_CURRENT", MPI_DISPLACEMENT_CURRENT);
ompi_java_setIntField(env, c, obj, "SEEK_SET", MPI_SEEK_SET);
ompi_java_setIntField(env, c, obj, "SEEK_CUR", MPI_SEEK_CUR);
ompi_java_setIntField(env, c, obj, "SEEK_END", MPI_SEEK_END);
ompi_java_setIntField(env, c, obj, "MODE_NOCHECK", MPI_MODE_NOCHECK);
ompi_java_setIntField(env, c, obj, "MODE_NOPRECEDE", MPI_MODE_NOPRECEDE);
ompi_java_setIntField(env, c, obj, "MODE_NOPUT", MPI_MODE_NOPUT);
ompi_java_setIntField(env, c, obj, "MODE_NOSTORE", MPI_MODE_NOSTORE);
ompi_java_setIntField(env, c, obj, "MODE_NOSUCCEED", MPI_MODE_NOSUCCEED);
ompi_java_setIntField(env, c, obj, "LOCK_EXCLUSIVE", MPI_LOCK_EXCLUSIVE);
ompi_java_setIntField(env, c, obj, "LOCK_SHARED", MPI_LOCK_SHARED);
// Error classes and codes
ompi_java_setIntField(env, c, obj, "SUCCESS", MPI_SUCCESS);
ompi_java_setIntField(env, c, obj, "ERR_BUFFER", MPI_ERR_BUFFER);
ompi_java_setIntField(env, c, obj, "ERR_COUNT", MPI_ERR_COUNT);
ompi_java_setIntField(env, c, obj, "ERR_TYPE", MPI_ERR_TYPE);
ompi_java_setIntField(env, c, obj, "ERR_TAG", MPI_ERR_TAG);
ompi_java_setIntField(env, c, obj, "ERR_COMM", MPI_ERR_COMM);
ompi_java_setIntField(env, c, obj, "ERR_RANK", MPI_ERR_RANK);
ompi_java_setIntField(env, c, obj, "ERR_REQUEST", MPI_ERR_REQUEST);
ompi_java_setIntField(env, c, obj, "ERR_ROOT", MPI_ERR_ROOT);
ompi_java_setIntField(env, c, obj, "ERR_GROUP", MPI_ERR_GROUP);
ompi_java_setIntField(env, c, obj, "ERR_OP", MPI_ERR_OP);
ompi_java_setIntField(env, c, obj, "ERR_TOPOLOGY", MPI_ERR_TOPOLOGY);
ompi_java_setIntField(env, c, obj, "ERR_DIMS", MPI_ERR_DIMS);
ompi_java_setIntField(env, c, obj, "ERR_ARG", MPI_ERR_ARG);
ompi_java_setIntField(env, c, obj, "ERR_UNKNOWN", MPI_ERR_UNKNOWN);
ompi_java_setIntField(env, c, obj, "ERR_TRUNCATE", MPI_ERR_TRUNCATE);
ompi_java_setIntField(env, c, obj, "ERR_OTHER", MPI_ERR_OTHER);
ompi_java_setIntField(env, c, obj, "ERR_INTERN", MPI_ERR_INTERN);
ompi_java_setIntField(env, c, obj, "ERR_IN_STATUS", MPI_ERR_IN_STATUS);
ompi_java_setIntField(env, c, obj, "ERR_PENDING", MPI_ERR_PENDING);
ompi_java_setIntField(env, c, obj, "ERR_ACCESS", MPI_ERR_ACCESS);
ompi_java_setIntField(env, c, obj, "ERR_AMODE", MPI_ERR_AMODE);
ompi_java_setIntField(env, c, obj, "ERR_ASSERT", MPI_ERR_ASSERT);
ompi_java_setIntField(env, c, obj, "ERR_BAD_FILE", MPI_ERR_BAD_FILE);
ompi_java_setIntField(env, c, obj, "ERR_BASE", MPI_ERR_BASE);
ompi_java_setIntField(env, c, obj, "ERR_CONVERSION", MPI_ERR_CONVERSION);
ompi_java_setIntField(env, c, obj, "ERR_DISP", MPI_ERR_DISP);
ompi_java_setIntField(env, c, obj, "ERR_DUP_DATAREP", MPI_ERR_DUP_DATAREP);
ompi_java_setIntField(env, c, obj, "ERR_FILE_EXISTS", MPI_ERR_FILE_EXISTS);
ompi_java_setIntField(env, c, obj, "ERR_FILE_IN_USE", MPI_ERR_FILE_IN_USE);
ompi_java_setIntField(env, c, obj, "ERR_FILE", MPI_ERR_FILE);
ompi_java_setIntField(env, c, obj, "ERR_INFO_KEY", MPI_ERR_INFO_KEY);
ompi_java_setIntField(env, c, obj, "ERR_INFO_NOKEY", MPI_ERR_INFO_NOKEY);
ompi_java_setIntField(env, c, obj, "ERR_INFO_VALUE", MPI_ERR_INFO_VALUE);
ompi_java_setIntField(env, c, obj, "ERR_INFO", MPI_ERR_INFO);
ompi_java_setIntField(env, c, obj, "ERR_IO", MPI_ERR_IO);
ompi_java_setIntField(env, c, obj, "ERR_KEYVAL", MPI_ERR_KEYVAL);
ompi_java_setIntField(env, c, obj, "ERR_LOCKTYPE", MPI_ERR_LOCKTYPE);
ompi_java_setIntField(env, c, obj, "ERR_NAME", MPI_ERR_NAME);
ompi_java_setIntField(env, c, obj, "ERR_NO_MEM", MPI_ERR_NO_MEM);
ompi_java_setIntField(env, c, obj, "ERR_NOT_SAME", MPI_ERR_NOT_SAME);
ompi_java_setIntField(env, c, obj, "ERR_NO_SPACE", MPI_ERR_NO_SPACE);
ompi_java_setIntField(env, c, obj, "ERR_NO_SUCH_FILE", MPI_ERR_NO_SUCH_FILE);
ompi_java_setIntField(env, c, obj, "ERR_PORT", MPI_ERR_PORT);
ompi_java_setIntField(env, c, obj, "ERR_QUOTA", MPI_ERR_QUOTA);
ompi_java_setIntField(env, c, obj, "ERR_READ_ONLY", MPI_ERR_READ_ONLY);
ompi_java_setIntField(env, c, obj, "ERR_RMA_CONFLICT", MPI_ERR_RMA_CONFLICT);
ompi_java_setIntField(env, c, obj, "ERR_RMA_SYNC", MPI_ERR_RMA_SYNC);
ompi_java_setIntField(env, c, obj, "ERR_SERVICE", MPI_ERR_SERVICE);
ompi_java_setIntField(env, c, obj, "ERR_SIZE", MPI_ERR_SIZE);
ompi_java_setIntField(env, c, obj, "ERR_SPAWN", MPI_ERR_SPAWN);
ompi_java_setIntField(env, c, obj, "ERR_UNSUPPORTED_DATAREP",
MPI_ERR_UNSUPPORTED_DATAREP);
ompi_java_setIntField(env, c, obj, "ERR_UNSUPPORTED_OPERATION",
MPI_ERR_UNSUPPORTED_OPERATION);
ompi_java_setIntField(env, c, obj, "ERR_WIN", MPI_ERR_WIN);
ompi_java_setIntField(env, c, obj, "ERR_LASTCODE", MPI_ERR_LASTCODE);
ompi_java_setIntField(env, c, obj, "ERR_SYSRESOURCE", MPI_ERR_SYSRESOURCE);
}
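
On the Java side these constants surface as plain int fields, so user code compares them directly. A small sketch (assuming an MPI.InitThread entry point mirroring MPI_Init_thread; that entry point lives in the Java half of the commit, not in this C file):

    import mpi.*;

    public class ThreadLevel {
        public static void main(String[] args) throws MPIException {
            // THREAD_* are ordinary ints, filled in by setConstant() above.
            int provided = MPI.InitThread(args, MPI.THREAD_FUNNELED);
            if (provided < MPI.THREAD_FUNNELED)
                System.err.println("requested thread support not available");
            MPI.Finalize();
        }
    }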


ompi/mpi/java/c/mpi_Datatype.c

@@ -13,7 +13,7 @@
  */
 /*
  * File         : mpi_Datatype.c
  * Headerfile   : mpi_Datatype.h
  * Author       : Sung-Hoon Ko, Xinying Li, Sang Lim, Bryan Carpenter
  * Created      : Thu Apr 9 12:22:15 1998
  * Revision     : $Revision: 1.10 $
@@ -32,36 +32,6 @@
 #include "mpi_Datatype.h"
 #include "mpiJava.h"
-/*
- * public class Datatype {
- *     private final static int UNDEFINED = -1;
- *     public final static int NULL    = 0;
- *     public final static int BYTE    = 1;
- *     public final static int CHAR    = 2;
- *
- *     public final static int SHORT   = 3;
- *     public final static int BOOLEAN = 4;
- *     public final static int INT     = 5;
- *
- *     public final static int LONG    = 6;
- *     public final static int FLOAT   = 7;
- *     public final static int DOUBLE  = 8;
- *
- *     public final static int PACKED  = 9;
- *     public final static int LB      = 10;
- *     public final static int UB      = 11;
- *
- *     public final static int OBJECT  = 12;
- *
- *     ...
- * }
- *
- * Per
- * http://docs.oracle.com/javase/tutorial/java/nutsandbolts/datatypes.html,
- * the sizes of Java types are fixed. So we just assign them to their
- * corresponding MPI fixed-size datatypes.
- */
 MPI_Datatype Dts[] = { MPI_DATATYPE_NULL, /* NULL */
     MPI_UINT8_T,  /* BYTE */
     MPI_UINT16_T, /* CHAR */
@@ -73,336 +43,315 @@ MPI_Datatype Dts[] = { MPI_DATATYPE_NULL, /* NULL */
     MPI_FLOAT,    /* FLOAT (let's hope it's the same!) */
     MPI_DOUBLE,   /* DOUBLE (let's hoe it's the same!) */
     MPI_PACKED,   /* PACKED */
-    MPI_LB,       /* LB */
-    MPI_UB,       /* UB */
-    MPI_BYTE      /* JMS This one needs to go... */
+    MPI_2INT,
+    MPI_SHORT_INT,
+    MPI_LONG_INT,
+    MPI_FLOAT_INT,
+    MPI_DOUBLE_INT,
+    MPI_C_FLOAT_COMPLEX,
+    MPI_C_DOUBLE_COMPLEX
 };

-void ompi_java_init_native_Datatype(void)
+void ompi_java_init_native_Datatype(JNIEnv *env)
 {
     /* Initialization that can only be done after MPI_Init() has
      * been called. Called from `mpi_MPI.c'.
      */
-    int i ;
-    ompi_java.dt_sizes = (int*) malloc(13 * sizeof(int)) ;
-    for (i = 1 ; i < 13 ; i++) {
-        MPI_Type_size(Dts[i], &(ompi_java.dt_sizes[i])) ;
+    int i, rc;
+    ompi_java.dtSizes[0] = 0;
+    for(i = 1; i < 12; i++)
+    {
+        rc = MPI_Type_size(Dts[i], &(ompi_java.dtSizes[i]));
+        if(ompi_java_exceptionCheck(env, rc))
+            return;
     }
 }

-/*
- * Class:     mpi_Datatype
- * Method:    init
- * Signature: ()V
- */
-JNIEXPORT void JNICALL Java_mpi_Datatype_init(JNIEnv *env, jclass thisClass)
+JNIEXPORT void JNICALL Java_mpi_Datatype_init(JNIEnv *e, jclass clazz)
 {
-    ompi_java.DatatypehandleID = (*env)->GetFieldID(env,thisClass,"handle","J");
-    ompi_java.DatatypebaseTypeID = (*env)->GetFieldID(env,thisClass,"baseType","I");
-    ompi_java.DatatypebaseSizeID = (*env)->GetFieldID(env,thisClass,"baseSize","I");
+    ompi_java.DatatypeHandle   = (*e)->GetFieldID(e, clazz, "handle", "J");
+    ompi_java.DatatypeBaseType = (*e)->GetFieldID(e, clazz, "baseType", "I");
+    ompi_java.DatatypeBaseSize = (*e)->GetFieldID(e, clazz, "baseSize", "I");
 }

-/*
- * Class:     mpi_Datatype
- * Method:    GetDatatype
- * Signature: (I)J
- */
-JNIEXPORT void JNICALL Java_mpi_Datatype_GetDatatype(JNIEnv *env, jobject jthis, jint type)
+JNIEXPORT jlong JNICALL Java_mpi_Datatype_getDatatype(
+        JNIEnv *e, jobject jthis, jint type)
 {
-    (*env)->SetLongField(env,jthis, ompi_java.DatatypehandleID, (jlong)Dts[type]);
+    return (jlong)Dts[type];
 }

-/*
- * Class:     mpi_Datatype
- * Method:    size
- * Signature: ()I
- */
-JNIEXPORT jint JNICALL Java_mpi_Datatype_size(JNIEnv *env, jobject jthis)
-{
-    int result;
-    ompi_java_clearFreeList(env) ;
-    MPI_Type_size((MPI_Datatype)((*env)->GetLongField(env,jthis,ompi_java.DatatypehandleID)),
-                  &result );
-    return result;
-}

-/*
- * Class:     mpi_Datatype
- * Method:    extent
- * Signature: ()I
- */
-JNIEXPORT jint JNICALL Java_mpi_Datatype_extent(JNIEnv *env, jobject jthis)
-{
-    MPI_Aint result;
-    ompi_java_clearFreeList(env) ;
-    MPI_Type_extent((MPI_Datatype)((*env)->GetLongField(env,jthis,ompi_java.DatatypehandleID)),
-                    &result);
-    return result;
-}

-/*
- * Class:     mpi_Datatype
- * Method:    lB
- * Signature: ()I
- */
-JNIEXPORT jint JNICALL Java_mpi_Datatype_lB(JNIEnv *env, jobject jthis)
-{
-    MPI_Aint result;
-    ompi_java_clearFreeList(env) ;
-    MPI_Type_lb((MPI_Datatype)((*env)->GetLongField(env,jthis,ompi_java.DatatypehandleID)),
-                &result);
-    return result;
-}

-/*
- * Class:     mpi_Datatype
- * Method:    uB
- * Signature: ()I
- */
-JNIEXPORT jint JNICALL Java_mpi_Datatype_uB(JNIEnv *env, jobject jthis)
-{
-    MPI_Aint result;
-    ompi_java_clearFreeList(env) ;
-    MPI_Type_ub((MPI_Datatype)((*env)->GetLongField(env,jthis,ompi_java.DatatypehandleID)),
-                &result);
-    return result;
-}

-/*
- * Class:     mpi_Datatype
- * Method:    commit
- * Signature: ()V
- */
-JNIEXPORT void JNICALL Java_mpi_Datatype_commit(JNIEnv *env, jobject jthis)
-{
-    MPI_Datatype type;
-    ompi_java_clearFreeList(env) ;
-    type=(MPI_Datatype)((*env)->GetLongField(env,jthis,ompi_java.DatatypehandleID));
-    MPI_Type_commit(&type);
-}

-/*
- * Class:     mpi_Datatype
- * Method:    free
- * Signature: ()V
- */
-JNIEXPORT void JNICALL Java_mpi_Datatype_free(JNIEnv *env, jobject jthis)
-{
-    MPI_Datatype type;
-    type=(MPI_Datatype)((*env)->GetLongField(env,jthis,ompi_java.DatatypehandleID));
-    if (type != MPI_DATATYPE_NULL) {
-        MPI_Type_free(&type);
-    }
-}

-/*
- * Class:     mpi_Datatype
- * Method:    GetContiguous
- * Signature: (I)V
- */
-JNIEXPORT void JNICALL Java_mpi_Datatype_GetContiguous(JNIEnv *env, jobject jthis,
-                                                       jint count,jobject oldtype)
-{
-    MPI_Datatype type;
-    ompi_java_clearFreeList(env) ;
-    MPI_Type_contiguous(count,
-                        (MPI_Datatype)((*env)->GetLongField(env,oldtype,ompi_java.DatatypehandleID)),
-                        &type);
-    (*env)->SetLongField(env,jthis, ompi_java.DatatypehandleID, (jlong)type);
-}

-/*
- * Class:     mpi_Datatype
- * Method:    GetVector
- * Signature: (III)V
- */
-JNIEXPORT void JNICALL Java_mpi_Datatype_GetVector(JNIEnv *env, jobject jthis,
-                                                   jint count, jint blocklength, jint stride,
-                                                   jobject oldtype)
-{
-    MPI_Datatype type;
-    ompi_java_clearFreeList(env) ;
-    MPI_Type_vector(count, blocklength, stride,
-                    (MPI_Datatype)((*env)->GetLongField(env,oldtype,ompi_java.DatatypehandleID)),
-                    &type);
-    (*env)->SetLongField(env,jthis, ompi_java.DatatypehandleID, (jlong)type);
-}

-/*
- * Class:     mpi_Datatype
- * Method:    GetHvector
- * Signature: (III)V
- */
-JNIEXPORT void JNICALL Java_mpi_Datatype_GetHvector(JNIEnv *env, jobject jthis,
-                                                    jint count, jint blocklength, jint stride,
-                                                    jobject oldtype)
-{
-    MPI_Datatype type;
-    jint baseSize = (*env)->GetIntField(env, jthis, ompi_java.DatatypebaseSizeID) ;
-    ompi_java_clearFreeList(env) ;
-    MPI_Type_hvector(count, blocklength, baseSize * stride,
-                     (MPI_Datatype)((*env)->GetLongField(env,oldtype,ompi_java.DatatypehandleID)),
-                     &type);
-    (*env)->SetLongField(env,jthis, ompi_java.DatatypehandleID, (jlong)type);
-}

-/*
- * Class:     mpi_Datatype
- * Method:    GetIndexed
- * Signature: (I[I[I)V
- */
-JNIEXPORT void JNICALL Java_mpi_Datatype_GetIndexed(JNIEnv *env, jobject jthis,
-                                                    jintArray blocklengths, jintArray
-                                                    displacements, jobject oldtype)
-{
-    MPI_Datatype type;
-    int count=(*env)->GetArrayLength(env,blocklengths);
-    jboolean isCopy=JNI_TRUE;
-    jint *lengths; jint *disps;
-    ompi_java_clearFreeList(env) ;
-    lengths=(*env)->GetIntArrayElements(env,blocklengths,&isCopy);
-    disps = (*env)->GetIntArrayElements(env,displacements,&isCopy);
-    MPI_Type_indexed(count, (int*)lengths, (int*)disps,
-                     (MPI_Datatype)((*env)->GetLongField(env,oldtype,ompi_java.DatatypehandleID)), &type);
-    (*env)->ReleaseIntArrayElements(env,blocklengths,lengths,0);
-    (*env)->ReleaseIntArrayElements(env,displacements,disps,0);
-    (*env)->SetLongField(env,jthis, ompi_java.DatatypehandleID, (jlong)type);
-}

-/*
- * Class:     mpi_Datatype
- * Method:    GetHindexed
- * Signature: (I[I[I)V
- */
-JNIEXPORT void JNICALL Java_mpi_Datatype_GetHindexed(JNIEnv *env, jobject jthis,
-                                                     jintArray blocklengths,
-                                                     jintArray displacements,
-                                                     jobject oldtype)
-{
-    MPI_Datatype type ;
-    int count = (*env)->GetArrayLength(env,blocklengths);
-    jboolean isCopy ;
-    jint *lengths; jint *disps;
-    jint baseSize = (*env)->GetIntField(env, jthis, ompi_java.DatatypebaseSizeID) ;
-    MPI_Aint* cdisps ;
-    int i ;
-    ompi_java_clearFreeList(env) ;
-    lengths=(*env)->GetIntArrayElements(env,blocklengths,&isCopy);
-    disps = (*env)->GetIntArrayElements(env,displacements,&isCopy);
-    cdisps = (MPI_Aint*) calloc(count, sizeof(MPI_Aint)) ;
-    for(i = 0 ; i < count ; i++)
-        cdisps [i] = baseSize * disps [i] ;
-    MPI_Type_hindexed(count, (int*)lengths, cdisps,
-                      (MPI_Datatype)((*env)->GetLongField(env,oldtype,ompi_java.DatatypehandleID)),
-                      &type);
-    free(cdisps) ;
-    (*env)->ReleaseIntArrayElements(env,blocklengths,lengths,0);
-    (*env)->ReleaseIntArrayElements(env,displacements,disps,0);
-    (*env)->SetLongField(env,jthis, ompi_java.DatatypehandleID, (jlong)type);
-}

-/*
- * Class:     mpi_Datatype
- * Method:    GetStruct
- * Signature: ([I[I[Lmpi/Datatype;ZIZI)V
- */
-JNIEXPORT void JNICALL Java_mpi_Datatype_GetStruct(JNIEnv *env, jobject jthis,
-                                                   jintArray blocklengths, jintArray displacements,
-                                                   jobjectArray datatypes,
-                                                   jboolean lbSet, jint lb, jboolean ubSet, jint ub)
-{
-    MPI_Datatype type;
-    int count, ptr, i ;
-    jboolean isCopy ;
-    jint *lengths, *disps ;
-    MPI_Datatype *ctypes ;
-    int *clengths ;
-    MPI_Aint *cdisps ;
-    jint baseSize = (*env)->GetIntField(env, jthis, ompi_java.DatatypebaseSizeID) ;
-    ompi_java_clearFreeList(env) ;
-    count = (*env)->GetArrayLength(env,blocklengths);
-    lengths = (*env)->GetIntArrayElements(env,blocklengths,&isCopy);
-    disps = (*env)->GetIntArrayElements(env,displacements,&isCopy);
-    /* Remove components with UNDEFINED base type, but add upper bound
-       and lower bound markers if required. */
-    ctypes = (MPI_Datatype*) calloc(count + 2, sizeof(MPI_Datatype)) ;
-    clengths = (int*) calloc(count + 2, sizeof(int)) ;
-    cdisps = (MPI_Aint*) calloc(count + 2, sizeof(MPI_Aint)) ;
-    ptr = 0 ;
-    for(i = 0 ; i < count ; i++) {
-        jobject type = (*env)->GetObjectArrayElement(env, datatypes, i) ;
-        jint baseType = (*env)->GetIntField(env, type, ompi_java.DatatypebaseTypeID) ;
-        if(baseType != -1) {
-            jlong handle = (*env)->GetLongField(env, type, ompi_java.DatatypehandleID) ;
-            ctypes [ptr] = (MPI_Datatype) handle ;
-            clengths [ptr] = lengths [i] ;
-            cdisps [ptr] = baseSize * disps [i] ;
-            ptr++ ;
-        }
-    }
-    if(lbSet == JNI_TRUE) {
-        ctypes [ptr] = MPI_LB ;
-        clengths [ptr] = 1 ;
-        cdisps [ptr] = baseSize * lb ;
-        ptr++ ;
-    }
-    if(ubSet == JNI_TRUE) {
-        ctypes [ptr] = MPI_UB ;
-        clengths [ptr] = 1 ;
-        cdisps [ptr] = baseSize * ub ;
-        ptr++ ;
-    }
-    MPI_Type_struct(ptr, clengths, cdisps, ctypes, &type);
-    free(cdisps);
-    free(clengths);
-    free(ctypes);
-    (*env)->ReleaseIntArrayElements(env,blocklengths,lengths,0);
-    (*env)->ReleaseIntArrayElements(env,displacements,disps,0);
-    (*env)->SetLongField(env,jthis, ompi_java.DatatypehandleID, (jlong)type);
-}

+JNIEXPORT void JNICALL Java_mpi_Datatype_getLbExtent(
+        JNIEnv *env, jobject jthis, jlong type, jintArray jLbExt)
+{
+    MPI_Aint lb, extent;
+    int rc = MPI_Type_get_extent((MPI_Datatype)type, &lb, &extent);
+    ompi_java_exceptionCheck(env, rc);
+    jint *lbExt = (*env)->GetIntArrayElements(env, jLbExt, NULL);
+    lbExt[0] = (jint)lb;
+    lbExt[1] = (jint)extent;
+    (*env)->ReleaseIntArrayElements(env, jLbExt, lbExt, 0);
+}

+JNIEXPORT void JNICALL Java_mpi_Datatype_getTrueLbExtent(
+        JNIEnv *env, jobject jthis, jlong type, jintArray jLbExt)
+{
+    MPI_Aint lb, extent;
+    int rc = MPI_Type_get_true_extent((MPI_Datatype)type, &lb, &extent);
+    ompi_java_exceptionCheck(env, rc);
+    jint *lbExt = (*env)->GetIntArrayElements(env, jLbExt, NULL);
+    lbExt[0] = (jint)lb;
+    lbExt[1] = (jint)extent;
+    (*env)->ReleaseIntArrayElements(env, jLbExt, lbExt, 0);
+}

+JNIEXPORT jint JNICALL Java_mpi_Datatype_getSize(
+        JNIEnv *env, jobject jthis, jlong type)
+{
+    int rc, result;
+    rc = MPI_Type_size((MPI_Datatype)type, &result);
+    ompi_java_exceptionCheck(env, rc);
+    return result;
+}

+JNIEXPORT void JNICALL Java_mpi_Datatype_commit(
+        JNIEnv *env, jobject jthis, jlong handle)
+{
+    MPI_Datatype type = (MPI_Datatype)handle;
+    int rc = MPI_Type_commit(&type);
+    ompi_java_exceptionCheck(env, rc);
+}

+JNIEXPORT jlong JNICALL Java_mpi_Datatype_free(
+        JNIEnv *env, jobject jthis, jlong handle)
+{
+    MPI_Datatype type = (MPI_Datatype)handle;
+    if(type != MPI_DATATYPE_NULL)
+    {
+        int rc = MPI_Type_free(&type);
+        ompi_java_exceptionCheck(env, rc);
+    }
+    return (jlong)type;
+}

+JNIEXPORT jlong JNICALL Java_mpi_Datatype_dup(
+        JNIEnv *env, jobject jthis, jlong oldType)
+{
+    MPI_Datatype newType;
+    int rc = MPI_Type_dup((MPI_Datatype)oldType, &newType);
+    ompi_java_exceptionCheck(env, rc);
+    return (jlong)newType;
+}

+JNIEXPORT jlong JNICALL Java_mpi_Datatype_getContiguous(
+        JNIEnv *env, jclass clazz, jint count, jlong oldType)
+{
+    MPI_Datatype type;
+    int rc = MPI_Type_contiguous(count, (MPI_Datatype)oldType, &type);
+    ompi_java_exceptionCheck(env, rc);
+    return (jlong)type;
+}

+JNIEXPORT jlong JNICALL Java_mpi_Datatype_getVector(
+        JNIEnv *env, jclass clazz, jint count,
+        jint blockLength, jint stride, jlong oldType)
+{
+    MPI_Datatype type;
+    int rc = MPI_Type_vector(count, blockLength, stride,
+                             (MPI_Datatype)oldType, &type);
+    ompi_java_exceptionCheck(env, rc);
+    return (jlong)type;
+}

+JNIEXPORT jlong JNICALL Java_mpi_Datatype_getHVector(
+        JNIEnv *env, jclass clazz, jint count,
+        jint blockLength, jint stride, jlong oldType)
+{
+    MPI_Datatype type;
+    int rc = MPI_Type_hvector(count, blockLength, stride,
+                              (MPI_Datatype)oldType, &type);
+    ompi_java_exceptionCheck(env, rc);
+    return (jlong)type;
+}

+JNIEXPORT jlong JNICALL Java_mpi_Datatype_getIndexed(
+        JNIEnv *env, jclass clazz, jintArray blockLengths,
+        jintArray disps, jlong oldType)
+{
+    MPI_Datatype type;
+    int count = (*env)->GetArrayLength(env, blockLengths);
+    jint *jBlockLengths, *jDispl;
+    int *cBlockLengths, *cDispl;
+    ompi_java_getIntArray(env, blockLengths, &jBlockLengths, &cBlockLengths);
+    ompi_java_getIntArray(env, disps, &jDispl, &cDispl);
+    int rc = MPI_Type_indexed(count, cBlockLengths, cDispl,
+                              (MPI_Datatype)oldType, &type);
+    ompi_java_exceptionCheck(env, rc);
+    ompi_java_forgetIntArray(env, blockLengths, jBlockLengths, cBlockLengths);
+    ompi_java_forgetIntArray(env, disps, jDispl, cDispl);
+    return (jlong)type;
+}

+JNIEXPORT jlong JNICALL Java_mpi_Datatype_getHIndexed(
+        JNIEnv *env, jclass clazz, jintArray blockLengths,
+        jintArray disps, jlong oldType)
+{
+    MPI_Datatype type;
+    int count = (*env)->GetArrayLength(env, blockLengths);
+    jint *jBlockLengths;
+    int *cBlockLengths;
+    ompi_java_getIntArray(env, blockLengths, &jBlockLengths, &cBlockLengths);
+    jint *jDisps = (*env)->GetIntArrayElements(env, disps, NULL);
+    MPI_Aint *cDisps = (MPI_Aint*)calloc(count, sizeof(MPI_Aint));
+    int i;
+    for(i = 0; i < count; i++)
+        cDisps[i] = jDisps[i];
+    int rc = MPI_Type_hindexed(count, cBlockLengths, cDisps,
+                               (MPI_Datatype)oldType, &type);
+    ompi_java_exceptionCheck(env, rc);
+    free(cDisps);
+    ompi_java_forgetIntArray(env, blockLengths, jBlockLengths, cBlockLengths);
+    (*env)->ReleaseIntArrayElements(env, disps, jDisps, JNI_ABORT);
+    return (jlong)type;
+}

+JNIEXPORT jlong JNICALL Java_mpi_Datatype_getStruct(
+        JNIEnv *env, jclass clazz, jintArray blockLengths,
+        jintArray disps, jobjectArray datatypes)
+{
+    int count = (*env)->GetArrayLength(env, blockLengths);
+    jint *jBlockLengths;
+    int *cBlockLengths;
+    ompi_java_getIntArray(env, blockLengths, &jBlockLengths, &cBlockLengths);
+    jint *jDisps = (*env)->GetIntArrayElements(env, disps, NULL);
+    MPI_Aint *cDisps = (MPI_Aint*)calloc(count, sizeof(MPI_Aint));
+    MPI_Datatype *cTypes = (MPI_Datatype*)calloc(count, sizeof(MPI_Datatype));
+    int i;
+    for(i = 0; i < count; i++)
+    {
+        cDisps[i] = jDisps[i];
+        jobject type = (*env)->GetObjectArrayElement(env, datatypes, i);
+        cTypes[i] = (MPI_Datatype)(*env)->GetLongField(
+                    env, type, ompi_java.DatatypeHandle);
+        (*env)->DeleteLocalRef(env, type);
+    }
+    MPI_Datatype type;
+    int rc = MPI_Type_struct(count, cBlockLengths, cDisps, cTypes, &type);
+    ompi_java_exceptionCheck(env, rc);
+    free(cDisps);
+    free(cTypes);
+    ompi_java_forgetIntArray(env, blockLengths, jBlockLengths, cBlockLengths);
+    (*env)->ReleaseIntArrayElements(env, disps, jDisps, JNI_ABORT);
+    return (jlong)type;
+}

+JNIEXPORT jlong JNICALL Java_mpi_Datatype_getResized(
+        JNIEnv *env, jclass clazz, jlong oldType, jint lb, jint extent)
+{
+    MPI_Datatype type;
+    int rc = MPI_Type_create_resized((MPI_Datatype)oldType, lb, extent, &type);
+    ompi_java_exceptionCheck(env, rc);
+    return (jlong)type;
+}

+JNIEXPORT void JNICALL Java_mpi_Datatype_setName(
+        JNIEnv *env, jobject jthis, jlong handle, jstring jname)
+{
+    const char *name = (*env)->GetStringUTFChars(env, jname, NULL);
+    int rc = MPI_Type_set_name((MPI_Datatype)handle, (char*)name);
+    ompi_java_exceptionCheck(env, rc);
+    (*env)->ReleaseStringUTFChars(env, jname, name);
+}

+JNIEXPORT jstring JNICALL Java_mpi_Datatype_getName(
+        JNIEnv *env, jobject jthis, jlong handle)
+{
+    char name[MPI_MAX_OBJECT_NAME];
+    int len;
+    int rc = MPI_Type_get_name((MPI_Datatype)handle, name, &len);
+    if(ompi_java_exceptionCheck(env, rc))
+        return NULL;
+    return (*env)->NewStringUTF(env, name);
+}

+static int typeCopyAttr(MPI_Datatype oldType, int keyval, void *extraState,
+                        void *attrValIn, void *attrValOut, int *flag)
+{
+    return ompi_java_attrCopy(attrValIn, attrValOut, flag);
+}

+static int typeDeleteAttr(MPI_Datatype oldType, int keyval,
+                          void *attrVal, void *extraState)
+{
+    return ompi_java_attrDelete(attrVal);
+}

+JNIEXPORT jint JNICALL Java_mpi_Datatype_createKeyval_1jni(
+        JNIEnv *env, jclass clazz)
+{
+    int rc, keyval;
+    rc = MPI_Type_create_keyval(typeCopyAttr, typeDeleteAttr, &keyval, NULL);
+    ompi_java_exceptionCheck(env, rc);
+    return keyval;
+}

+JNIEXPORT void JNICALL Java_mpi_Datatype_freeKeyval_1jni(
+        JNIEnv *env, jclass clazz, jint keyval)
+{
+    int rc = MPI_Type_free_keyval((int*)(&keyval));
+    ompi_java_exceptionCheck(env, rc);
+}

+JNIEXPORT void JNICALL Java_mpi_Datatype_setAttr(
+        JNIEnv *env, jobject jthis, jlong type, jint keyval, jbyteArray jval)
+{
+    void *cval = ompi_java_attrSet(env, jval);
+    int rc = MPI_Type_set_attr((MPI_Datatype)type, keyval, cval);
+    ompi_java_exceptionCheck(env, rc);
+}

+JNIEXPORT jobject JNICALL Java_mpi_Datatype_getAttr(
+        JNIEnv *env, jobject jthis, jlong type, jint keyval)
+{
+    int flag;
+    void *val;
+    int rc = MPI_Type_get_attr((MPI_Datatype)type, keyval, &val, &flag);
+    if(ompi_java_exceptionCheck(env, rc) || !flag)
+        return NULL;
+    return ompi_java_attrGet(env, val);
+}

+JNIEXPORT void JNICALL Java_mpi_Datatype_deleteAttr(
+        JNIEnv *env, jobject jthis, jlong type, jint keyval)
+{
+    int rc = MPI_Type_delete_attr((MPI_Datatype)type, keyval);
+    ompi_java_exceptionCheck(env, rc);
+}
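
The getContiguous/getVector/getStruct entry points above take and return raw jlong handles, which implies static factory methods on the Java Datatype class. A hedged usage sketch (factory method names assumed from the JNI symbol names, not confirmed by this excerpt):

    import mpi.*;

    public class ColumnType {
        public static void main(String[] args) throws MPIException {
            MPI.Init(args);
            // A column of a 4x4 int matrix: 4 blocks of 1 int, stride 4.
            // Presumably backed by Java_mpi_Datatype_getVector() above.
            Datatype column = Datatype.createVector(4, 1, 4, MPI.INT);
            column.commit();   // Java_mpi_Datatype_commit()
            column.free();     // Java_mpi_Datatype_free()
            MPI.Finalize();
        }
    }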


ompi/mpi/java/c/mpi_Errhandler.c

@@ -32,31 +32,17 @@
 #include "mpi_Errhandler.h"
 #include "mpiJava.h"

-jfieldID ErrhandleID;

-/*
- * Class:     mpi_Errhandler
- * Method:    init
- * Signature: ()V
- */
 JNIEXPORT void JNICALL Java_mpi_Errhandler_init(JNIEnv *env, jclass thisClass)
 {
-    ompi_java.ErrhandleID = (*env)->GetFieldID(env,thisClass,"handle","J");
+    ompi_java.ErrHandle = (*env)->GetFieldID(env, thisClass, "handle", "J");
 }

-/*
- * Class:     mpi_Errhandler
- * Method:    GetErrhandler
- * Signature: (I)V
- */
-JNIEXPORT void JNICALL Java_mpi_Errhandler_GetErrhandler(JNIEnv *env, jobject jthis, jint type)
+JNIEXPORT jlong JNICALL Java_mpi_Errhandler_getFatal(JNIEnv *env, jclass clazz)
 {
-    switch (type) {
-    case 0:
-        (*env)->SetLongField(env,jthis, ompi_java.ErrhandleID, (jlong)MPI_ERRORS_RETURN);
-    case 1:
-        (*env)->SetLongField(env,jthis, ompi_java.ErrhandleID, (jlong)MPI_ERRORS_ARE_FATAL);
-    }
+    return (jlong)MPI_ERRORS_ARE_FATAL;
 }

+JNIEXPORT jlong JNICALL Java_mpi_Errhandler_getReturn(JNIEnv *env, jclass clazz)
+{
+    return (jlong)MPI_ERRORS_RETURN;
+}

ompi/mpi/java/c/mpi_File.c (new file, 654 lines)

@@ -0,0 +1,654 @@
#include "ompi_config.h"
#include <stdlib.h>
#include <assert.h>
#ifdef HAVE_TARGETCONDITIONALS_H
#include <TargetConditionals.h>
#endif
#include "mpi.h"
#include "mpi_File.h"
#include "mpiJava.h"
JNIEXPORT jlong JNICALL Java_mpi_File_open(
JNIEnv *env, jobject jthis, jlong comm,
jstring jfilename, jint amode, jlong info)
{
const char* filename = (*env)->GetStringUTFChars(env, jfilename, NULL);
MPI_File fh;
int rc = MPI_File_open((MPI_Comm)comm, (char*)filename,
amode, (MPI_Info)info, &fh);
ompi_java_exceptionCheck(env, rc);
(*env)->ReleaseStringUTFChars(env, jfilename, filename);
return (jlong)fh;
}
JNIEXPORT jlong JNICALL Java_mpi_File_close(
JNIEnv *env, jobject jthis, jlong fh)
{
MPI_File file = (MPI_File)fh;
int rc = MPI_File_close(&file);
ompi_java_exceptionCheck(env, rc);
return (jlong)file;
}
JNIEXPORT void JNICALL Java_mpi_File_delete_1jni(
JNIEnv *env, jclass clazz, jstring jfilename, jlong info)
{
const char* filename = (*env)->GetStringUTFChars(env, jfilename, NULL);
int rc = MPI_File_delete((char*)filename, (MPI_Info)info);
ompi_java_exceptionCheck(env, rc);
(*env)->ReleaseStringUTFChars(env, jfilename, filename);
}
JNIEXPORT void JNICALL Java_mpi_File_setSize(
JNIEnv *env, jobject jthis, jlong fh, jlong size)
{
int rc = MPI_File_set_size((MPI_File)fh, (MPI_Offset)size);
ompi_java_exceptionCheck(env, rc);
}
JNIEXPORT void JNICALL Java_mpi_File_preallocate(
JNIEnv *env, jobject jthis, jlong fh, jlong size)
{
int rc = MPI_File_preallocate((MPI_File)fh, (MPI_Offset)size);
ompi_java_exceptionCheck(env, rc);
}
JNIEXPORT jlong JNICALL Java_mpi_File_getSize(
JNIEnv *env, jobject jthis, jlong fh)
{
MPI_Offset size;
int rc = MPI_File_get_size((MPI_File)fh, &size);
ompi_java_exceptionCheck(env, rc);
return (jlong)size;
}
JNIEXPORT jlong JNICALL Java_mpi_File_getGroup(
JNIEnv *env, jobject jthis, jlong fh)
{
MPI_Group group;
int rc = MPI_File_get_group((MPI_File)fh, &group);
ompi_java_exceptionCheck(env, rc);
return (jlong)group;
}
JNIEXPORT jint JNICALL Java_mpi_File_getAMode(
JNIEnv *env, jobject jthis, jlong fh)
{
int amode;
int rc = MPI_File_get_amode((MPI_File)fh, &amode);
ompi_java_exceptionCheck(env, rc);
return amode;
}
JNIEXPORT void JNICALL Java_mpi_File_setInfo(
JNIEnv *env, jobject jthis, jlong fh, jlong info)
{
int rc = MPI_File_set_info((MPI_File)fh, (MPI_Info)info);
ompi_java_exceptionCheck(env, rc);
}
JNIEXPORT jlong JNICALL Java_mpi_File_getInfo(
JNIEnv *env, jobject jthis, jlong fh)
{
MPI_Info info;
int rc = MPI_File_get_info((MPI_File)fh, &info);
ompi_java_exceptionCheck(env, rc);
return (jlong)info;
}
JNIEXPORT void JNICALL Java_mpi_File_setView(
JNIEnv *env, jobject jthis, jlong fh, jlong disp,
jlong etype, jlong filetype, jstring jdatarep, jlong info)
{
const char* datarep = (*env)->GetStringUTFChars(env, jdatarep, NULL);
int rc = MPI_File_set_view(
(MPI_File)fh, (MPI_Offset)disp, (MPI_Datatype)etype,
(MPI_Datatype)filetype, (char*)datarep, (MPI_Info)info);
ompi_java_exceptionCheck(env, rc);
(*env)->ReleaseStringUTFChars(env, jdatarep, datarep);
}
JNIEXPORT void JNICALL Java_mpi_File_readAt(
JNIEnv *env, jobject jthis, jlong fh, jlong fileOffset,
jobject buf, jint offset, jint count, jobject jType, jobject stat)
{
MPI_Datatype type = (MPI_Datatype)((*env)->GetLongField(
env, jType, ompi_java.DatatypeHandle));
int bType = (*env)->GetIntField(env, jType, ompi_java.DatatypeBaseType);
void *ptr, *base;
ptr = ompi_java_getBufPtr(&base, env, buf, bType, offset);
MPI_Status status;
int rc = MPI_File_read_at((MPI_File)fh, (MPI_Offset)fileOffset,
ptr, count, (MPI_Datatype)type, &status);
ompi_java_exceptionCheck(env, rc);
ompi_java_releaseBufPtr(env, buf, base, bType);
ompi_java_status_set(&status, env, stat);
}
JNIEXPORT void JNICALL Java_mpi_File_readAtAll(
JNIEnv *env, jobject jthis, jlong fh, jlong fileOffset,
jobject buf, jint offset, jint count, jobject jType, jobject stat)
{
MPI_Datatype type = (MPI_Datatype)((*env)->GetLongField(
env, jType, ompi_java.DatatypeHandle));
int bType = (*env)->GetIntField(env, jType, ompi_java.DatatypeBaseType);
void *ptr, *base;
ptr = ompi_java_getBufPtr(&base, env, buf, bType, offset);
MPI_Status status;
int rc = MPI_File_read_at_all((MPI_File)fh, (MPI_Offset)fileOffset,
ptr, count, (MPI_Datatype)type, &status);
ompi_java_exceptionCheck(env, rc);
ompi_java_releaseBufPtr(env, buf, base, bType);
ompi_java_status_set(&status, env, stat);
}
JNIEXPORT void JNICALL Java_mpi_File_writeAt(
JNIEnv *env, jobject jthis, jlong fh, jlong fileOffset,
jobject buf, jint offset, jint count, jobject jType, jobject stat)
{
MPI_Datatype type = (MPI_Datatype)((*env)->GetLongField(
env, jType, ompi_java.DatatypeHandle));
int bType = (*env)->GetIntField(env, jType, ompi_java.DatatypeBaseType);
void *ptr, *base;
ptr = ompi_java_getBufPtr(&base, env, buf, bType, offset);
MPI_Status status;
int rc = MPI_File_write_at((MPI_File)fh, (MPI_Offset)fileOffset,
ptr, count, (MPI_Datatype)type, &status);
ompi_java_exceptionCheck(env, rc);
ompi_java_releaseReadBufPtr(env, buf, base, bType);
ompi_java_status_set(&status, env, stat);
}
JNIEXPORT void JNICALL Java_mpi_File_writeAtAll(
JNIEnv *env, jobject jthis, jlong fh, jlong fileOffset,
jobject buf, jint offset, jint count, jobject jType, jobject stat)
{
MPI_Datatype type = (MPI_Datatype)((*env)->GetLongField(
env, jType, ompi_java.DatatypeHandle));
int bType = (*env)->GetIntField(env, jType, ompi_java.DatatypeBaseType);
void *ptr, *base;
ptr = ompi_java_getBufPtr(&base, env, buf, bType, offset);
MPI_Status status;
int rc = MPI_File_write_at_all((MPI_File)fh, (MPI_Offset)fileOffset,
ptr, count, (MPI_Datatype)type, &status);
ompi_java_exceptionCheck(env, rc);
ompi_java_releaseReadBufPtr(env, buf, base, bType);
ompi_java_status_set(&status, env, stat);
}
JNIEXPORT jlong JNICALL Java_mpi_File_iReadAt(
JNIEnv *env, jobject jthis, jlong fh, jlong offset,
jobject buf, jint count, jlong type)
{
void *ptr = (*env)->GetDirectBufferAddress(env, buf);
MPI_Request request;
int rc = MPI_File_iread_at((MPI_File)fh, (MPI_Offset)offset,
ptr, count, (MPI_Datatype)type, &request);
ompi_java_exceptionCheck(env, rc);
return (jlong)request;
}
JNIEXPORT jlong JNICALL Java_mpi_File_iWriteAt(
JNIEnv *env, jobject jthis, jlong fh, jlong offset,
jobject buf, jint count, jlong type)
{
void *ptr = (*env)->GetDirectBufferAddress(env, buf);
MPI_Request request;
int rc = MPI_File_iwrite_at((MPI_File)fh, (MPI_Offset)offset,
ptr, count, (MPI_Datatype)type, &request);
ompi_java_exceptionCheck(env, rc);
return (jlong)request;
}
JNIEXPORT void JNICALL Java_mpi_File_read(
JNIEnv *env, jobject jthis, jlong fh, jobject buf,
jint offset, jint count, jobject jType, jobject stat)
{
MPI_Datatype type = (MPI_Datatype)((*env)->GetLongField(
env, jType, ompi_java.DatatypeHandle));
int bType = (*env)->GetIntField(env, jType, ompi_java.DatatypeBaseType);
void *ptr, *base;
ptr = ompi_java_getBufPtr(&base, env, buf, bType, offset);
MPI_Status status;
int rc = MPI_File_read((MPI_File)fh, ptr, count,
(MPI_Datatype)type, &status);
ompi_java_exceptionCheck(env, rc);
ompi_java_releaseBufPtr(env, buf, base, bType);
ompi_java_status_set(&status, env, stat);
}
JNIEXPORT void JNICALL Java_mpi_File_readAll(
JNIEnv *env, jobject jthis, jlong fh, jobject buf,
jint offset, jint count, jobject jType, jobject stat)
{
MPI_Datatype type = (MPI_Datatype)((*env)->GetLongField(
env, jType, ompi_java.DatatypeHandle));
int bType = (*env)->GetIntField(env, jType, ompi_java.DatatypeBaseType);
void *ptr, *base;
ptr = ompi_java_getBufPtr(&base, env, buf, bType, offset);
MPI_Status status;
int rc = MPI_File_read_all((MPI_File)fh, ptr, count,
(MPI_Datatype)type, &status);
ompi_java_exceptionCheck(env, rc);
ompi_java_releaseBufPtr(env, buf, base, bType);
ompi_java_status_set(&status, env, stat);
}
JNIEXPORT void JNICALL Java_mpi_File_write(
JNIEnv *env, jobject jthis, jlong fh, jobject buf,
jint offset, jint count, jobject jType, jobject stat)
{
MPI_Datatype type = (MPI_Datatype)((*env)->GetLongField(
env, jType, ompi_java.DatatypeHandle));
int bType = (*env)->GetIntField(env, jType, ompi_java.DatatypeBaseType);
void *ptr, *base;
ptr = ompi_java_getBufPtr(&base, env, buf, bType, offset);
MPI_Status status;
int rc = MPI_File_write((MPI_File)fh, ptr, count,
(MPI_Datatype)type, &status);
ompi_java_exceptionCheck(env, rc);
ompi_java_releaseReadBufPtr(env, buf, base, bType);
ompi_java_status_set(&status, env, stat);
}
JNIEXPORT void JNICALL Java_mpi_File_writeAll(
JNIEnv *env, jobject jthis, jlong fh, jobject buf,
jint offset, jint count, jobject jType, jobject stat)
{
MPI_Datatype type = (MPI_Datatype)((*env)->GetLongField(
env, jType, ompi_java.DatatypeHandle));
int bType = (*env)->GetIntField(env, jType, ompi_java.DatatypeBaseType);
void *ptr, *base;
ptr = ompi_java_getBufPtr(&base, env, buf, bType, offset);
MPI_Status status;
int rc = MPI_File_write_all((MPI_File)fh, ptr, count,
(MPI_Datatype)type, &status);
ompi_java_exceptionCheck(env, rc);
ompi_java_releaseReadBufPtr(env, buf, base, bType);
ompi_java_status_set(&status, env, stat);
}
JNIEXPORT jlong JNICALL Java_mpi_File_iRead(
JNIEnv *env, jobject jthis, jlong fh,
jobject buf, jint count, jlong type)
{
void *ptr = (*env)->GetDirectBufferAddress(env, buf);
MPI_Request request;
int rc = MPI_File_iread((MPI_File)fh, ptr, count,
(MPI_Datatype)type, &request);
ompi_java_exceptionCheck(env, rc);
return (jlong)request;
}
JNIEXPORT jlong JNICALL Java_mpi_File_iWrite(
JNIEnv *env, jobject jthis, jlong fh,
jobject buf, jint count, jlong type)
{
void *ptr = (*env)->GetDirectBufferAddress(env, buf);
MPI_Request request;
int rc = MPI_File_iwrite((MPI_File)fh, ptr, count,
(MPI_Datatype)type, &request);
ompi_java_exceptionCheck(env, rc);
return (jlong)request;
}
JNIEXPORT void JNICALL Java_mpi_File_seek(
JNIEnv *env, jobject jthis, jlong fh, jlong offset, jint whence)
{
int rc = MPI_File_seek((MPI_File)fh, (MPI_Offset)offset, whence);
ompi_java_exceptionCheck(env, rc);
}
JNIEXPORT jlong JNICALL Java_mpi_File_getPosition(
JNIEnv *env, jobject jthis, jlong fh)
{
MPI_Offset offset;
int rc = MPI_File_get_position((MPI_File)fh, &offset);
ompi_java_exceptionCheck(env, rc);
return (jlong)offset;
}
JNIEXPORT jlong JNICALL Java_mpi_File_getByteOffset(
JNIEnv *env, jobject jthis, jlong fh, jlong offset)
{
MPI_Offset disp;
int rc = MPI_File_get_byte_offset((MPI_File)fh, (MPI_Offset)offset, &disp);
ompi_java_exceptionCheck(env, rc);
return (jlong)disp;
}
JNIEXPORT void JNICALL Java_mpi_File_readShared(
JNIEnv *env, jobject jthis, jlong fh, jobject buf,
jint offset, jint count, jobject jType, jobject stat)
{
MPI_Datatype type = (MPI_Datatype)((*env)->GetLongField(
env, jType, ompi_java.DatatypeHandle));
int bType = (*env)->GetIntField(env, jType, ompi_java.DatatypeBaseType);
void *ptr, *base;
ptr = ompi_java_getBufPtr(&base, env, buf, bType, offset);
MPI_Status status;
int rc = MPI_File_read_shared((MPI_File)fh, ptr, count,
(MPI_Datatype)type, &status);
ompi_java_exceptionCheck(env, rc);
ompi_java_releaseBufPtr(env, buf, base, bType);
ompi_java_status_set(&status, env, stat);
}
JNIEXPORT void JNICALL Java_mpi_File_writeShared(
JNIEnv *env, jobject jthis, jlong fh, jobject buf,
jint offset, jint count, jobject jType, jobject stat)
{
MPI_Datatype type = (MPI_Datatype)((*env)->GetLongField(
env, jType, ompi_java.DatatypeHandle));
int bType = (*env)->GetIntField(env, jType, ompi_java.DatatypeBaseType);
void *ptr, *base;
ptr = ompi_java_getBufPtr(&base, env, buf, bType, offset);
MPI_Status status;
int rc = MPI_File_write_shared((MPI_File)fh, ptr, count,
(MPI_Datatype)type, &status);
ompi_java_exceptionCheck(env, rc);
ompi_java_releaseReadBufPtr(env, buf, base, bType);
ompi_java_status_set(&status, env, stat);
}
JNIEXPORT jlong JNICALL Java_mpi_File_iReadShared(
JNIEnv *env, jobject jthis, jlong fh,
jobject buf, jint count, jlong type)
{
void *ptr = (*env)->GetDirectBufferAddress(env, buf);
MPI_Request request;
int rc = MPI_File_iread_shared((MPI_File)fh, ptr, count,
(MPI_Datatype)type, &request);
ompi_java_exceptionCheck(env, rc);
return (jlong)request;
}
JNIEXPORT jlong JNICALL Java_mpi_File_iWriteShared(
JNIEnv *env, jobject jthis, jlong fh,
jobject buf, jint count, jlong type)
{
void *ptr = (*env)->GetDirectBufferAddress(env, buf);
MPI_Request request;
int rc = MPI_File_iwrite_shared((MPI_File)fh, ptr, count,
(MPI_Datatype)type, &request);
ompi_java_exceptionCheck(env, rc);
return (jlong)request;
}
JNIEXPORT void JNICALL Java_mpi_File_readOrdered(
JNIEnv *env, jobject jthis, jlong fh, jobject buf,
jint offset, jint count, jobject jType, jobject stat)
{
MPI_Datatype type = (MPI_Datatype)((*env)->GetLongField(
env, jType, ompi_java.DatatypeHandle));
int bType = (*env)->GetIntField(env, jType, ompi_java.DatatypeBaseType);
void *ptr, *base;
ptr = ompi_java_getBufPtr(&base, env, buf, bType, offset);
MPI_Status status;
int rc = MPI_File_read_ordered((MPI_File)fh, ptr, count,
(MPI_Datatype)type, &status);
ompi_java_exceptionCheck(env, rc);
ompi_java_releaseBufPtr(env, buf, base, bType);
ompi_java_status_set(&status, env, stat);
}
JNIEXPORT void JNICALL Java_mpi_File_writeOrdered(
JNIEnv *env, jobject jthis, jlong fh, jobject buf,
jint offset, jint count, jobject jType, jobject stat)
{
MPI_Datatype type = (MPI_Datatype)((*env)->GetLongField(
env, jType, ompi_java.DatatypeHandle));
int bType = (*env)->GetIntField(env, jType, ompi_java.DatatypeBaseType);
void *ptr, *base;
ptr = ompi_java_getBufPtr(&base, env, buf, bType, offset);
MPI_Status status;
int rc = MPI_File_write_ordered((MPI_File)fh, ptr, count,
(MPI_Datatype)type, &status);
ompi_java_exceptionCheck(env, rc);
ompi_java_releaseReadBufPtr(env, buf, base, bType);
ompi_java_status_set(&status, env, stat);
}
JNIEXPORT void JNICALL Java_mpi_File_seekShared(
JNIEnv *env, jobject jthis, jlong fh, jlong offset, jint whence)
{
int rc = MPI_File_seek_shared((MPI_File)fh, (MPI_Offset)offset, whence);
ompi_java_exceptionCheck(env, rc);
}
JNIEXPORT jlong JNICALL Java_mpi_File_getPositionShared(
JNIEnv *env, jobject jthis, jlong fh)
{
MPI_Offset offset;
int rc = MPI_File_get_position_shared((MPI_File)fh, &offset);
ompi_java_exceptionCheck(env, rc);
return (jlong)offset;
}
JNIEXPORT void JNICALL Java_mpi_File_readAtAllBegin(
JNIEnv *env, jobject jthis, jlong fh, jlong offset,
jobject buf, jint count, jobject jType)
{
MPI_Datatype type = (MPI_Datatype)((*env)->GetLongField(
env, jType, ompi_java.DatatypeHandle));
void *ptr = (*env)->GetDirectBufferAddress(env, buf);
int rc = MPI_File_read_at_all_begin((MPI_File)fh, (MPI_Offset)offset,
ptr, count, (MPI_Datatype)type);
ompi_java_exceptionCheck(env, rc);
}
JNIEXPORT void JNICALL Java_mpi_File_readAtAllEnd(
JNIEnv *env, jobject jthis, jlong fh, jobject buf, jobject stat)
{
MPI_Status status;
void *ptr = (*env)->GetDirectBufferAddress(env, buf);
int rc = MPI_File_read_at_all_end((MPI_File)fh, ptr, &status);
ompi_java_exceptionCheck(env, rc);
ompi_java_status_set(&status, env, stat);
}
JNIEXPORT void JNICALL Java_mpi_File_writeAtAllBegin(
JNIEnv *env, jobject jthis, jlong fh, jlong fileOffset,
jobject buf, jint count, jobject jType)
{
MPI_Datatype type = (MPI_Datatype)((*env)->GetLongField(
env, jType, ompi_java.DatatypeHandle));
void *ptr = (*env)->GetDirectBufferAddress(env, buf);
int rc = MPI_File_write_at_all_begin((MPI_File)fh, (MPI_Offset)fileOffset,
ptr, count, (MPI_Datatype)type);
ompi_java_exceptionCheck(env, rc);
}
JNIEXPORT void JNICALL Java_mpi_File_writeAtAllEnd(
JNIEnv *env, jobject jthis, jlong fh, jobject buf, jobject stat)
{
MPI_Status status;
void *ptr = (*env)->GetDirectBufferAddress(env, buf);
int rc = MPI_File_write_at_all_end((MPI_File)fh, ptr, &status);
ompi_java_exceptionCheck(env, rc);
ompi_java_status_set(&status, env, stat);
}
JNIEXPORT void JNICALL Java_mpi_File_readAllBegin(
JNIEnv *env, jobject jthis, jlong fh,
jobject buf, jint count, jobject jType)
{
MPI_Datatype type = (MPI_Datatype)((*env)->GetLongField(
env, jType, ompi_java.DatatypeHandle));
void *ptr = (*env)->GetDirectBufferAddress(env, buf);
int rc = MPI_File_read_all_begin(
(MPI_File)fh, ptr, count, (MPI_Datatype)type);
ompi_java_exceptionCheck(env, rc);
}
JNIEXPORT void JNICALL Java_mpi_File_readAllEnd(
JNIEnv *env, jobject jthis, jlong fh, jobject buf, jobject stat)
{
MPI_Status status;
void *ptr = (*env)->GetDirectBufferAddress(env, buf);
int rc = MPI_File_read_all_end((MPI_File)fh, ptr, &status);
ompi_java_exceptionCheck(env, rc);
ompi_java_status_set(&status, env, stat);
}
JNIEXPORT void JNICALL Java_mpi_File_writeAllBegin(
JNIEnv *env, jobject jthis, jlong fh,
jobject buf, jint count, jobject jType)
{
MPI_Datatype type = (MPI_Datatype)((*env)->GetLongField(
env, jType, ompi_java.DatatypeHandle));
void *ptr = (*env)->GetDirectBufferAddress(env, buf);
int rc = MPI_File_write_all_begin(
(MPI_File)fh, ptr, count, (MPI_Datatype)type);
ompi_java_exceptionCheck(env, rc);
}
JNIEXPORT void JNICALL Java_mpi_File_writeAllEnd(
JNIEnv *env, jobject jthis, jlong fh, jobject buf, jobject stat)
{
MPI_Status status;
void *ptr = (*env)->GetDirectBufferAddress(env, buf);
int rc = MPI_File_write_all_end((MPI_File)fh, ptr, &status);
ompi_java_exceptionCheck(env, rc);
ompi_java_status_set(&status, env, stat);
}
JNIEXPORT void JNICALL Java_mpi_File_readOrderedBegin(
JNIEnv *env, jobject jthis, jlong fh,
jobject buf, jint count, jobject jType)
{
MPI_Datatype type = (MPI_Datatype)((*env)->GetLongField(
env, jType, ompi_java.DatatypeHandle));
void *ptr = (*env)->GetDirectBufferAddress(env, buf);
int rc = MPI_File_read_ordered_begin(
(MPI_File)fh, ptr, count, (MPI_Datatype)type);
ompi_java_exceptionCheck(env, rc);
}
JNIEXPORT void JNICALL Java_mpi_File_readOrderedEnd(
JNIEnv *env, jobject jthis, jlong fh, jobject buf, jobject stat)
{
MPI_Status status;
void *ptr = (*env)->GetDirectBufferAddress(env, buf);
int rc = MPI_File_read_ordered_end((MPI_File)fh, ptr, &status);
ompi_java_exceptionCheck(env, rc);
ompi_java_status_set(&status, env, stat);
}
JNIEXPORT void JNICALL Java_mpi_File_writeOrderedBegin(
JNIEnv *env, jobject jthis, jlong fh,
jobject buf, jint count, jobject jType)
{
MPI_Datatype type = (MPI_Datatype)((*env)->GetLongField(
env, jType, ompi_java.DatatypeHandle));
void *ptr = (*env)->GetDirectBufferAddress(env, buf);
int rc = MPI_File_write_ordered_begin(
(MPI_File)fh, ptr, count, (MPI_Datatype)type);
ompi_java_exceptionCheck(env, rc);
}
JNIEXPORT void JNICALL Java_mpi_File_writeOrderedEnd(
JNIEnv *env, jobject jthis, jlong fh, jobject buf, jobject stat)
{
MPI_Status status;
void *ptr = (*env)->GetDirectBufferAddress(env, buf);
int rc = MPI_File_write_ordered_end((MPI_File)fh, ptr, &status);
ompi_java_exceptionCheck(env, rc);
ompi_java_status_set(&status, env, stat);
}
JNIEXPORT jint JNICALL Java_mpi_File_getTypeExtent(
JNIEnv *env, jobject jthis, jlong fh, jlong type)
{
MPI_Aint extent;
int rc = MPI_File_get_type_extent(
(MPI_File)fh, (MPI_Datatype)type, &extent);
ompi_java_exceptionCheck(env, rc);
return (int)extent;
}
JNIEXPORT void JNICALL Java_mpi_File_setAtomicity(
JNIEnv *env, jobject jthis, jlong fh, jboolean atomicity)
{
int rc = MPI_File_set_atomicity((MPI_File)fh, atomicity);
ompi_java_exceptionCheck(env, rc);
}
JNIEXPORT void JNICALL Java_mpi_File_sync(
JNIEnv *env, jobject jthis, jlong fh)
{
int rc = MPI_File_sync((MPI_File)fh);
ompi_java_exceptionCheck(env, rc);
}
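A quick orientation note on the File wrappers above: the blocking read/write calls pin the Java array through ompi_java_getBufPtr, while the nonblocking iRead/iWrite family calls GetDirectBufferAddress and therefore only works with direct buffers. The split-collective begin/end pairs map one-to-one onto the MPI C API; a minimal sketch (the file name, buffer, and omitted error handling are illustrative, not part of this commit):

/* Sketch: split-collective read at an explicit offset.  Assumes MPI is
 * initialized and "data.bin" exists; error handling omitted. */
#include <mpi.h>

static void read_block(MPI_Offset offset, int *buf, int count)
{
    MPI_File fh;
    MPI_Status status;
    MPI_File_open(MPI_COMM_WORLD, "data.bin",
                  MPI_MODE_RDONLY, MPI_INFO_NULL, &fh);
    MPI_File_read_at_all_begin(fh, offset, buf, count, MPI_INT);
    /* unrelated computation may overlap the I/O here */
    MPI_File_read_at_all_end(fh, buf, &status);
    MPI_File_close(&fh);
}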
ompi/mpi/java/c/mpi_GraphComm.c (new file, 147 lines)
@@ -0,0 +1,147 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
* File : mpi_GraphComm.c
* Headerfile : mpi_GraphComm.h
* Author : Xinying Li
* Created : Thu Apr 9 12:22:15 1998
* Revision : $Revision: 1.2 $
* Updated : $Date: 2003/01/16 16:39:34 $
* Copyright: Northeast Parallel Architectures Center
* at Syracuse University 1998
*/
#include "ompi_config.h"
#ifdef HAVE_TARGETCONDITIONALS_H
#include <TargetConditionals.h>
#endif
#include "mpi.h"
#include "mpi_GraphComm.h"
#include "mpiJava.h"
JNIEXPORT void JNICALL Java_mpi_GraphComm_init(JNIEnv *env, jclass clazz)
{
ompi_java.GraphParmsInit = (*env)->GetMethodID(env,
ompi_java.GraphParmsClass, "<init>", "([I[I)V");
ompi_java.DistGraphNeighborsInit = (*env)->GetMethodID(env,
ompi_java.DistGraphNeighborsClass, "<init>", "([I[I[I[IZ)V");
}
JNIEXPORT jobject JNICALL Java_mpi_GraphComm_getDims(
JNIEnv *env, jobject jthis, jlong comm)
{
int maxInd, maxEdg;
int rc = MPI_Graphdims_get((MPI_Comm)comm, &maxInd, &maxEdg);
if(ompi_java_exceptionCheck(env, rc))
return NULL;
jintArray index = (*env)->NewIntArray(env, maxInd),
edges = (*env)->NewIntArray(env, maxEdg);
jint *jIndex, *jEdges;
int *cIndex, *cEdges;
ompi_java_getIntArray(env, index, &jIndex, &cIndex);
ompi_java_getIntArray(env, edges, &jEdges, &cEdges);
rc = MPI_Graph_get((MPI_Comm)comm, maxInd, maxEdg, cIndex, cEdges);
ompi_java_exceptionCheck(env, rc);
ompi_java_releaseIntArray(env, index, jIndex, cIndex);
ompi_java_releaseIntArray(env, edges, jEdges, cEdges);
return (*env)->NewObject(env, ompi_java.GraphParmsClass,
ompi_java.GraphParmsInit, index, edges);
}
JNIEXPORT jintArray JNICALL Java_mpi_GraphComm_getNeighbors(
JNIEnv *env, jobject jthis, jlong comm, jint rank)
{
int maxNs;
int rc = MPI_Graph_neighbors_count((MPI_Comm)comm, rank, &maxNs);
if(ompi_java_exceptionCheck(env, rc))
return NULL;
jintArray neighbors = (*env)->NewIntArray(env, maxNs);
jint *jNeighbors;
int *cNeighbors;
ompi_java_getIntArray(env, neighbors, &jNeighbors, &cNeighbors);
rc = MPI_Graph_neighbors((MPI_Comm)comm, rank, maxNs, cNeighbors);
ompi_java_exceptionCheck(env, rc);
ompi_java_releaseIntArray(env, neighbors, jNeighbors, cNeighbors);
return neighbors;
}
JNIEXPORT jobject JNICALL Java_mpi_GraphComm_getDistGraphNeighbors(
JNIEnv *env, jobject jthis, jlong comm)
{
int inDegree, outDegree, weighted;
int rc = MPI_Dist_graph_neighbors_count(
(MPI_Comm)comm, &inDegree, &outDegree, &weighted);
if(ompi_java_exceptionCheck(env, rc))
return NULL;
jintArray sources = (*env)->NewIntArray(env, inDegree),
srcWeights = (*env)->NewIntArray(env, inDegree),
destinations = (*env)->NewIntArray(env, outDegree),
destWeights = (*env)->NewIntArray(env, outDegree);
jint *jSources, *jSrcWeights, *jDestinations, *jDestWeights;
int *cSources, *cSrcWeights, *cDestinations, *cDestWeights;
ompi_java_getIntArray(env, sources, &jSources, &cSources);
ompi_java_getIntArray(env, srcWeights, &jSrcWeights, &cSrcWeights);
ompi_java_getIntArray(env, destinations, &jDestinations, &cDestinations);
ompi_java_getIntArray(env, destWeights, &jDestWeights, &cDestWeights);
rc = MPI_Dist_graph_neighbors((MPI_Comm)comm,
inDegree, cSources, cSrcWeights,
outDegree, cDestinations, cDestWeights);
ompi_java_exceptionCheck(env, rc);
ompi_java_releaseIntArray(env, sources, jSources, cSources);
ompi_java_releaseIntArray(env, srcWeights, jSrcWeights, cSrcWeights);
ompi_java_releaseIntArray(env, destinations, jDestinations, cDestinations);
ompi_java_releaseIntArray(env, destWeights, jDestWeights, cDestWeights);
return (*env)->NewObject(env,
ompi_java.DistGraphNeighborsClass, ompi_java.DistGraphNeighborsInit,
sources, srcWeights, destinations, destWeights,
weighted ? JNI_TRUE : JNI_FALSE);
}
JNIEXPORT jint JNICALL Java_mpi_GraphComm_map(
JNIEnv *env, jobject jthis, jlong comm,
jintArray index, jintArray edges)
{
int nNodes = (*env)->GetArrayLength(env, index);
jint *jIndex, *jEdges;
int *cIndex, *cEdges;
ompi_java_getIntArray(env, index, &jIndex, &cIndex);
ompi_java_getIntArray(env, edges, &jEdges, &cEdges);
int newrank;
int rc = MPI_Graph_map((MPI_Comm)comm, nNodes, cIndex, cEdges, &newrank);
ompi_java_exceptionCheck(env, rc);
ompi_java_releaseIntArray(env, index, jIndex, cIndex);
ompi_java_releaseIntArray(env, edges, jEdges, cEdges);
return newrank;
}
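getDistGraphNeighbors above follows MPI's usual count-then-query idiom. The same flow in plain C (a sketch; comm is assumed to carry a distributed-graph topology, and error handling is omitted):

#include <stdlib.h>
#include <mpi.h>

static void query_dist_graph(MPI_Comm comm)
{
    int inDegree, outDegree, weighted;
    MPI_Dist_graph_neighbors_count(comm, &inDegree, &outDegree, &weighted);

    int *src  = malloc(inDegree  * sizeof(int));
    int *srcW = malloc(inDegree  * sizeof(int));
    int *dst  = malloc(outDegree * sizeof(int));
    int *dstW = malloc(outDegree * sizeof(int));

    MPI_Dist_graph_neighbors(comm, inDegree, src, srcW,
                             outDegree, dst, dstW);
    /* ... use the neighbor and weight lists ... */
    free(src); free(srcW); free(dst); free(dstW);
}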
ompi/mpi/java/c/mpi_Graphcomm.c (deleted file)
@@ -1,126 +0,0 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
* File : mpi_Graphcomm.c
* Headerfile : mpi_Graphcomm.h
* Author : Xinying Li
* Created : Thu Apr 9 12:22:15 1998
* Revision : $Revision: 1.2 $
* Updated : $Date: 2003/01/16 16:39:34 $
* Copyright: Northeast Parallel Architectures Center
* at Syracuse University 1998
*/
#include "ompi_config.h"
#ifdef HAVE_TARGETCONDITIONALS_H
#include <TargetConditionals.h>
#endif
#include "mpi.h"
#include "mpi_Graphcomm.h"
#include "mpiJava.h"
/*
* Class: mpi_Graphcomm
* Method: Get
* Signature: ()Lmpi/GraphParms;
*/
JNIEXPORT jobject JNICALL Java_mpi_Graphcomm_Get(JNIEnv *env, jobject jthis)
{
jintArray index, edges;
jint *ind, *edg;
jboolean isCopy=JNI_TRUE;
int maxind, maxedg;
jclass graphparms_class=(*env)->FindClass(env,"mpi/GraphParms");
jfieldID indexID,edgesID;
jmethodID handleConstructorID = (*env)->GetMethodID(env,
graphparms_class, "<init>", "()V");
jobject graphparms=(*env)->NewObject(env,graphparms_class, handleConstructorID);
ompi_java_clearFreeList(env) ;
MPI_Graphdims_get((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)),&maxind,&maxedg);
index=(*env)->NewIntArray(env,maxind);
edges=(*env)->NewIntArray(env,maxedg);
ind=(*env)->GetIntArrayElements(env,index,&isCopy);
edg=(*env)->GetIntArrayElements(env,edges,&isCopy);
MPI_Graph_get((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)),
maxind,maxedg, (int*)ind, (int*)edg);
(*env)->ReleaseIntArrayElements(env,index,ind,0);
(*env)->ReleaseIntArrayElements(env,edges,edg,0);
indexID=(*env)->GetFieldID(env,graphparms_class,"index","[I");
edgesID=(*env)->GetFieldID(env,graphparms_class , "edges", "[I");
(*env)->SetObjectField(env, graphparms, indexID, index);
(*env)->SetObjectField(env, graphparms, edgesID, edges);
/* printf("Graphcomm Get finished.\n"); */
return graphparms;
}
/*
* Class: mpi_Graphcomm
* Method: Neighbours
* Signature: (I)[I
*/
JNIEXPORT jintArray JNICALL Java_mpi_Graphcomm_Neighbours(JNIEnv *env, jobject jthis, jint rank)
{
jint *neighbors;
jboolean isCopy=JNI_TRUE;
jintArray jneighbors;
int maxns;
ompi_java_clearFreeList(env) ;
MPI_Graph_neighbors_count((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)),rank,&maxns);
jneighbors=(*env)->NewIntArray(env,maxns);
neighbors=(*env)->GetIntArrayElements(env,jneighbors,&isCopy);
MPI_Graph_neighbors((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)),
rank,maxns,(int*)neighbors);
(*env)->ReleaseIntArrayElements(env,jneighbors,neighbors,0);
return jneighbors;
}
/*
* Class: mpi_Graphcomm
* Method: Map
* Signature: ([I[I)I
*/
JNIEXPORT jint JNICALL Java_mpi_Graphcomm_Map(JNIEnv *env, jobject jthis, jintArray index, jintArray edges)
{
int newrank;
jint *ind, *edg;
jboolean isCopy=JNI_TRUE;
int nnodes;
ompi_java_clearFreeList(env) ;
nnodes=(*env)->GetArrayLength(env,index);
ind=(*env)->GetIntArrayElements(env,index,&isCopy);
edg=(*env)->GetIntArrayElements(env,edges,&isCopy);
MPI_Graph_map((MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommhandleID)),
nnodes,(int*)index,(int*)edges, &newrank);
(*env)->ReleaseIntArrayElements(env,index,ind,0);
(*env)->ReleaseIntArrayElements(env,edges,edg,0);
return newrank;
}
ompi/mpi/java/c/mpi_Group.c
@@ -33,290 +33,185 @@
#include "mpi_Group.h" #include "mpi_Group.h"
#include "mpiJava.h" #include "mpiJava.h"
JNIEXPORT void JNICALL Java_mpi_Group_init(JNIEnv *env, jclass clazz)
/*
* Class: mpi_Group
* Method: init
* Signature: ()V
*/
JNIEXPORT void JNICALL Java_mpi_Group_init(JNIEnv *env, jclass thisClass)
{ {
ompi_java.GrouphandleID = (*env)->GetFieldID(env,thisClass,"handle","J"); ompi_java_setStaticLongField(env, clazz,
"nullHandle", (jlong)MPI_GROUP_NULL);
ompi_java.GroupHandle = (*env)->GetFieldID(env, clazz, "handle", "J");
} }
/* JNIEXPORT jlong JNICALL Java_mpi_Group_getEmpty(JNIEnv *env, jclass clazz)
* Class: mpi_Group
* Method: GetGroup
* Signature: (I)V
*/
JNIEXPORT void JNICALL Java_mpi_Group_GetGroup(JNIEnv *env, jobject jthis, jint type)
{ {
switch (type) { return (jlong)MPI_GROUP_EMPTY;
case 0:
(*env)->SetLongField(env,jthis, ompi_java.GrouphandleID, (jlong)MPI_GROUP_EMPTY);
break;
default:
break;
}
} }
/* JNIEXPORT jint JNICALL Java_mpi_Group_getSize(
* Class: mpi_Group JNIEnv *env, jobject jthis, jlong group)
* Method: Size
* Signature: ()I
*/
JNIEXPORT jint JNICALL Java_mpi_Group_Size(JNIEnv *env, jobject jthis)
{ {
int size; int size, rc;
rc = MPI_Group_size((MPI_Group)group, &size);
ompi_java_clearFreeList(env) ; ompi_java_exceptionCheck(env, rc);
MPI_Group_size((MPI_Group)((*env)->GetLongField(env,jthis,ompi_java.GrouphandleID)),
&size);
return size; return size;
} }
/* * Class: mpi_Group JNIEXPORT jint JNICALL Java_mpi_Group_getRank(
* Method: Rank JNIEnv *env, jobject jthis, jlong group)
* Signature: ()I
*/
JNIEXPORT jint JNICALL Java_mpi_Group_Rank(JNIEnv *env, jobject jthis)
{ {
int rank; int rank, rc;
rc = MPI_Group_rank((MPI_Group)group, &rank);
ompi_java_clearFreeList(env) ; ompi_java_exceptionCheck(env, rc);
MPI_Group_rank((MPI_Group)((*env)->GetLongField(env,jthis,ompi_java.GrouphandleID)),
&rank);
return rank; return rank;
} }
/* JNIEXPORT jlong JNICALL Java_mpi_Group_free(
* Class: mpi_Group JNIEnv *env, jobject jthis, jlong handle)
* Method: free
* Signature: ()V
*/
JNIEXPORT void JNICALL Java_mpi_Group_free(JNIEnv *env, jobject jthis)
{ {
MPI_Group group=(MPI_Group)((*env)->GetLongField(env,jthis,ompi_java.GrouphandleID)); MPI_Group group = (MPI_Group)handle;
int rc = MPI_Group_free(&group);
MPI_Group_free(&group); ompi_java_exceptionCheck(env, rc);
(*env)->SetLongField(env,jthis, ompi_java.GrouphandleID,(jlong)MPI_GROUP_NULL); return (jlong)group;
} }
/* JNIEXPORT jintArray JNICALL Java_mpi_Group_translateRanks(
* Class: mpi_Group JNIEnv *env, jclass jthis, jlong group1,
* Method: Translate_ranks jintArray ranks1, jlong group2)
* Signature: (Lmpi/Group;[ILmpi/Group;)[I
*/
JNIEXPORT jintArray JNICALL Java_mpi_Group_Translate_1ranks(JNIEnv *env, jclass jthis,
jobject group1, jintArray ranks1,
jobject group2)
{ {
jboolean isCopy=JNI_TRUE; jsize n = (*env)->GetArrayLength(env, ranks1);
int n=(*env)->GetArrayLength(env,ranks1); jintArray ranks2 = (*env)->NewIntArray(env,n);
jint *rks1,*rks2; jint *jRanks1, *jRanks2;
jintArray jranks2; int *cRanks1, *cRanks2;
ompi_java_getIntArray(env, ranks1, &jRanks1, &cRanks1);
ompi_java_getIntArray(env, ranks2, &jRanks2, &cRanks2);
ompi_java_clearFreeList(env) ; int rc = MPI_Group_translate_ranks((MPI_Group)group1, n, cRanks1,
(MPI_Group)group2, cRanks2);
rks1=(*env)->GetIntArrayElements(env,ranks1,&isCopy); ompi_java_exceptionCheck(env, rc);
jranks2=(*env)->NewIntArray(env,n); ompi_java_forgetIntArray(env, ranks1, jRanks1, cRanks1);
rks2=(*env)->GetIntArrayElements(env,jranks2,&isCopy); ompi_java_releaseIntArray(env, ranks2, jRanks2, cRanks2);
MPI_Group_translate_ranks((MPI_Group)((*env)->GetLongField(env,group1,ompi_java.GrouphandleID)), return ranks2;
n, (int*)rks1,
(MPI_Group)((*env)->GetLongField(env,group2,ompi_java.GrouphandleID)),
(int*)rks2);
(*env)->ReleaseIntArrayElements(env,ranks1,rks1,0);
(*env)->ReleaseIntArrayElements(env,jranks2,rks2,0);
return jranks2;
} }
/* JNIEXPORT jint JNICALL Java_mpi_Group_compare(
* Class: mpi_Group JNIEnv *env, jclass jthis, jlong group1, jlong group2)
* Method: Compare
* Signature: (Lmpi/Group;Lmpi/Group;)I
*/
JNIEXPORT jint JNICALL Java_mpi_Group_Compare(JNIEnv *env, jclass jthis,
jobject group1, jobject group2)
{ {
int result; int result, rc;
rc = MPI_Group_compare((MPI_Group)group1, (MPI_Group)group2, &result);
ompi_java_clearFreeList(env) ; ompi_java_exceptionCheck(env, rc);
MPI_Group_compare((MPI_Group)((*env)->GetLongField(env,group1,ompi_java.GrouphandleID)),
(MPI_Group)((*env)->GetLongField(env,group2,ompi_java.GrouphandleID)),
&result);
return result; return result;
} }
/* JNIEXPORT jlong JNICALL Java_mpi_Group_union(
* Class: mpi_Group JNIEnv *env, jclass jthis, jlong group1, jlong group2)
* Method: union
* Signature: (Lmpi/Group;Lmpi/Group;)J
*/
JNIEXPORT jlong JNICALL Java_mpi_Group_union(JNIEnv *env, jclass jthis,
jobject group1, jobject group2)
{ {
MPI_Group newgroup; MPI_Group newGroup;
int rc = MPI_Group_union((MPI_Group)group1, (MPI_Group)group2, &newGroup);
ompi_java_clearFreeList(env) ; ompi_java_exceptionCheck(env, rc);
return (jlong)newGroup;
MPI_Group_union((MPI_Group)((*env)->GetLongField(env,group1,ompi_java.GrouphandleID)),
(MPI_Group)((*env)->GetLongField(env,group2,ompi_java.GrouphandleID)),
&newgroup);
return (jlong)newgroup;
}
/*
* Class: mpi_Group
* Method: intersection
* Signature: (Lmpi/Group;Lmpi/Group;)J
*/
JNIEXPORT jlong JNICALL Java_mpi_Group_intersection(JNIEnv *env, jclass jthis,
jobject group1, jobject group2)
{
MPI_Group newgroup;
ompi_java_clearFreeList(env) ;
MPI_Group_intersection((MPI_Group)((*env)->GetLongField(env,group1,ompi_java.GrouphandleID)),
(MPI_Group)((*env)->GetLongField(env,group2,ompi_java.GrouphandleID)),
&newgroup);
return (jlong)newgroup;
} }
/* JNIEXPORT jlong JNICALL Java_mpi_Group_intersection(
* Class: mpi_Group JNIEnv *env, jclass jthis, jlong group1, jlong group2)
* Method: difference
* Signature: (Lmpi/Group;Lmpi/Group;)J
*/
JNIEXPORT jlong JNICALL Java_mpi_Group_difference(JNIEnv *env, jclass jthis,
jobject group1, jobject group2)
{ {
MPI_Group newgroup; MPI_Group newGroup;
ompi_java_clearFreeList(env) ; int rc = MPI_Group_intersection(
(MPI_Group)group1, (MPI_Group)group2, &newGroup);
MPI_Group_difference((MPI_Group)((*env)->GetLongField(env,group1,ompi_java.GrouphandleID)), ompi_java_exceptionCheck(env, rc);
(MPI_Group)((*env)->GetLongField(env,group2,ompi_java.GrouphandleID)), return (jlong)newGroup;
&newgroup);
return (jlong)newgroup;
} }
/* JNIEXPORT jlong JNICALL Java_mpi_Group_difference(
* Class: mpi_Group JNIEnv *env, jclass jthis, jlong group1, jlong group2)
* Method: incl
* Signature: ([I)J
*/
JNIEXPORT jlong JNICALL Java_mpi_Group_incl(JNIEnv *env, jobject jthis, jintArray ranks)
{ {
int n; MPI_Group newGroup;
jint *rks;
jboolean isCopy=JNI_TRUE;
MPI_Group newgroup;
ompi_java_clearFreeList(env) ; int rc = MPI_Group_difference(
(MPI_Group)group1, (MPI_Group)group2, &newGroup);
n=(*env)->GetArrayLength(env,ranks); ompi_java_exceptionCheck(env, rc);
rks=(*env)->GetIntArrayElements(env,ranks,&isCopy); return (jlong)newGroup;
MPI_Group_incl((MPI_Group)((*env)->GetLongField(env,jthis,ompi_java.GrouphandleID)),
n, (int*)rks,
&newgroup);
(*env)->ReleaseIntArrayElements(env,ranks,rks,0);
return (jlong)newgroup;
} }
/* JNIEXPORT jlong JNICALL Java_mpi_Group_incl(
* Class: mpi_Group JNIEnv *env, jobject jthis, jlong group, jintArray ranks)
* Method: excl
* Signature: ([I)J
*/
JNIEXPORT jlong JNICALL Java_mpi_Group_excl(JNIEnv *env, jobject jthis, jintArray ranks)
{ {
int n; jsize n = (*env)->GetArrayLength(env, ranks);
jint *rks; jint *jRanks;
jboolean isCopy=JNI_TRUE; int *cRanks;
MPI_Group newgroup; ompi_java_getIntArray(env, ranks, &jRanks, &cRanks);
ompi_java_clearFreeList(env) ; MPI_Group newGroup;
int rc = MPI_Group_incl((MPI_Group)group, n, cRanks, &newGroup);
ompi_java_exceptionCheck(env, rc);
n=(*env)->GetArrayLength(env,ranks); ompi_java_forgetIntArray(env, ranks, jRanks, cRanks);
rks=(*env)->GetIntArrayElements(env,ranks,&isCopy); return (jlong)newGroup;
MPI_Group_excl((MPI_Group)((*env)->GetLongField(env,jthis,ompi_java.GrouphandleID)),
n, (int*)rks,
&newgroup);
(*env)->ReleaseIntArrayElements(env,ranks,rks,0);
return (jlong)newgroup;
} }
/* JNIEXPORT jlong JNICALL Java_mpi_Group_excl(
* Class: mpi_Group JNIEnv *env, jobject jthis, jlong group, jintArray ranks)
* Method: range_incl {
* Signature: ([[I)J jsize n = (*env)->GetArrayLength(env, ranks);
*/ jint *jRanks;
JNIEXPORT jlong JNICALL Java_mpi_Group_range_1incl(JNIEnv *env, jobject jthis, jobjectArray ranges) int *cRanks;
ompi_java_getIntArray(env, ranks, &jRanks, &cRanks);
MPI_Group newGroup;
int rc = MPI_Group_excl((MPI_Group)group, n, cRanks, &newGroup);
ompi_java_exceptionCheck(env, rc);
ompi_java_forgetIntArray(env, ranks, jRanks, cRanks);
return (jlong)newGroup;
}
JNIEXPORT jlong JNICALL Java_mpi_Group_rangeIncl(
JNIEnv *env, jobject jthis, jlong group, jobjectArray ranges)
{ {
int i; int i;
int n=(*env)->GetArrayLength(env,ranges); MPI_Group newGroup;
jboolean isCopy=JNI_TRUE; jsize n = (*env)->GetArrayLength(env, ranges);
MPI_Group newgroup; int (*cRanges)[3] = (int(*)[3])calloc(n, sizeof(int[3]));
/* jint **rngs=(jint**)calloc(n,sizeof(jint[3])); */
int (*rngs) [3] =(int (*) [3])calloc(n,sizeof(int[3]));
jintArray *jrngs=(jobject*)calloc(n,sizeof(jintArray));
ompi_java_clearFreeList(env) ; for(i = 0; i < n; i++)
{
for(i=0;i<n;i++) { jintArray ri = (*env)->GetObjectArrayElement(env, ranges, i);
jint *vec ; jint *jri = (*env)->GetIntArrayElements(env, ri, NULL);
jrngs[i]=(*env)->GetObjectArrayElement(env,ranges,i); cRanges[i][0] = jri[0];
vec=(*env)->GetIntArrayElements(env, jrngs[i],&isCopy); cRanges[i][1] = jri[1];
rngs [i] [0] = vec [0] ; cRanges[i][2] = jri[2];
rngs [i] [1] = vec [1] ; (*env)->ReleaseIntArrayElements(env, ri, jri, JNI_ABORT);
rngs [i] [2] = vec [2] ; (*env)->DeleteLocalRef(env, ri);
(*env)->ReleaseIntArrayElements(env,jrngs[i],vec,0);
} }
MPI_Group_range_incl((MPI_Group)((*env)->GetLongField(env,jthis,ompi_java.GrouphandleID)), int rc = MPI_Group_range_incl((MPI_Group)group, n, cRanges, &newGroup);
n,rngs,&newgroup); ompi_java_exceptionCheck(env, rc);
free(cRanges);
free(rngs); return (jlong)newGroup;
free(jrngs);
return (jlong)newgroup;
} }
/* JNIEXPORT jlong JNICALL Java_mpi_Group_rangeExcl(
* Class: mpi_Group JNIEnv *env, jobject jthis, jlong group, jobjectArray ranges)
* Method: range_excl
* Signature: ([[I)J
*/
JNIEXPORT jlong JNICALL Java_mpi_Group_range_1excl(JNIEnv *env, jobject jthis, jobjectArray ranges)
{ {
int i; int i;
int n=(*env)->GetArrayLength(env,ranges); MPI_Group newGroup;
jboolean isCopy=JNI_TRUE; jsize n = (*env)->GetArrayLength(env, ranges);
MPI_Group newgroup; int (*cRanges)[3] = (int(*)[3])calloc(n, sizeof(int[3]));
/* jint **rngs=(jint**)calloc(n,sizeof(jint*)); */
int (*rngs) [3] =(int (*) [3])calloc(n,sizeof(int[3]));
jintArray *jrngs=(jobject*)calloc(n,sizeof(jintArray));
ompi_java_clearFreeList(env) ; for(i = 0; i < n; i++)
{
for(i=0;i<n;i++) { jintArray ri = (*env)->GetObjectArrayElement(env, ranges, i);
jint* vec; jint *jri = (*env)->GetIntArrayElements(env, ri, NULL);
jrngs[i]=(*env)->GetObjectArrayElement(env,ranges,i); cRanges[i][0] = jri[0];
vec=(*env)->GetIntArrayElements(env, cRanges[i][1] = jri[1];
jrngs[i],&isCopy); cRanges[i][2] = jri[2];
rngs [i] [0] = vec [0] ; (*env)->ReleaseIntArrayElements(env, ri, jri, JNI_ABORT);
rngs [i] [1] = vec [1] ; (*env)->DeleteLocalRef(env, ri);
rngs [i] [2] = vec [2] ;
(*env)->ReleaseIntArrayElements(env,jrngs[i],vec,0);
} }
MPI_Group_range_excl((MPI_Group)((*env)->GetLongField(env,jthis,ompi_java.GrouphandleID)),
n, rngs,&newgroup);
free(rngs); int rc = MPI_Group_range_excl((MPI_Group)group, n, cRanges, &newGroup);
free(jrngs); ompi_java_exceptionCheck(env, rc);
return (jlong)newgroup; free(cRanges);
return (jlong)newGroup;
} }
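rangeIncl/rangeExcl hand the (first, last, stride) triplets straight to MPI. For reference, the same call in plain C (a sketch; assumes MPI is initialized and the communicator has at least nine ranks):

#include <mpi.h>

static void make_even_group(void)
{
    MPI_Group world, evens;
    MPI_Comm_group(MPI_COMM_WORLD, &world);
    int ranges[1][3] = { { 0, 8, 2 } };  /* first, last, stride */
    MPI_Group_range_incl(world, 1, ranges, &evens);
    /* ... use evens ... */
    MPI_Group_free(&evens);
    MPI_Group_free(&world);
}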
ompi/mpi/java/c/mpi_Info.c (new file, 178 lines)
@@ -0,0 +1,178 @@
#include "ompi_config.h"
#include <stdlib.h>
#ifdef HAVE_TARGETCONDITIONALS_H
#include <TargetConditionals.h>
#endif
#include "mpi.h"
#include "mpi_Info.h"
#include "mpiJava.h"
/*
* Class: mpi_Info
* Method: create
* Signature: ()J
*/
JNIEXPORT jlong JNICALL Java_mpi_Info_create(JNIEnv *env, jobject jthis)
{
MPI_Info info;
int rc = MPI_Info_create(&info);
ompi_java_exceptionCheck(env, rc);
return (jlong)info;
}
/*
* Class: mpi_Info
* Method: getEnv
* Signature: ()J
*/
JNIEXPORT jlong JNICALL Java_mpi_Info_getEnv(JNIEnv *env, jclass clazz)
{
return (jlong)MPI_INFO_ENV;
}
/*
* Class: mpi_Info
* Method: getNull
* Signature: ()J
*/
JNIEXPORT jlong JNICALL Java_mpi_Info_getNull(JNIEnv *env, jclass clazz)
{
return (jlong)MPI_INFO_NULL;
}
/*
* Class: mpi_Info
* Method: set
* Signature: (JLjava/lang/String;Ljava/lang/String;)V
*/
JNIEXPORT void JNICALL Java_mpi_Info_set(
JNIEnv *env, jobject jthis, jlong handle, jstring jkey, jstring jvalue)
{
const char *key = (*env)->GetStringUTFChars(env, jkey, NULL),
*value = (*env)->GetStringUTFChars(env, jvalue, NULL);
int rc = MPI_Info_set((MPI_Info)handle, (char*)key, (char*)value);
ompi_java_exceptionCheck(env, rc);
(*env)->ReleaseStringUTFChars(env, jkey, key);
(*env)->ReleaseStringUTFChars(env, jvalue, value);
}
/*
* Class: mpi_Info
* Method: get
* Signature: (JLjava/lang/String;)Ljava/lang/String;
*/
JNIEXPORT jstring JNICALL Java_mpi_Info_get(
JNIEnv *env, jobject jthis, jlong handle, jstring jkey)
{
MPI_Info info = (MPI_Info)handle;
const char *key = (*env)->GetStringUTFChars(env, jkey, NULL);
int rc, valueLen, flag;
rc = MPI_Info_get_valuelen(info, (char*)key, &valueLen, &flag);
if(ompi_java_exceptionCheck(env, rc) || !flag)
{
(*env)->ReleaseStringUTFChars(env, jkey, key);
return NULL;
}
char *value = (char*)calloc(valueLen + 1, sizeof(char));
rc = MPI_Info_get((MPI_Info)info, (char*)key, valueLen, value, &flag);
(*env)->ReleaseStringUTFChars(env, jkey, key);
if(ompi_java_exceptionCheck(env, rc) || !flag)
{
free(value);
return NULL;
}
jstring jvalue = (*env)->NewStringUTF(env, value);
free(value);
return jvalue;
}
/*
* Class: mpi_Info
* Method: delete
* Signature: (JLjava/lang/String;)V
*/
JNIEXPORT void JNICALL Java_mpi_Info_delete(
JNIEnv *env, jobject jthis, jlong handle, jstring jkey)
{
const char *key = (*env)->GetStringUTFChars(env, jkey, NULL);
int rc = MPI_Info_delete((MPI_Info)handle, (char*)key);
ompi_java_exceptionCheck(env, rc);
(*env)->ReleaseStringUTFChars(env, jkey, key);
}
/*
* Class: mpi_Info
* Method: size
* Signature: (J)I
*/
JNIEXPORT jint JNICALL Java_mpi_Info_size(
JNIEnv *env, jobject jthis, jlong handle)
{
int rc, nkeys;
rc = MPI_Info_get_nkeys((MPI_Info)handle, &nkeys);
ompi_java_exceptionCheck(env, rc);
return (jint)nkeys;
}
/*
* Class: mpi_Info
* Method: getKey
* Signature: (JI)Ljava/lang/String;
*/
JNIEXPORT jstring JNICALL Java_mpi_Info_getKey(
JNIEnv *env, jobject jthis, jlong handle, jint i)
{
char key[MPI_MAX_INFO_KEY + 1];
int rc = MPI_Info_get_nthkey((MPI_Info)handle, i, key);
return ompi_java_exceptionCheck(env, rc)
? NULL : (*env)->NewStringUTF(env, key);
}
/*
* Class: mpi_Info
* Method: clone
* Signature: (J)J
*/
JNIEXPORT jlong JNICALL Java_mpi_Info_clone(
JNIEnv *env, jobject jthis, jlong handle)
{
MPI_Info newInfo;
int rc = MPI_Info_dup((MPI_Info)handle, &newInfo);
ompi_java_exceptionCheck(env, rc);
return (jlong)newInfo;
}
/*
* Class: mpi_Info
* Method: free
* Signature: (J)J
*/
JNIEXPORT jlong JNICALL Java_mpi_Info_free(
JNIEnv *env, jobject jthis, jlong handle)
{
MPI_Info info = (MPI_Info)handle;
int rc = MPI_Info_free(&info);
ompi_java_exceptionCheck(env, rc);
return (jlong)info;
}
/*
* Class: mpi_Info
* Method: isNull
* Signature: (J)Z
*/
JNIEXPORT jboolean JNICALL Java_mpi_Info_isNull(
JNIEnv *env, jobject jthis, jlong handle)
{
return (MPI_Info)handle == MPI_INFO_NULL ? JNI_TRUE : JNI_FALSE;
}
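Java_mpi_Info_get above sizes its buffer with MPI_Info_get_valuelen before fetching, the standard two-step for values of unknown length. The same round trip in plain C (sketch, error handling omitted):

#include <stdlib.h>
#include <mpi.h>

static void info_round_trip(void)
{
    MPI_Info info;
    int len, flag;
    MPI_Info_create(&info);
    MPI_Info_set(info, "key", "value");
    MPI_Info_get_valuelen(info, "key", &len, &flag);
    if (flag) {
        char *value = calloc(len + 1, 1);   /* +1 for the trailing NUL */
        MPI_Info_get(info, "key", len, value, &flag);
        free(value);
    }
    MPI_Info_free(&info);
}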
ompi/mpi/java/c/mpi_Intercomm.c
@@ -35,47 +35,68 @@
/*
 * Class:     mpi_Intercomm
 * Method:    getRemoteSize_jni
 * Signature: ()I
 */
JNIEXPORT jint JNICALL Java_mpi_Intercomm_getRemoteSize_1jni(
        JNIEnv *env, jobject jthis)
{
    int size, rc;
    rc = MPI_Comm_remote_size(
         (MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommHandle)),
         &size);
    ompi_java_exceptionCheck(env, rc);
    return size;
}

/*
 * Class:     mpi_Intercomm
 * Method:    getRemoteGroup_jni
 * Signature: ()J
 */
JNIEXPORT jlong JNICALL Java_mpi_Intercomm_getRemoteGroup_1jni(
        JNIEnv *env, jobject jthis)
{
    MPI_Group group;
    int rc = MPI_Comm_remote_group(
             (MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommHandle)),
             &group);
    ompi_java_exceptionCheck(env, rc);
    return (jlong)group;
}

/*
 * Class:     mpi_Intercomm
 * Method:    merge_jni
 * Signature: (Z)Lmpi/Intracomm;
 */
JNIEXPORT jlong JNICALL Java_mpi_Intercomm_merge_1jni(
        JNIEnv *env, jobject jthis, jboolean high)
{
    MPI_Comm newintracomm;
    int rc = MPI_Intercomm_merge(
             (MPI_Comm)((*env)->GetLongField(env,jthis,ompi_java.CommHandle)),
             high, &newintracomm);
    ompi_java_exceptionCheck(env, rc);
    return (jlong)newintracomm;
}
/*
* Class: mpi_Intercomm
* Method: getParent_jni
* Signature: ()J
*/
JNIEXPORT jlong JNICALL Java_mpi_Intercomm_getParent_1jni(
JNIEnv *env, jclass clazz)
{
MPI_Comm parent;
int rc = MPI_Comm_get_parent(&parent);
ompi_java_exceptionCheck(env, rc);
return (jlong)parent;
}
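getParent_jni and merge_jni together cover the classic dynamic-process flow. Sketched in C (assumes this process was started via MPI_Comm_spawn; error handling omitted):

#include <mpi.h>

static void join_parent(void)
{
    /* In a spawned child: fold parent + children into one
     * intra-communicator. */
    MPI_Comm parent, everyone;
    MPI_Comm_get_parent(&parent);
    if (parent != MPI_COMM_NULL)
        MPI_Intercomm_merge(parent, 1 /* high */, &everyone);
}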
(one file's diff is not shown because of its size)

ompi/mpi/java/c/mpi_MPI.c
@@ -1,3 +1,10 @@
/*
* Copyright (c) 2013 Cisco Systems, Inc. All rights reserved.
*
* $COPYRIGHT$
*
* Additional copyrights may follow.
*/
/*
    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
@@ -13,7 +20,7 @@
 */
/*
 * File         : mpi_MPI.c
 * Headerfile   : mpi_MPI.h
 * Author       : SungHoon Ko, Xinying Li (contributions from MAEDA Atusi)
 * Created      : Thu Apr 9 12:22:15 1998
 * Revision     : $Revision: 1.17 $
@@ -39,18 +46,12 @@
#ifdef HAVE_SYS_STAT_H
#include <sys/stat.h>
#endif
#include <dlfcn.h>

#include "opal/util/output.h"
#include "mpi.h"
#include "ompi/errhandler/errcode.h"
#include "mpi_MPI.h"
#include "mpiJava.h"
@@ -58,7 +59,7 @@ ompi_java_globals_t ompi_java;
static int len = 0;
static char** sargs = 0;
static void *mpilibhandle = NULL;

/*
 * Class:     mpi_MPI
@@ -88,163 +89,222 @@ static char** sargs = 0;
 */
JNIEXPORT jboolean JNICALL Java_mpi_MPI_loadGlobalLibraries(JNIEnv *env, jclass obj)
{
    if (NULL == (mpilibhandle = dlopen("libmpi.so", RTLD_NOW | RTLD_GLOBAL))) {
        return JNI_FALSE;
    }
    return JNI_TRUE;
}
JNIEXPORT jobject JNICALL Java_mpi_MPI_newInt2(JNIEnv *env, jclass clazz)
{
struct { int a; int b; } s;
int iOff = (int)((MPI_Aint)(&(s.b)) - (MPI_Aint)(&(s.a)));
jclass c = (*env)->FindClass(env, "mpi/Int2");
jmethodID m = (*env)->GetMethodID(env, c, "<init>", "(II)V");
return (*env)->NewObject(env, c, m, iOff, sizeof(int));
}
JNIEXPORT jobject JNICALL Java_mpi_MPI_newShortInt(JNIEnv *env, jclass clazz)
{
struct { short a; int b; } s;
int iOff = (int)((MPI_Aint)(&(s.b)) - (MPI_Aint)(&(s.a)));
jclass c = (*env)->FindClass(env, "mpi/ShortInt");
jmethodID m = (*env)->GetMethodID(env, c, "<init>", "(III)V");
return (*env)->NewObject(env, c, m, sizeof(short), iOff, sizeof(int));
}
JNIEXPORT jobject JNICALL Java_mpi_MPI_newLongInt(JNIEnv *env, jclass clazz)
{
struct { long a; int b; } s;
int iOff = (int)((MPI_Aint)(&(s.b)) - (MPI_Aint)(&(s.a)));
jclass c = (*env)->FindClass(env, "mpi/LongInt");
jmethodID m = (*env)->GetMethodID(env, c, "<init>", "(III)V");
return (*env)->NewObject(env, c, m, sizeof(long), iOff, sizeof(int));
}
JNIEXPORT jobject JNICALL Java_mpi_MPI_newFloatInt(JNIEnv *env, jclass clazz)
{
struct { float a; int b; } s;
int iOff = (int)((MPI_Aint)(&(s.b)) - (MPI_Aint)(&(s.a)));
jclass c = (*env)->FindClass(env, "mpi/FloatInt");
jmethodID m = (*env)->GetMethodID(env, c, "<init>", "(II)V");
return (*env)->NewObject(env, c, m, iOff, sizeof(int));
}
JNIEXPORT jobject JNICALL Java_mpi_MPI_newDoubleInt(JNIEnv *env, jclass clazz)
{
struct { double a; int b; } s;
int iOff = (int)((MPI_Aint)(&(s.b)) - (MPI_Aint)(&(s.a)));
jclass c = (*env)->FindClass(env, "mpi/DoubleInt");
jmethodID m = (*env)->GetMethodID(env, c, "<init>", "(II)V");
return (*env)->NewObject(env, c, m, iOff, sizeof(int));
}
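The newInt2/newShortInt/.../newDoubleInt helpers above measure the native layout of the MINLOC/MAXLOC pair structs at run time; the pointer subtraction is offsetof in disguise. For example (a sketch; the assertion holds on typical ABIs where int alignment does not exceed eight bytes):

#include <stddef.h>

struct double_int { double a; int b; };
/* iOff computed by newDoubleInt equals offsetof(struct double_int, b). */
_Static_assert(offsetof(struct double_int, b) == sizeof(double),
               "unexpected padding between value and index");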
/*
 * Class:     mpi_MPI
 * Method:    Init_jni
 * Signature: ([Ljava/lang/String;)[Ljava/lang/String;
 */
JNIEXPORT jobjectArray JNICALL Java_mpi_MPI_Init_1jni(
        JNIEnv *env, jclass clazz, jobjectArray argv)
{
    jsize i;
    jclass string;
    jobject value;
    len = (*env)->GetArrayLength(env,argv);
    sargs = (char**)calloc(len+1, sizeof(char*));

    for(i = 0; i < len; i++)
    {
        jstring jc = (jstring)(*env)->GetObjectArrayElement(env, argv, i);
        const char *s = (*env)->GetStringUTFChars(env, jc, 0);
        sargs[i] = (char*)calloc(strlen(s) + 1, sizeof(char));
        strcpy(sargs[i], s);
        (*env)->DeleteLocalRef(env, jc);
    }

    int rc = MPI_Init(&len, &sargs);
    ompi_java_exceptionCheck(env, rc);
    string = (*env)->FindClass(env, "java/lang/String");
    value = (*env)->NewObjectArray(env, len, string, NULL);

    for(i = 0; i < len; i++)
    {
        jstring jc = (*env)->NewStringUTF(env, sargs[i]);
        (*env)->SetObjectArrayElement(env, value, i, jc);
        (*env)->DeleteLocalRef(env, jc);
    }

    ompi_java_init_native_Datatype(env);
    ompi_java_findClasses(env);
    return value;
}

/*
 * Class:     mpi_MPI
 * Method:    InitThread_jni
 * Signature: ([Ljava/lang/String;I)I
 */
JNIEXPORT jint JNICALL Java_mpi_MPI_InitThread_1jni(
JNIEnv *env, jclass clazz, jobjectArray argv, jint required)
{
jsize i;
len = (*env)->GetArrayLength(env,argv);
sargs = (char**)calloc(len+1, sizeof(char*));
for(i = 0; i < len; i++)
{
jstring jc = (jstring)(*env)->GetObjectArrayElement(env, argv, i);
const char *s = (*env)->GetStringUTFChars(env, jc, 0);
sargs[i] = (char*)calloc(strlen(s) + 1, sizeof(char));
strcpy(sargs[i], s);
(*env)->DeleteLocalRef(env, jc);
}
int provided;
int rc = MPI_Init_thread(&len, &sargs, required, &provided);
ompi_java_exceptionCheck(env, rc);
ompi_java_init_native_Datatype(env);
ompi_java_findClasses(env);
return provided;
}
/*
* Class: mpi_MPI
* Method: queryThread_jni
* Signature: ()I
*/
JNIEXPORT jint JNICALL Java_mpi_MPI_queryThread_1jni(JNIEnv *env, jclass clazz)
{
int provided;
int rc = MPI_Query_thread(&provided);
ompi_java_exceptionCheck(env, rc);
return provided;
}
/*
* Class: mpi_MPI
* Method: isThreadMain_jni
* Signature: ()Z
*/
JNIEXPORT jboolean JNICALL Java_mpi_MPI_isThreadMain_1jni(
JNIEnv *env, jclass clazz)
{
int flag;
int rc = MPI_Is_thread_main(&flag);
ompi_java_exceptionCheck(env, rc);
return flag ? JNI_TRUE : JNI_FALSE;
}
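InitThread_jni, queryThread_jni, and isThreadMain_jni expose MPI's thread-level negotiation. The C shape of it (a sketch):

#include <mpi.h>

int main(int argc, char *argv[])
{
    /* Ask for full thread support, then check what was granted. */
    int provided;
    MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
    if (provided < MPI_THREAD_MULTIPLE) {
        /* downgrade path: e.g. confine MPI calls to one thread */
    }
    MPI_Finalize();
    return 0;
}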
/*
 * Class:     mpi_MPI
 * Method:    Finalize_jni
 * Signature: ()V
 */
JNIEXPORT void JNICALL Java_mpi_MPI_Finalize_1jni(JNIEnv *env, jclass obj)
{
    if (NULL != mpilibhandle) {
        dlclose(mpilibhandle);
    }
    int rc = MPI_Finalize();
    ompi_java_exceptionCheck(env, rc);
    ompi_java_deleteClasses(env);
}

/*
 * Class:     mpi_MPI
 * Method:    getProcessorName
 * Signature: ([B)I
 */
JNIEXPORT jint JNICALL Java_mpi_MPI_getProcessorName(
        JNIEnv *env, jclass obj, jbyteArray buf)
{
    int len, rc;
    jboolean isCopy;
    jbyte* bufc = (jbyte*)((*env)->GetByteArrayElements(env,buf,&isCopy));
    rc = MPI_Get_processor_name((char*)bufc, &len);
    ompi_java_exceptionCheck(env, rc);
    (*env)->ReleaseByteArrayElements(env,buf,bufc,0);
    return len;
}
/*
 * Class:     mpi_MPI
 * Method:    wtime_jni
 * Signature: ()D
 */
JNIEXPORT jdouble JNICALL Java_mpi_MPI_wtime_1jni(JNIEnv *env, jclass jthis)
{
    return MPI_Wtime();
}

/*
 * Class:     mpi_MPI
 * Method:    wtick_jni
 * Signature: ()D
 */
JNIEXPORT jdouble JNICALL Java_mpi_MPI_wtick_1jni(JNIEnv *env, jclass jthis)
{
    return MPI_Wtick();
}

/*
 * Class:     mpi_MPI
 * Method:    isInitialized
 * Signature: ()Z
 */
JNIEXPORT jboolean JNICALL Java_mpi_MPI_isInitialized(JNIEnv *env, jclass jthis)
{
    int flag, rc;
    rc = MPI_Initialized(&flag);
    ompi_java_exceptionCheck(env, rc);
    if (flag==0) {
        return JNI_FALSE;
    } else {
        return JNI_TRUE;
    }
}
@@ -254,96 +314,260 @@ JNIEXPORT jboolean JNICALL Java_mpi_MPI_Initialized(JNIEnv *env, jclass jthis)
/*
 * Class:     mpi_MPI
 * Method:    isFinalized
 * Signature: ()Z
 */
JNIEXPORT jboolean JNICALL Java_mpi_MPI_isFinalized(JNIEnv *env, jclass jthis)
{
int flag, rc;
rc = MPI_Finalized(&flag);
ompi_java_exceptionCheck(env, rc);
if (flag==0) {
return JNI_FALSE;
} else {
return JNI_TRUE;
}
}
/*
 * Class:     mpi_MPI
 * Method:    attachBuffer_jni
 * Signature: ([B)V
 */
JNIEXPORT void JNICALL Java_mpi_MPI_attachBuffer_1jni(
        JNIEnv *env, jclass jthis, jbyteArray buf)
{
    jboolean isCopy;
    int size = (*env)->GetArrayLength(env,buf);
    jbyte* bufptr = (*env)->GetByteArrayElements(env,buf,&isCopy);
    int rc = MPI_Buffer_attach(bufptr,size);
    ompi_java_exceptionCheck(env, rc);
}
/*
 * Class:     mpi_MPI
 * Method:    detachBuffer_jni
 * Signature: ([B)V
 */
JNIEXPORT void JNICALL Java_mpi_MPI_detachBuffer_1jni(
        JNIEnv *env, jclass jthis, jbyteArray buf)
{
    int size, rc;
    jbyte* bufptr;
    rc = MPI_Buffer_detach(&bufptr, &size);
    ompi_java_exceptionCheck(env, rc);

    if (buf != NULL) {
        (*env)->ReleaseByteArrayElements(env,buf,bufptr,0);
    }
}
void ompi_java_findClasses(JNIEnv *env)
{
    ompi_java.CartParmsClass  = ompi_java_findClass(env, "mpi/CartParms");
    ompi_java.ShiftParmsClass = ompi_java_findClass(env, "mpi/ShiftParms");
    ompi_java.GraphParmsClass = ompi_java_findClass(env, "mpi/GraphParms");

    ompi_java.DistGraphNeighborsClass = ompi_java_findClass(
                                        env, "mpi/DistGraphNeighbors");

    ompi_java.StatusClass    = ompi_java_findClass(env, "mpi/Status");
    ompi_java.ExceptionClass = ompi_java_findClass(env, "mpi/MPIException");

    ompi_java.ExceptionInit = (*env)->GetMethodID(
                              env, ompi_java.ExceptionClass,
                              "<init>", "(IILjava/lang/String;)V");

    ompi_java.IntegerClass = ompi_java_findClass(env, "java/lang/Integer");
    ompi_java.LongClass    = ompi_java_findClass(env, "java/lang/Long");

    ompi_java.IntegerValueOf = (*env)->GetStaticMethodID(
            env, ompi_java.IntegerClass, "valueOf", "(I)Ljava/lang/Integer;");
    ompi_java.LongValueOf = (*env)->GetStaticMethodID(
            env, ompi_java.LongClass, "valueOf", "(J)Ljava/lang/Long;");
}
jclass ompi_java_findClass(JNIEnv *env, const char *className)
{
jclass c = (*env)->FindClass(env, className),
r = (*env)->NewGlobalRef(env, c);
(*env)->DeleteLocalRef(env, c);
return r;
}
void ompi_java_deleteClasses(JNIEnv *env)
{
(*env)->DeleteGlobalRef(env, ompi_java.CartParmsClass);
(*env)->DeleteGlobalRef(env, ompi_java.ShiftParmsClass);
(*env)->DeleteGlobalRef(env, ompi_java.GraphParmsClass);
(*env)->DeleteGlobalRef(env, ompi_java.DistGraphNeighborsClass);
(*env)->DeleteGlobalRef(env, ompi_java.StatusClass);
(*env)->DeleteGlobalRef(env, ompi_java.ExceptionClass);
(*env)->DeleteGlobalRef(env, ompi_java.IntegerClass);
(*env)->DeleteGlobalRef(env, ompi_java.LongClass);
}
jobject ompi_java_Integer_valueOf(JNIEnv *env, jint i)
{
return (*env)->CallStaticObjectMethod(env,
ompi_java.IntegerClass, ompi_java.IntegerValueOf, i);
}
jobject ompi_java_Long_valueOf(JNIEnv *env, jlong i)
{
return (*env)->CallStaticObjectMethod(env,
ompi_java.LongClass, ompi_java.LongValueOf, i);
}
void ompi_java_getIntArray(JNIEnv *env, jintArray array,
jint **jptr, int **cptr)
{
jint *jInts = (*env)->GetIntArrayElements(env, array, NULL);
*jptr = jInts;
if(sizeof(int) == sizeof(jint))
{
*cptr = (int*)jInts;
}
else
{
int i, length = (*env)->GetArrayLength(env, array);
int *cInts = calloc(length, sizeof(int));
for(i = 0; i < length; i++)
cInts[i] = jInts[i];
*cptr = cInts;
}
}
void ompi_java_releaseIntArray(JNIEnv *env, jintArray array,
jint *jptr, int *cptr)
{
if(jptr != cptr)
{
int i, length = (*env)->GetArrayLength(env, array);
for(i = 0; i < length; i++)
jptr[i] = cptr[i];
free(cptr);
}
(*env)->ReleaseIntArrayElements(env, array, jptr, 0);
}
void ompi_java_forgetIntArray(JNIEnv *env, jintArray array,
jint *jptr, int *cptr)
{
if(jptr != cptr)
free(cptr);
(*env)->ReleaseIntArrayElements(env, array, jptr, JNI_ABORT);
}
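These three helpers exist because jint is always 32 bits while a C int in principle need not be: getIntArray hands MPI a real int buffer either way, releaseIntArray copies results back into the Java array, and forgetIntArray discards them (JNI_ABORT) when MPI only read the data. The calling discipline used throughout these files, as a sketch (env and ranks stand for whatever the caller has in scope):

jint *jRanks;
int  *cRanks;
ompi_java_getIntArray(env, ranks, &jRanks, &cRanks);   /* Java -> C */
/* ... pass cRanks to an MPI call ... */
ompi_java_releaseIntArray(env, ranks, jRanks, cRanks); /* C -> Java */
/* or: ompi_java_forgetIntArray(env, ranks, jRanks, cRanks);
 *     when the MPI call treated the array as input only */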
void ompi_java_getBooleanArray(JNIEnv *env, jbooleanArray array,
jboolean **jptr, int **cptr)
{
int i, length = (*env)->GetArrayLength(env, array);
jboolean *jb = (*env)->GetBooleanArrayElements(env, array, NULL);
int *cb = (int*)calloc(length, sizeof(int));
for(i = 0; i < length; i++)
cb[i] = jb[i];
(*env)->SetStaticIntField(env,jthis,identID,MPI_IDENT); *jptr = jb;
(*env)->SetStaticIntField(env,jthis,congruentID,MPI_CONGRUENT); *cptr = cb;
(*env)->SetStaticIntField(env,jthis,similarID,MPI_SIMILAR);
(*env)->SetStaticIntField(env,jthis,unequalID,MPI_UNEQUAL);
(*env)->SetStaticIntField(env,jthis,tagubID,MPI_TAG_UB);
(*env)->SetStaticIntField(env,jthis,hostID,MPI_HOST);
(*env)->SetStaticIntField(env,jthis,ioID,MPI_IO);
} }
void ompi_java_releaseBooleanArray(JNIEnv *env, jbooleanArray array,
                                   jboolean *jptr, int *cptr)
{
    int i, length = (*env)->GetArrayLength(env, array);

    for(i = 0; i < length; i++)
        jptr[i] = cptr[i] ? JNI_TRUE : JNI_FALSE;

    free(cptr);
    (*env)->ReleaseBooleanArrayElements(env, array, jptr, 0);
}
void ompi_java_forgetBooleanArray(JNIEnv *env, jbooleanArray array,
jboolean *jptr, int *cptr)
{
free(cptr);
(*env)->ReleaseBooleanArrayElements(env, array, jptr, JNI_ABORT);
}
jboolean ompi_java_exceptionCheck(JNIEnv *env, int rc)
{
if((*env)->ExceptionCheck(env))
{
return JNI_TRUE;
}
else if(MPI_SUCCESS == rc)
{
return JNI_FALSE;
}
else
{
int errClass = ompi_mpi_errcode_get_class(rc);
char *message = ompi_mpi_errnum_get_string(rc);
jstring jmessage = (*env)->NewStringUTF(env, (const char*)message);
jobject mpiex = (*env)->NewObject(env, ompi_java.ExceptionClass,
ompi_java.ExceptionInit,
rc, errClass, jmessage);
(*env)->Throw(env, mpiex);
(*env)->DeleteLocalRef(env, mpiex);
(*env)->DeleteLocalRef(env, jmessage);
return JNI_TRUE;
}
}
void* ompi_java_attrSet(JNIEnv *env, jbyteArray jval)
{
int length = (*env)->GetArrayLength(env, jval);
void *cval = malloc(sizeof(int) + length);
*((int*)cval) = length;
(*env)->GetByteArrayRegion(env, jval,
0, length, (jbyte*)cval + sizeof(int));
return cval;
}
jbyteArray ompi_java_attrGet(JNIEnv *env, void *cval)
{
int length = *((int*)cval);
jbyteArray jval = (*env)->NewByteArray(env, length);
(*env)->SetByteArrayRegion(env, jval,
0, length, (jbyte*)cval + sizeof(int));
return jval;
}
int ompi_java_attrCopy(void *attrValIn, void *attrValOut, int *flag)
{
int length = *((int*)attrValIn) + sizeof(int);
*((void**)attrValOut) = malloc(length);
memcpy(*((void**)attrValOut), attrValIn, length);
*flag = 1;
return MPI_SUCCESS;
}
int ompi_java_attrDelete(void *attrVal)
{
free(attrVal);
return MPI_SUCCESS;
}
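The attribute helpers above serialize a Java byte[] into a single length-prefixed heap block, so attrCopy and attrDelete can run without a JNI environment. The layout is simply (sketch; cval is a block produced by ompi_java_attrSet):

/* [ int length ][ length bytes of jbyte payload ] */
int    length  = *(int*)cval;
jbyte *payload = (jbyte*)cval + sizeof(int);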
ompi/mpi/java/c/mpi_Message.c (new file, 91 lines)
@@ -0,0 +1,91 @@
#include "ompi_config.h"
#ifdef HAVE_TARGETCONDITIONALS_H
#include <TargetConditionals.h>
#endif
#include "mpi.h"
#include "mpi_Message.h"
#include "mpiJava.h"
JNIEXPORT void JNICALL Java_mpi_Message_init(JNIEnv *e, jclass c)
{
ompi_java_setStaticLongField(e, c, "NULL", (jlong)MPI_MESSAGE_NULL);
ompi_java_setStaticLongField(e, c, "NO_PROC", (jlong)MPI_MESSAGE_NO_PROC);
ompi_java.MessageHandle = (*e)->GetFieldID(e, c, "handle", "J");
}
JNIEXPORT jobject JNICALL Java_mpi_Message_mProbe(
JNIEnv *env, jobject jthis, jint source, jint tag, jlong comm)
{
MPI_Message message = (MPI_Message)((*env)->GetLongField(
env, jthis, ompi_java.MessageHandle));
int rc;
MPI_Status status;
rc = MPI_Mprobe(source, tag, (MPI_Comm)comm, &message, &status);
if(ompi_java_exceptionCheck(env, rc))
return NULL;
(*env)->SetLongField(env, jthis, ompi_java.MessageHandle, (jlong)message);
jobject stat = ompi_java_status_new(&status, env);
return stat;
}
JNIEXPORT jobject JNICALL Java_mpi_Message_imProbe(
JNIEnv *env, jobject jthis, jint source, jint tag, jlong comm)
{
MPI_Message message = (MPI_Message)((*env)->GetLongField(
env, jthis, ompi_java.MessageHandle));
int rc, flag;
MPI_Status status;
rc = MPI_Improbe(source, tag, (MPI_Comm)comm, &flag, &message, &status);
if(ompi_java_exceptionCheck(env, rc) || !flag)
return NULL;
(*env)->SetLongField(env, jthis, ompi_java.MessageHandle, (jlong)message);
jobject stat = ompi_java_status_new(&status, env);
return stat;
}
JNIEXPORT void JNICALL Java_mpi_Message_mRecv(
JNIEnv *env, jobject jthis, jobject buf, jint offset, jint count,
jobject jType, jobject stat)
{
MPI_Message msg = (MPI_Message)((*env)->GetLongField(
env, jthis, ompi_java.MessageHandle));
MPI_Datatype type = (MPI_Datatype)((*env)->GetLongField(
env, jType, ompi_java.DatatypeHandle));
int bType = (*env)->GetIntField(env, jType, ompi_java.DatatypeBaseType);
void *bufPtr, *bufBase;
bufPtr = ompi_java_getBufPtr(&bufBase, env, buf, bType, offset);
MPI_Status status;
int rc = MPI_Mrecv(bufPtr, count, type, &msg, &status);
if(!ompi_java_exceptionCheck(env, rc))
{
(*env)->SetLongField(env, jthis, ompi_java.MessageHandle, (jlong)msg);
ompi_java_status_set(&status, env, stat);
}
ompi_java_releaseBufPtr(env, buf, bufBase, bType);
}
JNIEXPORT jlong JNICALL Java_mpi_Message_imRecv(
JNIEnv *env, jobject jthis, jobject buf, jint count, jlong type)
{
MPI_Message msg = (MPI_Message)((*env)->GetLongField(
env, jthis, ompi_java.MessageHandle));
void *ptr = (*env)->GetDirectBufferAddress(env, buf);
MPI_Request request;
int rc = MPI_Imrecv(ptr, count, (MPI_Datatype)type, &msg, &request);
ompi_java_exceptionCheck(env, rc);
(*env)->SetLongField(env, jthis, ompi_java.MessageHandle, (jlong)msg);
return (jlong)request;
}
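Usage note: together these natives expose MPI-3 matched probe/receive. A sketch of the intended usage, assuming the Java-side mpi.Message wrapper mirrors the natives above (mProbe(source, tag, comm) returning a Status, mRecv(buf, count, type)); the exact Java signatures are outside this excerpt:

import mpi.*;

public class MprobeDemo
{
    public static void main(String[] args) throws MPIException
    {
        MPI.Init(args);
        int me = MPI.COMM_WORLD.getRank();

        if(me == 0)
        {
            MPI.COMM_WORLD.send(new int[] {42}, 1, MPI.INT, 1, 0);
        }
        else if(me == 1)
        {
            Message msg = new Message();
            // Match the message first, size the buffer from the
            // returned status, then receive that specific message.
            Status st = msg.mProbe(0, 0, MPI.COMM_WORLD);
            int[] buf = new int[st.getCount(MPI.INT)];
            msg.mRecv(buf, buf.length, MPI.INT);
            System.out.println("got " + buf[0]);
        }
        MPI.Finalize();
    }
}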

ompi/mpi/java/c/mpi_Op.c
@@ -14,7 +14,7 @@
/*
 * File : mpi_Op.c
 * Headerfile : mpi_Op.h
 * Author : Xinying Li, Bryan Carpenter
 * Created : Thu Apr 9 12:22:15 1998
 * Revision : $Revision: 1.7 $
@@ -31,42 +31,137 @@
#include "mpi.h"
#include "mpi_Op.h"
#include "mpiJava.h"
#include "ompi/op/op.h"
/*
 * Class: mpi_Op
 * Method: init
 * Signature: ()V
 */
JNIEXPORT void JNICALL Java_mpi_Op_init(JNIEnv *env, jclass clazz)
{
ompi_java.OpHandle = (*env)->GetFieldID(env, clazz, "handle", "J");
ompi_java.OpCommute = (*env)->GetFieldID(env, clazz, "commute", "Z");
ompi_java.OpCall = (*env)->GetMethodID(env, clazz, "call",
"(Ljava/lang/Object;Ljava/lang/Object;I)V");
}
/*
 * Class: mpi_Op
 * Method: getOp
 * Signature: (I)J
 */
JNIEXPORT void JNICALL Java_mpi_Op_getOp(JNIEnv *env, jobject jthis, jint type)
{
static MPI_Op Ops[] = {
MPI_OP_NULL, MPI_MAX, MPI_MIN, MPI_SUM,
MPI_PROD, MPI_LAND, MPI_BAND, MPI_LOR, MPI_BOR, MPI_LXOR,
MPI_BXOR, MPI_MINLOC, MPI_MAXLOC
};
(*env)->SetLongField(env, jthis, ompi_java.OpHandle, (jlong)Ops[type]);
}
static jobject setBooleanArray(JNIEnv *env, void *vec, int len)
{
jobject obj = (*env)->NewBooleanArray(env, len);
if(obj != NULL)
(*env)->SetBooleanArrayRegion(env, obj, 0, len, vec);
return obj;
}
static void getBooleanArray(JNIEnv *env, jobject obj, void *vec, int len)
{
(*env)->GetBooleanArrayRegion(env, obj, 0, len, vec);
}
static void opIntercept(void *invec, void *inoutvec, int *count,
MPI_Datatype *datatype, int baseType,
void *jnienv, void *object)
{
JNIEnv *env = jnienv;
jobject jthis = object;
jobject jin, jio;
MPI_Aint extent;
int rc = MPI_Type_extent(*datatype, &extent);
if(ompi_java_exceptionCheck(env, rc))
return;
int len = (*count) * extent;
if(baseType == 4)
{
jin = setBooleanArray(env, invec, len);
jio = setBooleanArray(env, inoutvec, len);
}
else
{
jin = (*env)->NewDirectByteBuffer(env, invec, len);
jio = (*env)->NewDirectByteBuffer(env, inoutvec, len);
}
if((*env)->ExceptionCheck(env))
return;
(*env)->CallVoidMethod(env, jthis, ompi_java.OpCall, jin, jio, *count);
if(baseType == 4)
getBooleanArray(env, jio, inoutvec, len);
(*env)->DeleteLocalRef(env, jin);
(*env)->DeleteLocalRef(env, jio);
}
MPI_Op ompi_java_op_getHandle(JNIEnv *env, jobject jthis, int baseType)
{
MPI_Op op = (MPI_Op)((*env)->GetLongField(env, jthis, ompi_java.OpHandle));
if(op == NULL)
{
/* It is an uninitialized user Op. */
int commute = (*env)->GetBooleanField(env, jthis, ompi_java.OpCommute);
int rc = MPI_Op_create((MPI_User_function*)opIntercept, commute, &op);
if(ompi_java_exceptionCheck(env, rc))
return NULL;
(*env)->SetLongField(env, jthis, ompi_java.OpHandle, (jlong)op);
ompi_op_set_java_callback(op, env, jthis, baseType);
}
return op;
}
/*
 * Class: mpi_Op
 * Method: free
 * Signature: ()V
 */
JNIEXPORT void JNICALL Java_mpi_Op_free(JNIEnv *env, jobject jthis)
{
MPI_Op op = (MPI_Op)((*env)->GetLongField(env, jthis, ompi_java.OpHandle));
if(op != NULL && op != MPI_OP_NULL)
{
int rc = MPI_Op_free(&op);
ompi_java_exceptionCheck(env, rc);
(*env)->SetLongField(env, jthis, ompi_java.OpHandle, (jlong)MPI_OP_NULL);
}
}
/*
 * Class: mpi_Op
 * Method: isNull
 * Signature: ()Z
 */
JNIEXPORT jboolean JNICALL Java_mpi_Op_isNull(JNIEnv *env, jobject jthis)
{
MPI_Op op = (MPI_Op)((*env)->GetLongField(env, jthis, ompi_java.OpHandle));
return op == NULL || op == MPI_OP_NULL ? JNI_TRUE : JNI_FALSE;
}
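Usage note: opIntercept() is what makes user-defined reductions work. It wraps invec/inoutvec in direct ByteBuffers (or boolean arrays for base type 4) and invokes the Op's call(Object, Object, int) method looked up in init(). A sketch of a user operation; how a user op is actually supplied (subclassing vs. a function object, and the constructor shape) is not shown in this diff, so treat those parts as assumptions:

import java.nio.ByteBuffer;
import java.nio.IntBuffer;
import mpi.*;

// Element-wise integer sum. The C side hands call() the two vectors
// as direct ByteBuffers, so the body just combines them in place.
public class VecSum extends Op        // subclassing is an assumption
{
    public VecSum()
    {
        super(true);                  // commutative; ctor shape assumed
    }

    @Override public void call(Object in, Object inOut, int count)
    {
        IntBuffer a = ((ByteBuffer)in).asIntBuffer(),
                  b = ((ByteBuffer)inOut).asIntBuffer();

        for(int i = 0; i < count; i++)
            b.put(i, a.get(i) + b.get(i));
    }
}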

ompi/mpi/java/c/mpi_Prequest.c (new file)
@@ -0,0 +1,59 @@
#include "ompi_config.h"
#include <stdlib.h>
#include <assert.h>
#ifdef HAVE_TARGETCONDITIONALS_H
#include <TargetConditionals.h>
#endif
#include "mpi.h"
#include "mpi_Prequest.h"
#include "mpiJava.h"
/*
* Class: mpi_Prequest
* Method: start_jni
* Signature: (J)V
*/
JNIEXPORT void JNICALL Java_mpi_Prequest_start_1jni(JNIEnv *env, jobject jthis)
{
MPI_Request request = (MPI_Request)(*env)->GetLongField(
env, jthis, ompi_java.ReqHandle);
int rc = MPI_Start(&request);
ompi_java_exceptionCheck(env, rc);
(*env)->SetLongField(env, jthis, ompi_java.ReqHandle, (jlong)request);
}
/*
* Class: mpi_Prequest
* Method: startAll_jni
* Signature: ([Lmpi/Prequest;)V
*/
JNIEXPORT void JNICALL Java_mpi_Prequest_startAll_1jni(
JNIEnv *env, jclass clazz, jobjectArray prequests)
{
int i, count = (*env)->GetArrayLength(env, prequests);
MPI_Request *requests = calloc(count, sizeof(MPI_Request));
for(i = 0; i < count; i++)
{
jobject r = (*env)->GetObjectArrayElement(env, prequests, i);
requests[i] = (MPI_Request)(*env)->GetLongField(
env, r, ompi_java.ReqHandle);
(*env)->DeleteLocalRef(env, r);
}
int rc = MPI_Startall(count, requests);
ompi_java_exceptionCheck(env, rc);
for(i = 0; i < count; i++)
{
jobject r = (*env)->GetObjectArrayElement(env, prequests, i);
(*env)->SetLongField(env, r, ompi_java.ReqHandle, (jlong)requests[i]);
(*env)->DeleteLocalRef(env, r);
}
free(requests);
}
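Usage note: persistent requests pay the setup cost once and are restarted on each iteration. A sketch, assuming the communicator-side factory methods are named sendInit/recvInit and that Request exposes waitFor() (since wait() is taken by java.lang.Object); those names follow the bindings' conventions but are not confirmed by this excerpt:

import mpi.*;

public class PersistentDemo
{
    public static void main(String[] args) throws MPIException
    {
        MPI.Init(args);
        int me   = MPI.COMM_WORLD.getRank();
        int peer = me == 0 ? 1 : 0;
        int[] buf = new int[] {me};

        Prequest req = me == 0
            ? MPI.COMM_WORLD.sendInit(buf, 1, MPI.INT, peer, 0)
            : MPI.COMM_WORLD.recvInit(buf, 1, MPI.INT, peer, 0);

        for(int iter = 0; iter < 3; iter++)
        {
            req.start();    // lands in Java_mpi_Prequest_start_1jni
            req.waitFor();  // method name assumed
        }

        req.free();
        MPI.Finalize();
    }
}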

(Diff for one file not shown because of its size.)

ompi/mpi/java/c/mpi_Status.c
@@ -13,7 +13,7 @@
/*
 * File : mpi_Status.c
 * Headerfile : mpi_Status.h
 * Author : Sung-Hoon Ko, Xinying Li
 * Created : Thu Apr 9 12:22:15 1998
 * Revision : $Revision: 1.9 $
@@ -33,226 +33,104 @@
#include "mpi_Status.h"
#include "mpiJava.h"
/*
 * Class: mpi_Status
 * Method: init
 * Signature: ()V
 */
JNIEXPORT void JNICALL Java_mpi_Status_init(JNIEnv *env, jclass c)
{
ompi_java.StatusInit = (*env)->GetMethodID(env, c, "<init>", "()V");
ompi_java.StSource = (*env)->GetFieldID(env, c, "source", "I");
ompi_java.StTag = (*env)->GetFieldID(env, c, "tag", "I");
ompi_java.StError = (*env)->GetFieldID(env, c, "error", "I");
ompi_java.St_cancelled = (*env)->GetFieldID(env, c, "_cancelled", "I");
ompi_java.St_ucount = (*env)->GetFieldID(env, c, "_ucount", "J");
ompi_java.StIndex = (*env)->GetFieldID(env, c, "index", "I");
ompi_java.StElements = (*env)->GetFieldID(env, c, "elements", "I");
}
/*
 * Class: mpi_Status
 * Method: getCount_jni
 * Signature: (Lmpi/Datatype;)I
 */
JNIEXPORT jint JNICALL Java_mpi_Status_getCount_1jni(
JNIEnv *env, jobject jthis, jobject type)
{
MPI_Status stat;
ompi_java_status_get(&stat, env, jthis);
MPI_Datatype datatype = (MPI_Datatype)(*env)->GetLongField(
env, type, ompi_java.DatatypeHandle);
int count;
int rc = MPI_Get_count(&stat, datatype, &count);
ompi_java_exceptionCheck(env, rc);
return count;
}
/*
 * Class: mpi_Status
 * Method: isCancelled_jni
 * Signature: ()Z
 */
JNIEXPORT jboolean JNICALL Java_mpi_Status_isCancelled_1jni(
JNIEnv *env, jobject jthis)
{
int flag;
MPI_Status stat;
ompi_java_status_get(&stat, env, jthis);
int rc = MPI_Test_cancelled(&stat, &flag);
ompi_java_exceptionCheck(env, rc);
return flag == 0 ? JNI_FALSE : JNI_TRUE;
}
/*
 * Class: mpi_Status
 * Method: getElements_jni
 * Signature: (Lmpi/Datatype;)I
 */
JNIEXPORT jint JNICALL Java_mpi_Status_getElements_1jni(
JNIEnv *env, jobject jthis, jobject type)
{
MPI_Status stat;
ompi_java_status_get(&stat, env, jthis);
MPI_Datatype datatype = (MPI_Datatype)(*env)->GetLongField(
env, type, ompi_java.DatatypeHandle);
int count;
int rc = MPI_Get_elements(&stat, datatype, &count);
ompi_java_exceptionCheck(env, rc);
return count;
}
void ompi_java_status_get(MPI_Status *status, JNIEnv *env, jobject obj)
{
/* Copy the whole thing to C. */
status->MPI_SOURCE = (*env)->GetIntField(env, obj, ompi_java.StSource);
status->MPI_TAG = (*env)->GetIntField(env, obj, ompi_java.StTag);
status->MPI_ERROR = (*env)->GetIntField(env, obj, ompi_java.StError);
status->_cancelled = (*env)->GetIntField(env, obj, ompi_java.St_cancelled);
status->_ucount = (*env)->GetLongField(env, obj, ompi_java.St_ucount);
}
void ompi_java_status_set(MPI_Status *status, JNIEnv *env, jobject obj)
{
/* Copy the whole thing to Java. */
(*env)->SetIntField(env, obj, ompi_java.StSource, status->MPI_SOURCE);
(*env)->SetIntField(env, obj, ompi_java.StTag, status->MPI_TAG);
(*env)->SetIntField(env, obj, ompi_java.StError, status->MPI_ERROR);
(*env)->SetIntField(env, obj, ompi_java.St_cancelled, status->_cancelled);
(*env)->SetLongField(env, obj, ompi_java.St_ucount, status->_ucount);
}
jobject ompi_java_status_new(MPI_Status *status, JNIEnv *env)
{
jobject s = (*env)->NewObject(env, ompi_java.StatusClass,
ompi_java.StatusInit);
ompi_java_status_set(status, env, s);
return s;
}

ompi/mpi/java/c/mpi_Win.c (new file)
@@ -0,0 +1,333 @@
#include "ompi_config.h"
#include <stdlib.h>
#ifdef HAVE_TARGETCONDITIONALS_H
#include <TargetConditionals.h>
#endif
#include "mpi.h"
#include "mpi_Win.h"
#include "mpiJava.h"
/*
* Class: mpi_Win
* Method: createWin
* Signature: (Ljava/nio/Buffer;IIJJ)J
*/
JNIEXPORT jlong JNICALL Java_mpi_Win_createWin(
JNIEnv *env, jobject jthis, jobject jBase,
jint size, jint dispUnit, jlong info, jlong comm)
{
void *base = (*env)->GetDirectBufferAddress(env, jBase);
MPI_Win win;
int rc = MPI_Win_create(base, (MPI_Aint)size, dispUnit,
(MPI_Info)info, (MPI_Comm)comm, &win);
ompi_java_exceptionCheck(env, rc);
return (jlong)win;
}
/*
* Class: mpi_Win
* Method: getGroup
* Signature: (J)J
*/
JNIEXPORT jlong JNICALL Java_mpi_Win_getGroup(
JNIEnv *env, jobject jthis, jlong win)
{
MPI_Group group;
int rc = MPI_Win_get_group((MPI_Win)win, &group);
ompi_java_exceptionCheck(env, rc);
return (jlong)group;
}
/*
* Class: mpi_Win
* Method: put
* Signature: (JLjava/lang/Object;IJIIIJI)V
*/
JNIEXPORT void JNICALL Java_mpi_Win_put(
JNIEnv *env, jobject jthis, jlong win, jobject origin,
jint orgCount, jlong orgType, jint targetRank, jint targetDisp,
jint targetCount, jlong targetType, jint baseType)
{
void *orgPtr = (*env)->GetDirectBufferAddress(env, origin);
int rc = MPI_Put(orgPtr, orgCount, (MPI_Datatype)orgType,
targetRank, (MPI_Aint)targetDisp, targetCount,
(MPI_Datatype)targetType, (MPI_Win)win);
ompi_java_exceptionCheck(env, rc);
}
/*
* Class: mpi_Win
* Method: get
* Signature: (JLjava/lang/Object;IJIIIJI)V
*/
JNIEXPORT void JNICALL Java_mpi_Win_get(
JNIEnv *env, jobject jthis, jlong win, jobject origin,
jint orgCount, jlong orgType, jint targetRank, jint targetDisp,
jint targetCount, jlong targetType, jint baseType)
{
void *orgPtr = (*env)->GetDirectBufferAddress(env, origin);
int rc = MPI_Get(orgPtr, orgCount, (MPI_Datatype)orgType,
targetRank, (MPI_Aint)targetDisp, targetCount,
(MPI_Datatype)targetType, (MPI_Win)win);
ompi_java_exceptionCheck(env, rc);
}
/*
* Class: mpi_Win
* Method: accumulate
* Signature: (JLjava/lang/Object;IJIIIJLmpi/Op;I)V
*/
JNIEXPORT void JNICALL Java_mpi_Win_accumulate(
JNIEnv *env, jobject jthis, jlong win,
jobject origin, jint orgCount, jlong orgType,
jint targetRank, jint targetDisp, jint targetCount, jlong targetType,
jobject op, jint baseType)
{
void *orgPtr = (*env)->GetDirectBufferAddress(env, origin);
MPI_Op mpiOp = ompi_java_op_getHandle(env, op, baseType);
int rc = MPI_Accumulate(orgPtr, orgCount, (MPI_Datatype)orgType,
targetRank, (MPI_Aint)targetDisp, targetCount,
(MPI_Datatype)targetType, mpiOp, (MPI_Win)win);
ompi_java_exceptionCheck(env, rc);
}
/*
* Class: mpi_Win
* Method: fence
* Signature: (JI)V
*/
JNIEXPORT void JNICALL Java_mpi_Win_fence(
JNIEnv *env, jobject jthis, jlong win, jint assertion)
{
int rc = MPI_Win_fence(assertion, (MPI_Win)win);
ompi_java_exceptionCheck(env, rc);
}
/*
* Class: mpi_Win
* Method: start
* Signature: (JJI)V
*/
JNIEXPORT void JNICALL Java_mpi_Win_start(
JNIEnv *env, jobject jthis, jlong win, jlong group, jint assertion)
{
int rc = MPI_Win_start((MPI_Group)group, assertion, (MPI_Win)win);
ompi_java_exceptionCheck(env, rc);
}
/*
* Class: mpi_Win
* Method: complete
* Signature: (J)V
*/
JNIEXPORT void JNICALL Java_mpi_Win_complete(
JNIEnv *env, jobject jthis, jlong win)
{
int rc = MPI_Win_complete((MPI_Win)win);
ompi_java_exceptionCheck(env, rc);
}
/*
* Class: mpi_Win
* Method: post
* Signature: (JJI)V
*/
JNIEXPORT void JNICALL Java_mpi_Win_post(
JNIEnv *env, jobject jthis, jlong win, jlong group, jint assertion)
{
int rc = MPI_Win_post((MPI_Group)group, assertion, (MPI_Win)win);
ompi_java_exceptionCheck(env, rc);
}
/*
* Class: mpi_Win
* Method: waitFor
* Signature: (J)V
*/
JNIEXPORT void JNICALL Java_mpi_Win_waitFor(
JNIEnv *env, jobject jthis, jlong win)
{
int rc = MPI_Win_wait((MPI_Win)win);
ompi_java_exceptionCheck(env, rc);
}
/*
* Class: mpi_Win
* Method: test
* Signature: (J)Z
*/
JNIEXPORT jboolean JNICALL Java_mpi_Win_test(
JNIEnv *env, jobject jthis, jlong win)
{
int flag;
int rc = MPI_Win_test((MPI_Win)win, &flag);
ompi_java_exceptionCheck(env, rc);
return flag ? JNI_TRUE : JNI_FALSE;
}
/*
* Class: mpi_Win
* Method: lock
* Signature: (JIII)V
*/
JNIEXPORT void JNICALL Java_mpi_Win_lock(
JNIEnv *env, jobject jthis, jlong win,
jint lockType, jint rank, jint assertion)
{
int rc = MPI_Win_lock(lockType, rank, assertion, (MPI_Win)win);
ompi_java_exceptionCheck(env, rc);
}
/*
* Class: mpi_Win
* Method: unlock
* Signature: (JI)V
*/
JNIEXPORT void JNICALL Java_mpi_Win_unlock(
JNIEnv *env, jobject jthis, jlong win, jint rank)
{
int rc = MPI_Win_unlock(rank, (MPI_Win)win);
ompi_java_exceptionCheck(env, rc);
}
/*
* Class: mpi_Win
* Method: setErrhandler
* Signature: (JJ)V
*/
JNIEXPORT void JNICALL Java_mpi_Win_setErrhandler(
JNIEnv *env, jobject jthis, jlong win, jlong errhandler)
{
int rc = MPI_Win_set_errhandler((MPI_Win)win, (MPI_Errhandler)errhandler);
ompi_java_exceptionCheck(env, rc);
}
/*
* Class: mpi_Win
* Method: callErrhandler
* Signature: (JI)V
*/
JNIEXPORT void JNICALL Java_mpi_Win_callErrhandler(
JNIEnv *env, jobject jthis, jlong win, jint errorCode)
{
int rc = MPI_Win_call_errhandler((MPI_Win)win, errorCode);
ompi_java_exceptionCheck(env, rc);
}
static int winCopyAttr(MPI_Win oldwin, int keyval, void *extraState,
void *attrValIn, void *attrValOut, int *flag)
{
return ompi_java_attrCopy(attrValIn, attrValOut, flag);
}
static int winDeleteAttr(MPI_Win oldwin, int keyval,
void *attrVal, void *extraState)
{
return ompi_java_attrDelete(attrVal);
}
/*
* Class: mpi_Win
* Method: createKeyval_jni
* Signature: ()I
*/
JNIEXPORT jint JNICALL Java_mpi_Win_createKeyval_1jni(JNIEnv *env, jclass clazz)
{
int rc, keyval;
rc = MPI_Win_create_keyval(winCopyAttr, winDeleteAttr, &keyval, NULL);
ompi_java_exceptionCheck(env, rc);
return keyval;
}
/*
* Class: mpi_Win
* Method: freeKeyval_jni
* Signature: (I)V
*/
JNIEXPORT void JNICALL Java_mpi_Win_freeKeyval_1jni(
JNIEnv *env, jclass clazz, jint keyval)
{
int rc = MPI_Win_free_keyval((int*)(&keyval));
ompi_java_exceptionCheck(env, rc);
}
/*
* Class: mpi_Win
* Method: setAttr
* Signature: (JI[B)V
*/
JNIEXPORT void JNICALL Java_mpi_Win_setAttr(
JNIEnv *env, jobject jthis, jlong win, jint keyval, jbyteArray jval)
{
void *cval = ompi_java_attrSet(env, jval);
int rc = MPI_Win_set_attr((MPI_Win)win, keyval, cval);
ompi_java_exceptionCheck(env, rc);
}
/*
* Class: mpi_Win
* Method: getAttr
* Signature: (JI)Ljava/lang/Object;
*/
JNIEXPORT jobject JNICALL Java_mpi_Win_getAttr(
JNIEnv *env, jobject jthis, jlong win, jint keyval)
{
int flag;
void *val;
int rc = MPI_Win_get_attr((MPI_Win)win, keyval, &val, &flag);
if(ompi_java_exceptionCheck(env, rc) || !flag)
return NULL;
switch(keyval)
{
case MPI_WIN_SIZE:
return ompi_java_Integer_valueOf(env, (jint)(*((MPI_Aint*)val)));
case MPI_WIN_DISP_UNIT:
return ompi_java_Integer_valueOf(env, (jint)(*((int*)val)));
case MPI_WIN_BASE:
return ompi_java_Long_valueOf(env, (jlong)val);
default:
return ompi_java_attrGet(env, val);
}
}
/*
* Class: mpi_Win
* Method: deleteAttr
* Signature: (JI)V
*/
JNIEXPORT void JNICALL Java_mpi_Win_deleteAttr(
JNIEnv *env, jobject jthis, jlong win, jint keyval)
{
int rc = MPI_Win_delete_attr((MPI_Win)win, keyval);
ompi_java_exceptionCheck(env, rc);
}
/*
* Class: mpi_Win
* Method: free
* Signature: (J)J
*/
JNIEXPORT jlong JNICALL Java_mpi_Win_free(
JNIEnv *env, jobject jthis, jlong handle)
{
MPI_Win win = (MPI_Win)handle;
int rc = MPI_Win_free(&win);
ompi_java_exceptionCheck(env, rc);
return (jlong)win;
}
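Usage note: all of the one-sided natives above take raw addresses via GetDirectBufferAddress, so both the window memory and the origin buffers must be direct buffers (e.g. from MPI.newIntBuffer). A sketch of a fenced get, assuming the Java-side Win constructor mirrors createWin (base, size in bytes, dispUnit, info, comm); the exact Java signatures are outside this excerpt:

import mpi.*;
import java.nio.IntBuffer;

public class WindowDemo
{
    public static void main(String[] args) throws MPIException
    {
        MPI.Init(args);
        int me   = MPI.COMM_WORLD.getRank();
        int size = MPI.COMM_WORLD.getSize();

        IntBuffer winBuf = MPI.newIntBuffer(1);  // window memory
        IntBuffer origin = MPI.newIntBuffer(1);  // local result
        winBuf.put(0, me);

        // 4 bytes of window, displacement unit of one int.
        Win win = new Win(winBuf, 4, 4, MPI.INFO_NULL, MPI.COMM_WORLD);

        win.fence(0);
        // Read the rank stored by the right-hand neighbor.
        win.get(origin, 1, MPI.INT, (me + 1) % size, 0, 1, MPI.INT);
        win.fence(0);

        System.out.println(me + " read " + origin.get(0));
        win.free();
        MPI.Finalize();
    }
}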

ompi/mpi/java/java/CartComm.java (new file)
@@ -0,0 +1,178 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
* File : Cartcomm.java
* Author : Xinying Li
* Created : Thu Apr 9 12:22:15 1998
* Revision : $Revision: 1.7 $
* Updated : $Date: 2001/10/22 21:07:55 $
* Copyright: Northeast Parallel Architectures Center
* at Syracuse University 1998
*/
package mpi;
/**
* Communicator with cartesian structure.
*/
public final class CartComm extends Intracomm
{
static
{
init();
}
private static native void init();
protected CartComm(long handle) throws MPIException
{
super(handle);
}
/**
* Duplicate this communicator.
* <p>Java binding of the MPI operation {@code MPI_COMM_DUP}.
* <p>The new communicator is "congruent" to the old one,
* but has a different context.
* @return copy of this communicator
*/
@Override public CartComm clone()
{
try
{
MPI.check();
return new CartComm(dup());
}
catch(MPIException e)
{
throw new RuntimeException(e.getMessage());
}
}
/**
* Returns cartesian topology information.
* <p>Java binding of the MPI operations {@code MPI_CARTDIM_GET} and
* {@code MPI_CART_GET}.
* <p>The number of dimensions can be obtained from the
* {@code getDimCount()} method of the returned object.
* @return object containing dimensions, periods and local coordinates
* @throws MPIException
*/
public CartParms getTopo() throws MPIException
{
MPI.check();
return getTopo(handle);
}
private native CartParms getTopo(long comm) throws MPIException;
/**
* Translate logical process coordinates to process rank.
* <p>Java binding of the MPI operation {@code MPI_CART_RANK}.
* @param coords Cartesian coordinates of a process
* @return rank of the specified process
* @throws MPIException
*/
public int getRank(int[] coords) throws MPIException
{
MPI.check();
return getRank(handle, coords);
}
private native int getRank(long comm, int[] coords) throws MPIException;
/**
* Translate process rank to logical process coordinates.
* <p>Java binding of the MPI operation {@code MPI_CART_COORDS}.
* @param rank rank of a process
* @return Cartesian coordinates of the specified process
* @throws MPIException
*/
public int[] getCoords(int rank) throws MPIException
{
MPI.check();
return getCoords(handle, rank);
}
private native int[] getCoords(long comm, int rank) throws MPIException;
/**
* Compute source and destination ranks for "shift" communication.
* <p>Java binding of the MPI operation {@code MPI_CART_SHIFT}.
* @param direction coordinate dimension of shift
* @param disp displacement
* @return object containing ranks of source and destination processes
* @throws MPIException
*/
public ShiftParms shift(int direction, int disp) throws MPIException
{
MPI.check();
return shift(handle, direction, disp);
}
private native ShiftParms shift(long comm, int direction, int disp)
throws MPIException;
/**
* Partition cartesian communicator into subgroups of lower dimension.
* <p>Java binding of the MPI operation {@code MPI_CART_SUB}.
* @param remainDims by dimension, {@code true} if dimension is to be kept,
* {@code false} otherwise
* @return communicator containing subgrid including this process
* @throws MPIException
*/
public CartComm sub(boolean[] remainDims) throws MPIException
{
MPI.check();
return new CartComm(sub(handle, remainDims));
}
private native long sub(long comm, boolean[] remainDims) throws MPIException;
/**
* Compute an optimal placement.
* <p>Java binding of the MPI operation {@code MPI_CART_MAP}.
* <p>The number of dimensions is taken to be size of the {@code dims} argument.
* @param dims the number of processes in each dimension
* @param periods {@code true} if grid is periodic,
* {@code false} if not, in each dimension
* @return reordered rank of calling process
* @throws MPIException
*/
public int map(int[] dims, boolean[] periods) throws MPIException
{
MPI.check();
return map(handle, dims, periods);
}
private native int map(long comm, int[] dims, boolean[] periods)
throws MPIException;
/**
* Select a balanced distribution of processes per coordinate direction.
* <p>Java binding of the MPI operation {@code MPI_DIMS_CREATE}.
* @param nnodes number of nodes in a grid
* @param dims array specifying the number of nodes in each dimension
* @throws MPIException
*/
public static void createDims(int nnodes, int[] dims) throws MPIException
{
MPI.check();
createDims_jni(nnodes, dims);
}
private static native void createDims_jni(int nnodes, int[] dims)
throws MPIException;
} // CartComm
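Usage note: a sketch of the class in use; pick a balanced grid with createDims, build the topology, then query it. The factory method (createCart on Intracomm) and the ShiftParms accessor names are assumptions; everything else is the API shown above:

import mpi.*;

public class CartDemo
{
    public static void main(String[] args) throws MPIException
    {
        MPI.Init(args);
        int size = MPI.COMM_WORLD.getSize();

        int[] dims = new int[] {0, 0};       // 0 = let MPI choose
        CartComm.createDims(size, dims);

        CartComm cart = MPI.COMM_WORLD.createCart(      // name assumed
                        dims, new boolean[] {true, true}, false);

        CartParms parms  = cart.getTopo();
        ShiftParms shift = cart.shift(0, 1);

        System.out.println("rank " + cart.getRank()
            + " coord0=" + parms.getCoord(0)
            + " dest="   + shift.getRankDest());        // name assumed
        MPI.Finalize();
    }
}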

ompi/mpi/java/java/CartParms.java
@@ -23,9 +23,70 @@
package mpi;

/**
 * Cartesian topology information associated with a communicator.
 */
public final class CartParms
{
/** Number of processes for each cartesian dimension. */
private final int[] dims;

/** Periodicity (true/false) for each cartesian dimension. */
private final boolean[] periods;

/** Coordinates of calling process in cartesian structure. */
private final int[] coords;

/**
 * Constructs a cartesian topology information object.
 * @param dims    number of processes for each cartesian dimension.
 * @param periods periodicity (true/false) for each cartesian dimension.
 * @param coords  coordinates of calling process in cartesian structure.
 */
protected CartParms(int[] dims, boolean[] periods, int[] coords)
{
this.dims = dims;
this.periods = periods;
this.coords = coords;
}
/**
* Returns the number of dimensions.
* @return number of dimensions.
*/
public int getDimCount()
{
return dims.length;
}
/**
* Returns the number of processes for a cartesian dimension.
* @param i cartesian dimension.
* @return number of processes for a cartesian dimension.
*/
public int getDim(int i)
{
return dims[i];
}
/**
* Returns the periodicity (true/false) for a cartesian dimension.
* @param i cartesian dimension.
* @return periodicity for a cartesian dimension.
*/
public boolean getPeriod(int i)
{
return periods[i];
}
/**
* Returns the coordinate of calling process for a cartesian dimension.
* @param i cartesian dimension.
* @return coordinate of calling process for a cartesian dimension.
*/
public int getCoord(int i)
{
return coords[i];
}
} // CartParms

ompi/mpi/java/java/Cartcomm.java (deleted)
@@ -1,160 +0,0 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
* File : Cartcomm.java
* Author : Xinying Li
* Created : Thu Apr 9 12:22:15 1998
* Revision : $Revision: 1.7 $
* Updated : $Date: 2001/10/22 21:07:55 $
* Copyright: Northeast Parallel Architectures Center
* at Syracuse University 1998
*/
package mpi;
public class Cartcomm extends Intracomm {
protected Cartcomm(long handle) throws MPIException {
super(handle) ;
}
public Object clone() {
try {
return new Cartcomm(super.dup()) ;
}
catch (MPIException e) {
throw new RuntimeException(e.getMessage()) ;
}
}
/**
* Returns Cartesian topology information.
* <p>
* <table>
* <tr><td><em> returns: </em></td><td> object containing dimensions,
* periods and local coordinates </tr>
* </table>
* <p>
* Java binding of the MPI operations <tt>MPI_CARTDIM_GET</tt> and
* <tt>MPI_CART_GET</tt>.
* <p>
* The number of dimensions can be obtained from the size of (eg)
* <tt>dims</tt> field of the returned object.
*/
public native CartParms Get() throws MPIException ;
/**
* Translate logical process coordinates to process rank.
* <p>
* <table>
* <tr><td><tt> coords </tt></td><td> Cartesian coordinates of a
* process </tr>
* <tr><td><em> returns: </em></td><td> rank of the specified process </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_CART_RANK</tt>.
*/
public native int Rank(int[] coords) throws MPIException ;
/**
* Translate process rank to logical process coordinates.
* <p>
* <table>
* <tr><td><tt> rank </tt></td><td> rank of a process </tr>
* <tr><td><em> returns: </em></td><td> Cartesian coordinates of the
* specified process </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_CART_COORDS</tt>.
*/
public native int [] Coords(int rank) throws MPIException ;
/**
* Compute source and destination ranks for ``shift'' communication.
* <p>
* <table>
* <tr><td><tt> direction </tt></td><td> coordinate dimension of shift </tr>
* <tr><td><tt> disp </tt></td><td> displacement </tr>
* <tr><td><em> returns: </em></td><td> object containing ranks of source
* and destination processes </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_CART_SHIFT</tt>.
*/
public native ShiftParms Shift(int direction, int disp) throws MPIException ;
/**
* Partition Cartesian communicator into subgroups of lower dimension.
* <p>
* <table>
* <tr><td><tt> remain_dims </tt></td><td> by dimension, <tt>true</tt> if
* dimension is to be kept,
* <tt>false</tt> otherwise </tr>
* <tr><td><em> returns: </em></td><td> communicator containing subgrid
* including this process </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_CART_SUB</tt>.
*/
public Cartcomm Sub(boolean [] remain_dims) throws MPIException {
return new Cartcomm(sub(remain_dims)) ;
}
private native long sub(boolean [] remain_dims);
/**
* Compute an optimal placement.
* <p>
* <table>
* <tr><td><tt> dims </tt></td><td> the number of processes in each
* dimension </tr>
* <tr><td><tt> periods </tt></td><td> <tt>true</tt> if grid is periodic,
* <tt>false</tt> if not, in each
* dimension </tr>
* <tr><td><em> returns: </em></td><td> reordered rank of calling
* process </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_CART_MAP</tt>.
* <p>
* The number of dimensions is taken to be size of the <tt>dims</tt> argument.
*/
public native int Map(int [] dims, boolean [] periods) throws MPIException ;
/**
* Select a balanced distribution of processes per coordinate direction.
* <p>
* <table>
* <tr><td><tt> nnodes </tt></td><td> number of nodes in a grid </tr>
* <tr><td><tt> ndims </tt></td><td> number of dimensions of grid </tr>
* <tr><td><tt> dims </tt></td><td> array specifying the number of nodes
* in each dimension </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_DIMS_CREATE</tt>.
* <p>
* Size <tt>dims</tt> should be <tt>ndims</tt>. Note that
* <tt>dims</tt> is an <em>inout</em> parameter.
*/
static public native void Dims_create(int nnodes, int[] dims)
throws MPIException ;
}

(Diff for one file not shown because of its size.)

ompi/mpi/java/java/Constant.java (new file)
@@ -0,0 +1,112 @@
/*
* Copyright (c) 2013 Cisco Systems, Inc. All rights reserved.
*
* $COPYRIGHT$
*
* Additional copyrights may follow.
*/
package mpi;
class Constant
{
protected int THREAD_SINGLE, THREAD_FUNNELED, THREAD_SERIALIZED,
THREAD_MULTIPLE;
protected int GRAPH, DIST_GRAPH, CART;
protected int ANY_SOURCE, ANY_TAG;
protected int PROC_NULL;
protected int UNDEFINED;
protected int IDENT, CONGRUENT, SIMILAR, UNEQUAL;
protected int TAG_UB, HOST, IO, WTIME_IS_GLOBAL;
protected int APPNUM, LASTUSEDCODE, UNIVERSE_SIZE, WIN_BASE, WIN_SIZE,
WIN_DISP_UNIT;
protected int VERSION, SUBVERSION;
protected int ROOT, KEYVAL_INVALID, BSEND_OVERHEAD;
protected int MAX_OBJECT_NAME, MAX_PORT_NAME, MAX_DATAREP_STRING;
protected int MAX_INFO_KEY, MAX_INFO_VAL;
protected int ORDER_C, ORDER_FORTRAN;
protected int DISTRIBUTE_BLOCK, DISTRIBUTE_CYCLIC, DISTRIBUTE_NONE,
DISTRIBUTE_DFLT_DARG;
protected int MODE_CREATE, MODE_RDONLY, MODE_WRONLY, MODE_RDWR,
MODE_DELETE_ON_CLOSE, MODE_UNIQUE_OPEN, MODE_EXCL,
MODE_APPEND, MODE_SEQUENTIAL;
protected int DISPLACEMENT_CURRENT;
protected int SEEK_SET, SEEK_CUR, SEEK_END;
protected int MODE_NOCHECK, MODE_NOPRECEDE, MODE_NOPUT, MODE_NOSTORE,
MODE_NOSUCCEED;
protected int LOCK_EXCLUSIVE, LOCK_SHARED;
// Error classes and codes
protected int SUCCESS;
protected int ERR_BUFFER;
protected int ERR_COUNT;
protected int ERR_TYPE;
protected int ERR_TAG;
protected int ERR_COMM;
protected int ERR_RANK;
protected int ERR_REQUEST;
protected int ERR_ROOT;
protected int ERR_GROUP;
protected int ERR_OP;
protected int ERR_TOPOLOGY;
protected int ERR_DIMS;
protected int ERR_ARG;
protected int ERR_UNKNOWN;
protected int ERR_TRUNCATE;
protected int ERR_OTHER;
protected int ERR_INTERN;
protected int ERR_IN_STATUS;
protected int ERR_PENDING;
protected int ERR_ACCESS;
protected int ERR_AMODE;
protected int ERR_ASSERT;
protected int ERR_BAD_FILE;
protected int ERR_BASE;
protected int ERR_CONVERSION;
protected int ERR_DISP;
protected int ERR_DUP_DATAREP;
protected int ERR_FILE_EXISTS;
protected int ERR_FILE_IN_USE;
protected int ERR_FILE;
protected int ERR_INFO_KEY;
protected int ERR_INFO_NOKEY;
protected int ERR_INFO_VALUE;
protected int ERR_INFO;
protected int ERR_IO;
protected int ERR_KEYVAL;
protected int ERR_LOCKTYPE;
protected int ERR_NAME;
protected int ERR_NO_MEM;
protected int ERR_NOT_SAME;
protected int ERR_NO_SPACE;
protected int ERR_NO_SUCH_FILE;
protected int ERR_PORT;
protected int ERR_QUOTA;
protected int ERR_READ_ONLY;
protected int ERR_RMA_CONFLICT;
protected int ERR_RMA_SYNC;
protected int ERR_SERVICE;
protected int ERR_SIZE;
protected int ERR_SPAWN;
protected int ERR_UNSUPPORTED_DATAREP;
protected int ERR_UNSUPPORTED_OPERATION;
protected int ERR_WIN;
protected int ERR_LASTCODE;
protected int ERR_SYSRESOURCE;
protected Constant()
{
setConstant();
}
private native void setConstant();
} // Constant

(Diff for one file not shown because of its size.)

ompi/mpi/java/java/DistGraphNeighbors.java (new file)
@@ -0,0 +1,89 @@
package mpi;
/**
* Adjacency information for a distributed graph topology.
*/
public final class DistGraphNeighbors
{
private final int[] sources, sourceWeights, destinations, destWeights;
private final boolean weighted;
protected DistGraphNeighbors(
int[] sources, int[] sourceWeights,
int[] destinations, int[] destWeights, boolean weighted)
{
this.sources = sources;
this.sourceWeights = sourceWeights;
this.destinations = destinations;
this.destWeights = destWeights;
this.weighted = weighted;
}
/**
* Gets the number of edges into this process.
* @return number of edges into this process
*/
public int getInDegree()
{
return sources.length;
}
/**
* Gets the number of edges out of this process.
* @return number of edges out of this process
*/
public int getOutDegree()
{
return destinations.length;
}
/**
* Returns false if {@code MPI_UNWEIGHTED} was supplied during creation.
* @return false if {@code MPI_UNWEIGHTED} was supplied, true otherwise
*/
public boolean isWeighted()
{
return weighted;
}
/**
 * Gets a process for which the calling process is a destination.
 * @param i source index
 * @return process for which the calling process is a destination
*/
public int getSource(int i)
{
return sources[i];
}
/**
* Gets the weight of an edge into the calling process.
* @param i source index
* @return weight of the edge into the calling process
*/
public int getSourceWeight(int i)
{
return sourceWeights[i];
}
/**
 * Gets a process for which the calling process is a source.
* @param i destination index
* @return process for which the calling process is a source
*/
public int getDestination(int i)
{
return destinations[i];
}
/**
* Gets the weight of an edge out of the calling process.
* @param i destination index
* @return weight of an edge out of the calling process
*/
public int getDestinationWeight(int i)
{
return destWeights[i];
}
} // DistGraphNeighbors
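Usage note: an object of this class is produced by GraphComm.getDistGraphNeighbors() (further down in this commit). A small helper showing how the accessors fit together; it uses only the API defined above:

import mpi.DistGraphNeighbors;

public class NeighborDump
{
    // Print every incoming and outgoing edge, with weights when the
    // topology was created with real weights (isWeighted() == true).
    public static void print(DistGraphNeighbors n)
    {
        for(int i = 0; i < n.getInDegree(); i++)
            System.out.println("in  from " + n.getSource(i)
                + (n.isWeighted() ? " w=" + n.getSourceWeight(i) : ""));

        for(int i = 0; i < n.getOutDegree(); i++)
            System.out.println("out to " + n.getDestination(i)
                + (n.isWeighted() ? " w=" + n.getDestinationWeight(i) : ""));
    }
}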

ompi/mpi/java/java/DoubleComplex.java (new file)
@@ -0,0 +1,130 @@
package mpi;
import java.nio.*;
/**
* This class wraps a complex number stored in a buffer.
*/
public final class DoubleComplex
{
private final int offset;
private final DoubleBuffer buffer;
private DoubleComplex(DoubleBuffer buffer, int index)
{
this.buffer = buffer;
this.offset = index * 2;
}
/**
 * Wraps a complex number stored in a buffer.
* @param buffer buffer
* @return complex number
*/
public static DoubleComplex get(DoubleBuffer buffer)
{
return new DoubleComplex(buffer, 0);
}
/**
* Wraps the complex number at the specified position
* of an array of complex numbers stored in a buffer.
* @param buffer buffer
* @param index index
* @return complex number
*/
public static DoubleComplex get(DoubleBuffer buffer, int index)
{
return new DoubleComplex(buffer, index);
}
/**
* Wraps a complex number stored in the first two values of an array.
* @param array array
* @return complex number
*/
public static DoubleComplex get(double[] array)
{
return new DoubleComplex(DoubleBuffer.wrap(array), 0);
}
/**
* Wraps the complex number at the specified position of
* an array of complex numbers stored in an array of doubles.
* @param array array
* @param index index
* @return complex number
*/
public static DoubleComplex get(double[] array, int index)
{
return new DoubleComplex(DoubleBuffer.wrap(array), index);
}
/**
 * Wraps a complex number stored in a buffer.
* @param buffer buffer
* @return complex number
*/
public static DoubleComplex get(ByteBuffer buffer)
{
return new DoubleComplex(buffer.asDoubleBuffer(), 0);
}
/**
* Wraps the complex number at the specified position
* of an array of complex numbers stored in a buffer.
* @param buffer buffer
* @param index index
* @return complex number
*/
public static DoubleComplex get(ByteBuffer buffer, int index)
{
return new DoubleComplex(buffer.asDoubleBuffer(), index);
}
/**
* Gets the real value.
* @return real value
*/
public double getReal()
{
return buffer.get(offset);
}
/**
* Gets the imaginary value.
* @return imaginary value.
*/
public double getImag()
{
return buffer.get(offset + 1);
}
/**
* Puts the real value.
* @param real real value
*/
public void putReal(double real)
{
buffer.put(offset, real);
}
/**
* Puts the imaginary value.
* @param imag imaginary value
*/
public void putImag(double imag)
{
buffer.put(offset + 1, imag);
}
/**
* Gets the buffer where the complex number is stored.
* @return buffer where the complex number is stored
*/
public DoubleBuffer getBuffer()
{
return offset == 0 ? buffer : MPI.slice(buffer, offset);
}
} // DoubleComplex
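Usage note: because the wrapper writes through to the underlying buffer, it can address complex values stored inline in Java arrays or direct buffers without copying. A self-contained example using only the API above:

import mpi.DoubleComplex;

public class ComplexDemo
{
    public static void main(String[] args)
    {
        // Two complex numbers packed as {re0, im0, re1, im1}.
        double[] packed = {1.0, 2.0, 3.0, 4.0};

        DoubleComplex z = DoubleComplex.get(packed, 1);
        System.out.println(z.getReal() + " + " + z.getImag() + "i");

        z.putImag(-4.0);               // writes through to the array
        System.out.println(packed[3]); // prints -4.0
    }
}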

ompi/mpi/java/java/DoubleInt.java (new file)
@@ -0,0 +1,90 @@
package mpi;
/**
* Java binding of the MPI data type {@code MPI_DOUBLE_INT}.
*/
public final class DoubleInt extends Struct
{
private final int iOff, iSize;
/** The struct will be created only in the MPI class. */
protected DoubleInt(int intOff, int intSize)
{
int dOff = addDouble();
assert dOff == 0;
iSize = intSize;
setOffset(intOff);
switch(iSize)
{
case 4: iOff = addInt(); break;
case 8: iOff = addLong(); break;
default: throw new AssertionError("Unsupported int size: "+ iSize);
}
assert(intOff == iOff);
}
/**
* Creates a Data object.
* @return new Data object.
*/
@Override protected DoubleInt.Data newData()
{
return new DoubleInt.Data();
}
/**
* Class for reading/writing data in a struct stored in a byte buffer.
*/
public final class Data extends Struct.Data
{
/**
* Gets the double value.
* @return double value
*/
public double getValue()
{
return getDouble(0);
}
/**
* Gets the int value.
* @return int value
*/
public int getIndex()
{
switch(iSize)
{
case 4: return getInt(iOff);
case 8: return (int)getLong(iOff);
default: throw new AssertionError();
}
}
/**
* Puts the double value.
* @param v double value
*/
public void putValue(double v)
{
putDouble(0, v);
}
/**
* Puts the int value.
* @param v int value
*/
public void putIndex(int v)
{
switch(iSize)
{
case 4: putInt(iOff, v); break;
case 8: putLong(iOff, v); break;
default: throw new AssertionError();
}
}
} // Data
} // DoubleInt

ompi/mpi/java/java/Errhandler.java
@@ -22,24 +22,27 @@
 */

package mpi;

/**
 * Error handler.
 */
public final class Errhandler
{
protected long handle;

static
{
init();
}

private static native void init();
protected static native long getFatal();
protected static native long getReturn();

protected Errhandler(long handle)
{
this.handle = handle;
}
} // Errhandler

ompi/mpi/java/java/File.java (new file; diff not shown because of its size)

ompi/mpi/java/java/FileView.java (new file)
@@ -0,0 +1,63 @@
package mpi;
/**
* This class represents file views.
*/
public final class FileView
{
private final long disp;
private final Datatype etype, filetype;
private final String datarep;
/**
* Constructs a file view.
* @param disp displacement
* @param etype elementary datatype
* @param filetype file type
* @param datarep data representation
*/
public FileView(long disp, Datatype etype, Datatype filetype, String datarep)
{
this.disp = disp;
this.etype = etype;
this.filetype = filetype;
this.datarep = datarep;
}
/**
* Gets the displacement.
* @return displacement
*/
public long getDisp()
{
return disp;
}
/**
* Gets the elementary datatype.
* @return elementary datatype
*/
public Datatype getEType()
{
return etype;
}
/**
* Gets the file type.
* @return file type
*/
public Datatype getFileType()
{
return filetype;
}
/**
* Gets the data representation.
* @return data representation
*/
public String getDataRep()
{
return datarep;
}
} // FileView
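Usage note: FileView is a plain value object; presumably the File class (whose diff is elided above) produces and consumes it for MPI_FILE_SET_VIEW/MPI_FILE_GET_VIEW. Constructing and reading one uses only the public API shown here:

import mpi.*;

public class ViewDemo
{
    public static void main(String[] args) throws MPIException
    {
        MPI.Init(args);

        // Byte 0 onward, ints, standard "native" data representation.
        FileView view = new FileView(0, MPI.INT, MPI.INT, "native");
        System.out.println("disp=" + view.getDisp()
                           + " rep=" + view.getDataRep());

        MPI.Finalize();
    }
}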

ompi/mpi/java/java/FloatComplex.java (new file)
@@ -0,0 +1,130 @@
package mpi;
import java.nio.*;
/**
* This class wraps a complex number stored in a buffer.
*/
public final class FloatComplex
{
private final int offset;
private final FloatBuffer buffer;
private FloatComplex(FloatBuffer buffer, int index)
{
this.buffer = buffer;
this.offset = index * 2;
}
/**
 * Wraps a complex number stored in a buffer.
* @param buffer buffer
* @return complex number
*/
public static FloatComplex get(FloatBuffer buffer)
{
return new FloatComplex(buffer, 0);
}
/**
* Wraps the complex number at the specified position
* of an array of complex numbers stored in a buffer.
* @param buffer buffer
* @param index index
* @return complex number
*/
public static FloatComplex get(FloatBuffer buffer, int index)
{
return new FloatComplex(buffer, index);
}
/**
* Wraps a complex number stored in the first two values of an array.
* @param array array
* @return complex number
*/
public static FloatComplex get(float[] array)
{
return new FloatComplex(FloatBuffer.wrap(array), 0);
}
/**
* Wraps the complex number at the specified position of
* an array of complex numbers stored in an array of floats.
* @param array array
* @param index index
* @return complex number
*/
public static FloatComplex get(float[] array, int index)
{
return new FloatComplex(FloatBuffer.wrap(array), index);
}
/**
 * Wraps a complex number stored in a buffer.
* @param buffer buffer
* @return complex number
*/
public static FloatComplex get(ByteBuffer buffer)
{
return new FloatComplex(buffer.asFloatBuffer(), 0);
}
/**
* Wraps the complex number at the specified position
* of an array of complex numbers stored in a buffer.
* @param buffer buffer
* @param index index
* @return complex number
*/
public static FloatComplex get(ByteBuffer buffer, int index)
{
return new FloatComplex(buffer.asFloatBuffer(), index);
}
/**
* Gets the real value.
* @return real value
*/
public float getReal()
{
return buffer.get(offset);
}
/**
* Gets the imaginary value.
* @return imaginary value.
*/
public float getImag()
{
return buffer.get(offset + 1);
}
/**
* Puts the real value.
* @param real real value
*/
public void putReal(float real)
{
buffer.put(offset, real);
}
/**
* Puts the imaginary value.
* @param imag imaginary value
*/
public void putImag(float imag)
{
buffer.put(offset + 1, imag);
}
/**
* Gets the buffer where the complex number is stored.
* @return buffer where the complex number is stored
*/
public FloatBuffer getBuffer()
{
return offset == 0 ? buffer : MPI.slice(buffer, offset);
}
} // FloatComplex

ompi/mpi/java/java/FloatInt.java (new file)
@@ -0,0 +1,90 @@
package mpi;
/**
* Java binding of the MPI data type {@code MPI_FLOAT_INT}.
*/
public final class FloatInt extends Struct
{
private final int iOff, iSize;
/** The struct will be created only in the MPI class. */
protected FloatInt(int intOff, int intSize)
{
int fOff = addFloat();
assert fOff == 0;
iSize = intSize;
setOffset(intOff);
switch(iSize)
{
case 4: iOff = addInt(); break;
case 8: iOff = addLong(); break;
default: throw new AssertionError("Unsupported int size: "+ iSize);
}
assert(intOff == iOff);
}
/**
* Creates a Data object.
* @return new Data object.
*/
@Override protected Data newData()
{
return new Data();
}
/**
* Class for reading/writing data in a struct stored in a byte buffer.
*/
public final class Data extends Struct.Data
{
/**
* Gets the float value.
* @return float value
*/
public float getValue()
{
return getFloat(0);
}
/**
* Gets the int value.
* @return int value
*/
public int getIndex()
{
switch(iSize)
{
case 4: return getInt(iOff);
case 8: return (int)getLong(iOff);
default: throw new AssertionError();
}
}
/**
* Puts the float value.
* @param v float value
*/
public void putValue(float v)
{
putFloat(0, v);
}
/**
* Puts the int value.
* @param v int value
*/
public void putIndex(int v)
{
switch(iSize)
{
case 4: putInt(iOff, v); break;
case 8: putLong(iOff, v); break;
default: throw new AssertionError();
}
}
} // Data
} // FloatInt

ompi/mpi/java/java/Freeable.java
@@ -21,7 +21,15 @@
package mpi;

/**
 * Freeable objects must be freed by calling the {@code free} method.
 */
public interface Freeable
{
/**
 * Frees a freeable object.
 * @throws MPIException
 */
void free() throws MPIException;
}

ompi/mpi/java/java/GraphComm.java (new file)
@@ -0,0 +1,129 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
* File : Graphcomm.java
* Author : Xinying Li
* Created : Thu Apr 9 12:22:15 1998
* Revision : $Revision: 1.5 $
* Updated : $Date: 2001/10/22 21:07:55 $
* Copyright: Northeast Parallel Architectures Center
* at Syracuse University 1998
*/
package mpi;
/**
* Communicator with graph structure.
*/
public final class GraphComm extends Intracomm
{
static
{
init();
}
private static native void init();
protected GraphComm(long handle) throws MPIException
{
super(handle);
}
/**
* Duplicate this communicator.
* <p>Java binding of the MPI operation {@code MPI_COMM_DUP}.
* <p>The new communicator is "congruent" to the old one,
* but has a different context.
* @return copy of this communicator
*/
@Override public GraphComm clone()
{
try
{
MPI.check();
return new GraphComm(dup());
}
catch(MPIException e)
{
throw new RuntimeException(e.getMessage());
}
}
/**
* Returns graph topology information.
* <p>Java binding of the MPI operations {@code MPI_GRAPHDIMS_GET}
* and {@code MPI_GRAPH_GET}.
* <p>The number of nodes and number of edges can be extracted
* with {@code getIndexCount()} and {@code getEdgeCount()} on
* the returned object.
* @return object defining node degrees and edges of graph
* @throws MPIException
*/
public GraphParms getDims() throws MPIException
{
MPI.check();
return getDims(handle);
}
private native GraphParms getDims(long comm) throws MPIException;
/**
* Provides adjacency information for general graph topology.
* <p>Java binding of the MPI operations {@code MPI_GRAPH_NEIGHBORS_COUNT}
* and {@code MPI_GRAPH_NEIGHBORS}.
* <p>The number of neighbors can be extracted from the size of the result.
* @param rank rank of a process in the group of this communicator
* @return array of ranks of neighbouring processes to one specified
* @throws MPIException
*/
public int[] getNeighbors(int rank) throws MPIException
{
MPI.check();
return getNeighbors(handle, rank);
}
private native int[] getNeighbors(long comm, int rank) throws MPIException;
/**
* Gets the adjacency information for a distributed graph topology.
* @return adjacency information for a distributed graph topology
* @throws MPIException
*/
public DistGraphNeighbors getDistGraphNeighbors() throws MPIException
{
MPI.check();
return getDistGraphNeighbors(handle);
}
private native DistGraphNeighbors getDistGraphNeighbors(long comm)
throws MPIException;
/**
* Compute an optimal placement.
* <p>Java binding of the MPI operation {@code MPI_GRAPH_MAP}.
* <p>The number of nodes is taken to be size of the {@code index} argument.
* @param index node degrees
* @param edges graph edges
* @return reordered rank of calling process
* @throws MPIException
*/
public int map(int[] index, int[] edges) throws MPIException
{
MPI.check();
return map(handle, index, edges);
}
private native int map(long comm, int[] index, int[] edges) throws MPIException;
} // GraphComm

ompi/mpi/java/java/GraphParms.java
@@ -23,8 +23,71 @@
package mpi;

/**
 * Graph topology information associated with a communicator.
 */
public final class GraphParms
{
/** Node degrees. */
private final int[] index;

/** Graph edges. */
private final int[] edges;

/**
 * Constructs a graph topology information object.
 * @param index node degrees.
 * @param edges graph edges.
 */
protected GraphParms(int[] index, int[] edges)
{
this.index = index;
this.edges = edges;
}
/**
* Returns the number of nodes.
* @return number of nodes.
*/
public int getIndexCount()
{
return index.length;
}
/**
* Returns the index of the node {@code i}.
* <p>{@code getIndex(0)} returns the degree of the node {@code 0}, and
* {@code getIndex(i)-getIndex(i-1)} is the degree of the node {@code i}.
* @param i position of the node.
* @return the index.
*/
public int getIndex(int i)
{
return index[i];
}
/**
* Returns the number of edges.
* @return number of edges.
*/
public int getEdgeCount()
{
return edges.length;
}
/**
* Returns the edge {@code i}.
* <p>The list of neighbors of node zero is stored in {@code getEdge(j)},
* for {@code 0} &le; {@code j} &le; {@code getIndex(0)-1} and the list
* of neighbors of node {@code i}, {@code i} &gt; {@code 0}, is stored
* in {@code getEdge(j)}, {@code getIndex(i-1)} &le; {@code j} &le;
* {@code getIndex(i)-1}.
* @param i index of the edge.
* @return the edge.
*/
public int getEdge(int i)
{
return edges[i];
}
} // GraphParms
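Usage note: the index/edges encoding is the classic MPI_GRAPH_CREATE layout. Decoded here by hand with the 4-node example from the MPI standard (edges 0-1, 0-3, 2-3); the loop walks exactly what getIndex/getEdge expose:

public class GraphIndexDemo
{
    public static void main(String[] args)
    {
        int[] index = {2, 3, 4, 6};          // cumulative degrees
        int[] edges = {1, 3, 0, 3, 0, 2};    // concatenated adjacency

        for(int i = 0; i < index.length; i++)
        {
            int begin = i == 0 ? 0 : index[i - 1];

            for(int j = begin; j < index[i]; j++)
                System.out.println("node " + i + " -> node " + edges[j]);
        }
    }
}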

ompi/mpi/java/java/Graphcomm.java (deleted)
@@ -1,94 +0,0 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
* File : Graphcomm.java
* Author : Xinying Li
* Created : Thu Apr 9 12:22:15 1998
* Revision : $Revision: 1.5 $
* Updated : $Date: 2001/10/22 21:07:55 $
* Copyright: Northeast Parallel Architectures Center
* at Syracuse University 1998
*/
package mpi;
public class Graphcomm extends Intracomm {
protected Graphcomm(long handle) throws MPIException {
super(handle) ;
}
public Object clone() {
try {
return new Graphcomm(super.dup()) ;
}
catch (MPIException e) {
throw new RuntimeException(e.getMessage()) ;
}
}
/**
* Returns graph topology information.
* <p>
* <table>
 * <tr><td><em> returns: </em></td><td> object defining node degrees and
* edges of graph </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_GRAPHDIMS_GET</tt>.
* <p>
* The number of nodes and number of edges can be extracted
* from the sizes of the <tt>index</tt> and <tt>edges</tt> fields
* of the returned object.
*/
public native GraphParms Get() throws MPIException ;
/**
* Provides adjacency information for general graph topology.
* <p>
* <table>
* <tr><td><tt> rank </tt></td><td> rank of a process in the group
* of this communicator </tr>
* <tr><td><em> returns: </em></td><td> array of ranks of neighbouring
* processes to one specified </tr>
* </table>
* <p>
* Java binding of the MPI operations <tt>MPI_GRAPH_NEIGHBOURS_COUNT</tt>
* and <tt>MPI_GRAPH_NEIGHBOURS</tt>.
* <p>
* The number of neighbours can be extracted from the size of the result.
*/
public native int [] Neighbours(int rank) throws MPIException ;
/**
* Compute an optimal placement.
* <p>
* <table>
* <tr><td><tt> index </tt></td><td> node degrees </tr>
* <tr><td><tt> edges </tt></td><td> graph edges </tr>
* <tr><td><em> returns: </em></td><td> reordered rank of calling
* process </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_GRAPH_MAP</tt>.
* <p>
* The number of nodes is taken to be size of the <tt>index</tt> argument.
*/
public native int Map(int [] index, int [] edges) throws MPIException ;
}


@@ -22,245 +22,229 @@
*/

package mpi;

//import mpi.*;

public class Group extends Freeable {
protected final static int EMPTY = 0;

private static native void init();

protected long handle;
//public Group() {}
protected Group(int Type) { GetGroup(Type); }
protected Group(long _handle) { handle = _handle;}
private native void GetGroup(int Type);
/**
* Size of group.
* <p>
* <table>
* <tr><td><em> returns: </em></td><td> number of processors in the
* group </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_GROUP_SIZE</tt>.
*/
public native int Size() throws MPIException ;
/**
* Rank of this process in group.
* <p>
* <table>
* <tr><td><em> returns: </em></td><td> rank of the calling process in
* the group </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_GROUP_RANK</tt>.
*
* Result value is <tt>MPI.UNDEFINED</tt> if this process is not
* a member of the group.
*/
public native int Rank() throws MPIException ;
/**
* Destructor.
* <p>
* Java binding of the MPI operation <tt>MPI_GROUP_FREE</tt>.
*/
@SuppressWarnings("unchecked")
public void finalize() throws MPIException {
synchronized(MPI.class) {
MPI.freeList.addFirst(this) ;
}
}
native void free() ;
/**
* Translate ranks within one group to ranks within another.
* <p>
* <table>
* <tr><td><tt> group1 </tt></td><td> a group </tr>
* <tr><td><tt> ranks1 </tt></td><td> array of valid ranks in
* <tt>group1</tt> </tr>
* <tr><td><tt> group2 </tt></td><td> another group </tr>
* <tr><td><em> returns: </em></td><td> array of corresponding ranks in
* <tt>group2</tt> </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_GROUP_TRANSLATE_RANKS</tt>.
* <p>
* Result elements are <tt>MPI.UNDEFINED</tt> where no correspondence
* exists.
*/
public static native int [] Translate_ranks(Group group1,int [] ranks1,
Group group2)
throws MPIException ;
/**
* Compare two groups.
* <p>
* <table>
* <tr><td><tt> group1 </tt></td><td> first group </tr>
* <tr><td><tt> group2 </tt></td><td> second group </tr>
* <tr><td><em> returns: </em></td><td> result </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_GROUP_COMPARE</tt>.
* <p>
* <tt>MPI.IDENT</tt> results if the group members and group order are
* exactly the same in both groups. <tt>MPI.SIMILAR</tt> results if
* the group members are the same but the order is different.
* <tt>MPI.UNEQUAL</tt> results otherwise.
*/
public static native int Compare(Group group1, Group group2)
throws MPIException ;
/**
* Set union of two groups.
* <p>
* <table>
* <tr><td><tt> group1 </tt></td><td> first group </tr>
* <tr><td><tt> group2 </tt></td><td> second group </tr>
* <tr><td><em> returns: </em></td><td> union group </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_GROUP_UNION</tt>.
*/
public static Group Union(Group group1, Group group2) throws MPIException {
return new Group(union(group1, group2)) ;
}
private static native long union(Group group1, Group group2);
/**
* Set intersection of two groups.
* <p>
* <table>
* <tr><td><tt> group1 </tt></td><td> first group </tr>
* <tr><td><tt> group2 </tt></td><td> second group </tr>
* <tr><td><em> returns: </em></td><td> intersection group </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_GROUP_INTERSECTION</tt>.
*/
public static Group Intersection(Group group1,Group group2)
throws MPIException {
return new Group(intersection(group1, group2)) ;
}
private static native long intersection(Group group1, Group group2);
/**
* Result contains all elements of the first group that are not in the
* second group.
* <p>
* <table>
* <tr><td><tt> group1 </tt></td><td> first group </tr>
* <tr><td><tt> group2 </tt></td><td> second group </tr>
* <tr><td><em> returns: </em></td><td> difference group </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_GROUP_DIFFERENCE</tt>.
*/
public static Group Difference(Group group1, Group group2)
throws MPIException {
return new Group(difference(group1, group2)) ;
}
private static native long difference(Group group1, Group group2) ;
/**
* Create a subset group including specified processes.
* <p>
* <table>
* <tr><td><tt> ranks </tt></td><td> ranks from this group to appear in
* new group </tr>
* <tr><td><em> returns: </em></td><td> new group </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_GROUP_INCL</tt>.
*/
public Group Incl(int [] ranks) throws MPIException {
return new Group(incl(ranks)) ;
}
private native long incl(int [] ranks);
/**
* Create a subset group excluding specified processes.
* <p>
* <table>
* <tr><td><tt> ranks </tt></td><td> ranks from this group <em>not</em>
* to appear in new group </tr>
* <tr><td><em> returns: </em></td><td> new group </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_GROUP_EXCL</tt>.
*/
public Group Excl(int [] ranks) throws MPIException {
return new Group(excl(ranks)) ;
}
private native long excl(int [] ranks) ;
/**
* Create a subset group including processes specified
* by strided intervals of ranks.
* <p>
* <table>
* <tr><td><tt> ranges </tt></td><td> array of integer triplets </tr>
* <tr><td><em> returns: </em></td><td> new group </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_GROUP_RANGE_INCL</tt>.
* <p>
* The triplets are of the form (first rank, last rank, stride)
* indicating ranks in this group to be included in the new group.
* The size of the first dimension of <tt>ranges</tt> is the number
* of triplets. The size of the second dimension is 3.
*/
public Group Range_incl(int [][] ranges) throws MPIException {
return new Group(range_incl(ranges)) ;
}
private native long range_incl(int [][] ranges) ;
/**
* Create a subset group excluding processes specified
* by strided intervals of ranks.
* <p>
* <table>
* <tr><td><tt> ranges </tt></td><td> array of integer triplets </tr>
* <tr><td><em> returns: </em></td><td> new group </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_GROUP_RANGE_EXCL</tt>.
* <p>
* Triplet array is defined as for <tt>Range_incl</tt>, the ranges
* indicating ranks in this group to be excluded from the new group.
*/
public Group Range_excl(int [][] ranges) throws MPIException {
return new Group(range_excl(ranges)) ;
}
private native long range_excl(int [][] ranges) ;
static {
init();
}
}

/**
 * This class represents {@code MPI_Group}.
 */
public final class Group implements Freeable
{
protected long handle;
private static long nullHandle;

static
{
init();
}
private static native void init();
protected static native long getEmpty();
protected Group(long handle)
{
this.handle = handle;
}
/**
* Java binding of the MPI operation {@code MPI_GROUP_SIZE}.
* @return number of processes in the group
* @throws MPIException
*/
public int getSize() throws MPIException
{
MPI.check();
return getSize(handle);
}
private native int getSize(long group) throws MPIException;
/**
* Rank of this process in the group.
* <p>Java binding of the MPI operation {@code MPI_GROUP_RANK}.
* @return rank of this process in the group, or {@code MPI.UNDEFINED}
* if this process is not a member of the group.
* @throws MPIException
*/
public int getRank() throws MPIException
{
MPI.check();
return getRank(handle);
}
private native int getRank(long group) throws MPIException;
/**
* Java binding of the MPI operation {@code MPI_GROUP_FREE}.
*/
@Override public void free() throws MPIException
{
MPI.check();
handle = free(handle);
}
private native long free(long group);
/**
* Test if group object is null.
* @return true if the group object is null.
*/
public boolean isNull()
{
return handle == nullHandle;
}
/**
* Translate ranks within one group to ranks within another.
* <p>Java binding of the MPI operation {@code MPI_GROUP_TRANSLATE_RANKS}.
* <p>Result elements are {@code MPI.UNDEFINED} where no correspondence exists.
* @param group1 a group
* @param ranks1 array of valid ranks in group1
* @param group2 another group
* @return array of corresponding ranks in group2
* @throws MPIException
*/
public static int[] translateRanks(Group group1, int[] ranks1, Group group2)
throws MPIException
{
MPI.check();
return translateRanks(group1.handle, ranks1, group2.handle);
}
private static native int[] translateRanks(
long group1, int[] ranks1, long group2) throws MPIException;
/**
* Compare two groups.
* <p>Java binding of the MPI operation {@code MPI_GROUP_COMPARE}.
* @param group1 first group
* @param group2 second group
* @return {@code MPI.IDENT} if the group members and group order are exactly
* the same in both groups, {@code MPI.SIMILAR} if the group members are
* the same but the order is different, {@code MPI.UNEQUAL} otherwise.
* @throws MPIException
*/
public static int compare(Group group1, Group group2) throws MPIException
{
MPI.check();
return compare(group1.handle, group2.handle);
}
private static native int compare(long group1, long group2) throws MPIException;
/**
* Set union of two groups.
* <p>Java binding of the MPI operation {@code MPI_GROUP_UNION}.
* @param group1 first group
* @param group2 second group
* @return union group
* @throws MPIException
*/
public static Group union(Group group1, Group group2) throws MPIException
{
MPI.check();
return new Group(union(group1.handle, group2.handle));
}
private static native long union(long group1, long group2);
/**
* Set intersection of two groups.
* Java binding of the MPI operation {@code MPI_GROUP_INTERSECTION}.
* @param group1 first group
* @param group2 second group
* @return intersection group
* @throws MPIException
*/
public static Group intersection(Group group1, Group group2) throws MPIException
{
MPI.check();
return new Group(intersection(group1.handle, group2.handle));
}
private static native long intersection(long group1, long group2);
/**
* Set difference of two groups.
* Java binding of the MPI operation {@code MPI_GROUP_DIFFERENCE}.
* @param group1 first group
* @param group2 second group
* @return difference group
* @throws MPIException
*/
public static Group difference(Group group1, Group group2) throws MPIException
{
MPI.check();
return new Group(difference(group1.handle, group2.handle));
}
private static native long difference(long group1, long group2);
/**
* Create a subset group including specified processes.
* <p>Java binding of the MPI operation {@code MPI_GROUP_INCL}.
* @param ranks ranks from this group to appear in new group
* @return new group
* @throws MPIException
*/
public Group incl(int[] ranks) throws MPIException
{
MPI.check();
return new Group(incl(handle, ranks));
}
private native long incl(long group, int[] ranks);
/**
* Create a subset group excluding specified processes.
* <p>Java binding of the MPI operation {@code MPI_GROUP_EXCL}.
* @param ranks ranks from this group <em>not</em> to appear in new group
* @return new group
* @throws MPIException
*/
public Group excl(int[] ranks) throws MPIException
{
MPI.check();
return new Group(excl(handle, ranks));
}
private native long excl(long group, int[] ranks);
/**
* Create a subset group including processes specified
* by strided intervals of ranks.
* <p>Java binding of the MPI operation {@code MPI_GROUP_RANGE_INCL}.
* <p>The triplets are of the form (first rank, last rank, stride)
* indicating ranks in this group to be included in the new group.
* The size of the first dimension of {@code ranges} is the number
* of triplets. The size of the second dimension is 3.
* @param ranges array of integer triplets
* @return new group
* @throws MPIException
*/
public Group rangeIncl(int[][] ranges) throws MPIException
{
MPI.check();
return new Group(rangeIncl(handle, ranges));
}
private native long rangeIncl(long group, int[][] ranges);
/**
* Create a subset group excluding processes specified
* by strided intervals of ranks.
* <p>Java binding of the MPI operation {@code MPI_GROUP_RANGE_EXCL}.
* <p>Triplet array is defined as for {@code rangeIncl}, the ranges
* indicating ranks in this group to be excluded from the new group.
* @param ranges array of integer triplets
* @return new group
* @throws MPIException
*/
public Group rangeExcl(int[][] ranges) throws MPIException
{
MPI.check();
return new Group(rangeExcl(handle, ranges));
}
private native long rangeExcl(long group, int[][] ranges);
} // Group
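
A hedged usage sketch of the new camel-case Group API; it assumes the usual MPI.COMM_WORLD constant and a Comm.getGroup() accessor, neither of which appears in this hunk:

// Build the subgroup of even world ranks via a (first, last, stride) triplet,
// then map its rank 0 back to a rank in the world group.
Group world = MPI.COMM_WORLD.getGroup();   // assumed accessor
Group even  = world.rangeIncl(new int[][] {{ 0, world.getSize() - 1, 2 }});
int[] worldRanks = Group.translateRanks(even, new int[] { 0 }, world);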

ompi/mpi/java/java/Info.java (new file)

@@ -0,0 +1,146 @@
package mpi;
/**
* This class represents {@code MPI_Info}.
*/
public final class Info implements Freeable
{
protected long handle;
protected static final long NULL = getNull();
/**
* Java binding of the MPI operation {@code MPI_INFO_CREATE}.
*/
public Info() throws MPIException
{
MPI.check();
handle = create();
}
protected Info(long handle)
{
this.handle = handle;
}
private native long create();
protected static Info newEnv()
{
return new Info(getEnv());
}
private native static long getEnv();
private native static long getNull();
/**
* Java binding of the MPI operation {@code MPI_INFO_SET}.
* @param key key
* @param value value
* @throws MPIException
*/
public void set(String key, String value) throws MPIException
{
MPI.check();
set(handle, key, value);
}
private native void set(long handle, String key, String value)
throws MPIException;
/**
 * Java binding of the MPI operation {@code MPI_INFO_GET}.
* @param key key
* @return value or {@code null} if key is not defined
* @throws MPIException
*/
public String get(String key) throws MPIException
{
MPI.check();
return get(handle, key);
}
private native String get(long handle, String key) throws MPIException;
/**
 * Java binding of the MPI operation {@code MPI_INFO_DELETE}.
* @param key key
* @throws MPIException
*/
public void delete(String key) throws MPIException
{
MPI.check();
delete(handle, key);
}
private native void delete(long handle, String key) throws MPIException;
/**
* Java binding of the MPI operation {@code MPI_INFO_GET_NKEYS}.
* @return number of defined keys
* @throws MPIException
*/
public int size() throws MPIException
{
MPI.check();
return size(handle);
}
private native int size(long handle) throws MPIException;
/**
* Java binding of the MPI operation {@code MPI_INFO_GET_NTHKEY}.
* @param i key number
* @return key
* @throws MPIException
*/
public String getKey(int i) throws MPIException
{
MPI.check();
return getKey(handle, i);
}
private native String getKey(long handle, int i) throws MPIException;
/**
* Java binding of the MPI operation {@code MPI_INFO_DUP}.
* @return info object
*/
@Override public Info clone()
{
try
{
MPI.check();
return new Info(clone(handle));
}
catch(MPIException e)
{
throw new RuntimeException(e.getMessage());
}
}
private native long clone(long handle) throws MPIException;
/**
* Java binding of the MPI operation {@code MPI_INFO_FREE}.
* @throws MPIException
*/
@Override public void free() throws MPIException
{
MPI.check();
handle = free(handle);
}
private native long free(long handle) throws MPIException;
/**
* Tests if the info object is {@code MPI_INFO_NULL} (has been freed).
* @return true if the info object is {@code MPI_INFO_NULL}, false otherwise.
*/
public boolean isNull()
{
return isNull(handle);
}
private native boolean isNull(long handle);
} // Info
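
The class maps one method per MPI info call; a short sketch of the round trip (inside a method that declares throws MPIException; the key/value strings are arbitrary):

Info info = new Info();                  // MPI_INFO_CREATE
info.set("foo", "bar");                  // MPI_INFO_SET
String value = info.get("foo");          // MPI_INFO_GET, null if undefined
for(int i = 0; i < info.size(); i++)     // MPI_INFO_GET_NKEYS
    System.out.println(info.getKey(i));  // MPI_INFO_GET_NTHKEY
info.free();                             // MPI_INFO_FREE; isNull() is now true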

ompi/mpi/java/java/Int2.java (new file)

@@ -0,0 +1,102 @@
package mpi;
/**
* Java binding of the MPI data type {@code MPI_2INT}.
*/
public class Int2 extends Struct
{
private final int iOff, iSize;
/** The struct will be created only in the MPI class. */
protected Int2(int intOff, int intSize)
{
iSize = intSize;
int off = addIntField();
assert off == 0;
setOffset(intOff);
iOff = addIntField();
assert intOff == iOff;
}
private int addIntField()
{
switch(iSize)
{
case 4: return addInt();
case 8: return addLong();
default: throw new AssertionError("Unsupported int size: "+ iSize);
}
}
/**
* Creates a Data object.
* @return new Data object.
*/
@Override protected Int2.Data newData()
{
return new Int2.Data();
}
/**
* Class for reading/writing data in a struct stored in a byte buffer.
*/
public final class Data extends Struct.Data
{
/**
* Gets the first int.
* @return first int
*/
public int getValue()
{
return get(0);
}
/**
* Gets the second int.
* @return second int
*/
public int getIndex()
{
return get(iOff);
}
/**
* Puts the first int.
* @param v first value
*/
public void putValue(int v)
{
put(0, v);
}
/**
* Puts the second int.
* @param v second int
*/
public void putIndex(int v)
{
put(iOff, v);
}
private int get(int off)
{
switch(iSize)
{
case 4: return getInt(off);
case 8: return (int)getLong(off);
default: throw new AssertionError();
}
}
private void put(int off, int v)
{
switch(iSize)
{
case 4: putInt(off, v); break;
case 8: putLong(off, v); break;
default: throw new AssertionError();
}
}
} // Data
} // Int2


@@ -22,64 +22,93 @@
*/

package mpi;

//import mpi.*;

public class Intercomm extends Comm {

protected Intercomm(long handle) {super(handle) ;}

public Object clone() {
return new Intercomm(super.dup());
}

// Inter-Communication

/**
* Size of remote group.
* <p>
* <table>
 * <tr><td><em> returns: </em></td><td> number of processes in remote group
* of this communicator </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_COMM_REMOTE_SIZE</tt>.
*/
public native int Remote_size() throws MPIException ;
/**
* Return the remote group.
* <p>
* <table>
* <tr><td><em> returns: </em></td><td> remote group of this
* communicator </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_COMM_REMOTE_GROUP</tt>.
*/
public Group Remote_group() throws MPIException {
return new Group(remote_group());
}
private native long remote_group();
/**
* Create an inter-communicator.
* <p>
* <table>
* <tr><td><tt> high </tt></td><td> true if the local group has higher
* ranks in combined group </tr>
* <tr><td><em> returns: </em></td><td> new intra-communicator </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_INTERCOMM_MERGE</tt>.
*/
public Intracomm Merge(boolean high) throws MPIException {
return new Intracomm(merge(high)) ;
}
private native long merge(boolean high);
}

/**
 * This class represents intercommunicators.
 */
public final class Intercomm extends Comm
{
protected Intercomm(long handle)
{
super(handle);
}
/**
* Duplicate this communicator.
* <p>Java binding of the MPI operation {@code MPI_COMM_DUP}.
* <p>The new communicator is "congruent" to the old one,
* but has a different context.
* @return copy of this communicator
*/
@Override public Intercomm clone()
{
try
{
MPI.check();
return new Intercomm(dup());
}
catch(MPIException e)
{
throw new RuntimeException(e.getMessage());
}
}
// Inter-Communication
/**
* Size of remote group.
* <p>Java binding of the MPI operation {@code MPI_COMM_REMOTE_SIZE}.
 * @return number of processes in the remote group of this communicator
* @throws MPIException
*/
public int getRemoteSize() throws MPIException
{
MPI.check();
return getRemoteSize_jni();
}
private native int getRemoteSize_jni() throws MPIException;
/**
* Return the remote group.
* <p>Java binding of the MPI operation {@code MPI_COMM_REMOTE_GROUP}.
* @return remote group of this communicator
* @throws MPIException
*/
public Group getRemoteGroup() throws MPIException
{
MPI.check();
return new Group(getRemoteGroup_jni());
}
private native long getRemoteGroup_jni();
/**
 * Creates an intracommunicator from an intercommunicator.
* <p>Java binding of the MPI operation {@code MPI_INTERCOMM_MERGE}.
* @param high true if the local group has higher ranks in combined group
* @return new intra-communicator
* @throws MPIException
*/
public Intracomm merge(boolean high) throws MPIException
{
MPI.check();
return new Intracomm(merge_jni(high));
}
private native long merge_jni(boolean high);
/**
* Java binding of {@code MPI_COMM_GET_PARENT}.
* @return the parent communicator
* @throws MPIException
*/
public static Intercomm getParent() throws MPIException
{
MPI.check();
return new Intercomm(getParent_jni());
}
private native static long getParent_jni() throws MPIException;
} // Intercomm
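
A sketch of the intended call sequence in a dynamically spawned child process; it assumes the process was started via MPI_COMM_SPAWN so that getParent() returns a real intercommunicator:

Intercomm parent = Intercomm.getParent();  // MPI_COMM_GET_PARENT
int remote = parent.getRemoteSize();       // number of ranks on the parents' side
Intracomm all = parent.merge(true);        // MPI_INTERCOMM_MERGE; 'true' orders
                                           // this (local) group after the parents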

Diff between files not shown because of its large size.

ompi/mpi/java/java/LongInt.java (new file)

@@ -0,0 +1,108 @@
package mpi;
/**
* Java binding of the MPI data type {@code MPI_LONG_INT}.
*/
public final class LongInt extends Struct
{
private final int lSize, iOff, iSize;
/** The struct will be created only in the MPI class. */
protected LongInt(int longSize, int intOff, int intSize)
{
lSize = longSize;
iSize = intSize;
int lOff;
switch(lSize)
{
case 4: lOff = addInt(); break;
case 8: lOff = addLong(); break;
default: throw new AssertionError("Unsupported long size: "+ lSize);
}
assert lOff == 0;
setOffset(intOff);
switch(iSize)
{
case 4: iOff = addInt(); break;
case 8: iOff = addLong(); break;
default: throw new AssertionError("Unsupported int size: "+ iSize);
}
assert(intOff == iOff);
}
/**
* Creates a Data object.
* @return new Data object.
*/
@Override protected LongInt.Data newData()
{
return new LongInt.Data();
}
/**
* Class for reading/writing data in a struct stored in a byte buffer.
*/
public final class Data extends Struct.Data
{
/**
* Gets the long value.
* @return long value
*/
public long getValue()
{
switch(lSize)
{
case 8: return getLong(0);
case 4: return getInt(0);
default: throw new AssertionError();
}
}
/**
* Gets the int value.
* @return int value
*/
public int getIndex()
{
switch(iSize)
{
case 4: return getInt(iOff);
case 8: return (int)getLong(iOff);
default: throw new AssertionError();
}
}
/**
* Puts the long value.
* @param v long value
*/
public void putValue(long v)
{
switch(lSize)
{
case 8: putLong(0, v); break;
case 4: putInt(0, (int)v); break;
default: throw new AssertionError();
}
}
/**
* Puts the int value.
* @param v int value
*/
public void putIndex(int v)
{
switch(iSize)
{
case 4: putInt(iOff, v); break;
case 8: putLong(iOff, v); break;
default: throw new AssertionError();
}
}
} // Data
} // LongInt
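
Int2, LongInt, and the other pair structs back the MINLOC/MAXLOC reduction types. A hypothetical sketch of filling one (value, index) pair through the Data view; how the view is positioned over a reduction buffer is handled by the Struct base class, which is not part of this hunk:

// 'data' is assumed to be a LongInt.Data view over one pair in the buffer.
static void fillPair(LongInt.Data data, long value, int rank)
{
    data.putValue(value);  // the long being reduced
    data.putIndex(rank);   // conventionally the owning rank for MINLOC/MAXLOC
}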

Diff between files not shown because of its large size.


@@ -23,8 +23,55 @@
package mpi;

public class MPIException extends Exception {
public MPIException() {super() ;}
public MPIException(String message) {super(message) ;}
}

/**
 * Signals that an MPI exception of some sort has occurred.
 */
public final class MPIException extends Exception
{
private int errorCode, errorClass;

protected MPIException(int code, int clazz, String message)
{
super(message);
errorCode = code;
errorClass = clazz;
}
/**
* Creates an exception.
 * @param message message associated with the exception
*/
public MPIException(String message)
{
super(message);
}
/**
 * Creates an exception.
 * @param cause cause associated with the exception
*/
public MPIException(Throwable cause)
{
super(cause);
setStackTrace(cause.getStackTrace());
}
/**
* Gets the MPI error code.
* @return error code
*/
public int getErrorCode()
{
return errorCode;
}
/**
* Gets the MPI error class.
* @return error class
*/
public int getErrorClass()
{
return errorClass;
}
} // MPIException
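
With error codes and classes now carried on the exception, callers can branch on the class instead of parsing messages. A minimal sketch; the getRank() call stands in for any binding call and is assumed from elsewhere in the bindings:

try
{
    MPI.COMM_WORLD.getRank();  // assumed entry points: MPI.COMM_WORLD, Comm.getRank()
}
catch(MPIException e)
{
    System.err.println("MPI error " + e.getErrorCode() +
                       " (class " + e.getErrorClass() + "): " + e.getMessage());
}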


@@ -13,29 +13,42 @@
# list them here in EXTRA_DIST so that they get picked up by "make
# dist".

JAVA_SRC_FILES = \
Cartcomm.java \
CartParms.java \
Comm.java \
Datatype.java \
Errhandler.java \
Freeable.java \
Graphcomm.java \
GraphParms.java \
Group.java \
Intercomm.java \
Intracomm.java \
Maxloc.java \
Minloc.java \
MPI.java \
MPIException.java \
Op.java \
Prequest.java \
Request.java \
ShiftParms.java \
Status.java \
User_function.java

JAVA_SRC_FILES = \
CartComm.java \
CartParms.java \
Comm.java \
Constant.java \
Datatype.java \
DistGraphNeighbors.java \
DoubleInt.java \
DoubleComplex.java \
Errhandler.java \
FloatComplex.java \
FloatInt.java \
File.java \
FileView.java \
Freeable.java \
GraphComm.java \
GraphParms.java \
Group.java \
Info.java \
Int2.java \
Intercomm.java \
Intracomm.java \
LongInt.java \
Message.java \
MPI.java \
MPIException.java \
Op.java \
Prequest.java \
Request.java \
ShiftParms.java \
ShortInt.java \
Status.java \
Struct.java \
UserFunction.java \
Win.java

JAVA_CLASS_FILES = $(JAVA_SRC_FILES:%.java=mpi/%.class)
JAVA_CLASS_FILES = $(JAVA_SRC_FILES:%.java=mpi/%.class) mpi/*$*.class

EXTRA_DIST = $(JAVA_SRC_FILES)

# Only do this stuff if we want the Java bindings
@@ -48,21 +61,25 @@ if OMPI_WANT_JAVA_BINDINGS
JAVA_H = \
mpi_MPI.h \
mpi_CartParms.h \
mpi_Cartcomm.h \
mpi_Comm.h \
mpi_Datatype.h \
mpi_Errhandler.h \
mpi_GraphParms.h \
mpi_Graphcomm.h \
mpi_Group.h \
mpi_Intercomm.h \
mpi_Intracomm.h \
mpi_Op.h \
mpi_Prequest.h \
mpi_Request.h \
mpi_ShiftParms.h \
mpi_Status.h \
mpi_User_function.h

JAVA_H = \
mpi_MPI.h \
mpi_CartParms.h \
mpi_CartComm.h \
mpi_Comm.h \
mpi_Constant.h \
mpi_Datatype.h \
mpi_Errhandler.h \
mpi_File.h \
mpi_GraphParms.h \
mpi_GraphComm.h \
mpi_Group.h \
mpi_Info.h \
mpi_Intercomm.h \
mpi_Intracomm.h \
mpi_Message.h \
mpi_Op.h \
mpi_Prequest.h \
mpi_Request.h \
mpi_ShiftParms.h \
mpi_Status.h \
mpi_Win.h

# A little verbosity magic; "make" will show the terse output.  "make
# V=1" will show the actual commands used (just like the other


@@ -1,145 +0,0 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package mpi;
class Maxloc extends User_function{
public void Call(Object invec, int inoffset, Object outvec, int outoffset,
int count, Datatype datatype){
// *** should work also for derived datatypes with following as
// as bases ? ***
if(datatype == MPI.SHORT2) {
short [] in_array = (short[])invec;
short [] out_array = (short[])outvec;
int indisp = inoffset ;
int outdisp = outoffset ;
for (int i = 0; i < count; i++, indisp += 2, outdisp += 2) {
short inval = in_array [indisp] ;
short outval = out_array [outdisp] ;
if(inval > outval) {
out_array [outdisp ] = inval ;
out_array [outdisp + 1] = in_array [outdisp + 1] ;
}
else if(inval == outval) {
short inloc = in_array [indisp + 1] ;
if(inloc < out_array [outdisp + 1])
out_array [outdisp + 1] = inloc ;
}
}
}
else if(datatype == MPI.INT2) {
int [] in_array = (int[])invec;
int [] out_array = (int[])outvec;
int indisp = inoffset ;
int outdisp = outoffset ;
for (int i = 0; i < count; i++, indisp += 2, outdisp += 2){
int inval = in_array [indisp] ;
int outval = out_array [outdisp] ;
if(inval > outval) {
out_array [outdisp ] = inval ;
out_array [outdisp + 1] = in_array [outdisp + 1] ;
}
else if(inval == outval) {
int inloc = in_array [indisp + 1] ;
if(inloc < out_array [outdisp + 1])
out_array [outdisp + 1] = inloc ;
}
}
}
else if(datatype == MPI.LONG2) {
long [] in_array = (long[])invec;
long [] out_array = (long[])outvec;
int indisp = inoffset ;
int outdisp = outoffset ;
for (int i = 0; i < count; i++, indisp += 2, outdisp += 2){
long inval = in_array [indisp] ;
long outval = out_array [outdisp] ;
if(inval > outval) {
out_array [outdisp ] = inval ;
out_array [outdisp + 1] = in_array [outdisp + 1] ;
}
else if(inval == outval) {
long inloc = in_array [indisp + 1] ;
if(inloc < out_array [outdisp + 1])
out_array [outdisp + 1] = inloc ;
}
}
}
else if(datatype == MPI.FLOAT2) {
float [] in_array = (float[])invec;
float [] out_array = (float[])outvec;
int indisp = inoffset ;
int outdisp = outoffset ;
for (int i = 0; i < count; i++, indisp += 2, outdisp += 2){
float inval = in_array [indisp] ;
float outval = out_array [outdisp] ;
if(inval > outval) {
out_array [outdisp ] = inval ;
out_array [outdisp + 1] = in_array [outdisp + 1] ;
}
else if(inval == outval) {
float inloc = in_array [indisp + 1] ;
if(inloc < out_array [outdisp + 1])
out_array [outdisp + 1] = inloc ;
}
}
}
else if(datatype == MPI.DOUBLE2) {
double [] in_array = (double[])invec;
double [] out_array = (double[])outvec;
int indisp = inoffset ;
int outdisp = outoffset ;
for (int i = 0; i < count; i++, indisp += 2, outdisp += 2){
double inval = in_array [indisp] ;
double outval = out_array [outdisp] ;
if(inval > outval) {
out_array [outdisp ] = inval ;
out_array [outdisp + 1] = in_array [outdisp + 1] ;
}
else if(inval == outval) {
double inloc = in_array [indisp + 1] ;
if(inloc < out_array [outdisp + 1])
out_array [outdisp + 1] = inloc ;
}
}
}
else {
System.out.println("MPI.MAXLOC: invalid datatype") ;
try {
MPI.COMM_WORLD.Abort(1);
}
catch(MPIException e) {}
}
}
}

ompi/mpi/java/java/Message.java (new file)

@@ -0,0 +1,129 @@
package mpi;
import java.nio.*;
import static mpi.MPI.isHeapBuffer;
import static mpi.MPI.assertDirectBuffer;
/**
* This class represents {@code MPI_Message}.
*/
public final class Message
{
protected long handle;
private static long NULL, NO_PROC;
static
{
init();
}
private static native void init();
/**
* Creates a {@code MPI_MESSAGE_NULL}.
*/
public Message()
{
handle = NULL;
}
/**
* Tests if the message is {@code MPI_MESSAGE_NULL}.
* @return true if the message is {@code MPI_MESSAGE_NULL}.
*/
public boolean isNull()
{
return handle == NULL;
}
/**
* Tests if the message is {@code MPI_MESSAGE_NO_PROC}.
* @return true if the message is {@code MPI_MESSAGE_NO_PROC}.
*/
public boolean isNoProc()
{
return handle == NO_PROC;
}
/**
* Java binding of {@code MPI_MPROBE}.
* @param source rank of the source
* @param tag message tag
* @param comm communicator
* @return status object
* @throws MPIException
*/
public Status mProbe(int source, int tag, Comm comm) throws MPIException
{
MPI.check();
return mProbe(source, tag, comm.handle);
}
private native Status mProbe(int source, int tag, long comm)
throws MPIException;
/**
* Java binding of {@code MPI_IMPROBE}.
* @param source rank of the source
* @param tag message tag
* @param comm communicator
* @return status object if there is a message, {@code null} otherwise
* @throws MPIException
*/
public Status imProbe(int source, int tag, Comm comm) throws MPIException
{
MPI.check();
return imProbe(source, tag, comm.handle);
}
private native Status imProbe(int source, int tag, long comm)
throws MPIException;
/**
* Java binding of {@code MPI_MRECV}.
* @param buf receive buffer
 * @param count number of elements in receive buffer
 * @param type datatype of each receive buffer element
 * @return status object
 * @throws MPIException
*/
public Status mRecv(Object buf, int count, Datatype type)
throws MPIException
{
MPI.check();
int off = 0;
if(isHeapBuffer(buf))
{
off = ((Buffer)buf).arrayOffset();
buf = ((Buffer)buf).array();
}
Status status = new Status();
mRecv(buf, off, count, type, status);
return status;
}
private native void mRecv(
Object buf, int offset, int count, Datatype type, Status status)
throws MPIException;
/**
* Java binding of {@code MPI_IMRECV}.
* @param buf receive buffer
 * @param count number of elements in receive buffer
* @param type datatype of each receive buffer element
* @return request object
* @throws MPIException
*/
public Request imRecv(Buffer buf, int count, Datatype type)
throws MPIException
{
MPI.check();
assertDirectBuffer(buf);
return new Request(imRecv(buf, count, type.handle));
}
private native long imRecv(Object buf, int count, long type)
throws MPIException;
} // Message
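
A sketch of the matched probe/receive sequence this class enables; MPI.COMM_WORLD, MPI.INT, and the Status.getCount accessor are assumed from elsewhere in the bindings:

Message msg = new Message();                    // starts as MPI_MESSAGE_NULL
Status st = msg.mProbe(0, 42, MPI.COMM_WORLD);  // MPI_MPROBE: match exactly once
int count = st.getCount(MPI.INT);               // assumed Status accessor
int[] data = new int[count];
msg.mRecv(data, count, MPI.INT);                // MPI_MRECV: receive that message only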


@@ -1,142 +0,0 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package mpi;
class Minloc extends User_function{
public void Call(Object invec, int inoffset, Object outvec, int outoffset,
int count, Datatype datatype){
if(datatype == MPI.SHORT2) {
short [] in_array = (short[])invec;
short [] out_array = (short[])outvec;
int indisp = inoffset ;
int outdisp = outoffset ;
for (int i = 0; i < count; i++, indisp += 2, outdisp += 2){
short inval = in_array [indisp] ;
short outval = out_array [outdisp] ;
if(inval < outval) {
out_array [outdisp ] = inval ;
out_array [outdisp + 1] = in_array [outdisp + 1] ;
}
else if(inval == outval) {
short inloc = in_array [indisp + 1] ;
if(inloc < out_array [outdisp + 1])
out_array [outdisp + 1] = inloc ;
}
}
}
else if(datatype == MPI.INT2) {
int [] in_array = (int[])invec;
int [] out_array = (int[])outvec;
int indisp = inoffset ;
int outdisp = outoffset ;
for (int i = 0; i < count; i++, indisp += 2, outdisp += 2){
int inval = in_array [indisp] ;
int outval = out_array [outdisp] ;
if(inval < outval) {
out_array [outdisp ] = inval ;
out_array [outdisp + 1] = in_array [outdisp + 1] ;
}
else if(inval == outval) {
int inloc = in_array [indisp + 1] ;
if(inloc < out_array [outdisp + 1])
out_array [outdisp + 1] = inloc ;
}
}
}
else if(datatype == MPI.LONG2) {
long [] in_array = (long[])invec;
long [] out_array = (long[])outvec;
int indisp = inoffset ;
int outdisp = outoffset ;
for (int i = 0; i < count; i++, indisp += 2, outdisp += 2){
long inval = in_array [indisp] ;
long outval = out_array [outdisp] ;
if(inval < outval) {
out_array [outdisp ] = inval ;
out_array [outdisp + 1] = in_array [outdisp + 1] ;
}
else if(inval == outval) {
long inloc = in_array [indisp + 1] ;
if(inloc < out_array [outdisp + 1])
out_array [outdisp + 1] = inloc ;
}
}
}
else if(datatype == MPI.FLOAT2) {
float [] in_array = (float[])invec;
float [] out_array = (float[])outvec;
int indisp = inoffset ;
int outdisp = outoffset ;
for (int i = 0; i < count; i++, indisp += 2, outdisp += 2){
float inval = in_array [indisp] ;
float outval = out_array [outdisp] ;
if(inval < outval) {
out_array [outdisp ] = inval ;
out_array [outdisp + 1] = in_array [outdisp + 1] ;
}
else if(inval == outval) {
float inloc = in_array [indisp + 1] ;
if(inloc < out_array [outdisp + 1])
out_array [outdisp + 1] = inloc ;
}
}
}
else if(datatype == MPI.DOUBLE2) {
double [] in_array = (double[])invec;
double [] out_array = (double[])outvec;
int indisp = inoffset ;
int outdisp = outoffset ;
for (int i = 0; i < count; i++, indisp += 2, outdisp += 2){
double inval = in_array [indisp] ;
double outval = out_array [outdisp] ;
if(inval < outval) {
out_array [outdisp ] = inval ;
out_array [outdisp + 1] = in_array [outdisp + 1] ;
}
else if(inval == outval) {
double inloc = in_array [indisp + 1] ;
if(inloc < out_array [outdisp + 1])
out_array [outdisp + 1] = inloc ;
}
}
}
else {
System.out.println("MPI.MINLOC: invalid datatype") ;
try {
MPI.COMM_WORLD.Abort(1);
}
catch(MPIException e) {}
}
}
}


@@ -22,70 +22,101 @@
*/
package mpi;

//import mpi.*;

public class Op extends Freeable {
private final static int NULL = 0;
private final static int MAX = 1;
private final static int MIN = 2;
private final static int SUM = 3;
private final static int PROD = 4;
private final static int LAND = 5;
private final static int BAND = 6;
private final static int LOR = 7;
private final static int BOR = 8;
private final static int LXOR = 9;
private final static int BXOR =10;
private final static int MINLOC=11;
private final static int MAXLOC=12;

private static native void init();

private User_function uf = null ;

protected Op(int Type) { GetOp(Type);}
/**
* Bind a user-defined global reduction operation to an <tt>Op</tt> object.
* <p>
* <table>
* <tr><td><tt> function </tt></td><td> user defined function </tr>
* <tr><td><tt> commute </tt></td><td> <tt>true</tt> if commutative,
* <tt>false</tt> otherwise </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_OP_CREATE</tt>.
*/
public Op(User_function function, boolean commute) throws MPIException {
uf = function;
}
protected boolean isUser() {
return uf != null ;
}
public final void Call(Object invec, int inoffset,
Object outvec, int outoffset,
int count, Datatype datatype) {
uf.Call(invec, inoffset, outvec, outoffset, count, datatype);
}
private native void GetOp(int Type);
protected long handle ;
@SuppressWarnings("unchecked")
public void finalize() throws MPIException {
synchronized(MPI.class) {
MPI.freeList.addFirst(this) ;
}
}
native void free() ;
static {
init();
}
}

import java.nio.*;

/**
 * This class represents {@code MPI_Op}.
 */
public final class Op implements Freeable
{
private final static int NULL   = 0;
private final static int MAX    = 1;
private final static int MIN    = 2;
private final static int SUM    = 3;
private final static int PROD   = 4;
private final static int LAND   = 5;
private final static int BAND   = 6;
private final static int LOR    = 7;
private final static int BOR    = 8;
private final static int LXOR   = 9;
private final static int BXOR   = 10;
private final static int MINLOC = 11;
private final static int MAXLOC = 12;

private UserFunction uf = null;
private boolean commute;
private Datatype datatype;
private long handle;

static
{
init();
}
private static native void init();
protected Op(int type)
{
getOp(type);
commute = true;
}
private native void getOp(int type);
/**
* Bind a user-defined global reduction operation to an {@code Op} object.
* <p>Java binding of the MPI operation {@code MPI_OP_CREATE}.
* @param function user defined function
* @param commute {@code true} if commutative, {@code false} otherwise
*/
public Op(UserFunction function, boolean commute)
{
handle = 0; // When JNI code gets the handle it will be initialized.
uf = function;
this.commute = commute;
}
protected void setDatatype(Datatype t)
{
datatype = t;
}
protected void call(Object invec, Object inoutvec, int count)
throws MPIException
{
if(datatype.baseType == Datatype.BOOLEAN)
{
uf.call(invec, inoutvec, count, datatype);
}
else
{
uf.call(((ByteBuffer)invec).order(ByteOrder.nativeOrder()),
((ByteBuffer)inoutvec).order(ByteOrder.nativeOrder()),
count, datatype);
}
}
/**
 * Test if the operation is commutative.
* <p>Java binding of the MPI operation {@code MPI_OP_COMMUTATIVE}.
* @return {@code true} if commutative, {@code false} otherwise
*/
public boolean isCommutative()
{
return commute;
}
/**
* Java binding of the MPI operation {@code MPI_OP_FREE}.
* @throws MPIException
*/
@Override public native void free() throws MPIException;
/**
* Test if operation object is null.
* @return true if the operation object is null, false otherwise
*/
public native boolean isNull();
} // Op
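
A hedged sketch of defining a commutative reduction through the new buffer-based hook. The UserFunction class itself is not in this hunk, so the exact overriding requirements are an assumption (here: only the ByteBuffer variant needs to be overridden):

Op intSum = new Op(new UserFunction()
{
    @Override public void call(ByteBuffer in, ByteBuffer inOut,
                               int count, Datatype type)
    {
        for(int i = 0; i < count; i++)  // element-wise int sum, 4-byte stride
            inOut.putInt(4 * i, inOut.getInt(4 * i) + in.getInt(4 * i));
    }
}, /* commute = */ true);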


@@ -33,185 +33,69 @@
 * almost permanently, which presumably isn't a good thing.)
 */
package mpi;

public class Prequest extends Request {

protected final static int MODE_STANDARD = 0 ;
protected final static int MODE_BUFFERED = 1 ;
protected final static int MODE_SYNCHRONOUS = 2 ;
protected final static int MODE_READY = 3 ;
private int src ;
/**
* Constructor used by `Send_init', etc.
*/
protected Prequest(int mode,
Object buf, int offset, int count, Datatype type,
int dest, int tag, Comm comm) {
opTag = Request.OP_SEND ;
this.mode = mode ;
this.buf = buf;
this.offset = offset;
this.count = count;
this.type = type;
this.dest = dest;
this.tag = tag;
this.comm = comm ;
if(type.isObject()) {
typeTag = Request.TYPE_OBJECT ;
length_buf = new int [2] ;
hdrReq = new Request() ;
}
else
typeTag = Request.TYPE_NORMAL ;
}
/**
* Constructor used by `Recv_init'.
*/
protected Prequest(Object buf, int offset, int count, Datatype type,
int source, int tag, Comm comm) {
opTag = Request.OP_RECV ;
this.buf = buf;
this.offset = offset;
this.count = count;
this.type = type;
this.src = source;
this.tag = tag;
this.comm = comm;
if(type.isObject()) {
typeTag = Request.TYPE_OBJECT ;
length_buf = new int [2] ;
}
else
typeTag = Request.TYPE_NORMAL ;
}
/**
* Activate a persistent communication request.
* Java binding of the MPI operation <tt>MPI_START</tt>.
* The communication is completed by using the request in
* one of the <tt>wait</tt> or <tt>test</tt> operations.
* On successful completion the request becomes inactive again.
* It can be reactivated by a further call to <tt>Start</tt>.
*/
public void Start() throws MPIException {
switch(typeTag) {
case TYPE_NORMAL :
switch(opTag) {
case OP_SEND :
switch(mode) {
case MODE_STANDARD :
comm.Isend(buf, offset, count, type, dest, tag, this);
break;
case MODE_BUFFERED :
comm.Ibsend(buf, offset, count, type, dest, tag, this);
break;
case MODE_SYNCHRONOUS :
comm.Issend(buf, offset, count, type, dest, tag, this);
break;
case MODE_READY :
comm.Irsend(buf, offset, count, type, dest, tag, this);
break;
}
break ;
case OP_RECV :
comm.Irecv(buf, offset, count, type, src, tag, this) ;
break ;
}
break ;
case TYPE_OBJECT :
switch(opTag) {
case OP_SEND :
byte [] byte_buf = comm.Object_Serialize(buf,offset,count,type);
length_buf[0] = byte_buf.length;
length_buf[1] = count ;
switch(mode) {
case MODE_STANDARD :
comm.Isend(length_buf, 0, 2, MPI.INT, dest, tag, hdrReq) ;
comm.Isend(byte_buf, 0, byte_buf.length,
MPI.BYTE, dest, tag, this);
break;
case MODE_BUFFERED :
comm.Ibsend(length_buf, 0, 2, MPI.INT, dest, tag, hdrReq) ;
comm.Ibsend(byte_buf, 0, byte_buf.length,
MPI.BYTE, dest, tag, this);
break;
case MODE_SYNCHRONOUS :
comm.Issend(length_buf, 0, 2, MPI.INT, dest, tag, hdrReq) ;
comm.Isend(byte_buf, 0, byte_buf.length,
MPI.BYTE, dest, tag, this);
break;
case MODE_READY :
comm.Irsend(length_buf, 0, 2, MPI.INT, dest, tag, hdrReq) ;
comm.Isend(byte_buf, 0, byte_buf.length,
MPI.BYTE, dest, tag, this);
break;
}
break ;
case OP_RECV :
comm.Irecv(length_buf, 0, 2, MPI.INT, src, tag, this) ;
break ;
}
break ;
}
}
//private native void start();
/**
* Activate a list of communication requests.
* <p>
* <table>
* <tr><td><tt> array_of_requests </tt></td><td> array of requests </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_STARTALL</tt>.
*/
public static void Startall(Prequest [] array_of_request)
throws MPIException {
int req_length = array_of_request.length ;
for (int i = 0; i<req_length; i++)
array_of_request[i].Start() ;
}
//private static native void startall(Prequest [] array_of_request);
}

/**
 * Persistent request object.
 */
public final class Prequest extends Request
{
protected final static int MODE_STANDARD = 0;
protected final static int MODE_BUFFERED = 1;
protected final static int MODE_SYNCHRONOUS = 2;
protected final static int MODE_READY = 3;

private int mode, offset, count, src, dest, tag;
private Object buf;
private Datatype type;
private Comm comm;

/**
 * Constructor used by {@code sendInit}, etc.
 */
protected Prequest(long handle)
{
super(handle);
}
/**
* Activate a persistent communication request.
* <p>Java binding of the MPI operation {@code MPI_START}.
* The communication is completed by using the request in
* one of the {@code wait} or {@code test} operations.
* On successful completion the request becomes inactive again.
 * It can be reactivated by a further call to {@code start}.
*/
public void start() throws MPIException
{
start_jni();
}
private native void start_jni() throws MPIException;
/**
* Activate a list of communication requests.
* <p>Java binding of the MPI operation {@code MPI_STARTALL}.
* @param requests array of requests
* @throws MPIException
*/
public static void startAll(Prequest[] requests) throws MPIException
{
MPI.check();
startAll_jni(requests);
}
private native static void startAll_jni(Prequest[] requests)
throws MPIException;
/**
* Set the request object to be void.
* Java binding of the MPI operation {@code MPI_REQUEST_FREE}.
*/
@Override public void free() throws MPIException
{
buf = null;
super.free();
}
} // Prequest
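
A sketch of the persistent-request life cycle; Comm.sendInit is assumed to be the factory the constructor comment refers to (it is not shown in this hunk), and buf, count, dest, tag, and steps are declared elsewhere:

Prequest req = MPI.COMM_WORLD.sendInit(buf, count, MPI.INT, dest, tag);  // assumed factory
for(int iter = 0; iter < steps; iter++)
{
    req.start();    // MPI_START: reactivate the same send
    req.waitFor();  // complete it; the request becomes inactive again
}
req.free();         // MPI_REQUEST_FREE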


@@ -11,7 +11,7 @@
See the License for the specific language governing permissions and
limitations under the License.
*/

/* File : Request.java
 * Author : Sang Lim, Xinying Li, Bryan Carpenter
 * Created : Thu Apr 9 12:22:15 1998
 * Revision : $Revision: 1.11 $
@@ -39,390 +39,359 @@
package mpi;

public class Request {

protected final static int NULL = 0;

protected final static int TYPE_NORMAL = 0;
protected final static int TYPE_OBJECT = 1;

protected final static int OP_SEND = 0;
protected final static int OP_RECV = 1;
protected Request hdrReq ;
protected int typeTag = TYPE_NORMAL ;
protected int opTag ;
protected int mode ;
protected Object buf;
protected int offset;
protected int count;
protected Datatype type;
protected int dest;
protected int tag;
protected Comm comm;
protected int[] length_buf;
private static native void init();
private native void GetReq(int Type);
protected Request() {}
protected Request(int Type) { GetReq(Type); }
/**
* Constructor used by <tt>Isend</tt>, etc.
*/
protected Request(Request hdrReq) {
typeTag = Request.TYPE_OBJECT ;
opTag = Request.OP_SEND ;
this.hdrReq = hdrReq ;
}
/**
* Constructor used by <tt>Irecv</tt>.
*/
protected Request(Object buf, int offset, int count, Datatype type,
int tag, Comm comm,
int [] length_buf) {
typeTag = Request.TYPE_OBJECT ;
opTag = Request.OP_RECV ;
this.buf = buf;
this.offset = offset;
this.count = count;
this.type = type;
this.tag = tag;
this.comm = comm;
this.length_buf = length_buf;
}
/**
* Set the request object to be void.
* Java binding of the MPI operation <tt>MPI_REQUEST_FREE</tt>.
*/
public native void Free() throws MPIException ;
/**
* Mark a pending nonblocking communication for cancellation.
* Java binding of the MPI operation <tt>MPI_CANCEL</tt>.
*/
public native void Cancel() throws MPIException ;
/**
* Test if request object is void.
* <p>
* <table>
* <tr><td><em> returns: </em></td><td> true if the request object is void,
* false otherwise </tr>
* </table>
*/
public native boolean Is_null();
/*
* After initial wait succeeds with some status, complete as necessary.
*/
private Status complete(Status status) throws MPIException {
switch(typeTag) {
case TYPE_NORMAL :
break;
case TYPE_OBJECT :
switch(opTag) {
case OP_SEND :
hdrReq.Wait(new Status()) ; // Data has already gone, but must
// still do `wait' on header send.
break;
case OP_RECV :
int index = status.index ;
// Header has arrived, now read the actual data.
byte[] byte_buf = new byte[length_buf[0]];
status = comm.Recv(byte_buf, 0, length_buf[0], MPI.BYTE,
status.source, tag) ;
comm.Object_Deserialize(buf, byte_buf, offset, length_buf[1],
type);
status.object_count = length_buf[1];
status.index = index ;
break;
}
break ;
}
return status ;
}
/**
* Blocks until the operation identified by the request is complete.
* <p>
* <table>
* <tr><td><em> returns: </em></td><td> status object </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_WAIT</tt>.
* <p>
* After the call returns, the request object becomes inactive.
*/
public Status Wait() throws MPIException {
Status result = new Status();
Wait(result);
return complete(result) ;
}
private native Status Wait(Status stat);
/**
* Returns a status object if the operation identified by the request
* is complete, or a null reference otherwise.
* <p>
* <table>
* <tr><td><em> returns: </em></td><td> status object or null reference </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_TEST</tt>.
* <p>
* After the call, if the operation is complete (ie, if the return value
* is non-null), the request object becomes inactive.
*/
public Status Test() throws MPIException {
Status result = new Status();
if (Test(result) == null)
return null;
else
return complete(result) ;
}
private native Status Test(Status stat);
/**
* Blocks until one of the operations associated with the active
* requests in the array has completed.
* <p>
* <table>
* <tr><td><tt> array_of_requests </tt></td><td> array of requests </tr>
* <tr><td><em> returns: </em></td><td> status object </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_WAITANY</tt>.
* <p>
* The index in <tt>array_of_requests</tt> for the request that completed
* can be obtained from the returned status object through the
* <tt>Status.index</tt> field. The corresponding element
* of <tt>array_of_requests</tt> becomes inactive.
*/
public static Status Waitany(Request [] array_of_request)
throws MPIException {
Status result = new Status();
Waitany(array_of_request, result);
if(result == null)
return null;
else
return array_of_request[result.index].complete(result) ;
}
private static native Status Waitany(Request [] array_of_request,
Status stat);
/**
* Tests for completion of either one or none of the operations associated
* with active requests.
* <p>
* <table>
* <tr><td><tt> array_of_requests </tt></td><td> array of requests </tr>
* <tr><td><em> returns: </em></td><td> status object or
* null reference </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_TESTANY</tt>.
* <p>
* If some request completed, the index in <tt>array_of_requests</tt>
* for that request can be obtained from the returned status object
 * through the <tt>Status.index</tt> field. The corresponding element of <tt>array_of_requests</tt>
* becomes inactive.
* If no request completed, <tt>Testany</tt> returns a null reference.
*/
public static Status Testany(Request [] array_of_request)
throws MPIException {
Status result = new Status();
result = Testany(array_of_request, result);
if(result == null)
return null;
else
return array_of_request[result.index].complete(result) ;
}
private static native Status Testany(Request [] array_of_request,
Status stat);
/**
* Blocks until all of the operations associated with the active
* requests in the array have completed.
* <p>
* <table>
* <tr><td><tt> array_of_requests </tt></td><td> array of requests </tr>
* <tr><td><em> returns: </em></td><td> array of status objects </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_WAITALL</tt>.
* <p>
* The result array will be the same size as <tt>array_of_requests</tt>.
* On exit, requests become inactive. If the <em>input</em> value of
* <tt>arrayOfRequests</tt> contains inactive requests, corresponding
* elements of the result array will contain null status references.
*/
public static Status[] Waitall (Request [] array_of_request)
throws MPIException {
Status result[] = waitall(array_of_request);
for (int i = 0 ; i < array_of_request.length ; i++)
result [i] = array_of_request [i].complete(result [i]) ;
return result;
}
private static native Status[] waitall(Request [] array_of_request);
/**
* Tests for completion of <em>all</em> of the operations associated
* with active requests.
* <p>
* <table>
* <tr><td><tt> array_of_requests </tt></td><td> array of requests </tr>
* <tr><td><em> returns: </em></td><td> array of status objects </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_TESTALL</tt>.
* <p>
* If all operations have completed, the exit value of the argument array
* and the result array are as for <tt>Waitall</tt>. If any
* operation has not completed, the result value is null and no
* element of the argument array is modified.
*/
public static Status[] Testall(Request [] array_of_request)
throws MPIException {
Status result[] = testall(array_of_request);
if (result == null)
return null;
else {
for (int i = 0 ; i < array_of_request.length ; i++)
result [i] = array_of_request [i].complete(result [i]) ;
return result;
}
}
private static native Status[] testall(Request [] array_of_request);
/**
* Blocks until at least one of the operations associated with the active
* requests in the array has completed.
* <p>
* <table>
* <tr><td><tt> array_of_requests </tt></td><td> array of requests </tr>
* <tr><td><em> returns: </em></td><td> array of status objects </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_WAITSOME</tt>.
* <p>
* The size of the result array will be the number of operations that
* completed. The index in <tt>array_of_requests</tt> for each request that
* completed can be obtained from the returned status objects through the
* <tt>Status.index</tt> field. The corresponding element in
* <tt>array_of_requests</tt> becomes inactive.
*/
public static Status[] Waitsome(Request [] array_of_request)
throws MPIException {
Status result[] = waitsome(array_of_request);
for (int i = 0 ; i < result.length ; i++)
result [i] = array_of_request [result [i].index].complete(result [i]) ;
return result;
}
private static native Status[] waitsome(Request [] array_of_request);
/**
* Behaves like <tt>Waitsome</tt>, except that it returns immediately.
* <p>
* <table>
* <tr><td><tt> array_of_requests </tt></td><td> array of requests </tr>
* <tr><td><em> returns: </em></td><td> array of status objects </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_TESTSOME</tt>.
* <p>
* If no operation has completed, <tt>TestSome</tt> returns an array of
* length zero and elements of <tt>array_of_requests</tt> are unchanged.
* Otherwise, arguments and return value are as for <tt>Waitsome</tt>.
*/
public static Status[] Testsome(Request [] array_of_request)
throws MPIException {
Status result[] = testsome(array_of_request);
if (result == null)
return null;
else {
for (int i = 0 ; i < result.length ; i++)
result [i] = array_of_request [result [i].index].complete(result [i]) ;
return result;
}
}
private static native Status[] testsome(Request [] array_of_request);
// Fields manipulated only by native methods...
protected long handle;
// `bufSave', etc, not generally the same as `buf', etc.
// In `MPJ.OBJECT' receive case `buf', etc, refer to the array of objects,
// `bufSave', etc refer to header buffer.
protected Object bufSave ;
protected int countSave, offsetSave ;
protected long bufbaseSave, bufptrSave ;
protected int baseTypeSave ;
protected long commSave, typeSave ;
static {
init();
}
}

// Things to do
//
// What happens to `Cancel' in the object case?

/**
 * Request object.
 */
public class Request implements Freeable
{
protected long handle;

static
{
init();
}

private static native void init();
protected static native long getNull();
protected Request(long handle)
{
this.handle = handle;
}
/**
* Set the request object to be void.
* Java binding of the MPI operation {@code MPI_REQUEST_FREE}.
*/
@Override public void free() throws MPIException
{
MPI.check();
handle = free(handle);
}
private native long free(long req) throws MPIException;
/**
* Mark a pending nonblocking communication for cancellation.
* Java binding of the MPI operation {@code MPI_CANCEL}.
*/
public final void cancel() throws MPIException
{
MPI.check();
cancel_jni();
}
private native void cancel_jni() throws MPIException;
/**
* Test if request object is null.
* @return true if the request object is null, false otherwise
*/
public final native boolean isNull();
/**
* Blocks until the operation identified by the request is complete.
* <p>Java binding of the MPI operation {@code MPI_WAIT}.
* <p>After the call returns, the request object becomes inactive.
* @return status object
* @throws MPIException
*/
public final Status waitStatus() throws MPIException
{
MPI.check();
Status stat = new Status();
waitStatus(stat);
return stat;
}
private native void waitStatus(Status stat) throws MPIException;
/**
* Blocks until the operation identified by the request is complete.
* <p>Java binding of the MPI operation {@code MPI_WAIT}.
* <p>After the call returns, the request object becomes inactive.
* @throws MPIException
*/
public final void waitFor() throws MPIException
{
MPI.check();
waitNoStatus();
}
private native void waitNoStatus() throws MPIException;
/**
* Returns a status object if the operation identified by the request
* is complete, or a null reference otherwise.
* <p>Java binding of the MPI operation {@code MPI_TEST}.
* <p>After the call, if the operation is complete (i.e., if the return
* value is non-null), the request object becomes inactive.
* @return status object
* @throws MPIException
*/
public final Status testStatus() throws MPIException
{
MPI.check();
return testStatus_jni();
}
private native Status testStatus_jni() throws MPIException;
/**
* Returns true if the operation identified by the request
* is complete, or false otherwise.
* <p>Java binding of the MPI operation {@code MPI_TEST}.
* <p>After the call, if the operation is complete (i.e., if the return
* value is true), the request object becomes inactive.
* @return true if the operation identified by the request is complete,
* false otherwise
* @throws MPIException
*/
public final boolean test() throws MPIException
{
MPI.check();
return testNoStatus();
}
private native boolean testNoStatus() throws MPIException;
/**
* Blocks until one of the operations associated with the active
* requests in the array has completed.
* <p>Java binding of the MPI operation {@code MPI_WAITANY}.
* <p>The index in array of {@code requests} for the request that
* completed can be obtained from the returned status object through
* the {@code Status.getIndex()} method. The corresponding element
* of array of {@code requests} becomes inactive.
* @param requests array of requests
* @return status object
* @throws MPIException
*/
public static Status waitAnyStatus(Request[] requests) throws MPIException
{
MPI.check();
Status stat = new Status();
waitAnyStatus(requests, stat);
return stat;
}
private static native void waitAnyStatus(Request[] requests, Status stat)
throws MPIException;
/**
* Blocks until one of the operations associated with the active
* requests in the array has completed.
* <p>Java binding of the MPI operation {@code MPI_WAITANY}.
* <p>The request that completed becomes inactive.
* @param requests array of requests
* @return The index in array of {@code requests} for the request that
* completed. If all of the requests are MPI_REQUEST_NULL, then index
* is returned as {@code MPI.UNDEFINED}.
* @throws MPIException
*/
public static int waitAny(Request[] requests) throws MPIException
{
MPI.check();
return waitAnyNoStatus(requests);
}
private static native int waitAnyNoStatus(Request[] requests)
throws MPIException;
/**
* Tests for completion of either one or none of the operations
* associated with active requests.
* <p>Java binding of the MPI operation {@code MPI_TESTANY}.
* <p>If some request completed, the index in array of {@code requests}
* for that request can be obtained from the returned status object.
* The corresponding element in array of {@code requests} becomes inactive.
* If no request completed, {@code testAny} returns {@code null}.
* @param requests array of requests
* @return status object if one request completed, {@code null} otherwise.
* @throws MPIException
*/
public static Status testAnyStatus(Request[] requests) throws MPIException
{
MPI.check();
return testAnyStatus_jni(requests);
}
private static native Status testAnyStatus_jni(Request[] requests)
throws MPIException;
/**
* Tests for completion of either one or none of the operations
* associated with active requests.
* <p>Java binding of the MPI operation {@code MPI_TESTANY}.
* <p>If some request completed, it becomes inactive.
* @param requests array of requests
* @return index of operation that completed, or {@code MPI.UNDEFINED}
* if none completed.
* @throws MPIException
*/
public static int testAny(Request[] requests) throws MPIException
{
MPI.check();
return testAnyNoStatus(requests);
}
private static native int testAnyNoStatus(Request[] requests)
throws MPIException;
/**
* Blocks until all of the operations associated with the active
* requests in the array have completed.
* <p>Java binding of the MPI operation {@code MPI_WAITALL}.
* <p>On exit, requests become inactive. If the <em>input</em> value of
* array of {@code requests} contains inactive requests, corresponding
* elements of the status array will contain null status references.
* @param requests array of requests
* @return array of statuses
* @throws MPIException
*/
public static Status[] waitAllStatus(Request[] requests) throws MPIException
{
MPI.check();
Status[] statuses = new Status[requests.length];
waitAllStatus(requests, statuses);
return statuses;
}
private static native void waitAllStatus(Request[] requests, Status[] statuses)
throws MPIException;
/**
* Blocks until all of the operations associated with the active
* requests in the array have completed.
* <p>Java binding of the MPI operation {@code MPI_WAITALL}.
* @param requests array of requests
* @throws MPIException
*/
public static void waitAll(Request[] requests) throws MPIException
{
MPI.check();
waitAllNoStatus(requests);
}
private static native void waitAllNoStatus(Request[] requests)
throws MPIException;
/**
* Tests for completion of <em>all</em> of the operations associated
* with active requests.
* <p>Java binding of the MPI operation {@code MPI_TESTALL}.
* <p>If all operations have completed, the exit value of the argument array
* is as for {@code waitAllStatus}.
* @param requests array of requests
* @return array of statuses if all operations have completed,
* {@code null} otherwise.
* @throws MPIException
*/
public static Status[] testAllStatus(Request[] requests) throws MPIException
{
MPI.check();
return testAllStatus_jni(requests);
}
private static native Status[] testAllStatus_jni(Request[] requests)
throws MPIException;
/**
* Tests for completion of <em>all</em> of the operations associated
* with active requests.
* <p>Java binding of the MPI operation {@code MPI_TESTALL}.
* @param requests array of requests
* @return {@code true} if all operations have completed,
* {@code false} otherwise.
* @throws MPIException
*/
public static boolean testAll(Request[] requests) throws MPIException
{
MPI.check();
return testAllNoStatus(requests);
}
private static native boolean testAllNoStatus(Request[] requests)
throws MPIException;
/**
* Blocks until at least one of the operations associated with the active
* requests in the array has completed.
* <p>Java binding of the MPI operation {@code MPI_WAITSOME}.
* <p>The size of the result array will be the number of operations that
* completed. The index in array of {@code requests} for each request that
* completed can be obtained from the returned status objects through the
* {@code Status.getIndex()} method. The corresponding element in
* array of {@code requests} becomes inactive.
* @param requests array of requests
* @return array of statuses or {@code null} if the number of operations
* completed is {@code MPI_UNDEFINED}.
* @throws MPIException
*/
public static Status[] waitSomeStatus(Request[] requests) throws MPIException
{
MPI.check();
return waitSomeStatus_jni(requests);
}
private static native Status[] waitSomeStatus_jni(Request[] requests)
throws MPIException;
/**
* Blocks until at least one of the operations associated with the active
* requests in the array has completed.
* <p>Java binding of the MPI operation {@code MPI_WAITSOME}.
* <p>The size of the result array will be the number of operations that
* completed. The corresponding element in array of {@code requests} becomes
* inactive.
* @param requests array of requests
* @return array of indexes of {@code requests} that completed or {@code null}
* if the number of operations completed is {@code MPI_UNDEFINED}.
* @throws MPIException
*/
public static int[] waitSome(Request[] requests) throws MPIException
{
MPI.check();
return waitSomeNoStatus(requests);
}
private static native int[] waitSomeNoStatus(Request[] requests)
throws MPIException;
/**
* Behaves like {@code waitSome}, except that it returns immediately.
* <p>Java binding of the MPI operation {@code MPI_TESTSOME}.
* <p>If no operation has completed, {@code testSome} returns an array of
* length zero, otherwise the return values are as for {@code waitSome}.
* @param requests array of requests
* @return array of statuses
* @throws MPIException
*/
public static Status[] testSomeStatus(Request[] requests) throws MPIException
{
MPI.check();
return testSomeStatus_jni(requests);
}
private static native Status[] testSomeStatus_jni(Request[] requests)
throws MPIException;
/**
* Behaves like {@code waitSome}, except that it returns immediately.
* <p>Java binding of the MPI operation {@code MPI_TESTSOME}.
* <p>If no operation has completed, {@code testSome} returns an array of
* length zero, otherwise the return values are as for {@code waitSome}.
* @param requests array of requests
* @return array of indexes of {@code requests} that completed.
* @throws MPIException
*/
public static int[] testSome(Request[] requests) throws MPIException
{
MPI.check();
return testSomeNoStatus(requests);
}
private static native int[] testSomeNoStatus(Request[] requests)
throws MPIException;
} // Request
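
A minimal usage sketch (not part of the commit itself): a nonblocking ring exchange completed with waitAllStatus. It assumes the camel-case point-to-point methods iSend/iRecv on Comm and the MPI.newIntBuffer helper for allocating direct buffers; check the promised README for the final names.

import java.nio.IntBuffer;
import mpi.*;

public class RingExample
{
    public static void main(String[] args) throws MPIException
    {
        MPI.Init(args);
        int rank = MPI.COMM_WORLD.getRank(),
            size = MPI.COMM_WORLD.getSize();

        // Nonblocking transfers require direct buffers.
        IntBuffer sendBuf = MPI.newIntBuffer(1),
                  recvBuf = MPI.newIntBuffer(1);
        sendBuf.put(0, rank);

        // Post the receive and the send, then complete both at once.
        Request[] requests = {
            MPI.COMM_WORLD.iRecv(recvBuf, 1, MPI.INT, (rank + size - 1) % size, 0),
            MPI.COMM_WORLD.iSend(sendBuf, 1, MPI.INT, (rank + 1) % size, 0)
        };

        Status[] statuses = Request.waitAllStatus(requests);
        System.out.println(rank + " received " + recvBuf.get(0) +
                           " from rank " + statuses[0].getSource());
        MPI.Finalize();
    }
}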

ompi/mpi/java/java/ShiftParms.java
@@ -23,7 +23,36 @@

package mpi;
public class ShiftParms {
public int rank_source;
public int rank_dest;
}

/**
* Source and destination ranks for "shift" communication.
*/
public final class ShiftParms
{
private final int rankSource;
private final int rankDest;

protected ShiftParms(int rankSource, int rankDest)
{
this.rankSource = rankSource;
this.rankDest = rankDest;
}
/**
* Gets the source rank.
* @return source rank
*/
public int getRankSource()
{
return rankSource;
}
/**
* Gets the destination rank.
* @return destination rank
*/
public int getRankDest()
{
return rankDest;
}
} // ShiftParms
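
A sketch of how ShiftParms is meant to be consumed, assuming the createCart and shift bindings of MPI_CART_CREATE and MPI_CART_SHIFT added by this commit (method names per the new camel-case convention):

import mpi.*;

class ShiftExample
{
    // Assumes MPI.Init has already been called.
    static void ringShift() throws MPIException
    {
        // Periodic 1-D Cartesian topology over all processes.
        CartComm cart = MPI.COMM_WORLD.createCart(
            new int[] {MPI.COMM_WORLD.getSize()}, new boolean[] {true}, false);

        ShiftParms shift = cart.shift(0, 1);   // dimension 0, displacement +1
        int[] sendBuf = { cart.getRank() }, recvBuf = new int[1];

        // Send towards the destination rank, receive from the source rank.
        cart.sendRecv(sendBuf, 1, MPI.INT, shift.getRankDest(),   0,
                      recvBuf, 1, MPI.INT, shift.getRankSource(), 0);
    }
}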

ompi/mpi/java/java/ShortInt.java (new file)
@@ -0,0 +1,111 @@
package mpi;
/**
* Java binding of the MPI data type {@code MPI_SHORT_INT}.
*/
public final class ShortInt extends Struct
{
private final int sSize, iOff, iSize;
/** The struct will be created only in MPI class. */
protected ShortInt(int shortSize, int intOff, int intSize)
{
sSize = shortSize;
iSize = intSize;
int sOff;
switch(sSize)
{
case 2: sOff = addShort(); break;
case 4: sOff = addInt(); break;
case 8: sOff = addLong(); break;
default: throw new AssertionError("Unsupported short size: "+ sSize);
}
assert sOff == 0;
setOffset(intOff);
switch(iSize)
{
case 4: iOff = addInt(); break;
case 8: iOff = addLong(); break;
default: throw new AssertionError("Unsupported int size: "+ iSize);
}
assert(intOff == iOff);
}
/**
* Creates a Data object.
* @return new Data object.
*/
@Override protected Data newData()
{
return new Data();
}
/**
* Class for reading/writing data in a struct stored in a byte buffer.
*/
public final class Data extends Struct.Data
{
/**
* Gets the short value.
* @return short value
*/
public short getValue()
{
switch(sSize)
{
case 2: return getShort(0);
case 4: return (short)getInt(0);
case 8: return (short)getLong(0);
default: throw new AssertionError();
}
}
/**
* Gets the int value.
* @return int value
*/
public int getIndex()
{
switch(iSize)
{
case 4: return getInt(iOff);
case 8: return (int)getLong(iOff);
default: throw new AssertionError();
}
}
/**
* Puts the short value.
* @param v short value
*/
public void putValue(short v)
{
switch(sSize)
{
case 2: putShort(0, v); break;
case 4: putInt(0, v); break;
case 8: putLong(0, v); break;
default: throw new AssertionError();
}
}
/**
* Puts the int value.
* @param v int value
*/
public void putIndex(int v)
{
switch(iSize)
{
case 4: putInt(iOff, v); break;
case 8: putLong(iOff, v); break;
default: throw new AssertionError();
}
}
} // Data
} // ShortInt
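
A sketch of reading and writing MPI_SHORT_INT pairs, e.g. for a MAXLOC reduction. It assumes the MPI class exposes a ShortInt instance next to the MPI.SHORT_INT datatype (called MPI.shortInt here) plus an MPI.newByteBuffer helper; both names are assumptions to verify against the MPI class:

import java.nio.ByteBuffer;
import mpi.*;

class MaxLocExample
{
    // Find the maximum value and the rank that owns it.
    static void maxLoc(short myValue) throws MPIException
    {
        // MPI.shortInt / MPI.newByteBuffer are assumed helper members.
        ByteBuffer buf = MPI.newByteBuffer(MPI.shortInt.getExtent());
        ShortInt.Data data = MPI.shortInt.getData(buf);
        data.putValue(myValue);
        data.putIndex(MPI.COMM_WORLD.getRank());

        // In-place reduction over one MPI_SHORT_INT element.
        MPI.COMM_WORLD.allReduce(buf, 1, MPI.SHORT_INT, MPI.MAXLOC);

        System.out.println("max = " + data.getValue() +
                           " at rank " + data.getIndex());
    }
}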

ompi/mpi/java/java/Status.java
@@ -23,99 +23,113 @@

package mpi;
public class Status extends Freeable {

public int index;
public int source;
public int tag;
int elements;
//protected int count;
protected int object_count;

// protected Status(long _handle) { handle = _handle;}

public Status() {alloc() ;}

private native void alloc() ;
@SuppressWarnings("unchecked")
public void finalize() throws MPIException {
synchronized(MPI.class) {
MPI.freeList.addFirst(this) ;
}
}
native void free() ;
/**
* Get the number of received entries.
* <p>
* <table>
* <tr><td><tt> datatype </tt></td><td> datatype of each item in receive
* buffer </tr>
* <tr><td><em> returns: </em></td><td> number of received entries </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_GET_COUNT</tt>.
*/
public int Get_count(Datatype datatype) throws MPIException {
if (datatype.isObject())
return object_count; // Is this correct?
else
return get_count(datatype);
}
private native int get_count(Datatype datatype);
/**
* Test if communication was cancelled.
* <p>
* <table>
* <tr><td><em> returns: </em></td><td> true if the operation was
* succesfully cancelled,
* false otherwise
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_TEST_CANCELLED</tt>.
*/
public native boolean Test_cancelled() throws MPIException ;
/**
* Retrieve number of basic elements from status.
* <p>
* <table>
* <tr><td><tt> datatype </tt></td><td> datatype used by receive
* operation </tr>
* <tr><td><em> returns: </em></td><td> number of received basic
* elements </tr>
* </table>
* <p>
* Java binding of the MPI operation <tt>MPI_GET_ELEMENTS</tt>.
*/
public int Get_elements(Datatype datatype) throws MPIException {
if(datatype.isObject())
return MPI.UNDEFINED; // Is this correct?
else
return get_elements(datatype) ;
}
private native int get_elements(Datatype datatype);
private static native void init();
protected long handle;
static {
init();
}
}
// Things to do
//

/**
* This class represents {@code MPI_Status}.
*/
public final class Status
{
private int source;
private int tag;
private int error;
private int index;
private int elements;
private int _cancelled;
private long _ucount;

static
{
init();
}

/**
* Status objects must be created only by the MPI methods.
*/
protected Status()
{
}
private static native void init();
/**
* Returns the number of received entries.
* <p>Java binding of the MPI operation {@code MPI_GET_COUNT}.
* @param datatype datatype of each item in receive buffer
* @return number of received entries
* @throws MPIException
*/
public int getCount(Datatype datatype) throws MPIException
{
MPI.check();
return getCount_jni(datatype);
}
private native int getCount_jni(Datatype datatype) throws MPIException;
/**
* Tests if the communication was cancelled.
* <p>Java binding of the MPI operation {@code MPI_TEST_CANCELLED}.
* @return true if the operation was successfully cancelled, false otherwise
* @throws MPIException
*/
public boolean isCancelled() throws MPIException
{
MPI.check();
return isCancelled_jni();
}
private native boolean isCancelled_jni() throws MPIException;
/**
* Retrieves the number of basic elements from status.
* <p>Java binding of the MPI operation {@code MPI_GET_ELEMENTS}.
* @param datatype datatype used by receive operation
* @return number of received basic elements
* @throws MPIException
*/
public int getElements(Datatype datatype) throws MPIException
{
MPI.check();
return getElements_jni(datatype);
}
private native int getElements_jni(Datatype datatype) throws MPIException;
/**
* Returns the "source" of message.
* <p>Java binding of the MPI value {@code MPI_SOURCE}.
* @return source of message
*/
public int getSource()
{
return source;
}
/**
* Returns the "tag" of message.
* <p>Java binding of the MPI value {@code MPI_TAG}.
* @return tag of message
*/
public int getTag()
{
return tag;
}
/**
* Returns the {@code MPI_ERROR} of message.
* @return error of message.
*/
public int getError()
{
return error;
}
/**
* Returns the index of message.
* @return index of message.
*/
public int getIndex()
{
return index;
}
} // Status
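
A usage sketch combining the Status accessors with a probe, assuming the camel-case probe/recv bindings on Comm from this commit:

import mpi.*;

class ProbeExample
{
    // Size the receive buffer from a probed message.
    static double[] recvAnySize(int source, int tag) throws MPIException
    {
        Status status = MPI.COMM_WORLD.probe(source, tag);
        int count = status.getCount(MPI.DOUBLE);

        double[] data = new double[count];
        MPI.COMM_WORLD.recv(data, count, MPI.DOUBLE,
                            status.getSource(), status.getTag());
        return data;
    }
}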

ompi/mpi/java/java/Struct.java (new file)
@@ -0,0 +1,776 @@
package mpi;
import java.nio.*;
import java.util.*;
/**
* Base class for defining struct data types.
*/
public abstract class Struct
{
private int extent;
private ArrayList<Field> fields = new ArrayList<Field>();
private Datatype datatype, types[];
private int offsets[], lengths[];
private static final String typeMismatch = "Type mismatch";
private void commit() throws MPIException
{
if(datatype == null)
createStruct();
}
private void createStruct() throws MPIException
{
int count = fields.size();
types = new Datatype[count];
offsets = new int[count];
lengths = new int[count];
for(int i = 0; i < count; i++)
{
Field f = fields.get(i);
types[i] = f.type instanceof Struct ? ((Struct)f.type).datatype
: (Datatype)f.type;
offsets[i] = f.offset;
lengths[i] = f.length;
}
datatype = Datatype.createStruct(lengths, offsets, types);
datatype.commit();
extent = datatype.getExtent();
}
/**
* Returns the extent of the struct data type.
* @return Extent of the struct data type.
* @throws MPIException
*/
public final int getExtent() throws MPIException
{
commit();
return extent;
}
/**
* Returns the data type of the struct.
* @return The data type of the struct.
* @throws MPIException
*/
public final Datatype getType() throws MPIException
{
commit();
return datatype;
}
/**
* Creates a Data object.
* @return New Data object.
*/
protected abstract Data newData();
@SuppressWarnings("unchecked")
private <T extends Data> T newData(ByteBuffer buffer, int offset)
{
Data d = newData();
d.buffer = buffer;
d.offset = offset;
return (T)d;
}
/**
* Gets a Data object in order to access the buffer.
* @param buffer the Data object will read/write on this buffer.
* @return Data object
* @throws MPIException
*/
public final <T extends Data> T getData(ByteBuffer buffer) throws MPIException
{
commit();
return newData(buffer, 0);
}
/**
* Gets a Data object in order to access the struct at the
* specified position of a struct array stored in a Buffer.
* @param buffer The Data object will read/write on this buffer.
* @param index Index of the struct in the buffer.
* @return Data object.
* @throws MPIException
*/
public final <T extends Data> T getData(ByteBuffer buffer, int index)
throws MPIException
{
commit();
return newData(buffer, index * extent);
}
/**
* Gets a Data object in order to access the byte array.
* @param array The Data object will read/write on this byte array.
* @return Data object.
* @throws MPIException
*/
public final <T extends Data> T getData(byte[] array) throws MPIException
{
ByteBuffer buffer = ByteBuffer.wrap(array);
buffer.order(ByteOrder.nativeOrder());
return getData(buffer);
}
/**
* Gets a Data object in order to access the struct at the
* specified position of a struct array stored in a byte array.
* @param array The Data object will read/write on this byte array.
* @param index Index of the struct in the array.
* @return Data object.
* @throws MPIException
*/
public final <T extends Data> T getData(byte[] array, int index)
throws MPIException
{
ByteBuffer buffer = ByteBuffer.wrap(array);
buffer.order(ByteOrder.nativeOrder());
return getData(buffer, index);
}
private int addField(Object type, int typeExtent, int length)
{
if(datatype != null)
throw new AssertionError("The struct data type was committed.");
int offset = extent;
extent += typeExtent * length;
fields.add(new Field(type, offset, length));
return offset;
}
/**
* Sets the offset of the next field.
* <p>The offset must be greater than or equal to the accumulated extent.
* @param offset offset of the next field
* @return this object in order to allow adding fields in a chained expression
*/
public final Struct setOffset(int offset)
{
if(datatype != null)
throw new AssertionError("The struct data type was committed.");
if(offset < extent)
{
throw new IllegalArgumentException(
"The offset must be greater or equal to the accumulated extent.");
}
extent = offset;
return this;
}
/**
* Adds a byte field to this struct.
* @return Offset of the new field.
*/
public final int addByte()
{
return addByte(1);
}
/**
* Adds a byte array to this struct.
* @param length Length of the array.
* @return Offset of the new field.
*/
public final int addByte(int length)
{
return addField(MPI.BYTE, 1, length);
}
/**
* Adds a char field to this struct.
* @return Offset of the new field.
*/
public final int addChar()
{
return addChar(1);
}
/**
* Adds a char array to this struct.
* @param length Length of the array.
* @return Offset of the new field.
*/
public final int addChar(int length)
{
return addField(MPI.CHAR, 2, length);
}
/**
* Adds a short field to this struct.
* @return Offset of the new field.
*/
public final int addShort()
{
return addShort(1);
}
/**
* Adds a short array to this struct.
* @param length Length of the array.
* @return Offset of the new field.
*/
public final int addShort(int length)
{
return addField(MPI.SHORT, 2, length);
}
/**
* Adds an int field to this struct.
* @return Offset of the new field.
*/
public final int addInt()
{
return addInt(1);
}
/**
* Adds an int array to this struct.
* @param length Length of the array.
* @return Offset of the new field.
*/
public final int addInt(int length)
{
return addField(MPI.INT, 4, length);
}
/**
* Adds a long field to this struct.
* @return Offset of the new field.
*/
public final int addLong()
{
return addLong(1);
}
/**
* Adds a long array to this struct.
* @param length Length of the array.
* @return Offset of the new field.
*/
public final int addLong(int length)
{
return addField(MPI.LONG, 8, length);
}
/**
* Adds a float field to this struct.
* @return Offset of the new field.
*/
public final int addFloat()
{
return addFloat(1);
}
/**
* Adds a float array to this struct.
* @param length Length of the array.
* @return Offset of the new field.
*/
public final int addFloat(int length)
{
return addField(MPI.FLOAT, 4, length);
}
/**
* Adds a double field to this struct.
* @return Offset of the new field.
*/
public final int addDouble()
{
return addDouble(1);
}
/**
* Adds a double array to this struct.
* @param length Length of the array.
* @return Offset of the new field.
*/
public final int addDouble(int length)
{
return addField(MPI.DOUBLE, 8, length);
}
/**
* Adds a struct field to this struct.
* @param struct Type of the field.
* @return Offset of the new field.
* @throws MPIException
*/
public final int addStruct(Struct struct) throws MPIException
{
return addStruct(struct, 1);
}
/**
* Adds an array of structs to this struct.
* @param struct Type of the array.
* @param length Length of the array.
* @return Offset of the new field.
* @throws MPIException
*/
public final int addStruct(Struct struct, int length) throws MPIException
{
struct.commit();
return addField(struct, struct.extent, length);
}
/**
* Adds a field of the specified data type.
* @param type Data type.
* @return Offset of the new field.
* @throws MPIException
*/
public final int addData(Datatype type) throws MPIException
{
return addData(type, 1);
}
/**
* Adds an array of the specified data type.
* @param type Data type.
* @param length Length of the array.
* @return Offset of the new field.
* @throws MPIException
*/
public final int addData(Datatype type, int length) throws MPIException
{
return addField(type, type.getExtent() * type.baseSize, length);
}
private boolean validType(int fieldOffset, int index, Datatype type)
{
int i = Arrays.binarySearch(offsets, fieldOffset);
return index >= 0 && index < lengths[i] && type == types[i];
}
private static class Field
{
private Object type;
private int offset, length;
private Field(Object type, int offset, int length)
{
this.type = type;
this.offset = offset;
this.length = length;
}
} // Field
/**
* Base class for reading/writing data in a struct stored in a byte buffer.
*/
public abstract class Data
{
private ByteBuffer buffer;
private int offset;
/**
* Gets the buffer where this struct data is stored.
* <p>The buffer can be used in {@code send}/{@code recv} operations.
* @return Buffer where the struct data is stored.
*/
public final ByteBuffer getBuffer()
{
return offset == 0 ? buffer : MPI.slice(buffer, offset);
}
/**
* Gets the byte value of a field.
* @param field Offset of the field.
* @return Byte value.
*/
protected final byte getByte(int field)
{
assert validType(field, 0, MPI.BYTE) : typeMismatch;
return buffer.get(offset + field);
}
/**
* Gets the byte value at the specified position of a byte array.
* @param field Offset of the byte array.
* @param index Index of the byte in the array.
* @return Byte value.
*/
protected final byte getByte(int field, int index)
{
assert validType(field, index, MPI.BYTE) : typeMismatch;
return buffer.get(offset + field + index);
}
/**
* Puts a byte value in a field.
* @param field Offset of the field.
* @param v Byte value.
*/
protected final void putByte(int field, byte v)
{
assert validType(field, 0, MPI.BYTE) : typeMismatch;
buffer.put(offset + field, v);
}
/**
* Puts a byte value at the specified position of a byte array.
* @param field Offset of the byte array.
* @param index Index of the byte in the array.
* @param v Byte value.
*/
protected final void putByte(int field, int index, byte v)
{
assert validType(field, index, MPI.BYTE) : typeMismatch;
buffer.put(offset + field + index, v);
}
/**
* Gets the char value of a field.
* @param field Offset of the field.
* @return Char value.
*/
protected final char getChar(int field)
{
assert validType(field, 0, MPI.CHAR) : typeMismatch;
return buffer.getChar(offset + field);
}
/**
* Gets the char value at the specified position of a char array.
* @param field Offset of the char array.
* @param index Index of the char in the array.
* @return Char value.
*/
protected final char getChar(int field, int index)
{
assert validType(field, index, MPI.CHAR) : typeMismatch;
return buffer.getChar(offset + field + index * 2);
}
/**
* Puts a char value in a field.
* @param field Offset of the field.
* @param v Char value.
*/
protected final void putChar(int field, char v)
{
assert validType(field, 0, MPI.CHAR) : typeMismatch;
buffer.putChar(offset + field, v);
}
/**
* Puts a char value at the specified position of a char array.
* @param field Offset of the char array.
* @param index Index of the char in the array.
* @param v Char value.
*/
protected final void putChar(int field, int index, char v)
{
assert validType(field, index, MPI.CHAR) : typeMismatch;
buffer.putChar(offset + field + index * 2, v);
}
/**
* Gets the short value of a field.
* @param field Offset of the field.
* @return Short value.
*/
protected final short getShort(int field)
{
assert validType(field, 0, MPI.SHORT) : typeMismatch;
return buffer.getShort(offset + field);
}
/**
* Gets the short value at the specified position of a short array.
* @param field Offset of the short array.
* @param index Index of the short in the array.
* @return Short value.
*/
protected final short getShort(int field, int index)
{
assert validType(field, index, MPI.SHORT) : typeMismatch;
return buffer.getShort(offset + field + index * 2);
}
/**
* Puts a short value in a field.
* @param field Offset of the field.
* @param v Short value.
*/
protected final void putShort(int field, short v)
{
assert validType(field, 0, MPI.SHORT) : typeMismatch;
buffer.putShort(offset + field, v);
}
/**
* Puts a short value at the specified position of a short array.
* @param field Offset of the short array.
* @param index Index of the short in the array.
* @param v Short value.
*/
protected final void putShort(int field, int index, short v)
{
assert validType(field, index, MPI.SHORT) : typeMismatch;
buffer.putShort(offset + field + index * 2, v);
}
/**
* Gets the int value of a field.
* @param field Offset of the field.
* @return Int value.
*/
protected final int getInt(int field)
{
assert validType(field, 0, MPI.INT) : typeMismatch;
return buffer.getInt(offset + field);
}
/**
* Gets the int value at the specified position of an int array.
* @param field Offset of the int array.
* @param index Index of the int in the array.
* @return Int value.
*/
protected final int getInt(int field, int index)
{
assert validType(field, index, MPI.INT) : typeMismatch;
return buffer.getInt(offset + field + index * 4);
}
/**
* Puts an int value in a field.
* @param field Offset of the field.
* @param v Int value.
*/
protected final void putInt(int field, int v)
{
assert validType(field, 0, MPI.INT) : typeMismatch;
buffer.putInt(offset + field, v);
}
/**
* Puts an int value at the specified position of an int array.
* @param field Offset of the int array.
* @param index Index of the int in the array.
* @param v Int value.
*/
protected final void putInt(int field, int index, int v)
{
assert validType(field, index, MPI.INT) : typeMismatch;
buffer.putInt(offset + field + index * 4, v);
}
/**
* Gets the long value of a field.
* @param field Offset of the field.
* @return Long value.
*/
protected final long getLong(int field)
{
assert validType(field, 0, MPI.LONG) : typeMismatch;
return buffer.getLong(offset + field);
}
/**
* Gets the long value at the specified position of a long array.
* @param field Offset of the long array.
* @param index Index of the long in the array.
* @return Long value.
*/
protected final long getLong(int field, int index)
{
assert validType(field, index, MPI.LONG) : typeMismatch;
return buffer.getLong(offset + field + index * 8);
}
/**
* Puts a long value in a field.
* @param field Offset of the field.
* @param v Long value.
*/
protected final void putLong(int field, long v)
{
assert validType(field, 0, MPI.LONG) : typeMismatch;
buffer.putLong(offset + field, v);
}
/**
* Puts a long value at the specified position of a long array.
* @param field Offset of the long array.
* @param index Index of the long in the array.
* @param v Long value.
*/
protected final void putLong(int field, int index, long v)
{
assert validType(field, index, MPI.LONG) : typeMismatch;
buffer.putLong(offset + field + index * 8, v);
}
/**
* Gets the float value of a field.
* @param field Offset of the field.
* @return Float value.
*/
protected final float getFloat(int field)
{
assert validType(field, 0, MPI.FLOAT) : typeMismatch;
return buffer.getFloat(offset + field);
}
/**
* Gets the float value at the specified position of a float array.
* @param field Offset of the float array.
* @param index Index of the float in the array.
* @return Float value.
*/
protected final float getFloat(int field, int index)
{
assert validType(field, index, MPI.FLOAT) : typeMismatch;
return buffer.getFloat(offset + field + index * 4);
}
/**
* Puts a float value in a field.
* @param field Offset of the field.
* @param v Float value.
*/
protected final void putFloat(int field, float v)
{
assert validType(field, 0, MPI.FLOAT) : typeMismatch;
buffer.putFloat(offset + field, v);
}
/**
* Puts a float value at the specified position of a float array.
* @param field Offset of the float array.
* @param index Index of the float in the array.
* @param v Float value.
*/
protected final void putFloat(int field, int index, float v)
{
assert validType(field, index, MPI.FLOAT) : typeMismatch;
buffer.putFloat(offset + field + index * 4, v);
}
/**
* Gets the double value of a field.
* @param field Offset of the field.
* @return Double value.
*/
protected final double getDouble(int field)
{
assert validType(field, 0, MPI.DOUBLE) : typeMismatch;
return buffer.getDouble(offset + field);
}
/**
* Gets the double value at the specified position of a double array.
* @param field Offset of the double array.
* @param index Index of the double in the array.
* @return Double value.
*/
protected final double getDouble(int field, int index)
{
assert validType(field, index, MPI.DOUBLE) : typeMismatch;
return buffer.getDouble(offset + field + index * 8);
}
/**
* Puts a double value in a field.
* @param field Offset of the field.
* @param v Double value.
*/
protected final void putDouble(int field, double v)
{
assert validType(field, 0, MPI.DOUBLE) : typeMismatch;
buffer.putDouble(offset + field, v);
}
/**
* Puts a double value at the specified position of a double array.
* @param field Offset of the double array.
* @param index Index of the double in the array.
* @param v Double value.
*/
protected final void putDouble(int field, int index, double v)
{
assert validType(field, index, MPI.DOUBLE) : typeMismatch;
buffer.putDouble(offset + field + index * 8, v);
}
/**
* Gets the struct data of a field.
* @param struct Struct type.
* @param field Offset of the field.
* @return Struct data.
*/
protected final <S extends Struct, D extends Struct.Data>
D getData(S struct, int field)
{
Struct s = (Struct)struct;
assert validType(field, 0, s.datatype) : typeMismatch;
return s.newData(buffer, offset + field);
}
/**
* Gets the struct data at the specified position of a struct array.
* @param struct Struct type.
* @param field Offset of the struct array.
* @param index Index of the struct in the array.
* @return Struct data.
*/
protected final <S extends Struct, D extends Struct.Data>
D getData(S struct, int field, int index)
{
Struct s = (Struct)struct;
assert validType(field, index, s.datatype) : typeMismatch;
return s.newData(buffer, offset + field + index * s.extent);
}
/**
* Gets the buffer of a field.
* <p>The buffer can be used in {@code send}/{@code recv} operations.
* @param type Data type of the buffer.
* @param field Offset of the field.
* @return Buffer object.
*/
protected final ByteBuffer getBuffer(Datatype type, int field)
{
assert validType(field, 0, type) : typeMismatch;
int position = offset + field;
return position == 0 ? buffer : MPI.slice(buffer, position);
}
/**
* Gets the buffer data at the specified position of a buffer array.
* <p>The buffer can be used in {@code send}/{@code recv} operations.
* @param type Data type of the buffer.
* @param field Offset of the buffer array.
* @param index Index of the buffer in the array.
* @return Buffer object.
* @throws MPIException
*/
protected final ByteBuffer getBuffer(Datatype type, int field, int index)
throws MPIException
{
assert validType(field, index, type) : typeMismatch;
int extent = type.getExtent() * type.baseSize,
position = offset + field + index * extent;
return position == 0 ? buffer : MPI.slice(buffer, position);
}
} // Data
} // Struct
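
A sketch of defining a concrete struct type with this API, mirroring the pattern ShortInt uses above; the class and field names are illustrative only:

import mpi.*;

// Illustrative layout {double x; int id;}; no trailing padding is added
// here, so matching a native C struct may additionally need setOffset.
class PointStruct extends Struct
{
    // Offsets are assigned in declaration order: x at 0, id at 8.
    private final int x  = addDouble();
    private final int id = addInt();

    @Override protected Data newData() { return new Data(); }

    class Data extends Struct.Data
    {
        double getX()  { return getDouble(x); }
        int    getId() { return getInt(id);   }
        void putX(double v) { putDouble(x, v); }
        void putId(int v)   { putInt(id, v);   }
    }
}

An instance point = new PointStruct() can then size a buffer of n structs as point.getExtent() * n bytes, access element i via point.getData(buffer, i), and transfer the whole array with count n and datatype point.getType().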

ompi/mpi/java/java/UserFunction.java (new file)
@@ -0,0 +1,185 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
* File : User_function.java
* Author : Xinying Li
* Created : Thu Apr 9 12:22:15 1998
* Revision : $Revision: 1.4 $
* Updated : $Date: 1999/09/13 16:14:30 $
* Copyright: Northeast Parallel Architectures Center
* at Syracuse University 1998
*/
package mpi;
import java.nio.*;
/**
* Java equivalent of the {@code MPI_USER_FUNCTION}.
*/
public abstract class UserFunction
{
/**
* User-defined function for a new {@code Op}.
* @param inVec array of values to combine with {@code inoutvec} elements
* @param inOutVec in-out array of accumulator locations
* @param count number of items in arrays
* @param datatype type of each item
*/
public void call(Object inVec, Object inOutVec, int count, Datatype datatype)
throws MPIException
{
throw new UnsupportedOperationException("Not supported yet.");
}
/**
* User-defined function for a new {@code Op}.
* @param in direct byte buffer to combine with {@code inOut} buffer
* @param inOut in-out direct byte buffer of accumulator locations
* @param count number of items in buffers
* @param datatype type of each item
*/
public void call(ByteBuffer in, ByteBuffer inOut, int count, Datatype datatype)
throws MPIException
{
switch(datatype.baseType)
{
case Datatype.BYTE:
vCall(in, inOut, count, datatype);
break;
case Datatype.CHAR:
vCall(in.asCharBuffer(), inOut.asCharBuffer(), count, datatype);
break;
case Datatype.SHORT:
vCall(in.asShortBuffer(), inOut.asShortBuffer(), count, datatype);
break;
case Datatype.INT:
vCall(in.asIntBuffer(), inOut.asIntBuffer(), count, datatype);
break;
case Datatype.LONG:
vCall(in.asLongBuffer(), inOut.asLongBuffer(), count, datatype);
break;
case Datatype.FLOAT:
vCall(in.asFloatBuffer(), inOut.asFloatBuffer(), count, datatype);
break;
case Datatype.DOUBLE:
vCall(in.asDoubleBuffer(), inOut.asDoubleBuffer(), count, datatype);
break;
case Datatype.PACKED:
vCall(in, inOut, count, datatype);
break;
default:
throw new IllegalArgumentException("Unsupported datatype.");
}
}
private void vCall(ByteBuffer in, ByteBuffer inOut,
int count, Datatype datatype) throws MPIException
{
int extent = datatype.getExtent();
byte[] inVec = new byte[count * extent],
inOutVec = new byte[count * extent];
in.get(inVec);
inOut.get(inOutVec);
call(inVec, inOutVec, count, datatype);
inOut.clear();
inOut.put(inOutVec);
}
private void vCall(CharBuffer inBuf, CharBuffer inOutBuf,
int count, Datatype datatype) throws MPIException
{
int extent = datatype.getExtent();
char[] inVec = new char[count * extent],
inOutVec = new char[count * extent];
inBuf.get(inVec);
inOutBuf.get(inOutVec);
call(inVec, inOutVec, count, datatype);
inOutBuf.clear();
inOutBuf.put(inOutVec);
}
private void vCall(ShortBuffer inBuf, ShortBuffer inOutBuf,
int count, Datatype datatype) throws MPIException
{
int extent = datatype.getExtent();
short[] inVec = new short[count * extent],
inOutVec = new short[count * extent];
inBuf.get(inVec);
inOutBuf.get(inOutVec);
call(inVec, inOutVec, count, datatype);
inOutBuf.clear();
inOutBuf.put(inOutVec);
}
private void vCall(IntBuffer inBuf, IntBuffer inOutBuf,
int count, Datatype datatype) throws MPIException
{
int extent = datatype.getExtent();
int[] inVec = new int[count * extent],
inOutVec = new int[count * extent];
inBuf.get(inVec);
inOutBuf.get(inOutVec);
call(inVec, inOutVec, count, datatype);
inOutBuf.clear();
inOutBuf.put(inOutVec);
}
private void vCall(LongBuffer inBuf, LongBuffer inOutBuf,
int count, Datatype datatype) throws MPIException
{
int extent = datatype.getExtent();
long[] inVec = new long[count * extent],
inOutVec = new long[count * extent];
inBuf.get(inVec);
inOutBuf.get(inOutVec);
call(inVec, inOutVec, count, datatype);
inOutBuf.clear();
inOutBuf.put(inOutVec);
}
private void vCall(FloatBuffer inBuf, FloatBuffer inOutBuf,
int count, Datatype datatype) throws MPIException
{
int extent = datatype.getExtent();
float[] inVec = new float[count * extent],
inOutVec = new float[count * extent];
inBuf.get(inVec);
inOutBuf.get(inOutVec);
call(inVec, inOutVec, count, datatype);
inOutBuf.clear();
inOutBuf.put(inOutVec);
}
private void vCall(DoubleBuffer inBuf, DoubleBuffer inOutBuf,
int count, Datatype datatype) throws MPIException
{
int extent = datatype.getExtent();
double[] inVec = new double[count * extent],
inOutVec = new double[count * extent];
inBuf.get(inVec);
inOutBuf.get(inOutVec);
call(inVec, inOutVec, count, datatype);
inOutBuf.clear();
inOutBuf.put(inOutVec);
}
} // UserFunction
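
A sketch of plugging a UserFunction into a reduction, assuming the Op(UserFunction, boolean) constructor these bindings pair with it:

import mpi.*;

class ProductOpExample
{
    static void reduceProduct(int[] sendBuf, int[] recvBuf) throws MPIException
    {
        // Element-wise integer product as a user-defined operation.
        UserFunction product = new UserFunction()
        {
            @Override public void call(Object inVec, Object inOutVec,
                                       int count, Datatype datatype)
            {
                int[] in = (int[])inVec, inOut = (int[])inOutVec;
                for(int i = 0; i < count; i++)
                    inOut[i] *= in[i];
            }
        };

        Op prod = new Op(product, true);   // true: the operation commutes
        MPI.COMM_WORLD.allReduce(sendBuf, recvBuf, sendBuf.length,
                                 MPI.INT, prod);
    }
}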

ompi/mpi/java/java/User_function.java (deleted file)
@@ -1,51 +0,0 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
* File : User_function.java
* Author : Xinying Li
* Created : Thu Apr 9 12:22:15 1998
* Revision : $Revision: 1.4 $
* Updated : $Date: 1999/09/13 16:14:30 $
* Copyright: Northeast Parallel Architectures Center
* at Syracuse University 1998
*/
package mpi;
public abstract class User_function{
/**
* User-defined function for a new <tt>Op</tt>.
* <p>
* <table>
* <tr><td><tt> invec </tt></td><td> array of values to combine with
* <tt>inoutvec</tt> elements </tr>
* <tr><td><tt> inoffset </tt></td><td> initial offset in
* <tt>invec<tt> </tr>
* <tr><td><tt> inoutvec </tt></td><td> in-out array of accumulator
* locations </tr>
* <tr><td><tt> inoutoffset </tt></td><td> initial offset in
* <tt>inoutvec<tt> </tr>
* <tr><td><tt> count </tt></td><td> number of items in arrays </tr>
* <tr><td><tt> datatype </tt></td><td> type of each item </tr>
* </table>
* <p>
* Java equivalent of the MPI <tt>USER_FUNCTION</tt>.
*/
public abstract void Call(Object invec, int inoffset,
Object inoutvec, int inoutoffset,
int count, Datatype datatype) ;
}

ompi/mpi/java/java/Win.java (new file)
@@ -0,0 +1,390 @@
package mpi;
import java.nio.*;
/**
* This class represents {@code MPI_Win}.
*/
public final class Win implements Freeable
{
private long handle;
/**
* Java binding of {@code MPI_WIN_CREATE}.
* @param base initial address of window
* @param size size of window (buffer elements)
* @param dispUnit local unit size for displacements (buffer elements)
* @param info info object
* @param comm communicator
* @throws MPIException
*/
public Win(Buffer base, int size, int dispUnit, Info info, Comm comm)
throws MPIException
{
if(!base.isDirect())
throw new IllegalArgumentException("The buffer must be direct.");
int baseSize;
if(base instanceof ByteBuffer)
baseSize = 1;
else if(base instanceof CharBuffer || base instanceof ShortBuffer)
baseSize = 2;
else if(base instanceof IntBuffer || base instanceof FloatBuffer)
baseSize = 4;
else if(base instanceof LongBuffer || base instanceof DoubleBuffer)
baseSize = 8;
else
throw new AssertionError();
int sizeBytes = size * baseSize,
dispBytes = dispUnit * baseSize;
handle = createWin(base, sizeBytes, dispBytes, info.handle, comm.handle);
}
private native long createWin(
Buffer base, int size, int dispUnit, long info, long comm)
throws MPIException;
private int getBaseType(Datatype orgType, Datatype targetType)
{
int baseType = orgType.baseType;
if(baseType != targetType.baseType)
{
throw new IllegalArgumentException(
"Both datatype arguments must be constructed "+
"from the same predefined datatype.");
}
return baseType;
}
/**
* Java binding of the MPI operation {@code MPI_GET_GROUP}.
* @return group of processes which share access to the window
* @throws MPIException
*/
public Group getGroup() throws MPIException
{
MPI.check();
return new Group(getGroup(handle));
}
private native long getGroup(long win) throws MPIException;
/**
* Java binding of {@code MPI_PUT}.
* @param origin origin buffer
* @param orgCount number of entries in origin buffer
* @param orgType datatype of each entry in origin buffer
* @param targetRank rank of target
* @param targetDisp displacement from start of window to target buffer
* @param targetCount number of entries in target buffer
* @param targetType datatype of each entry in target buffer
* @throws MPIException
*/
public void put(Buffer origin, int orgCount, Datatype orgType,
int targetRank, int targetDisp, int targetCount,
Datatype targetType)
throws MPIException
{
MPI.check();
if(!origin.isDirect())
throw new IllegalArgumentException("The origin must be a direct buffer.");
put(handle, origin, orgCount, orgType.handle,
targetRank, targetDisp, targetCount, targetType.handle,
getBaseType(orgType, targetType));
}
private native void put(
long win, Buffer origin, int orgCount, long orgType,
int targetRank, int targetDisp, int targetCount, long targetType,
int baseType) throws MPIException;
/**
* Java binding of {@code MPI_GET}.
* @param origin origin buffer
* @param orgCount number of entries in origin buffer
* @param orgType datatype of each entry in origin buffer
* @param targetRank rank of target
* @param targetDisp displacement from start of window to target buffer
* @param targetCount number of entries in target buffer
* @param targetType datatype of each entry in target buffer
*/
public void get(Buffer origin, int orgCount, Datatype orgType,
int targetRank, int targetDisp, int targetCount,
Datatype targetType)
throws MPIException
{
MPI.check();
if(!origin.isDirect())
throw new IllegalArgumentException("The origin must be a direct buffer.");
get(handle, origin, orgCount, orgType.handle,
targetRank, targetDisp, targetCount, targetType.handle,
getBaseType(orgType, targetType));
}
private native void get(
long win, Buffer origin, int orgCount, long orgType,
int targetRank, int targetDisp, int targetCount, long targetType,
int baseType) throws MPIException;
/**
* Java binding of {@code MPI_ACCUMULATE}.
* @param origin origin buffer
* @param orgCount number of entries in origin buffer
* @param orgType datatype of each entry in origin buffer
* @param targetRank rank of target
* @param targetDisp displacement from start of window to target buffer
* @param targetCount number of entries in target buffer
* @param targetType datatype of each entry in target buffer
* @param op reduce operation
*/
public void accumulate(Buffer origin, int orgCount, Datatype orgType,
int targetRank, int targetDisp, int targetCount,
Datatype targetType, Op op)
throws MPIException
{
MPI.check();
if(!origin.isDirect())
throw new IllegalArgumentException("The origin must be a direct buffer.");
accumulate(handle, origin, orgCount, orgType.handle,
targetRank, targetDisp, targetCount, targetType.handle,
op, getBaseType(orgType, targetType));
}
private native void accumulate(
long win, Buffer origin, int orgCount, long orgType,
int targetRank, int targetDisp, int targetCount, long targetType,
Op op, int baseType) throws MPIException;
/**
* Java binding of {@code MPI_WIN_FENCE}.
* @param assertion program assertion
*/
public void fence(int assertion) throws MPIException
{
MPI.check();
fence(handle, assertion);
}
private native void fence(long win, int assertion) throws MPIException;
/**
* Java binding of the MPI operation {@code MPI_WIN_START}.
* @param group group of target processes
* @param assertion program assertion
* @throws MPIException
*/
public void start(Group group, int assertion) throws MPIException
{
MPI.check();
start(handle, group.handle, assertion);
}
private native void start(long win, long group, int assertion)
throws MPIException;
/**
* Java binding of the MPI operation {@code MPI_WIN_COMPLETE}.
* @throws MPIException
*/
public void complete() throws MPIException
{
MPI.check();
complete(handle);
}
private native void complete(long win) throws MPIException;
/**
* Java binding of the MPI operation {@code MPI_WIN_POST}.
* @param group group of origin processes
* @param assertion program assertion
* @throws MPIException
*/
public void post(Group group, int assertion) throws MPIException
{
MPI.check();
post(handle, group.handle, assertion);
}
private native void post(long win, long group, int assertion)
throws MPIException;
/**
* Java binding of the MPI operation {@code MPI_WIN_WAIT}.
* @throws MPIException
*/
public void waitFor() throws MPIException
{
MPI.check();
waitFor(handle);
}
private native void waitFor(long win) throws MPIException;
/**
* Java binding of the MPI operation {@code MPI_WIN_TEST}.
* @return true if the exposure epoch has completed, false otherwise
* @throws MPIException
*/
public boolean test() throws MPIException
{
MPI.check();
return test(handle);
}
private native boolean test(long win) throws MPIException;
/**
* Java binding of the MPI operation {@code MPI_WIN_LOCK}.
* @param lockType either MPI.LOCK_EXCLUSIVE or MPI.LOCK_SHARED
* @param rank rank of locked window
* @param assertion program assertion
* @throws MPIException
*/
public void lock(int lockType, int rank, int assertion) throws MPIException
{
MPI.check();
lock(handle, lockType, rank, assertion);
}
private native void lock(long win, int lockType, int rank, int assertion)
throws MPIException;
/**
* Java binding of the MPI operation {@code MPI_WIN_UNLOCK}.
* @param rank rank of window
* @throws MPIException
*/
public void unlock(int rank) throws MPIException
{
MPI.check();
unlock(handle, rank);
}
private native void unlock(long win, int rank) throws MPIException;
/**
* Java binding of the MPI operation {@code MPI_WIN_SET_ERRHANDLER}.
* @param errhandler new MPI error handler for window
* @throws MPIException
*/
public void setErrhandler(Errhandler errhandler) throws MPIException
{
MPI.check();
setErrhandler(handle, errhandler.handle);
}
private native void setErrhandler(long win, long errhandler)
throws MPIException;
/**
* Java binding of the MPI operation {@code MPI_WIN_CALL_ERRHANDLER}.
* @param errorCode error code
* @throws MPIException
*/
public void callErrhandler(int errorCode) throws MPIException
{
callErrhandler(handle, errorCode);
}
private native void callErrhandler(long handle, int errorCode)
throws MPIException;
/**
* Create a new attribute key.
* <p>Java binding of the MPI operation {@code MPI_WIN_CREATE_KEYVAL}.
* @return attribute key for future access
* @throws MPIException
*/
public static int createKeyval() throws MPIException
{
MPI.check();
return createKeyval_jni();
}
private static native int createKeyval_jni() throws MPIException;
/**
* Frees an attribute key.
* <p>Java binding of the MPI operation {@code MPI_WIN_FREE_KEYVAL}.
* @param keyval attribute key
* @throws MPIException
*/
public static void freeKeyval(int keyval) throws MPIException
{
MPI.check();
freeKeyval_jni(keyval);
}
private static native void freeKeyval_jni(int keyval) throws MPIException;
/**
* Stores attribute value associated with a key.
* <p>Java binding of the MPI operation {@code MPI_WIN_SET_ATTR}.
* @param keyval attribute key
* @param value attribute value
* @throws MPIException
*/
public void setAttr(int keyval, Object value) throws MPIException
{
MPI.check();
setAttr(handle, keyval, MPI.attrSet(value));
}
private native void setAttr(long win, int keyval, byte[] value)
throws MPIException;
/**
* Retrieves attribute value by key.
* <p>Java binding of the MPI operation {@code MPI_WIN_GET_ATTR}.
* @param keyval attribute key
* @return attribute value or null if no attribute is associated with the key.
* @throws MPIException
*/
public Object getAttr(int keyval) throws MPIException
{
MPI.check();
Object obj = getAttr(handle, keyval);
return obj instanceof byte[] ? MPI.attrGet((byte[])obj) : obj;
}
private native Object getAttr(long win, int keyval) throws MPIException;
/**
* Deletes an attribute value associated with a key.
* <p>Java binding of the MPI operation {@code MPI_WIN_DELETE_ATTR}.
* @param keyval attribute key
* @throws MPIException
*/
public void deleteAttr(int keyval) throws MPIException
{
MPI.check();
deleteAttr(handle, keyval);
}
private native void deleteAttr(long win, int keyval) throws MPIException;
/**
* Java binding of {@code MPI_WIN_FREE}.
* @throws MPIException
*/
@Override public void free() throws MPIException
{
MPI.check();
handle = free(handle);
}
private native long free(long win) throws MPIException;
} // Win
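
A usage sketch for the window class with fence synchronization; MPI.newIntBuffer and MPI.INFO_NULL are assumed from this commit's MPI and Info classes:

import java.nio.IntBuffer;
import mpi.*;

class WinExample
{
    static void putIntoNeighbour() throws MPIException
    {
        int rank = MPI.COMM_WORLD.getRank(),
            size = MPI.COMM_WORLD.getSize();

        // Each process exposes a one-int window; windows take direct buffers.
        IntBuffer winBuf = MPI.newIntBuffer(1);
        Win win = new Win(winBuf, 1, 1, MPI.INFO_NULL, MPI.COMM_WORLD);

        IntBuffer origin = MPI.newIntBuffer(1);
        origin.put(0, rank);

        win.fence(0);
        // Write our rank into slot 0 of the right neighbour's window.
        win.put(origin, 1, MPI.INT, (rank + 1) % size, 0, 1, MPI.INT);
        win.fence(0);

        System.out.println(rank + " window now holds " + winBuf.get(0));
        win.free();
    }
}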