
Merge pull request #825 from hjelmn/white_space_purge

periodic trailing whitespace purge
This commit is contained in:
Nathan Hjelm 2015-08-25 19:23:52 -06:00
parents ea935df632 156ce6af21
Commit f451876058
45 changed files with 144 additions and 144 deletions

View file

@ -160,7 +160,7 @@ program falignment
write (10,'(I5)') LOC(t2)-LOC(t1)
endif
CLOSE(10)
end program]])],
[AS_IF([test "$cross_compiling" = "yes"],
[AC_MSG_ERROR([Can not determine alignment of $1 when cross-compiling])],

View file

@ -104,7 +104,7 @@ mpif-c-constants-decl.h:
--single $(OMPI_FORTRAN_SINGLE_UNDERSCORE) \
--double $(OMPI_FORTRAN_DOUBLE_UNDERSCORE)
if WANT_INSTALL_HEADERS
if WANT_INSTALL_HEADERS
ompidir = $(ompiincludedir)
nobase_dist_ompi_HEADERS = $(headers)
nobase_nodist_ompi_HEADERS = $(nodist_headers)

View file

@ -280,8 +280,8 @@ int ompi_io_ompio_scatterv_array (void *sbuf,
reqs[i] = MPI_REQUEST_NULL;
}
}
if (OMPI_SUCCESS != err) {
free ( reqs );
if (OMPI_SUCCESS != err) {
free ( reqs );
return err;
}
}
@ -364,7 +364,7 @@ int ompi_io_ompio_gather_array (void *sbuf,
OPAL_PTRDIFF_TYPE extent, lb;
int err = OMPI_SUCCESS;
ompi_request_t ** reqs=NULL;
rank = ompi_comm_rank (comm);
/* Everyone but the writers sends data and returns. */

View file

@ -322,9 +322,9 @@ ompio_io_ompio_file_close (mca_io_ompio_file_t *ompio_fh)
ret = ompio_fh->f_sharedfp->sharedfp_file_close(ompio_fh);
}
if ( NULL != ompio_fh->f_fs ) {
/* The pointer might not be set if file_close() is
/* The pointer might not be set if file_close() is
** called from the file destructor in case of an error
** during file_open()
** during file_open()
*/
ret = ompio_fh->f_fs->fs_file_close (ompio_fh);
}

View file

@ -468,7 +468,7 @@ int mca_io_ompio_file_iread_at_all (ompi_file_t *fh,
ret = ompio_io_ompio_file_iread_at_all ( &data->ompio_fh, offset, buf, count, datatype, request );
return ret;
}
int ompio_io_ompio_file_iread_at_all (mca_io_ompio_file_t *fp,
OMPI_MPI_OFFSET_TYPE offset,
void *buf,

View file

@ -83,22 +83,22 @@ int mca_io_ompio_set_view_internal(mca_io_ompio_file_t *fh,
fh->f_flags |= OMPIO_FILE_VIEW_IS_SET;
fh->f_datarep = strdup (datarep);
ompi_datatype_duplicate (filetype, &fh->f_orig_filetype );
opal_datatype_get_extent(&filetype->super, &lb, &ftype_extent);
opal_datatype_type_size (&filetype->super, &ftype_size);
if ( etype == filetype &&
if ( etype == filetype &&
ompi_datatype_is_predefined (filetype ) &&
ftype_extent == (OPAL_PTRDIFF_TYPE)ftype_size ){
ompi_datatype_create_contiguous(MCA_IO_DEFAULT_FILE_VIEW_SIZE,
&ompi_mpi_byte.dt,
&newfiletype);
ompi_datatype_commit (&newfiletype);
}
}
else {
newfiletype = filetype;
}
fh->f_iov_count = 0;
@ -166,11 +166,11 @@ int mca_io_ompio_set_view_internal(mca_io_ompio_file_t *fh,
}
free(contg_groups);
if ( etype == filetype &&
if ( etype == filetype &&
ompi_datatype_is_predefined (filetype ) &&
ftype_extent == (OPAL_PTRDIFF_TYPE)ftype_size ){
ompi_datatype_destroy ( &newfiletype );
}
}
if (OMPI_SUCCESS != mca_fcoll_base_file_select (fh, NULL)) {
@ -201,7 +201,7 @@ int mca_io_ompio_file_set_view (ompi_file_t *fp,
fh = &data->ompio_fh;
ret = mca_io_ompio_set_view_internal(fh, disp, etype, filetype, datarep, info);
if ( NULL != fh->f_sharedfp_data) {
if ( NULL != fh->f_sharedfp_data) {
sh = ((struct mca_sharedfp_base_data_t *)fh->f_sharedfp_data)->sharedfh;
ret = mca_io_ompio_set_view_internal(sh, disp, etype, filetype, datarep, info);
}

View file

@ -816,7 +816,7 @@ int mca_io_ompio_file_write_at_all_end (ompi_file_t *fh,
int ret = OMPI_SUCCESS;
mca_io_ompio_data_t *data;
mca_io_ompio_file_t *fp=NULL;
data = (mca_io_ompio_data_t *) fh->f_io_selected_data;
fp = &data->ompio_fh;
ret = ompi_request_wait ( &fp->f_split_coll_req, status );

View file

@ -59,9 +59,9 @@ int mca_sharedfp_addproc_file_open (struct ompi_communicator_t *comm,
data = (mca_io_ompio_data_t *) fh->f_fh->f_io_selected_data;
ompio_fh = &data->ompio_fh;
err = mca_io_ompio_set_view_internal (shfileHandle,
ompio_fh->f_disp,
ompio_fh->f_etype,
err = mca_io_ompio_set_view_internal (shfileHandle,
ompio_fh->f_disp,
ompio_fh->f_etype,
ompio_fh->f_orig_filetype,
ompio_fh->f_datarep,
MPI_INFO_NULL);

View file

@ -124,7 +124,7 @@ struct mca_sharedfp_base_module_1_0_0_t * mca_sharedfp_individual_component_file
}
else {
if ( mca_sharedfp_individual_verbose ) {
opal_output(ompi_sharedfp_base_framework.framework_output,
opal_output(ompi_sharedfp_base_framework.framework_output,
"mca_sharedfp_individual_component_file_query: "
"OMPIO_SHAREDFP_RELAXED_ORDERING MPI_Info key not set, "
"got MPI_INFO_NULL. Set this key in order to increase "

View file

@ -53,7 +53,7 @@ int mca_sharedfp_individual_file_open (struct ompi_communicator_t *comm,
opal_output(0, "mca_sharedfp_individual_file_open: unable to allocate memory\n");
return OMPI_ERR_OUT_OF_RESOURCE;
}
err = ompio_io_ompio_file_open ( comm, filename, amode, info, shfileHandle, false);
if ( OMPI_SUCCESS != err ) {
opal_output(0, "mca_sharedfp_individual_file_open: Error during file open\n");

View file

@ -105,7 +105,7 @@ int mca_sharedfp_individual_write_ordered (mca_io_ompio_file_t *fh,
if(fh->f_sharedfp_data==NULL){
if ( mca_sharedfp_individual_verbose ) {
opal_output(ompi_sharedfp_base_framework.framework_output,
opal_output(ompi_sharedfp_base_framework.framework_output,
"sharedfp_individual_write_ordered - opening the shared file pointer\n");
}
shared_fp_base_module = fh->f_sharedfp;

View file

@ -60,9 +60,9 @@ int mca_sharedfp_lockedfile_file_open (struct ompi_communicator_t *comm,
data = (mca_io_ompio_data_t *) fh->f_fh->f_io_selected_data;
ompio_fh = &data->ompio_fh;
err = mca_io_ompio_set_view_internal (shfileHandle,
ompio_fh->f_disp,
ompio_fh->f_etype,
err = mca_io_ompio_set_view_internal (shfileHandle,
ompio_fh->f_disp,
ompio_fh->f_etype,
ompio_fh->f_orig_filetype,
ompio_fh->f_datarep,
MPI_INFO_NULL);

View file

@ -42,7 +42,7 @@ int mca_sharedfp_lockedfile_iread(mca_io_ompio_file_t *fh,
if ( NULL == fh->f_sharedfp_data ) {
if ( mca_sharedfp_lockedfile_verbose ) {
opal_output(ompi_sharedfp_base_framework.framework_output,
opal_output(ompi_sharedfp_base_framework.framework_output,
"sharedfp_lockedfile_iread: opening the shared file pointer\n");
}
shared_fp_base_module = fh->f_sharedfp;
@ -77,7 +77,7 @@ int mca_sharedfp_lockedfile_iread(mca_io_ompio_file_t *fh,
if ( -1 != ret ) {
if ( mca_sharedfp_lockedfile_verbose ) {
opal_output(ompi_sharedfp_base_framework.framework_output,
opal_output(ompi_sharedfp_base_framework.framework_output,
"sharedfp_lockedfile_iread - Offset received is %lld\n",offset);
}
@ -108,7 +108,7 @@ int mca_sharedfp_lockedfile_read_ordered_begin(mca_io_ompio_file_t *fh,
if(fh->f_sharedfp_data==NULL){
if ( mca_sharedfp_lockedfile_verbose ) {
opal_output(ompi_sharedfp_base_framework.framework_output,
opal_output(ompi_sharedfp_base_framework.framework_output,
"sharedfp_lockedfile_read_ordered_begin: opening the shared file pointer\n");
}
shared_fp_base_module = fh->f_sharedfp;
@ -126,7 +126,7 @@ int mca_sharedfp_lockedfile_read_ordered_begin(mca_io_ompio_file_t *fh,
if ( true == fh->f_split_coll_in_use ) {
opal_output(ompi_sharedfp_base_framework.framework_output,
opal_output(ompi_sharedfp_base_framework.framework_output,
"Only one split collective I/O operation allowed per file handle at any given point in time!\n");
return MPI_ERR_REQUEST;
}
@ -163,7 +163,7 @@ int mca_sharedfp_lockedfile_read_ordered_begin(mca_io_ompio_file_t *fh,
for ( i = 0; i < size ; i ++) {
bytesRequested += buff[i];
if ( mca_sharedfp_lockedfile_verbose ) {
opal_output(ompi_sharedfp_base_framework.framework_output,
opal_output(ompi_sharedfp_base_framework.framework_output,
"sharedfp_lockedfile_read_ordered_begin: Bytes requested are %ld\n",bytesRequested);
}
}

View file

@ -198,7 +198,7 @@ int mca_sharedfp_lockedfile_write_ordered_begin(mca_io_ompio_file_t *fh,
offset /= sh->sharedfh->f_etype_size;
if ( mca_sharedfp_lockedfile_verbose ) {
opal_output(ompi_sharedfp_base_framework.framework_output,
opal_output(ompi_sharedfp_base_framework.framework_output,
"sharedfp_lockedfile_write_ordered_begin: Offset returned is %lld\n",offset);
}

View file

@ -31,7 +31,7 @@ AC_DEFUN([MCA_ompi_sharedfp_sm_CONFIG],[
AC_CHECK_HEADER([semaphore.h],
[AC_CHECK_FUNCS([sem_init],[sharedfp_sm_happy=yes],[])])
AS_IF([test "$sharedfp_sm_happy" = "yes"],
[$1],
[$2])

View file

@ -68,7 +68,7 @@ int mca_sharedfp_sm_file_open (struct ompi_communicator_t *comm,
if ( NULL == shfileHandle ) {
opal_output(0, "mca_sharedfp_sm_file_open: Error during memory allocation\n");
return OMPI_ERR_OUT_OF_RESOURCE;
}
}
err = ompio_io_ompio_file_open(comm,filename,amode,info,shfileHandle,false);
if ( OMPI_SUCCESS != err) {
opal_output(0, "mca_sharedfp_sm_file_open: Error during file open\n");
@ -79,16 +79,16 @@ int mca_sharedfp_sm_file_open (struct ompi_communicator_t *comm,
data = (mca_io_ompio_data_t *) fh->f_fh->f_io_selected_data;
ompio_fh = &data->ompio_fh;
err = mca_io_ompio_set_view_internal (shfileHandle,
ompio_fh->f_disp,
ompio_fh->f_etype,
err = mca_io_ompio_set_view_internal (shfileHandle,
ompio_fh->f_disp,
ompio_fh->f_etype,
ompio_fh->f_orig_filetype,
ompio_fh->f_datarep,
MPI_INFO_NULL);
/*Memory is allocated here for the sh structure*/
if ( mca_sharedfp_sm_verbose ) {
opal_output(ompi_sharedfp_base_framework.framework_output,
opal_output(ompi_sharedfp_base_framework.framework_output,
"mca_sharedfp_sm_file_open: malloc f_sharedfp_ptr struct\n");
}
@ -109,7 +109,7 @@ int mca_sharedfp_sm_file_open (struct ompi_communicator_t *comm,
/*Open a shared memory segment which will hold the shared file pointer*/
if ( mca_sharedfp_sm_verbose ) {
opal_output(ompi_sharedfp_base_framework.framework_output,
opal_output(ompi_sharedfp_base_framework.framework_output,
"mca_sharedfp_sm_file_open: allocatge shared memory segment.\n");
}

View file

@ -41,7 +41,7 @@ int mca_sharedfp_sm_iread(mca_io_ompio_file_t *fh,
if( NULL == fh->f_sharedfp_data){
if ( mca_sharedfp_sm_verbose ) {
opal_output(ompi_sharedfp_base_framework.framework_output,
opal_output(ompi_sharedfp_base_framework.framework_output,
"sharedfp_sm_iread: opening the shared file pointer\n");
}
shared_fp_base_module = fh->f_sharedfp;
@ -74,7 +74,7 @@ int mca_sharedfp_sm_iread(mca_io_ompio_file_t *fh,
if ( -1 != ret ) {
if ( mca_sharedfp_sm_verbose ) {
opal_output(ompi_sharedfp_base_framework.framework_output,
opal_output(ompi_sharedfp_base_framework.framework_output,
"sharedfp_sm_iread: Offset received is %lld\n",offset);
}
/* Read the file */
@ -104,7 +104,7 @@ int mca_sharedfp_sm_read_ordered_begin(mca_io_ompio_file_t *fh,
if ( NULL == fh->f_sharedfp_data){
if ( mca_sharedfp_sm_verbose ) {
opal_output(ompi_sharedfp_base_framework.framework_output,
opal_output(ompi_sharedfp_base_framework.framework_output,
"sharedfp_sm_read_ordered_begin: opening the shared file pointer\n");
}
shared_fp_base_module = fh->f_sharedfp;
@ -157,7 +157,7 @@ int mca_sharedfp_sm_read_ordered_begin(mca_io_ompio_file_t *fh,
for (i = 0; i < size ; i ++) {
bytesRequested += buff[i];
if ( mca_sharedfp_sm_verbose ) {
opal_output(ompi_sharedfp_base_framework.framework_output,
opal_output(ompi_sharedfp_base_framework.framework_output,
"mca_sharedfp_sm_read_ordered_begin: Bytes requested are %ld\n",
bytesRequested);
}
@ -174,7 +174,7 @@ int mca_sharedfp_sm_read_ordered_begin(mca_io_ompio_file_t *fh,
goto exit;
}
if ( mca_sharedfp_sm_verbose ) {
opal_output(ompi_sharedfp_base_framework.framework_output,
opal_output(ompi_sharedfp_base_framework.framework_output,
"mca_sharedfp_sm_read_ordered_begin: Offset received is %lld\n",offsetReceived);
}
@ -197,7 +197,7 @@ int mca_sharedfp_sm_read_ordered_begin(mca_io_ompio_file_t *fh,
offset /= sh->sharedfh->f_etype_size;
if ( mca_sharedfp_sm_verbose ) {
opal_output(ompi_sharedfp_base_framework.framework_output,
opal_output(ompi_sharedfp_base_framework.framework_output,
"mca_sharedfp_sm_read_ordered_begin: Offset returned is %lld\n",offset);
}

View file

@ -41,7 +41,7 @@ int mca_sharedfp_sm_iwrite(mca_io_ompio_file_t *fh,
if( NULL == fh->f_sharedfp_data){
if ( mca_sharedfp_sm_verbose ) {
opal_output(ompi_sharedfp_base_framework.framework_output,
opal_output(ompi_sharedfp_base_framework.framework_output,
"sharedfp_sm_iwrite - opening the shared file pointer\n");
}
shared_fp_base_module = fh->f_sharedfp;
@ -65,16 +65,16 @@ int mca_sharedfp_sm_iwrite(mca_io_ompio_file_t *fh,
sh = fh->f_sharedfp_data;
if ( mca_sharedfp_sm_verbose ) {
opal_output(ompi_sharedfp_base_framework.framework_output,
opal_output(ompi_sharedfp_base_framework.framework_output,
"sharedfp_sm_iwrite: Bytes Requested is %ld\n",bytesRequested);
}
/* Request the offset to write bytesRequested bytes */
ret = mca_sharedfp_sm_request_position(sh,bytesRequested,&offset);
offset /= sh->sharedfh->f_etype_size;
if ( -1 != ret ) {
if ( mca_sharedfp_sm_verbose ) {
opal_output(ompi_sharedfp_base_framework.framework_output,
opal_output(ompi_sharedfp_base_framework.framework_output,
"sharedfp_sm_iwrite: Offset received is %lld\n",offset);
}
/* Write to the file */
@ -105,7 +105,7 @@ int mca_sharedfp_sm_write_ordered_begin(mca_io_ompio_file_t *fh,
if ( NULL == fh->f_sharedfp_data){
if ( mca_sharedfp_sm_verbose ) {
opal_output(ompi_sharedfp_base_framework.framework_output,
opal_output(ompi_sharedfp_base_framework.framework_output,
"sharedfp_sm_write_ordered_begin: opening the shared file pointer\n");
}
shared_fp_base_module = fh->f_sharedfp;
@ -158,7 +158,7 @@ int mca_sharedfp_sm_write_ordered_begin(mca_io_ompio_file_t *fh,
for (i = 0; i < size ; i ++) {
bytesRequested += buff[i];
if ( mca_sharedfp_sm_verbose ) {
opal_output(ompi_sharedfp_base_framework.framework_output,
opal_output(ompi_sharedfp_base_framework.framework_output,
"mca_sharedfp_sm_write_ordered_begin: Bytes requested are %ld\n",
bytesRequested);
}
@ -175,7 +175,7 @@ int mca_sharedfp_sm_write_ordered_begin(mca_io_ompio_file_t *fh,
goto exit;
}
if ( mca_sharedfp_sm_verbose ) {
opal_output(ompi_sharedfp_base_framework.framework_output,
opal_output(ompi_sharedfp_base_framework.framework_output,
"mca_sharedfp_sm_write_ordered_begin: Offset received is %lld\n",offsetReceived);
}
@ -196,9 +196,9 @@ int mca_sharedfp_sm_write_ordered_begin(mca_io_ompio_file_t *fh,
/*Each process now has its own individual offset in recvBUFF*/
offset = offsetBuff - sendBuff;
offset /= sh->sharedfh->f_etype_size;
if ( mca_sharedfp_sm_verbose ) {
opal_output(ompi_sharedfp_base_framework.framework_output,
opal_output(ompi_sharedfp_base_framework.framework_output,
"mca_sharedfp_sm_write_ordered_begin: Offset returned is %lld\n",offset);
}

View file

@ -17,7 +17,7 @@ the two main principles guiding its drafting:
both authors and holders of the economic rights over software.
The authors of the CeCILL-B (for Ce[a] C[nrs] I[nria] L[ogiciel] L[ibre])
license are:
license are:
Commissariat à l'Energie Atomique - CEA, a public scientific, technical
and industrial research establishment, having its principal place of
@ -403,8 +403,8 @@ rights set forth in Article 5).
9.3 The Licensee acknowledges that the Software is supplied "as is" by
the Licensor without any other express or tacit warranty, other than
that provided for in Article 9.2 and, in particular, without any warranty
as to its commercial value, its secured, safe, innovative or relevant
that provided for in Article 9.2 and, in particular, without any warranty
as to its commercial value, its secured, safe, innovative or relevant
nature.
Specifically, the Licensor does not warrant that the Software is free

View file

@ -69,7 +69,7 @@ OMPI_Fortran_binding.lo: OMPI_Fortran_binding.f90 mpi-f08-types.lo
#
mpi_api_lo_files = $(mpi_api_files:.f90=.lo)
$(mpi_api_lo_files): mpi-f08.lo
mpi-f08.lo: mpi-f08-types.lo

View file

@ -50,10 +50,10 @@ JNIEXPORT jlong JNICALL Java_mpi_Win_allocateWin(JNIEnv *env, jobject jthis,
{
void *basePtr = (*env)->GetDirectBufferAddress(env, jBase);
MPI_Win win;
int rc = MPI_Win_allocate((MPI_Aint)size, dispUnit,
(MPI_Info)info, (MPI_Comm)comm, basePtr, &win);
ompi_java_exceptionCheck(env, rc);
return (jlong)win;
}
@ -63,10 +63,10 @@ JNIEXPORT jlong JNICALL Java_mpi_Win_allocateSharedWin(JNIEnv *env, jobject jthi
{
void *basePtr = (*env)->GetDirectBufferAddress(env, jBase);
MPI_Win win;
int rc = MPI_Win_allocate_shared((MPI_Aint)size, dispUnit,
(MPI_Info)info, (MPI_Comm)comm, basePtr, &win);
ompi_java_exceptionCheck(env, rc);
return (jlong)win;
}
@ -333,11 +333,11 @@ JNIEXPORT jlong JNICALL Java_mpi_Win_rPut(JNIEnv *env, jobject jthis,
{
void *origPtr = ompi_java_getDirectBufferAddress(env, origin_addr);
MPI_Request request;
int rc = MPI_Rput(origPtr, origin_count, (MPI_Datatype)origin_type,
target_rank, (MPI_Aint)target_disp, target_count, (MPI_Datatype)target_datatype,
(MPI_Win)win, &request);
ompi_java_exceptionCheck(env, rc);
return (jlong)request;
}
@ -348,11 +348,11 @@ JNIEXPORT jlong JNICALL Java_mpi_Win_rGet(JNIEnv *env, jobject jthis, jlong win,
{
void *orgPtr = (*env)->GetDirectBufferAddress(env, origin);
MPI_Request request;
int rc = MPI_Rget(orgPtr, orgCount, (MPI_Datatype)orgType,
targetRank, (MPI_Aint)targetDisp, targetCount,
(MPI_Datatype)targetType, (MPI_Win)win, &request);
ompi_java_exceptionCheck(env, rc);
return (jlong)request;
}
@ -364,11 +364,11 @@ JNIEXPORT jlong JNICALL Java_mpi_Win_rAccumulate(JNIEnv *env, jobject jthis, jlo
void *orgPtr = (*env)->GetDirectBufferAddress(env, origin);
MPI_Op op = ompi_java_op_getHandle(env, jOp, hOp, baseType);
MPI_Request request;
int rc = MPI_Raccumulate(orgPtr, orgCount, (MPI_Datatype)orgType,
targetRank, (MPI_Aint)targetDisp, targetCount,
(MPI_Datatype)targetType, op, (MPI_Win)win, &request);
ompi_java_exceptionCheck(env, rc);
return (jlong)request;
}
@ -381,12 +381,12 @@ JNIEXPORT void JNICALL Java_mpi_Win_getAccumulate(JNIEnv *env, jobject jthis, jl
void *orgPtr = (*env)->GetDirectBufferAddress(env, origin);
void *resultPtr = (*env)->GetDirectBufferAddress(env, resultBuff);
MPI_Op op = ompi_java_op_getHandle(env, jOp, hOp, baseType);
int rc = MPI_Get_accumulate(orgPtr, orgCount, (MPI_Datatype)orgType,
resultPtr, resultCount, (MPI_Datatype)resultType,
targetRank, (MPI_Aint)targetDisp, targetCount,
(MPI_Datatype)targetType, op, (MPI_Win)win);
ompi_java_exceptionCheck(env, rc);
}
@ -399,12 +399,12 @@ JNIEXPORT jlong JNICALL Java_mpi_Win_rGetAccumulate(JNIEnv *env, jobject jthis,
void *resultPtr = (*env)->GetDirectBufferAddress(env, resultBuff);
MPI_Op op = ompi_java_op_getHandle(env, jOp, hOp, baseType);
MPI_Request request;
int rc = MPI_Rget_accumulate(orgPtr, orgCount, (MPI_Datatype)orgType,
resultPtr, resultCount, (MPI_Datatype)resultType,
targetRank, (MPI_Aint)targetDisp, targetCount,
(MPI_Datatype)targetType, op, (MPI_Win)win, &request);
ompi_java_exceptionCheck(env, rc);
return (jlong)request;
}
@ -445,7 +445,7 @@ JNIEXPORT void JNICALL Java_mpi_Win_compareAndSwap (JNIEnv *env, jobject jthis,
void *orgPtr = (*env)->GetDirectBufferAddress(env, origin);
void *compPtr = (*env)->GetDirectBufferAddress(env, compareAddr);
void *resultPtr = (*env)->GetDirectBufferAddress(env, resultAddr);
int rc = MPI_Compare_and_swap(orgPtr, compPtr, resultPtr, dataType, targetRank, targetDisp, (MPI_Win)win);
ompi_java_exceptionCheck(env, rc);
}
@ -456,7 +456,7 @@ JNIEXPORT void JNICALL Java_mpi_Win_fetchAndOp(JNIEnv *env, jobject jthis, jlong
void *orgPtr = (*env)->GetDirectBufferAddress(env, origin);
void *resultPtr = (*env)->GetDirectBufferAddress(env, resultAddr);
MPI_Op op = ompi_java_op_getHandle(env, jOp, hOp, baseType);
int rc = MPI_Fetch_and_op(orgPtr, resultPtr, dataType, targetRank, targetDisp, op, (MPI_Win)win);
ompi_java_exceptionCheck(env, rc);
}

View file

@ -126,7 +126,7 @@ public final class CartComm extends Intracomm
MPI.check();
return new CartComm(dupWithInfo(handle, info.handle));
}
/**
* Returns cartesian topology information.
* <p>Java binding of the MPI operations {@code MPI_CARTDIM_GET} and

View file

@ -26,7 +26,7 @@
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software

View file

@ -668,7 +668,7 @@ public class Comm implements Freeable, Cloneable
{
MPI.check();
assertDirectBuffer(buf);
Request req = new Request(ibSend(handle, buf, count, type.handle, dest, tag));
Request req = new Request(ibSend(handle, buf, count, type.handle, dest, tag));
req.addSendBufRef(buf);
return req;
}
@ -2400,8 +2400,8 @@ public class Comm implements Freeable, Cloneable
recvHandles);
}
private native void allToAllw(long comm,
Buffer sendBuf, int[] sendCount, int[] sDispls, long[] sendTypes,
private native void allToAllw(long comm,
Buffer sendBuf, int[] sendCount, int[] sDispls, long[] sendTypes,
Buffer recvBuf, int[] recvCount, int[] rDispls, long[] recvTypes)
throws MPIException;

View file

@ -82,7 +82,7 @@ public final class Count implements Comparable
return false;
}
public int compareTo(Object obj)
public int compareTo(Object obj)
{
if(obj instanceof Count) {
if(this.count - ((Count)obj).getCount() > 0) {

View file

@ -26,7 +26,7 @@
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software

View file

@ -126,7 +126,7 @@ public final class GraphComm extends Intracomm
MPI.check();
return new GraphComm(dupWithInfo(handle, info.handle));
}
/**
* Returns graph topology information.
* <p>Java binding of the MPI operations {@code MPI_GRAPHDIMS_GET}

View file

@ -26,7 +26,7 @@
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software

View file

@ -106,7 +106,7 @@ public final class Intercomm extends Comm
MPI.check();
return new Intercomm(iDup(handle));
}
/**
* Duplicates this communicator with the info object used in the call.
* <p>Java binding of {@code MPI_COMM_DUP_WITH_INFO}.
@ -119,7 +119,7 @@ public final class Intercomm extends Comm
MPI.check();
return new Intercomm(dupWithInfo(handle, info.handle));
}
// Inter-Communication
/**

View file

@ -123,7 +123,7 @@ public class Intracomm extends Comm
MPI.check();
return new Intracomm(iDup(handle));
}
/**
* Duplicates this communicator with the info object used in the call.
* <p>Java binding of {@code MPI_COMM_DUP_WITH_INFO}.
@ -136,7 +136,7 @@ public class Intracomm extends Comm
MPI.check();
return new Intracomm(dupWithInfo(handle, info.handle));
}
/**
* Partition the group associated with this communicator and create
* a new communicator within each subgroup.
@ -186,7 +186,7 @@ public class Intracomm extends Comm
}
private native long create(long comm, long group);
/**
* Create a new intracommunicator for the given group.
* <p>Java binding of the MPI operation {@code MPI_COMM_CREATE_GROUP}.
@ -202,7 +202,7 @@ public class Intracomm extends Comm
}
private native long createGroup(long comm, long group, int tag);
// Topology Constructors
/**

View file

@ -998,4 +998,4 @@ public final class MPI
return DoubleBuffer.wrap(buf, offset, buf.length - offset).slice();
}
} // MPI
} // MPI

View file

@ -26,7 +26,7 @@
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software

View file

@ -117,10 +117,10 @@ public class Request implements Freeable
private native void cancel(long request) throws MPIException;
/**
* Adds a receive buffer to this Request object. This method
* should be called by the internal api whenever a persistent
* request is created and any time a request object, that has
* an associated buffer, is returned from an opperation to protect
* Adds a receive buffer to this Request object. This method
* should be called by the internal api whenever a persistent
* request is created and any time a request object, that has
* an associated buffer, is returned from an opperation to protect
* the buffer from getting prematurely garbage collected.
* @param buf buffer to add to the array list
*/
@ -130,10 +130,10 @@ public class Request implements Freeable
}
/**
* Adds a send buffer to this Request object. This method
* should be called by the internal api whenever a persistent
* request is created and any time a request object, that has
* an associated buffer, is returned from an opperation to protect
* Adds a send buffer to this Request object. This method
* should be called by the internal api whenever a persistent
* request is created and any time a request object, that has
* an associated buffer, is returned from an opperation to protect
* the buffer from getting prematurely garbage collected.
* @param buf buffer to add to the array list
*/

View file

@ -159,7 +159,7 @@ public final class Status
private native Count getElementsX(
int source, int tag, int error,
int cancelled, long ucount, long datatype) throws MPIException;
/**
* Sets the number of basic elements for this status object.
* <p>Java binding of the MPI operation {@code MPI_STATUS_SET_ELEMENTS}.
@ -182,7 +182,7 @@ public final class Status
private native int setElements(
int source, int tag, int error,
int cancelled, long ucount, long datatype, int count) throws MPIException;
/**
* Sets the number of basic elements for this status object.
* <p>Java binding of the MPI operation {@code MPI_STATUS_SET_ELEMENTS_X}.
@ -205,7 +205,7 @@ public final class Status
private native long setElementsX(
int source, int tag, int error,
int cancelled, long ucount, long datatype, long count) throws MPIException;
/**
* Sets the cancelled flag.
* <p>Java binding of the MPI operation {@code MPI_STATUS_SET_CANCELLED}.
@ -221,7 +221,7 @@ public final class Status
int error = (int)data[i++];
int cancelled = (int)data[i++];
long ucount = data[i++];
if(flag) {
setCancelled(source, tag, error, cancelled, ucount, 1);
data[3] = 1;
@ -229,13 +229,13 @@ public final class Status
setCancelled(source, tag, error, cancelled, ucount, 0);
data[3] = 0;
}
}
private native void setCancelled(
int source, int tag, int error,
int cancelled, long ucount, int flag) throws MPIException;
/**
* Returns the "source" of message.
* <p>Java binding of the MPI value {@code MPI_SOURCE}.

View file

@ -123,7 +123,7 @@ public final class Win implements Freeable
* Java binding of {@code MPI_WIN_CREATE_DYNAMIC}.
* @param info info object
* @param comm communicator
* @throws MPIException Signals that an MPI exception of some sort has occurred.
* @throws MPIException Signals that an MPI exception of some sort has occurred.
*/
public Win(Info info, Comm comm)
throws MPIException
@ -735,7 +735,7 @@ public final class Win implements Freeable
/**
* Java binding of the MPI operation {@code MPI_WIN_LOCK_ALL}.
* @param assertion program assertion
* @throws MPIException Signals that an MPI exception of some sort has occurred.
* @throws MPIException Signals that an MPI exception of some sort has occurred.
*/
public void lockAll(int assertion) throws MPIException
{
@ -748,7 +748,7 @@ public final class Win implements Freeable
/**
* Java binding of the MPI operation {@code MPI_WIN_UNLOCK_ALL}.
* @throws MPIException Signals that an MPI exception of some sort has occurred.
* @throws MPIException Signals that an MPI exception of some sort has occurred.
*/
public void unlockAll() throws MPIException
{
@ -760,7 +760,7 @@ public final class Win implements Freeable
/**
* Java binding of the MPI operation {@code MPI_WIN_SYNC}.
* @throws MPIException Signals that an MPI exception of some sort has occurred.
* @throws MPIException Signals that an MPI exception of some sort has occurred.
*/
public void sync() throws MPIException
{
@ -773,7 +773,7 @@ public final class Win implements Freeable
/**
* Java binding of the MPI operation {@code MPI_WIN_FLUSH}.
* @param targetRank rank of target window
* @throws MPIException Signals that an MPI exception of some sort has occurred.
* @throws MPIException Signals that an MPI exception of some sort has occurred.
*/
public void flush(int targetRank) throws MPIException
{
@ -785,7 +785,7 @@ public final class Win implements Freeable
/**
* Java binding of the MPI operation {@code MPI_WIN_FLUSH_ALL}.
* @throws MPIException Signals that an MPI exception of some sort has occurred.
* @throws MPIException Signals that an MPI exception of some sort has occurred.
*/
public void flushAll() throws MPIException
{
@ -834,7 +834,7 @@ public final class Win implements Freeable
* @throws MPIException Signals that an MPI exception of some sort has occurred.
*/
public void fetchAndOp(Buffer origin, Buffer resultAddr, Datatype dataType,
public void fetchAndOp(Buffer origin, Buffer resultAddr, Datatype dataType,
int targetRank, int targetDisp, Op op)
throws MPIException
{
@ -843,18 +843,18 @@ public final class Win implements Freeable
if(!origin.isDirect())
throw new IllegalArgumentException("The origin must be direct buffer.");
fetchAndOp(handle, origin, resultAddr, dataType.handle, targetRank,
fetchAndOp(handle, origin, resultAddr, dataType.handle, targetRank,
targetDisp, op, op.handle, getBaseType(dataType, dataType));
}
private native void fetchAndOp(
long win, Buffer origin, Buffer resultAddr, long targetType, int targetRank,
long win, Buffer origin, Buffer resultAddr, long targetType, int targetRank,
int targetDisp, Op jOp, long hOp, int baseType) throws MPIException;
/**
* Java binding of the MPI operation {@code MPI_WIN_FLUSH_LOCAL}.
* @param targetRank rank of target window
* @throws MPIException Signals that an MPI exception of some sort has occurred.
* @throws MPIException Signals that an MPI exception of some sort has occurred.
*/
public void flushLocal(int targetRank) throws MPIException
@ -867,7 +867,7 @@ public final class Win implements Freeable
/**
* Java binding of the MPI operation {@code MPI_WIN_FLUSH_LOCAL_ALL}.
* @throws MPIException Signals that an MPI exception of some sort has occurred.
* @throws MPIException Signals that an MPI exception of some sort has occurred.
*/
public void flushLocalAll() throws MPIException

View file

@ -42,10 +42,10 @@ Size of window in bytes (nonnegative integer).
.ft R
.TP 1i
win
Window object returned by the call (handle).
Window object returned by the call (handle).
.TP 1i
IERROR
Fortran only: Error status (integer).
Fortran only: Error status (integer).
.SH DESCRIPTION
.ft R
@ -57,14 +57,14 @@ If the \fIbase\fP value used by MPI_Win_create was allocated by MPI_Alloc_mem, t
.SH NOTES
Use memory allocated by MPI_Alloc_mem to guarantee properly aligned window boundaries (such as word, double-word, cache line, page frame, and so on).
.sp
.SH ERRORS
Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument.
.sp
Before the error value is returned, the current MPI error handler is
called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error.
called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error.
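For illustration, a minimal sketch of the usage this page describes, assuming the standard MPI_Alloc_mem/MPI_Win_create C bindings; the buffer size and displacement unit are arbitrary choices:
/* Illustrative sketch: a window backed by MPI_Alloc_mem, as the NOTES
 * section above recommends for properly aligned window boundaries. */
#include <mpi.h>

int main(int argc, char **argv)
{
    MPI_Win  win;
    int     *base = NULL;
    MPI_Aint size = 1024 * sizeof(int);

    MPI_Init(&argc, &argv);

    MPI_Alloc_mem(size, MPI_INFO_NULL, &base);      /* aligned allocation */
    MPI_Win_create(base, size, sizeof(int),         /* disp_unit in bytes */
                   MPI_INFO_NULL, MPI_COMM_WORLD, &win);

    /* ... one-sided communication on win ... */

    MPI_Win_free(&win);
    MPI_Free_mem(base);
    MPI_Finalize();
    return 0;
}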

View file

@ -34,10 +34,10 @@ Communicator (handle).
.ft R
.TP 1i
win
Window object returned by the call (handle).
Window object returned by the call (handle).
.TP 1i
IERROR
Fortran only: Error status (integer).
Fortran only: Error status (integer).
.SH DESCRIPTION
.ft R
@ -46,12 +46,12 @@ MPI_Win_create_dynamic is a one-sided MPI communication collective call executed
The following info keys are supported:
.ft R
.TP 1i
no_locks
no_locks
If set to \fItrue\fP, then the implementation may assume that the local
window is never locked (by a call to MPI_Win_lock or
MPI_Win_lock_all). Setting this value if only active synchronization
may allow the implementation to enable certain optimizations.
.sp
.sp
.TP 1i
accumulate_ordering
By default, accumulate operations from one initiator to one target on
@ -62,7 +62,7 @@ required orderings consisting of \fIrar\fP, \fIwar\fP, \fIraw\fP, and \fIwaw\fP
read-after-read, write-after-read, read-after-write, and
write-after-write, respectively. Looser ordering constraints are
likely to result in improved performance.
.sp
.sp
.TP 1i
accumulate_ops
If set to \fIsame_op\fP, the implementation will assume that all concurrent
@ -75,7 +75,7 @@ will use the same operation or MPI_NO_OP. The default is \fIsame_op_no_op\fP.
Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument.
.sp
Before the error value is returned, the current MPI error handler is
called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error.
called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error.
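For illustration, a minimal sketch that passes the info keys described above to MPI_Win_create_dynamic; the particular key values and the attached buffer are arbitrary examples, not something this page prescribes:
/* Illustrative sketch: pass the info keys described above when creating
 * a dynamic window. */
#include <mpi.h>

int main(int argc, char **argv)
{
    MPI_Win  win;
    MPI_Info info;
    int      buf[64];

    MPI_Init(&argc, &argv);

    MPI_Info_create(&info);
    MPI_Info_set(info, "no_locks", "true");               /* only active synchronization is used */
    MPI_Info_set(info, "accumulate_ordering", "raw,waw"); /* relax the default rar,war,raw,waw */

    MPI_Win_create_dynamic(info, MPI_COMM_WORLD, &win);
    MPI_Win_attach(win, buf, sizeof(buf));   /* memory is attached after creation */

    /* ... RMA operations ... */

    MPI_Win_detach(win, buf);
    MPI_Win_free(&win);
    MPI_Info_free(&info);
    MPI_Finalize();
    return 0;
}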

View file

@ -34,16 +34,16 @@ info_used
New info object returned with all active hints on this window.
.TP 1i
IERROR
Fortran only: Error status (integer).
Fortran only: Error status (integer).
.
.SH DESCRIPTION
.ft R
MPI_Win_get_info returns a new info object containing the hints of
the window associated with
.IR win .
the window associated with
.IR win .
The current setting of all hints actually used by the system related
to this window is returned in
.IR info_used .
to this window is returned in
.IR info_used .
If no such hints exist, a handle to a newly created info object is
returned that contains no key/value pair. The user is responsible for
freeing info_used via MPI_Info_free.
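For illustration, a minimal sketch of the calling pattern described above (query the hints, then free the returned object); the "no_locks" key queried here is just an example:
/* Illustrative sketch: query the hints in effect on a window and free the
 * returned info object afterwards. */
#include <mpi.h>
#include <stdio.h>

static void print_no_locks_hint(MPI_Win win)
{
    MPI_Info info_used;
    char     value[MPI_MAX_INFO_VAL + 1];
    int      flag;

    MPI_Win_get_info(win, &info_used);
    MPI_Info_get(info_used, "no_locks", MPI_MAX_INFO_VAL, value, &flag);
    if (flag) {
        printf("no_locks hint in effect: %s\n", value);
    }
    MPI_Info_free(&info_used);   /* the caller owns info_used */
}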

View file

@ -34,17 +34,17 @@ Info object containing hints to be set on
.SH OUTPUT PARAMETERS
.TP 1i
IERROR
Fortran only: Error status (integer).
Fortran only: Error status (integer).
.
.SH DESCRIPTION
.ft R
MPI_WIN_SET_INFO sets new values for the hints of the window
associated with
associated with
.IR win.
MPI_WIN_SET_INFO is a collective routine. The info object may be
different on each process, but any info entries that an implementation
requires to be the same on all processes must appear with the same
value in each process's
value in each process's
.I info
object.
.
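For illustration, a minimal sketch of applying new hints to an existing window as described above; the "none" ordering value is used purely as an example:
/* Illustrative sketch: apply new hints to an existing window.
 * MPI_Win_set_info is collective over the window's group. */
#include <mpi.h>

static void relax_accumulate_ordering(MPI_Win win)
{
    MPI_Info info;

    MPI_Info_create(&info);
    MPI_Info_set(info, "accumulate_ordering", "none");  /* example value only */
    MPI_Win_set_info(win, info);   /* collective call */
    MPI_Info_free(&info);          /* the window keeps its own copy of the hints */
}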

View file

@ -313,7 +313,7 @@ static int get_rsrc_exists(char str[OMPI_AFFINITY_STRING_MAX])
}
}
}
}
}
return OMPI_SUCCESS;
}

View file

@ -5,7 +5,7 @@ $COPYRIGHT$
Rolf vandeVaart
This extension provides a macro for compile time check of CUDA aware support.
This extension provides a macro for compile time check of CUDA aware support.
It also provides a function for runtime check of CUDA aware support.
See MPIX_Query_cuda_support(3) for more details.
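For illustration, a minimal sketch of combining the compile-time macro with the runtime query mentioned here; the OPEN_MPI guard around mpi-ext.h is a common convention, not something this text mandates:
/* Illustrative sketch: compile-time and runtime checks for CUDA-aware support. */
#include <stdio.h>
#include <mpi.h>
#if defined(OPEN_MPI) && OPEN_MPI
#include <mpi-ext.h>   /* Open MPI extensions, including MPIX_CUDA_AWARE_SUPPORT */
#endif

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);

#if defined(MPIX_CUDA_AWARE_SUPPORT) && MPIX_CUDA_AWARE_SUPPORT
    printf("compile-time check: built with CUDA-aware support\n");
#else
    printf("compile-time check: built without CUDA-aware support\n");
#endif

#if defined(MPIX_CUDA_AWARE_SUPPORT)
    printf("runtime check: %s\n",
           MPIX_Query_cuda_support() ? "CUDA-aware" : "not CUDA-aware");
#endif

    MPI_Finalize();
    return 0;
}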

View file

@ -349,7 +349,7 @@ of the source tree.
#include <stdio.h>
#include <string.h>
static void print_children(hwloc_topology_t topology, hwloc_obj_t obj,
static void print_children(hwloc_topology_t topology, hwloc_obj_t obj,
int depth)
{
char type[32], attr[1024];
@ -384,7 +384,7 @@ int main(void)
hwloc_topology_init(&topology);
/* ... Optionally, put detection configuration here to ignore
some objects types, define a synthetic topology, etc....
some objects types, define a synthetic topology, etc....
The default is to detect all the objects of the machine that
the caller is allowed to access. See Configure Topology
@ -404,7 +404,7 @@ int main(void)
*****************************************************************/
for (depth = 0; depth < topodepth; depth++) {
printf("*** Objects at level %d\n", depth);
for (i = 0; i < hwloc_get_nbobjs_by_depth(topology, depth);
for (i = 0; i < hwloc_get_nbobjs_by_depth(topology, depth);
i++) {
hwloc_obj_type_snprintf(string, sizeof(string),
hwloc_get_obj_by_depth
@ -446,7 +446,7 @@ int main(void)
levels++;
size += obj->attr->cache.size;
}
printf("*** Logical processor 0 has %d caches totaling %luKB\n",
printf("*** Logical processor 0 has %d caches totaling %luKB\n",
levels, size / 1024);
/*****************************************************************

View file

@ -666,7 +666,7 @@ static void component_shutdown(void)
/* because the listeners are in a separate
* async thread for apps, we can't just release them here.
* Instead, we push it into that event thread and release
* Instead, we push it into that event thread and release
* them there */
if (ORTE_PROC_IS_APP) {
opal_event_t ev;

View file

@ -83,7 +83,7 @@ static int allocate(orte_job_t *jdata, opal_list_t *nodes)
opal_argv_append_nosize(&max_slot_cnt, tmp);
}
}
#if OPAL_HAVE_HWLOC
if (NULL != mca_ras_simulator_component.topofiles) {
files = opal_argv_split(mca_ras_simulator_component.topofiles, ',');

View file

@ -726,7 +726,7 @@ int main(int argc, char *argv[])
while (mywait) {
opal_event_loop(orte_event_base, OPAL_EVLOOP_ONCE);
}
DONE:
/* cleanup and leave */
orte_finalize();