
Merge remote-tracking branch 'upstream/master'

This commit is contained in:
annu13 2015-10-06 06:03:37 -07:00
parents 6f37c0e3e8 8af80cd02c
commit 4c371ea6d7
19 changed files with 149 additions and 81 deletions

NEWS (48 changed lines)
View file

@@ -67,6 +67,54 @@ Master (not on release branches yet)
Please consider Score-P (score-p.org) as an external replacement.
1.10.1
------
- Fix segv when invoking non-blocking reductions with a user-defined
operation; a minimal reproducer sketch follows this list. Thanks to
Rupert Nash and Georg Geiser for identifying the issue.
- No longer probe for PCI topology on Solaris (unless running as root).
- Fix for Intel Parallel Studio 2016 ifort partial support of the
!GCC$ pragma. Thanks to Fabrice Roy for reporting the problem.
- Bunches of Coverity / static analysis fixes.
- Fixed ROMIO to look for lstat in <sys/stat.h>. Thanks to William
Throwe for submitting the patch both upstream and to Open MPI.
- Fixed minor memory leak when attempting to open plugins.
- Fixed typo in MPI_IBARRIER C prototype. Thanks to Harald Servat for
reporting the issue.
- Add missing man pages for MPI_WIN_CREATE_DYNAMIC, MPI_WIN_ATTACH,
MPI_WIN_DETACH, MPI_WIN_ALLOCATE, MPI_WIN_ALLOCATE_SHARED.
- When mpirun-launching new applications, only close file descriptors
that are actually open (resulting in a faster launch in some
environments).
- Fix "test ==" issues in Open MPI's configure script. Thank to Kevin
Buckley for pointing out the issue.
- Fix performance issue in usnic BTL: ensure progress thread is
throttled back to not aggressively steal CPU cycles.
- Fix cache line size detection on POWER architectures.
- Add missing #include in a few places. Thanks to Orion Poplawski for
supplying the patch.
- When OpenSHMEM building is disabled, no longer install its header
files, help files, or man pages.
- Fix mpi_f08 implementations of MPI_COMM_SET_INFO, and profiling
versions of MPI_BUFFER_DETACH, MPI_WIN_ALLOCATE,
MPI_WIN_ALLOCATE_SHARED, MPI_WTICK, and MPI_WTIME.
- Add orte_rmaps_dist_device MCA param, allowing users to map near a
specific device.
- Various updates/fixes to the openib BTL.
- Add missing defaults for the Mellanox ConnectX 3 card to the openib BTL.
- Minor bug fixes in the OFI MTL.
- Various updates to Mellanox's hcoll and FCA components.
- Add OpenSHMEM man pages. Thanks to Tony Curtis for sharing the man
pages files from openshmem.org.
- Add missing "const" attributes to MPI_COMPARE_AND_SWAP,
MPI_FETCH_AND_OP, MPI_RACCUMULATE, and MPI_WIN_DETACH prototypes.
Thanks to Michael Knobloch and Takahiro Kawashima for bringing this
to our attention.
- Fix linking issues on some platforms (e.g., SLES 12).
- Fix hang on some corner cases when MPI applications abort.
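
For context on the first item above: a non-blocking reduction over a
user-defined operation exercises Open MPI's internal three-buffer
reduction path, which previously had no fallback for non-intrinsic ops
(see the ompi/op changes later in this commit). A minimal reproducer
sketch, assuming only the standard MPI-3 API; the operation name and
reduction logic here are illustrative, not taken from the bug report:

/* Hypothetical reproducer: MPI_Iallreduce with a user-defined op. */
#include <mpi.h>

static void my_sum(void *in, void *inout, int *len, MPI_Datatype *dtype)
{
    int *a = (int *) in, *b = (int *) inout;
    for (int i = 0; i < *len; ++i) {
        b[i] += a[i];          /* inout = in (op) inout */
    }
    (void) dtype;              /* single-type example; datatype unused */
}

int main(int argc, char **argv)
{
    int in = 1, out = 0;
    MPI_Op op;
    MPI_Request req;

    MPI_Init(&argc, &argv);
    MPI_Op_create(my_sum, 1 /* commutative */, &op);
    /* Non-blocking collective + user op: the combination that hit the
       three-buffer code path and segfaulted before this fix. */
    MPI_Iallreduce(&in, &out, 1, MPI_INT, op, MPI_COMM_WORLD, &req);
    MPI_Wait(&req, MPI_STATUS_IGNORE);
    MPI_Op_free(&op);
    MPI_Finalize();
    return 0;
}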
1.10.0
------
** NOTE: The v1.10.0 release marks the transition to Open MPI's new

View file

@@ -374,6 +374,12 @@ AC_DEFUN([OMPI_SETUP_MPI_FORTRAN],[
OMPI_FORTRAN_F08_TYPE=$OMPI_FORTRAN_IGNORE_TKR_TYPE
])
# F08 bindings require the usempi PMPI profiling bindings
AS_IF([test "$WANT_MPI_PROFILING" -eq 0],
[OMPI_TRY_FORTRAN_BINDINGS=$OMPI_FORTRAN_USEMPI_BINDINGS
OMPI_BUILD_FORTRAN_BINDINGS=$OMPI_FORTRAN_USEMPI_BINDINGS
AC_MSG_WARN([PMPI is not built, cannot build usempif08 bindings])])
# The overall "_BIND_C" variable will be set to 1 if we have all
# the necessary forms of BIND(C)
OMPI_FORTRAN_HAVE_BIND_C=0

View file

@@ -104,7 +104,7 @@ AC_DEFUN([OPAL_LOAD_PLATFORM], [
if test -d "${patch_dir}"; then
if test ! -f "${patch_done}"; then
OPAL_LOG_MSG([Checking patches from ${patch_dir}/ directory ])
AC_MSG_NOTICE([Checking patches from ${patch_dir}/ directory ])
for one_patch in $patch_dir/*.patch ; do
AC_MSG_CHECKING([patch: $one_patch for errors ])
@@ -129,7 +129,7 @@ AC_DEFUN([OPAL_LOAD_PLATFORM], [
for one_patch in $patch_dir/*.patch ; do
OPAL_LOG_MSG([Applying patch ${one_patch}])
AC_MSG_NOTICE([Applying patch ${one_patch}])
patch -d ${srcdir} -p1 -t -s < ${one_patch}
if test "$?" != "0"; then
AC_MSG_ERROR([Failed to apply patch ${one_patch}])
@@ -161,7 +161,7 @@ AC_DEFUN([OPAL_LOAD_PLATFORM], [
AC_MSG_WARN([Platform patches already applied, skipping. ${patch_done} can be removed to re-apply ])
fi
else
OPAL_LOG_MSG([No platform patches in ${patch_dir}])
AC_MSG_NOTICE([No platform patches in ${patch_dir}])
fi
else

View file

@@ -153,6 +153,10 @@ int mca_coll_fca_barrier(struct ompi_communicator_t *comm,
int ret;
FCA_VERBOSE(5,"Using FCA Barrier");
if (OPAL_UNLIKELY(ompi_mpi_finalize_started)) {
FCA_VERBOSE(5, "In finalize, reverting to previous barrier");
goto orig_barrier;
}
ret = fca_do_barrier(fca_module->fca_comm);
if (ret < 0) {
if (ret == -EUSEMPI) {

View file

@@ -3,6 +3,8 @@
* Copyright (c) 2004-2015 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2015 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@@ -93,8 +95,6 @@ int ompi_coll_tuned_allgather_intra_dec_dynamic(ALLGATHER_ARGS);
int ompi_coll_tuned_allgather_intra_do_forced(ALLGATHER_ARGS);
int ompi_coll_tuned_allgather_intra_do_this(ALLGATHER_ARGS, int algorithm, int faninout, int segsize);
int ompi_coll_tuned_allgather_intra_check_forced_init(coll_tuned_force_algorithm_mca_param_indices_t *mca_param_indices);
int ompi_coll_tuned_allgather_inter_dec_fixed(ALLGATHER_ARGS);
int ompi_coll_tuned_allgather_inter_dec_dynamic(ALLGATHER_ARGS);
/* All GatherV */
int ompi_coll_tuned_allgatherv_intra_dec_fixed(ALLGATHERV_ARGS);
@@ -102,8 +102,6 @@ int ompi_coll_tuned_allgatherv_intra_dec_dynamic(ALLGATHERV_ARGS);
int ompi_coll_tuned_allgatherv_intra_do_forced(ALLGATHERV_ARGS);
int ompi_coll_tuned_allgatherv_intra_do_this(ALLGATHERV_ARGS, int algorithm, int faninout, int segsize);
int ompi_coll_tuned_allgatherv_intra_check_forced_init(coll_tuned_force_algorithm_mca_param_indices_t *mca_param_indices);
int ompi_coll_tuned_allgatherv_inter_dec_fixed(ALLGATHERV_ARGS);
int ompi_coll_tuned_allgatherv_inter_dec_dynamic(ALLGATHERV_ARGS);
/* All Reduce */
int ompi_coll_tuned_allreduce_intra_dec_fixed(ALLREDUCE_ARGS);
@@ -111,8 +109,6 @@ int ompi_coll_tuned_allreduce_intra_dec_dynamic(ALLREDUCE_ARGS);
int ompi_coll_tuned_allreduce_intra_do_forced(ALLREDUCE_ARGS);
int ompi_coll_tuned_allreduce_intra_do_this(ALLREDUCE_ARGS, int algorithm, int faninout, int segsize);
int ompi_coll_tuned_allreduce_intra_check_forced_init (coll_tuned_force_algorithm_mca_param_indices_t *mca_param_indices);
int ompi_coll_tuned_allreduce_inter_dec_fixed(ALLREDUCE_ARGS);
int ompi_coll_tuned_allreduce_inter_dec_dynamic(ALLREDUCE_ARGS);
/* AlltoAll */
int ompi_coll_tuned_alltoall_intra_dec_fixed(ALLTOALL_ARGS);
@@ -120,8 +116,6 @@ int ompi_coll_tuned_alltoall_intra_dec_dynamic(ALLTOALL_ARGS);
int ompi_coll_tuned_alltoall_intra_do_forced(ALLTOALL_ARGS);
int ompi_coll_tuned_alltoall_intra_do_this(ALLTOALL_ARGS, int algorithm, int faninout, int segsize, int max_requests);
int ompi_coll_tuned_alltoall_intra_check_forced_init (coll_tuned_force_algorithm_mca_param_indices_t *mca_param_indices);
int ompi_coll_tuned_alltoall_inter_dec_fixed(ALLTOALL_ARGS);
int ompi_coll_tuned_alltoall_inter_dec_dynamic(ALLTOALL_ARGS);
/* AlltoAllV */
int ompi_coll_tuned_alltoallv_intra_dec_fixed(ALLTOALLV_ARGS);
@@ -129,14 +123,6 @@ int ompi_coll_tuned_alltoallv_intra_dec_dynamic(ALLTOALLV_ARGS);
int ompi_coll_tuned_alltoallv_intra_do_forced(ALLTOALLV_ARGS);
int ompi_coll_tuned_alltoallv_intra_do_this(ALLTOALLV_ARGS, int algorithm);
int ompi_coll_tuned_alltoallv_intra_check_forced_init(coll_tuned_force_algorithm_mca_param_indices_t *mca_param_indices);
int ompi_coll_tuned_alltoallv_inter_dec_fixed(ALLTOALLV_ARGS);
int ompi_coll_tuned_alltoallv_inter_dec_dynamic(ALLTOALLV_ARGS);
/* AlltoAllW */
int ompi_coll_tuned_alltoallw_intra_dec_fixed(ALLTOALLW_ARGS);
int ompi_coll_tuned_alltoallw_intra_dec_dynamic(ALLTOALLW_ARGS);
int ompi_coll_tuned_alltoallw_inter_dec_fixed(ALLTOALLW_ARGS);
int ompi_coll_tuned_alltoallw_inter_dec_dynamic(ALLTOALLW_ARGS);
/* Barrier */
int ompi_coll_tuned_barrier_intra_dec_fixed(BARRIER_ARGS);
@@ -144,24 +130,13 @@ int ompi_coll_tuned_barrier_intra_dec_dynamic(BARRIER_ARGS);
int ompi_coll_tuned_barrier_intra_do_forced(BARRIER_ARGS);
int ompi_coll_tuned_barrier_intra_do_this(BARRIER_ARGS, int algorithm, int faninout, int segsize);
int ompi_coll_tuned_barrier_intra_check_forced_init (coll_tuned_force_algorithm_mca_param_indices_t *mca_param_indices);
int ompi_coll_tuned_barrier_inter_dec_fixed(BARRIER_ARGS);
int ompi_coll_tuned_barrier_inter_dec_dynamic(BARRIER_ARGS);
/* Bcast */
int ompi_coll_tuned_bcast_intra_generic( BCAST_ARGS, uint32_t count_by_segment, ompi_coll_tree_t* tree );
int ompi_coll_tuned_bcast_intra_dec_fixed(BCAST_ARGS);
int ompi_coll_tuned_bcast_intra_dec_dynamic(BCAST_ARGS);
int ompi_coll_tuned_bcast_intra_do_forced(BCAST_ARGS);
int ompi_coll_tuned_bcast_intra_do_this(BCAST_ARGS, int algorithm, int faninout, int segsize);
int ompi_coll_tuned_bcast_intra_check_forced_init (coll_tuned_force_algorithm_mca_param_indices_t *mca_param_indices);
int ompi_coll_tuned_bcast_inter_dec_fixed(BCAST_ARGS);
int ompi_coll_tuned_bcast_inter_dec_dynamic(BCAST_ARGS);
/* Exscan */
int ompi_coll_tuned_exscan_intra_dec_fixed(EXSCAN_ARGS);
int ompi_coll_tuned_exscan_intra_dec_dynamic(EXSCAN_ARGS);
int ompi_coll_tuned_exscan_inter_dec_fixed(EXSCAN_ARGS);
int ompi_coll_tuned_exscan_inter_dec_dynamic(EXSCAN_ARGS);
/* Gather */
int ompi_coll_tuned_gather_intra_dec_fixed(GATHER_ARGS);
@@ -169,24 +144,13 @@ int ompi_coll_tuned_gather_intra_dec_dynamic(GATHER_ARGS);
int ompi_coll_tuned_gather_intra_do_forced(GATHER_ARGS);
int ompi_coll_tuned_gather_intra_do_this(GATHER_ARGS, int algorithm, int faninout, int segsize);
int ompi_coll_tuned_gather_intra_check_forced_init (coll_tuned_force_algorithm_mca_param_indices_t *mca_param_indices);
int ompi_coll_tuned_gather_inter_dec_fixed(GATHER_ARGS);
int ompi_coll_tuned_gather_inter_dec_dynamic(GATHER_ARGS);
/* GatherV */
int ompi_coll_tuned_gatherv_intra_dec_fixed(GATHERV_ARGS);
int ompi_coll_tuned_gatherv_intra_dec_dynamic(GATHER_ARGS);
int ompi_coll_tuned_gatherv_inter_dec_fixed(GATHER_ARGS);
int ompi_coll_tuned_gatherv_inter_dec_dynamic(GATHER_ARGS);
/* Reduce */
int ompi_coll_tuned_reduce_generic( REDUCE_ARGS, ompi_coll_tree_t* tree, int count_by_segment, int max_outstanding_reqs );
int ompi_coll_tuned_reduce_intra_dec_fixed(REDUCE_ARGS);
int ompi_coll_tuned_reduce_intra_dec_dynamic(REDUCE_ARGS);
int ompi_coll_tuned_reduce_intra_do_forced(REDUCE_ARGS);
int ompi_coll_tuned_reduce_intra_do_this(REDUCE_ARGS, int algorithm, int faninout, int segsize, int max_oustanding_reqs);
int ompi_coll_tuned_reduce_intra_check_forced_init (coll_tuned_force_algorithm_mca_param_indices_t *mca_param_indices);
int ompi_coll_tuned_reduce_inter_dec_fixed(REDUCE_ARGS);
int ompi_coll_tuned_reduce_inter_dec_dynamic(REDUCE_ARGS);
/* Reduce_scatter */
int ompi_coll_tuned_reduce_scatter_intra_dec_fixed(REDUCESCATTER_ARGS);
@@ -194,14 +158,6 @@ int ompi_coll_tuned_reduce_scatter_intra_dec_dynamic(REDUCESCATTER_ARGS);
int ompi_coll_tuned_reduce_scatter_intra_do_forced(REDUCESCATTER_ARGS);
int ompi_coll_tuned_reduce_scatter_intra_do_this(REDUCESCATTER_ARGS, int algorithm, int faninout, int segsize);
int ompi_coll_tuned_reduce_scatter_intra_check_forced_init (coll_tuned_force_algorithm_mca_param_indices_t *mca_param_indices);
int ompi_coll_tuned_reduce_scatter_inter_dec_fixed(REDUCESCATTER_ARGS);
int ompi_coll_tuned_reduce_scatter_inter_dec_dynamic(REDUCESCATTER_ARGS);
/* Scan */
int ompi_coll_tuned_scan_intra_dec_fixed(SCAN_ARGS);
int ompi_coll_tuned_scan_intra_dec_dynamic(SCAN_ARGS);
int ompi_coll_tuned_scan_inter_dec_fixed(SCAN_ARGS);
int ompi_coll_tuned_scan_inter_dec_dynamic(SCAN_ARGS);
/* Scatter */
int ompi_coll_tuned_scatter_intra_dec_fixed(SCATTER_ARGS);
@@ -209,14 +165,6 @@ int ompi_coll_tuned_scatter_intra_dec_dynamic(SCATTER_ARGS);
int ompi_coll_tuned_scatter_intra_do_forced(SCATTER_ARGS);
int ompi_coll_tuned_scatter_intra_do_this(SCATTER_ARGS, int algorithm, int faninout, int segsize);
int ompi_coll_tuned_scatter_intra_check_forced_init (coll_tuned_force_algorithm_mca_param_indices_t *mca_param_indices);
int ompi_coll_tuned_scatter_inter_dec_fixed(SCATTER_ARGS);
int ompi_coll_tuned_scatter_inter_dec_dynamic(SCATTER_ARGS);
/* ScatterV */
int ompi_coll_tuned_scatterv_intra_dec_fixed(SCATTERV_ARGS);
int ompi_coll_tuned_scatterv_intra_dec_dynamic(SCATTERV_ARGS);
int ompi_coll_tuned_scatterv_inter_dec_fixed(SCATTERV_ARGS);
int ompi_coll_tuned_scatterv_inter_dec_dynamic(SCATTERV_ARGS);
int mca_coll_tuned_ft_event(int state);

View file

@@ -500,7 +500,7 @@ exit:
if (flat_buf->indices != NULL){
free (flat_buf->indices);
}
flat_buf = NULL;
free (flat_buf);
}
free (start_offsets);
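
The bug this hunk fixes: the exit path set the local pointer to NULL
instead of releasing the allocation, so the flat_buf structure itself
leaked even though its indices member was freed. A generic sketch of the
corrected cleanup order (hedged, not the commit's exact code):

/* Owner-first cleanup; 'flat_buf' named after the hunk above. */
if (NULL != flat_buf) {
    if (NULL != flat_buf->indices) {
        free(flat_buf->indices);  /* release owned members first */
    }
    free(flat_buf);               /* then the structure itself */
    flat_buf = NULL;              /* clearing the pointer alone frees nothing */
}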

View file

@@ -274,7 +274,6 @@ mca_fcoll_two_phase_file_write_all (mca_io_ompio_file_t *fh,
total_bytes = (size_t) long_total_bytes;
if ( 0 == total_bytes ) {
free(aggregator_list);
ret = OMPI_SUCCESS;
goto exit;
}

View file

@@ -9,7 +9,7 @@
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2013 University of Houston. All rights reserved.
* Copyright (c) 2015 University of Houston. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@@ -56,7 +56,7 @@ int mca_sharedfp_addproc_seek (mca_io_ompio_file_t *fh,
int mca_sharedfp_addproc_get_position (mca_io_ompio_file_t *fh,
OMPI_MPI_OFFSET_TYPE * offset);
int mca_sharedfp_addproc_file_open (struct ompi_communicator_t *comm,
char* filename,
const char* filename,
int amode,
struct ompi_info_t *info,
mca_io_ompio_file_t *fh);
@@ -80,24 +80,24 @@ int mca_sharedfp_addproc_iread (mca_io_ompio_file_t *fh,
struct ompi_datatype_t *datatype,
ompi_request_t **request);
int mca_sharedfp_addproc_write (mca_io_ompio_file_t *fh,
void *buf,
const void *buf,
int count,
struct ompi_datatype_t *datatype,
ompi_status_public_t *status);
int mca_sharedfp_addproc_write_ordered (mca_io_ompio_file_t *fh,
void *buf,
const void *buf,
int count,
struct ompi_datatype_t *datatype,
ompi_status_public_t *status);
int mca_sharedfp_addproc_write_ordered_begin (mca_io_ompio_file_t *fh,
void *buf,
const void *buf,
int count,
struct ompi_datatype_t *datatype);
int mca_sharedfp_addproc_write_ordered_end (mca_io_ompio_file_t *fh,
void *buf,
const void *buf,
ompi_status_public_t *status);
int mca_sharedfp_addproc_iwrite (mca_io_ompio_file_t *fh,
void *buf,
const void *buf,
int count,
struct ompi_datatype_t *datatype,
ompi_request_t **request);
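
The const qualifiers added in this header propagate MPI-3
const-correctness into the sharedfp component interface: the standard
declares write-side user buffers const, so the internal prototypes must
match to avoid discarding qualifiers at the call sites. For reference,
the user-facing MPI-3 prototype these functions ultimately serve:

/* MPI-3 prototype (as in mpi.h); the write-side buffer is const. */
int MPI_File_write(MPI_File fh, const void *buf, int count,
                   MPI_Datatype datatype, MPI_Status *status);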

View file

@@ -32,7 +32,7 @@
int mca_sharedfp_addproc_file_open (struct ompi_communicator_t *comm,
char* filename,
const char* filename,
int amode,
struct ompi_info_t *info,
mca_io_ompio_file_t *fh)

View file

@@ -27,7 +27,7 @@
#include "ompi/mca/sharedfp/base/base.h"
int mca_sharedfp_addproc_iwrite(mca_io_ompio_file_t *fh,
void *buf,
const void *buf,
int count,
ompi_datatype_t *datatype,
MPI_Request * request)
@@ -71,7 +71,7 @@ int mca_sharedfp_addproc_iwrite(mca_io_ompio_file_t *fh,
}
int mca_sharedfp_addproc_write_ordered_begin(mca_io_ompio_file_t *fh,
void *buf,
const void *buf,
int count,
struct ompi_datatype_t *datatype)
{
@@ -188,7 +188,7 @@ exit:
int mca_sharedfp_addproc_write_ordered_end(mca_io_ompio_file_t *fh,
void *buf,
const void *buf,
ompi_status_public_t *status)
{
int ret = OMPI_SUCCESS;

View file

@@ -27,7 +27,7 @@
#include "ompi/mca/sharedfp/base/base.h"
int mca_sharedfp_addproc_write (mca_io_ompio_file_t *fh,
void *buf,
const void *buf,
int count,
struct ompi_datatype_t *datatype,
ompi_status_public_t *status)
@@ -72,7 +72,7 @@ int mca_sharedfp_addproc_write (mca_io_ompio_file_t *fh,
}
int mca_sharedfp_addproc_write_ordered (mca_io_ompio_file_t *fh,
void *buf,
const void *buf,
int count,
struct ompi_datatype_t *datatype,
ompi_status_public_t *status)

View file

@@ -1,4 +1,4 @@
/* -*- Mode: C; c-basic-offset:4 ; -*- */
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2006 The Trustees of Indiana University and Indiana
* University Research and Technology
@@ -13,6 +13,8 @@
* Copyright (c) 2008 UT-Battelle, LLC
* Copyright (c) 2008-2012 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2009 Sun Microsystems, Inc. All rights reserved.
* Copyright (c) 2015 Los Alamos National Security, LLC. All rights
* reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@@ -594,6 +596,13 @@ static inline void ompi_op_reduce(ompi_op_t * op, void *source,
return;
}
static inline void ompi_3buff_op_user (ompi_op_t *op, void * restrict source1, void * restrict source2,
void * restrict result, int count, struct ompi_datatype_t *dtype)
{
ompi_datatype_copy_content_same_ddt (dtype, count, result, source1);
op->o_func.c_fn (source2, result, &count, &dtype);
}
/**
* Perform a reduction operation.
*
@@ -628,10 +637,14 @@ static inline void ompi_3buff_op_reduce(ompi_op_t * op, void *source1,
src2 = source2;
tgt = target;
op->o_3buff_intrinsic.fns[ompi_op_ddt_map[dtype->id]](src1, src2,
tgt, &count,
&dtype,
op->o_3buff_intrinsic.modules[ompi_op_ddt_map[dtype->id]]);
if (OPAL_LIKELY(ompi_op_is_intrinsic (op))) {
op->o_3buff_intrinsic.fns[ompi_op_ddt_map[dtype->id]](src1, src2,
tgt, &count,
&dtype,
op->o_3buff_intrinsic.modules[ompi_op_ddt_map[dtype->id]]);
} else {
ompi_3buff_op_user (op, src1, src2, tgt, count, dtype);
}
}
END_C_DECLS
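
The new ompi_3buff_op_user above reduces the three-buffer form
(result = source1 op source2) to the two-buffer user callback: copy
source1 into result, then apply the user function in place, per the MPI
convention inout = in (op) inout. A standalone sketch of the same
reduction, with memcpy standing in for ompi_datatype_copy_content_same_ddt
and simplified types:

/* Hedged sketch; not the ompi implementation itself. */
#include <string.h>

typedef void (user_fn_t)(void *in, void *inout, int *len, void *dtype);

static void three_buff_via_user_fn(user_fn_t *fn, void *src1, void *src2,
                                   void *result, int count, size_t extent,
                                   void *dtype)
{
    memcpy(result, src1, (size_t) count * extent); /* result = source1 */
    fn(src2, result, &count, dtype);  /* result = source2 (op) result */
}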

View file

@@ -55,6 +55,8 @@ OMPI_DECLSPEC extern bool ompi_mpi_initialized;
OMPI_DECLSPEC extern bool ompi_mpi_finalized;
/** Has the RTE been initialized? */
OMPI_DECLSPEC extern bool ompi_rte_initialized;
/** Did mpi start to finalize? */
OMPI_DECLSPEC extern int32_t ompi_mpi_finalize_started;
/** Do we have multiple threads? */
OMPI_DECLSPEC extern bool ompi_mpi_thread_multiple;

View file

@@ -93,7 +93,6 @@ extern bool ompi_enable_timing_ext;
int ompi_mpi_finalize(void)
{
int ret;
static int32_t finalize_has_already_started = 0;
opal_list_item_t *item;
ompi_proc_t** procs;
size_t nprocs;
@@ -106,7 +105,7 @@ int ompi_mpi_finalize(void)
ompi_comm_free() (or run into other nasty lions, tigers, or
bears) */
if (! opal_atomic_cmpset_32(&finalize_has_already_started, 0, 1)) {
if (! opal_atomic_cmpset_32(&ompi_mpi_finalize_started, 0, 1)) {
/* Note that if we're already finalized, we cannot raise an
MPI exception. The best that we can do is write something
to stderr. */
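
This hunk promotes the function-static guard to the global
ompi_mpi_finalize_started (declared in the previous hunk, defined in the
next one) so that other components, such as the coll/fca barrier earlier
in this commit, can observe that finalize has begun. The compare-and-set
guarantees exactly one caller wins the 0-to-1 transition. The same
run-once idiom in portable C11, as a sketch; the real code uses Open
MPI's own opal_atomic_cmpset_32:

/* Illustrative C11 run-once guard via compare-and-swap. */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int finalize_started;

static bool try_start_finalize(void)
{
    int expected = 0;
    /* Only the first caller flips 0 -> 1 and proceeds with teardown;
       later callers see the flag already set and must bail out. */
    return atomic_compare_exchange_strong(&finalize_started, &expected, 1);
}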

View file

@@ -128,6 +128,7 @@ bool ompi_mpi_init_started = false;
bool ompi_mpi_initialized = false;
bool ompi_mpi_finalized = false;
bool ompi_rte_initialized = false;
int32_t ompi_mpi_finalize_started = false;
bool ompi_mpi_thread_multiple = false;
int ompi_mpi_thread_requested = MPI_THREAD_SINGLE;

View file

@@ -1811,7 +1811,7 @@ static int mca_common_cuda_is_gpu_buffer(const void *pUserBuf, opal_convertor_t
#if !OPAL_CUDA_GET_ATTRIBUTES
res = cuFunc.cuPointerGetAttribute(&memCtx,
CU_POINTER_ATTRIBUTE_CONTEXT, dbuf);
if (OPAL_UNLIKELEY(res != CUDA_SUCCESS)) {
if (OPAL_UNLIKELY(res != CUDA_SUCCESS)) {
opal_output(0, "CUDA: error calling cuPointerGetAttribute: "
"res=%d, ptr=%p aborting...", res, pUserBuf);
return OPAL_ERROR;

View file

@@ -182,6 +182,9 @@ void mca_mpool_base_tree_print(int show_up_to_mem_leaks)
num_leaks = 0;
max_mem_leaks = show_up_to_mem_leaks;
opal_rb_tree_traverse(&mca_mpool_base_tree, condition, action);
if (0 == num_leaks) {
return;
}
if (num_leaks <= show_up_to_mem_leaks ||
show_up_to_mem_leaks < 0) {

View file

@@ -13,10 +13,12 @@
.SH NAME
.
orterun, mpirun, mpiexec \- Execute serial and parallel jobs in Open MPI.
oshrun, shmemrun \- Execute serial and parallel jobs in Open SHMEM.
.B Note:
\fImpirun\fP, \fImpiexec\fP, and \fIorterun\fP are all synonyms for each
other. Using any of the names will produce the same behavior.
other, as well as for \fIoshrun\fP and \fIshmemrun\fP when Open SHMEM is installed.
Using any of the names will produce the same behavior.
.
.\" **************************
.\" Synopsis Section

View file

@@ -10,7 +10,13 @@
#
# $HEADER$
include $(top_srcdir)/Makefile.ompi-rules
man_pages = oshcc.1 shmemcc.1 oshfort.1 shmemfort.1 oshrun.1 shmemrun.1
if PROJECT_OSHMEM
man_MANS = $(man_pages)
nodist_oshmemdata_DATA = \
shmemcc-wrapper-data.txt \
shmemfort-wrapper-data.txt
@@ -49,4 +55,41 @@ uninstall-local:
$(DESTDIR)$(bindir)/oshjavac \
$(DESTDIR)$(bindir)/shmemjavac
########################################################
#
# Man page generation / handling
#
########################################################
distclean-local:
rm -f $(man_pages)
$(top_builddir)/opal/tools/wrappers/generic_wrapper.1:
(cd $(top_builddir)/opal/tools/wrappers && $(MAKE) $(AM_MAKEFLAGS) generic_wrapper.1)
oshcc.1: $(top_builddir)/opal/tools/wrappers/generic_wrapper.1
rm -f oshcc.1
sed -e 's/#COMMAND#/oshcc/g' -e 's/#PROJECT#/Open SHMEM/g' -e 's/#PROJECT_SHORT#/OSHMEM/g' -e 's/#LANGUAGE#/C/g' < $(top_builddir)/opal/tools/wrappers/generic_wrapper.1 > oshcc.1
shmemcc.1: $(top_builddir)/opal/tools/wrappers/generic_wrapper.1
rm -f shmemcc.1
sed -e 's/#COMMAND#/shmemcc/g' -e 's/#PROJECT#/Open SHMEM/g' -e 's/#PROJECT_SHORT#/OSHMEM/g' -e 's/#LANGUAGE#/C/g' < $(top_builddir)/opal/tools/wrappers/generic_wrapper.1 > shmemcc.1
oshfort.1: $(top_builddir)/opal/tools/wrappers/generic_wrapper.1
rm -f oshfort.1
sed -e 's/#COMMAND#/oshfort/g' -e 's/#PROJECT#/Open SHMEM/g' -e 's/#PROJECT_SHORT#/OSHMEM/g' -e 's/#LANGUAGE#/Fortran/g' < $(top_builddir)/opal/tools/wrappers/generic_wrapper.1 > oshfort.1
shmemfort.1: $(top_builddir)/opal/tools/wrappers/generic_wrapper.1
rm -f shmemfort.1
sed -e 's/#COMMAND#/shmemfort/g' -e 's/#PROJECT#/Open SHMEM/g' -e 's/#PROJECT_SHORT#/OSHMEM/g' -e 's/#LANGUAGE#/Fortran/g' < $(top_builddir)/opal/tools/wrappers/generic_wrapper.1 > shmemfort.1
$(top_builddir)/orte/tools/orterun/orterun.1:
(cd $(top_builddir)/orte/tools/orterun && $(MAKE) $(AM_MAKEFLAGS) orterun.1)
oshrun.1: $(top_builddir)/orte/tools/orterun/orterun.1
cp -f $(top_builddir)/orte/tools/orterun/orterun.1 oshrun.1
shmemrun.1: $(top_builddir)/orte/tools/orterun/orterun.1
cp -f $(top_builddir)/orte/tools/orterun/orterun.1 shmemrun.1
endif # PROJECT_OSHMEM