
Improve checkpoint/restart support by allowing a checkpoint to progress while the process is *not* in the MPI library. This involves creating a separate thread that polls for a checkpoint request. The thread is active while the MPI process is outside the MPI library, and paused while the process is inside it.
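
For illustration, a minimal sketch of the polling-thread idea (hypothetical code, not the actual opal_cr implementation; {{{check_for_checkpoint_request()}}} is a placeholder, and the real enter/exit macros manipulate the equivalent of {{{in_mpi_library}}}):

{{{
#include <pthread.h>
#include <stdbool.h>
#include <unistd.h>

/* Placeholder for the real checkpoint-request poll. */
extern void check_for_checkpoint_request(void);

static pthread_mutex_t cr_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cr_cond = PTHREAD_COND_INITIALIZER;
static bool in_mpi_library = false;  /* toggled on library enter/exit */
static bool time_to_quit  = false;   /* set while finalizing */

static void *cr_polling_thread(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&cr_lock);
    while (!time_to_quit) {
        /* Pause while the application is inside the MPI library; the
         * library's progress engine polls on this thread's behalf, and
         * the exit macro signals cr_cond to wake us back up. */
        while (in_mpi_library && !time_to_quit) {
            pthread_cond_wait(&cr_cond, &cr_lock);
        }
        pthread_mutex_unlock(&cr_lock);

        check_for_checkpoint_request();
        usleep(1000);                /* do not spin at full speed */

        pthread_mutex_lock(&cr_lock);
    }
    pthread_mutex_unlock(&cr_lock);
    return NULL;
}
}}}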

Some MPI C interface files also received whitespace changes to conform to Open MPI's coding standards.

Changed MPI C interface files to use {{{OPAL_CR_ENTER_LIBRARY()}}} and {{{OPAL_CR_EXIT_LIBRARY()}}} instead of just {{{OPAL_CR_TEST_CHECKPOINT_READY()}}}. This gives the checkpoint/restart system more flexibility in how it behaves.
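
The intended pattern in an interface function looks roughly like the sketch below ({{{do_back_end_work()}}} is a hypothetical stand-in for the real PML/coll call):

{{{
#include <mpi.h>

static const char FUNC_NAME[] = "MPI_Example";

extern int do_back_end_work(MPI_Comm comm);  /* hypothetical */

int MPI_Example(MPI_Comm comm)
{
    int rc;

    if (MPI_PARAM_CHECK) {
        /* Parameter checks run before entering the library proper,
         * so a checkpoint can still make progress during them. */
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
    }

    OPAL_CR_ENTER_LIBRARY();          /* pause the polling thread */

    rc = do_back_end_work(comm);      /* stand-in for the real work */

    /* OMPI_ERRHANDLER_RETURN() itself calls OPAL_CR_EXIT_LIBRARY(),
     * so both the success and error paths resume the polling thread. */
    OMPI_ERRHANDLER_RETURN(rc, comm, rc, FUNC_NAME);
}
}}}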

Fixed the configure check for {{{--enable-ft-thread}}} so it has a known dependence on {{{--enable-mpi-threads}}} (and/or {{{--enable-progress-threads}}}).
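
For example, a configure line satisfying the dependency might look like this (assuming checkpoint/restart fault tolerance is selected with {{{--with-ft=cr}}}):

{{{
./configure --with-ft=cr --enable-mpi-threads --enable-ft-thread
}}}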

Added a line for Checkpoint/Restart support to {{{ompi_info}}}.
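So whether support was compiled in can now be checked with something like:

{{{
shell$ ompi_info | grep -i checkpoint
}}}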

Added options to choose at runtime whether or not to use the checkpoint polling thread. By default, if the user asked for it to be compiled in, then it is used; but some users will want the ability to toggle its use at runtime.
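
For example (assuming the toggle is the {{{opal_cr_use_thread}}} MCA parameter set in the AMCA parameter file shown below, and that the file is the usual {{{ft-enable-cr}}}), the thread could be disabled for a single run with:

{{{
shell$ mpirun -am ft-enable-cr -mca opal_cr_use_thread 0 -np 4 ./my_app
}}}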

There is still some room for improvement, but the feature works correctly. As always with Checkpoint/Restart, support is compiled out unless explicitly requested at configure time. Further, even when configured in, it is not used unless explicitly requested by the user at runtime.

This commit was SVN r17516.
This commit is contained in:
Josh Hursey 2008-02-19 22:15:52 +00:00
parent b87b15580c
commit 99144db970
322 changed files with 1480 additions and 947 deletions

View file

@ -237,5 +237,55 @@ fi
AC_DEFINE_UNQUOTED([OMPI_ENABLE_PROGRESS_THREADS], [$OMPI_ENABLE_PROGRESS_THREADS],
[Whether we should use progress threads rather than polling])
AC_MSG_RESULT([$enable_progress_threads])
#
# Fault Tolerance Thread
#
# --enable-ft-thread
# #if OPAL_ENABLE_FT_THREAD == 0 /* Disabled */
# #if OPAL_ENABLE_FT_THREAD == 1 /* Enabled */
#
AC_MSG_CHECKING([if want fault tolerance thread])
AC_ARG_ENABLE([ft_thread],
[AC_HELP_STRING([--disable-ft-thread],
[Disable fault tolerance thread running inside all processes. Requires progress and/or MPI threads (default: enabled)])],
[enable_ft_thread="$enableval"],
[enable_ft_thread="undef"])
# if they do not want FT support, then they do not want this thread either
if test "$ompi_want_ft" = "0"; then
ompi_want_ft_thread=0
AC_MSG_RESULT([Disabled (fault tolerance disabled --without-ft)])
# if --disable-ft-thread
elif test "$enable_ft_thread" = "no"; then
ompi_want_ft_thread=0
AC_MSG_RESULT([Disabled])
# if default, and no progress or MPI threads
elif test "$enable_ft_thread" = "undef" -a "$enable_progress_threads" = "no" -a "$enable_mpi_threads" = "no" ; then
ompi_want_ft_thread=0
AC_MSG_RESULT([Disabled (Progress and MPI Threads Disabled)])
# if default, and either progress or MPI threads enabled
else
# Default: Enable
# Make sure we have at least Progress Threads or MPI Threads enabled
if test "$enable_progress_threads" = "no" -a "$enable_mpi_threads" = "no"; then
AC_MSG_RESULT([Must enable progress or MPI threads to use this option])
AC_MSG_ERROR([Cannot continue])
else
AC_MSG_RESULT([yes])
ompi_want_ft_thread=1
AC_MSG_WARN([**************************************************])
AC_MSG_WARN([*** Fault Tolerance with a thread in Open MPI *])
AC_MSG_WARN([*** is an experimental, research quality option. *])
AC_MSG_WARN([*** It requires progress or MPI threads, and *])
AC_MSG_WARN([*** care should be used when enabling these *])
AC_MSG_WARN([*** options. *])
AC_MSG_WARN([**************************************************])
fi
fi
AC_DEFINE_UNQUOTED([OPAL_ENABLE_FT_THREAD], [$ompi_want_ft_thread],
[Enable fault tolerance thread in Open PAL])
AM_CONDITIONAL(WANT_FT_THREAD, test "$ompi_want_ft_thread" = "1")
])dnl

View file

@ -627,45 +627,6 @@ AC_DEFINE_UNQUOTED([OPAL_ENABLE_FT_CR], [$ompi_want_ft_cr],
[Enable fault tolerance checkpoint/restart components and logic])
AM_CONDITIONAL(WANT_FT, test "$ompi_want_ft" = "1")
#
# Fault Tolerance Components and Logic
#
# --enable-ft-thread
# #if OPAL_ENABLE_FT_THREAD == 0 /* Disabled */
# #if OPAL_ENABLE_FT_THREAD == 1 /* Enabled */
#
AC_MSG_CHECKING([if want fault tolerance thread])
AC_ARG_ENABLE([ft_thread],
[AC_HELP_STRING([--enable-ft-thread],
[Enable fault tolerance thread running inside all processes. Requires progress threads (default: disabled)])])
if test "$ompi_want_ft" = "0"; then
ompi_want_ft_thread=0
AC_MSG_RESULT([Disabled (fault tolerance disabled --without-ft-style)])
elif test "$enable_ft_thread" = "yes"; then
# This check may not fire since progress threads are checked after this section :/
if test "$OMPI_ENABLE_PROGRESS_THREADS" = "0"; then
AC_MSG_RESULT([Must enable progress threads to use this option])
AC_MSG_ERROR([Cannot continue])
else
AC_MSG_RESULT([yes])
ompi_want_ft_thread=1
AC_MSG_WARN([**************************************************])
AC_MSG_WARN([*** Fault Tolerance with a thread in Open MPI *])
AC_MSG_WARN([*** is an experimental, research quality option. *])
AC_MSG_WARN([*** It requires progress threads to be used, and *])
AC_MSG_WARN([*** care should be used when enabling these *])
AC_MSG_WARN([*** options. *])
AC_MSG_WARN([**************************************************])
fi
else
ompi_want_ft_thread=0
AC_MSG_RESULT([Disabled])
fi
AC_DEFINE_UNQUOTED([OPAL_ENABLE_FT_THREAD], [$ompi_want_ft_thread],
[Enable fault tolerance thread in Open PAL])
AM_CONDITIONAL(WANT_FT_THREAD, test "$ompi_want_ft_thread" = "1")
#
# Do we want to install binaries?
#

View file

@ -12,11 +12,13 @@
# - Select only checkpoint ready components
# - Enable Additional FT infrastructure
# - Auto-select OPAL CRS component
# - If available, use the FT Thread (Default)
#
opal_cr_allow_opal_only=0
mca_base_component_distill_checkpoint_ready=1
ft_cr_enabled=1
crs=
opal_cr_use_thread=1
#
# ORTE Parameters

View file

@ -213,6 +213,7 @@ struct ompi_request_t;
#define OMPI_ERRHANDLER_CHECK(rc, mpi_object, err_code, message) \
if( OPAL_UNLIKELY(rc != OMPI_SUCCESS) ) { \
int __mpi_err_code = (err_code < 0 ? (ompi_errcode_get_mpi_code(err_code)) : err_code); \
OPAL_CR_EXIT_LIBRARY() \
ompi_errhandler_invoke((mpi_object)->error_handler, \
(mpi_object), \
(int) (mpi_object)->errhandler_type, \
@ -237,6 +238,7 @@ struct ompi_request_t;
* MPI_SUCCESS.
*/
#define OMPI_ERRHANDLER_RETURN(rc, mpi_object, err_code, message) \
OPAL_CR_EXIT_LIBRARY() \
if (rc != OMPI_SUCCESS) { \
int __mpi_err_code = (err_code < 0 ? (ompi_errcode_get_mpi_code(err_code)) : err_code); \
ompi_errhandler_invoke((mpi_object)->error_handler, \

View file

@ -40,7 +40,7 @@ int MPI_Abort(MPI_Comm comm, int errorcode)
memchecker_comm(comm);
);
OPAL_CR_TEST_CHECKPOINT_READY();
OPAL_CR_ABORT_LIBRARY();
/* Don't even bother checking comm and errorcode values for
errors */

View file

@ -44,8 +44,6 @@ int MPI_Accumulate(void *origin_addr, int origin_count, MPI_Datatype origin_data
int rc;
ompi_win_t *ompi_win = (ompi_win_t*) win;
OPAL_CR_TEST_CHECKPOINT_READY();
MEMCHECKER(
memchecker_datatype(origin_datatype);
memchecker_datatype(target_datatype);
@ -219,14 +217,17 @@ int MPI_Accumulate(void *origin_addr, int origin_count, MPI_Datatype origin_data
}
}
if (MPI_PROC_NULL == target_rank) return MPI_SUCCESS;
if (MPI_PROC_NULL == target_rank) {
return MPI_SUCCESS;
}
/* Set buffer to be unaccessable before sending it.
* It's set accessable again in file osc_pt2pt_data_move. */
MEMCHECKER (
memchecker_call(&opal_memchecker_base_mem_noaccess, origin_addr, origin_count, origin_datatype);
);
OPAL_CR_ENTER_LIBRARY();
rc = ompi_win->w_osc_module->osc_accumulate(origin_addr,
origin_count,
origin_datatype,

View file

@ -39,7 +39,7 @@ int MPI_Add_error_class(int *errorclass)
int err_class;
int rc;
OPAL_CR_TEST_CHECKPOINT_READY();
OPAL_CR_NOOP_PROGRESS();
if ( MPI_PARAM_CHECK ) {
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);

View file

@ -39,7 +39,7 @@ int MPI_Add_error_code(int errorclass, int *errorcode)
int code;
int rc;
OPAL_CR_TEST_CHECKPOINT_READY();
OPAL_CR_NOOP_PROGRESS();
if ( MPI_PARAM_CHECK ) {
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);

View file

@ -38,7 +38,7 @@ int MPI_Add_error_string(int errorcode, char *string)
{
int rc;
OPAL_CR_TEST_CHECKPOINT_READY();
OPAL_CR_NOOP_PROGRESS();
if ( MPI_PARAM_CHECK ) {
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);

View file

@ -35,7 +35,7 @@ static const char FUNC_NAME[] = "MPI_Address";
int MPI_Address(void *location, MPI_Aint *address)
{
OPAL_CR_TEST_CHECKPOINT_READY();
OPAL_CR_NOOP_PROGRESS();
if( MPI_PARAM_CHECK ) {
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);

View file

@ -40,15 +40,13 @@ int MPI_Allgather(void *sendbuf, int sendcount, MPI_Datatype sendtype,
MPI_Comm comm)
{
int err;
MEMCHECKER(
memchecker_datatype(sendtype);
memchecker_datatype(recvtype);
memchecker_call(&opal_memchecker_base_isdefined, sendbuf, sendcount, sendtype);
memchecker_comm(comm);
);
OPAL_CR_TEST_CHECKPOINT_READY();
OPAL_CR_TEST_CHECKPOINT_READY();
if (MPI_PARAM_CHECK) {
@ -81,6 +79,8 @@ int MPI_Allgather(void *sendbuf, int sendcount, MPI_Datatype sendtype,
return MPI_SUCCESS;
}
OPAL_CR_ENTER_LIBRARY();
/* Invoke the coll component to perform the back-end operation */
err = comm->c_coll.coll_allgather(sendbuf, sendcount, sendtype,

View file

@ -47,8 +47,6 @@ int MPI_Allgatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
memchecker_comm (comm);
);
OPAL_CR_TEST_CHECKPOINT_READY();
if (MPI_PARAM_CHECK) {
/* Unrooted operation -- same checks for all ranks on both
@ -60,8 +58,8 @@ int MPI_Allgatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
FUNC_NAME);
} else if (MPI_IN_PLACE == recvbuf) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
}
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
}
if (MPI_IN_PLACE != sendbuf) {
OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcount);
@ -95,6 +93,8 @@ int MPI_Allgatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
return MPI_SUCCESS;
}
OPAL_CR_ENTER_LIBRARY();
/* Invoke the coll component to perform the back-end operation */
err = comm->c_coll.coll_allgatherv(sendbuf, sendcount, sendtype,

View file

@ -40,8 +40,6 @@ static const char FUNC_NAME[] = "MPI_Alloc_mem";
int MPI_Alloc_mem(MPI_Aint size, MPI_Info info, void *baseptr)
{
OPAL_CR_TEST_CHECKPOINT_READY();
if (MPI_PARAM_CHECK) {
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (size < 0 || NULL == baseptr) {
@ -64,15 +62,17 @@ int MPI_Alloc_mem(MPI_Aint size, MPI_Info info, void *baseptr)
*((void **) baseptr) = NULL;
return MPI_SUCCESS;
}
OPAL_CR_ENTER_LIBRARY();
*((void **) baseptr) = mca_mpool_base_alloc((size_t) size, info);
OPAL_CR_EXIT_LIBRARY();
if (NULL == *((void **) baseptr)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_NO_MEM,
FUNC_NAME);
}
/* All done */
return MPI_SUCCESS;
}

View file

@ -39,14 +39,12 @@ int MPI_Allreduce(void *sendbuf, void *recvbuf, int count,
MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
{
int err;
MEMCHECKER(
memchecker_datatype(datatype);
memchecker_call(&opal_memchecker_base_isdefined, sendbuf, count, datatype);
memchecker_comm(comm);
);
OPAL_CR_TEST_CHECKPOINT_READY();
OPAL_CR_TEST_CHECKPOINT_READY();
if (MPI_PARAM_CHECK) {
char *msg;
@ -87,6 +85,8 @@ int MPI_Allreduce(void *sendbuf, void *recvbuf, int count,
return MPI_SUCCESS;
}
OPAL_CR_ENTER_LIBRARY();
/* Invoke the coll component to perform the back-end operation */
OBJ_RETAIN(op);

View file

@ -48,8 +48,6 @@ int MPI_Alltoall(void *sendbuf, int sendcount, MPI_Datatype sendtype,
memchecker_comm(comm);
);
OPAL_CR_TEST_CHECKPOINT_READY();
if (MPI_PARAM_CHECK) {
/* Unrooted operation -- same checks for all ranks on both
@ -78,6 +76,8 @@ int MPI_Alltoall(void *sendbuf, int sendcount, MPI_Datatype sendtype,
return MPI_SUCCESS;
}
OPAL_CR_ENTER_LIBRARY();
/* Invoke the coll component to perform the back-end operation */
err = comm->c_coll.coll_alltoall(sendbuf, sendcount, sendtype,

View file

@ -54,8 +54,6 @@ int MPI_Alltoallv(void *sendbuf, int *sendcounts, int *sdispls,
}
);
OPAL_CR_TEST_CHECKPOINT_READY();
if (MPI_PARAM_CHECK) {
/* Unrooted operation -- same checks for all ranks */
@ -91,6 +89,8 @@ int MPI_Alltoallv(void *sendbuf, int *sendcounts, int *sdispls,
}
}
OPAL_CR_ENTER_LIBRARY();
/* Invoke the coll component to perform the back-end operation */
err = comm->c_coll.coll_alltoallv(sendbuf, sendcounts, sdispls, sendtype,

View file

@ -52,8 +52,6 @@ int MPI_Alltoallw(void *sendbuf, int *sendcounts, int *sdispls,
}
);
OPAL_CR_TEST_CHECKPOINT_READY();
if (MPI_PARAM_CHECK) {
/* Unrooted operation -- same checks for all ranks */
@ -89,6 +87,8 @@ int MPI_Alltoallw(void *sendbuf, int *sendcounts, int *sdispls,
}
}
OPAL_CR_ENTER_LIBRARY();
/* Invoke the coll component to perform the back-end operation */
err = comm->c_coll.coll_alltoallw(sendbuf, sendcounts, sdispls, sendtypes,

View file

@ -42,16 +42,17 @@ int MPI_Attr_delete(MPI_Comm comm, int keyval)
MEMCHECKER(
memchecker_comm(comm);
);
OPAL_CR_TEST_CHECKPOINT_READY();
if (MPI_PARAM_CHECK) {
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
FUNC_NAME);
}
if (ompi_comm_invalid(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
FUNC_NAME);
}
}
OPAL_CR_ENTER_LIBRARY();
ret = ompi_attr_delete(COMM_ATTR, comm, comm->c_keyhash, keyval,
false, true);

View file

@ -40,16 +40,17 @@ int MPI_Attr_get(MPI_Comm comm, int keyval, void *attribute_val, int *flag)
MEMCHECKER(
memchecker_comm(comm);
);
OPAL_CR_TEST_CHECKPOINT_READY();
if (MPI_PARAM_CHECK) {
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if ((NULL == attribute_val) || (NULL == flag)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_ARG,
FUNC_NAME);
}
if ((NULL == attribute_val) || (NULL == flag)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_ARG,
FUNC_NAME);
}
}
OPAL_CR_ENTER_LIBRARY();
/* This stuff is very confusing. Be sure to see
src/attribute/attribute.c for a lengthy comment explaining Open
MPI attribute behavior. */

View file

@ -37,12 +37,10 @@ static const char FUNC_NAME[] = "MPI_Attr_put";
int MPI_Attr_put(MPI_Comm comm, int keyval, void *attribute_val)
{
int ret;
MEMCHECKER(
memchecker_comm(comm);
);
OPAL_CR_TEST_CHECKPOINT_READY();
OPAL_CR_TEST_CHECKPOINT_READY();
if (MPI_PARAM_CHECK) {
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
@ -51,7 +49,9 @@ int MPI_Attr_put(MPI_Comm comm, int keyval, void *attribute_val)
FUNC_NAME);
}
}
OPAL_CR_ENTER_LIBRARY();
ret = ompi_attr_set_c(COMM_ATTR, comm, &comm->c_keyhash,
keyval, attribute_val, false, true);

View file

@ -40,10 +40,6 @@ int MPI_Barrier(MPI_Comm comm)
memchecker_comm(comm);
);
OPAL_CR_TEST_CHECKPOINT_READY();
OPAL_CR_TEST_CHECKPOINT_READY();
/* Error checking */
if (MPI_PARAM_CHECK) {
@ -53,6 +49,8 @@ int MPI_Barrier(MPI_Comm comm)
}
}
OPAL_CR_ENTER_LIBRARY();
/* Intracommunicators: Only invoke the back-end coll module barrier
function if there's more than one process in the communicator */

View file

@ -43,7 +43,6 @@ int MPI_Bcast(void *buffer, int count, MPI_Datatype datatype,
memchecker_call(&opal_memchecker_base_isdefined, buffer, count, datatype);
memchecker_comm(comm);
);
OPAL_CR_TEST_CHECKPOINT_READY();
if (MPI_PARAM_CHECK) {
err = MPI_SUCCESS;
@ -86,6 +85,8 @@ int MPI_Bcast(void *buffer, int count, MPI_Datatype datatype,
return MPI_SUCCESS;
}
OPAL_CR_ENTER_LIBRARY();
/* Invoke the coll component to perform the back-end operation */
err = comm->c_coll.coll_bcast(buffer, count, datatype, root, comm,

View file

@ -25,8 +25,8 @@
#include "ompi/datatype/datatype.h"
#include "ompi/runtime/params.h"
/* This library needs to be here so that we can define
* OPAL_CR_TEST_CHECKPOINT_READY
/* This library needs to be here so that we can define
* the OPAL_CR_* checks
*/
#include "opal/runtime/opal_cr.h"

View file

@ -45,8 +45,6 @@ int MPI_Bsend(void *buf, int count, MPI_Datatype type, int dest, int tag, MPI_Co
memchecker_comm(comm);
);
OPAL_CR_TEST_CHECKPOINT_READY();
if ( MPI_PARAM_CHECK ) {
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm)) {
@ -71,6 +69,8 @@ int MPI_Bsend(void *buf, int count, MPI_Datatype type, int dest, int tag, MPI_Co
return MPI_SUCCESS;
}
OPAL_CR_ENTER_LIBRARY();
rc = MCA_PML_CALL(send(buf, count, type, dest, tag, MCA_PML_BASE_SEND_BUFFERED, comm));
OMPI_ERRHANDLER_RETURN(rc, comm, rc, FUNC_NAME);
}

View file

@ -39,14 +39,12 @@ int MPI_Bsend_init(void *buf, int count, MPI_Datatype type,
int dest, int tag, MPI_Comm comm, MPI_Request *request)
{
int rc;
MEMCHECKER(
memchecker_datatype(type);
memchecker_call(&opal_memchecker_base_isdefined, buf, count, type);
memchecker_comm(comm);
);
OPAL_CR_TEST_CHECKPOINT_READY();
OPAL_CR_TEST_CHECKPOINT_READY();
if ( MPI_PARAM_CHECK ) {
rc = MPI_SUCCESS;
@ -81,6 +79,8 @@ int MPI_Bsend_init(void *buf, int count, MPI_Datatype type,
return MPI_SUCCESS;
}
OPAL_CR_ENTER_LIBRARY();
/*
* Here, we just initialize the request -- memchecker should set the buffer in MPI_Start.
*/

View file

@ -35,8 +35,7 @@ static const char FUNC_NAME[] = "MPI_Buffer_attach";
int MPI_Buffer_attach(void *buffer, int size)
{
OPAL_CR_TEST_CHECKPOINT_READY();
int ret = OMPI_SUCCESS;
if (MPI_PARAM_CHECK) {
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
@ -45,6 +44,10 @@ int MPI_Buffer_attach(void *buffer, int size)
}
}
return mca_pml_base_bsend_attach(buffer, size);
OPAL_CR_ENTER_LIBRARY();
ret = mca_pml_base_bsend_attach(buffer, size);
OPAL_CR_EXIT_LIBRARY();
return ret;
}

View file

@ -35,8 +35,7 @@ static const char FUNC_NAME[] = "MPI_Buffer_detach";
int MPI_Buffer_detach(void *buffer, int *size)
{
OPAL_CR_TEST_CHECKPOINT_READY();
int ret = OMPI_SUCCESS;
if (MPI_PARAM_CHECK) {
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
@ -45,5 +44,9 @@ int MPI_Buffer_detach(void *buffer, int *size)
}
}
return mca_pml_base_bsend_detach(buffer, size);
OPAL_CR_ENTER_LIBRARY();
ret = mca_pml_base_bsend_detach(buffer, size);
OPAL_CR_EXIT_LIBRARY();
return ret;
}

View file

@ -41,12 +41,11 @@ static const char FUNC_NAME[] = "MPI_Cancel";
int MPI_Cancel(MPI_Request *request)
{
int rc;
MEMCHECKER(
memchecker_request(request);
);
OPAL_CR_TEST_CHECKPOINT_READY();
if ( MPI_PARAM_CHECK ) {
rc = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
@ -59,6 +58,8 @@ int MPI_Cancel(MPI_Request *request)
if (MPI_REQUEST_NULL == *request) {
return MPI_SUCCESS;
}
OPAL_CR_ENTER_LIBRARY();
rc = ompi_request_cancel(*request);
OMPI_ERRHANDLER_RETURN(rc, MPI_COMM_WORLD, rc, FUNC_NAME);
}

View file

@ -39,12 +39,10 @@ int MPI_Cart_coords(MPI_Comm comm, int rank, int maxdims, int *coords)
{
int err;
mca_topo_base_module_cart_coords_fn_t func;
MEMCHECKER(
memchecker_comm(comm);
);
OPAL_CR_TEST_CHECKPOINT_READY();
OPAL_CR_TEST_CHECKPOINT_READY();
/* check the arguments */
if (MPI_PARAM_CHECK) {
@ -71,12 +69,15 @@ int MPI_Cart_coords(MPI_Comm comm, int rank, int maxdims, int *coords)
}
}
OPAL_CR_ENTER_LIBRARY();
/* get the function pointer on this communicator */
func = comm->c_topo->topo_cart_coords;
/* call the function */
if ( MPI_SUCCESS !=
(err = func(comm, rank, maxdims, coords))) {
err = func(comm, rank, maxdims, coords);
OPAL_CR_EXIT_LIBRARY();
if ( MPI_SUCCESS != err ) {
return OMPI_ERRHANDLER_INVOKE(comm, err, FUNC_NAME);
}

View file

@ -40,12 +40,10 @@ int MPI_Cart_create(MPI_Comm old_comm, int ndims, int *dims,
int err;
bool re_order = false;
MEMCHECKER(
memchecker_comm(old_comm);
);
OPAL_CR_TEST_CHECKPOINT_READY();
OPAL_CR_TEST_CHECKPOINT_READY();
/* check the arguments */
if (MPI_PARAM_CHECK) {
@ -107,6 +105,8 @@ int MPI_Cart_create(MPI_Comm old_comm, int ndims, int *dims,
}
}
OPAL_CR_ENTER_LIBRARY();
/* everything seems to be alright with the communicator, we can go
* ahead and select a topology module for this purpose and create
* the new cartesian communicator
@ -122,6 +122,7 @@ int MPI_Cart_create(MPI_Comm old_comm, int ndims, int *dims,
comm_cart,
OMPI_COMM_CART);
OPAL_CR_EXIT_LIBRARY();
/* check the error status */
if (MPI_SUCCESS != err) {
return OMPI_ERRHANDLER_INVOKE(old_comm, err, FUNC_NAME);

View file

@ -40,12 +40,10 @@ int MPI_Cart_get(MPI_Comm comm, int maxdims, int *dims,
/* local variables */
mca_topo_base_module_cart_get_fn_t func;
int err;
MEMCHECKER(
memchecker_comm(comm);
);
OPAL_CR_TEST_CHECKPOINT_READY();
OPAL_CR_TEST_CHECKPOINT_READY();
/* check the arguments */
if (MPI_PARAM_CHECK) {
@ -65,12 +63,16 @@ int MPI_Cart_get(MPI_Comm comm, int maxdims, int *dims,
FUNC_NAME);
}
}
OPAL_CR_ENTER_LIBRARY();
/* get the function pointer to do the right thing */
func = comm->c_topo->topo_cart_get;
/* all arguments are checked and now call the back end function */
if ( MPI_SUCCESS !=
(err = func(comm, maxdims, dims, periods, coords))) {
err = func(comm, maxdims, dims, periods, coords);
OPAL_CR_EXIT_LIBRARY();
if ( MPI_SUCCESS != err ) {
return OMPI_ERRHANDLER_INVOKE(comm, err, FUNC_NAME);
}

View file

@ -39,12 +39,10 @@ int MPI_Cart_map(MPI_Comm comm, int ndims, int *dims,
{
int err;
mca_topo_base_module_cart_map_fn_t func;
MEMCHECKER(
memchecker_comm(comm);
);
OPAL_CR_TEST_CHECKPOINT_READY();
OPAL_CR_TEST_CHECKPOINT_READY();
/* check the arguments */
if (MPI_PARAM_CHECK) {
@ -63,22 +61,26 @@ int MPI_Cart_map(MPI_Comm comm, int ndims, int *dims,
}
}
OPAL_CR_ENTER_LIBRARY();
if(!OMPI_COMM_IS_CART(comm)) {
/* In case the communicator has no topo-module attached to
it, we just return the "default" value suggested by MPI:
newrank = rank */
*newrank = ompi_comm_rank(comm);
/* In case the communicator has no topo-module attached to
it, we just return the "default" value suggested by MPI:
newrank = rank */
*newrank = ompi_comm_rank(comm);
}
else {
/* get the function pointer on this communicator */
func = comm->c_topo->topo_cart_map;
/* get the function pointer on this communicator */
func = comm->c_topo->topo_cart_map;
/* call the function */
if ( MPI_SUCCESS !=
(err = func(comm, ndims, dims, periods, newrank))) {
return OMPI_ERRHANDLER_INVOKE(comm, err, FUNC_NAME);
}
/* call the function */
err = func(comm, ndims, dims, periods, newrank);
if ( MPI_SUCCESS != err ) {
OPAL_CR_EXIT_LIBRARY();
return OMPI_ERRHANDLER_INVOKE(comm, err, FUNC_NAME);
}
}
OPAL_CR_EXIT_LIBRARY();
return MPI_SUCCESS;
}

View file

@ -38,12 +38,10 @@ int MPI_Cart_rank(MPI_Comm comm, int *coords, int *rank)
{
int err;
mca_topo_base_module_cart_rank_fn_t func;
MEMCHECKER(
memchecker_comm(comm);
);
OPAL_CR_TEST_CHECKPOINT_READY();
OPAL_CR_TEST_CHECKPOINT_READY();
/* check the arguments */
if (MPI_PARAM_CHECK) {
@ -66,12 +64,15 @@ int MPI_Cart_rank(MPI_Comm comm, int *coords, int *rank)
}
}
OPAL_CR_ENTER_LIBRARY();
/* get the function pointer on this communicator */
func = comm->c_topo->topo_cart_rank;
/* call the function */
if ( MPI_SUCCESS !=
(err = func(comm, coords, rank))) {
err = func(comm, coords, rank);
OPAL_CR_EXIT_LIBRARY();
if ( MPI_SUCCESS != err ) {
return OMPI_ERRHANDLER_INVOKE(comm, err, FUNC_NAME);
}

View file

@ -39,12 +39,10 @@ int MPI_Cart_shift(MPI_Comm comm, int direction, int disp,
{
int err;
mca_topo_base_module_cart_shift_fn_t func;
MEMCHECKER(
memchecker_comm(comm);
);
OPAL_CR_TEST_CHECKPOINT_READY();
OPAL_CR_TEST_CHECKPOINT_READY();
/* check the arguments */
if (MPI_PARAM_CHECK) {
@ -71,12 +69,15 @@ int MPI_Cart_shift(MPI_Comm comm, int direction, int disp,
}
}
OPAL_CR_ENTER_LIBRARY();
/* get the function pointer on this communicator */
func = comm->c_topo->topo_cart_shift;
/* call the function */
if ( MPI_SUCCESS !=
(err = func(comm, direction, disp, rank_source, rank_dest))) {
err = func(comm, direction, disp, rank_source, rank_dest);
OPAL_CR_EXIT_LIBRARY();
if ( MPI_SUCCESS != err ) {
return OMPI_ERRHANDLER_INVOKE(comm, err, FUNC_NAME);
}

View file

@ -38,12 +38,10 @@ int MPI_Cart_sub(MPI_Comm comm, int *remain_dims, MPI_Comm *new_comm)
{
int err;
mca_topo_base_module_cart_sub_fn_t func;
MEMCHECKER(
memchecker_comm(comm);
);
OPAL_CR_TEST_CHECKPOINT_READY();
OPAL_CR_TEST_CHECKPOINT_READY();
/* check the arguments */
if (MPI_PARAM_CHECK) {
@ -66,12 +64,15 @@ int MPI_Cart_sub(MPI_Comm comm, int *remain_dims, MPI_Comm *new_comm)
}
}
OPAL_CR_ENTER_LIBRARY();
/* get the function pointer on this communicator */
func = comm->c_topo->topo_cart_sub;
/* call the function */
if ( MPI_SUCCESS !=
(err = func(comm, remain_dims, new_comm))) {
err = func(comm, remain_dims, new_comm);
OPAL_CR_EXIT_LIBRARY();
if ( MPI_SUCCESS != err ) {
return OMPI_ERRHANDLER_INVOKE(comm, err, FUNC_NAME);
}

View file

@ -38,12 +38,10 @@ int MPI_Cartdim_get(MPI_Comm comm, int *ndims)
{
mca_topo_base_module_cartdim_get_fn_t func;
int err;
MEMCHECKER(
memchecker_comm(comm);
);
OPAL_CR_TEST_CHECKPOINT_READY();
OPAL_CR_TEST_CHECKPOINT_READY();
if (MPI_PARAM_CHECK) {
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
@ -65,13 +63,16 @@ int MPI_Cartdim_get(MPI_Comm comm, int *ndims)
}
}
OPAL_CR_ENTER_LIBRARY();
/* get the function pointer on this communicator */
func = comm->c_topo->topo_cartdim_get;
/* call the function */
if ( MPI_SUCCESS !=
(err = func(comm, ndims))) {
return OMPI_ERRHANDLER_INVOKE(comm, err, FUNC_NAME);
err = func(comm, ndims);
OPAL_CR_EXIT_LIBRARY();
if ( MPI_SUCCESS != err ) {
return OMPI_ERRHANDLER_INVOKE(comm, err, FUNC_NAME);
}
return MPI_SUCCESS;

View file

@ -33,7 +33,8 @@ static const char FUNC_NAME[] = "MPI_Close_port";
int MPI_Close_port(char *port_name)
{
OPAL_CR_TEST_CHECKPOINT_READY();
OPAL_CR_NOOP_PROGRESS();
if ( MPI_PARAM_CHECK ) {
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);

View file

@ -42,12 +42,10 @@ int MPI_Comm_accept(char *port_name, MPI_Info info, int root,
ompi_communicator_t *newcomp=MPI_COMM_NULL;
char *tmp_port=NULL;
orte_rml_tag_t tag;
MEMCHECKER(
memchecker_comm(comm);
);
OPAL_CR_TEST_CHECKPOINT_READY();
OPAL_CR_TEST_CHECKPOINT_READY();
if ( MPI_PARAM_CHECK ) {
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
@ -88,7 +86,8 @@ int MPI_Comm_accept(char *port_name, MPI_Info info, int root,
* if ( rank == root && MPI_INFO_NULL != info ) {
* }
*/
OPAL_CR_ENTER_LIBRARY();
/*
* Our own port_name is not of interest here, so we pass in NULL.
* The two leaders will figure this out later. However, we need the tag.

View file

@ -39,7 +39,8 @@ MPI_Fint MPI_Comm_c2f(MPI_Comm comm)
MEMCHECKER(
memchecker_comm(comm);
);
OPAL_CR_TEST_CHECKPOINT_READY();
OPAL_CR_NOOP_PROGRESS();
if ( MPI_PARAM_CHECK) {
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);

View file

@ -39,7 +39,7 @@ int MPI_Comm_call_errhandler(MPI_Comm comm, int errorcode)
memchecker_comm(comm);
);
OPAL_CR_TEST_CHECKPOINT_READY();
OPAL_CR_NOOP_PROGRESS();
/* Error checking */

View file

@ -36,11 +36,11 @@ static const char FUNC_NAME[] = "MPI_Comm_compare";
int MPI_Comm_compare(MPI_Comm comm1, MPI_Comm comm2, int *result) {
int rc;
MEMCHECKER(
memchecker_comm(comm1);
memchecker_comm(comm2);
);
OPAL_CR_TEST_CHECKPOINT_READY();
if ( MPI_PARAM_CHECK ) {
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
@ -55,7 +55,9 @@ int MPI_Comm_compare(MPI_Comm comm1, MPI_Comm comm2, int *result) {
FUNC_NAME);
}
}
OPAL_CR_ENTER_LIBRARY();
rc = ompi_comm_compare ( (ompi_communicator_t*)comm1,
(ompi_communicator_t*)comm2,
result);

View file

@ -44,12 +44,10 @@ int MPI_Comm_connect(char *port_name, MPI_Info info, int root,
orte_process_name_t *port_proc_name=NULL;
char *tmp_port=NULL;
orte_rml_tag_t tag;
MEMCHECKER(
memchecker_comm(comm);
);
OPAL_CR_TEST_CHECKPOINT_READY();
OPAL_CR_TEST_CHECKPOINT_READY();
if ( MPI_PARAM_CHECK ) {
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
@ -91,7 +89,9 @@ int MPI_Comm_connect(char *port_name, MPI_Info info, int root,
* if ( rank == root && MPI_INFO_NULL != info ) {
* }
*/
OPAL_CR_ENTER_LIBRARY();
/*
* translate the port_name string into the according process_name_t
* structure.

View file

@ -36,12 +36,10 @@ static const char FUNC_NAME[] = "MPI_Comm_create";
int MPI_Comm_create(MPI_Comm comm, MPI_Group group, MPI_Comm *newcomm) {
int rc;
MEMCHECKER(
memchecker_comm(comm);
);
OPAL_CR_TEST_CHECKPOINT_READY();
OPAL_CR_TEST_CHECKPOINT_READY();
if ( MPI_PARAM_CHECK ) {
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
@ -59,6 +57,8 @@ int MPI_Comm_create(MPI_Comm comm, MPI_Group group, MPI_Comm *newcomm) {
FUNC_NAME);
}
OPAL_CR_ENTER_LIBRARY();
rc = ompi_comm_create ( (ompi_communicator_t*)comm, (ompi_group_t*)group,
(ompi_communicator_t**)newcomm );
OMPI_ERRHANDLER_RETURN ( rc, comm, rc, FUNC_NAME);

View file

@ -38,8 +38,6 @@ int MPI_Comm_create_errhandler(MPI_Comm_errhandler_fn *function,
{
int err = MPI_SUCCESS;
OPAL_CR_TEST_CHECKPOINT_READY();
/* Error checking */
if (MPI_PARAM_CHECK) {
@ -52,6 +50,8 @@ int MPI_Comm_create_errhandler(MPI_Comm_errhandler_fn *function,
}
}
OPAL_CR_ENTER_LIBRARY();
/* Create and cache the errhandler. Sets a refcount of 1. */
*errhandler =

View file

@ -41,8 +41,6 @@ int MPI_Comm_create_keyval(MPI_Comm_copy_attr_function *comm_copy_attr_fn,
ompi_attribute_fn_ptr_union_t copy_fn;
ompi_attribute_fn_ptr_union_t del_fn;
OPAL_CR_TEST_CHECKPOINT_READY();
if (MPI_PARAM_CHECK) {
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if ((NULL == comm_copy_attr_fn) || (NULL == comm_delete_attr_fn) ||
@ -51,7 +49,9 @@ int MPI_Comm_create_keyval(MPI_Comm_copy_attr_function *comm_copy_attr_fn,
FUNC_NAME);
}
}
OPAL_CR_ENTER_LIBRARY();
copy_fn.attr_communicator_copy_fn = (MPI_Comm_internal_copy_attr_function*)comm_copy_attr_fn;
del_fn.attr_communicator_delete_fn = comm_delete_attr_fn;

View file

@ -37,12 +37,10 @@ static const char FUNC_NAME[] = "MPI_Comm_delete_attr";
int MPI_Comm_delete_attr(MPI_Comm comm, int comm_keyval)
{
int ret;
MEMCHECKER(
memchecker_comm(comm);
);
OPAL_CR_TEST_CHECKPOINT_READY();
OPAL_CR_TEST_CHECKPOINT_READY();
if (MPI_PARAM_CHECK) {
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
@ -51,7 +49,9 @@ int MPI_Comm_delete_attr(MPI_Comm comm, int comm_keyval)
FUNC_NAME);
}
}
OPAL_CR_ENTER_LIBRARY();
ret = ompi_attr_delete(COMM_ATTR, comm, comm->c_keyhash, comm_keyval,
false, true);

View file

@ -38,7 +38,6 @@ int MPI_Comm_disconnect(MPI_Comm *comm)
MEMCHECKER(
memchecker_comm(*comm);
);
OPAL_CR_TEST_CHECKPOINT_READY();
if ( MPI_PARAM_CHECK ) {
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
@ -52,6 +51,7 @@ int MPI_Comm_disconnect(MPI_Comm *comm)
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM, FUNC_NAME);
}
OPAL_CR_ENTER_LIBRARY();
if ( OMPI_COMM_IS_DYNAMIC(*comm)) {
ompi_comm_disconnect_obj *dobj;
@ -64,5 +64,7 @@ int MPI_Comm_disconnect(MPI_Comm *comm)
}
ompi_comm_free(comm);
OPAL_CR_EXIT_LIBRARY();
return MPI_SUCCESS;
}

View file

@ -36,14 +36,10 @@ int MPI_Comm_dup(MPI_Comm comm, MPI_Comm *newcomm)
{
int rc=MPI_SUCCESS;
OPAL_CR_TEST_CHECKPOINT_READY();
MEMCHECKER(
memchecker_comm(comm);
);
OPAL_CR_TEST_CHECKPOINT_READY();
/* argument checking */
if ( MPI_PARAM_CHECK ) {
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
@ -57,7 +53,8 @@ int MPI_Comm_dup(MPI_Comm comm, MPI_Comm *newcomm)
FUNC_NAME);
}
OPAL_CR_ENTER_LIBRARY();
rc = ompi_comm_dup ( comm, newcomm, 0);
OMPI_ERRHANDLER_RETURN ( rc, comm, rc, FUNC_NAME);
}

View file

@ -38,7 +38,7 @@ MPI_Comm MPI_Comm_f2c(MPI_Fint comm)
{
int o_index= OMPI_FINT_2_INT(comm);
OPAL_CR_TEST_CHECKPOINT_READY();
OPAL_CR_NOOP_PROGRESS();
if ( MPI_PARAM_CHECK ) {
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);

View file

@ -40,8 +40,6 @@ int MPI_Comm_free(MPI_Comm *comm)
memchecker_comm(*comm);
);
OPAL_CR_TEST_CHECKPOINT_READY();
if ( MPI_PARAM_CHECK ) {
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
@ -54,9 +52,12 @@ int MPI_Comm_free(MPI_Comm *comm)
FUNC_NAME);
}
}
OPAL_CR_ENTER_LIBRARY();
ret = ompi_comm_free ( comm );
OMPI_ERRHANDLER_CHECK(ret, *comm, ret, FUNC_NAME);
OPAL_CR_EXIT_LIBRARY();
return MPI_SUCCESS;
}

View file

@ -36,8 +36,6 @@ int MPI_Comm_free_keyval(int *comm_keyval)
{
int ret;
OPAL_CR_TEST_CHECKPOINT_READY();
/* Check for valid key pointer */
if (MPI_PARAM_CHECK) {
@ -47,7 +45,9 @@ int MPI_Comm_free_keyval(int *comm_keyval)
FUNC_NAME);
}
}
OPAL_CR_ENTER_LIBRARY();
ret = ompi_attr_free_keyval(COMM_ATTR, comm_keyval, 0);
OMPI_ERRHANDLER_RETURN(ret, MPI_COMM_WORLD, MPI_ERR_OTHER, FUNC_NAME);

View file

@ -37,12 +37,10 @@ int MPI_Comm_get_attr(MPI_Comm comm, int comm_keyval,
void *attribute_val, int *flag)
{
int ret;
MEMCHECKER(
memchecker_comm(comm);
);
OPAL_CR_TEST_CHECKPOINT_READY();
OPAL_CR_TEST_CHECKPOINT_READY();
if (MPI_PARAM_CHECK) {
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
@ -51,6 +49,8 @@ int MPI_Comm_get_attr(MPI_Comm comm, int comm_keyval,
}
}
OPAL_CR_ENTER_LIBRARY();
/* This stuff is very confusing. Be sure to see
src/attribute/attribute.c for a lengthy comment explaining Open
MPI attribute behavior. */

View file

@ -41,7 +41,7 @@ int MPI_Comm_get_errhandler(MPI_Comm comm, MPI_Errhandler *errhandler)
memchecker_comm(comm);
);
OPAL_CR_TEST_CHECKPOINT_READY();
OPAL_CR_NOOP_PROGRESS();
/* Error checking */

View file

@ -43,7 +43,8 @@ int MPI_Comm_get_name(MPI_Comm comm, char *name, int *length)
MEMCHECKER(
memchecker_comm(comm);
);
OPAL_CR_TEST_CHECKPOINT_READY();
OPAL_CR_NOOP_PROGRESS();
if ( MPI_PARAM_CHECK ) {
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);

View file

@ -34,7 +34,7 @@ static const char FUNC_NAME[] = "MPI_Comm_get_parent";
int MPI_Comm_get_parent(MPI_Comm *parent)
{
OPAL_CR_TEST_CHECKPOINT_READY();
OPAL_CR_NOOP_PROGRESS();
if ( MPI_PARAM_CHECK ) {
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);

View file

@ -36,12 +36,10 @@ static const char FUNC_NAME[] = "MPI_Comm_group";
int MPI_Comm_group(MPI_Comm comm, MPI_Group *group) {
int rc;
MEMCHECKER(
memchecker_comm(comm);
);
OPAL_CR_TEST_CHECKPOINT_READY();
OPAL_CR_TEST_CHECKPOINT_READY();
/* argument checking */
if ( MPI_PARAM_CHECK ) {
@ -56,7 +54,8 @@ int MPI_Comm_group(MPI_Comm comm, MPI_Group *group) {
FUNC_NAME);
} /* end if ( MPI_PARAM_CHECK) */
OPAL_CR_ENTER_LIBRARY();
rc = ompi_comm_group ( (ompi_communicator_t*)comm, (ompi_group_t**)group );
OMPI_ERRHANDLER_RETURN ( rc, comm, rc, FUNC_NAME);
}

View file

@ -61,8 +61,6 @@ int MPI_Comm_join(int fd, MPI_Comm *intercomm)
ompi_communicator_t *newcomp;
orte_process_name_t *port_proc_name=NULL;
OPAL_CR_TEST_CHECKPOINT_READY();
if ( MPI_PARAM_CHECK ) {
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
@ -71,12 +69,15 @@ int MPI_Comm_join(int fd, MPI_Comm *intercomm)
FUNC_NAME);
}
}
OPAL_CR_ENTER_LIBRARY();
/* sendrecv OOB-name (port-name) through the socket connection.
Need to determine somehow how to avoid a potential deadlock
here. */
myproc = ompi_proc_self (&size);
if (ORTE_SUCCESS != (rc = orte_ns.get_proc_name_string (&name, &(myproc[0]->proc_name)))) {
OPAL_CR_EXIT_LIBRARY();
return rc;
}
llen = (uint32_t)(strlen(name)+1);
@ -89,6 +90,7 @@ int MPI_Comm_join(int fd, MPI_Comm *intercomm)
rname = (char *) malloc (lrlen);
if ( NULL == rname ) {
*intercomm = MPI_COMM_NULL;
OPAL_CR_EXIT_LIBRARY();
return MPI_ERR_INTERN;
}
@ -99,6 +101,7 @@ int MPI_Comm_join(int fd, MPI_Comm *intercomm)
ompi_socket_recv (fd, rname, lrlen);
if (ORTE_SUCCESS != (rc = orte_ns.convert_string_to_process_name(&port_proc_name, rname))) {
OPAL_CR_EXIT_LIBRARY();
return rc;
}
rc = ompi_comm_connect_accept (MPI_COMM_SELF, 0, port_proc_name,

View file

@ -39,7 +39,7 @@ int MPI_Comm_rank(MPI_Comm comm, int *rank)
memchecker_comm(comm);
);
OPAL_CR_TEST_CHECKPOINT_READY();
OPAL_CR_NOOP_PROGRESS();
if ( MPI_PARAM_CHECK ) {
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);

View file

@ -40,7 +40,8 @@ int MPI_Comm_remote_group(MPI_Comm comm, MPI_Group *group)
MEMCHECKER(
memchecker_comm(comm);
);
OPAL_CR_TEST_CHECKPOINT_READY();
OPAL_CR_NOOP_PROGRESS();
if ( MPI_PARAM_CHECK ) {
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);

View file

@ -38,9 +38,8 @@ int MPI_Comm_remote_size(MPI_Comm comm, int *size) {
MEMCHECKER(
memchecker_comm(comm);
);
OPAL_CR_TEST_CHECKPOINT_READY();
OPAL_CR_TEST_CHECKPOINT_READY();
OPAL_CR_NOOP_PROGRESS();
if ( MPI_PARAM_CHECK ) {
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);

View file

@ -41,7 +41,6 @@ int MPI_Comm_set_attr(MPI_Comm comm, int comm_keyval, void *attribute_val)
MEMCHECKER(
memchecker_comm(comm);
);
OPAL_CR_TEST_CHECKPOINT_READY();
if (MPI_PARAM_CHECK) {
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
@ -50,7 +49,9 @@ int MPI_Comm_set_attr(MPI_Comm comm, int comm_keyval, void *attribute_val)
FUNC_NAME);
}
}
OPAL_CR_ENTER_LIBRARY();
ret = ompi_attr_set_c(COMM_ATTR, comm, &comm->c_keyhash,
comm_keyval, attribute_val, false, true);
OMPI_ERRHANDLER_RETURN(ret, comm, MPI_ERR_OTHER, FUNC_NAME);

View file

@ -39,7 +39,7 @@ int MPI_Comm_set_errhandler(MPI_Comm comm, MPI_Errhandler errhandler)
memchecker_comm(comm);
);
OPAL_CR_TEST_CHECKPOINT_READY();
OPAL_CR_NOOP_PROGRESS();
/* Error checking */

View file

@ -44,7 +44,6 @@ int MPI_Comm_set_name(MPI_Comm comm, char *name)
MEMCHECKER(
memchecker_comm(comm);
);
OPAL_CR_TEST_CHECKPOINT_READY();
if ( MPI_PARAM_CHECK ) {
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
@ -60,6 +59,8 @@ int MPI_Comm_set_name(MPI_Comm comm, char *name)
}
}
OPAL_CR_ENTER_LIBRARY();
rc = ompi_comm_set_name (comm, name );
/* -- Tracing information for new communicator name -- */
#if 0

View file

@ -37,9 +37,8 @@ int MPI_Comm_size(MPI_Comm comm, int *size) {
MEMCHECKER(
memchecker_comm(comm);
);
OPAL_CR_TEST_CHECKPOINT_READY();
OPAL_CR_TEST_CHECKPOINT_READY();
OPAL_CR_NOOP_PROGRESS();
if ( MPI_PARAM_CHECK ) {
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);

View file

@ -49,7 +49,6 @@ int MPI_Comm_spawn(char *command, char **argv, int maxprocs, MPI_Info info,
MEMCHECKER(
memchecker_comm(comm);
);
OPAL_CR_TEST_CHECKPOINT_READY();
if ( MPI_PARAM_CHECK ) {
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
@ -90,6 +89,7 @@ int MPI_Comm_spawn(char *command, char **argv, int maxprocs, MPI_Info info,
}
}
OPAL_CR_ENTER_LIBRARY();
if ( rank == root ) {
/* Open a port. The port_name is passed as an environment variable
@ -106,6 +106,8 @@ int MPI_Comm_spawn(char *command, char **argv, int maxprocs, MPI_Info info,
rc = ompi_comm_connect_accept (comm, root, NULL, send_first, &newcomp, tag);
error:
OPAL_CR_EXIT_LIBRARY();
/* close the port again. Nothing has to be done for that at the moment.*/
/* set error codes */

View file

@ -51,8 +51,6 @@ int MPI_Comm_spawn_multiple(int count, char **array_of_commands, char ***array_o
memchecker_comm(comm);
);
OPAL_CR_TEST_CHECKPOINT_READY();
if ( MPI_PARAM_CHECK ) {
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
@ -108,6 +106,8 @@ int MPI_Comm_spawn_multiple(int count, char **array_of_commands, char ***array_o
}
}
OPAL_CR_ENTER_LIBRARY();
if ( rank == root ) {
/* Open a port. The port_name is passed as an environment variable
* to the children. */
@ -124,6 +124,7 @@ int MPI_Comm_spawn_multiple(int count, char **array_of_commands, char ***array_o
rc = ompi_comm_connect_accept (comm, root, NULL, send_first, &newcomp, tag);
error:
OPAL_CR_EXIT_LIBRARY();
/* close the port again. Nothing has to be done for that at the moment.*/
/* set array of errorcodes */

View file

@ -40,7 +40,6 @@ int MPI_Comm_split(MPI_Comm comm, int color, int key, MPI_Comm *newcomm) {
MEMCHECKER(
memchecker_comm(comm);
);
OPAL_CR_TEST_CHECKPOINT_READY();
if ( MPI_PARAM_CHECK ) {
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
@ -60,7 +59,9 @@ int MPI_Comm_split(MPI_Comm comm, int color, int key, MPI_Comm *newcomm) {
FUNC_NAME);
}
}
OPAL_CR_ENTER_LIBRARY();
rc = ompi_comm_split ( (ompi_communicator_t*)comm, color, key,
(ompi_communicator_t**)newcomm, false);
OMPI_ERRHANDLER_RETURN ( rc, comm, rc, FUNC_NAME);

View file

@ -38,7 +38,8 @@ int MPI_Comm_test_inter(MPI_Comm comm, int *flag) {
MEMCHECKER(
memchecker_comm(comm);
);
OPAL_CR_TEST_CHECKPOINT_READY();
OPAL_CR_NOOP_PROGRESS();
if ( MPI_PARAM_CHECK ) {
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);

View file

@ -52,7 +52,7 @@ int MPI_Dims_create(int nnodes, int ndims, int *dims)
int *p;
int err;
OPAL_CR_TEST_CHECKPOINT_READY();
OPAL_CR_NOOP_PROGRESS();
if (MPI_PARAM_CHECK) {
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);

View file

@ -35,7 +35,8 @@ static const char FUNC_NAME[] = "MPI_Errhandler_c2f";
MPI_Fint MPI_Errhandler_c2f(MPI_Errhandler errhandler)
{
OPAL_CR_TEST_CHECKPOINT_READY();
OPAL_CR_NOOP_PROGRESS();
/* Error checking */

View file

@ -32,7 +32,6 @@
int MPI_Errhandler_create(MPI_Handler_function *function,
MPI_Errhandler *errhandler)
{
OPAL_CR_TEST_CHECKPOINT_READY();
/* This is a deprecated -- just turn around and call the real
function */

View file

@ -38,7 +38,7 @@ MPI_Errhandler MPI_Errhandler_f2c(MPI_Fint errhandler_f)
{
int eh_index = OMPI_FINT_2_INT(errhandler_f);
OPAL_CR_TEST_CHECKPOINT_READY();
OPAL_CR_NOOP_PROGRESS();
/* Error checking */

View file

@ -33,7 +33,8 @@ static const char FUNC_NAME[] = "MPI_Errhandler_free";
int MPI_Errhandler_free(MPI_Errhandler *errhandler)
{
OPAL_CR_TEST_CHECKPOINT_READY();
OPAL_CR_NOOP_PROGRESS();
/* Error checking */

View file

@ -37,7 +37,8 @@ int MPI_Errhandler_get(MPI_Comm comm, MPI_Errhandler *errhandler)
MEMCHECKER(
memchecker_comm(comm);
);
OPAL_CR_TEST_CHECKPOINT_READY();
OPAL_CR_NOOP_PROGRESS();
if (MPI_PARAM_CHECK) {
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);

View file

@ -38,7 +38,7 @@ int MPI_Errhandler_set(MPI_Comm comm, MPI_Errhandler errhandler)
memchecker_comm(comm);
);
OPAL_CR_TEST_CHECKPOINT_READY();
OPAL_CR_NOOP_PROGRESS();
if (MPI_PARAM_CHECK) {
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);

View file

@ -34,7 +34,8 @@ static const char FUNC_NAME[] = "MPI_Error_class";
int MPI_Error_class(int errorcode, int *errorclass)
{
OPAL_CR_TEST_CHECKPOINT_READY();
OPAL_CR_NOOP_PROGRESS();
if ( MPI_PARAM_CHECK ) {
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);

View file

@ -37,7 +37,7 @@ int MPI_Error_string(int errorcode, char *string, int *resultlen)
{
char *tmpstring;
OPAL_CR_TEST_CHECKPOINT_READY();
OPAL_CR_NOOP_PROGRESS();
if ( MPI_PARAM_CHECK ) {
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);

View file

@ -44,28 +44,26 @@ int MPI_Exscan(void *sendbuf, void *recvbuf, int count,
memchecker_call(&opal_memchecker_base_isdefined, sendbuf, count, datatype);
memchecker_comm(comm);
);
OPAL_CR_TEST_CHECKPOINT_READY();
if (MPI_PARAM_CHECK) {
char *msg;
err = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
FUNC_NAME);
}
}
/* Unrooted operation -- same checks for intracommunicators
and intercommunicators */
else if (MPI_OP_NULL == op) {
err = MPI_ERR_OP;
else if (MPI_OP_NULL == op) {
err = MPI_ERR_OP;
} else if (!ompi_op_is_valid(op, datatype, &msg, FUNC_NAME)) {
int ret = OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_OP, msg);
free(msg);
return ret;
} else {
OMPI_CHECK_DATATYPE_FOR_SEND(err, datatype, count);
OMPI_CHECK_DATATYPE_FOR_SEND(err, datatype, count);
}
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
}
@ -78,6 +76,8 @@ int MPI_Exscan(void *sendbuf, void *recvbuf, int count,
return MPI_SUCCESS;
}
OPAL_CR_ENTER_LIBRARY();
/* Invoke the coll component to perform the back-end operation */
OBJ_RETAIN(op);

View file

@ -37,7 +37,7 @@ static const char FUNC_NAME[] = "MPI_File_c2f";
MPI_Fint MPI_File_c2f(MPI_File file)
{
OPAL_CR_TEST_CHECKPOINT_READY();
OPAL_CR_NOOP_PROGRESS();
if (MPI_PARAM_CHECK) {
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);

View file

@ -35,7 +35,7 @@ static const char FUNC_NAME[] = "MPI_File_call_errhandler";
int MPI_File_call_errhandler(MPI_File fh, int errorcode)
{
OPAL_CR_TEST_CHECKPOINT_READY();
OPAL_CR_NOOP_PROGRESS();
/* Error checking */

View file

@ -36,8 +36,6 @@ int MPI_File_close(MPI_File *fh)
{
int rc;
OPAL_CR_TEST_CHECKPOINT_READY();
if (MPI_PARAM_CHECK) {
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
@ -51,6 +49,8 @@ int MPI_File_close(MPI_File *fh)
}
}
OPAL_CR_ENTER_LIBRARY();
/* Release the MPI_File; the destructor releases the component,
zeroes out fiels, etc. */

View file

@ -37,8 +37,6 @@ int MPI_File_create_errhandler(MPI_File_errhandler_fn *function,
MPI_Errhandler *errhandler) {
int err = MPI_SUCCESS;
OPAL_CR_TEST_CHECKPOINT_READY();
/* Error checking */
if (MPI_PARAM_CHECK) {
@ -50,6 +48,8 @@ int MPI_File_create_errhandler(MPI_File_errhandler_fn *function,
}
}
OPAL_CR_ENTER_LIBRARY();
/* Create and cache the errhandler. Sets a refcount of 1. */
*errhandler =

View file

@ -39,8 +39,6 @@ int MPI_File_delete(char *filename, MPI_Info info)
{
int rc;
OPAL_CR_TEST_CHECKPOINT_READY();
if (MPI_PARAM_CHECK) {
rc = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
@ -76,6 +74,8 @@ int MPI_File_delete(char *filename, MPI_Info info)
}
}
OPAL_CR_ENTER_LIBRARY();
/* Since there is no MPI_File handle associated with this
function, the MCA has to do a selection and perform the
action */

View file

@ -39,7 +39,7 @@ MPI_File MPI_File_f2c(MPI_Fint file_f)
{
int file_index = OMPI_FINT_2_INT(file_f);
OPAL_CR_TEST_CHECKPOINT_READY();
OPAL_CR_NOOP_PROGRESS();
if (MPI_PARAM_CHECK) {
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);

View file

@ -36,8 +36,6 @@ int MPI_File_get_amode(MPI_File fh, int *amode)
{
int rc;
OPAL_CR_TEST_CHECKPOINT_READY();
if (MPI_PARAM_CHECK) {
rc = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
@ -50,6 +48,8 @@ int MPI_File_get_amode(MPI_File fh, int *amode)
OMPI_ERRHANDLER_CHECK(rc, fh, rc, FUNC_NAME);
}
OPAL_CR_ENTER_LIBRARY();
/* Call the back-end io component function */
switch (fh->f_io_version) {

View file

@ -37,8 +37,6 @@ int MPI_File_get_atomicity(MPI_File fh, int *flag)
{
int rc;
OPAL_CR_TEST_CHECKPOINT_READY();
if (MPI_PARAM_CHECK) {
rc = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
@ -51,6 +49,8 @@ int MPI_File_get_atomicity(MPI_File fh, int *flag)
OMPI_ERRHANDLER_CHECK(rc, fh, rc, FUNC_NAME);
}
OPAL_CR_ENTER_LIBRARY();
/* Call the back-end io component function */
switch (fh->f_io_version) {

View file

@ -38,8 +38,6 @@ int MPI_File_get_byte_offset(MPI_File fh, MPI_Offset offset,
{
int rc;
OPAL_CR_TEST_CHECKPOINT_READY();
if (MPI_PARAM_CHECK) {
rc = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
@ -52,6 +50,8 @@ int MPI_File_get_byte_offset(MPI_File fh, MPI_Offset offset,
OMPI_ERRHANDLER_CHECK(rc, fh, rc, FUNC_NAME);
}
OPAL_CR_ENTER_LIBRARY();
/* Call the back-end io component function */
switch (fh->f_io_version) {

View file

@ -35,7 +35,7 @@ static const char FUNC_NAME[] = "MPI_File_get_errhandler";
int MPI_File_get_errhandler( MPI_File file, MPI_Errhandler *errhandler)
{
OPAL_CR_TEST_CHECKPOINT_READY();
OPAL_CR_NOOP_PROGRESS();
/* Error checking */

View file

@ -36,8 +36,6 @@ int MPI_File_get_group(MPI_File fh, MPI_Group *group)
{
int rc;
OPAL_CR_TEST_CHECKPOINT_READY();
if (MPI_PARAM_CHECK) {
rc = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
@ -50,6 +48,8 @@ int MPI_File_get_group(MPI_File fh, MPI_Group *group)
OMPI_ERRHANDLER_CHECK(rc, fh, rc, FUNC_NAME);
}
OPAL_CR_ENTER_LIBRARY();
/* Does not need to invoke a back-end io function */
rc = ompi_comm_group (fh->f_comm, group);

View file

@ -36,8 +36,6 @@ int MPI_File_get_info(MPI_File fh, MPI_Info *info_used)
{
int rc;
OPAL_CR_TEST_CHECKPOINT_READY();
if (MPI_PARAM_CHECK) {
rc = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
@ -50,6 +48,8 @@ int MPI_File_get_info(MPI_File fh, MPI_Info *info_used)
OMPI_ERRHANDLER_CHECK(rc, fh, rc, FUNC_NAME);
}
OPAL_CR_ENTER_LIBRARY();
/* Call the back-end io component function */
switch (fh->f_io_version) {

View file

@ -36,8 +36,6 @@ int MPI_File_get_position(MPI_File fh, MPI_Offset *offset)
{
int rc;
OPAL_CR_TEST_CHECKPOINT_READY();
if (MPI_PARAM_CHECK) {
rc = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
@ -50,6 +48,8 @@ int MPI_File_get_position(MPI_File fh, MPI_Offset *offset)
OMPI_ERRHANDLER_CHECK(rc, fh, rc, FUNC_NAME);
}
OPAL_CR_ENTER_LIBRARY();
/* Call the back-end io component function */
switch (fh->f_io_version) {

View file

@ -36,8 +36,6 @@ int MPI_File_get_position_shared(MPI_File fh, MPI_Offset *offset)
{
int rc;
OPAL_CR_TEST_CHECKPOINT_READY();
if (MPI_PARAM_CHECK) {
rc = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
@ -50,6 +48,8 @@ int MPI_File_get_position_shared(MPI_File fh, MPI_Offset *offset)
OMPI_ERRHANDLER_CHECK(rc, fh, rc, FUNC_NAME);
}
OPAL_CR_ENTER_LIBRARY();
/* Call the back-end io component function */
switch (fh->f_io_version) {

View file

@ -36,8 +36,6 @@ int MPI_File_get_size(MPI_File fh, MPI_Offset *size)
{
int rc;
OPAL_CR_TEST_CHECKPOINT_READY();
if (MPI_PARAM_CHECK) {
rc = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
@ -50,6 +48,8 @@ int MPI_File_get_size(MPI_File fh, MPI_Offset *size)
OMPI_ERRHANDLER_CHECK(rc, fh, rc, FUNC_NAME);
}
OPAL_CR_ENTER_LIBRARY();
/* Call the back-end io component function */
switch (fh->f_io_version) {

View file

@ -38,8 +38,6 @@ int MPI_File_get_type_extent(MPI_File fh, MPI_Datatype datatype,
{
int rc;
OPAL_CR_TEST_CHECKPOINT_READY();
MEMCHECKER(
memchecker_datatype(datatype);
);
@ -56,6 +54,8 @@ int MPI_File_get_type_extent(MPI_File fh, MPI_Datatype datatype,
OMPI_ERRHANDLER_CHECK(rc, fh, rc, FUNC_NAME);
}
OPAL_CR_ENTER_LIBRARY();
/* Call the back-end io component function */
switch (fh->f_io_version) {

View file

@ -38,8 +38,6 @@ int MPI_File_get_view(MPI_File fh, MPI_Offset *disp,
{
int rc;
OPAL_CR_TEST_CHECKPOINT_READY();
if (MPI_PARAM_CHECK) {
rc = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
@ -54,6 +52,8 @@ int MPI_File_get_view(MPI_File fh, MPI_Offset *disp,
OMPI_ERRHANDLER_CHECK(rc, fh, rc, FUNC_NAME);
}
OPAL_CR_ENTER_LIBRARY();
/* Call the back-end io component function */
switch (fh->f_io_version) {

View file

@ -42,8 +42,6 @@ int MPI_File_iread(MPI_File fh, void *buf, int count,
int rc;
mca_io_base_request_t *io_request;
OPAL_CR_TEST_CHECKPOINT_READY();
MEMCHECKER(
memchecker_datatype(datatype);
);
@ -62,9 +60,12 @@ int MPI_File_iread(MPI_File fh, void *buf, int count,
OMPI_ERRHANDLER_CHECK(rc, fh, rc, FUNC_NAME);
}
OPAL_CR_ENTER_LIBRARY();
/* Get a request */
if (OMPI_SUCCESS != mca_io_base_request_alloc(fh, &io_request)) {
OPAL_CR_EXIT_LIBRARY();
return OMPI_ERRHANDLER_INVOKE(fh, MPI_ERR_NO_MEM, FUNC_NAME);
}
*request = (ompi_request_t*) io_request;

View file

@ -42,8 +42,6 @@ int MPI_File_iread_at(MPI_File fh, MPI_Offset offset, void *buf,
int rc;
mca_io_base_request_t *io_request;
OPAL_CR_TEST_CHECKPOINT_READY();
MEMCHECKER(
memchecker_datatype(datatype);
);
@ -62,9 +60,12 @@ int MPI_File_iread_at(MPI_File fh, MPI_Offset offset, void *buf,
OMPI_ERRHANDLER_CHECK(rc, fh, rc, FUNC_NAME);
}
OPAL_CR_ENTER_LIBRARY();
/* Get a request */
if (OMPI_SUCCESS != mca_io_base_request_alloc(fh, &io_request)) {
OPAL_CR_EXIT_LIBRARY();
return OMPI_ERRHANDLER_INVOKE(fh, MPI_ERR_NO_MEM, FUNC_NAME);
}
*request = (ompi_request_t*) io_request;

View file

@ -42,8 +42,6 @@ int MPI_File_iread_shared(MPI_File fh, void *buf, int count,
int rc;
mca_io_base_request_t *io_request;
OPAL_CR_TEST_CHECKPOINT_READY();
MEMCHECKER(
memchecker_datatype(datatype);
);
@ -62,9 +60,12 @@ int MPI_File_iread_shared(MPI_File fh, void *buf, int count,
OMPI_ERRHANDLER_CHECK(rc, fh, rc, FUNC_NAME);
}
OPAL_CR_ENTER_LIBRARY();
/* Get a request */
if (OMPI_SUCCESS != mca_io_base_request_alloc(fh, &io_request)) {
OPAL_CR_EXIT_LIBRARY();
return OMPI_ERRHANDLER_INVOKE(fh, MPI_ERR_NO_MEM, FUNC_NAME);
}
*request = (ompi_request_t*) io_request;

View file

@ -41,13 +41,11 @@ int MPI_File_iwrite(MPI_File fh, void *buf, int count, MPI_Datatype
{
int rc;
mca_io_base_request_t *io_request;
MEMCHECKER(
memchecker_datatype(datatype);
memchecker_call(&opal_memchecker_base_isdefined, buf, count, datatype);
);
OPAL_CR_TEST_CHECKPOINT_READY();
OPAL_CR_TEST_CHECKPOINT_READY();
if (MPI_PARAM_CHECK) {
rc = MPI_SUCCESS;
@ -63,9 +61,12 @@ int MPI_File_iwrite(MPI_File fh, void *buf, int count, MPI_Datatype
OMPI_ERRHANDLER_CHECK(rc, fh, rc, FUNC_NAME);
}
OPAL_CR_ENTER_LIBRARY();
/* Get a request */
if (OMPI_SUCCESS != mca_io_base_request_alloc(fh, &io_request)) {
OPAL_CR_EXIT_LIBRARY();
return OMPI_ERRHANDLER_INVOKE(fh, MPI_ERR_NO_MEM, FUNC_NAME);
}
*request = (ompi_request_t*) io_request;

View file

@ -42,13 +42,11 @@ int MPI_File_iwrite_at(MPI_File fh, MPI_Offset offset, void *buf,
{
int rc;
mca_io_base_request_t *io_request;
MEMCHECKER(
memchecker_datatype(datatype);
memchecker_call(&opal_memchecker_base_isdefined, buf, count, datatype);
);
OPAL_CR_TEST_CHECKPOINT_READY();
OPAL_CR_TEST_CHECKPOINT_READY();
if (MPI_PARAM_CHECK) {
rc = MPI_SUCCESS;
@ -64,9 +62,12 @@ int MPI_File_iwrite_at(MPI_File fh, MPI_Offset offset, void *buf,
OMPI_ERRHANDLER_CHECK(rc, fh, rc, FUNC_NAME);
}
OPAL_CR_ENTER_LIBRARY();
/* Get a request */
if (OMPI_SUCCESS != mca_io_base_request_alloc(fh, &io_request)) {
OPAL_CR_EXIT_LIBRARY();
return OMPI_ERRHANDLER_INVOKE(fh, MPI_ERR_NO_MEM, FUNC_NAME);
}
*request = (ompi_request_t*) io_request;

Some files were not shown because too many files changed in this diff.