
Move the RTE framework change into the trunk. With this change, all non-CR runtime code goes through one of the rte, dpm, or pubsub frameworks.

This commit was SVN r27934.
This commit is contained in:
Brian Barrett 2013-01-27 23:25:10 +00:00
parent d65194a97d
Commit f42783ae1a
232 changed files: 4404 additions and 2203 deletions
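The pattern applied across these files is visible throughout the diff: OMPI-layer code stops including and calling ORTE directly (orte_show_help, orte_process_info, orte_rml, ORTE_NAME_PRINT) and instead goes through the new ompi/mca/rte framework (ompi_show_help, ompi_process_info, ompi_rte_send_buffer, OMPI_NAME_PRINT), which maps back onto ORTE when OMPI_RTE_ORTE is set. A minimal sketch of a caller before and after; the helper name report_node() is hypothetical, only the runtime identifiers are taken from the diff:

/* Hypothetical caller illustrating the indirection this commit introduces;
 * only the rte/opal identifiers shown in the diff are real. */
#include "ompi/mca/rte/rte.h"      /* replaces direct orte/... includes */
#include "opal/util/output.h"

static void report_node(void)
{
    /* before: orte_process_info.nodename and ORTE_NAME_PRINT(ORTE_PROC_MY_NAME) */
    opal_output(0, "[%s] running as %s",
                ompi_process_info.nodename,
                OMPI_NAME_PRINT(OMPI_PROC_MY_NAME));
}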

View file

@ -978,11 +978,11 @@ if (! -e "orte") {
debug "No orte subdirectory found - will not build ORTE\n";
}
if ($no_ompi_arg) {
if ($no_ompi_arg == 1 && $no_orte_arg == 0) {
$project_name_long = "Open MPI Run Time Environment";
$project_name_short = "open-rte";
}
if ($no_orte_arg) {
if ($no_ompi_arg == 1 && $no_orte_arg == 1) {
$project_name_long = "Open Portability Access Layer";
$project_name_short = "open-pal";
}
@ -1140,7 +1140,7 @@ if (! (-f "VERSION" && -f "configure.ac" && -f $topdir_file)) {
my $projects;
push(@{$projects}, { name => "opal", dir => "opal", need_base => 1 });
push(@{$projects}, { name => "orte", dir => "orte", need_base => 1 })
if (!$no_ompi_arg || !$no_orte_arg);
if (!$no_orte_arg);
push(@{$projects}, { name => "ompi", dir => "ompi", need_base => 1 })
if (!$no_ompi_arg);

View file

@ -46,7 +46,5 @@ AC_DEFUN([OMPI_CONFIG_FILES],[
ompi/tools/wrappers/ompi-cxx.pc
ompi/tools/wrappers/ompi-fort.pc
ompi/tools/wrappers/mpijavac.pl
ompi/tools/ortetools/Makefile
ompi/tools/ompi-server/Makefile
])
])

View file

@ -32,5 +32,6 @@ AC_DEFUN([ORTE_CONFIG_FILES],[
orte/tools/orte-top/Makefile
orte/tools/orte-migrate/Makefile
orte/tools/orte-info/Makefile
orte/tools/orte-server/Makefile
])
])

View file

@ -137,8 +137,13 @@ libmpi_la_LIBADD = \
$(mpi_fortran_base_lib) \
$(MCA_ompi_FRAMEWORK_LIBS) \
$(OMPI_MPIEXT_C_LIBS) \
$(OMPI_LIBMPI_EXTRA_LIBS) \
$(OMPI_LIBMPI_EXTRA_LIBS)
if OMPI_RTE_ORTE
libmpi_la_LIBADD += \
$(top_ompi_builddir)/orte/libopen-rte.la
endif
libmpi_la_LIBADD += \
$(top_ompi_builddir)/opal/libopen-pal.la
libmpi_la_DEPENDENCIES = $(libmpi_la_LIBADD)
libmpi_la_LDFLAGS = \
-version-info $(libmpi_so_version) \

View file

@ -92,7 +92,6 @@
#include "ompi/errhandler/errcode.h"
#include "ompi/communicator/communicator.h"
#include "ompi/mca/pml/pml.h"
#include "orte/util/proc_info.h"
/*
* Private functions
@ -174,7 +173,7 @@ int ompi_attr_create_predefined(void)
return ret;
}
ret = set_f(MPI_APPNUM, orte_process_info.app_num);
ret = set_f(MPI_APPNUM, ompi_process_info.app_num);
return ret;
}

View file

@ -13,6 +13,8 @@
* Copyright (c) 2007-2012 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2009 Sun Microsystems, Inc. All rights reserved.
* Copyright (c) 2012 Oak Ridge National Labs. All rights reserved.
* Copyright (c) 2012 Los Alamos National Security, LLC.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -28,8 +30,6 @@
#include "opal/mca/hwloc/base/base.h"
#include "opal/dss/dss.h"
#include "orte/util/name_fns.h"
#include "orte/mca/rml/rml_types.h"
#include "ompi/proc/proc.h"
#include "opal/threads/mutex.h"
@ -1251,7 +1251,7 @@ ompi_proc_t **ompi_comm_get_rprocs ( ompi_communicator_t *local_comm,
int rc;
int local_rank, local_size;
ompi_proc_t **rprocs=NULL;
orte_std_cntr_t size_len;
int32_t size_len;
int int_len, rlen;
opal_buffer_t *sbuf=NULL, *rbuf=NULL;
void *sendbuf;
@ -1355,7 +1355,7 @@ ompi_proc_t **ompi_comm_get_rprocs ( ompi_communicator_t *local_comm,
goto err_exit;
}
if (ORTE_SUCCESS != (rc = opal_dss.load(rbuf, recvbuf, rlen))) {
if (OMPI_SUCCESS != (rc = opal_dss.load(rbuf, recvbuf, rlen))) {
goto err_exit;
}
@ -1425,7 +1425,7 @@ int ompi_comm_determine_first ( ompi_communicator_t *intercomm, int high )
int scount=0;
int rc;
ompi_proc_t *ourproc, *theirproc;
orte_ns_cmp_bitmask_t mask;
ompi_rte_cmp_bitmask_t mask;
rank = ompi_comm_rank (intercomm);
rsize= ompi_comm_remote_size (intercomm);
@ -1467,8 +1467,8 @@ int ompi_comm_determine_first ( ompi_communicator_t *intercomm, int high )
ourproc = ompi_group_peer_lookup(intercomm->c_local_group,0);
theirproc = ompi_group_peer_lookup(intercomm->c_remote_group,0);
mask = ORTE_NS_CMP_JOBID | ORTE_NS_CMP_VPID;
rc = orte_util_compare_name_fields(mask, &(ourproc->proc_name), &(theirproc->proc_name));
mask = OMPI_RTE_CMP_JOBID | OMPI_RTE_CMP_VPID;
rc = ompi_rte_compare_name_fields(mask, &(ourproc->proc_name), &(theirproc->proc_name));
if ( 0 > rc ) {
flag = true;
}

View file

@ -27,7 +27,6 @@
#include "ompi_config.h"
#include "opal/dss/dss.h"
#include "orte/types.h"
#include "ompi/proc/proc.h"
#include "ompi/communicator/communicator.h"
#include "ompi/op/op.h"
@ -35,13 +34,11 @@
#include "opal/class/opal_pointer_array.h"
#include "opal/class/opal_list.h"
#include "ompi/mca/pml/pml.h"
#include "ompi/mca/rte/rte.h"
#include "ompi/mca/coll/base/base.h"
#include "ompi/request/request.h"
#include "ompi/runtime/ompi_module_exchange.h"
#include "ompi/runtime/mpiruntime.h"
#include "ompi/mca/dpm/dpm.h"
#include "orte/mca/rml/rml.h"
BEGIN_C_DECLS
@ -783,11 +780,11 @@ static int ompi_comm_allreduce_intra_oob (int *inbuf, int *outbuf,
int i;
int rc;
int local_leader, local_rank;
orte_process_name_t *remote_leader=NULL;
orte_std_cntr_t size_count;
ompi_process_name_t *remote_leader=NULL;
int32_t size_count;
local_leader = (*((int*)lleader));
remote_leader = (orte_process_name_t*)rleader;
remote_leader = (ompi_process_name_t*)rleader;
size_count = count;
if ( &ompi_mpi_op_sum.op != op && &ompi_mpi_op_prod.op != op &&
@ -817,23 +814,23 @@ static int ompi_comm_allreduce_intra_oob (int *inbuf, int *outbuf,
sbuf = OBJ_NEW(opal_buffer_t);
rbuf = OBJ_NEW(opal_buffer_t);
if (OPAL_SUCCESS != (rc = opal_dss.pack(sbuf, tmpbuf, (orte_std_cntr_t)count, OPAL_INT))) {
if (OPAL_SUCCESS != (rc = opal_dss.pack(sbuf, tmpbuf, (int32_t)count, OPAL_INT))) {
goto exit;
}
if ( send_first ) {
if (0 > (rc = orte_rml.send_buffer(remote_leader, sbuf, OMPI_RML_TAG_COMM_CID_INTRA, 0))) {
if (0 > (rc = ompi_rte_send_buffer(remote_leader, sbuf, OMPI_RML_TAG_COMM_CID_INTRA, 0))) {
goto exit;
}
if (0 > (rc = orte_rml.recv_buffer(remote_leader, rbuf, OMPI_RML_TAG_COMM_CID_INTRA, 0))) {
if (0 > (rc = ompi_rte_recv_buffer(remote_leader, rbuf, OMPI_RML_TAG_COMM_CID_INTRA, 0))) {
goto exit;
}
}
else {
if (0 > (rc = orte_rml.recv_buffer(remote_leader, rbuf, OMPI_RML_TAG_COMM_CID_INTRA, 0))) {
if (0 > (rc = ompi_rte_recv_buffer(remote_leader, rbuf, OMPI_RML_TAG_COMM_CID_INTRA, 0))) {
goto exit;
}
if (0 > (rc = orte_rml.send_buffer(remote_leader, sbuf, OMPI_RML_TAG_COMM_CID_INTRA, 0))) {
if (0 > (rc = ompi_rte_send_buffer(remote_leader, sbuf, OMPI_RML_TAG_COMM_CID_INTRA, 0))) {
goto exit;
}
}

View file

@ -13,6 +13,8 @@
* Copyright (c) 2006-2010 University of Houston. All rights reserved.
* Copyright (c) 2007-2012 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2009 Sun Microsystems, Inc. All rights reserved.
* Copyright (c) 2012 Los Alamos National Security, LLC.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow

View file

@ -31,9 +31,10 @@
BEGIN_C_DECLS
/**
* Wait for a debugger if asked.
* Setup the magic constants so that the debugger can find the DLL
* necessary for understanding the queues and other structures.
*/
extern void ompi_wait_for_debugger(void);
extern void ompi_debugger_setup_dlls(void);
/**
* Notify a debugger that we're about to abort
@ -47,6 +48,21 @@ extern void ompi_debugger_notify_abort(char *string);
*/
OMPI_DECLSPEC void* MPIR_Breakpoint(void);
/**
* Flag debugger will set when an application may proceed past
* MPI_INIT. This needs to live in ompi_debuggers.c so that it's
* compiled with -g, but is needed by the runtime framework for
* startup
*/
OMPI_DECLSPEC extern volatile int MPIR_debug_gate;
/**
* Flag debugger will set if application is being debugged. This
* needs to live in ompi_debuggers.c so that it's compiled with -g,
* but is needed by the runtime framework for startup.
*/
OMPI_DECLSPEC extern volatile int MPIR_being_debugged;
END_C_DECLS
#endif /* OMPI_DEBUGGERS_H */
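
The flag declarations above are exported so that whichever rte component is active can gate startup on the debugger. The loop this commit removes from ompi_debuggers.c further down gives the idea; this is only a sketch modeled on that removed code, the real wait is assumed to live in the rte framework now:

/* Sketch of a debugger gate wait, modeled on the loop removed below;
 * the active rte component is assumed to provide the real implementation. */
while (MPIR_debug_gate == 0) {
#if defined(HAVE_USLEEP)
    usleep(100000);   /* microseconds */
#else
    sleep(1);         /* seconds */
#endif
}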

View file

@ -11,6 +11,8 @@
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2007-2011 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2012 Los Alamos National Security, LLC.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -48,6 +50,7 @@
#include "opal/util/argv.h"
#include "opal/mca/installdirs/installdirs.h"
#include "debuggers.h"
#include "ompi/mca/rte/rte.h"
/**
* BEWARE: The following headers are required by optimized builds in order
* to get access to the type information. Some compilers remove all type
@ -72,10 +75,6 @@
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/include/mpi.h"
#include "orte/mca/errmgr/errmgr.h"
#include "orte/mca/rml/rml.h"
#include "orte/runtime/orte_globals.h"
#if defined(OMPI_MSGQ_DLL)
/* This variable is old/deprecated -- the mpimsgq_dll_locations[]
method is preferred because it's more flexible */
@ -161,47 +160,12 @@ static void check(char *dir, char *file, char **locations)
}
/*
* Wait for a debugger if asked. We support two ways of waiting for
* attaching debuggers -- see big comment in
* orte/tools/orterun/debuggers.c explaning the two scenarios.
*/
void ompi_wait_for_debugger(void)
extern void
ompi_debugger_setup_dlls(void)
{
int i, debugger;
int i;
char *a, *b, **dirs, **tmp1 = NULL, **tmp2 = NULL;
#if !ORTE_DISABLE_FULL_SUPPORT
opal_buffer_t buf;
int rc;
#endif
/* See lengthy comment in orte/tools/orterun/debuggers.c about
orte_in_parallel_debugger */
#if ORTE_DISABLE_FULL_SUPPORT
debugger = 0;
#else
debugger = orte_in_parallel_debugger;
#endif
/* Add in environment variables for other launchers, such as yod,
srun, ...etc. */
if (1 == MPIR_being_debugged) {
debugger = 1;
} else if (NULL != getenv("yod_you_are_being_debugged")) {
debugger = 1;
}
if (1 == MPIR_being_debugged) {
debugger = 1;
}
if (!debugger) {
/* if not, just return */
return;
}
/* if we are being debugged, then we need to find
* the correct plug-ins
*/
a = strdup(opal_install_dirs.pkglibdir);
mca_base_param_reg_string_name("ompi",
"debugger_dll_path",
@ -224,47 +188,8 @@ void ompi_wait_for_debugger(void)
non-NULL values only when the entire array is ready). */
mpimsgq_dll_locations = tmp1;
mpidbg_dll_locations = tmp2;
}
#if !ORTE_DISABLE_FULL_SUPPORT
if (orte_standalone_operation) {
#endif
/* spin until debugger attaches and releases us */
while (MPIR_debug_gate == 0) {
#if defined(__WINDOWS__)
Sleep(100); /* milliseconds */
#elif defined(HAVE_USLEEP)
usleep(100000); /* microseconds */
#else
sleep(1); /* seconds */
#endif
}
#if !ORTE_DISABLE_FULL_SUPPORT
} else {
/* only the rank=0 proc waits for either a message from the
* HNP or for the debugger to attach - everyone else will just
* spin in * the grpcomm barrier in ompi_mpi_init until rank=0
* joins them.
*/
if (0 != ORTE_PROC_MY_NAME->vpid) {
return;
}
/* VPID 0 waits for a message from the HNP */
OBJ_CONSTRUCT(&buf, opal_buffer_t);
rc = orte_rml.recv_buffer(ORTE_NAME_WILDCARD, &buf,
ORTE_RML_TAG_DEBUGGER_RELEASE, 0);
OBJ_DESTRUCT(&buf); /* don't care about contents of message */
if (rc < 0) {
/* if it failed for some reason, then we are in trouble -
* for now, just report the problem and give up waiting
*/
opal_output(0, "Debugger_attach[rank=%ld]: could not wait for debugger!",
(long)ORTE_PROC_MY_NAME->vpid);
}
}
#endif
}
/*
* Tell the debugger that we are about to abort

View file

@ -35,8 +35,6 @@
#include "ompi/errhandler/errhandler_predefined.h"
#include "ompi/errhandler/errcode-internal.h"
#include "orte/types.h"
BEGIN_C_DECLS
/*

View file

@ -10,9 +10,11 @@
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2006 University of Houston. All rights reserved.
* Copyright (c) 2008-2011 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2008-2013 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2009 Sun Microsystems, Inc. All rights reserved.
* Copyright (c) 2010-2011 Oak Ridge National Labs. All rights reserved.
* Copyright (c) 2012 Los Alamos National Security, LLC.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -30,9 +32,7 @@
#include <sys/param.h>
#endif
#include "orte/util/show_help.h"
#include "orte/runtime/orte_globals.h"
#include "orte/util/name_fns.h"
#include "ompi/mca/rte/rte.h"
#include "ompi/errhandler/errhandler_predefined.h"
#include "ompi/errhandler/errcode.h"
#include "ompi/communicator/communicator.h"
@ -163,7 +163,7 @@ static void out(char *str, char *arg)
}
/*
* Use orte_show_help() to aggregate the error messages (i.e., show it
* Use ompi_show_help() to aggregate the error messages (i.e., show it
* once rather than N times).
*
* Note that this function will only be invoked for errors during the
@ -179,11 +179,13 @@ static void backend_fatal_aggregate(char *type,
char *arg, *prefix, *err_msg = "Unknown error";
bool err_msg_need_free = false;
assert(ompi_mpi_initialized && !ompi_mpi_finalized);
arg = va_arg(arglist, char*);
va_end(arglist);
asprintf(&prefix, "[%s:%d]", orte_process_info.nodename,
(int) orte_process_info.pid);
asprintf(&prefix, "[%s:%d]", ompi_process_info.nodename,
(int) ompi_process_info.pid);
if (NULL != error_code) {
err_msg = ompi_mpi_errnum_get_string(*error_code);
@ -194,19 +196,19 @@ static void backend_fatal_aggregate(char *type,
}
}
if (NULL != name && ompi_mpi_initialized && !ompi_mpi_finalized) {
orte_show_help("help-mpi-errors.txt",
if (NULL != name) {
ompi_show_help("help-mpi-errors.txt",
"mpi_errors_are_fatal", false,
prefix, (NULL == arg) ? "" : "in",
(NULL == arg) ? "" : arg,
prefix, ORTE_PROC_MY_NAME->jobid, ORTE_PROC_MY_NAME->vpid,
prefix, OMPI_PROC_MY_NAME->jobid, OMPI_PROC_MY_NAME->vpid,
prefix, type, name, prefix, err_msg, prefix, type, prefix);
} else if (NULL == name) {
orte_show_help("help-mpi-errors.txt",
} else {
ompi_show_help("help-mpi-errors.txt",
"mpi_errors_are_fatal unknown handle", false,
prefix, (NULL == arg) ? "" : "in",
(NULL == arg) ? "" : arg,
prefix, ORTE_PROC_MY_NAME->jobid, ORTE_PROC_MY_NAME->vpid,
prefix, OMPI_PROC_MY_NAME->jobid, OMPI_PROC_MY_NAME->vpid,
prefix, type, prefix, err_msg, prefix, type, prefix);
}
@ -219,6 +221,11 @@ static void backend_fatal_aggregate(char *type,
* Note that this function has to handle pre-MPI_INIT and
* post-MPI_FINALIZE errors, which backend_fatal_aggregate() does not
* have to handle.
*
* This function also intentionally does not call malloc(), just in
* case we're being called due to some kind of stack/memory error --
* we *might* be able to get a message out if we're not further
* corrupting the stack by calling malloc()...
*/
static void backend_fatal_no_aggregate(char *type,
struct ompi_communicator_t *comm,
@ -227,6 +234,8 @@ static void backend_fatal_no_aggregate(char *type,
{
char *arg;
assert(!ompi_mpi_initialized || ompi_mpi_finalized);
fflush(stdout);
fflush(stderr);
@ -318,14 +327,9 @@ static void backend_fatal(char *type, struct ompi_communicator_t *comm,
char *name, int *error_code,
va_list arglist)
{
/* Do we want help message aggregation? Usually yes, but it uses
malloc(), which may cause further errors if we're exiting due
to a memory problem. So we also have the option to *not*
aggregate (which doesn't use malloc during its call stack,
meaning that there is a better chance that the error message
will actually get printed). Note that we can only do
aggregation after MPI_INIT and before MPI_FINALIZE. */
if (orte_help_want_aggregate && orte_show_help_is_available()) {
/* We only want aggregation after MPI_INIT and before
MPI_FINALIZE. */
if (ompi_mpi_initialized && !ompi_mpi_finalized) {
backend_fatal_aggregate(type, comm, name, error_code, arglist);
} else {
backend_fatal_no_aggregate(type, comm, name, error_code, arglist);

View file

@ -19,9 +19,13 @@
#ifndef OMPI_CONSTANTS_H
#define OMPI_CONSTANTS_H
#if defined(OMPI_RTE_ORTE) && OMPI_RTE_ORTE
#include "orte/constants.h"
#define OMPI_ERR_BASE ORTE_ERR_MAX
#else
#include "opal/constants.h"
#define OMPI_ERR_BASE OPAL_ERR_MAX
#endif
/* error codes */
enum {

View file

@ -20,8 +20,6 @@
#include "opal/mca/base/base.h"
#include "opal/util/argv.h"
#include "orte/util/show_help.h"
#include "ompi/mca/bcol/bcol.h"
#include "ompi/mca/bcol/base/base.h"
#include "ompi/include/ompi/constants.h"

View file

@ -21,9 +21,6 @@
#include "ompi/proc/proc.h"
#include "ompi/mca/common/netpatterns/common_netpatterns.h"
#include "orte/util/name_fns.h"
#include "orte/util/proc_info.h"
#include "opal/mca/mca.h"
#include "opal/util/arch.h"
#include "opal/util/argv.h"
@ -258,8 +255,8 @@ static inline int mca_bcol_basesmuma_err(const char* fmt, ...)
do { \
if(mca_bcol_basesmuma_component.verbose >= level) { \
mca_bcol_basesmuma_err("[%s]%s[%s:%d:%s] BCOL-BASESMUMA ", \
orte_process_info.nodename, \
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME), \
ompi_process_info.nodename, \
OMPI_NAME_PRINT(OMPI_PROC_MY_NAME), \
__FILE__, __LINE__, __func__); \
mca_bcol_basesmuma_err args; \
mca_bcol_basesmuma_err("\n"); \

View file

@ -1,6 +1,8 @@
/*
* Copyright (c) 2009-2012 Oak Ridge National Laboratory. All rights reserved.
* Copyright (c) 2009-2012 Mellanox Technologies. All rights reserved.
* Copyright (c) 2012 Los Alamos National Security, LLC.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -19,13 +21,6 @@
#include "ompi/mca/bcol/base/base.h"
#include "ompi/mca/coll/ml/coll_ml.h"
#include "ompi/mca/common/commpatterns/common_coll_ops.h"
#include "ompi/mca/dpm/dpm.h"
#include "orte/mca/rml/rml.h"
#include "orte/mca/rml/rml_types.h"
#include "orte/mca/grpcomm/grpcomm.h"
#include "orte/mca/rml/rml.h"
#include "opal/dss/dss.h"

View file

@ -22,9 +22,6 @@
#include "ompi/mca/bcol/base/base.h"
#include "ompi/mca/coll/ml/coll_ml.h"
#include "orte/mca/rml/rml.h"
#include "orte/util/proc_info.h"
#include "bcol_basesmuma.h"
/*
* Public string showing the coll ompi_sm V2 component version number
@ -371,7 +368,7 @@ int mca_bcol_basesmuma_allocate_sm_ctl_memory(mca_bcol_basesmuma_component_t *cs
/* set the file name */
name_length=asprintf(&name,
"%s"OPAL_PATH_SEP"%s""%0d",
orte_process_info.job_session_dir,
ompi_process_info.job_session_dir,
cs->clt_base_fname,
(int)getpid());
if( 0 > name_length ) {
@ -410,7 +407,7 @@ int mca_bcol_basesmuma_allocate_sm_ctl_memory(mca_bcol_basesmuma_component_t *cs
ctl_length,getpagesize(),name);
if( !cs->sm_ctl_structs) {
fprintf(stderr," In mca_bcol_basesmuma_allocate_sm_ctl_memory failed to allocathe backing file %s \n",name);
ret=ORTE_ERR_OUT_OF_RESOURCE;
ret=OMPI_ERR_OUT_OF_RESOURCE;
goto Error;
}

View file

@ -1,6 +1,8 @@
/*
* Copyright (c) 2009-2012 Oak Ridge National Laboratory. All rights reserved.
* Copyright (c) 2009-2012 Mellanox Technologies. All rights reserved.
* Copyright (c) 2012 Los Alamos National Security, LLC.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -18,14 +20,8 @@
#include "ompi/communicator/communicator.h"
#include "ompi/mca/bcol/bcol.h"
#include "ompi/mca/bcol/base/base.h"
#include "ompi/mca/dpm/dpm.h"
#include "ompi/mca/common/netpatterns/common_netpatterns.h"
#include "orte/mca/grpcomm/grpcomm.h"
#include "orte/mca/rml/rml.h"
#include "orte/util/proc_info.h"
#include "opal/util/show_help.h"
#include "opal/align.h"
@ -428,7 +424,7 @@ mca_bcol_basesmuma_comm_query(mca_sbgp_base_module_t *module, int *num_modules)
/* give the payload sm file a name */
name_length=asprintf(&name,
"%s"OPAL_PATH_SEP"0%s%0d",
orte_process_info.job_session_dir,
ompi_process_info.job_session_dir,
cs->payload_base_fname,
(int)getpid());
if( 0 > name_length ) {

View file

@ -1,6 +1,8 @@
/*
* Copyright (c) 2009-2012 Oak Ridge National Laboratory. All rights reserved.
* Copyright (c) 2009-2012 Mellanox Technologies. All rights reserved.
* Copyright (c) 2012 Los Alamos National Security, LLC.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -20,14 +22,8 @@
#include "ompi/mca/mpool/base/base.h"
#include "ompi/mca/bcol/bcol.h"
#include "ompi/mca/bcol/base/base.h"
#include "ompi/mca/dpm/dpm.h"
#include "ompi/mca/common/commpatterns/common_coll_ops.h"
#include "orte/mca/rml/rml.h"
#include "orte/mca/rml/rml_types.h"
#include "orte/mca/grpcomm/grpcomm.h"
#include "orte/mca/rml/rml.h"
#include "opal/class/opal_object.h"
#include "opal/dss/dss.h"
@ -123,7 +119,7 @@ int base_bcol_basesmuma_exchange_offsets(
int ret=OMPI_SUCCESS,i,dummy;
int index_in_group, pcnt;
opal_list_t peers;
orte_namelist_t *peer;
ompi_namelist_t *peer;
ompi_proc_t *proc_temp, *my_id;
opal_buffer_t *send_buffer = OBJ_NEW(opal_buffer_t);
opal_buffer_t *recv_buffer = OBJ_NEW(opal_buffer_t);
@ -139,10 +135,10 @@ int base_bcol_basesmuma_exchange_offsets(
proc_temp = ompi_comm_peer_lookup(
sm_bcol_module->super.sbgp_partner_module->group_comm,
sm_bcol_module->super.sbgp_partner_module->group_list[i]);
peer = OBJ_NEW(orte_namelist_t);
peer = OBJ_NEW(ompi_namelist_t);
peer->name.jobid = proc_temp->proc_name.jobid;
peer->name.vpid = proc_temp->proc_name.vpid;
opal_list_append(&peers,&peer->super); /* this is with the new field called "super" in orte_namelist_t struct */
opal_list_append(&peers,&peer->super); /* this is with the new field called "super" in ompi_namelist_t struct */
}
/* pack up the data into the allgather send buffer */
if (NULL == send_buffer || NULL == recv_buffer) {
@ -159,7 +155,7 @@ int base_bcol_basesmuma_exchange_offsets(
ret = opal_dss.pack(send_buffer,
&(sm_bcol_module->super.sbgp_partner_module->my_index),1,OPAL_UINT32);
if (ORTE_SUCCESS != ret) {
if (OMPI_SUCCESS != ret) {
goto ERROR;
fprintf(stderr,"ORTE error packing my_index!!\n");
fflush(stderr);
@ -167,15 +163,15 @@ int base_bcol_basesmuma_exchange_offsets(
/* pack the offset of the allocated region */
ret = opal_dss.pack(send_buffer,&(mem_offset),1,OPAL_UINT64);
if (ORTE_SUCCESS != ret) {
if (OMPI_SUCCESS != ret) {
goto ERROR;
}
/* get the offsets from all procs, so can setup the control data
* structures.
*/
if (ORTE_SUCCESS != (ret = orte_grpcomm.allgather_list(&peers, send_buffer, recv_buffer))) {
fprintf(stderr,"orte_grpcomm.allgather_list returned error %d\n", ret);
if (OMPI_SUCCESS != (ret = ompi_rte_allgather_list(&peers, send_buffer, recv_buffer))) {
fprintf(stderr,"ompi_rte_allgather_list returned error %d\n", ret);
fflush(stderr);
goto ERROR;
}
@ -183,7 +179,7 @@ int base_bcol_basesmuma_exchange_offsets(
/* unpack the dummy */
pcnt=1;
ret = opal_dss.unpack(recv_buffer,&dummy, &pcnt, OPAL_INT32);
if (ORTE_SUCCESS != ret) {
if (OMPI_SUCCESS != ret) {
fprintf(stderr,"unpack returned error %d for dummy \n",ret);
fflush(stderr);
goto ERROR;
@ -199,7 +195,7 @@ int base_bcol_basesmuma_exchange_offsets(
int array_id;
pcnt=1;
ret = opal_dss.unpack(recv_buffer,&index_in_group, &pcnt, OPAL_UINT32);
if (ORTE_SUCCESS != ret) {
if (OMPI_SUCCESS != ret) {
fprintf(stderr,"unpack returned error %d for remote index_in_group \n",ret);
fflush(stderr);
goto ERROR;
@ -208,7 +204,7 @@ int base_bcol_basesmuma_exchange_offsets(
/* get the offset */
pcnt=1;
ret = opal_dss.unpack(recv_buffer,&rem_mem_offset, &pcnt, OPAL_UINT64);
if (ORTE_SUCCESS != ret) {
if (OMPI_SUCCESS != ret) {
fprintf(stderr,"unpack returned error %d for remote memory offset \n",ret);
fflush(stderr);
goto ERROR;
@ -220,10 +216,10 @@ int base_bcol_basesmuma_exchange_offsets(
}
/* clean up */
peer=(orte_namelist_t *)opal_list_remove_first(&peers);
peer=(ompi_namelist_t *)opal_list_remove_first(&peers);
while( NULL !=peer) {
OBJ_RELEASE(peer);
peer=(orte_namelist_t *)opal_list_remove_first(&peers);
peer=(ompi_namelist_t *)opal_list_remove_first(&peers);
}
OBJ_DESTRUCT(&peers);
if( send_buffer ) {
@ -238,10 +234,10 @@ int base_bcol_basesmuma_exchange_offsets(
ERROR:
/* free peer list */
peer=(orte_namelist_t *)opal_list_remove_first(&peers);
peer=(ompi_namelist_t *)opal_list_remove_first(&peers);
while( NULL !=peer) {
OBJ_RELEASE(peer);
peer=(orte_namelist_t *)opal_list_remove_first(&peers);
peer=(ompi_namelist_t *)opal_list_remove_first(&peers);
}
OBJ_DESTRUCT(&peers);
if( send_buffer ) {

View file

@ -2,6 +2,8 @@
*
* Copyright (c) 2009-2012 Oak Ridge National Laboratory. All rights reserved.
* Copyright (c) 2009-2012 Mellanox Technologies. All rights reserved.
* Copyright (c) 2012 Los Alamos National Security, LLC.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -16,18 +18,9 @@
#include <fcntl.h>
#include <errno.h>
#include "ompi/mca/dpm/dpm.h"
#include "ompi/proc/proc.h"
#include "ompi/mca/common/commpatterns/common_coll_ops.h"
#include "orte/util/show_help.h"
#include "orte/util/name_fns.h"
#include "orte/mca/rml/rml.h"
#include "orte/mca/rml/rml_types.h"
#include "orte/mca/grpcomm/grpcomm.h"
#include "orte/mca/errmgr/errmgr.h"
#include "orte/util/show_help.h"
#include "opal/dss/dss.h"
#include "opal/util/error.h"
#include "opal/util/output.h"
@ -52,7 +45,7 @@ struct file_info_t {
/* need to allocate space for the peer */
static void bcol_basesmuma_smcm_proc_item_t_construct
(bcol_basesmuma_smcm_proc_item_t * item) {
(bcol_basesmuma_smcm_proc_item_t * item) {
}
/* need to free the space for the peer */
@ -306,13 +299,13 @@ int bcol_basesmuma_smcm_allgather_connection(
temp->peer.jobid = rem_jobid;
temp->sm_file.file_name = (char *) malloc(len_other+1);
if( !temp->sm_file.file_name) {
rc = ORTE_ERR_OUT_OF_RESOURCE;
rc = OMPI_ERR_OUT_OF_RESOURCE;
goto Error;
}
cpy_ret=strncpy(temp->sm_file.file_name,&(all_files[i].file_name[0]),
len_other);
if( !cpy_ret ) {
rc = ORTE_ERROR;
rc = OMPI_ERROR;
goto Error;
}
temp->sm_file.file_name[len_other]='\0';

View file

@ -18,8 +18,6 @@
#include "ompi_config.h"
#include "ompi/proc/proc.h"
#include "orte/util/name_fns.h"
#include "opal/class/opal_object.h"
#include "opal/class/opal_list.h"
#include "opal/sys/atomic.h"
@ -78,7 +76,7 @@ typedef struct bcol_basesmuma_smcm_file_t bcol_basesmuma_smcm_file_t;
struct bcol_basesmuma_smcm_proc_item_t {
opal_list_item_t item; /* can put me on a free list */
orte_process_name_t peer;
ompi_process_name_t peer;
bcol_basesmuma_smcm_file_t sm_file;
bcol_basesmuma_smcm_mmap_t *sm_mmap; /* Pointer to peer's sm file */

View file

@ -1,6 +1,8 @@
/*
* Copyright (c) 2009-2012 Oak Ridge National Laboratory. All rights reserved.
* Copyright (c) 2009-2012 Mellanox Technologies. All rights reserved.
* Copyright (c) 2012 Los Alamos National Security, LLC.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -13,11 +15,6 @@
#include "ompi_config.h"
#include "orte/util/proc_info.h"
#include "orte/util/name_fns.h"
#include "orte/runtime/orte_globals.h"
BEGIN_C_DECLS
#define BASESMUMA_K_NOMIAL_SEND_CHILDREN(radix_mask,radix,relative_index, \

View file

@ -155,7 +155,7 @@ enum ompi_op_type {
* @param[in] enable_mpi_threads True if the component needs to
* support MPI_THREAD_MULTIPLE
*
* @retval ORTE_SUCCESS Component successfully initialized
* @retval OMPI_SUCCESS Component successfully initialized
* @retval ORTE_ERROR An unspecified error occurred
*/
typedef int (*mca_bcol_base_component_init_query_fn_t)

View file

@ -1,6 +1,8 @@
/*
* Copyright (c) 2009-2012 Oak Ridge National Laboratory. All rights reserved.
* Copyright (c) 2009-2012 Mellanox Technologies. All rights reserved.
* Copyright (c) 2012 Los Alamos National Security, LLC.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -22,10 +24,6 @@
#include "opal/mca/mca.h"
#include "orte/util/proc_info.h"
#include "orte/util/name_fns.h"
#include "orte/runtime/orte_globals.h"
#include "ompi/op/op.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/datatype/ompi_datatype_internal.h"
@ -475,8 +473,8 @@ do {
#define IBOFFLOAD_ERROR(args) \
do { \
mca_bcol_iboffload_err("[%s]%s[%s:%d:%s] IBOFFLOAD ", \
orte_process_info.nodename, \
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME), \
ompi_process_info.nodename, \
OMPI_NAME_PRINT(OMPI_PROC_MY_NAME), \
__FILE__, __LINE__, __func__); \
mca_bcol_iboffload_err args; \
mca_bcol_iboffload_err("\n"); \
@ -487,8 +485,8 @@ do {
do { \
if (mca_bcol_iboffload_component.verbose >= level) { \
mca_bcol_iboffload_err("[%s]%s[%s:%d:%s] IBOFFLOAD ", \
orte_process_info.nodename, \
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME), \
ompi_process_info.nodename, \
OMPI_NAME_PRINT(OMPI_PROC_MY_NAME), \
__FILE__, __LINE__, __func__); \
mca_bcol_iboffload_err args; \
mca_bcol_iboffload_err("\n"); \

View file

@ -27,9 +27,6 @@
#include "ompi/mca/common/ofacm/base.h"
#include "ompi/mca/common/verbs/common_verbs.h"
#include "orte/mca/rml/rml.h"
#include "orte/util/show_help.h"
#include "opal/util/argv.h"
#include "opal/include/opal/types.h"
@ -324,7 +321,7 @@ static int iboffload_load_devices(void)
if (0 == num_devs || NULL == cm->ib_devs) {
IBOFFLOAD_ERROR(("No IB devices found"));
/* No hca error*/
orte_show_help("help-mpi-btl-openib.txt", "no-nics", true);
ompi_show_help("help-mpi-btl-openib.txt", "no-nics", true);
return OMPI_ERROR;
}
@ -548,9 +545,9 @@ static int setup_qps(void)
queues = opal_argv_split(mca_bcol_iboffload_component.receive_queues, ':');
if (0 == opal_argv_count(queues)) {
orte_show_help("help-mpi-btl-openib.txt",
ompi_show_help("help-mpi-btl-openib.txt",
"no qps in receive_queues", true,
orte_process_info.nodename,
ompi_process_info.nodename,
mca_bcol_iboffload_component.receive_queues);
ret = OMPI_ERROR;
@ -567,16 +564,16 @@ static int setup_qps(void)
#if HAVE_XRC
type = MCA_BCOL_IBOFFLOAD_XRC_QP;
#else
orte_show_help("help-mpi-btl-openib.txt", "No XRC support", true,
orte_process_info.nodename,
ompi_show_help("help-mpi-btl-openib.txt", "No XRC support", true,
ompi_process_info.nodename,
mca_bcol_iboffload_component.receive_queues);
ret = OMPI_ERR_NOT_AVAILABLE;
goto exit;
#endif
} else {
orte_show_help("help-mpi-btl-openib.txt",
ompi_show_help("help-mpi-btl-openib.txt",
"invalid qp type in receive_queues", true,
orte_process_info.nodename,
ompi_process_info.nodename,
mca_bcol_iboffload_component.receive_queues,
queues[qp]);
@ -600,9 +597,9 @@ static int setup_qps(void)
if ('P' == params[0][0]) {
if (count < 3 || count > 6) {
orte_show_help("help-mpi-btl-openib.txt",
ompi_show_help("help-mpi-btl-openib.txt",
"invalid pp qp specification", true,
orte_process_info.nodename, queues[qp]);
ompi_process_info.nodename, queues[qp]);
ret = OMPI_ERR_BAD_PARAM;
@ -620,14 +617,14 @@ static int setup_qps(void)
if ((rd_num - rd_low) > rd_win) {
orte_show_help("help-mpi-btl-openib.txt", "non optimal rd_win",
ompi_show_help("help-mpi-btl-openib.txt", "non optimal rd_win",
true, rd_win, rd_num - rd_low);
}
} else {
if (count < 3 || count > 5) {
orte_show_help("help-mpi-btl-openib.txt",
ompi_show_help("help-mpi-btl-openib.txt",
"invalid srq specification", true,
orte_process_info.nodename, queues[qp]);
ompi_process_info.nodename, queues[qp]);
ret = OMPI_ERR_BAD_PARAM;
@ -647,8 +644,8 @@ static int setup_qps(void)
}
if (rd_num <= rd_low) {
orte_show_help("help-mpi-btl-openib.txt", "rd_num must be > rd_low",
true, orte_process_info.nodename, queues[qp]);
ompi_show_help("help-mpi-btl-openib.txt", "rd_num must be > rd_low",
true, ompi_process_info.nodename, queues[qp]);
ret = OMPI_ERR_BAD_PARAM;
goto exit;

View file

@ -18,8 +18,6 @@
#include "bcol_iboffload.h"
#include "bcol_iboffload_mca.h"
#include "orte/util/show_help.h"
#include "ompi/constants.h"
#include "ompi/mca/common/ofacm/base.h"
#include "ompi/communicator/communicator.h"
@ -255,7 +253,7 @@ int mca_bcol_iboffload_register_params(void)
free(msg);
if (ival < IBV_MTU_1024 || ival > IBV_MTU_4096) {
orte_show_help("help-mpi-bcol-iboffload.txt", "invalid mca param value",
ompi_show_help("help-mpi-bcol-iboffload.txt", "invalid mca param value",
true, "invalid value for bcol_iboffload_ib_mtu",
"bcol_iboffload_ib_mtu reset to 1024");
mca_bcol_iboffload_component.mtu = IBV_MTU_1024;
@ -269,12 +267,12 @@ int mca_bcol_iboffload_register_params(void)
1 , &ival, 0));
if (ival > 31) {
orte_show_help("help-mpi-bcol-iboffload.txt", "invalid mca param value",
ompi_show_help("help-mpi-bcol-iboffload.txt", "invalid mca param value",
true, "bcol_iboffload_ib_min_rnr_timer > 31",
"bcol_iboffload_ib_min_rnr_timer reset to 31");
ival = 31;
} else if (ival < 0){
orte_show_help("help-mpi-bcol-iboffload.txt", "invalid mca param value",
ompi_show_help("help-mpi-bcol-iboffload.txt", "invalid mca param value",
true, "bcol_iboffload_ib_min_rnr_timer < 0",
"bcol_iboffload_ib_min_rnr_timer reset to 0");
ival = 0;
@ -286,12 +284,12 @@ int mca_bcol_iboffload_register_params(void)
"(must be >= 0 and <= 31)",
20, &ival, 0));
if (ival > 31) {
orte_show_help("help-mpi-bcol-iboffload.txt", "invalid mca param value",
ompi_show_help("help-mpi-bcol-iboffload.txt", "invalid mca param value",
true, "bcol_iboffload_ib_timeout > 31",
"bcol_iboffload_ib_timeout reset to 31");
ival = 31;
} else if (ival < 0) {
orte_show_help("help-mpi-bcol-iboffload.txt", "invalid mca param value",
ompi_show_help("help-mpi-bcol-iboffload.txt", "invalid mca param value",
true, "bcol_iboffload_ib_timeout < 0",
"bcol_iboffload_ib_timeout reset to 0");
ival = 0;
@ -303,12 +301,12 @@ int mca_bcol_iboffload_register_params(void)
"(must be >= 0 and <= 7)",
7, &ival, 0));
if (ival > 7) {
orte_show_help("help-mpi-bcol-iboffload.txt", "invalid mca param value",
ompi_show_help("help-mpi-bcol-iboffload.txt", "invalid mca param value",
true, "bcol_iboffload_ib_retry_count > 7",
"bcol_iboffload_ib_retry_count reset to 7");
ival = 7;
} else if (ival < 0) {
orte_show_help("help-mpi-bcol-iboffload.txt", "invalid mca param value",
ompi_show_help("help-mpi-bcol-iboffload.txt", "invalid mca param value",
true, "bcol_iboffload_ib_retry_count < 0",
"bcol_iboffload_ib_retry_count reset to 0");
ival = 0;
@ -323,12 +321,12 @@ int mca_bcol_iboffload_register_params(void)
"(must be >= 0 and <= 7; 7 = \"infinite\")",
7, &ival, 0));
if (ival > 7) {
orte_show_help("help-mpi-bcol-iboffload.txt", "invalid mca param value",
ompi_show_help("help-mpi-bcol-iboffload.txt", "invalid mca param value",
true, "bcol_iboffload_ib_rnr_retry > 7",
"bcol_iboffload_ib_rnr_retry reset to 7");
ival = 7;
} else if (ival < 0) {
orte_show_help("help-mpi-bcol-iboffload.txt", "invalid mca param value",
ompi_show_help("help-mpi-bcol-iboffload.txt", "invalid mca param value",
true, "bcol_iboffload_ib_rnr_retry < 0",
"bcol_iboffload_ib_rnr_retry reset to 0");
ival = 0;
@ -346,12 +344,12 @@ int mca_bcol_iboffload_register_params(void)
"(must be >= 0 and <= 15)",
0, &ival, 0));
if (ival > 15) {
orte_show_help("help-mpi-bcol-iboffload.txt", "invalid mca param value",
ompi_show_help("help-mpi-bcol-iboffload.txt", "invalid mca param value",
true, "bcol_iboffload_ib_service_level > 15",
"bcol_iboffload_ib_service_level reset to 15");
ival = 15;
} else if (ival < 0) {
orte_show_help("help-mpi-bcol-iboffload.txt", "invalid mca param value",
ompi_show_help("help-mpi-bcol-iboffload.txt", "invalid mca param value",
true, "bcol_iboffload_ib_service_level < 0",
"bcol_iboffload_ib_service_level reset to 0");
ival = 0;
@ -382,8 +380,8 @@ int mca_bcol_iboffload_register_params(void)
"(must be > 0 and power of two)",
64, &ival, REGINT_GE_ZERO));
if(ival <= 1 || (ival & (ival - 1))) {
orte_show_help("help-mpi-bcol-iboffload.txt", "wrong buffer alignment",
true, ival, orte_process_info.nodename, 64);
ompi_show_help("help-mpi-bcol-iboffload.txt", "wrong buffer alignment",
true, ival, ompi_process_info.nodename, 64);
mca_bcol_iboffload_component.buffer_alignment = 64;
} else {
mca_bcol_iboffload_component.buffer_alignment = (uint32_t) ival;

View file

@ -24,7 +24,6 @@
#include "ompi/mca/bcol/bcol.h"
#include "bcol_ptpcoll.h"
#include "ompi/mca/bcol/base/base.h"
#include "orte/mca/rml/rml.h"
#include "bcol_ptpcoll_mca.h"
#include "bcol_ptpcoll_utils.h"

View file

@ -1,6 +1,8 @@
/*
* Copyright (c) 2009-2012 Oak Ridge National Laboratory. All rights reserved.
* Copyright (c) 2009-2012 Mellanox Technologies. All rights reserved.
* Copyright (c) 2012 Los Alamos National Security, LLC.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -25,9 +27,6 @@
#include "ompi/mca/bcol/bcol.h"
#include "opal/util/show_help.h"
#include "ompi/mca/bcol/base/base.h"
#include "ompi/mca/dpm/dpm.h"
#include "orte/mca/rml/rml.h"
#include "orte/util/proc_info.h"
#include "ompi/mca/pml/pml.h" /* need this for the max tag size */
#include "ompi/mca/coll/ml/coll_ml.h"

View file

@ -1,6 +1,8 @@
/*
* Copyright (c) 2009-2012 Oak Ridge National Laboratory. All rights reserved.
* Copyright (c) 2009-2012 Mellanox Technologies. All rights reserved.
* Copyright (c) 2012 Los Alamos National Security, LLC.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -12,9 +14,8 @@
#define MCA_BCOL_PTPCOLL_UTILS_H
#include "ompi_config.h"
#include "orte/util/proc_info.h"
#include "orte/util/name_fns.h"
#include "orte/runtime/orte_globals.h"
#include "ompi/mca/rte/rte.h"
BEGIN_C_DECLS
@ -51,8 +52,8 @@ static inline int mca_bcol_ptpcoll_err(const char* fmt, ...)
#define PTPCOLL_ERROR(args) \
do { \
mca_bcol_ptpcoll_err("[%s]%s[%s:%d:%s] PTPCOLL ", \
orte_process_info.nodename, \
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME), \
ompi_process_info.nodename, \
OMPI_NAME_PRINT(OMPI_PROC_MY_NAME), \
__FILE__, __LINE__, __func__); \
mca_bcol_ptpcoll_err args; \
mca_bcol_ptpcoll_err("\n"); \
@ -63,8 +64,8 @@ static inline int mca_bcol_ptpcoll_err(const char* fmt, ...)
do { \
if (mca_bcol_ptpcoll_component.verbose >= level) { \
mca_bcol_ptpcoll_err("[%s]%s[%s:%d:%s] PTPCOLL ", \
orte_process_info.nodename, \
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME), \
ompi_process_info.nodename, \
OMPI_NAME_PRINT(OMPI_PROC_MY_NAME), \
__FILE__, __LINE__, __func__); \
mca_bcol_ptpcoll_err args; \
mca_bcol_ptpcoll_err("\n"); \

View file

@ -28,14 +28,12 @@
#include "opal/class/opal_bitmap.h"
#include "opal/util/argv.h"
#include "opal/util/output.h"
#include "orte/util/show_help.h"
#include "ompi/mca/bml/bml.h"
#include "ompi/mca/bml/base/base.h"
#include "ompi/mca/btl/btl.h"
#include "ompi/mca/btl/base/base.h"
#include "ompi/mca/bml/base/bml_base_btl.h"
#include "bml_r2.h"
#include "orte/util/name_fns.h"
#include "ompi/proc/proc.h"
extern mca_bml_base_component_t mca_bml_r2_component;
@ -405,13 +403,13 @@ static int mca_bml_r2_add_procs( size_t nprocs,
if (mca_bml_r2.show_unreach_errors &&
OMPI_ERR_UNREACH == ret) {
orte_show_help("help-mca-bml-r2.txt",
ompi_show_help("help-mca-bml-r2.txt",
"unreachable proc",
true,
ORTE_NAME_PRINT(&(ompi_proc_local_proc->proc_name)),
OMPI_NAME_PRINT(&(ompi_proc_local_proc->proc_name)),
(ompi_proc_local_proc->proc_hostname ?
ompi_proc_local_proc->proc_hostname : "unknown!"),
ORTE_NAME_PRINT(&(unreach_proc->proc_name)),
OMPI_NAME_PRINT(&(unreach_proc->proc_name)),
(unreach_proc->proc_hostname ?
unreach_proc->proc_hostname : "unknown!"),
btl_names);

View file

@ -27,9 +27,6 @@
#include "opal/runtime/opal_progress.h"
#include "orte/mca/grpcomm/grpcomm.h"
#include "orte/util/proc_info.h"
#include "ompi/runtime/ompi_cr.h"
#include "ompi/mca/bml/base/base.h"
#include "ompi/mca/btl/base/base.h"
@ -42,7 +39,7 @@
int mca_bml_r2_ft_event(int state)
{
#if !ORTE_DISABLE_FULL_SUPPORT
#if OPAL_ENABLE_FT_CR == 1
static bool first_continue_pass = false;
ompi_proc_t** procs = NULL;
size_t num_procs;
@ -51,7 +48,7 @@ int mca_bml_r2_ft_event(int state)
int loc_state;
int param_type = -1;
char *param_list = NULL;
orte_grpcomm_collective_t coll;
ompi_rte_collective_t coll;
if(OPAL_CRS_CHECKPOINT == state) {
/* Do nothing for now */
@ -158,10 +155,10 @@ int mca_bml_r2_ft_event(int state)
* Barrier to make all processes have been successfully restarted before
* we try to remove some restart only files.
*/
OBJ_CONSTRUCT(&coll, orte_grpcomm_collective_t);
coll.id = orte_process_info.peer_init_barrier;
if (OMPI_SUCCESS != (ret = orte_grpcomm.barrier(&coll))) {
opal_output(0, "bml:r2: ft_event(Restart): Failed in orte_grpcomm.barrier (%d)", ret);
OBJ_CONSTRUCT(&coll, ompi_rte_collective_t);
coll.id = ompi_process_info.peer_init_barrier;
if (OMPI_SUCCESS != (ret = ompi_rte_barrier(&coll))) {
opal_output(0, "bml:r2: ft_event(Restart): Failed in ompi_rte_barrier (%d)", ret);
return ret;
}
while (coll.active) {
@ -236,10 +233,10 @@ int mca_bml_r2_ft_event(int state)
* Barrier to make all processes have been successfully restarted before
* we try to remove some restart only files.
*/
OBJ_CONSTRUCT(&coll, orte_grpcomm_collective_t);
coll.id = orte_process_info.peer_init_barrier;
if (OMPI_SUCCESS != (ret = orte_grpcomm.barrier(&coll))) {
opal_output(0, "bml:r2: ft_event(Restart): Failed in orte_grpcomm.barrier (%d)", ret);
OBJ_CONSTRUCT(&coll, ompi_rte_collective_t);
coll.id = ompi_process_info.peer_init_barrier;
if (OMPI_SUCCESS != (ret = ompi_rte_barrier(&coll))) {
opal_output(0, "bml:r2: ft_event(Restart): Failed in ompi_rte_barrier (%d)", ret);
return ret;
}
while (coll.active) {

View file

@ -10,6 +10,8 @@
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2006-2007 Sun Microsystems, Inc. All rights reserved.
* Copyright (c) 2012 Los Alamos National Security, LLC.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -22,15 +24,11 @@
#include <stdio.h>
#include <stdarg.h>
#include "ompi/mca/rte/rte.h"
#include "base.h"
#include "btl_base_error.h"
#include "orte/util/show_help.h"
#include "orte/util/proc_info.h"
#include "orte/types.h"
#include "orte/util/name_fns.h"
#include "orte/runtime/orte_globals.h"
int mca_btl_base_verbose = -1;
int mca_btl_base_err(const char* fmt, ...)
@ -63,10 +61,10 @@ void mca_btl_base_error_no_nics(const char* transport,
char *procid;
if (mca_btl_base_warn_component_unused) {
/* print out no-nic warning if user told us to */
asprintf(&procid, "%s", ORTE_NAME_PRINT(ORTE_PROC_MY_NAME));
asprintf(&procid, "%s", OMPI_NAME_PRINT(OMPI_PROC_MY_NAME));
orte_show_help("help-mpi-btl-base.txt", "btl:no-nics",
true, procid, transport, orte_process_info.nodename,
ompi_show_help("help-mpi-btl-base.txt", "btl:no-nics",
true, procid, transport, ompi_process_info.nodename,
nic_name);
free(procid);
}

View file

@ -11,7 +11,9 @@
* All rights reserved.
* Copyright (c) 2007-2008 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2007 Sun Microsystems, Inc. All rights reserved.
* $COPYRIGHT$
* Copyright (c) 2012 Los Alamos National Security, LLC.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
@ -26,9 +28,7 @@
#include <errno.h>
#include <stdio.h>
#include "orte/util/proc_info.h"
#include "orte/util/name_fns.h"
#include "orte/runtime/orte_globals.h"
#include "ompi/mca/rte/rte.h"
OMPI_DECLSPEC extern int mca_btl_base_verbose;
@ -38,8 +38,8 @@ OMPI_DECLSPEC extern int mca_btl_base_out(const char*, ...) __opal_attribute_for
#define BTL_OUTPUT(args) \
do { \
mca_btl_base_out("[%s]%s[%s:%d:%s] ", \
orte_process_info.nodename, \
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME), \
ompi_process_info.nodename, \
OMPI_NAME_PRINT(OMPI_PROC_MY_NAME), \
__FILE__, __LINE__, __func__); \
mca_btl_base_out args; \
mca_btl_base_out("\n"); \
@ -49,8 +49,8 @@ do { \
#define BTL_ERROR(args) \
do { \
mca_btl_base_err("[%s]%s[%s:%d:%s] ", \
orte_process_info.nodename, \
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME), \
ompi_process_info.nodename, \
OMPI_NAME_PRINT(OMPI_PROC_MY_NAME), \
__FILE__, __LINE__, __func__); \
mca_btl_base_err args; \
mca_btl_base_err("\n"); \
@ -59,9 +59,9 @@ do { \
#define BTL_PEER_ERROR(proc, args) \
do { \
mca_btl_base_err("%s[%s:%d:%s] from %s ", \
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME), \
OMPI_NAME_PRINT(OMPI_PROC_MY_NAME), \
__FILE__, __LINE__, __func__, \
orte_process_info.nodename); \
ompi_process_info.nodename); \
if(proc && proc->proc_hostname) { \
mca_btl_base_err("to: %s ", proc->proc_hostname); \
} \
@ -75,8 +75,8 @@ do { \
do { \
if(mca_btl_base_verbose > 0) { \
mca_btl_base_err("[%s]%s[%s:%d:%s] ", \
orte_process_info.nodename, \
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME), \
ompi_process_info.nodename, \
OMPI_NAME_PRINT(OMPI_PROC_MY_NAME), \
__FILE__, __LINE__, __func__); \
mca_btl_base_err args; \
mca_btl_base_err("\n"); \

View file

@ -10,6 +10,8 @@
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2007 Sun Microsystems, Inc. All rights reserved.
* Copyright (c) 2012 Los Alamos National Security, LLC.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -26,8 +28,7 @@
#include "opal/mca/base/mca_base_component_repository.h"
#include "opal/runtime/opal.h"
#include "orte/util/show_help.h"
#include "orte/mca/errmgr/errmgr.h"
#include "ompi/mca/rte/rte.h"
#include "ompi/mca/btl/btl.h"
#include "ompi/mca/btl/base/btl_base_error.h"
@ -162,9 +163,9 @@ int mca_btl_base_select(bool enable_progress_threads,
/* Finished querying all components. Check for the bozo case. */
if (0 == opal_list_get_size(&mca_btl_base_modules_initialized)) {
orte_show_help("help-mca-base.txt", "find-available:none-found", true,
ompi_show_help("help-mca-base.txt", "find-available:none-found", true,
"btl");
orte_errmgr.abort(1, NULL);
ompi_rte_abort(1, NULL);
}
return OMPI_SUCCESS;
}

View file

@ -18,7 +18,6 @@
#include "ompi_config.h"
#include "orte/util/name_fns.h"
#include "ompi/runtime/ompi_module_exchange.h"
#include "btl_mx.h"
@ -121,7 +120,7 @@ mca_btl_mx_proc_t* mca_btl_mx_proc_create(ompi_proc_t* ompi_proc)
ompi_proc, (void*)&mx_peers, &size );
if( OMPI_SUCCESS != rc ) {
opal_output( 0, "mca_pml_base_modex_recv failed for peer %s",
ORTE_NAME_PRINT(&ompi_proc->proc_name) );
OMPI_NAME_PRINT(&ompi_proc->proc_name) );
return NULL;
}
@ -130,7 +129,7 @@ mca_btl_mx_proc_t* mca_btl_mx_proc_create(ompi_proc_t* ompi_proc)
}
if( (size % sizeof(mca_btl_mx_addr_t)) != 0 ) {
opal_output( 0, "invalid mx address for peer %s",
ORTE_NAME_PRINT(&ompi_proc->proc_name) );
OMPI_NAME_PRINT(&ompi_proc->proc_name) );
return NULL;
}
/* Let's see if we have a way to connect to the remote proc using MX.

View file

@ -29,14 +29,12 @@
#ifdef HAVE_INTTYPES_H
#include <inttypes.h>
#endif
#include "orte/util/show_help.h"
#include "orte/runtime/orte_globals.h"
#include "orte/mca/errmgr/base/base.h"
#include "opal/class/opal_bitmap.h"
#include "opal/util/output.h"
#include "opal/util/arch.h"
#include "opal/include/opal_stdint.h"
#include "ompi/mca/rte/rte.h"
#include "ompi/mca/btl/btl.h"
#include "ompi/mca/btl/base/btl_base_error.h"
@ -55,11 +53,12 @@
#include "ompi/mca/mpool/base/base.h"
#include "ompi/mca/mpool/mpool.h"
#include "ompi/mca/mpool/grdma/mpool_grdma.h"
#if OMPI_CUDA_SUPPORT
#include "opal/datatype/opal_datatype_cuda.h"
#include "ompi/mca/common/cuda/common_cuda.h"
#endif /* OMPI_CUDA_SUPPORT */
#include "orte/util/proc_info.h"
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
@ -154,14 +153,14 @@ void mca_btl_openib_show_init_error(const char *file, int line,
}
#endif
orte_show_help("help-mpi-btl-openib.txt", "init-fail-no-mem",
true, orte_process_info.nodename,
ompi_show_help("help-mpi-btl-openib.txt", "init-fail-no-mem",
true, ompi_process_info.nodename,
file, line, func, dev, str_limit);
if (NULL != str_limit) free(str_limit);
} else {
orte_show_help("help-mpi-btl-openib.txt", "init-fail-create-q",
true, orte_process_info.nodename,
ompi_show_help("help-mpi-btl-openib.txt", "init-fail-create-q",
true, ompi_process_info.nodename,
file, line, func, strerror(errno), errno, dev);
}
}
@ -486,9 +485,9 @@ static int mca_btl_openib_tune_endpoint(mca_btl_openib_module_t* openib_btl,
ompi_btl_openib_ini_values_t values;
if(mca_btl_openib_get_transport_type(openib_btl) != endpoint->rem_info.rem_transport_type) {
orte_show_help("help-mpi-btl-openib.txt",
ompi_show_help("help-mpi-btl-openib.txt",
"conflicting transport types", true,
orte_process_info.nodename,
ompi_process_info.nodename,
ibv_get_device_name(openib_btl->device->ib_dev),
(openib_btl->device->ib_dev_attr).vendor_id,
(openib_btl->device->ib_dev_attr).vendor_part_id,
@ -507,9 +506,9 @@ static int mca_btl_openib_tune_endpoint(mca_btl_openib_module_t* openib_btl,
if (OMPI_SUCCESS != ret &&
OMPI_ERR_NOT_FOUND != ret) {
orte_show_help("help-mpi-btl-openib.txt",
ompi_show_help("help-mpi-btl-openib.txt",
"error in device init", true,
orte_process_info.nodename,
ompi_process_info.nodename,
ibv_get_device_name(openib_btl->device->ib_dev));
return ret;
}
@ -548,9 +547,9 @@ static int mca_btl_openib_tune_endpoint(mca_btl_openib_module_t* openib_btl,
if(0 != strcmp(mca_btl_openib_component.receive_queues,
recv_qps)) {
orte_show_help("help-mpi-btl-openib.txt",
ompi_show_help("help-mpi-btl-openib.txt",
"unsupported queues configuration", true,
orte_process_info.nodename,
ompi_process_info.nodename,
ibv_get_device_name(openib_btl->device->ib_dev),
(openib_btl->device->ib_dev_attr).vendor_id,
(openib_btl->device->ib_dev_attr).vendor_part_id,
@ -570,9 +569,9 @@ static int mca_btl_openib_tune_endpoint(mca_btl_openib_module_t* openib_btl,
if(NULL != values.receive_queues) {
if(0 != strcmp(mca_btl_openib_component.receive_queues,
values.receive_queues)) {
orte_show_help("help-mpi-btl-openib.txt",
ompi_show_help("help-mpi-btl-openib.txt",
"unsupported queues configuration", true,
orte_process_info.nodename,
ompi_process_info.nodename,
ibv_get_device_name(openib_btl->device->ib_dev),
(openib_btl->device->ib_dev_attr).vendor_id,
(openib_btl->device->ib_dev_attr).vendor_part_id,
@ -671,11 +670,11 @@ static uint64_t calculate_max_reg (void)
} else {
action = "Your MPI job will continue, but may be behave poorly and/or hang.";
}
orte_show_help("help-mpi-btl-openib.txt", "reg mem limit low", true,
orte_process_info.nodename, (unsigned long)(max_reg >> 20),
ompi_show_help("help-mpi-btl-openib.txt", "reg mem limit low", true,
ompi_process_info.nodename, (unsigned long)(max_reg >> 20),
(unsigned long)(mem_total >> 20), action);
if (mca_btl_openib_component.abort_not_enough_reg_mem) {
orte_errmgr.abort(1, NULL);
ompi_rte_abort(1, NULL);
}
}
@ -740,8 +739,8 @@ int mca_btl_openib_add_procs(
/* OOB, XOOB, and RDMACM do not support SELF comunication, so
* mark the prco as unreachable by openib btl */
if (OPAL_EQUAL == orte_util_compare_name_fields
(ORTE_NS_CMP_ALL, ORTE_PROC_MY_NAME, &ompi_proc->proc_name)) {
if (OPAL_EQUAL == ompi_rte_compare_name_fields
(OMPI_RTE_CMP_ALL, OMPI_PROC_MY_NAME, &ompi_proc->proc_name)) {
continue;
}
#if defined(HAVE_STRUCT_IBV_DEVICE_TRANSPORT_TYPE)

View file

@ -20,8 +20,6 @@
#include <unistd.h>
#include <errno.h>
#include "orte/util/show_help.h"
#include "ompi/mca/btl/base/base.h"
#include "btl_openib.h"
#include "btl_openib_mca.h"
@ -402,15 +400,15 @@ static int btl_openib_async_deviceh(struct mca_btl_openib_async_poll *devices_po
case IBV_EVENT_QP_ACCESS_ERR:
case IBV_EVENT_PATH_MIG_ERR:
case IBV_EVENT_SRQ_ERR:
orte_show_help("help-mpi-btl-openib.txt", "of error event",
true,orte_process_info.nodename, orte_process_info.pid,
ompi_show_help("help-mpi-btl-openib.txt", "of error event",
true,ompi_process_info.nodename, ompi_process_info.pid,
event_type,
openib_event_to_str((enum ibv_event_type)event_type),
xrc_event ? "true" : "false");
break;
case IBV_EVENT_PORT_ERR:
orte_show_help("help-mpi-btl-openib.txt", "of error event",
true,orte_process_info.nodename, orte_process_info.pid,
ompi_show_help("help-mpi-btl-openib.txt", "of error event",
true,ompi_process_info.nodename, ompi_process_info.pid,
event_type,
openib_event_to_str((enum ibv_event_type)event_type),
xrc_event ? "true" : "false");
@ -439,8 +437,8 @@ static int btl_openib_async_deviceh(struct mca_btl_openib_async_poll *devices_po
break;
default:
orte_show_help("help-mpi-btl-openib.txt", "of unknown event",
true,orte_process_info.nodename, orte_process_info.pid,
ompi_show_help("help-mpi-btl-openib.txt", "of unknown event",
true,ompi_process_info.nodename, ompi_process_info.pid,
event_type, xrc_event ? "true" : "false");
}
ibv_ack_async_event(&event);

View file

@ -61,10 +61,6 @@
#include "opal/mca/installdirs/installdirs.h"
#include "opal_stdint.h"
#include "orte/util/show_help.h"
#include "orte/util/proc_info.h"
#include "orte/runtime/orte_globals.h"
#include "ompi/constants.h"
#include "ompi/proc/proc.h"
#include "ompi/mca/btl/btl.h"
@ -73,6 +69,7 @@
#include "ompi/mca/mpool/base/base.h"
#include "ompi/mca/mpool/grdma/mpool_grdma.h"
#include "ompi/mca/btl/base/base.h"
#include "ompi/mca/rte/rte.h"
#include "ompi/runtime/ompi_module_exchange.h"
#include "ompi/runtime/mpiruntime.h"
@ -680,8 +677,8 @@ static int init_one_port(opal_list_t *btl_list, mca_btl_openib_device_t *device,
size. */
if (mca_btl_openib_component.gid_index >
ib_port_attr->gid_tbl_len) {
orte_show_help("help-mpi-btl-openib.txt", "gid index too large",
true, orte_process_info.nodename,
ompi_show_help("help-mpi-btl-openib.txt", "gid index too large",
true, ompi_process_info.nodename,
ibv_get_device_name(device->ib_dev), port_num,
mca_btl_openib_component.gid_index,
ib_port_attr->gid_tbl_len);
@ -738,8 +735,8 @@ static int init_one_port(opal_list_t *btl_list, mca_btl_openib_device_t *device,
if(mca_btl_openib_component.ib_num_btls > 0 &&
IB_DEFAULT_GID_PREFIX == subnet_id &&
mca_btl_openib_component.warn_default_gid_prefix) {
orte_show_help("help-mpi-btl-openib.txt", "default subnet prefix",
true, orte_process_info.nodename);
ompi_show_help("help-mpi-btl-openib.txt", "default subnet prefix",
true, ompi_process_info.nodename);
}
lmc = (1 << ib_port_attr->lmc);
@ -762,7 +759,7 @@ static int init_one_port(opal_list_t *btl_list, mca_btl_openib_device_t *device,
} else if (0 == lmc % (mca_btl_openib_component.apm_lmc + 1)) {
lmc_step = mca_btl_openib_component.apm_lmc + 1;
} else {
orte_show_help("help-mpi-btl-openib.txt", "apm with wrong lmc",true,
ompi_show_help("help-mpi-btl-openib.txt", "apm with wrong lmc",true,
mca_btl_openib_component.apm_lmc, lmc);
return OMPI_ERROR;
}
@ -770,7 +767,7 @@ static int init_one_port(opal_list_t *btl_list, mca_btl_openib_device_t *device,
if (mca_btl_openib_component.apm_lmc) {
/* Disable apm and report warning */
mca_btl_openib_component.apm_lmc = 0;
orte_show_help("help-mpi-btl-openib.txt", "apm without lmc",true);
ompi_show_help("help-mpi-btl-openib.txt", "apm without lmc",true);
}
}
#endif
@ -1062,11 +1059,11 @@ static int prepare_device_for_use(mca_btl_openib_device_t *device)
*/
if (!(device->ib_dev_attr.device_cap_flags & IBV_DEVICE_XRC) &&
MCA_BTL_XRC_ENABLED) {
orte_show_help("help-mpi-btl-openib.txt",
ompi_show_help("help-mpi-btl-openib.txt",
"XRC on device without XRC support", true,
mca_btl_openib_component.num_xrc_qps,
ibv_get_device_name(device->ib_dev),
orte_process_info.nodename);
ompi_process_info.nodename);
return OMPI_ERROR;
}
@ -1371,9 +1368,9 @@ static int setup_qps(void)
queues = opal_argv_split(mca_btl_openib_component.receive_queues, ':');
if (0 == opal_argv_count(queues)) {
orte_show_help("help-mpi-btl-openib.txt",
ompi_show_help("help-mpi-btl-openib.txt",
"no qps in receive_queues", true,
orte_process_info.nodename,
ompi_process_info.nodename,
mca_btl_openib_component.receive_queues);
ret = OMPI_ERROR;
goto error;
@ -1391,16 +1388,16 @@ static int setup_qps(void)
#if HAVE_XRC
num_xrc_qps++;
#else
orte_show_help("help-mpi-btl-openib.txt", "No XRC support", true,
orte_process_info.nodename,
ompi_show_help("help-mpi-btl-openib.txt", "No XRC support", true,
ompi_process_info.nodename,
mca_btl_openib_component.receive_queues);
ret = OMPI_ERR_NOT_AVAILABLE;
goto error;
#endif
} else {
orte_show_help("help-mpi-btl-openib.txt",
ompi_show_help("help-mpi-btl-openib.txt",
"invalid qp type in receive_queues", true,
orte_process_info.nodename,
ompi_process_info.nodename,
mca_btl_openib_component.receive_queues,
queues[qp]);
ret = OMPI_ERR_BAD_PARAM;
@ -1411,8 +1408,8 @@ static int setup_qps(void)
/* Current XRC implementation can't used with other QP types - PP
and SRQ */
if (num_xrc_qps > 0 && (num_pp_qps > 0 || num_srq_qps > 0)) {
orte_show_help("help-mpi-btl-openib.txt", "XRC with PP or SRQ", true,
orte_process_info.nodename,
ompi_show_help("help-mpi-btl-openib.txt", "XRC with PP or SRQ", true,
ompi_process_info.nodename,
mca_btl_openib_component.receive_queues);
ret = OMPI_ERR_BAD_PARAM;
goto error;
@ -1420,8 +1417,8 @@ static int setup_qps(void)
/* Current XRC implementation can't used with btls_per_lid > 1 */
if (num_xrc_qps > 0 && mca_btl_openib_component.btls_per_lid > 1) {
orte_show_help("help-mpi-btl-openib.txt", "XRC with BTLs per LID",
true, orte_process_info.nodename,
ompi_show_help("help-mpi-btl-openib.txt", "XRC with BTLs per LID",
true, ompi_process_info.nodename,
mca_btl_openib_component.receive_queues, num_xrc_qps);
ret = OMPI_ERR_BAD_PARAM;
goto error;
@ -1446,9 +1443,9 @@ static int setup_qps(void)
if ('P' == params[0][0]) {
int32_t rd_win, rd_rsv;
if (count < 3 || count > 6) {
orte_show_help("help-mpi-btl-openib.txt",
ompi_show_help("help-mpi-btl-openib.txt",
"invalid pp qp specification", true,
orte_process_info.nodename, queues[qp]);
ompi_process_info.nodename, queues[qp]);
ret = OMPI_ERR_BAD_PARAM;
goto error;
}
@ -1471,15 +1468,15 @@ static int setup_qps(void)
mca_btl_openib_component.qp_infos[qp].u.pp_qp.rd_win = rd_win;
mca_btl_openib_component.qp_infos[qp].u.pp_qp.rd_rsv = rd_rsv;
if ((rd_num - rd_low) > rd_win) {
orte_show_help("help-mpi-btl-openib.txt", "non optimal rd_win",
ompi_show_help("help-mpi-btl-openib.txt", "non optimal rd_win",
true, rd_win, rd_num - rd_low);
}
} else {
int32_t sd_max, rd_init, srq_limit;
if (count < 3 || count > 7) {
orte_show_help("help-mpi-btl-openib.txt",
ompi_show_help("help-mpi-btl-openib.txt",
"invalid srq specification", true,
orte_process_info.nodename, queues[qp]);
ompi_process_info.nodename, queues[qp]);
ret = OMPI_ERR_BAD_PARAM;
goto error;
}
@ -1514,15 +1511,15 @@ static int setup_qps(void)
}
if (rd_num < rd_init) {
orte_show_help("help-mpi-btl-openib.txt", "rd_num must be >= rd_init",
true, orte_process_info.nodename, queues[qp]);
ompi_show_help("help-mpi-btl-openib.txt", "rd_num must be >= rd_init",
true, ompi_process_info.nodename, queues[qp]);
ret = OMPI_ERR_BAD_PARAM;
goto error;
}
if (rd_num < srq_limit) {
orte_show_help("help-mpi-btl-openib.txt", "srq_limit must be > rd_num",
true, orte_process_info.nodename, queues[qp]);
ompi_show_help("help-mpi-btl-openib.txt", "srq_limit must be > rd_num",
true, ompi_process_info.nodename, queues[qp]);
ret = OMPI_ERR_BAD_PARAM;
goto error;
}
@ -1533,8 +1530,8 @@ static int setup_qps(void)
}
if (rd_num <= rd_low) {
orte_show_help("help-mpi-btl-openib.txt", "rd_num must be > rd_low",
true, orte_process_info.nodename, queues[qp]);
ompi_show_help("help-mpi-btl-openib.txt", "rd_num must be > rd_low",
true, ompi_process_info.nodename, queues[qp]);
ret = OMPI_ERR_BAD_PARAM;
goto error;
}
@ -1553,23 +1550,23 @@ static int setup_qps(void)
mca_btl_openib_module.super.btl_eager_limit :
mca_btl_openib_module.super.btl_max_send_size;
if (max_qp_size < max_size_needed) {
orte_show_help("help-mpi-btl-openib.txt",
ompi_show_help("help-mpi-btl-openib.txt",
"biggest qp size is too small", true,
orte_process_info.nodename, max_qp_size,
ompi_process_info.nodename, max_qp_size,
max_size_needed);
ret = OMPI_ERR_BAD_PARAM;
goto error;
} else if (max_qp_size > max_size_needed) {
orte_show_help("help-mpi-btl-openib.txt",
ompi_show_help("help-mpi-btl-openib.txt",
"biggest qp size is too big", true,
orte_process_info.nodename, max_qp_size,
ompi_process_info.nodename, max_qp_size,
max_size_needed);
}
if (mca_btl_openib_component.ib_free_list_max > 0 &&
min_freelist_size > mca_btl_openib_component.ib_free_list_max) {
orte_show_help("help-mpi-btl-openib.txt", "freelist too small", true,
orte_process_info.nodename,
ompi_show_help("help-mpi-btl-openib.txt", "freelist too small", true,
ompi_process_info.nodename,
mca_btl_openib_component.ib_free_list_max,
min_freelist_size);
ret = OMPI_ERR_BAD_PARAM;
@ -1674,9 +1671,9 @@ static int init_one_device(opal_list_t *btl_list, struct ibv_device* ib_dev)
warning that we're using default values (unless overridden
that we don't want to see these warnings) */
if (mca_btl_openib_component.warn_no_device_params_found) {
orte_show_help("help-mpi-btl-openib.txt",
ompi_show_help("help-mpi-btl-openib.txt",
"no device params found", true,
orte_process_info.nodename,
ompi_process_info.nodename,
ibv_get_device_name(device->ib_dev),
device->ib_dev_attr.vendor_id,
device->ib_dev_attr.vendor_part_id);
@ -1782,7 +1779,7 @@ static int init_one_device(opal_list_t *btl_list, struct ibv_device* ib_dev)
/* Eager RDMA is not currently supported with progress threads */
if (device->use_eager_rdma && OMPI_ENABLE_PROGRESS_THREADS) {
device->use_eager_rdma = 0;
orte_show_help("help-mpi-btl-openib.txt",
ompi_show_help("help-mpi-btl-openib.txt",
"eager RDMA and progress threads", true);
}
@ -1868,7 +1865,7 @@ static int init_one_device(opal_list_t *btl_list, struct ibv_device* ib_dev)
if (device->btls > 0) {
/* if apm was enabled it should be > 1 */
if (1 == mca_btl_openib_component.apm_ports) {
orte_show_help("help-mpi-btl-openib.txt",
ompi_show_help("help-mpi-btl-openib.txt",
"apm not enough ports", true);
mca_btl_openib_component.apm_ports = 0;
}
@ -2127,10 +2124,10 @@ static int init_one_device(opal_list_t *btl_list, struct ibv_device* ib_dev)
if (NULL != values.receive_queues) {
if (0 != strcmp(values.receive_queues,
mca_btl_openib_component.receive_queues)) {
orte_show_help("help-mpi-btl-openib.txt",
ompi_show_help("help-mpi-btl-openib.txt",
"locally conflicting receive_queues", true,
opal_install_dirs.pkgdatadir,
orte_process_info.nodename,
ompi_process_info.nodename,
ibv_get_device_name(receive_queues_device->ib_dev),
receive_queues_device->ib_dev_attr.vendor_id,
receive_queues_device->ib_dev_attr.vendor_part_id,
@ -2151,10 +2148,10 @@ static int init_one_device(opal_list_t *btl_list, struct ibv_device* ib_dev)
device's INI file, we must error. */
else if (BTL_OPENIB_RQ_SOURCE_DEVICE_INI ==
mca_btl_openib_component.receive_queues_source) {
orte_show_help("help-mpi-btl-openib.txt",
ompi_show_help("help-mpi-btl-openib.txt",
"locally conflicting receive_queues", true,
opal_install_dirs.pkgdatadir,
orte_process_info.nodename,
ompi_process_info.nodename,
ibv_get_device_name(receive_queues_device->ib_dev),
receive_queues_device->ib_dev_attr.vendor_id,
receive_queues_device->ib_dev_attr.vendor_part_id,
@ -2190,9 +2187,9 @@ error:
}
if (OMPI_SUCCESS != ret) {
orte_show_help("help-mpi-btl-openib.txt",
ompi_show_help("help-mpi-btl-openib.txt",
"error in device init", true,
orte_process_info.nodename,
ompi_process_info.nodename,
ibv_get_device_name(device->ib_dev));
}
@ -2404,7 +2401,7 @@ sort_devs_by_distance(struct ibv_device **ib_devs, int count)
for (i = 0; i < count; i++) {
devs[i].ib_dev = ib_devs[i];
if (OPAL_HAVE_HWLOC && orte_proc_is_bound) {
if (OPAL_HAVE_HWLOC && ompi_rte_proc_is_bound) {
/* If this process is bound to one or more PUs, we can get
an accurate distance. */
devs[i].distance = get_ib_dev_distance(ib_devs[i]);
@ -2481,7 +2478,7 @@ btl_openib_component_init(int *num_btl_modules,
}
#ifndef __WINDOWS__
seedv[0] = ORTE_PROC_MY_NAME->vpid;
seedv[0] = OMPI_PROC_MY_NAME->vpid;
seedv[1] = opal_timer_base_get_cycles();
seedv[2] = opal_timer_base_get_cycles();
seed48(seedv);
@ -2513,9 +2510,9 @@ btl_openib_component_init(int *num_btl_modules,
#if !OPAL_HAVE_THREADS
if ((OPAL_MEMORY_FREE_SUPPORT | OPAL_MEMORY_MUNMAP_SUPPORT) ==
((OPAL_MEMORY_FREE_SUPPORT | OPAL_MEMORY_MUNMAP_SUPPORT) & value)) {
orte_show_help("help-mpi-btl-openib.txt",
ompi_show_help("help-mpi-btl-openib.txt",
"ptmalloc2 with no threads", true,
orte_process_info.nodename);
ompi_process_info.nodename);
goto no_btls;
}
#endif
@ -2628,9 +2625,9 @@ btl_openib_component_init(int *num_btl_modules,
couldn't provide it. So print an error and deactivate
this BTL. */
if (mca_btl_openib_component.want_fork_support > 0) {
orte_show_help("help-mpi-btl-openib.txt",
ompi_show_help("help-mpi-btl-openib.txt",
"ibv_fork_init fail", true,
orte_process_info.nodename);
ompi_process_info.nodename);
goto no_btls;
}
}
@ -2652,7 +2649,7 @@ btl_openib_component_init(int *num_btl_modules,
list_count++;
if (list_count > 1) {
orte_show_help("help-mpi-btl-openib.txt",
ompi_show_help("help-mpi-btl-openib.txt",
"specified include and exclude", true,
NULL == mca_btl_openib_component.if_include ?
"<not specified>" : mca_btl_openib_component.if_include,
@ -2720,7 +2717,7 @@ btl_openib_component_init(int *num_btl_modules,
continue;
}
#else
orte_show_help("help-mpi-btl-openib.txt", "no iwarp support",
ompi_show_help("help-mpi-btl-openib.txt", "no iwarp support",
true);
#endif
break;
@ -2738,8 +2735,8 @@ btl_openib_component_init(int *num_btl_modules,
}
free(dev_sorted);
if (!found) {
orte_show_help("help-mpi-btl-openib.txt", "no devices right type",
true, orte_process_info.nodename,
ompi_show_help("help-mpi-btl-openib.txt", "no devices right type",
true, ompi_process_info.nodename,
((BTL_OPENIB_DT_IB == mca_btl_openib_component.device_type) ?
"InfiniBand" :
(BTL_OPENIB_DT_IWARP == mca_btl_openib_component.device_type) ?
@ -2755,16 +2752,16 @@ btl_openib_component_init(int *num_btl_modules,
if (0 != opal_argv_count(mca_btl_openib_component.if_list) &&
mca_btl_openib_component.warn_nonexistent_if) {
char *str = opal_argv_join(mca_btl_openib_component.if_list, ',');
orte_show_help("help-mpi-btl-openib.txt", "nonexistent port",
true, orte_process_info.nodename,
ompi_show_help("help-mpi-btl-openib.txt", "nonexistent port",
true, ompi_process_info.nodename,
((NULL != mca_btl_openib_component.if_include) ?
"in" : "ex"), str);
free(str);
}
if(0 == mca_btl_openib_component.ib_num_btls) {
orte_show_help("help-mpi-btl-openib.txt",
"no active ports found", true, orte_process_info.nodename);
ompi_show_help("help-mpi-btl-openib.txt",
"no active ports found", true, ompi_process_info.nodename);
goto no_btls;
}
@ -2875,9 +2872,9 @@ btl_openib_component_init(int *num_btl_modules,
/* Do final init on device */
ret = prepare_device_for_use(device);
if (OMPI_SUCCESS != ret) {
orte_show_help("help-mpi-btl-openib.txt",
ompi_show_help("help-mpi-btl-openib.txt",
"error in device init", true,
orte_process_info.nodename,
ompi_process_info.nodename,
ibv_get_device_name(device->ib_dev));
goto no_btls;
}
@ -3538,16 +3535,16 @@ error:
ibv_get_device_name(endpoint->qps[qp].qp->lcl_qp->context->device);
if (IBV_WC_RNR_RETRY_EXC_ERR == wc->status) {
orte_show_help("help-mpi-btl-openib.txt",
ompi_show_help("help-mpi-btl-openib.txt",
BTL_OPENIB_QP_TYPE_PP(qp) ?
"pp rnr retry exceeded" :
"srq rnr retry exceeded", true,
orte_process_info.nodename, device_name,
ompi_process_info.nodename, device_name,
peer_hostname);
} else if (IBV_WC_RETRY_EXC_ERR == wc->status) {
orte_show_help("help-mpi-btl-openib.txt",
ompi_show_help("help-mpi-btl-openib.txt",
"pp retry exceeded", true,
orte_process_info.nodename,
ompi_process_info.nodename,
device_name, peer_hostname);
}
}
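
The hunks in this file are almost entirely a mechanical rename from ORTE symbols to their rte-framework counterparts (orte_show_help -> ompi_show_help, orte_process_info -> ompi_process_info, ORTE_PROC_MY_NAME -> OMPI_PROC_MY_NAME, orte_proc_is_bound -> ompi_rte_proc_is_bound). A hypothetical sketch of how an ORTE-backed rte component could supply those names is below; the defines are illustrative assumptions, not the header added by this commit.

    /* Illustrative sketch only: one plausible ORTE-backed mapping for the
     * rte-framework names used in the hunks above.  These defines are an
     * assumption for illustration, not the actual ompi/mca/rte/orte header. */
    #include "orte/util/show_help.h"
    #include "orte/util/proc_info.h"
    #include "orte/runtime/orte_globals.h"

    #define ompi_show_help          orte_show_help
    #define ompi_process_info       orte_process_info
    #define ompi_rte_proc_is_bound  orte_proc_is_bound
    #define OMPI_PROC_MY_NAME       ORTE_PROC_MY_NAME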

View file

@ -37,8 +37,6 @@
#include "opal_stdint.h"
#include "opal/util/output.h"
#include "orte/util/show_help.h"
#include "ompi/types.h"
#include "ompi/class/ompi_free_list.h"
@ -1048,9 +1046,9 @@ void *mca_btl_openib_endpoint_invoke_error(void *context)
/* If we didn't find a BTL, then just bail :-( */
if (NULL == btl || NULL == btl->error_cb) {
orte_show_help("help-mpi-btl-openib.txt",
ompi_show_help("help-mpi-btl-openib.txt",
"cannot raise btl error", true,
orte_process_info.nodename,
ompi_process_info.nodename,
__FILE__, __LINE__);
exit(1);
}

View file

@ -372,14 +372,14 @@ void btl_openib_handle_failover_control_messages(mca_btl_openib_control_header_t
opal_output_verbose(20, mca_btl_openib_component.verbose_failover,
"IB: rank=%d, control message (remote=%d), "
"moved local head by one (new=%d)",
ORTE_PROC_MY_NAME->vpid,
OMPI_PROC_MY_NAME->vpid,
newep->endpoint_proc->proc_ompi->proc_name.vpid,
newep->eager_rdma_local.head);
} else {
opal_output_verbose(20, mca_btl_openib_component.verbose_failover,
"IB: rank=%d, control message (remote=%d), "
"did not move local head by one (still=%d)",
ORTE_PROC_MY_NAME->vpid,
OMPI_PROC_MY_NAME->vpid,
newep->endpoint_proc->proc_ompi->proc_name.vpid,
newep->eager_rdma_local.head);
}
@ -684,7 +684,7 @@ static void mca_btl_openib_endpoint_notify(mca_btl_base_endpoint_t* endpoint, ui
bc_hdr->control.type = type;
bc_hdr->lid = endpoint->endpoint_btl->port_info.lid;
bc_hdr->subnet_id = endpoint->endpoint_btl->port_info.subnet_id;
bc_hdr->vpid = ORTE_PROC_MY_NAME->vpid;
bc_hdr->vpid = OMPI_PROC_MY_NAME->vpid;
bc_hdr->index = index;
if(newep->nbo) {
@ -739,7 +739,7 @@ void mca_btl_openib_dump_all_local_rdma_frags(mca_btl_openib_device_t *device) {
mca_btl_openib_endpoint_t* endpoint;
c = device->eager_rdma_buffers_count;
opal_output(0, "rank=%d, device=%s", ORTE_PROC_MY_NAME->vpid, device->ib_dev->name);
opal_output(0, "rank=%d, device=%s", OMPI_PROC_MY_NAME->vpid, device->ib_dev->name);
for(i = 0; i < c; i++) {
endpoint = device->eager_rdma_buffers[i];

View file

@ -30,7 +30,6 @@
#include <unistd.h>
#endif
#include "orte/util/show_help.h"
#include "opal/mca/base/mca_base_param.h"
#include "btl_openib.h"
@ -247,7 +246,7 @@ static int parse_file(char *filename)
ini_filename = filename;
btl_openib_ini_yyin = fopen(filename, "r");
if (NULL == btl_openib_ini_yyin) {
orte_show_help("help-mpi-btl-openib.txt", "ini file:file not found",
ompi_show_help("help-mpi-btl-openib.txt", "ini file:file not found",
true, filename);
ret = OMPI_ERR_NOT_FOUND;
goto cleanup;
@ -425,7 +424,7 @@ static int parse_line(parsed_section_values_t *sv)
/* Have no idea what this parameter is. Not an error -- just
ignore it */
if (!showed_unknown_field_warning) {
orte_show_help("help-mpi-btl-openib.txt",
ompi_show_help("help-mpi-btl-openib.txt",
"ini file:unknown field", true,
ini_filename, btl_openib_ini_yynewlines,
key_buffer);
@ -694,7 +693,7 @@ static inline void show_help(const char *topic)
if (0 == strcmp("\n", btl_openib_ini_yytext)) {
btl_openib_ini_yytext = "<end of line>";
}
orte_show_help("help-mpi-btl-openib.txt", topic, true,
ompi_show_help("help-mpi-btl-openib.txt", topic, true,
ini_filename, btl_openib_ini_yynewlines,
btl_openib_ini_yytext);
btl_openib_ini_yytext = save;

View file

@ -22,8 +22,6 @@
#include "opal/util/argv.h"
#include "opal/util/if.h"
#include "orte/util/show_help.h"
#include "connect/connect.h"
#endif
/* Always want to include this file */
@ -197,9 +195,9 @@ static int ipaddr_specified(struct sockaddr_in *ipaddr, uint32_t netmask)
if (NULL == temp || NULL == temp[0] || NULL == temp[1] ||
NULL != temp[2]) {
orte_show_help("help-mpi-btl-openib.txt",
ompi_show_help("help-mpi-btl-openib.txt",
"invalid ipaddr_inexclude", true, "include",
orte_process_info.nodename, list[i],
ompi_process_info.nodename, list[i],
"Invalid specification (missing \"/\")");
if (NULL != temp) {
opal_argv_free(temp);
@ -208,9 +206,9 @@ static int ipaddr_specified(struct sockaddr_in *ipaddr, uint32_t netmask)
}
if (1 != inet_pton(ipaddr->sin_family, temp[0], &ipae)) {
orte_show_help("help-mpi-btl-openib.txt",
ompi_show_help("help-mpi-btl-openib.txt",
"invalid ipaddr_inexclude", true, "include",
orte_process_info.nodename, list[i],
ompi_process_info.nodename, list[i],
"Invalid specification (inet_pton() failed)");
opal_argv_free(temp);
continue;
@ -239,9 +237,9 @@ static int ipaddr_specified(struct sockaddr_in *ipaddr, uint32_t netmask)
if (NULL == temp || NULL == temp[0] || NULL == temp[1] ||
NULL != temp[2]) {
orte_show_help("help-mpi-btl-openib.txt",
ompi_show_help("help-mpi-btl-openib.txt",
"invalid ipaddr_inexclude", true, "exclude",
orte_process_info.nodename, list[i],
ompi_process_info.nodename, list[i],
"Invalid specification (missing \"/\")");
if (NULL != temp) {
opal_argv_free(temp);
@ -250,9 +248,9 @@ static int ipaddr_specified(struct sockaddr_in *ipaddr, uint32_t netmask)
}
if (1 != inet_pton(ipaddr->sin_family, temp[0], &ipae)) {
orte_show_help("help-mpi-btl-openib.txt",
ompi_show_help("help-mpi-btl-openib.txt",
"invalid ipaddr_inexclude", true, "exclude",
orte_process_info.nodename, list[i],
ompi_process_info.nodename, list[i],
"Invalid specification (inet_pton() failed)");
opal_argv_free(temp);
continue;

View file

@ -30,7 +30,6 @@
#include "opal/mca/installdirs/installdirs.h"
#include "opal/util/output.h"
#include "opal/mca/base/mca_base_param.h"
#include "orte/util/show_help.h"
#include "btl_openib.h"
#include "btl_openib_mca.h"
#include "btl_openib_ini.h"
@ -192,9 +191,9 @@ int btl_openib_register_mca_params(void)
mca_btl_openib_component.want_fork_support = ival;
#else
if (0 != ival) {
orte_show_help("help-mpi-btl-openib.txt",
ompi_show_help("help-mpi-btl-openib.txt",
"ibv_fork requested but not supported", true,
orte_process_info.nodename);
ompi_process_info.nodename);
return OMPI_ERR_BAD_PARAM;
}
#endif
@ -223,9 +222,9 @@ int btl_openib_register_mca_params(void)
} else if (0 == strcasecmp(str, "all")) {
mca_btl_openib_component.device_type = BTL_OPENIB_DT_ALL;
} else {
orte_show_help("help-mpi-btl-openib.txt",
ompi_show_help("help-mpi-btl-openib.txt",
"ibv_fork requested but not supported", true,
orte_process_info.nodename);
ompi_process_info.nodename);
return OMPI_ERR_BAD_PARAM;
}
free(str);
@ -310,7 +309,7 @@ int btl_openib_register_mca_params(void)
CHECK(reg_int("mtu", "ib_mtu", msg, IBV_MTU_1024, &ival, 0));
free(msg);
if (ival < IBV_MTU_1024 || ival > IBV_MTU_4096) {
orte_show_help("help-mpi-btl-openib.txt", "invalid mca param value",
ompi_show_help("help-mpi-btl-openib.txt", "invalid mca param value",
true, "invalid value for btl_openib_ib_mtu",
"btl_openib_ib_mtu reset to 1024");
mca_btl_openib_component.ib_mtu = IBV_MTU_1024;
@ -323,12 +322,12 @@ int btl_openib_register_mca_params(void)
"(must be >= 0 and <= 31)",
25, &ival, 0));
if (ival > 31) {
orte_show_help("help-mpi-btl-openib.txt", "invalid mca param value",
ompi_show_help("help-mpi-btl-openib.txt", "invalid mca param value",
true, "btl_openib_ib_min_rnr_timer > 31",
"btl_openib_ib_min_rnr_timer reset to 31");
ival = 31;
} else if (ival < 0){
orte_show_help("help-mpi-btl-openib.txt", "invalid mca param value",
ompi_show_help("help-mpi-btl-openib.txt", "invalid mca param value",
true, "btl_openib_ib_min_rnr_timer < 0",
"btl_openib_ib_min_rnr_timer reset to 0");
ival = 0;
@ -340,12 +339,12 @@ int btl_openib_register_mca_params(void)
"(must be >= 0 and <= 31)",
20, &ival, 0));
if (ival > 31) {
orte_show_help("help-mpi-btl-openib.txt", "invalid mca param value",
ompi_show_help("help-mpi-btl-openib.txt", "invalid mca param value",
true, "btl_openib_ib_timeout > 31",
"btl_openib_ib_timeout reset to 31");
ival = 31;
} else if (ival < 0) {
orte_show_help("help-mpi-btl-openib.txt", "invalid mca param value",
ompi_show_help("help-mpi-btl-openib.txt", "invalid mca param value",
true, "btl_openib_ib_timeout < 0",
"btl_openib_ib_timeout reset to 0");
ival = 0;
@ -357,12 +356,12 @@ int btl_openib_register_mca_params(void)
"(must be >= 0 and <= 7)",
7, &ival, 0));
if (ival > 7) {
orte_show_help("help-mpi-btl-openib.txt", "invalid mca param value",
ompi_show_help("help-mpi-btl-openib.txt", "invalid mca param value",
true, "btl_openib_ib_retry_count > 7",
"btl_openib_ib_retry_count reset to 7");
ival = 7;
} else if (ival < 0) {
orte_show_help("help-mpi-btl-openib.txt", "invalid mca param value",
ompi_show_help("help-mpi-btl-openib.txt", "invalid mca param value",
true, "btl_openib_ib_retry_count < 0",
"btl_openib_ib_retry_count reset to 0");
ival = 0;
@ -377,12 +376,12 @@ int btl_openib_register_mca_params(void)
"(must be >= 0 and <= 7; 7 = \"infinite\")",
7, &ival, 0));
if (ival > 7) {
orte_show_help("help-mpi-btl-openib.txt", "invalid mca param value",
ompi_show_help("help-mpi-btl-openib.txt", "invalid mca param value",
true, "btl_openib_ib_rnr_retry > 7",
"btl_openib_ib_rnr_retry reset to 7");
ival = 7;
} else if (ival < 0) {
orte_show_help("help-mpi-btl-openib.txt", "invalid mca param value",
ompi_show_help("help-mpi-btl-openib.txt", "invalid mca param value",
true, "btl_openib_ib_rnr_retry < 0",
"btl_openib_ib_rnr_retry reset to 0");
ival = 0;
@ -399,12 +398,12 @@ int btl_openib_register_mca_params(void)
"(must be >= 0 and <= 15)",
0, &ival, 0));
if (ival > 15) {
orte_show_help("help-mpi-btl-openib.txt", "invalid mca param value",
ompi_show_help("help-mpi-btl-openib.txt", "invalid mca param value",
true, "btl_openib_ib_service_level > 15",
"btl_openib_ib_service_level reset to 15");
ival = 15;
} else if (ival < 0) {
orte_show_help("help-mpi-btl-openib.txt", "invalid mca param value",
ompi_show_help("help-mpi-btl-openib.txt", "invalid mca param value",
true, "btl_openib_ib_service_level < 0",
"btl_openib_ib_service_level reset to 0");
ival = 0;
@ -507,8 +506,8 @@ int btl_openib_register_mca_params(void)
"(must be > 0 and power of two)",
64, &ival, REGINT_GE_ZERO));
if(ival <= 1 || (ival & (ival - 1))) {
orte_show_help("help-mpi-btl-openib.txt", "wrong buffer alignment",
true, ival, orte_process_info.nodename, 64);
ompi_show_help("help-mpi-btl-openib.txt", "wrong buffer alignment",
true, ival, ompi_process_info.nodename, 64);
mca_btl_openib_component.buffer_alignment = 64;
} else {
mca_btl_openib_component.buffer_alignment = (uint32_t) ival;
@ -656,7 +655,7 @@ int btl_openib_register_mca_params(void)
if (mca_btl_openib_component.use_memalign != 32
&& mca_btl_openib_component.use_memalign != 64
&& mca_btl_openib_component.use_memalign != 0){
orte_show_help("help-mpi-btl-openib.txt", "invalid mca param value",
ompi_show_help("help-mpi-btl-openib.txt", "invalid mca param value",
true, "Wrong btl_openib_memalign parameter value. Allowed values: 64, 32, 0.",
"btl_openib_memalign is reset to 32");
mca_btl_openib_component.use_memalign = 32;
@ -669,7 +668,7 @@ int btl_openib_register_mca_params(void)
&ival,
REGINT_GE_ZERO);
if (ival < 0){
orte_show_help("help-mpi-btl-openib.txt", "invalid mca param value",
ompi_show_help("help-mpi-btl-openib.txt", "invalid mca param value",
true, "btl_openib_memalign_threshold must be positive",
"btl_openib_memalign_threshold is reset to btl_openib_eager_limit");
ival = mca_btl_openib_component.eager_limit;

View file

@ -154,7 +154,7 @@ mca_btl_openib_proc_t* mca_btl_openib_proc_create(ompi_proc_t* ompi_proc)
if (OMPI_SUCCESS != rc) {
BTL_ERROR(("[%s:%d] ompi_modex_recv failed for peer %s",
__FILE__, __LINE__,
ORTE_NAME_PRINT(&ompi_proc->proc_name)));
OMPI_NAME_PRINT(&ompi_proc->proc_name)));
OBJ_RELEASE(module_proc);
return NULL;
}

View file

@ -44,7 +44,7 @@ int mca_btl_openib_open_xrc_domain(struct mca_btl_openib_device_t *device)
dev_name = ibv_get_device_name(device->ib_dev);
len = asprintf(&xrc_file_name,
"%s"OPAL_PATH_SEP"openib_xrc_domain_%s",
orte_process_info.job_session_dir, dev_name);
ompi_process_info.job_session_dir, dev_name);
if (0 > len) {
BTL_ERROR(("Failed to allocate memory for XRC file name: %s\n",
strerror(errno)));
@ -111,7 +111,7 @@ static void ib_address_destructor(ib_address_t *ib_addr)
OBJ_DESTRUCT(&ib_addr->pending_ep);
}
static int ib_address_init(ib_address_t *ib_addr, uint16_t lid, uint64_t s_id, orte_jobid_t ep_jobid)
static int ib_address_init(ib_address_t *ib_addr, uint16_t lid, uint64_t s_id, ompi_jobid_t ep_jobid)
{
ib_addr->key = malloc(SIZE_OF3(s_id, lid, ep_jobid));
if (NULL == ib_addr->key) {
@ -136,7 +136,7 @@ static int ib_address_init(ib_address_t *ib_addr, uint16_t lid, uint64_t s_id, o
* Before call to this function you need to protect with
*/
int mca_btl_openib_ib_address_add_new (uint16_t lid, uint64_t s_id,
orte_jobid_t ep_jobid, mca_btl_openib_endpoint_t *ep)
ompi_jobid_t ep_jobid, mca_btl_openib_endpoint_t *ep)
{
void *tmp;
int ret = OMPI_SUCCESS;

View file

@ -44,6 +44,6 @@ typedef struct ib_address_t ib_address_t;
int mca_btl_openib_open_xrc_domain(struct mca_btl_openib_device_t *device);
int mca_btl_openib_close_xrc_domain(struct mca_btl_openib_device_t *device);
int mca_btl_openib_ib_address_add_new (uint16_t lid, uint64_t s_id,
orte_jobid_t ep_jobid, mca_btl_openib_endpoint_t *ep);
ompi_jobid_t ep_jobid, mca_btl_openib_endpoint_t *ep);
#endif

View file

@ -27,7 +27,6 @@
#include "connect/btl_openib_connect_udcm.h"
#endif
#include "orte/util/show_help.h"
#include "opal/util/argv.h"
#include "opal/util/output.h"
@ -121,9 +120,9 @@ int ompi_btl_openib_connect_base_register(void)
}
}
if (NULL == all[i]) {
orte_show_help("help-mpi-btl-openib-cpc-base.txt",
ompi_show_help("help-mpi-btl-openib-cpc-base.txt",
"cpc name not found", true,
"include", orte_process_info.nodename,
"include", ompi_process_info.nodename,
"include", cpc_include, temp[j],
all_cpc_names);
opal_argv_free(temp);
@ -147,9 +146,9 @@ int ompi_btl_openib_connect_base_register(void)
}
}
if (NULL == all[i]) {
orte_show_help("help-mpi-btl-openib-cpc-base.txt",
ompi_show_help("help-mpi-btl-openib-cpc-base.txt",
"cpc name not found", true,
"exclude", orte_process_info.nodename,
"exclude", ompi_process_info.nodename,
"exclude", cpc_exclude, temp[j],
all_cpc_names);
opal_argv_free(temp);
@ -292,9 +291,9 @@ int ompi_btl_openib_connect_base_select_for_local_port(mca_btl_openib_module_t *
/* If we got an empty array, then no CPCs were eligible. Doh! */
if (0 == cpc_index) {
orte_show_help("help-mpi-btl-openib-cpc-base.txt",
ompi_show_help("help-mpi-btl-openib-cpc-base.txt",
"no cpcs for port", true,
orte_process_info.nodename,
ompi_process_info.nodename,
ibv_get_device_name(btl->device->ib_dev),
btl->port_num, msg);
free(cpcs);

View file

@ -27,16 +27,10 @@
#include "opal/dss/dss.h"
#include "opal_stdint.h"
#include "orte/util/show_help.h"
#include "opal/util/error.h"
#include "opal/util/output.h"
#include "orte/mca/rml/rml.h"
#include "orte/mca/rml/rml_types.h"
#include "orte/mca/errmgr/errmgr.h"
#include "orte/util/name_fns.h"
#include "orte/runtime/orte_globals.h"
#include "ompi/mca/dpm/dpm.h"
#include "ompi/mca/rte/rte.h"
#include "btl_openib.h"
#include "btl_openib_endpoint.h"
#include "btl_openib_proc.h"
@ -77,11 +71,11 @@ static int qp_create_one(mca_btl_base_endpoint_t* endpoint, int qp,
static int send_connect_data(mca_btl_base_endpoint_t* endpoint,
uint8_t message_type);
static void rml_send_cb(int status, orte_process_name_t* endpoint,
opal_buffer_t* buffer, orte_rml_tag_t tag,
static void rml_send_cb(int status, ompi_process_name_t* endpoint,
opal_buffer_t* buffer, ompi_rml_tag_t tag,
void* cbdata);
static void rml_recv_cb(int status, orte_process_name_t* process_name,
opal_buffer_t* buffer, orte_rml_tag_t tag,
static void rml_recv_cb(int status, ompi_process_name_t* process_name,
opal_buffer_t* buffer, ompi_rml_tag_t tag,
void* cbdata);
/*
@ -149,12 +143,12 @@ static int oob_component_query(mca_btl_openib_module_t *btl,
ensure to only post it *once*, because another btl may have
come in before this and already posted it. */
if (!rml_recv_posted) {
rc = orte_rml.recv_buffer_nb(ORTE_NAME_WILDCARD,
rc = ompi_rte_recv_buffer_nb(OMPI_NAME_WILDCARD,
OMPI_RML_TAG_OPENIB,
ORTE_RML_PERSISTENT,
OMPI_RML_PERSISTENT,
rml_recv_cb,
NULL);
if (ORTE_SUCCESS != rc) {
if (OMPI_SUCCESS != rc) {
opal_output_verbose(5, mca_btl_base_output,
"openib BTL: oob CPC system error %d (%s)",
rc, opal_strerror(rc));
@ -165,7 +159,7 @@ static int oob_component_query(mca_btl_openib_module_t *btl,
*cpc = (ompi_btl_openib_connect_base_module_t *) malloc(sizeof(ompi_btl_openib_connect_base_module_t));
if (NULL == *cpc) {
orte_rml.recv_cancel(ORTE_NAME_WILDCARD, OMPI_RML_TAG_OPENIB);
ompi_rte_recv_cancel(OMPI_NAME_WILDCARD, OMPI_RML_TAG_OPENIB);
rml_recv_posted = false;
opal_output_verbose(5, mca_btl_base_output,
"openib BTL: oob CPC system error (malloc failed)");
@ -221,7 +215,7 @@ static int oob_module_start_connect(ompi_btl_openib_connect_base_module_t *cpc,
static int oob_component_finalize(void)
{
if (rml_recv_posted) {
orte_rml.recv_cancel(ORTE_NAME_WILDCARD, OMPI_RML_TAG_OPENIB);
ompi_rte_recv_cancel(OMPI_NAME_WILDCARD, OMPI_RML_TAG_OPENIB);
rml_recv_posted = false;
}
#if (ENABLE_DYNAMIC_SL)
@ -486,9 +480,9 @@ static int qp_create_one(mca_btl_base_endpoint_t* endpoint, int qp,
my_qp = ibv_create_qp(openib_btl->device->ib_pd, &init_attr);
if (NULL == my_qp) {
orte_show_help("help-mpi-btl-openib-cpc-base.txt",
ompi_show_help("help-mpi-btl-openib-cpc-base.txt",
"ibv_create_qp failed", true,
orte_process_info.nodename,
ompi_process_info.nodename,
ibv_get_device_name(openib_btl->device->ib_dev),
"Reliable connected (RC)");
return OMPI_ERROR;
@ -497,8 +491,8 @@ static int qp_create_one(mca_btl_base_endpoint_t* endpoint, int qp,
if (init_attr.cap.max_inline_data < req_inline) {
endpoint->qps[qp].ib_inline_max = init_attr.cap.max_inline_data;
orte_show_help("help-mpi-btl-openib-cpc-base.txt",
"inline truncated", true, orte_process_info.nodename,
ompi_show_help("help-mpi-btl-openib-cpc-base.txt",
"inline truncated", true, ompi_process_info.nodename,
ibv_get_device_name(openib_btl->device->ib_dev),
openib_btl->port_num,
req_inline, init_attr.cap.max_inline_data);
@ -539,7 +533,7 @@ static int send_connect_data(mca_btl_base_endpoint_t* endpoint,
int rc;
if (NULL == buffer) {
ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
OMPI_ERROR_LOG(OMPI_ERR_OUT_OF_RESOURCE);
return OMPI_ERR_OUT_OF_RESOURCE;
}
@ -547,14 +541,14 @@ static int send_connect_data(mca_btl_base_endpoint_t* endpoint,
BTL_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT8));
rc = opal_dss.pack(buffer, &message_type, 1, OPAL_UINT8);
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
OMPI_ERROR_LOG(rc);
return rc;
}
BTL_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT64));
rc = opal_dss.pack(buffer, &endpoint->subnet_id, 1, OPAL_UINT64);
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
OMPI_ERROR_LOG(rc);
return rc;
}
@ -565,13 +559,13 @@ static int send_connect_data(mca_btl_base_endpoint_t* endpoint,
&endpoint->rem_info.rem_qps[0].rem_qp_num, 1,
OPAL_UINT32);
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
OMPI_ERROR_LOG(rc);
return rc;
}
BTL_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT16));
rc = opal_dss.pack(buffer, &endpoint->rem_info.rem_lid, 1, OPAL_UINT16);
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
OMPI_ERROR_LOG(rc);
return rc;
}
}
@ -584,14 +578,14 @@ static int send_connect_data(mca_btl_base_endpoint_t* endpoint,
rc = opal_dss.pack(buffer, &endpoint->qps[qp].qp->lcl_qp->qp_num,
1, OPAL_UINT32);
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
OMPI_ERROR_LOG(rc);
return rc;
}
BTL_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT32));
rc = opal_dss.pack(buffer, &endpoint->qps[qp].qp->lcl_psn, 1,
OPAL_UINT32);
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
OMPI_ERROR_LOG(rc);
return rc;
}
}
@ -599,30 +593,30 @@ static int send_connect_data(mca_btl_base_endpoint_t* endpoint,
BTL_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT16));
rc = opal_dss.pack(buffer, &endpoint->endpoint_btl->lid, 1, OPAL_UINT16);
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
OMPI_ERROR_LOG(rc);
return rc;
}
BTL_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT32));
rc = opal_dss.pack(buffer, &endpoint->endpoint_btl->device->mtu, 1,
OPAL_UINT32);
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
OMPI_ERROR_LOG(rc);
return rc;
}
BTL_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT32));
rc = opal_dss.pack(buffer, &endpoint->index, 1, OPAL_UINT32);
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
OMPI_ERROR_LOG(rc);
return rc;
}
}
/* send to remote endpoint */
rc = orte_rml.send_buffer_nb(&endpoint->endpoint_proc->proc_ompi->proc_name,
rc = ompi_rte_send_buffer_nb(&endpoint->endpoint_proc->proc_ompi->proc_name,
buffer, OMPI_RML_TAG_OPENIB, 0,
rml_send_cb, NULL);
if (ORTE_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
if (OMPI_SUCCESS != rc) {
OMPI_ERROR_LOG(rc);
return rc;
}
BTL_VERBOSE(("Sent QP Info, LID = %d, SUBNET = %" PRIx64 "\n",
@ -637,8 +631,8 @@ static int send_connect_data(mca_btl_base_endpoint_t* endpoint,
* Callback when we have finished RML sending the connect data to a
* remote peer
*/
static void rml_send_cb(int status, orte_process_name_t* endpoint,
opal_buffer_t* buffer, orte_rml_tag_t tag,
static void rml_send_cb(int status, ompi_process_name_t* endpoint,
opal_buffer_t* buffer, ompi_rml_tag_t tag,
void* cbdata)
{
OBJ_RELEASE(buffer);
@ -650,8 +644,8 @@ static void rml_send_cb(int status, orte_process_name_t* endpoint,
* and if this endpoint is trying to connect, reply with our QP info,
* otherwise try to modify QP's and establish reliable connection
*/
static void rml_recv_cb(int status, orte_process_name_t* process_name,
opal_buffer_t* buffer, orte_rml_tag_t tag,
static void rml_recv_cb(int status, ompi_process_name_t* process_name,
opal_buffer_t* buffer, ompi_rml_tag_t tag,
void* cbdata)
{
mca_btl_openib_proc_t *ib_proc;
@ -674,16 +668,16 @@ static void rml_recv_cb(int status, orte_process_name_t* process_name,
our door */
BTL_VERBOSE(("unpacking %d of %d\n", cnt, OPAL_UINT8));
rc = opal_dss.unpack(buffer, &message_type, &cnt, OPAL_UINT8);
if (ORTE_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
if (OMPI_SUCCESS != rc) {
OMPI_ERROR_LOG(rc);
mca_btl_openib_endpoint_invoke_error(NULL);
return;
}
BTL_VERBOSE(("unpacking %d of %d\n", cnt, OPAL_UINT64));
rc = opal_dss.unpack(buffer, &rem_info.rem_subnet_id, &cnt, OPAL_UINT64);
if (ORTE_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
if (OMPI_SUCCESS != rc) {
OMPI_ERROR_LOG(rc);
mca_btl_openib_endpoint_invoke_error(NULL);
return;
}
@ -691,15 +685,15 @@ static void rml_recv_cb(int status, orte_process_name_t* process_name,
if (ENDPOINT_CONNECT_REQUEST != message_type) {
BTL_VERBOSE(("unpacking %d of %d\n", cnt, OPAL_UINT32));
rc = opal_dss.unpack(buffer, &lcl_qp, &cnt, OPAL_UINT32);
if (ORTE_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
if (OMPI_SUCCESS != rc) {
OMPI_ERROR_LOG(rc);
mca_btl_openib_endpoint_invoke_error(NULL);
return;
}
BTL_VERBOSE(("unpacking %d of %d\n", cnt, OPAL_UINT16));
rc = opal_dss.unpack(buffer, &lcl_lid, &cnt, OPAL_UINT16);
if (ORTE_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
if (OMPI_SUCCESS != rc) {
OMPI_ERROR_LOG(rc);
mca_btl_openib_endpoint_invoke_error(NULL);
return;
}
@ -716,16 +710,16 @@ static void rml_recv_cb(int status, orte_process_name_t* process_name,
BTL_VERBOSE(("unpacking %d of %d\n", cnt, OPAL_UINT32));
rc = opal_dss.unpack(buffer, &rem_info.rem_qps[qp].rem_qp_num, &cnt,
OPAL_UINT32);
if (ORTE_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
if (OMPI_SUCCESS != rc) {
OMPI_ERROR_LOG(rc);
mca_btl_openib_endpoint_invoke_error(NULL);
return;
}
BTL_VERBOSE(("unpacking %d of %d\n", cnt, OPAL_UINT32));
rc = opal_dss.unpack(buffer, &rem_info.rem_qps[qp].rem_psn, &cnt,
OPAL_UINT32);
if (ORTE_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
if (OMPI_SUCCESS != rc) {
OMPI_ERROR_LOG(rc);
mca_btl_openib_endpoint_invoke_error(NULL);
return;
}
@ -733,22 +727,22 @@ static void rml_recv_cb(int status, orte_process_name_t* process_name,
BTL_VERBOSE(("unpacking %d of %d\n", cnt, OPAL_UINT16));
rc = opal_dss.unpack(buffer, &rem_info.rem_lid, &cnt, OPAL_UINT16);
if (ORTE_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
if (OMPI_SUCCESS != rc) {
OMPI_ERROR_LOG(rc);
mca_btl_openib_endpoint_invoke_error(NULL);
return;
}
BTL_VERBOSE(("unpacking %d of %d\n", cnt, OPAL_UINT32));
rc = opal_dss.unpack(buffer, &rem_info.rem_mtu, &cnt, OPAL_UINT32);
if (ORTE_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
if (OMPI_SUCCESS != rc) {
OMPI_ERROR_LOG(rc);
mca_btl_openib_endpoint_invoke_error(NULL);
return;
}
BTL_VERBOSE(("unpacking %d of %d\n", cnt, OPAL_UINT32));
rc = opal_dss.unpack(buffer, &rem_info.rem_index, &cnt, OPAL_UINT32);
if (ORTE_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
if (OMPI_SUCCESS != rc) {
OMPI_ERROR_LOG(rc);
mca_btl_openib_endpoint_invoke_error(NULL);
return;
}
@ -758,7 +752,7 @@ static void rml_recv_cb(int status, orte_process_name_t* process_name,
rem_info.rem_lid,
rem_info.rem_subnet_id));
master = orte_util_compare_name_fields(ORTE_NS_CMP_ALL, ORTE_PROC_MY_NAME,
master = ompi_rte_compare_name_fields(OMPI_RTE_CMP_ALL, OMPI_PROC_MY_NAME,
process_name) > 0 ? true : false;
/* Need to protect the ib_procs list */
@ -771,7 +765,7 @@ static void rml_recv_cb(int status, orte_process_name_t* process_name,
ib_proc = (mca_btl_openib_proc_t*)opal_list_get_next(ib_proc)) {
bool found = false;
if (OPAL_EQUAL != orte_util_compare_name_fields(ORTE_NS_CMP_ALL,
if (OPAL_EQUAL != ompi_rte_compare_name_fields(OMPI_RTE_CMP_ALL,
&ib_proc->proc_ompi->proc_name, process_name)) {
continue;
}
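
The oob CPC hunks above move the whole connect protocol onto the rte framework's messaging calls. A minimal sketch of the resulting pattern, using only the signatures visible in these hunks (post one persistent receive, send the packed QP info non-blocking, release the buffer from the send callback, cancel the receive at finalize):

    /* Sketch; callback and call signatures are taken from the hunks above. */
    static void my_send_cb(int status, ompi_process_name_t *peer,
                           opal_buffer_t *buffer, ompi_rml_tag_t tag, void *cbdata)
    {
        OBJ_RELEASE(buffer);   /* we own the buffer until the send completes */
    }

    static void my_recv_cb(int status, ompi_process_name_t *peer,
                           opal_buffer_t *buffer, ompi_rml_tag_t tag, void *cbdata)
    {
        uint8_t message_type;
        int32_t cnt = 1;
        if (OMPI_SUCCESS != opal_dss.unpack(buffer, &message_type, &cnt, OPAL_UINT8)) {
            return;            /* real code reports this via OMPI_ERROR_LOG() */
        }
        /* ... unpack the remaining connect data and drive the QP state machine ... */
    }

    static int start_exchange(ompi_process_name_t *peer, opal_buffer_t *buffer)
    {
        /* post the persistent receive once for the whole component */
        int rc = ompi_rte_recv_buffer_nb(OMPI_NAME_WILDCARD, OMPI_RML_TAG_OPENIB,
                                         OMPI_RML_PERSISTENT, my_recv_cb, NULL);
        if (OMPI_SUCCESS != rc) {
            return rc;
        }
        /* send our QP info; my_send_cb releases the buffer on completion */
        return ompi_rte_send_buffer_nb(peer, buffer, OMPI_RML_TAG_OPENIB, 0,
                                       my_send_cb, NULL);
    }

    static void stop_exchange(void)
    {
        ompi_rte_recv_cancel(OMPI_NAME_WILDCARD, OMPI_RML_TAG_OPENIB);
    }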

View file

@ -48,7 +48,6 @@
#include "opal/util/output.h"
#include "opal/util/error.h"
#include "orte/util/show_help.h"
#include "btl_openib_fd.h"
#include "btl_openib_proc.h"
@ -247,7 +246,7 @@ static void rdmacm_component_register(void)
if (value >= 0 && value < 65536) {
rdmacm_port = (uint16_t) value;
} else {
orte_show_help("help-mpi-btl-openib-cpc-rdmacm.txt",
ompi_show_help("help-mpi-btl-openib-cpc-rdmacm.txt",
"illegal tcp port", true, value);
}
@ -258,7 +257,7 @@ static void rdmacm_component_register(void)
if (value > 0) {
rdmacm_resolve_timeout = value;
} else {
orte_show_help("help-mpi-btl-openib-cpc-rdmacm.txt",
ompi_show_help("help-mpi-btl-openib-cpc-rdmacm.txt",
"illegal timeout", true, value);
}
@ -269,7 +268,7 @@ static void rdmacm_component_register(void)
if (value > 0) {
rdmacm_resolve_max_retry_count = value;
} else {
orte_show_help("help-mpi-btl-openib-cpc-rdmacm.txt",
ompi_show_help("help-mpi-btl-openib-cpc-rdmacm.txt",
"illegal retry count", true, value);
}
@ -453,9 +452,9 @@ static int rdmacm_setup_qp(rdmacm_contents_t *contents,
endpoint->qps[qpnum].credit_frag = NULL;
if (attr.cap.max_inline_data < req_inline) {
endpoint->qps[qpnum].ib_inline_max = attr.cap.max_inline_data;
orte_show_help("help-mpi-btl-openib-cpc-base.txt",
ompi_show_help("help-mpi-btl-openib-cpc-base.txt",
"inline truncated", true,
orte_process_info.nodename,
ompi_process_info.nodename,
ibv_get_device_name(contents->openib_btl->device->ib_dev),
contents->openib_btl->port_num,
req_inline, attr.cap.max_inline_data);
@ -753,16 +752,16 @@ static void *show_help_cant_find_endpoint(void *context)
if (NULL != c) {
msg = stringify(c->peer_ip_addr);
orte_show_help("help-mpi-btl-openib-cpc-rdmacm.txt",
ompi_show_help("help-mpi-btl-openib-cpc-rdmacm.txt",
"could not find matching endpoint", true,
orte_process_info.nodename,
ompi_process_info.nodename,
c->device_name,
c->peer_tcp_port);
free(msg);
} else {
orte_show_help("help-mpi-btl-openib-cpc-rdmacm.txt",
ompi_show_help("help-mpi-btl-openib-cpc-rdmacm.txt",
"could not find matching endpoint", true,
orte_process_info.nodename,
ompi_process_info.nodename,
"<unknown>", "<unknown>", -1);
}
free(context);
@ -1463,9 +1462,9 @@ static void *show_help_rdmacm_event_error(void *c)
id_context_t *context = (id_context_t*) event->id->context;
if (RDMA_CM_EVENT_DEVICE_REMOVAL == event->event) {
orte_show_help("help-mpi-btl-openib-cpc-rdmacm.txt",
ompi_show_help("help-mpi-btl-openib-cpc-rdmacm.txt",
"rdma cm device removal", true,
orte_process_info.nodename,
ompi_process_info.nodename,
ibv_get_device_name(event->id->verbs->device));
} else {
const char *device = "Unknown";
@ -1474,9 +1473,9 @@ static void *show_help_rdmacm_event_error(void *c)
NULL != event->id->verbs->device) {
device = ibv_get_device_name(event->id->verbs->device);
}
orte_show_help("help-mpi-btl-openib-cpc-rdmacm.txt",
ompi_show_help("help-mpi-btl-openib-cpc-rdmacm.txt",
"rdma cm event error", true,
orte_process_info.nodename,
ompi_process_info.nodename,
device,
rdma_event_str(event->event),
context->endpoint->endpoint_proc->proc_ompi->proc_hostname);

View file

@ -9,7 +9,6 @@
*/
#include "btl_openib.h"
#include "orte/util/show_help.h"
#include "connect/btl_openib_connect_sl.h"
#include <infiniband/iba/ib_types.h>
@ -108,8 +107,8 @@ static int init_ud_qp(struct ibv_context *context_arg,
cache->cq = ibv_create_cq(cache->context, 4, NULL, NULL, 0);
if (NULL == cache->cq) {
BTL_ERROR(("error creating cq, errno says %s", strerror(errno)));
orte_show_help("help-mpi-btl-openib.txt", "init-fail-create-q",
true, orte_process_info.nodename,
ompi_show_help("help-mpi-btl-openib.txt", "init-fail-create-q",
true, ompi_process_info.nodename,
__FILE__, __LINE__, "ibv_create_cq",
strerror(errno), errno,
ibv_get_device_name(context_arg->device));

View file

@ -69,8 +69,6 @@
#include "opal/util/output.h"
#include "opal/util/error.h"
#include "opal_stdint.h"
#include "orte/mca/ess/ess.h"
#include "orte/util/show_help.h"
#include "btl_openib_endpoint.h"
#include "btl_openib_proc.h"
@ -1151,8 +1149,8 @@ static int udcm_qp_create_one(udcm_module_t *m, mca_btl_base_endpoint_t* lcl_ep,
}
if (NULL == lcl_ep->qps[qp].qp->lcl_qp) {
orte_show_help("help-mpi-btl-openib-cpc-base.txt",
"ibv_create_qp failed", true, orte_process_info.nodename,
ompi_show_help("help-mpi-btl-openib-cpc-base.txt",
"ibv_create_qp failed", true, ompi_process_info.nodename,
ibv_get_device_name(m->btl->device->ib_dev),
"Reliable connected (RC)");
@ -1161,8 +1159,8 @@ static int udcm_qp_create_one(udcm_module_t *m, mca_btl_base_endpoint_t* lcl_ep,
if (init_attr.cap.max_inline_data < req_inline) {
lcl_ep->qps[qp].ib_inline_max = init_attr.cap.max_inline_data;
orte_show_help("help-mpi-btl-openib-cpc-base.txt",
"inline truncated", true, orte_process_info.nodename,
ompi_show_help("help-mpi-btl-openib-cpc-base.txt",
"inline truncated", true, ompi_process_info.nodename,
ibv_get_device_name(m->btl->device->ib_dev),
m->btl->port_num, req_inline,
init_attr.cap.max_inline_data);

View file

@ -21,12 +21,8 @@
#include "opal/dss/dss.h"
#include "opal/util/error.h"
#include "opal/util/output.h"
#include "orte/util/show_help.h"
#include "orte/util/name_fns.h"
#include "orte/mca/rml/rml.h"
#include "orte/mca/rml/rml_types.h"
#include "orte/mca/errmgr/errmgr.h"
#include "ompi/mca/dpm/dpm.h"
#include "ompi/mca/rte/rte.h"
#include "btl_openib.h"
#include "btl_openib_endpoint.h"
@ -34,7 +30,6 @@
#include "btl_openib_xrc.h"
#include "btl_openib_async.h"
#include "connect/connect.h"
#include "orte/util/show_help.h"
#if (ENABLE_DYNAMIC_SL)
#include "connect/btl_openib_connect_sl.h"
#endif
@ -94,8 +89,8 @@ static int xoob_priority = 60;
* Callback when we have finished RML sending the connect data to a
* remote peer
*/
static void xoob_rml_send_cb(int status, orte_process_name_t* endpoint,
opal_buffer_t* buffer, orte_rml_tag_t tag,
static void xoob_rml_send_cb(int status, ompi_process_name_t* endpoint,
opal_buffer_t* buffer, ompi_rml_tag_t tag,
void* cbdata)
{
OBJ_RELEASE(buffer);
@ -111,7 +106,7 @@ static int xoob_receive_connect_data(mca_btl_openib_rem_info_t *info, uint16_t *
BTL_VERBOSE(("unpacking %d of %d\n", cnt, OPAL_UINT8));
rc = opal_dss.unpack(buffer, message_type, &cnt, OPAL_UINT8);
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
OMPI_ERROR_LOG(rc);
return OMPI_ERROR;
}
BTL_VERBOSE(("Recv unpack Message type = %d\n", *message_type));
@ -119,7 +114,7 @@ static int xoob_receive_connect_data(mca_btl_openib_rem_info_t *info, uint16_t *
BTL_VERBOSE(("unpacking %d of %d\n", cnt, OPAL_UINT64));
rc = opal_dss.unpack(buffer, &info->rem_subnet_id, &cnt, OPAL_UINT64);
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
OMPI_ERROR_LOG(rc);
return OMPI_ERROR;
}
BTL_VERBOSE(("Recv unpack sid = %" PRIx64 "\n", info->rem_subnet_id));
@ -127,7 +122,7 @@ static int xoob_receive_connect_data(mca_btl_openib_rem_info_t *info, uint16_t *
BTL_VERBOSE(("unpacking %d of %d\n", cnt, OPAL_UINT16));
rc = opal_dss.unpack(buffer, &info->rem_lid, &cnt, OPAL_UINT16);
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
OMPI_ERROR_LOG(rc);
return OMPI_ERROR;
}
BTL_VERBOSE(("Recv unpack lid = %d", info->rem_lid));
@ -141,7 +136,7 @@ static int xoob_receive_connect_data(mca_btl_openib_rem_info_t *info, uint16_t *
rc = opal_dss.unpack(buffer, &info->rem_qps->rem_qp_num, &cnt,
OPAL_UINT32);
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
OMPI_ERROR_LOG(rc);
return OMPI_ERROR;
}
BTL_VERBOSE(("Recv unpack remote qp = %x", info->rem_qps->rem_qp_num));
@ -150,7 +145,7 @@ static int xoob_receive_connect_data(mca_btl_openib_rem_info_t *info, uint16_t *
rc = opal_dss.unpack(buffer, &info->rem_qps->rem_psn, &cnt,
OPAL_UINT32);
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
OMPI_ERROR_LOG(rc);
return OMPI_ERROR;
}
BTL_VERBOSE(("Recv unpack remote psn = %d", info->rem_qps->rem_psn));
@ -158,7 +153,7 @@ static int xoob_receive_connect_data(mca_btl_openib_rem_info_t *info, uint16_t *
BTL_VERBOSE(("unpacking %d of %d\n", cnt, OPAL_UINT32));
rc = opal_dss.unpack(buffer, &info->rem_mtu, &cnt, OPAL_UINT32);
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
OMPI_ERROR_LOG(rc);
return OMPI_ERROR;
}
BTL_VERBOSE(("Recv unpack remote mtu = %d", info->rem_mtu));
@ -170,7 +165,7 @@ static int xoob_receive_connect_data(mca_btl_openib_rem_info_t *info, uint16_t *
BTL_VERBOSE(("unpacking %d of %d\n", cnt, OPAL_UINT16));
rc = opal_dss.unpack(buffer, lid, &cnt, OPAL_UINT16);
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
OMPI_ERROR_LOG(rc);
return OMPI_ERROR;
}
BTL_VERBOSE(("Recv unpack requested lid = %d", *lid));
@ -183,7 +178,7 @@ static int xoob_receive_connect_data(mca_btl_openib_rem_info_t *info, uint16_t *
rc = opal_dss.unpack(buffer, &info->rem_qps->rem_qp_num, &cnt,
OPAL_UINT32);
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
OMPI_ERROR_LOG(rc);
return rc;
}
BTL_VERBOSE(("Recv unpack requested qp = %x", info->rem_qps->rem_qp_num));
@ -194,7 +189,7 @@ static int xoob_receive_connect_data(mca_btl_openib_rem_info_t *info, uint16_t *
BTL_VERBOSE(("unpacking %d of %d\n", cnt, OPAL_UINT32));
rc = opal_dss.unpack(buffer, &info->rem_index, &cnt, OPAL_UINT32);
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
OMPI_ERROR_LOG(rc);
return OMPI_ERROR;
}
BTL_VERBOSE(("Recv unpack remote index = %d", info->rem_index));
@ -203,7 +198,7 @@ static int xoob_receive_connect_data(mca_btl_openib_rem_info_t *info, uint16_t *
BTL_VERBOSE(("unpacking %d of %d\n", cnt, OPAL_UINT32));
rc = opal_dss.unpack(buffer, &info->rem_srqs[srq].rem_srq_num, &cnt, OPAL_UINT32);
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
OMPI_ERROR_LOG(rc);
return OMPI_ERROR;
}
BTL_VERBOSE(("Recv unpack remote index srq num[%d]= %d", srq, info->rem_srqs[srq].rem_srq_num));
@ -222,7 +217,7 @@ static int xoob_send_connect_data(mca_btl_base_endpoint_t* endpoint,
int rc, srq;
if (NULL == buffer) {
ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
OMPI_ERROR_LOG(OMPI_ERR_OUT_OF_RESOURCE);
return OMPI_ERR_OUT_OF_RESOURCE;
}
@ -236,7 +231,7 @@ static int xoob_send_connect_data(mca_btl_base_endpoint_t* endpoint,
BTL_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT8));
rc = opal_dss.pack(buffer, &message_type, 1, OPAL_UINT8);
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
OMPI_ERROR_LOG(rc);
return rc;
}
@ -244,7 +239,7 @@ static int xoob_send_connect_data(mca_btl_base_endpoint_t* endpoint,
BTL_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT64));
rc = opal_dss.pack(buffer, &endpoint->subnet_id, 1, OPAL_UINT64);
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
OMPI_ERROR_LOG(rc);
return rc;
}
@ -252,7 +247,7 @@ static int xoob_send_connect_data(mca_btl_base_endpoint_t* endpoint,
BTL_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT16));
rc = opal_dss.pack(buffer, &endpoint->endpoint_btl->lid, 1, OPAL_UINT16);
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
OMPI_ERROR_LOG(rc);
return rc;
}
@ -279,14 +274,14 @@ static int xoob_send_connect_data(mca_btl_base_endpoint_t* endpoint,
BTL_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT32));
rc = opal_dss.pack(buffer, &qp_num, 1, OPAL_UINT32);
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
OMPI_ERROR_LOG(rc);
return rc;
}
BTL_VERBOSE(("Send pack lpsn = %d", psn));
BTL_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT32));
rc = opal_dss.pack(buffer, &psn, 1, OPAL_UINT32);
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
OMPI_ERROR_LOG(rc);
return rc;
}
@ -295,7 +290,7 @@ static int xoob_send_connect_data(mca_btl_base_endpoint_t* endpoint,
rc = opal_dss.pack(buffer, &endpoint->endpoint_btl->device->mtu, 1,
OPAL_UINT32);
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
OMPI_ERROR_LOG(rc);
return rc;
}
}
@ -312,7 +307,7 @@ static int xoob_send_connect_data(mca_btl_base_endpoint_t* endpoint,
BTL_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT16));
rc = opal_dss.pack(buffer, &endpoint->ib_addr->lid, 1, OPAL_UINT16);
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
OMPI_ERROR_LOG(rc);
return rc;
}
}
@ -325,7 +320,7 @@ static int xoob_send_connect_data(mca_btl_base_endpoint_t* endpoint,
rc = opal_dss.pack(buffer, &endpoint->ib_addr->remote_xrc_rcv_qp_num,
1, OPAL_UINT32);
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
OMPI_ERROR_LOG(rc);
return rc;
}
}
@ -341,7 +336,7 @@ static int xoob_send_connect_data(mca_btl_base_endpoint_t* endpoint,
BTL_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT32));
rc = opal_dss.pack(buffer, &endpoint->index, 1, OPAL_UINT32);
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
OMPI_ERROR_LOG(rc);
return rc;
}
/* on response we add all SRQ numbers */
@ -351,18 +346,18 @@ static int xoob_send_connect_data(mca_btl_base_endpoint_t* endpoint,
rc = opal_dss.pack(buffer, &endpoint->endpoint_btl->qps[srq].u.srq_qp.srq->xrc_srq_num,
1, OPAL_UINT32);
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
OMPI_ERROR_LOG(rc);
return rc;
}
}
}
/* send to remote endpoint */
rc = orte_rml.send_buffer_nb(&endpoint->endpoint_proc->proc_ompi->proc_name,
rc = ompi_rte_send_buffer_nb(&endpoint->endpoint_proc->proc_ompi->proc_name,
buffer, OMPI_RML_TAG_XOPENIB, 0,
xoob_rml_send_cb, NULL);
if (ORTE_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
if (OMPI_SUCCESS != rc) {
OMPI_ERROR_LOG(rc);
return rc;
}
@ -414,9 +409,9 @@ static int xoob_send_qp_create (mca_btl_base_endpoint_t* endpoint)
qp_init_attr.xrc_domain = openib_btl->device->xrc_domain;
*qp = ibv_create_qp(openib_btl->device->ib_pd, &qp_init_attr);
if (NULL == *qp) {
orte_show_help("help-mpi-btl-openib-cpc-base.txt",
ompi_show_help("help-mpi-btl-openib-cpc-base.txt",
"ibv_create_qp failed", true,
orte_process_info.nodename,
ompi_process_info.nodename,
ibv_get_device_name(openib_btl->device->ib_dev),
"Reliable connected (XRC)");
return OMPI_ERROR;
@ -424,8 +419,8 @@ static int xoob_send_qp_create (mca_btl_base_endpoint_t* endpoint)
if (qp_init_attr.cap.max_inline_data < req_inline) {
endpoint->qps[0].ib_inline_max = qp_init_attr.cap.max_inline_data;
orte_show_help("help-mpi-btl-openib-cpc-base.txt",
"inline truncated", orte_process_info.nodename,
ompi_show_help("help-mpi-btl-openib-cpc-base.txt",
"inline truncated", ompi_process_info.nodename,
ibv_get_device_name(openib_btl->device->ib_dev),
openib_btl->port_num,
req_inline, qp_init_attr.cap.max_inline_data);
@ -689,7 +684,7 @@ static int xoob_reply_first_connect(mca_btl_openib_endpoint_t *endpoint,
}
/* Find endpoint for specific subnet/lid/message */
static mca_btl_openib_endpoint_t* xoob_find_endpoint(orte_process_name_t* process_name,
static mca_btl_openib_endpoint_t* xoob_find_endpoint(ompi_process_name_t* process_name,
uint64_t subnet_id, uint16_t lid, uint8_t message_type)
{
size_t i;
@ -711,7 +706,7 @@ static mca_btl_openib_endpoint_t* xoob_find_endpoint(orte_process_name_t* proces
ib_proc != (mca_btl_openib_proc_t*)
opal_list_get_end(&mca_btl_openib_component.ib_procs);
ib_proc = (mca_btl_openib_proc_t*)opal_list_get_next(ib_proc)) {
if (OPAL_EQUAL == orte_util_compare_name_fields(ORTE_NS_CMP_ALL,
if (OPAL_EQUAL == ompi_rte_compare_name_fields(OMPI_RTE_CMP_ALL,
&ib_proc->proc_ompi->proc_name, process_name)) {
found = true;
break;
@ -822,8 +817,8 @@ static void free_rem_info(mca_btl_openib_rem_info_t *rem_info)
* and if this endpoint is trying to connect, reply with our QP info,
* otherwise try to modify QP's and establish reliable connection
*/
static void xoob_rml_recv_cb(int status, orte_process_name_t* process_name,
opal_buffer_t* buffer, orte_rml_tag_t tag,
static void xoob_rml_recv_cb(int status, ompi_process_name_t* process_name,
opal_buffer_t* buffer, ompi_rml_tag_t tag,
void* cbdata)
{
int rc;
@ -1028,12 +1023,12 @@ static int xoob_component_query(mca_btl_openib_module_t *openib_btl,
ensure to only post it *once*, because another btl may have
come in before this and already posted it. */
if (!rml_recv_posted) {
rc = orte_rml.recv_buffer_nb(ORTE_NAME_WILDCARD,
rc = ompi_rte_recv_buffer_nb(OMPI_NAME_WILDCARD,
OMPI_RML_TAG_XOPENIB,
ORTE_RML_PERSISTENT,
OMPI_RML_PERSISTENT,
xoob_rml_recv_cb,
NULL);
if (ORTE_SUCCESS != rc) {
if (OMPI_SUCCESS != rc) {
opal_output_verbose(5, mca_btl_base_output,
"openib BTL: xoob CPC system error %d (%s)",
rc, opal_strerror(rc));
@ -1141,7 +1136,7 @@ static int xoob_module_start_connect(ompi_btl_openib_connect_base_module_t *cpc,
static int xoob_component_finalize(void)
{
if (rml_recv_posted) {
orte_rml.recv_cancel(ORTE_NAME_WILDCARD, OMPI_RML_TAG_XOPENIB);
ompi_rte_recv_cancel(OMPI_NAME_WILDCARD, OMPI_RML_TAG_XOPENIB);
rml_recv_posted = false;
}
#if (ENABLE_DYNAMIC_SL)
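
The xoob CPC follows the same translation; the one idiom worth calling out is process-name handling, which now goes through ompi_rte_compare_name_fields both to locate a peer's proc structure and (in the oob CPC above) to decide which side drives the connection. A small sketch, assuming the rte call keeps the semantics of the orte_util_compare_name_fields call it replaces:

    /* Sketch; assumes OPAL_EQUAL is returned on a match and that unequal
     * names are consistently ordered, as with the ORTE call being replaced. */
    static bool names_match(ompi_process_name_t *a, ompi_process_name_t *b)
    {
        return OPAL_EQUAL == ompi_rte_compare_name_fields(OMPI_RTE_CMP_ALL, a, b);
    }

    static bool i_am_master(ompi_process_name_t *peer)
    {
        /* the higher-ordered name is the one that sets up the connection */
        return ompi_rte_compare_name_fields(OMPI_RTE_CMP_ALL,
                                            OMPI_PROC_MY_NAME, peer) > 0;
    }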

View file

@ -942,7 +942,7 @@ void mca_btl_sctp_component_accept(void)
*/
static void mca_btl_sctp_component_recv_handler(int sd, short flags, void* user)
{
orte_process_name_t guid;
ompi_process_name_t guid;
struct sockaddr_in addr;
int retval;
mca_btl_sctp_proc_t* btl_proc;

View file

@ -59,7 +59,6 @@
#include "opal/mca/event/event.h"
#include "orte/util/name_fns.h"
#include "btl_sctp.h"
#include "btl_sctp_endpoint.h"
#include "btl_sctp_proc.h"
@ -368,7 +367,7 @@ int mca_btl_sctp_endpoint_send(mca_btl_base_endpoint_t* btl_endpoint, mca_btl_sc
int rc = OMPI_SUCCESS;
/* What if there are multiple procs on this endpoint? Possible? */
orte_vpid_t vpid = btl_endpoint->endpoint_proc->proc_ompi->proc_name.vpid;
ompi_vpid_t vpid = btl_endpoint->endpoint_proc->proc_ompi->proc_name.vpid;
OPAL_THREAD_LOCK(&btl_endpoint->endpoint_send_lock);
if((mca_btl_sctp_proc_check_vpid(vpid, sender_proc_table)) == INVALID_ENTRY) {
@ -545,7 +544,7 @@ static int mca_btl_sctp_endpoint_send_connect_ack(mca_btl_base_endpoint_t* btl_e
{
/* send process identifier to remote endpoint */
mca_btl_sctp_proc_t* btl_proc = mca_btl_sctp_proc_local();
orte_process_name_t guid = btl_proc->proc_ompi->proc_name;
ompi_process_name_t guid = btl_proc->proc_ompi->proc_name;
ORTE_PROCESS_NAME_HTON(guid);
if(mca_btl_sctp_endpoint_send_blocking(btl_endpoint, &guid, sizeof(guid)) !=
@ -578,7 +577,7 @@ bool mca_btl_sctp_endpoint_accept(mca_btl_base_endpoint_t* btl_endpoint, struct
btl_addr->addr_inet.s_addr == addr->sin_addr.s_addr)
{
mca_btl_sctp_proc_t *endpoint_proc = btl_endpoint->endpoint_proc;
cmpval = orte_util_compare_name_fields(ORTE_NS_CMP_ALL,
cmpval = ompi_rte_compare_name_fields(OMPI_RTE_CMP_ALL,
&endpoint_proc->proc_ompi->proc_name,
&this_proc->proc_ompi->proc_name);
if((btl_endpoint->endpoint_sd < 0) ||
@ -828,19 +827,19 @@ static int mca_btl_sctp_endpoint_recv_blocking(mca_btl_base_endpoint_t* btl_endp
static int mca_btl_sctp_endpoint_recv_connect_ack(mca_btl_base_endpoint_t* btl_endpoint)
{
orte_process_name_t guid;
ompi_process_name_t guid;
mca_btl_sctp_proc_t* btl_proc = btl_endpoint->endpoint_proc;
if((mca_btl_sctp_endpoint_recv_blocking(btl_endpoint, &guid,
sizeof(orte_process_name_t))) != sizeof(orte_process_name_t)) {
sizeof(ompi_process_name_t))) != sizeof(ompi_process_name_t)) {
return OMPI_ERR_UNREACH;
}
ORTE_PROCESS_NAME_NTOH(guid);
/* compare this to the expected values */
if(memcmp(&btl_proc->proc_ompi->proc_name, &guid, sizeof(orte_process_name_t)) != 0) {
if(memcmp(&btl_proc->proc_ompi->proc_name, &guid, sizeof(ompi_process_name_t)) != 0) {
BTL_ERROR(("received unexpected process identifier %s",
ORTE_NAME_PRINT(&guid)));
OMPI_NAME_PRINT(&guid)));
mca_btl_sctp_endpoint_close(btl_endpoint);
return OMPI_ERR_UNREACH;
}
@ -1198,7 +1197,7 @@ static void mca_btl_sctp_endpoint_send_handler(int sd, short flags, void* user)
/* 1 to many */
mca_btl_sctp_endpoint_t* btl_endpoint = (mca_btl_sctp_endpoint_t *)user;
our_sctp_endpoint *current_our_endpoint = NULL;
orte_vpid_t vpid;
ompi_vpid_t vpid;
send_handler_1_to_many_different_endpoint:
vpid = btl_endpoint->endpoint_proc->proc_ompi->proc_name.vpid;
OPAL_THREAD_LOCK(&btl_endpoint->endpoint_send_lock);

View file

@ -67,7 +67,7 @@ void mca_btl_sctp_proc_destruct(mca_btl_sctp_proc_t* stcp_proc)
/* remove from list of all proc instances */
OPAL_THREAD_LOCK(&mca_btl_sctp_component.sctp_lock);
opal_hash_table_remove_value_uint64(&mca_btl_sctp_component.sctp_procs,
orte_util_hash_name(&stcp_proc->proc_ompi->proc_name));
ompi_rte_hash_name(&stcp_proc->proc_ompi->proc_name));
OPAL_THREAD_UNLOCK(&mca_btl_sctp_component.sctp_lock);
/* release resources */
@ -113,7 +113,7 @@ mca_btl_sctp_proc_t* mca_btl_sctp_proc_create(ompi_proc_t* ompi_proc)
int rc;
size_t size;
mca_btl_sctp_proc_t* btl_proc;
uint64_t hash = orte_util_hash_name(&ompi_proc->proc_name);
uint64_t hash = ompi_rte_hash_name(&ompi_proc->proc_name);
OPAL_THREAD_LOCK(&mca_btl_sctp_component.sctp_lock);
rc = opal_hash_table_get_value_uint64(&mca_btl_sctp_component.sctp_procs,
@ -334,12 +334,12 @@ int mca_btl_sctp_proc_remove(mca_btl_sctp_proc_t* btl_proc, mca_btl_base_endpoin
* Look for an existing SCTP process instance based on the globally unique
* process identifier.
*/
mca_btl_sctp_proc_t* mca_btl_sctp_proc_lookup(const orte_process_name_t *name)
mca_btl_sctp_proc_t* mca_btl_sctp_proc_lookup(const ompi_process_name_t *name)
{
mca_btl_sctp_proc_t* proc = NULL;
OPAL_THREAD_LOCK(&mca_btl_sctp_component.sctp_lock);
opal_hash_table_get_value_uint64(&mca_btl_sctp_component.sctp_procs,
orte_util_hash_name(name), (void**)&proc);
ompi_rte_hash_name(name), (void**)&proc);
OPAL_THREAD_UNLOCK(&mca_btl_sctp_component.sctp_lock);
return proc;
}
@ -373,11 +373,11 @@ bool mca_btl_sctp_proc_accept(mca_btl_sctp_proc_t* btl_proc, struct sockaddr_in*
*
* TODO - change this to use a hash for constant time performance
*/
static int mca_btl_sctp_proc_check(int is_vpid, sctp_assoc_t id, orte_vpid_t vpid, struct mca_btl_sctp_proc_table_node *table) {
static int mca_btl_sctp_proc_check(int is_vpid, sctp_assoc_t id, ompi_vpid_t vpid, struct mca_btl_sctp_proc_table_node *table) {
#if MCA_BTL_SCTP_DONT_USE_HASH
int i;
for(i = 0; i < MCA_BTL_SCTP_PROC_TABLE_SIZE; i++) {
/* sender_proc_table uses orte_vpid_t.
/* sender_proc_table uses ompi_vpid_t.
* recvr_proc_table uses sctp_assoc_id.
* Calls using this function use one or the other.
*/
@ -403,7 +403,7 @@ static int mca_btl_sctp_proc_check(int is_vpid, sctp_assoc_t id, orte_vpid_t vpi
#endif
}
int mca_btl_sctp_proc_check_vpid(orte_vpid_t vpid, struct mca_btl_sctp_proc_table_node *table) {
int mca_btl_sctp_proc_check_vpid(ompi_vpid_t vpid, struct mca_btl_sctp_proc_table_node *table) {
return mca_btl_sctp_proc_check(1, 0, vpid, table);
}
@ -421,7 +421,7 @@ int mca_btl_sctp_proc_check_assoc_id(sctp_assoc_t id, struct mca_btl_sctp_proc_t
* TODO change this to a hash table that can expand to eliminate
* MCA_BTL_SCTP_PROC_TABLE_SIZE limitation
*/
static void mca_btl_sctp_proc_add(sctp_assoc_t id, orte_vpid_t vpid, struct mca_btl_sctp_proc_t *proc, struct mca_btl_sctp_proc_table_node *table) {
static void mca_btl_sctp_proc_add(sctp_assoc_t id, ompi_vpid_t vpid, struct mca_btl_sctp_proc_t *proc, struct mca_btl_sctp_proc_table_node *table) {
#if MCA_BTL_SCTP_DONT_USE_HASH
int i;
for(i = 0; i < MCA_BTL_SCTP_PROC_TABLE_SIZE; i++) {
@ -440,7 +440,7 @@ static void mca_btl_sctp_proc_add(sctp_assoc_t id, orte_vpid_t vpid, struct mca_
#endif
}
void mca_btl_sctp_proc_add_vpid(orte_vpid_t vpid, struct mca_btl_sctp_proc_t *proc, struct mca_btl_sctp_proc_table_node *table) {
void mca_btl_sctp_proc_add_vpid(ompi_vpid_t vpid, struct mca_btl_sctp_proc_t *proc, struct mca_btl_sctp_proc_table_node *table) {
mca_btl_sctp_proc_add(0, vpid, proc, table);
}


@ -60,7 +60,7 @@ typedef struct mca_btl_sctp_proc_t mca_btl_sctp_proc_t;
OBJ_CLASS_DECLARATION(mca_btl_sctp_proc_t);
mca_btl_sctp_proc_t* mca_btl_sctp_proc_create(ompi_proc_t* ompi_proc);
mca_btl_sctp_proc_t* mca_btl_sctp_proc_lookup(const orte_process_name_t* name);
mca_btl_sctp_proc_t* mca_btl_sctp_proc_lookup(const ompi_process_name_t* name);
int mca_btl_sctp_proc_insert(mca_btl_sctp_proc_t*, mca_btl_base_endpoint_t*);
int mca_btl_sctp_proc_remove(mca_btl_sctp_proc_t*, mca_btl_base_endpoint_t*);
bool mca_btl_sctp_proc_accept(mca_btl_sctp_proc_t*, struct sockaddr_in*, int);
@ -90,7 +90,7 @@ enum {
struct mca_btl_sctp_proc_table_node {
int valid;
sctp_assoc_t sctp_assoc_id;
orte_vpid_t vpid;
ompi_vpid_t vpid;
struct mca_btl_sctp_proc_t *proc;
};
typedef struct mca_btl_sctp_proc_table_node mca_btl_sctp_proc_table_node;
@ -98,9 +98,9 @@ typedef struct mca_btl_sctp_proc_table_node mca_btl_sctp_proc_table_node;
extern struct mca_btl_sctp_proc_table_node *recvr_proc_table;
extern struct mca_btl_sctp_proc_table_node *sender_proc_table;
int mca_btl_sctp_proc_check_vpid(orte_vpid_t vpid, struct mca_btl_sctp_proc_table_node *table);
int mca_btl_sctp_proc_check_vpid(ompi_vpid_t vpid, struct mca_btl_sctp_proc_table_node *table);
int mca_btl_sctp_proc_check_assoc_id(sctp_assoc_t id, struct mca_btl_sctp_proc_table_node *table);
void mca_btl_sctp_proc_add_vpid(orte_vpid_t vpid, struct mca_btl_sctp_proc_t *proc, struct mca_btl_sctp_proc_table_node *table);
void mca_btl_sctp_proc_add_vpid(ompi_vpid_t vpid, struct mca_btl_sctp_proc_t *proc, struct mca_btl_sctp_proc_table_node *table);
void mca_btl_sctp_proc_add_assoc_id(sctp_assoc_t id, struct mca_btl_sctp_proc_t *proc, struct mca_btl_sctp_proc_table_node *table);
mca_btl_sctp_proc_t *mca_btl_sctp_proc_get(sctp_assoc_t id, struct mca_btl_sctp_proc_table_node *table);


@ -97,7 +97,7 @@ void mca_btl_sctp_recv_handler(int sd, short flags, void *user) {
/* allocated this elsewhere only once per BTL to avoid repeatedly calling malloc */
char *buf = sctp_recv_buf;
orte_process_name_t guid;
ompi_process_name_t guid;
struct sockaddr_in their_addr;
int retval;
mca_btl_sctp_proc_t *btl_proc;


@ -45,8 +45,7 @@
#include "opal/mca/hwloc/base/base.h"
#include "opal/mca/shmem/base/base.h"
#include "opal/mca/shmem/shmem.h"
#include "orte/util/proc_info.h"
#include "orte/util/show_help.h"
#include "opal/datatype/opal_convertor.h"
#include "ompi/class/ompi_free_list.h"
#include "ompi/runtime/ompi_module_exchange.h"
@ -140,7 +139,7 @@ setup_mpool_base_resources(mca_btl_sm_component_t *comp_ptr,
if (-1 == (fd = open(comp_ptr->sm_mpool_rndv_file_name, O_RDONLY))) {
int err = errno;
orte_show_help("help-mpi-btl-sm.txt", "sys call fail", true,
ompi_show_help("help-mpi-btl-sm.txt", "sys call fail", true,
"open(2)", strerror(err), err);
rc = OMPI_ERR_IN_ERRNO;
goto out;
@ -184,7 +183,7 @@ sm_segment_attach(mca_btl_sm_component_t *comp_ptr)
}
if (-1 == (fd = open(comp_ptr->sm_rndv_file_name, O_RDONLY))) {
int err = errno;
orte_show_help("help-mpi-btl-sm.txt", "sys call fail", true,
ompi_show_help("help-mpi-btl-sm.txt", "sys call fail", true,
"open(2)", strerror(err), err);
rc = OMPI_ERR_IN_ERRNO;
goto out;
@ -320,6 +319,7 @@ sm_btl_first_time_init(mca_btl_sm_t *sm_btl,
free(res);
return OMPI_ERR_OUT_OF_RESOURCE;
}
/* remember that node rank zero is already attached */
if (0 != my_smp_rank) {
if (OMPI_SUCCESS != (rc = sm_segment_attach(m))) {
@ -443,7 +443,7 @@ create_sm_endpoint(int local_proc, struct ompi_proc_t *proc)
OBJ_CONSTRUCT(&ep->endpoint_lock, opal_mutex_t);
#if OMPI_ENABLE_PROGRESS_THREADS == 1
sprintf(path, "%s"OPAL_PATH_SEP"sm_fifo.%lu",
orte_process_info.job_session_dir,
ompi_process_info.job_session_dir,
(unsigned long)proc->proc_name.vpid);
ep->fifo_fd = open(path, O_WRONLY);
if(ep->fifo_fd < 0) {


@ -48,10 +48,6 @@
#include "opal/util/bit_ops.h"
#include "opal/util/output.h"
#include "orte/util/show_help.h"
#include "orte/runtime/orte_globals.h"
#include "orte/util/proc_info.h"
#include "ompi/constants.h"
#include "ompi/runtime/ompi_module_exchange.h"
#include "ompi/mca/mpool/base/base.h"
@ -165,9 +161,9 @@ static int sm_register(void)
mca_btl_sm_component.use_knem = i;
} else {
if (i > 0) {
orte_show_help("help-mpi-btl-sm.txt",
ompi_show_help("help-mpi-btl-sm.txt",
"knem requested but not supported", true,
orte_process_info.nodename);
ompi_process_info.nodename);
return OMPI_ERROR;
}
mca_btl_sm_component.use_knem = 0;
@ -372,7 +368,7 @@ get_num_local_procs(void)
{
/* num_local_peers does not include us in
* its calculation, so adjust for that */
return (int)(1 + orte_process_info.num_local_peers);
return (int)(1 + ompi_process_info.num_local_peers);
}
static void
@ -523,29 +519,29 @@ set_uniq_paths_for_init_rndv(mca_btl_sm_component_t *comp_ptr)
if (asprintf(&comp_ptr->sm_mpool_ctl_file_name,
"%s"OPAL_PATH_SEP"shared_mem_pool.%s",
orte_process_info.job_session_dir,
orte_process_info.nodename) < 0) {
ompi_process_info.job_session_dir,
ompi_process_info.nodename) < 0) {
/* rc set */
goto out;
}
if (asprintf(&comp_ptr->sm_mpool_rndv_file_name,
"%s"OPAL_PATH_SEP"shared_mem_pool_rndv.%s",
orte_process_info.job_session_dir,
orte_process_info.nodename) < 0) {
ompi_process_info.job_session_dir,
ompi_process_info.nodename) < 0) {
/* rc set */
goto out;
}
if (asprintf(&comp_ptr->sm_ctl_file_name,
"%s"OPAL_PATH_SEP"shared_mem_btl_module.%s",
orte_process_info.job_session_dir,
orte_process_info.nodename) < 0) {
ompi_process_info.job_session_dir,
ompi_process_info.nodename) < 0) {
/* rc set */
goto out;
}
if (asprintf(&comp_ptr->sm_rndv_file_name,
"%s"OPAL_PATH_SEP"shared_mem_btl_rndv.%s",
orte_process_info.job_session_dir,
orte_process_info.nodename) < 0) {
ompi_process_info.job_session_dir,
ompi_process_info.nodename) < 0) {
/* rc set */
goto out;
}
@ -637,7 +633,7 @@ create_rndv_file(mca_btl_sm_component_t *comp_ptr,
* sizeof(opal_shmem_ds_t), so we know where the mpool_res_size starts. */
if (-1 == (fd = open(fname, O_CREAT | O_RDWR, 0600))) {
int err = errno;
orte_show_help("help-mpi-btl-sm.txt", "sys call fail", true,
ompi_show_help("help-mpi-btl-sm.txt", "sys call fail", true,
"open(2)", strerror(err), err);
rc = OMPI_ERR_IN_ERRNO;
goto out;
@ -645,7 +641,7 @@ create_rndv_file(mca_btl_sm_component_t *comp_ptr,
if ((ssize_t)sizeof(opal_shmem_ds_t) != write(fd, &(tmp_modp->shmem_ds),
sizeof(opal_shmem_ds_t))) {
int err = errno;
orte_show_help("help-mpi-btl-sm.txt", "sys call fail", true,
ompi_show_help("help-mpi-btl-sm.txt", "sys call fail", true,
"write(2)", strerror(err), err);
rc = OMPI_ERR_IN_ERRNO;
goto out;
@ -653,7 +649,7 @@ create_rndv_file(mca_btl_sm_component_t *comp_ptr,
if (MCA_BTL_SM_RNDV_MOD_MPOOL == type) {
if ((ssize_t)sizeof(size) != write(fd, &size, sizeof(size))) {
int err = errno;
orte_show_help("help-mpi-btl-sm.txt", "sys call fail", true,
ompi_show_help("help-mpi-btl-sm.txt", "sys call fail", true,
"write(2)", strerror(err), err);
rc = OMPI_ERR_IN_ERRNO;
goto out;
@ -674,7 +670,7 @@ out:
*/
static int
backing_store_init(mca_btl_sm_component_t *comp_ptr,
orte_node_rank_t node_rank)
ompi_node_rank_t node_rank)
{
int rc = OMPI_SUCCESS;
@ -708,7 +704,7 @@ mca_btl_sm_component_init(int *num_btls,
{
int num_local_procs = 0;
mca_btl_base_module_t **btls = NULL;
orte_node_rank_t my_node_rank = ORTE_NODE_RANK_INVALID;
ompi_node_rank_t my_node_rank = OMPI_NODE_RANK_INVALID;
#if OMPI_BTL_SM_HAVE_KNEM
int rc;
#endif /* OMPI_BTL_SM_HAVE_KNEM */
@ -719,17 +715,17 @@ mca_btl_sm_component_init(int *num_btls,
mca_btl_sm_component.sm_mpool_base = NULL;
/* if no session directory was created, then we cannot be used */
if (NULL == ompi_process_info.job_session_dir) {
/* SKG - this isn't true anymore. Some backing facilities don't require a
* file-backed store. Extend shmem to provide this info one day. Especially
* when we use a proper modex for init. */
if (!orte_create_session_dirs) {
return NULL;
}
/* if we don't have locality information, then we cannot be used because we
* need to know who the respective node ranks for initialization. */
if (ORTE_NODE_RANK_INVALID ==
(my_node_rank = orte_process_info.my_node_rank)) {
orte_show_help("help-mpi-btl-sm.txt", "no locality", true);
if (OMPI_NODE_RANK_INVALID ==
(my_node_rank = ompi_process_info.my_node_rank)) {
ompi_show_help("help-mpi-btl-sm.txt", "no locality", true);
return NULL;
}
/* no use trying to use sm with less than two procs, so just bail. */
@ -751,13 +747,11 @@ mca_btl_sm_component_init(int *num_btls,
#if OMPI_ENABLE_PROGRESS_THREADS == 1
/* create a named pipe to receive events */
sprintf(mca_btl_sm_component.sm_fifo_path,
"%s"OPAL_PATH_SEP"sm_fifo.%lu",
orte_process_info.job_session_dir,
(unsigned long)ORTE_PROC_MY_NAME->vpid);
if (mkfifo(mca_btl_sm_component.sm_fifo_path, 0660) < 0) {
opal_output(0, "mca_btl_sm_component_init: "
"mkfifo failed with errno=%d\n",errno);
sprintf( mca_btl_sm_component.sm_fifo_path,
"%s"OPAL_PATH_SEP"sm_fifo.%lu", ompi_process_info.job_session_dir,
(unsigned long)OMPI_PROC_MY_NAME->vpid );
if(mkfifo(mca_btl_sm_component.sm_fifo_path, 0660) < 0) {
opal_output(0, "mca_btl_sm_component_init: mkfifo failed with errno=%d\n",errno);
return NULL;
}
mca_btl_sm_component.sm_fifo_fd = open(mca_btl_sm_component.sm_fifo_path,
@ -823,11 +817,11 @@ mca_btl_sm_component_init(int *num_btls,
if (0 != stat("/dev/knem", &sbuf)) {
sbuf.st_mode = 0;
}
orte_show_help("help-mpi-btl-sm.txt", "knem permission denied",
true, orte_process_info.nodename, sbuf.st_mode);
ompi_show_help("help-mpi-btl-sm.txt", "knem permission denied",
true, ompi_process_info.nodename, sbuf.st_mode);
} else {
orte_show_help("help-mpi-btl-sm.txt", "knem fail open",
true, orte_process_info.nodename, errno,
ompi_show_help("help-mpi-btl-sm.txt", "knem fail open",
true, ompi_process_info.nodename, errno,
strerror(errno));
}
goto no_knem;
@ -838,14 +832,14 @@ mca_btl_sm_component_init(int *num_btls,
rc = ioctl(mca_btl_sm.knem_fd, KNEM_CMD_GET_INFO,
&mca_btl_sm_component.knem_info);
if (rc < 0) {
orte_show_help("help-mpi-btl-sm.txt", "knem get ABI fail",
true, orte_process_info.nodename, errno,
ompi_show_help("help-mpi-btl-sm.txt", "knem get ABI fail",
true, ompi_process_info.nodename, errno,
strerror(errno));
goto no_knem;
}
if (KNEM_ABI_VERSION != mca_btl_sm_component.knem_info.abi) {
orte_show_help("help-mpi-btl-sm.txt", "knem ABI mismatch",
true, orte_process_info.nodename, KNEM_ABI_VERSION,
ompi_show_help("help-mpi-btl-sm.txt", "knem ABI mismatch",
true, ompi_process_info.nodename, KNEM_ABI_VERSION,
mca_btl_sm_component.knem_info.abi);
goto no_knem;
}
@ -866,8 +860,8 @@ mca_btl_sm_component_init(int *num_btls,
MAP_SHARED, mca_btl_sm.knem_fd,
KNEM_STATUS_ARRAY_FILE_OFFSET);
if (MAP_FAILED == mca_btl_sm.knem_status_array) {
orte_show_help("help-mpi-btl-sm.txt", "knem mmap fail",
true, orte_process_info.nodename, errno,
ompi_show_help("help-mpi-btl-sm.txt", "knem mmap fail",
true, ompi_process_info.nodename, errno,
strerror(errno));
goto no_knem;
}
@ -878,8 +872,8 @@ mca_btl_sm_component_init(int *num_btls,
malloc(sizeof(mca_btl_sm_frag_t *) *
mca_btl_sm_component.knem_max_simultaneous);
if (NULL == mca_btl_sm.knem_frag_array) {
orte_show_help("help-mpi-btl-sm.txt", "knem init fail",
true, orte_process_info.nodename, "malloc",
ompi_show_help("help-mpi-btl-sm.txt", "knem init fail",
true, ompi_process_info.nodename, "malloc",
errno, strerror(errno));
goto no_knem;
}


@ -45,9 +45,8 @@
#include "opal/mca/hwloc/base/base.h"
#include "opal/mca/shmem/base/base.h"
#include "opal/mca/shmem/shmem.h"
#include "orte/util/proc_info.h"
#include "orte/util/show_help.h"
#include "opal/datatype/opal_convertor.h"
#include "ompi/class/ompi_free_list.h"
#include "ompi/runtime/ompi_module_exchange.h"
#include "ompi/mca/btl/btl.h"
@ -457,7 +456,7 @@ create_sm_endpoint(int local_proc, struct ompi_proc_t *proc)
OBJ_CONSTRUCT(&ep->endpoint_lock, opal_mutex_t);
#if OMPI_ENABLE_PROGRESS_THREADS == 1
sprintf(path, "%s"OPAL_PATH_SEP"sm_fifo.%lu",
orte_process_info.job_session_dir,
ompi_process_info.job_session_dir,
(unsigned long)proc->proc_name.vpid);
ep->fifo_fd = open(path, O_WRONLY);
if(ep->fifo_fd < 0) {


@ -47,10 +47,6 @@
#include "opal/util/bit_ops.h"
#include "opal/util/output.h"
#include "orte/util/show_help.h"
#include "orte/runtime/orte_globals.h"
#include "orte/util/proc_info.h"
#include "ompi/constants.h"
#include "ompi/runtime/ompi_module_exchange.h"
#include "ompi/mca/mpool/base/base.h"
@ -649,10 +645,10 @@ mca_btl_smcuda_component_init(int *num_btls,
mca_btl_smcuda_component.sm_mpool_base = NULL;
/* if no session directory was created, then we cannot be used */
if (NULL == ompi_process_info.job_session_dir) {
/* SKG - this isn't true anymore. Some backing facilities don't require a
* file-backed store. Extend shmem to provide this info one day. Especially
* when we use a proper modex for init. */
if (!orte_create_session_dirs) {
return NULL;
}
/* if we don't have locality information, then we cannot be used because we
@ -681,13 +677,11 @@ mca_btl_smcuda_component_init(int *num_btls,
#if OMPI_ENABLE_PROGRESS_THREADS == 1
/* create a named pipe to receive events */
sprintf(mca_btl_smcuda_component.sm_fifo_path,
"%s"OPAL_PATH_SEP"sm_fifo.%lu",
orte_process_info.job_session_dir,
(unsigned long)ORTE_PROC_MY_NAME->vpid);
if (mkfifo(mca_btl_smcuda_component.sm_fifo_path, 0660) < 0) {
opal_output(0, "mca_btl_smcuda_component_init: "
"mkfifo failed with errno=%d\n",errno);
sprintf( mca_btl_smcuda_component.sm_fifo_path,
"%s"OPAL_PATH_SEP"sm_fifo.%lu", ompi_process_info.job_session_dir,
(unsigned long)OMPI_PROC_MY_NAME->vpid );
if(mkfifo(mca_btl_smcuda_component.sm_fifo_path, 0660) < 0) {
opal_output(0, "mca_btl_smcuda_component_init: mkfifo failed with errno=%d\n",errno);
return NULL;
}
mca_btl_smcuda_component.sm_fifo_fd = open(mca_btl_smcuda_component.sm_fifo_path,


@ -57,10 +57,6 @@
#include "opal/util/net.h"
#include "opal/mca/base/mca_base_param.h"
#include "orte/types.h"
#include "orte/util/show_help.h"
#include "orte/util/proc_info.h"
#include "ompi/constants.h"
#include "ompi/mca/btl/btl.h"
#include "ompi/mca/btl/base/base.h"
@ -221,8 +217,8 @@ static int mca_btl_tcp_component_register(void)
mca_btl_tcp_param_register_int( "port_min_v4",
"The minimum port where the TCP BTL will try to bind (default 1024)", 1024 );
if( mca_btl_tcp_component.tcp_port_min > USHRT_MAX ) {
orte_show_help("help-mpi-btl-tcp.txt", "invalid minimum port",
true, "v4", orte_process_info.nodename,
ompi_show_help("help-mpi-btl-tcp.txt", "invalid minimum port",
true, "v4", ompi_process_info.nodename,
mca_btl_tcp_component.tcp_port_min );
mca_btl_tcp_component.tcp_port_min = 1024;
}
@ -240,8 +236,8 @@ static int mca_btl_tcp_component_register(void)
mca_btl_tcp_param_register_int( "port_min_v6",
"The minimum port where the TCP BTL will try to bind (default 1024)", 1024 );
if( mca_btl_tcp_component.tcp6_port_min > USHRT_MAX ) {
orte_show_help("help-mpi-btl-tcp.txt", "invalid minimum port",
true, "v6", orte_process_info.nodename,
ompi_show_help("help-mpi-btl-tcp.txt", "invalid minimum port",
true, "v6", ompi_process_info.nodename,
mca_btl_tcp_component.tcp6_port_min );
mca_btl_tcp_component.tcp6_port_min = 1024;
}
@ -287,10 +283,10 @@ static int mca_btl_tcp_component_register(void)
if (NULL != argv && '\0' != *(argv[0])) {
int if_index, rc, count;
orte_node_rank_t node_rank;
ompi_node_rank_t node_rank;
char name[256];
node_rank = orte_process_info.my_node_rank;
node_rank = ompi_process_info.my_node_rank;
/* Now that we've got that local rank, take the
corresponding entry from the tcp_if_seq list (wrapping
@ -312,10 +308,10 @@ static int mca_btl_tcp_component_register(void)
}
}
if (if_index < 0) {
orte_show_help("help-mpi-btl-tcp.txt",
ompi_show_help("help-mpi-btl-tcp.txt",
"invalid if_inexclude",
true, "if_seq",
orte_process_info.nodename,
ompi_process_info.nodename,
mca_btl_tcp_component.tcp_if_seq,
"Interface does not exist");
return OMPI_ERR_BAD_PARAM;
@ -526,8 +522,8 @@ static char **split_and_resolve(char **orig_str, char *name)
tmp = strdup(argv[i]);
str = strchr(argv[i], '/');
if (NULL == str) {
orte_show_help("help-mpi-btl-tcp.txt", "invalid if_inexclude",
true, name, orte_process_info.nodename,
ompi_show_help("help-mpi-btl-tcp.txt", "invalid if_inexclude",
true, name, ompi_process_info.nodename,
tmp, "Invalid specification (missing \"/\")");
free(argv[i]);
free(tmp);
@ -543,8 +539,8 @@ static char **split_and_resolve(char **orig_str, char *name)
free(argv[i]);
if (1 != ret) {
orte_show_help("help-mpi-btl-tcp.txt", "invalid if_inexclude",
true, name, orte_process_info.nodename, tmp,
ompi_show_help("help-mpi-btl-tcp.txt", "invalid if_inexclude",
true, name, ompi_process_info.nodename, tmp,
"Invalid specification (inet_pton() failed)");
free(tmp);
continue;
@ -570,8 +566,8 @@ static char **split_and_resolve(char **orig_str, char *name)
/* If we didn't find a match, keep trying */
if (if_index < 0) {
orte_show_help("help-mpi-btl-tcp.txt", "invalid if_inexclude",
true, name, orte_process_info.nodename, tmp,
ompi_show_help("help-mpi-btl-tcp.txt", "invalid if_inexclude",
true, name, ompi_process_info.nodename, tmp,
"Did not find interface matching this subnet");
free(tmp);
continue;
@ -1134,7 +1130,7 @@ static void mca_btl_tcp_component_accept_handler( int incoming_sd,
*/
static void mca_btl_tcp_component_recv_handler(int sd, short flags, void* user)
{
orte_process_name_t guid;
ompi_process_name_t guid;
struct sockaddr_storage addr;
int retval;
mca_btl_tcp_proc_t* btl_proc;
@ -1149,7 +1145,7 @@ static void mca_btl_tcp_component_recv_handler(int sd, short flags, void* user)
CLOSE_THE_SOCKET(sd);
return;
}
ORTE_PROCESS_NAME_NTOH(guid);
OMPI_PROCESS_NAME_NTOH(guid);
/* now set socket up to be non-blocking */
if((flags = fcntl(sd, F_GETFL, 0)) < 0) {


@ -316,9 +316,9 @@ static int mca_btl_tcp_endpoint_send_connect_ack(mca_btl_base_endpoint_t* btl_en
{
/* send process identifier to remote endpoint */
mca_btl_tcp_proc_t* btl_proc = mca_btl_tcp_proc_local();
orte_process_name_t guid = btl_proc->proc_ompi->proc_name;
ompi_process_name_t guid = btl_proc->proc_ompi->proc_name;
ORTE_PROCESS_NAME_HTON(guid);
OMPI_PROCESS_NAME_HTON(guid);
if(mca_btl_tcp_endpoint_send_blocking(btl_endpoint, &guid, sizeof(guid)) !=
sizeof(guid)) {
return OMPI_ERR_UNREACH;
@ -351,7 +351,7 @@ bool mca_btl_tcp_endpoint_accept(mca_btl_base_endpoint_t* btl_endpoint,
return false;
}
cmpval = orte_util_compare_name_fields(ORTE_NS_CMP_ALL,
cmpval = ompi_rte_compare_name_fields(OMPI_RTE_CMP_ALL,
&endpoint_proc->proc_ompi->proc_name,
&this_proc->proc_ompi->proc_name);
if((btl_endpoint->endpoint_sd < 0) ||
@ -492,19 +492,19 @@ static int mca_btl_tcp_endpoint_recv_blocking(mca_btl_base_endpoint_t* btl_endpo
*/
static int mca_btl_tcp_endpoint_recv_connect_ack(mca_btl_base_endpoint_t* btl_endpoint)
{
orte_process_name_t guid;
ompi_process_name_t guid;
mca_btl_tcp_proc_t* btl_proc = btl_endpoint->endpoint_proc;
if((mca_btl_tcp_endpoint_recv_blocking(btl_endpoint, &guid, sizeof(orte_process_name_t))) != sizeof(orte_process_name_t)) {
if((mca_btl_tcp_endpoint_recv_blocking(btl_endpoint, &guid, sizeof(ompi_process_name_t))) != sizeof(ompi_process_name_t)) {
return OMPI_ERR_UNREACH;
}
ORTE_PROCESS_NAME_NTOH(guid);
OMPI_PROCESS_NAME_NTOH(guid);
/* compare this to the expected values */
if (OPAL_EQUAL != orte_util_compare_name_fields(ORTE_NS_CMP_ALL,
if (OPAL_EQUAL != ompi_rte_compare_name_fields(OMPI_RTE_CMP_ALL,
&btl_proc->proc_ompi->proc_name,
&guid)) {
BTL_ERROR(("received unexpected process identifier %s",
ORTE_NAME_PRINT(&guid)));
OMPI_NAME_PRINT(&guid)));
mca_btl_tcp_endpoint_close(btl_endpoint);
return OMPI_ERR_UNREACH;
}
@ -591,7 +591,7 @@ static int mca_btl_tcp_endpoint_start_connect(mca_btl_base_endpoint_t* btl_endpo
opal_output_verbose(20, mca_btl_base_output,
"btl: tcp: attempting to connect() to %s address %s on port %d",
ORTE_NAME_PRINT(&btl_endpoint->endpoint_proc->proc_ompi->proc_name),
OMPI_NAME_PRINT(&btl_endpoint->endpoint_proc->proc_ompi->proc_name),
opal_net_get_hostname((struct sockaddr*) &endpoint_addr),
ntohs(btl_endpoint->endpoint_addr->addr_port));
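
For orientation, here is a minimal sketch, not part of the commit itself, of how the connect-ack check in btl_tcp_endpoint.c reads once the renames above are applied. It assumes the btl_tcp and ompi/mca/rte headers are in scope; recv_blocking() and close_endpoint() are illustrative stand-ins for the mca_btl_tcp_endpoint_recv_blocking() and mca_btl_tcp_endpoint_close() routines seen in the hunks above.

/* Minimal sketch (not part of this commit) of the connect-ack check after the
 * rename; recv_blocking() and close_endpoint() are placeholder names. */
static int sketch_recv_connect_ack(mca_btl_base_endpoint_t *btl_endpoint)
{
    ompi_process_name_t guid;
    mca_btl_tcp_proc_t *btl_proc = btl_endpoint->endpoint_proc;

    /* read the peer's process name off the socket */
    if (sizeof(guid) != recv_blocking(btl_endpoint, &guid, sizeof(guid))) {
        return OMPI_ERR_UNREACH;
    }
    OMPI_PROCESS_NAME_NTOH(guid);   /* convert from network to host order */

    /* the name must match the proc this endpoint was created for */
    if (OPAL_EQUAL != ompi_rte_compare_name_fields(OMPI_RTE_CMP_ALL,
                                                   &btl_proc->proc_ompi->proc_name,
                                                   &guid)) {
        BTL_ERROR(("received unexpected process identifier %s",
                   OMPI_NAME_PRINT(&guid)));
        close_endpoint(btl_endpoint);
        return OMPI_ERR_UNREACH;
    }
    return OMPI_SUCCESS;
}

The only substantive content here is the rename: the wire format and the OPAL_EQUAL comparison are unchanged from the orte_util_compare_name_fields() version.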


@ -76,7 +76,7 @@ void mca_btl_tcp_proc_destruct(mca_btl_tcp_proc_t* tcp_proc)
/* remove from list of all proc instances */
OPAL_THREAD_LOCK(&mca_btl_tcp_component.tcp_lock);
opal_hash_table_remove_value_uint64(&mca_btl_tcp_component.tcp_procs,
orte_util_hash_name(&tcp_proc->proc_ompi->proc_name));
ompi_rte_hash_name(&tcp_proc->proc_ompi->proc_name));
OPAL_THREAD_UNLOCK(&mca_btl_tcp_component.tcp_lock);
/* release resources */
@ -99,7 +99,7 @@ mca_btl_tcp_proc_t* mca_btl_tcp_proc_create(ompi_proc_t* ompi_proc)
int rc;
size_t size;
mca_btl_tcp_proc_t* btl_proc;
uint64_t hash = orte_util_hash_name(&ompi_proc->proc_name);
uint64_t hash = ompi_rte_hash_name(&ompi_proc->proc_name);
OPAL_THREAD_LOCK(&mca_btl_tcp_component.tcp_lock);
rc = opal_hash_table_get_value_uint64(&mca_btl_tcp_component.tcp_procs,
@ -706,12 +706,12 @@ int mca_btl_tcp_proc_remove(mca_btl_tcp_proc_t* btl_proc, mca_btl_base_endpoint_
* Look for an existing TCP process instance based on the globally unique
* process identifier.
*/
mca_btl_tcp_proc_t* mca_btl_tcp_proc_lookup(const orte_process_name_t *name)
mca_btl_tcp_proc_t* mca_btl_tcp_proc_lookup(const ompi_process_name_t *name)
{
mca_btl_tcp_proc_t* proc = NULL;
OPAL_THREAD_LOCK(&mca_btl_tcp_component.tcp_lock);
opal_hash_table_get_value_uint64(&mca_btl_tcp_component.tcp_procs,
orte_util_hash_name(name), (void**)&proc);
ompi_rte_hash_name(name), (void**)&proc);
OPAL_THREAD_UNLOCK(&mca_btl_tcp_component.tcp_lock);
return proc;
}


@ -22,7 +22,6 @@
#include "opal/class/opal_object.h"
#include "ompi/proc/proc.h"
#include "orte/types.h"
#include "btl_tcp.h"
#include "btl_tcp_addr.h"
#include "btl_tcp_endpoint.h"
@ -106,7 +105,7 @@ enum mca_btl_tcp_connection_quality {
mca_btl_tcp_proc_t* mca_btl_tcp_proc_create(ompi_proc_t* ompi_proc);
mca_btl_tcp_proc_t* mca_btl_tcp_proc_lookup(const orte_process_name_t* name);
mca_btl_tcp_proc_t* mca_btl_tcp_proc_lookup(const ompi_process_name_t* name);
int mca_btl_tcp_proc_insert(mca_btl_tcp_proc_t*, mca_btl_base_endpoint_t*);
int mca_btl_tcp_proc_remove(mca_btl_tcp_proc_t*, mca_btl_base_endpoint_t*);
bool mca_btl_tcp_proc_accept(mca_btl_tcp_proc_t*, struct sockaddr*, int);


@ -812,7 +812,7 @@ static int mca_btl_udapl_assign_netmask(mca_btl_udapl_module_t* udapl_btl)
BTL_UDAPL_VERBOSE_HELP(VERBOSE_SHOW_HELP,
("help-mpi-btl-udapl.txt", "interface not found",
true, orte_process_info.nodename, btl_addr_string));
true, ompi_process_info.nodename, btl_addr_string));
return OMPI_ERROR;
}
@ -826,7 +826,7 @@ static int mca_btl_udapl_assign_netmask(mca_btl_udapl_module_t* udapl_btl)
BTL_UDAPL_VERBOSE_HELP(VERBOSE_SHOW_HELP,
("help-mpi-btl-udapl.txt", "netmask not found",
true, orte_process_info.nodename, btl_addr_string));
true, ompi_process_info.nodename, btl_addr_string));
return OMPI_ERROR;
}
@ -840,7 +840,7 @@ static int mca_btl_udapl_assign_netmask(mca_btl_udapl_module_t* udapl_btl)
/* current uDAPL BTL does not support IPv6 */
BTL_UDAPL_VERBOSE_HELP(VERBOSE_SHOW_HELP,
("help-mpi-btl-udapl.txt", "IPv4 only",
true, orte_process_info.nodename));
true, ompi_process_info.nodename));
return OMPI_ERROR;
}


@ -37,7 +37,6 @@
#include "ompi/mca/btl/btl.h"
#include "ompi/mca/btl/base/base.h"
#include "ompi/mca/btl/base/btl_base_error.h"
#include "orte/util/show_help.h"
#include "ompi/mca/mpool/mpool.h"
#include "ompi/mca/btl/btl.h"
#include "btl_udapl_endpoint.h"
@ -232,7 +231,7 @@ do { \
#define BTL_UDAPL_VERBOSE_HELP(verbose_level, args) \
do { \
if (verbose_level <= mca_btl_udapl_component.udapl_verbosity) { \
orte_show_help args; \
ompi_show_help args; \
} \
} while(0);


@ -39,7 +39,6 @@
#include "ompi/mca/btl/base/base.h"
#include "ompi/mca/btl/base/btl_base_error.h"
#include "btl_udapl_endpoint.h"
#include "orte/util/proc_info.h"
#include "ompi/runtime/ompi_module_exchange.h"
#include "ompi/runtime/mpiruntime.h"
@ -417,7 +416,7 @@ static int mca_btl_udapl_modify_ia_list(DAT_COUNT *num_info_entries,
char *str = opal_argv_join(mca_btl_udapl_component.if_list, ',');
BTL_UDAPL_VERBOSE_HELP(VERBOSE_SHOW_HELP,
("help-mpi-btl-udapl.txt", "nonexistent entry",
true, orte_process_info.nodename,
true, ompi_process_info.nodename,
((NULL != mca_btl_udapl_component.if_include) ?
"in" : "ex"), str));
free(str);


@ -28,14 +28,12 @@
#include "ompi/types.h"
#include "opal/align.h"
#include "orte/mca/rml/rml.h"
#include "orte/mca/errmgr/errmgr.h"
#include "opal/dss/dss.h"
#include "opal/class/opal_pointer_array.h"
#include "ompi/class/ompi_free_list.h"
#include "ompi/mca/mpool/grdma/mpool_grdma.h"
#include "ompi/mca/dpm/dpm.h"
#include "ompi/mca/rte/rte.h"
#include "ompi/mca/btl/base/btl_base_error.h"
#include "btl_udapl.h"
@ -46,9 +44,9 @@
static void mca_btl_udapl_endpoint_send_cb(
int status,
orte_process_name_t* endpoint,
ompi_process_name_t* endpoint,
opal_buffer_t* buffer,
orte_rml_tag_t tag,
ompi_rml_tag_t tag,
void* cbdata);
static int mca_btl_udapl_start_connect(mca_btl_base_endpoint_t* endpoint);
static int mca_btl_udapl_endpoint_post_recv(
@ -57,9 +55,9 @@ static int mca_btl_udapl_endpoint_post_recv(
void mca_btl_udapl_endpoint_connect(mca_btl_udapl_endpoint_t* endpoint);
void mca_btl_udapl_endpoint_recv(
int status,
orte_process_name_t* endpoint,
ompi_process_name_t* endpoint,
opal_buffer_t* buffer,
orte_rml_tag_t tag,
ompi_rml_tag_t tag,
void* cbdata);
static int mca_btl_udapl_endpoint_finish_eager(mca_btl_udapl_endpoint_t*);
static int mca_btl_udapl_endpoint_finish_max(mca_btl_udapl_endpoint_t*);
@ -322,8 +320,8 @@ int mca_btl_udapl_endpoint_send(mca_btl_base_endpoint_t* endpoint,
}
static void mca_btl_udapl_endpoint_send_cb(int status, orte_process_name_t* endpoint,
opal_buffer_t* buffer, orte_rml_tag_t tag, void* cbdata)
static void mca_btl_udapl_endpoint_send_cb(int status, ompi_process_name_t* endpoint,
opal_buffer_t* buffer, ompi_rml_tag_t tag, void* cbdata)
{
OBJ_RELEASE(buffer);
}
@ -523,7 +521,7 @@ static int mca_btl_udapl_start_connect(mca_btl_base_endpoint_t* endpoint)
int rc;
if(NULL == buf) {
ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
OMPI_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
return OMPI_ERR_OUT_OF_RESOURCE;
}
@ -532,21 +530,21 @@ static int mca_btl_udapl_start_connect(mca_btl_base_endpoint_t* endpoint)
/* Pack our address information */
rc = opal_dss.pack(buf, &addr->port, 1, OPAL_UINT64);
if(OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
OMPI_ERROR_LOG(rc);
return rc;
}
rc = opal_dss.pack(buf, &addr->addr, sizeof(DAT_SOCK_ADDR), OPAL_UINT8);
if(OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
OMPI_ERROR_LOG(rc);
return rc;
}
/* Send the buffer */
rc = orte_rml.send_buffer_nb(&endpoint->endpoint_proc->proc_ompi->proc_name, buf,
rc = ompi_rte_send_buffer_nb(&endpoint->endpoint_proc->proc_ompi->proc_name, buf,
OMPI_RML_TAG_UDAPL, 0, mca_btl_udapl_endpoint_send_cb, NULL);
if(0 > rc) {
ORTE_ERROR_LOG(rc);
OMPI_ERROR_LOG(rc);
return rc;
}
@ -555,8 +553,8 @@ static int mca_btl_udapl_start_connect(mca_btl_base_endpoint_t* endpoint)
}
void mca_btl_udapl_endpoint_recv(int status, orte_process_name_t* endpoint,
opal_buffer_t* buffer, orte_rml_tag_t tag, void* cbdata)
void mca_btl_udapl_endpoint_recv(int status, ompi_process_name_t* endpoint,
opal_buffer_t* buffer, ompi_rml_tag_t tag, void* cbdata)
{
mca_btl_udapl_addr_t addr;
mca_btl_udapl_proc_t* proc;
@ -568,14 +566,14 @@ void mca_btl_udapl_endpoint_recv(int status, orte_process_name_t* endpoint,
/* Unpack data */
rc = opal_dss.unpack(buffer, &addr.port, &cnt, OPAL_UINT64);
if(OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
OMPI_ERROR_LOG(rc);
return;
}
cnt = sizeof(mca_btl_udapl_addr_t);
rc = opal_dss.unpack(buffer, &addr.addr, &cnt, OPAL_UINT8);
if(OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
OMPI_ERROR_LOG(rc);
return;
}
@ -587,7 +585,7 @@ void mca_btl_udapl_endpoint_recv(int status, orte_process_name_t* endpoint,
opal_list_get_end(&mca_btl_udapl_component.udapl_procs);
proc = (mca_btl_udapl_proc_t*)opal_list_get_next(proc)) {
if(OPAL_EQUAL == orte_util_compare_name_fields(ORTE_NS_CMP_ALL, &proc->proc_ompi->proc_name, endpoint)) {
if(OPAL_EQUAL == ompi_rte_compare_name_fields(OMPI_RTE_CMP_ALL, &proc->proc_ompi->proc_name, endpoint)) {
for(i = 0; i < proc->proc_endpoint_count; i++) {
ep = proc->proc_endpoints[i];
@ -613,7 +611,7 @@ void mca_btl_udapl_endpoint_recv(int status, orte_process_name_t* endpoint,
void mca_btl_udapl_endpoint_post_oob_recv(void)
{
orte_rml.recv_buffer_nb(ORTE_NAME_WILDCARD, OMPI_RML_TAG_UDAPL,
ompi_rte_recv_buffer_nb(OMPI_NAME_WILDCARD, OMPI_RML_TAG_UDAPL,
ORTE_RML_PERSISTENT, mca_btl_udapl_endpoint_recv, NULL);
}
@ -631,7 +629,7 @@ void mca_btl_udapl_endpoint_connect(mca_btl_udapl_endpoint_t* endpoint)
/* Nasty test to prevent deadlock and unwanted connection attempts */
/* This right here is the whole point of using the ORTE/RML handshake */
if((MCA_BTL_UDAPL_CONN_EAGER == endpoint->endpoint_state &&
0 > orte_util_compare_name_fields(ORTE_NS_CMP_ALL,
0 > ompi_rte_compare_name_fields(OMPI_RTE_CMP_ALL,
&endpoint->endpoint_proc->proc_ompi->proc_name,
&ompi_proc_local()->proc_name)) ||
(MCA_BTL_UDAPL_CLOSED != endpoint->endpoint_state &&
@ -782,7 +780,7 @@ static int mca_btl_udapl_endpoint_finish_eager(
}
/* Only one side does dat_ep_connect() */
if(0 < orte_util_compare_name_fields(ORTE_NS_CMP_ALL,
if(0 < ompi_rte_compare_name_fields(OMPI_RTE_CMP_ALL,
&endpoint->endpoint_proc->proc_ompi->proc_name,
&ompi_proc_local()->proc_name)) {
@ -970,7 +968,7 @@ static int mca_btl_udapl_endpoint_pd_finish_eager(
* with this.
*/
if((BTL_UDAPL_NUM_CONNECTION != endpoint->endpoint_connections_completed)
&& (0 < orte_util_compare_name_fields(ORTE_NS_CMP_ALL,
&& (0 < ompi_rte_compare_name_fields(OMPI_RTE_CMP_ALL,
&endpoint->endpoint_proc->proc_ompi->proc_name,
&ompi_proc_local()->proc_name))) {
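
In the same spirit, a short hedged sketch, not part of the commit, of the uDAPL address exchange after orte_rml.send_buffer_nb() becomes ompi_rte_send_buffer_nb(). sketch_send_addr() and send_cb() are illustrative names; send_cb() mirrors mca_btl_udapl_endpoint_send_cb() from the hunk above, and the error handling is slightly tightened (the buffer is released on pack failure).

/* Hedged sketch (not part of this commit) of the uDAPL address exchange
 * through the renamed RTE calls; names prefixed with sketch_/send_cb are
 * illustrative only. */
static void send_cb(int status, ompi_process_name_t *peer,
                    opal_buffer_t *buffer, ompi_rml_tag_t tag, void *cbdata)
{
    OBJ_RELEASE(buffer);    /* the buffer belongs to us until the send completes */
}

static int sketch_send_addr(mca_btl_base_endpoint_t *endpoint,
                            mca_btl_udapl_addr_t *addr)
{
    opal_buffer_t *buf = OBJ_NEW(opal_buffer_t);
    int rc;

    if (NULL == buf) {
        OMPI_ERROR_LOG(OMPI_ERR_OUT_OF_RESOURCE);
        return OMPI_ERR_OUT_OF_RESOURCE;
    }

    /* pack the port and the DAT sockaddr, exactly as the hunk above does */
    rc = opal_dss.pack(buf, &addr->port, 1, OPAL_UINT64);
    if (OPAL_SUCCESS != rc) {
        OMPI_ERROR_LOG(rc);
        OBJ_RELEASE(buf);
        return rc;
    }
    rc = opal_dss.pack(buf, &addr->addr, sizeof(DAT_SOCK_ADDR), OPAL_UINT8);
    if (OPAL_SUCCESS != rc) {
        OMPI_ERROR_LOG(rc);
        OBJ_RELEASE(buf);
        return rc;
    }

    /* hand the buffer to the RTE; send_cb() releases it when the send is done */
    rc = ompi_rte_send_buffer_nb(&endpoint->endpoint_proc->proc_ompi->proc_name,
                                 buf, OMPI_RML_TAG_UDAPL, 0, send_cb, NULL);
    if (0 > rc) {
        OMPI_ERROR_LOG(rc);
        return rc;
    }
    return OMPI_SUCCESS;
}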


@ -131,7 +131,7 @@ mca_btl_udapl_proc_t* mca_btl_udapl_proc_create(ompi_proc_t* ompi_proc)
if(OMPI_SUCCESS != rc) {
BTL_UDAPL_VERBOSE_OUTPUT(VERBOSE_CRITICAL,
("ompi_modex_recv failed for peer %s",
ORTE_NAME_PRINT(&ompi_proc->proc_name)));
OMPI_NAME_PRINT(&ompi_proc->proc_name)));
OBJ_RELEASE(udapl_proc);
return NULL;
}
@ -139,7 +139,7 @@ mca_btl_udapl_proc_t* mca_btl_udapl_proc_create(ompi_proc_t* ompi_proc)
if((size % sizeof(mca_btl_udapl_addr_t)) != 0) {
BTL_UDAPL_VERBOSE_OUTPUT(VERBOSE_CRITICAL,
("invalid udapl address for peer %s",
ORTE_NAME_PRINT(&ompi_proc->proc_name)));
OMPI_NAME_PRINT(&ompi_proc->proc_name)));
OBJ_RELEASE(udapl_proc);
return NULL;
}
@ -251,14 +251,14 @@ static int mca_btl_udapl_proc_address_match(
/* current uDAPL BTL only supports IPv4 */
BTL_UDAPL_VERBOSE_HELP(VERBOSE_SHOW_HELP,
("help-mpi-btl-udapl.txt", "IPv4 only",
true, orte_process_info.nodename));
true, ompi_process_info.nodename));
return OMPI_ERROR;
}
if (MCA_BTL_UDAPL_INVALID_PEER_ADDR_IDX == *peer_addr_idx) {
BTL_UDAPL_VERBOSE_HELP(VERBOSE_SHOW_HELP,
("help-mpi-btl-udapl.txt", "no network match",
true, btl_addr_string, orte_process_info.nodename,
true, btl_addr_string, ompi_process_info.nodename,
peer_proc->proc_ompi->proc_hostname));
return OMPI_ERR_OUT_OF_RESOURCE;
}


@ -123,7 +123,7 @@ static inline int mca_btl_ugni_wildcard_ep_post (mca_btl_ugni_module_t *ugni_mod
memset (&ugni_module->wc_local_attr, 0, sizeof (ugni_module->wc_local_attr));
rc = GNI_EpPostDataWId (ugni_module->wildcard_ep, &ugni_module->wc_local_attr, sizeof (ugni_module->wc_local_attr),
&ugni_module->wc_remote_attr, sizeof (ugni_module->wc_remote_attr),
MCA_BTL_UGNI_CONNECT_WILDCARD_ID | ORTE_PROC_MY_NAME->vpid);
MCA_BTL_UGNI_CONNECT_WILDCARD_ID | OMPI_PROC_MY_NAME->vpid);
return ompi_common_rc_ugni_to_ompi (rc);
}


@ -169,7 +169,7 @@ mca_btl_ugni_module_finalize (struct mca_btl_base_module_t *btl)
/* cancel wildcard post */
rc = GNI_EpPostDataCancelById (ugni_module->wildcard_ep,
MCA_BTL_UGNI_CONNECT_WILDCARD_ID |
ORTE_PROC_MY_NAME->vpid);
OMPI_PROC_MY_NAME->vpid);
if (GNI_RC_SUCCESS != rc) {
BTL_VERBOSE(("btl/ugni error cancelling wildcard post"));
}


@ -26,7 +26,7 @@ int mca_btl_ugni_send (struct mca_btl_base_module_t *btl,
int rc;
BTL_VERBOSE(("btl/ugni sending descriptor %p from %d -> %d. length = %" PRIu64, (void *)descriptor,
ORTE_PROC_MY_NAME->vpid, endpoint->common->ep_rem_id, frag->segments[0].base.seg_len));
OMPI_PROC_MY_NAME->vpid, endpoint->common->ep_rem_id, frag->segments[0].base.seg_len));
/* tag and len are at the same location in eager and smsg frag hdrs */
frag->hdr.send.lag = (tag << 24) | size;


@ -25,9 +25,6 @@
#include "ompi/constants.h"
#include "opal/util/output.h"
#include "orte/util/proc_info.h"
#include "orte/util/show_help.h"
#include "orte/runtime/orte_globals.h"
#include "opal/mca/base/mca_base_param.h"
#include "ompi/mca/btl/base/btl_base_error.h"
@ -229,7 +226,7 @@ static mca_btl_base_module_t **mca_btl_vader_component_init (int *num_btls,
/* if no session directory was created, then we cannot be used */
/* XXX LANL FIXME -- this is not the case. we can use an anonymous segment */
if (!orte_create_session_dirs) {
if (NULL == ompi_process_info.job_session_dir) {
return NULL;
}


@ -172,8 +172,8 @@ static int vader_btl_first_time_init(mca_btl_vader_t *vader_btl, int n)
/* set file name */
if(asprintf(&vader_ctl_file, "%s"OPAL_PATH_SEP"vader_btl_module.%s",
orte_process_info.job_session_dir,
orte_process_info.nodename) < 0)
ompi_process_info.job_session_dir,
ompi_process_info.nodename) < 0)
return OMPI_ERR_OUT_OF_RESOURCE;
/* Pass in a data segment alignment of 0 to get no data


@ -27,8 +27,6 @@
#include "ompi_config.h"
#include <string.h>
#include "orte/util/show_help.h"
#include "orte/runtime/orte_globals.h"
#include "opal/class/opal_bitmap.h"
#include "opal/util/output.h"
#include "opal/util/arch.h"
@ -45,7 +43,6 @@
#include "opal/datatype/opal_convertor.h"
#include "ompi/mca/mpool/base/base.h"
#include "ompi/mca/mpool/grdma/mpool_grdma.h"
#include "orte/util/proc_info.h"
#include <errno.h>
#include <string.h>
#include <math.h>
@ -115,14 +112,14 @@ void mca_btl_wv_show_init_error(const char *file, int line,
{
if (ENOMEM == errno) {char *str_limit = NULL;
orte_show_help("help-mpi-btl-wv.txt", "init-fail-no-mem",
true, orte_process_info.nodename,
ompi_show_help("help-mpi-btl-wv.txt", "init-fail-no-mem",
true, ompi_process_info.nodename,
file, line, func, dev, str_limit);
if (NULL != str_limit) free(str_limit);
} else {
orte_show_help("help-mpi-btl-wv.txt", "init-fail-create-q",
true, orte_process_info.nodename,
ompi_show_help("help-mpi-btl-wv.txt", "init-fail-create-q",
true, ompi_process_info.nodename,
file, line, func, strerror(errno), errno, dev);
}
}
@ -288,9 +285,9 @@ static int mca_btl_wv_tune_endpoint(mca_btl_wv_module_t* wv_btl,
ompi_btl_wv_ini_values_t values;
if(mca_btl_wv_get_transport_type(wv_btl) != endpoint->rem_info.rem_transport_type) {
orte_show_help("help-mpi-btl-wv.txt",
ompi_show_help("help-mpi-btl-wv.txt",
"conflicting transport types", true,
orte_process_info.nodename,
ompi_process_info.nodename,
wv_btl->device->ib_dev->name,
(wv_btl->device->ib_dev_attr).VendorId,
(wv_btl->device->ib_dev_attr).VendorPartId,
@ -309,9 +306,9 @@ static int mca_btl_wv_tune_endpoint(mca_btl_wv_module_t* wv_btl,
if (OMPI_SUCCESS != ret &&
OMPI_ERR_NOT_FOUND != ret) {
orte_show_help("help-mpi-btl-wv.txt",
ompi_show_help("help-mpi-btl-wv.txt",
"error in device init", true,
orte_process_info.nodename,
ompi_process_info.nodename,
wv_btl->device->ib_dev->name);
return ret;
}
@ -350,9 +347,9 @@ static int mca_btl_wv_tune_endpoint(mca_btl_wv_module_t* wv_btl,
if(0 != strcmp(mca_btl_wv_component.receive_queues,
recv_qps)) {
orte_show_help("help-mpi-btl-wv.txt",
ompi_show_help("help-mpi-btl-wv.txt",
"unsupported queues configuration", true,
orte_process_info.nodename,
ompi_process_info.nodename,
wv_btl->device->ib_dev->name,
(wv_btl->device->ib_dev_attr).VendorId,
(wv_btl->device->ib_dev_attr).VendorPartId,
@ -372,9 +369,9 @@ static int mca_btl_wv_tune_endpoint(mca_btl_wv_module_t* wv_btl,
if(NULL != values.receive_queues) {
if(0 != strcmp(mca_btl_wv_component.receive_queues,
values.receive_queues)) {
orte_show_help("help-mpi-btl-wv.txt",
ompi_show_help("help-mpi-btl-wv.txt",
"unsupported queues configuration", true,
orte_process_info.nodename,
ompi_process_info.nodename,
wv_btl->device->ib_dev->name,
(wv_btl->device->ib_dev_attr).VendorId,
(wv_btl->device->ib_dev_attr).VendorPartId,
@ -431,8 +428,8 @@ int mca_btl_wv_add_procs(struct mca_btl_base_module_t* btl,
/* OOB, XOOB, RDMACM, IBCM does not support SELF comunication, so
* mark the prco as unreachable by wv btl */
if (OPAL_EQUAL == orte_util_compare_name_fields
(ORTE_NS_CMP_ALL, ORTE_PROC_MY_NAME, &ompi_proc->proc_name)) {
if (OPAL_EQUAL == ompi_rte_compare_name_fields
(OMPI_RTE_CMP_ALL, OMPI_PROC_MY_NAME, &ompi_proc->proc_name)) {
continue;
}


@ -55,10 +55,6 @@
#include "opal/mca/installdirs/installdirs.h"
#include "opal_stdint.h"
#include "orte/util/show_help.h"
#include "orte/util/proc_info.h"
#include "orte/runtime/orte_globals.h"
#include "ompi/constants.h"
#include "ompi/proc/proc.h"
#include "ompi/mca/btl/btl.h"
@ -526,8 +522,8 @@ static int init_one_port(opal_list_t *btl_list, mca_btl_wv_device_t *device,
if(mca_btl_wv_component.ib_num_btls > 0 &&
IB_DEFAULT_GID_PREFIX == subnet_id &&
mca_btl_wv_component.warn_default_gid_prefix) {
orte_show_help("help-mpi-btl-wv.txt", "default subnet prefix",
true, orte_process_info.nodename);
ompi_show_help("help-mpi-btl-wv.txt", "default subnet prefix",
true, ompi_process_info.nodename);
}
lmc = (1 << ib_port_attr->Lmc);
@ -1048,9 +1044,9 @@ static int setup_qps(void)
queues = opal_argv_split(mca_btl_wv_component.receive_queues, ':');
if (0 == opal_argv_count(queues)) {
orte_show_help("help-mpi-btl-wv.txt",
ompi_show_help("help-mpi-btl-wv.txt",
"no qps in receive_queues", true,
orte_process_info.nodename,
ompi_process_info.nodename,
mca_btl_wv_component.receive_queues);
ret = OMPI_ERROR;
goto error;
@ -1065,9 +1061,9 @@ static int setup_qps(void)
} else if (0 == strncmp("S,", queues[qp], 2)) {
num_srq_qps++;
}else {
orte_show_help("help-mpi-btl-wv.txt",
ompi_show_help("help-mpi-btl-wv.txt",
"invalid qp type in receive_queues", true,
orte_process_info.nodename,
ompi_process_info.nodename,
mca_btl_wv_component.receive_queues,
queues[qp]);
ret = OMPI_ERR_BAD_PARAM;
@ -1094,9 +1090,9 @@ static int setup_qps(void)
if ('P' == params[0][0]) {
int32_t rd_win, rd_rsv;
if (count < 3 || count > 6) {
orte_show_help("help-mpi-btl-wv.txt",
ompi_show_help("help-mpi-btl-wv.txt",
"invalid pp qp specification", true,
orte_process_info.nodename, queues[qp]);
ompi_process_info.nodename, queues[qp]);
ret = OMPI_ERR_BAD_PARAM;
goto error;
}
@ -1119,15 +1115,15 @@ static int setup_qps(void)
mca_btl_wv_component.qp_infos[qp].u.pp_qp.rd_win = rd_win;
mca_btl_wv_component.qp_infos[qp].u.pp_qp.rd_rsv = rd_rsv;
if ((rd_num - rd_low) > rd_win) {
orte_show_help("help-mpi-btl-wv.txt", "non optimal rd_win",
ompi_show_help("help-mpi-btl-wv.txt", "non optimal rd_win",
true, rd_win, rd_num - rd_low);
}
} else {
int32_t sd_max, rd_init, srq_limit;
if (count < 3 || count > 7) {
orte_show_help("help-mpi-btl-wv.txt",
ompi_show_help("help-mpi-btl-wv.txt",
"invalid srq specification", true,
orte_process_info.nodename, queues[qp]);
ompi_process_info.nodename, queues[qp]);
ret = OMPI_ERR_BAD_PARAM;
goto error;
}
@ -1156,15 +1152,15 @@ static int setup_qps(void)
}
if (rd_num < rd_init) {
orte_show_help("help-mpi-btl-wv.txt", "rd_num must be >= rd_init",
true, orte_process_info.nodename, queues[qp]);
ompi_show_help("help-mpi-btl-wv.txt", "rd_num must be >= rd_init",
true, ompi_process_info.nodename, queues[qp]);
ret = OMPI_ERR_BAD_PARAM;
goto error;
}
if (rd_num < srq_limit) {
orte_show_help("help-mpi-btl-wv.txt", "srq_limit must be > rd_num",
true, orte_process_info.nodename, queues[qp]);
ompi_show_help("help-mpi-btl-wv.txt", "srq_limit must be > rd_num",
true, ompi_process_info.nodename, queues[qp]);
ret = OMPI_ERR_BAD_PARAM;
goto error;
}
@ -1175,8 +1171,8 @@ static int setup_qps(void)
}
if (rd_num <= rd_low) {
orte_show_help("help-mpi-btl-wv.txt", "rd_num must be > rd_low",
true, orte_process_info.nodename, queues[qp]);
ompi_show_help("help-mpi-btl-wv.txt", "rd_num must be > rd_low",
true, ompi_process_info.nodename, queues[qp]);
ret = OMPI_ERR_BAD_PARAM;
goto error;
}
@ -1195,23 +1191,23 @@ static int setup_qps(void)
mca_btl_wv_module.super.btl_eager_limit :
mca_btl_wv_module.super.btl_max_send_size;
if (max_qp_size < max_size_needed) {
orte_show_help("help-mpi-btl-wv.txt",
ompi_show_help("help-mpi-btl-wv.txt",
"biggest qp size is too small", true,
orte_process_info.nodename, max_qp_size,
ompi_process_info.nodename, max_qp_size,
max_size_needed);
ret = OMPI_ERR_BAD_PARAM;
goto error;
} else if (max_qp_size > max_size_needed) {
orte_show_help("help-mpi-btl-wv.txt",
ompi_show_help("help-mpi-btl-wv.txt",
"biggest qp size is too big", true,
orte_process_info.nodename, max_qp_size,
ompi_process_info.nodename, max_qp_size,
max_size_needed);
}
if (mca_btl_wv_component.ib_free_list_max > 0 &&
min_freelist_size > mca_btl_wv_component.ib_free_list_max) {
orte_show_help("help-mpi-btl-wv.txt", "freelist too small", true,
orte_process_info.nodename,
ompi_show_help("help-mpi-btl-wv.txt", "freelist too small", true,
ompi_process_info.nodename,
mca_btl_wv_component.ib_free_list_max,
min_freelist_size);
ret = OMPI_ERR_BAD_PARAM;
@ -1327,9 +1323,9 @@ static int init_one_device(opal_list_t *btl_list, struct wv_device* ib_dev)
warning that we're using default values (unless overridden
that we don't want to see these warnings) */
if (mca_btl_wv_component.warn_no_device_params_found) {
orte_show_help("help-mpi-btl-wv.txt",
ompi_show_help("help-mpi-btl-wv.txt",
"no device params found", true,
orte_process_info.nodename,
ompi_process_info.nodename,
device->ib_dev->name,
device->ib_dev_attr.VendorId,
device->ib_dev_attr.VendorPartId);
@ -1502,7 +1498,7 @@ static int init_one_device(opal_list_t *btl_list, struct wv_device* ib_dev)
if (device->btls > 0) {
/* if apm was enabled it should be > 1 */
if (1 == mca_btl_wv_component.apm_ports) {
orte_show_help("help-mpi-btl-wv.txt",
ompi_show_help("help-mpi-btl-wv.txt",
"apm not enough ports", true);
mca_btl_wv_component.apm_ports = 0;
}
@ -1761,10 +1757,10 @@ static int init_one_device(opal_list_t *btl_list, struct wv_device* ib_dev)
if (NULL != values.receive_queues) {
if (0 != strcmp(values.receive_queues,
mca_btl_wv_component.receive_queues)) {
orte_show_help("help-mpi-btl-wv.txt",
ompi_show_help("help-mpi-btl-wv.txt",
"locally conflicting receive_queues", true,
opal_install_dirs.pkgdatadir,
orte_process_info.nodename,
ompi_process_info.nodename,
receive_queues_device->ib_dev->name,
receive_queues_device->ib_dev_attr.VendorId,
receive_queues_device->ib_dev_attr.VendorPartId,
@ -1785,10 +1781,10 @@ static int init_one_device(opal_list_t *btl_list, struct wv_device* ib_dev)
device's INI file, we must error. */
else if (BTL_WV_RQ_SOURCE_DEVICE_INI ==
mca_btl_wv_component.receive_queues_source) {
orte_show_help("help-mpi-btl-wv.txt",
ompi_show_help("help-mpi-btl-wv.txt",
"locally conflicting receive_queues", true,
opal_install_dirs.pkgdatadir,
orte_process_info.nodename,
ompi_process_info.nodename,
receive_queues_device->ib_dev->name,
receive_queues_device->ib_dev_attr.VendorId,
receive_queues_device->ib_dev_attr.VendorPartId,
@ -1819,9 +1815,9 @@ error:
}
if (OMPI_SUCCESS != ret) {
orte_show_help("help-mpi-btl-wv.txt",
ompi_show_help("help-mpi-btl-wv.txt",
"error in device init", true,
orte_process_info.nodename,
ompi_process_info.nodename,
device->ib_dev->name);
}
device->ib_dev_context->device_if->Release();
@ -2103,7 +2099,7 @@ sort_devs_by_distance(struct wv_device **ib_devs, int count)
for (i = 0; i < count; i++) {
devs[i].ib_dev = ib_devs[i];
if (orte_proc_is_bound) {
if (ompi_rte_proc_is_bound) {
/* If this process is bound to one or more PUs, we can get
an accurate distance. */
devs[i].distance = get_ib_dev_distance((ibv_device *)ib_devs[i]);
@ -2290,7 +2286,7 @@ btl_wv_component_init(int *num_btl_modules,
list_count++;
if (list_count > 1) {
orte_show_help("help-mpi-btl-wv.txt",
ompi_show_help("help-mpi-btl-wv.txt",
"specified include and exclude", true,
NULL == mca_btl_wv_component.if_include ?
"<not specified>" : mca_btl_wv_component.if_include,
@ -2343,8 +2339,8 @@ btl_wv_component_init(int *num_btl_modules,
}
free(dev_sorted);
if (!found) {
orte_show_help("help-mpi-btl-wv.txt", "no devices right type",
true, orte_process_info.nodename,
ompi_show_help("help-mpi-btl-wv.txt", "no devices right type",
true, ompi_process_info.nodename,
((BTL_WV_DT_IB == mca_btl_wv_component.device_type) ?
"InfiniBand" :
(BTL_WV_DT_IWARP == mca_btl_wv_component.device_type) ?
@ -2360,16 +2356,16 @@ btl_wv_component_init(int *num_btl_modules,
if (0 != opal_argv_count(mca_btl_wv_component.if_list) &&
mca_btl_wv_component.warn_nonexistent_if) {
char *str = opal_argv_join(mca_btl_wv_component.if_list, ',');
orte_show_help("help-mpi-btl-wv.txt", "nonexistent port",
true, orte_process_info.nodename,
ompi_show_help("help-mpi-btl-wv.txt", "nonexistent port",
true, ompi_process_info.nodename,
((NULL != mca_btl_wv_component.if_include) ?
"in" : "ex"), str);
free(str);
}
if(0 == mca_btl_wv_component.ib_num_btls) {
orte_show_help("help-mpi-btl-wv.txt",
"no active ports found", true, orte_process_info.nodename);
ompi_show_help("help-mpi-btl-wv.txt",
"no active ports found", true, ompi_process_info.nodename);
goto no_btls;
}
@ -2458,9 +2454,9 @@ btl_wv_component_init(int *num_btl_modules,
/* Do finial init on device */
ret = prepare_device_for_use(device);
if (OMPI_SUCCESS != ret) {
orte_show_help("help-mpi-btl-wv.txt",
ompi_show_help("help-mpi-btl-wv.txt",
"error in device init", true,
orte_process_info.nodename,
ompi_process_info.nodename,
device->ib_dev->name);
goto no_btls;
}
@ -2982,16 +2978,16 @@ error:
(endpoint->qps[qp].qp->lcl_qp->context->device->name);
if (WvWcRnrRetryError == wc->Status) {
orte_show_help("help-mpi-btl-wv.txt",
ompi_show_help("help-mpi-btl-wv.txt",
BTL_WV_QP_TYPE_PP(qp) ?
"pp rnr retry exceeded" :
"srq rnr retry exceeded", true,
orte_process_info.nodename, device_name,
ompi_process_info.nodename, device_name,
peer_hostname);
} else if (-2 == wc->Status) {
orte_show_help("help-mpi-btl-wv.txt",
ompi_show_help("help-mpi-btl-wv.txt",
"pp retry exceeded", true,
orte_process_info.nodename,
ompi_process_info.nodename,
device_name, peer_hostname);
}
}


@ -34,8 +34,6 @@
#include "opal_stdint.h"
#include "opal/util/output.h"
#include "orte/util/show_help.h"
#include "ompi/types.h"
#include "ompi/class/ompi_free_list.h"
@ -930,9 +928,9 @@ void *mca_btl_wv_endpoint_invoke_error(void *context)
/* If we didn't find a BTL, then just bail :-( */
if (NULL == btl || NULL == btl->error_cb) {
orte_show_help("help-mpi-btl-wv.txt",
ompi_show_help("help-mpi-btl-wv.txt",
"cannot raise btl error", true,
orte_process_info.nodename,
ompi_process_info.nodename,
__FILE__, __LINE__);
exit(1);
}


@ -30,7 +30,6 @@
#include <unistd.h>
#endif
#include "orte/util/show_help.h"
#include "opal/mca/base/mca_base_param.h"
#include "btl_wv.h"
@ -241,7 +240,7 @@ static int parse_file(char *filename)
ini_filename = filename;
btl_wv_ini_yyin = fopen(filename, "r");
if (NULL == btl_wv_ini_yyin) {
orte_show_help("help-mpi-btl-wv.txt", "ini file:file not found",
ompi_show_help("help-mpi-btl-wv.txt", "ini file:file not found",
true, filename);
ret = OMPI_ERR_NOT_FOUND;
goto cleanup;
@ -419,7 +418,7 @@ static int parse_line(parsed_section_values_t *sv)
/* Have no idea what this parameter is. Not an error -- just
ignore it */
if (!showed_unknown_field_warning) {
orte_show_help("help-mpi-btl-wv.txt",
ompi_show_help("help-mpi-btl-wv.txt",
"ini file:unknown field", true,
ini_filename, btl_wv_ini_yynewlines,
key_buffer);
@ -688,7 +687,7 @@ static inline void show_help(const char *topic)
if (0 == strcmp("\n", btl_wv_ini_yytext)) {
btl_wv_ini_yytext = "<end of line>";
}
orte_show_help("help-mpi-btl-wv.txt", topic, true,
ompi_show_help("help-mpi-btl-wv.txt", topic, true,
ini_filename, btl_wv_ini_yynewlines,
btl_wv_ini_yytext);
btl_wv_ini_yytext = save;


@ -30,7 +30,6 @@
#include "opal/mca/installdirs/installdirs.h"
#include "opal/util/output.h"
#include "opal/mca/base/mca_base_param.h"
#include "orte/util/show_help.h"
#include "btl_wv.h"
#include "btl_wv_mca.h"
#include "btl_wv_ini.h"
@ -166,9 +165,9 @@ int btl_wv_register_mca_params(void)
"(negative = try to enable fork support, but continue even if it is not available, 0 = do not enable fork support, positive = try to enable fork support and fail if it is not available)",
ival2, &ival, 0));
if (0 != ival) {
orte_show_help("help-mpi-btl-wv.txt",
ompi_show_help("help-mpi-btl-wv.txt",
"ib_fork requested but not supported", true,
orte_process_info.nodename);
ompi_process_info.nodename);
return OMPI_ERROR;
}
@ -196,9 +195,9 @@ int btl_wv_register_mca_params(void)
} else if (0 == strcasecmp(str, "all")) {
mca_btl_wv_component.device_type = BTL_WV_DT_ALL;
} else {
orte_show_help("help-mpi-btl-wv.txt",
ompi_show_help("help-mpi-btl-wv.txt",
"ib_fork requested but not supported", true,
orte_process_info.nodename);
ompi_process_info.nodename);
return OMPI_ERROR;
}
free(str);
@ -282,7 +281,7 @@ int btl_wv_register_mca_params(void)
CHECK(reg_int("mtu", "ib_mtu", msg, WV_MTU_1024, &ival, 0));
free(msg);
if (ival < WV_MTU_1024 || ival > WV_MTU_4096) {
orte_show_help("help-mpi-btl-wv.txt", "invalid mca param value",
ompi_show_help("help-mpi-btl-wv.txt", "invalid mca param value",
true, "invalid value for btl_wv_ib_mtu",
"btl_wv_ib_mtu reset to 1024");
mca_btl_wv_component.ib_mtu = WV_MTU_1024;
@ -295,12 +294,12 @@ int btl_wv_register_mca_params(void)
"(must be >= 0 and <= 31)",
25, &ival, 0));
if (ival > 31) {
orte_show_help("help-mpi-btl-wv.txt", "invalid mca param value",
ompi_show_help("help-mpi-btl-wv.txt", "invalid mca param value",
true, "btl_wv_ib_min_rnr_timer > 31",
"btl_wv_ib_min_rnr_timer reset to 31");
ival = 31;
} else if (ival < 0){
orte_show_help("help-mpi-btl-wv.txt", "invalid mca param value",
ompi_show_help("help-mpi-btl-wv.txt", "invalid mca param value",
true, "btl_wv_ib_min_rnr_timer < 0",
"btl_wv_ib_min_rnr_timer reset to 0");
ival = 0;
@ -312,12 +311,12 @@ int btl_wv_register_mca_params(void)
"(must be >= 0 and <= 31)",
20, &ival, 0));
if (ival > 31) {
orte_show_help("help-mpi-btl-wv.txt", "invalid mca param value",
ompi_show_help("help-mpi-btl-wv.txt", "invalid mca param value",
true, "btl_wv_ib_timeout > 31",
"btl_wv_ib_timeout reset to 31");
ival = 31;
} else if (ival < 0) {
orte_show_help("help-mpi-btl-wv.txt", "invalid mca param value",
ompi_show_help("help-mpi-btl-wv.txt", "invalid mca param value",
true, "btl_wv_ib_timeout < 0",
"btl_wv_ib_timeout reset to 0");
ival = 0;
@ -329,12 +328,12 @@ int btl_wv_register_mca_params(void)
"(must be >= 0 and <= 7)",
7, &ival, 0));
if (ival > 7) {
orte_show_help("help-mpi-btl-wv.txt", "invalid mca param value",
ompi_show_help("help-mpi-btl-wv.txt", "invalid mca param value",
true, "btl_wv_ib_retry_count > 7",
"btl_wv_ib_retry_count reset to 7");
ival = 7;
} else if (ival < 0) {
orte_show_help("help-mpi-btl-wv.txt", "invalid mca param value",
ompi_show_help("help-mpi-btl-wv.txt", "invalid mca param value",
true, "btl_wv_ib_retry_count < 0",
"btl_wv_ib_retry_count reset to 0");
ival = 0;
@ -349,12 +348,12 @@ int btl_wv_register_mca_params(void)
"(must be >= 0 and <= 7; 7 = \"infinite\")",
7, &ival, 0));
if (ival > 7) {
orte_show_help("help-mpi-btl-wv.txt", "invalid mca param value",
ompi_show_help("help-mpi-btl-wv.txt", "invalid mca param value",
true, "btl_wv_ib_rnr_retry > 7",
"btl_wv_ib_rnr_retry reset to 7");
ival = 7;
} else if (ival < 0) {
orte_show_help("help-mpi-btl-wv.txt", "invalid mca param value",
ompi_show_help("help-mpi-btl-wv.txt", "invalid mca param value",
true, "btl_wv_ib_rnr_retry < 0",
"btl_wv_ib_rnr_retry reset to 0");
ival = 0;
@ -365,12 +364,12 @@ int btl_wv_register_mca_params(void)
"(must be >= 0 and <= 15)",
0, &ival, 0));
if (ival > 15) {
orte_show_help("help-mpi-btl-wv.txt", "invalid mca param value",
ompi_show_help("help-mpi-btl-wv.txt", "invalid mca param value",
true, "btl_wv_ib_service_level > 15",
"btl_wv_ib_service_level reset to 15");
ival = 15;
} else if (ival < 0) {
orte_show_help("help-mpi-btl-wv.txt", "invalid mca param value",
ompi_show_help("help-mpi-btl-wv.txt", "invalid mca param value",
true, "btl_wv_ib_service_level < 0",
"btl_wv_ib_service_level reset to 0");
ival = 0;
@ -427,8 +426,8 @@ int btl_wv_register_mca_params(void)
"(must be > 0 and power of two)",
64, &ival, REGINT_GE_ZERO));
if(ival <= 1 || (ival & (ival - 1))) {
orte_show_help("help-mpi-btl-wv.txt", "wrong buffer alignment",
true, ival, orte_process_info.nodename, 64);
ompi_show_help("help-mpi-btl-wv.txt", "wrong buffer alignment",
true, ival, ompi_process_info.nodename, 64);
mca_btl_wv_component.buffer_alignment = 64;
} else {
mca_btl_wv_component.buffer_alignment = (uint32_t) ival;


@ -158,7 +158,7 @@ mca_btl_wv_proc_t* mca_btl_wv_proc_create(ompi_proc_t* ompi_proc)
if (OMPI_SUCCESS != rc) {
BTL_ERROR(("[%s:%d] ompi_modex_recv failed for peer %s",
__FILE__, __LINE__,
ORTE_NAME_PRINT(&ompi_proc->proc_name)));
OMPI_NAME_PRINT(&ompi_proc->proc_name)));
OBJ_RELEASE(module_proc);
return NULL;
}


@ -66,7 +66,7 @@ struct mca_btl_wv_proc_t {
ompi_proc_t *proc_ompi;
/** globally unique identifier for the process */
orte_process_name_t proc_guid;
ompi_process_name_t proc_guid;
/** modex messages from this proc; one for each port in the peer */
mca_btl_wv_proc_modex_t *proc_ports;


@ -17,8 +17,6 @@
#include "connect/base.h"
#include "connect/btl_wv_connect_oob.h"
#include "orte/util/show_help.h"
#include "opal/util/argv.h"
#include "opal/util/output.h"
@ -85,9 +83,9 @@ int ompi_btl_wv_connect_base_register(void)
}
}
if (NULL == all[i]) {
orte_show_help("help-mpi-btl-wv-cpc-base.txt",
ompi_show_help("help-mpi-btl-wv-cpc-base.txt",
"cpc name not found", true,
"include", orte_process_info.nodename,
"include", ompi_process_info.nodename,
"include", cpc_include, temp[j],
all_cpc_names);
opal_argv_free(temp);
@ -111,9 +109,9 @@ int ompi_btl_wv_connect_base_register(void)
}
}
if (NULL == all[i]) {
orte_show_help("help-mpi-btl-wv-cpc-base.txt",
ompi_show_help("help-mpi-btl-wv-cpc-base.txt",
"cpc name not found", true,
"exclude", orte_process_info.nodename,
"exclude", ompi_process_info.nodename,
"exclude", cpc_exclude, temp[j],
all_cpc_names);
opal_argv_free(temp);
@ -257,9 +255,9 @@ int ompi_btl_wv_connect_base_select_for_local_port(mca_btl_wv_module_t *btl)
/* If we got an empty array, then no CPCs were eligible. Doh! */
if (0 == cpc_index) {
orte_show_help("help-mpi-btl-wv-cpc-base.txt",
ompi_show_help("help-mpi-btl-wv-cpc-base.txt",
"no cpcs for port", true,
orte_process_info.nodename,
ompi_process_info.nodename,
btl->device->ib_dev->name,
btl->port_num, msg);
free(cpcs);


@ -25,20 +25,14 @@
#include "ompi_config.h"
#include "opal/dss/dss.h"
#include "opal_stdint.h"
#include "orte/util/show_help.h"
#include "opal/util/error.h"
#include "opal/util/output.h"
#include "orte/mca/rml/rml.h"
#include "orte/mca/rml/rml_types.h"
#include "orte/mca/errmgr/errmgr.h"
#include "orte/util/name_fns.h"
#include "orte/runtime/orte_globals.h"
#include "ompi/mca/dpm/dpm.h"
#include "ompi/mca/rte/rte.h"
#include "btl_wv.h"
#include "btl_wv_endpoint.h"
#include "btl_wv_proc.h"
#include "connect/connect.h"
#include "orte/util/show_help.h"
#include <rdma/winverbs.h>
#include <malloc.h>
@ -170,11 +164,11 @@ static int qp_create_one(mca_btl_base_endpoint_t* endpoint, int qp,
static int send_connect_data(mca_btl_base_endpoint_t* endpoint,
uint8_t message_type);
static void rml_send_cb(int status, orte_process_name_t* endpoint,
opal_buffer_t* buffer, orte_rml_tag_t tag,
static void rml_send_cb(int status, ompi_process_name_t* endpoint,
opal_buffer_t* buffer, ompi_rml_tag_t tag,
void* cbdata);
static void rml_recv_cb(int status, orte_process_name_t* process_name,
opal_buffer_t* buffer, orte_rml_tag_t tag,
static void rml_recv_cb(int status, ompi_process_name_t* process_name,
opal_buffer_t* buffer, ompi_rml_tag_t tag,
void* cbdata);
static int init_ud_qp(struct wv_context *context_arg,
struct mca_btl_wv_sa_qp_cache *cache);
@ -242,12 +236,12 @@ static int oob_component_query(mca_btl_wv_module_t *btl,
ensure to only post it *once*, because another btl may have
come in before this and already posted it. */
if (!rml_recv_posted) {
rc = orte_rml.recv_buffer_nb(ORTE_NAME_WILDCARD,
rc = ompi_rte_recv_buffer_nb(OMPI_NAME_WILDCARD,
OMPI_RML_TAG_OPENIB,
ORTE_RML_PERSISTENT,
rml_recv_cb,
NULL);
if (ORTE_SUCCESS != rc) {
if (OMPI_SUCCESS != rc) {
opal_output_verbose(5, mca_btl_base_output,
"wv BTL: oob CPC system error %d (%s)",
rc, opal_strerror(rc));
@ -258,7 +252,7 @@ static int oob_component_query(mca_btl_wv_module_t *btl,
*cpc = (ompi_btl_wv_connect_base_module_t *) malloc(sizeof(ompi_btl_wv_connect_base_module_t));
if (NULL == *cpc) {
orte_rml.recv_cancel(ORTE_NAME_WILDCARD, OMPI_RML_TAG_OPENIB);
ompi_rte_recv_cancel(OMPI_NAME_WILDCARD, OMPI_RML_TAG_OPENIB);
rml_recv_posted = false;
opal_output_verbose(5, mca_btl_base_output,
"wv BTL: oob CPC system error (malloc failed)");
@ -314,7 +308,7 @@ static int oob_module_start_connect(ompi_btl_wv_connect_base_module_t *cpc,
static int oob_component_finalize(void)
{
if (rml_recv_posted) {
orte_rml.recv_cancel(ORTE_NAME_WILDCARD, OMPI_RML_TAG_OPENIB);
ompi_rte_recv_cancel(OMPI_NAME_WILDCARD, OMPI_RML_TAG_OPENIB);
rml_recv_posted = false;
}
@ -613,7 +607,7 @@ static int send_connect_data(mca_btl_base_endpoint_t* endpoint,
int rc;
if (NULL == buffer) {
ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
OMPI_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
return OMPI_ERR_OUT_OF_RESOURCE;
}
@ -621,14 +615,14 @@ static int send_connect_data(mca_btl_base_endpoint_t* endpoint,
BTL_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT8));
rc = opal_dss.pack(buffer, &message_type, 1, OPAL_UINT8);
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
OMPI_ERROR_LOG(rc);
return rc;
}
BTL_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT64));
rc = opal_dss.pack(buffer, &endpoint->subnet_id, 1, OPAL_UINT64);
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
OMPI_ERROR_LOG(rc);
return rc;
}
@ -639,13 +633,13 @@ static int send_connect_data(mca_btl_base_endpoint_t* endpoint,
&endpoint->rem_info.rem_qps[0].rem_qp_num, 1,
OPAL_UINT32);
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
OMPI_ERROR_LOG(rc);
return rc;
}
BTL_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT16));
rc = opal_dss.pack(buffer, &endpoint->rem_info.rem_lid, 1, OPAL_UINT16);
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
OMPI_ERROR_LOG(rc);
return rc;
}
}
@ -658,14 +652,14 @@ static int send_connect_data(mca_btl_base_endpoint_t* endpoint,
rc = opal_dss.pack(buffer, &endpoint->qps[qp].qp->lcl_qp->qp_num,
1, OPAL_UINT32);
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
OMPI_ERROR_LOG(rc);
return rc;
}
BTL_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT32));
rc = opal_dss.pack(buffer, &endpoint->qps[qp].qp->lcl_psn, 1,
OPAL_UINT32);
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
OMPI_ERROR_LOG(rc);
return rc;
}
}
@ -673,30 +667,30 @@ static int send_connect_data(mca_btl_base_endpoint_t* endpoint,
BTL_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT16));
rc = opal_dss.pack(buffer, &endpoint->endpoint_btl->lid, 1, OPAL_UINT16);
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
OMPI_ERROR_LOG(rc);
return rc;
}
BTL_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT32));
rc = opal_dss.pack(buffer, &endpoint->endpoint_btl->device->mtu, 1,
OPAL_UINT32);
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
OMPI_ERROR_LOG(rc);
return rc;
}
BTL_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT32));
rc = opal_dss.pack(buffer, &endpoint->index, 1, OPAL_UINT32);
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
OMPI_ERROR_LOG(rc);
return rc;
}
}
/* send to remote endpoint */
rc = orte_rml.send_buffer_nb(&endpoint->endpoint_proc->proc_guid,
rc = ompi_rte_send_buffer_nb(&endpoint->endpoint_proc->proc_guid,
buffer, OMPI_RML_TAG_OPENIB, 0,
rml_send_cb, NULL);
if (ORTE_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
if (OMPI_SUCCESS != rc) {
OMPI_ERROR_LOG(rc);
return rc;
}
BTL_VERBOSE(("Sent QP Info, LID = %d, SUBNET = %" PRIx64 "\n",
@ -711,8 +705,8 @@ static int send_connect_data(mca_btl_base_endpoint_t* endpoint,
* Callback when we have finished RML sending the connect data to a
* remote peer
*/
static void rml_send_cb(int status, orte_process_name_t* endpoint,
opal_buffer_t* buffer, orte_rml_tag_t tag,
static void rml_send_cb(int status, ompi_process_name_t* endpoint,
opal_buffer_t* buffer, ompi_rml_tag_t tag,
void* cbdata)
{
OBJ_RELEASE(buffer);
@ -724,8 +718,8 @@ static void rml_send_cb(int status, orte_process_name_t* endpoint,
* and if this endpoint is trying to connect, reply with our QP info,
* otherwise try to modify QP's and establish reliable connection
*/
static void rml_recv_cb(int status, orte_process_name_t* process_name,
opal_buffer_t* buffer, orte_rml_tag_t tag,
static void rml_recv_cb(int status, ompi_process_name_t* process_name,
opal_buffer_t* buffer, ompi_rml_tag_t tag,
void* cbdata)
{
mca_btl_wv_proc_t *ib_proc;
@ -744,7 +738,7 @@ static void rml_recv_cb(int status, orte_process_name_t* process_name,
BTL_VERBOSE(("unpacking %d of %d\n", cnt, OPAL_UINT8));
rc = opal_dss.unpack(buffer, &message_type, &cnt, OPAL_UINT8);
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
OMPI_ERROR_LOG(rc);
mca_btl_wv_endpoint_invoke_error(NULL);
return;
}
@ -752,7 +746,7 @@ static void rml_recv_cb(int status, orte_process_name_t* process_name,
BTL_VERBOSE(("unpacking %d of %d\n", cnt, OPAL_UINT64));
rc = opal_dss.unpack(buffer, &rem_info.rem_subnet_id, &cnt, OPAL_UINT64);
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
OMPI_ERROR_LOG(rc);
mca_btl_wv_endpoint_invoke_error(NULL);
return;
}
@ -761,14 +755,14 @@ static void rml_recv_cb(int status, orte_process_name_t* process_name,
BTL_VERBOSE(("unpacking %d of %d\n", cnt, OPAL_UINT32));
rc = opal_dss.unpack(buffer, &lcl_qp, &cnt, OPAL_UINT32);
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
OMPI_ERROR_LOG(rc);
mca_btl_wv_endpoint_invoke_error(NULL);
return;
}
BTL_VERBOSE(("unpacking %d of %d\n", cnt, OPAL_UINT16));
rc = opal_dss.unpack(buffer, &lcl_lid, &cnt, OPAL_UINT16);
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
OMPI_ERROR_LOG(rc);
mca_btl_wv_endpoint_invoke_error(NULL);
return;
}
@ -786,7 +780,7 @@ static void rml_recv_cb(int status, orte_process_name_t* process_name,
rc = opal_dss.unpack(buffer, &rem_info.rem_qps[qp].rem_qp_num, &cnt,
OPAL_UINT32);
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
OMPI_ERROR_LOG(rc);
mca_btl_wv_endpoint_invoke_error(NULL);
return;
}
@ -794,7 +788,7 @@ static void rml_recv_cb(int status, orte_process_name_t* process_name,
rc = opal_dss.unpack(buffer, &rem_info.rem_qps[qp].rem_psn, &cnt,
OPAL_UINT32);
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
OMPI_ERROR_LOG(rc);
mca_btl_wv_endpoint_invoke_error(NULL);
return;
}
@ -803,21 +797,21 @@ static void rml_recv_cb(int status, orte_process_name_t* process_name,
BTL_VERBOSE(("unpacking %d of %d\n", cnt, OPAL_UINT16));
rc = opal_dss.unpack(buffer, &rem_info.rem_lid, &cnt, OPAL_UINT16);
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
OMPI_ERROR_LOG(rc);
mca_btl_wv_endpoint_invoke_error(NULL);
return;
}
BTL_VERBOSE(("unpacking %d of %d\n", cnt, OPAL_UINT32));
rc = opal_dss.unpack(buffer, &rem_info.rem_mtu, &cnt, OPAL_UINT32);
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
OMPI_ERROR_LOG(rc);
mca_btl_wv_endpoint_invoke_error(NULL);
return;
}
BTL_VERBOSE(("unpacking %d of %d\n", cnt, OPAL_UINT32));
rc = opal_dss.unpack(buffer, &rem_info.rem_index, &cnt, OPAL_UINT32);
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
OMPI_ERROR_LOG(rc);
mca_btl_wv_endpoint_invoke_error(NULL);
return;
}
@ -827,7 +821,7 @@ static void rml_recv_cb(int status, orte_process_name_t* process_name,
rem_info.rem_lid,
rem_info.rem_subnet_id));
master = orte_util_compare_name_fields(ORTE_NS_CMP_ALL, ORTE_PROC_MY_NAME,
master = ompi_rte_compare_name_fields(OMPI_RTE_CMP_ALL, OMPI_PROC_MY_NAME,
process_name) > 0 ? true : false;
/* Need to protect the ib_procs list */
@ -840,7 +834,7 @@ static void rml_recv_cb(int status, orte_process_name_t* process_name,
ib_proc = (mca_btl_wv_proc_t*)opal_list_get_next(ib_proc)) {
bool found = false;
if (orte_util_compare_name_fields(ORTE_NS_CMP_ALL,
if (ompi_rte_compare_name_fields(OMPI_RTE_CMP_ALL,
&ib_proc->proc_guid, process_name) != OPAL_EQUAL) {
continue;
}
@ -1021,8 +1015,8 @@ static int init_ud_qp(struct wv_context *context_arg,
cache->cq->cqe = (uint32_t) entries;
if (NULL == cache->cq) {
BTL_ERROR(("error creating cq, errno says %s", strerror(errno)));
orte_show_help("help-mpi-btl-wv.txt", "init-fail-create-q",
true, orte_process_info.nodename,
ompi_show_help("help-mpi-btl-wv.txt", "init-fail-create-q",
true, ompi_process_info.nodename,
__FILE__, __LINE__, "create_cq",
strerror(errno), errno,
context_arg->device->name);
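
Taken together, the oob CPC hunks follow one pattern: every orte_rml entry point becomes the matching ompi_rte_* call, callback signatures move to ompi_process_name_t and ompi_rml_tag_t, and the payload is still packed with opal_dss. A condensed sketch of the send side, using only calls visible in this hunk; the sketch_* names and the single qp_num payload are illustrative, and OBJ_NEW/OBJ_RELEASE are the usual OPAL object constructors rather than something introduced here.

#include "ompi_config.h"
#include "opal/dss/dss.h"
#include "ompi/mca/rte/rte.h"

/* Completion callback: the non-blocking send owns the buffer until this fires. */
static void sketch_send_cb(int status, ompi_process_name_t *peer,
                           opal_buffer_t *buffer, ompi_rml_tag_t tag,
                           void *cbdata)
{
    OBJ_RELEASE(buffer);
}

/* Pack one QP number and push it to a peer over the RTE messaging layer. */
static int sketch_send_qp_info(ompi_process_name_t *peer, uint32_t qp_num)
{
    opal_buffer_t *buffer = OBJ_NEW(opal_buffer_t);
    int rc;

    if (NULL == buffer) {
        OMPI_ERROR_LOG(OMPI_ERR_OUT_OF_RESOURCE);
        return OMPI_ERR_OUT_OF_RESOURCE;
    }
    rc = opal_dss.pack(buffer, &qp_num, 1, OPAL_UINT32);
    if (OPAL_SUCCESS != rc) {
        OMPI_ERROR_LOG(rc);
        OBJ_RELEASE(buffer);
        return rc;
    }
    rc = ompi_rte_send_buffer_nb(peer, buffer, OMPI_RML_TAG_OPENIB, 0,
                                 sketch_send_cb, NULL);
    if (OMPI_SUCCESS != rc) {
        OMPI_ERROR_LOG(rc);
        OBJ_RELEASE(buffer);
    }
    return rc;
}

The receive side in the same hunk mirrors this: a persistent ompi_rte_recv_buffer_nb() registered once on OMPI_RML_TAG_OPENIB, the peer matched with ompi_rte_compare_name_fields(), and the fields pulled back out with opal_dss.unpack().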

View file

@ -30,7 +30,6 @@
#include "mpi.h"
#include "ompi/communicator/communicator.h"
#include "opal/util/output.h"
#include "orte/util/show_help.h"
#include "opal/class/opal_list.h"
#include "opal/class/opal_object.h"
#include "opal/mca/mca.h"
@ -118,7 +117,7 @@ int mca_coll_base_comm_select(ompi_communicator_t * comm)
collective modules available, then print error and return. */
if (NULL == selectable) {
/* There's no modules available */
orte_show_help("help-mca-coll-base",
ompi_show_help("help-mca-coll-base",
"comm-select:none-available", true);
return OMPI_ERROR;
}

View file

@ -10,6 +10,8 @@
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2008 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2012 Los Alamos National Security, LLC.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -27,10 +29,11 @@
#include "ompi/constants.h"
#include "opal/class/opal_list.h"
#include "opal/util/output.h"
#include "orte/util/show_help.h"
#include "opal/mca/mca.h"
#include "opal/mca/base/base.h"
#include "opal/mca/base/mca_base_component_repository.h"
#include "ompi/mca/rte/rte.h"
#include "ompi/mca/coll/coll.h"
#include "ompi/mca/coll/base/base.h"
@ -130,7 +133,7 @@ int mca_coll_base_find_available(bool enable_progress_threads,
mca_coll_base_components_available_valid = false;
opal_output_verbose(10, mca_coll_base_output,
"coll:find_available: no coll components available!");
orte_show_help("help-mca-base", "find-available:none-found", true,
ompi_show_help("help-mca-base", "find-available:none-found", true,
"coll");
return OMPI_ERROR;
}

View file

@ -19,8 +19,6 @@
#include "opal/datatype/opal_convertor.h"
#include "opal/threads/mutex.h"
#include "orte/runtime/orte_globals.h"
#include "ompi/mca/coll/coll.h"
#include "ompi/request/request.h"
#include "ompi/mca/bcol/bcol.h"
@ -908,8 +906,8 @@ void mca_coll_ml_allreduce_matrix_init(mca_coll_ml_module_t *ml_module,
#define ML_ERROR(args) \
do { \
mca_coll_ml_err("[%s]%s[%s:%d:%s] COLL-ML ", \
orte_process_info.nodename, \
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME), \
ompi_process_info.nodename, \
OMPI_NAME_PRINT(OMPI_PROC_MY_NAME), \
__FILE__, __LINE__, __func__); \
mca_coll_ml_err args; \
mca_coll_ml_err("\n"); \
@ -920,8 +918,8 @@ do { \
do { \
if(mca_coll_ml_component.verbose >= level) { \
mca_coll_ml_err("[%s]%s[%s:%d:%s] COLL-ML ", \
orte_process_info.nodename, \
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME), \
ompi_process_info.nodename, \
OMPI_NAME_PRINT(OMPI_PROC_MY_NAME), \
__FILE__, __LINE__, __func__); \
mca_coll_ml_err args; \
mca_coll_ml_err("\n"); \

View file

@ -28,11 +28,6 @@
#include "ompi/mca/bcol/base/base.h"
#include "ompi/mca/sbgp/base/base.h"
#include "orte/mca/rml/rml.h"
#include "orte/util/proc_info.h"
#include "orte/util/name_fns.h"
#include "orte/mca/errmgr/errmgr.h"
#include "coll_ml.h"
#include "coll_ml_inlines.h"

View file

@ -23,8 +23,6 @@
#include "opal/threads/mutex.h"
#include "opal/sys/atomic.h"
#include "orte/util/show_help.h"
#include "ompi/op/op.h"
#include "ompi/constants.h"
#include "ompi/mca/coll/coll.h"

View file

@ -18,9 +18,6 @@
#include "ompi/communicator/communicator.h"
#include "ompi/mca/bcol/bcol.h"
#include "ompi/mca/bcol/base/base.h"
#include "orte/mca/rml/rml.h"
#include "orte/util/show_help.h"
#include "orte/util/proc_info.h"
#include "coll_ml.h"
#include "coll_ml_inlines.h"
#include "coll_ml_mca.h"

View file

@ -1,6 +1,8 @@
/*
* Copyright (c) 2009-2012 Oak Ridge National Laboratory. All rights reserved.
* Copyright (c) 2009-2012 Mellanox Technologies. All rights reserved.
* Copyright (c) 2012 Los Alamos National Security, LLC.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -26,18 +28,12 @@
#include "ompi/communicator/communicator.h"
#include "ompi/mca/coll/coll.h"
#include "ompi/mca/coll/base/base.h"
#include "ompi/mca/dpm/dpm.h"
#include "ompi/mca/sbgp/base/base.h"
#include "ompi/mca/bcol/base/base.h"
#include "ompi/mca/sbgp/sbgp.h"
#include "ompi/mca/common/commpatterns/common_coll_ops.h"
#include "ompi/mca/coll/ml/coll_ml.h"
#include "orte/mca/rml/rml.h"
#include "orte/util/proc_info.h"
#include "orte/util/name_fns.h"
#include "orte/mca/grpcomm/grpcomm.h"
#include "opal/util/argv.h"
#include "opal/datatype/opal_datatype.h"
#include "opal/util/output.h"

View file

@ -26,7 +26,6 @@
#include "mpi.h"
#include "opal/mca/mca.h"
#include "opal/datatype/opal_convertor.h"
#include "orte/types.h"
#include "ompi/mca/coll/coll.h"
#include "ompi/mca/common/sm/common_sm.h"

View file

@ -29,7 +29,6 @@
#include "ompi/constants.h"
#include "ompi/mca/coll/coll.h"
#include "orte/util/show_help.h"
#include "coll_sm.h"
@ -191,13 +190,13 @@ static int sm_register(void)
cs->sm_tree_degree,
&cs->sm_tree_degree);
if (cs->sm_tree_degree > cs->sm_control_size) {
orte_show_help("help-mpi-coll-sm.txt",
ompi_show_help("help-mpi-coll-sm.txt",
"tree-degree-larger-than-control", true,
cs->sm_tree_degree, cs->sm_control_size);
cs->sm_tree_degree = cs->sm_control_size;
}
if (cs->sm_tree_degree > 255) {
orte_show_help("help-mpi-coll-sm.txt",
ompi_show_help("help-mpi-coll-sm.txt",
"tree-degree-larger-than-255", true,
cs->sm_tree_degree);
cs->sm_tree_degree = 255;

View file

@ -51,13 +51,11 @@
#include "opal/mca/hwloc/base/base.h"
#include "opal/util/os_path.h"
#include "orte/util/proc_info.h"
#include "orte/util/name_fns.h"
#include "ompi/communicator/communicator.h"
#include "ompi/group/group.h"
#include "ompi/mca/coll/coll.h"
#include "ompi/mca/coll/base/base.h"
#include "ompi/mca/rte/rte.h"
#include "ompi/proc/proc.h"
#include "coll_sm.h"
@ -129,7 +127,7 @@ int mca_coll_sm_init_query(bool enable_progress_threads,
bool enable_mpi_threads)
{
/* if no session directory was created, then we cannot be used */
if (!orte_create_session_dirs) {
if (NULL == ompi_process_info.job_session_dir) {
return OMPI_ERR_OUT_OF_RESOURCE;
}
/* Don't do much here because we don't really want to allocate any
@ -514,7 +512,7 @@ static int bootstrap_comm(ompi_communicator_t *comm,
int num_in_use = c->sm_comm_num_in_use_flags;
int frag_size = c->sm_fragment_size;
int control_size = c->sm_control_size;
orte_process_name_t *lowest_name = NULL;
ompi_process_name_t *lowest_name = NULL;
size_t size;
ompi_proc_t *proc;
@ -526,21 +524,21 @@ static int bootstrap_comm(ompi_communicator_t *comm,
lowest_name = &(proc->proc_name);
for (i = 1; i < comm_size; ++i) {
proc = ompi_group_peer_lookup(comm->c_local_group, i);
if (orte_util_compare_name_fields(ORTE_NS_CMP_ALL,
if (ompi_rte_compare_name_fields(OMPI_RTE_CMP_ALL,
&(proc->proc_name),
lowest_name) < 0) {
lowest_name = &(proc->proc_name);
}
}
asprintf(&shortpath, "coll-sm-cid-%d-name-%s.mmap", comm->c_contextid,
ORTE_NAME_PRINT(lowest_name));
OMPI_NAME_PRINT(lowest_name));
if (NULL == shortpath) {
opal_output_verbose(10, mca_coll_base_output,
"coll:sm:enable:bootstrap comm (%d/%s): asprintf failed",
comm->c_contextid, comm->c_name);
return OMPI_ERR_OUT_OF_RESOURCE;
}
fullpath = opal_os_path(false, orte_process_info.job_session_dir,
fullpath = opal_os_path(false, ompi_process_info.job_session_dir,
shortpath, NULL);
free(shortpath);
if (NULL == fullpath) {

View file

@ -1,6 +1,8 @@
/*
* Copyright (c) 2009-2012 Mellanox Technologies. All rights reserved.
* Copyright (c) 2009-2012 Oak Ridge National Laboratory. All rights reserved.
* Copyright (c) 2012 Los Alamos National Security, LLC.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -15,12 +17,9 @@
#include "ompi/op/op.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/communicator/communicator.h"
#include "orte/mca/rml/rml.h"
#include "opal/include/opal/sys/atomic.h"
#include "common_coll_ops.h"
#include "ompi/mca/common/netpatterns/common_netpatterns.h"
#include "ompi/mca/dpm/dpm.h"
#include "orte/util/proc_info.h"
#include "ompi/mca/pml/pml.h"
/**

View file

@ -1,6 +1,8 @@
/*
* Copyright (c) 2009-2012 Mellanox Technologies. All rights reserved.
* Copyright (c) 2009-2012 Oak Ridge National Laboratory. All rights reserved.
* Copyright (c) 2012 Los Alamos National Security, LLC.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -15,13 +17,10 @@
#include "ompi/op/op.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/communicator/communicator.h"
#include "orte/mca/rml/rml.h"
#include "opal/include/opal/sys/atomic.h"
#include "ompi/mca/common/commpatterns/common_netpatterns.h"
#include "common_coll_ops.h"
#include "ompi/mca/common/netpatterns/common_netpatterns.h"
#include "ompi/mca/dpm/dpm.h"
#include "orte/util/proc_info.h"
#include "ompi/mca/pml/pml.h"
/**

View file

@ -1,6 +1,8 @@
/*
* Copyright (c) 2009-2012 Mellanox Technologies. All rights reserved.
* Copyright (c) 2009-2012 Oak Ridge National Laboratory. All rights reserved.
* Copyright (c) 2012 Los Alamos National Security, LLC.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -15,12 +17,9 @@
#include "ompi/op/op.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/communicator/communicator.h"
#include "orte/mca/rml/rml.h"
#include "opal/include/opal/sys/atomic.h"
#include "common_coll_ops.h"
#include "ompi/mca/common/netpatterns/common_netpatterns.h"
#include "ompi/mca/dpm/dpm.h"
#include "orte/util/proc_info.h"
#include "ompi/mca/pml/pml.h"
/**

View file

@ -1,6 +1,8 @@
/*
* Copyright (c) 2009-2012 Mellanox Technologies. All rights reserved.
* Copyright (c) 2009-2012 Oak Ridge National Laboratory. All rights reserved.
* Copyright (c) 2012 Los Alamos National Security, LLC.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -12,6 +14,7 @@
#define COMM_COLL_OP_TYPES_H
#include "ompi_config.h"
#include "ompi/communicator/communicator.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/proc/proc.h"

View file

@ -12,8 +12,6 @@
#define COMM_NETPATTERNS_H
#include "ompi_config.h"
#include "orte/include/orte/types.h"
#include "orte/mca/rml/rml_types.h"
BEGIN_C_DECLS

View file

@ -35,8 +35,6 @@
#include "opal/datatype/opal_datatype_cuda.h"
#include "opal/util/output.h"
#include "ompi/mca/mpool/base/base.h"
#include "orte/util/show_help.h"
#include "orte/util/proc_info.h"
#include "common_cuda.h"
static bool common_cuda_initialized = false;
@ -175,10 +173,10 @@ static int mca_common_cuda_init(void)
/* Check for the not initialized error since we can make suggestions to
* user for this error. */
if (CUDA_ERROR_NOT_INITIALIZED == res) {
orte_show_help("help-mpi-common-cuda.txt", "cuCtxGetCurrent failed not initialized",
ompi_show_help("help-mpi-common-cuda.txt", "cuCtxGetCurrent failed not initialized",
true);
} else {
orte_show_help("help-mpi-common-cuda.txt", "cuCtxGetCurrent failed",
ompi_show_help("help-mpi-common-cuda.txt", "cuCtxGetCurrent failed",
true, res);
}
}
@ -186,7 +184,7 @@ static int mca_common_cuda_init(void)
mca_common_cuda_register_memory = false;
} else if ((CUDA_SUCCESS == res) && (NULL == cuContext)) {
if (mca_common_cuda_warning) {
orte_show_help("help-mpi-common-cuda.txt", "cuCtxGetCurrent returned NULL",
ompi_show_help("help-mpi-common-cuda.txt", "cuCtxGetCurrent returned NULL",
true);
}
mca_common_cuda_enabled = false;
@ -211,7 +209,7 @@ static int mca_common_cuda_init(void)
cuda_event_ipc_array = (CUevent *) malloc(sizeof(CUevent) * cuda_event_max);
if (NULL == cuda_event_ipc_array) {
orte_show_help("help-mpi-common-cuda.txt", "No memory",
ompi_show_help("help-mpi-common-cuda.txt", "No memory",
true, errno, strerror(errno));
return OMPI_ERROR;
}
@ -220,7 +218,7 @@ static int mca_common_cuda_init(void)
for (i = 0; i < cuda_event_max; i++) {
res = cuEventCreate(&cuda_event_ipc_array[i], CU_EVENT_DISABLE_TIMING);
if (CUDA_SUCCESS != res) {
orte_show_help("help-mpi-common-cuda.txt", "cuEventCreate failed",
ompi_show_help("help-mpi-common-cuda.txt", "cuEventCreate failed",
true, res);
return OMPI_ERROR;
}
@ -231,7 +229,7 @@ static int mca_common_cuda_init(void)
cuda_event_ipc_frag_array = (struct mca_btl_base_descriptor_t **)
malloc(sizeof(struct mca_btl_base_descriptor_t *) * cuda_event_max);
if (NULL == cuda_event_ipc_frag_array) {
orte_show_help("help-mpi-common-cuda.txt", "No memory",
ompi_show_help("help-mpi-common-cuda.txt", "No memory",
true, errno, strerror(errno));
return OMPI_ERROR;
}
@ -249,7 +247,7 @@ static int mca_common_cuda_init(void)
cuda_event_dtoh_array = (CUevent *) malloc(sizeof(CUevent) * cuda_event_max);
if (NULL == cuda_event_dtoh_array) {
orte_show_help("help-mpi-common-cuda.txt", "No memory",
ompi_show_help("help-mpi-common-cuda.txt", "No memory",
true, errno, strerror(errno));
return OMPI_ERROR;
}
@ -258,7 +256,7 @@ static int mca_common_cuda_init(void)
for (i = 0; i < cuda_event_max; i++) {
res = cuEventCreate(&cuda_event_dtoh_array[i], CU_EVENT_DISABLE_TIMING);
if (CUDA_SUCCESS != res) {
orte_show_help("help-mpi-common-cuda.txt", "cuEventCreate failed",
ompi_show_help("help-mpi-common-cuda.txt", "cuEventCreate failed",
true, res);
return OMPI_ERROR;
}
@ -269,7 +267,7 @@ static int mca_common_cuda_init(void)
cuda_event_dtoh_frag_array = (struct mca_btl_base_descriptor_t **)
malloc(sizeof(struct mca_btl_base_descriptor_t *) * cuda_event_max);
if (NULL == cuda_event_dtoh_frag_array) {
orte_show_help("help-mpi-common-cuda.txt", "No memory",
ompi_show_help("help-mpi-common-cuda.txt", "No memory",
true, errno, strerror(errno));
return OMPI_ERROR;
}
@ -284,7 +282,7 @@ static int mca_common_cuda_init(void)
cuda_event_htod_array = (CUevent *) malloc(sizeof(CUevent) * cuda_event_max);
if (NULL == cuda_event_htod_array) {
orte_show_help("help-mpi-common-cuda.txt", "No memory",
ompi_show_help("help-mpi-common-cuda.txt", "No memory",
true, errno, strerror(errno));
return OMPI_ERROR;
}
@ -293,7 +291,7 @@ static int mca_common_cuda_init(void)
for (i = 0; i < cuda_event_max; i++) {
res = cuEventCreate(&cuda_event_htod_array[i], CU_EVENT_DISABLE_TIMING);
if (CUDA_SUCCESS != res) {
orte_show_help("help-mpi-common-cuda.txt", "cuEventCreate failed",
ompi_show_help("help-mpi-common-cuda.txt", "cuEventCreate failed",
true, res);
return OMPI_ERROR;
}
@ -304,7 +302,7 @@ static int mca_common_cuda_init(void)
cuda_event_htod_frag_array = (struct mca_btl_base_descriptor_t **)
malloc(sizeof(struct mca_btl_base_descriptor_t *) * cuda_event_max);
if (NULL == cuda_event_htod_frag_array) {
orte_show_help("help-mpi-common-cuda.txt", "No memory",
ompi_show_help("help-mpi-common-cuda.txt", "No memory",
true, errno, strerror(errno));
return OMPI_ERROR;
}
@ -319,9 +317,9 @@ static int mca_common_cuda_init(void)
if (res != CUDA_SUCCESS) {
/* If registering the memory fails, print a message and continue.
* This is not a fatal error. */
orte_show_help("help-mpi-common-cuda.txt", "cuMemHostRegister failed",
ompi_show_help("help-mpi-common-cuda.txt", "cuMemHostRegister failed",
true, mem_reg->ptr, mem_reg->amount,
orte_process_info.nodename, res, mem_reg->msg);
ompi_process_info.nodename, res, mem_reg->msg);
} else {
opal_output_verbose(20, mca_common_cuda_output,
"CUDA: cuMemHostRegister OK on mpool %s: "
@ -336,7 +334,7 @@ static int mca_common_cuda_init(void)
/* Create stream for use in ipc asynchronous copies */
res = cuStreamCreate(&ipcStream, 0);
if (res != CUDA_SUCCESS) {
orte_show_help("help-mpi-common-cuda.txt", "cuStreamCreate failed",
ompi_show_help("help-mpi-common-cuda.txt", "cuStreamCreate failed",
true, res);
return OMPI_ERROR;
}
@ -344,7 +342,7 @@ static int mca_common_cuda_init(void)
/* Create stream for use in dtoh asynchronous copies */
res = cuStreamCreate(&dtohStream, 0);
if (res != CUDA_SUCCESS) {
orte_show_help("help-mpi-common-cuda.txt", "cuStreamCreate failed",
ompi_show_help("help-mpi-common-cuda.txt", "cuStreamCreate failed",
true, res);
return OMPI_ERROR;
@ -353,7 +351,7 @@ static int mca_common_cuda_init(void)
/* Create stream for use in htod asynchronous copies */
res = cuStreamCreate(&htodStream, 0);
if (res != CUDA_SUCCESS) {
orte_show_help("help-mpi-common-cuda.txt", "cuStreamCreate failed",
ompi_show_help("help-mpi-common-cuda.txt", "cuStreamCreate failed",
true, res);
return OMPI_ERROR;
@ -393,9 +391,9 @@ void mca_common_cuda_register(void *ptr, size_t amount, char *msg) {
if (res != CUDA_SUCCESS) {
/* If registering the memory fails, print a message and continue.
* This is not a fatal error. */
orte_show_help("help-mpi-common-cuda.txt", "cuMemHostRegister failed",
ompi_show_help("help-mpi-common-cuda.txt", "cuMemHostRegister failed",
true, ptr, amount,
orte_process_info.nodename, res, msg);
ompi_process_info.nodename, res, msg);
} else {
opal_output_verbose(20, mca_common_cuda_output,
"CUDA: cuMemHostRegister OK on mpool %s: "
@ -432,9 +430,9 @@ void mca_common_cuda_unregister(void *ptr, char *msg) {
if (res != CUDA_SUCCESS) {
/* If unregistering the memory fails, print a message and continue.
* This is not a fatal error. */
orte_show_help("help-mpi-common-cuda.txt", "cuMemHostUnregister failed",
ompi_show_help("help-mpi-common-cuda.txt", "cuMemHostUnregister failed",
true, ptr,
orte_process_info.nodename, res, msg);
ompi_process_info.nodename, res, msg);
} else {
opal_output_verbose(20, mca_common_cuda_output,
"CUDA: cuMemHostUnregister OK on mpool %s: "
@ -473,7 +471,7 @@ int cuda_getmemhandle(void *base, size_t size, mca_mpool_base_registration_t *ne
CUDA_DUMP_MEMHANDLE((100, &memHandle, "GetMemHandle-After"));
if (CUDA_SUCCESS != result) {
orte_show_help("help-mpi-common-cuda.txt", "cuIpcGetMemHandle failed",
ompi_show_help("help-mpi-common-cuda.txt", "cuIpcGetMemHandle failed",
true, result, base);
return OMPI_ERROR;
} else {
@ -486,7 +484,7 @@ int cuda_getmemhandle(void *base, size_t size, mca_mpool_base_registration_t *ne
* how the remote side saves the handles in a cache. */
result = cuMemGetAddressRange(&pbase, &psize, (CUdeviceptr)base);
if (CUDA_SUCCESS != result) {
orte_show_help("help-mpi-common-cuda.txt", "cuMemGetAddressRange failed",
ompi_show_help("help-mpi-common-cuda.txt", "cuMemGetAddressRange failed",
true, result, base);
return OMPI_ERROR;
} else {
@ -509,7 +507,7 @@ int cuda_getmemhandle(void *base, size_t size, mca_mpool_base_registration_t *ne
* with. */
result = cuEventRecord((CUevent)cuda_reg->event, 0);
if (CUDA_SUCCESS != result) {
orte_show_help("help-mpi-common-cuda.txt", "cuEventRecord failed",
ompi_show_help("help-mpi-common-cuda.txt", "cuEventRecord failed",
true, result, base);
return OMPI_ERROR;
}
@ -561,7 +559,7 @@ int cuda_openmemhandle(void *base, size_t size, mca_mpool_base_registration_t *n
return OMPI_ERR_WOULD_BLOCK;
}
if (CUDA_SUCCESS != result) {
orte_show_help("help-mpi-common-cuda.txt", "cuIpcOpenMemHandle failed",
ompi_show_help("help-mpi-common-cuda.txt", "cuIpcOpenMemHandle failed",
true, result, base);
/* Currently, this is a non-recoverable error */
return OMPI_ERROR;
@ -585,7 +583,7 @@ int cuda_closememhandle(void *reg_data, mca_mpool_base_registration_t *reg)
result = cuIpcCloseMemHandle((CUdeviceptr)cuda_reg->base.alloc_base);
if (CUDA_SUCCESS != result) {
orte_show_help("help-mpi-common-cuda.txt", "cuIpcCloseMemHandle failed",
ompi_show_help("help-mpi-common-cuda.txt", "cuIpcCloseMemHandle failed",
true, result, cuda_reg->base.alloc_base);
/* We will just continue on and hope things continue to work. */
} else {
@ -604,13 +602,13 @@ void mca_common_cuda_construct_event_and_handle(uint64_t **event, void **handle)
result = cuEventCreate((CUevent *)event, CU_EVENT_INTERPROCESS | CU_EVENT_DISABLE_TIMING);
if (CUDA_SUCCESS != result) {
orte_show_help("help-mpi-common-cuda.txt", "cuEventCreate failed",
ompi_show_help("help-mpi-common-cuda.txt", "cuEventCreate failed",
true, result);
}
result = cuIpcGetEventHandle((CUipcEventHandle *)handle, (CUevent)*event);
if (CUDA_SUCCESS != result){
orte_show_help("help-mpi-common-cuda.txt", "cuIpcGetEventHandle failed",
ompi_show_help("help-mpi-common-cuda.txt", "cuIpcGetEventHandle failed",
true, result);
}
@ -624,7 +622,7 @@ void mca_common_cuda_destruct_event(uint64_t *event)
result = cuEventDestroy((CUevent)event);
if (CUDA_SUCCESS != result) {
orte_show_help("help-mpi-common-cuda.txt", "cuEventDestroy failed",
ompi_show_help("help-mpi-common-cuda.txt", "cuEventDestroy failed",
true, result);
}
}
@ -645,7 +643,7 @@ void mca_common_wait_stream_synchronize(mca_mpool_common_cuda_reg_t *rget_reg)
result = cuIpcOpenEventHandle(&event, evtHandle);
if (CUDA_SUCCESS != result){
orte_show_help("help-mpi-common-cuda.txt", "cuIpcOpenEventHandle failed",
ompi_show_help("help-mpi-common-cuda.txt", "cuIpcOpenEventHandle failed",
true, result);
}
@ -656,21 +654,21 @@ void mca_common_wait_stream_synchronize(mca_mpool_common_cuda_reg_t *rget_reg)
*/
result = cuEventRecord(event, 0);
if (CUDA_SUCCESS != result) {
orte_show_help("help-mpi-common-cuda.txt", "cuEventRecord failed",
ompi_show_help("help-mpi-common-cuda.txt", "cuEventRecord failed",
true, result);
}
/* END of Workaround */
result = cuStreamWaitEvent(0, event, 0);
if (CUDA_SUCCESS != result) {
orte_show_help("help-mpi-common-cuda.txt", "cuStreamWaitEvent failed",
ompi_show_help("help-mpi-common-cuda.txt", "cuStreamWaitEvent failed",
true, result);
}
/* All done with this event. */
result = cuEventDestroy(event);
if (CUDA_SUCCESS != result) {
orte_show_help("help-mpi-common-cuda.txt", "cuEventDestroy failed",
ompi_show_help("help-mpi-common-cuda.txt", "cuEventDestroy failed",
true, result);
}
}
@ -689,7 +687,7 @@ int mca_common_cuda_memcpy(void *dst, void *src, size_t amount, char *msg,
* return an error. The error message will tell the user to try and
* run again, but with a larger array for storing events. */
if (cuda_event_ipc_num_used == cuda_event_max) {
orte_show_help("help-mpi-common-cuda.txt", "Out of cuEvent handles",
ompi_show_help("help-mpi-common-cuda.txt", "Out of cuEvent handles",
true, cuda_event_max, cuda_event_max+100, cuda_event_max+100);
return OMPI_ERR_OUT_OF_RESOURCE;
}
@ -699,7 +697,7 @@ int mca_common_cuda_memcpy(void *dst, void *src, size_t amount, char *msg,
if (OPAL_LIKELY(mca_common_cuda_async)) {
result = cuMemcpyAsync((CUdeviceptr)dst, (CUdeviceptr)src, amount, ipcStream);
if (CUDA_SUCCESS != result) {
orte_show_help("help-mpi-common-cuda.txt", "cuMemcpyAsync failed",
ompi_show_help("help-mpi-common-cuda.txt", "cuMemcpyAsync failed",
true, dst, src, amount, result);
return OMPI_ERROR;
} else {
@ -709,7 +707,7 @@ int mca_common_cuda_memcpy(void *dst, void *src, size_t amount, char *msg,
}
result = cuEventRecord(cuda_event_ipc_array[cuda_event_ipc_first_avail], ipcStream);
if (CUDA_SUCCESS != result) {
orte_show_help("help-mpi-common-cuda.txt", "cuEventRecord failed",
ompi_show_help("help-mpi-common-cuda.txt", "cuEventRecord failed",
true, result);
return OMPI_ERROR;
}
@ -727,7 +725,7 @@ int mca_common_cuda_memcpy(void *dst, void *src, size_t amount, char *msg,
/* Mimic the async function so they use the same memcpy call. */
result = cuMemcpyAsync((CUdeviceptr)dst, (CUdeviceptr)src, amount, ipcStream);
if (CUDA_SUCCESS != result) {
orte_show_help("help-mpi-common-cuda.txt", "cuMemcpyAsync failed",
ompi_show_help("help-mpi-common-cuda.txt", "cuMemcpyAsync failed",
true, dst, src, amount, result);
return OMPI_ERROR;
} else {
@ -739,7 +737,7 @@ int mca_common_cuda_memcpy(void *dst, void *src, size_t amount, char *msg,
/* Record an event, then wait for it to complete with calls to cuEventQuery */
result = cuEventRecord(cuda_event_ipc_array[cuda_event_ipc_first_avail], ipcStream);
if (CUDA_SUCCESS != result) {
orte_show_help("help-mpi-common-cuda.txt", "cuEventRecord failed",
ompi_show_help("help-mpi-common-cuda.txt", "cuEventRecord failed",
true, result);
return OMPI_ERROR;
}
@ -755,7 +753,7 @@ int mca_common_cuda_memcpy(void *dst, void *src, size_t amount, char *msg,
result = cuEventQuery(cuda_event_ipc_array[cuda_event_ipc_first_used]);
if ((CUDA_SUCCESS != result) && (CUDA_ERROR_NOT_READY != result)) {
orte_show_help("help-mpi-common-cuda.txt", "cuEventQuery failed",
ompi_show_help("help-mpi-common-cuda.txt", "cuEventQuery failed",
true, result);
return OMPI_ERROR;
}
@ -767,7 +765,7 @@ int mca_common_cuda_memcpy(void *dst, void *src, size_t amount, char *msg,
}
result = cuEventQuery(cuda_event_ipc_array[cuda_event_ipc_first_used]);
if ((CUDA_SUCCESS != result) && (CUDA_ERROR_NOT_READY != result)) {
orte_show_help("help-mpi-common-cuda.txt", "cuEventQuery failed",
ompi_show_help("help-mpi-common-cuda.txt", "cuEventQuery failed",
true, result);
return OMPI_ERROR;
}
@ -796,14 +794,14 @@ int mca_common_cuda_record_dtoh_event(char *msg, struct mca_btl_base_descriptor_
* return an error. The error message will tell the user to try and
* run again, but with a larger array for storing events. */
if (cuda_event_dtoh_num_used == cuda_event_max) {
orte_show_help("help-mpi-common-cuda.txt", "Out of cuEvent handles",
ompi_show_help("help-mpi-common-cuda.txt", "Out of cuEvent handles",
true, cuda_event_max, cuda_event_max+100, cuda_event_max+100);
return OMPI_ERR_OUT_OF_RESOURCE;
}
result = cuEventRecord(cuda_event_dtoh_array[cuda_event_dtoh_first_avail], dtohStream);
if (CUDA_SUCCESS != result) {
orte_show_help("help-mpi-common-cuda.txt", "cuEventRecord failed",
ompi_show_help("help-mpi-common-cuda.txt", "cuEventRecord failed",
true, result);
return OMPI_ERROR;
}
@ -831,14 +829,14 @@ int mca_common_cuda_record_htod_event(char *msg, struct mca_btl_base_descriptor_
* return an error. The error message will tell the user to try and
* run again, but with a larger array for storing events. */
if (cuda_event_htod_num_used == cuda_event_max) {
orte_show_help("help-mpi-common-cuda.txt", "Out of cuEvent handles",
ompi_show_help("help-mpi-common-cuda.txt", "Out of cuEvent handles",
true, cuda_event_max, cuda_event_max+100, cuda_event_max+100);
return OMPI_ERR_OUT_OF_RESOURCE;
}
result = cuEventRecord(cuda_event_htod_array[cuda_event_htod_first_avail], htodStream);
if (CUDA_SUCCESS != result) {
orte_show_help("help-mpi-common-cuda.txt", "cuEventRecord failed",
ompi_show_help("help-mpi-common-cuda.txt", "cuEventRecord failed",
true, result);
return OMPI_ERROR;
}
@ -890,7 +888,7 @@ int progress_one_cuda_ipc_event(struct mca_btl_base_descriptor_t **frag) {
*frag = NULL;
return 0;
} else if (CUDA_SUCCESS != result) {
orte_show_help("help-mpi-common-cuda.txt", "cuEventQuery failed",
ompi_show_help("help-mpi-common-cuda.txt", "cuEventQuery failed",
true, result);
*frag = NULL;
return OMPI_ERROR;
@ -932,7 +930,7 @@ int progress_one_cuda_dtoh_event(struct mca_btl_base_descriptor_t **frag) {
*frag = NULL;
return 0;
} else if (CUDA_SUCCESS != result) {
orte_show_help("help-mpi-common-cuda.txt", "cuEventQuery failed",
ompi_show_help("help-mpi-common-cuda.txt", "cuEventQuery failed",
true, result);
*frag = NULL;
return OMPI_ERROR;
@ -974,7 +972,7 @@ int progress_one_cuda_htod_event(struct mca_btl_base_descriptor_t **frag) {
*frag = NULL;
return 0;
} else if (CUDA_SUCCESS != result) {
orte_show_help("help-mpi-common-cuda.txt", "cuEventQuery failed",
ompi_show_help("help-mpi-common-cuda.txt", "cuEventQuery failed",
true, result);
*frag = NULL;
return OMPI_ERROR;
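
Beyond the show_help renames, the common/cuda hunks all revolve around one mechanism that the comments above describe: an asynchronous copy is queued on a CUDA stream, an event is recorded behind it, and the progress functions later poll that event with cuEventQuery() until it stops returning CUDA_ERROR_NOT_READY. Below is a condensed sketch of that mechanism, using only CUDA driver API calls that already appear above; the real code keeps arrays of pre-created events and polls one per progress call, whereas this illustration spins on a single event.

#include <cuda.h>

/* Queue dst <- src on a stream, record an event behind it, and poll until done. */
static int sketch_async_copy_and_wait(CUdeviceptr dst, CUdeviceptr src,
                                      size_t amount, CUstream stream)
{
    CUevent event;
    CUresult res;

    res = cuEventCreate(&event, CU_EVENT_DISABLE_TIMING);
    if (CUDA_SUCCESS != res) {
        return -1;
    }
    res = cuMemcpyAsync(dst, src, amount, stream);   /* copy is ordered on the stream */
    if (CUDA_SUCCESS != res) {
        (void) cuEventDestroy(event);
        return -1;
    }
    res = cuEventRecord(event, stream);              /* event completes after the copy */
    if (CUDA_SUCCESS != res) {
        (void) cuEventDestroy(event);
        return -1;
    }
    do {                                             /* illustration only: busy-poll */
        res = cuEventQuery(event);
    } while (CUDA_ERROR_NOT_READY == res);

    (void) cuEventDestroy(event);
    return (CUDA_SUCCESS == res) ? 0 : -1;
}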

Some files were not shown because too many files changed in this diff.