openmpi/ompi/runtime/ompi_mpi_abort.c
Ralph Castain 552c9ca5a0 George did the work and deserves all the credit for it. Ralph did the merge, and deserves whatever blame results from errors in it :-)
WHAT:    Open our low-level communication infrastructure by moving all necessary components (btl/rcache/allocator/mpool) down into OPAL

All the components required for inter-process communication are currently deeply integrated into the OMPI layer. Several groups/institutions have expressed interest in a more generic communication infrastructure, without all the OMPI layer dependencies. This communication layer should be made available at a different software level, accessible to all layers in the Open MPI software stack. As an example, our ORTE layer could replace the current OOB and instead use the BTL directly, gaining access to more reactive network interfaces than TCP. Similarly, external software libraries could take advantage of our highly optimized AM (active message) communication layer for their own purposes.

UTK, with support from Sandia, developed a version of Open MPI where the entire communication infrastructure has been moved down to OPAL (btl/rcache/allocator/mpool). Most of the moved components have been updated to match the new schema, with a few exceptions (mainly BTLs that I have no way of compiling/testing). Thus, the completion of this RFC is tied to being able to complete this move for all BTLs. For this we need help from the rest of the Open MPI community, especially those supporting some of the BTLs. A non-exhaustive list of BTLs that qualify here is: mx, portals4, scif, udapl, ugni, usnic.

This commit was SVN r32317.
2014-07-26 00:47:28 +00:00

216 lines
7.0 KiB
C

/*
 * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation.  All rights reserved.
 * Copyright (c) 2004-2014 The University of Tennessee and The University
 *                         of Tennessee Research Foundation.  All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart.  All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * Copyright (c) 2006-2014 Cisco Systems, Inc.  All rights reserved.
 * Copyright (c) 2010-2011 Oak Ridge National Labs.  All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */
#include "ompi_config.h"
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#ifdef HAVE_SYS_PARAM_H
#include <sys/param.h>
#endif
#ifdef HAVE_NETDB_H
#include <netdb.h>
#endif
#include <errno.h>
#include "opal/mca/backtrace/backtrace.h"
#include "ompi/communicator/communicator.h"
#include "ompi/runtime/mpiruntime.h"
#include "ompi/runtime/params.h"
#include "ompi/debuggers/debuggers.h"
#include "ompi/errhandler/errcode.h"
static bool have_been_invoked = false;

/*
 * Local helper function to build an array of all the procs in a
 * communicator, excluding this process.
 *
 * Killing just the indicated peers must be implemented for
 * MPI_Abort() to work according to the standard language for
 * a 'high-quality' implementation.
 *
 * It would be nifty if we could differentiate between the
 * abort scenarios (but we don't, currently):
 * - MPI_Abort()
 * - MPI_ERRORS_ARE_FATAL
 * - Victim of MPI_Abort()
 */
static void try_kill_peers(ompi_communicator_t *comm,
                           int errcode)
{
    int nprocs;
    ompi_process_name_t *procs;

    nprocs = ompi_comm_size(comm);
    /* ompi_comm_remote_size() returns 0 if not an intercomm, so
       this is safe */
    nprocs += ompi_comm_remote_size(comm);
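
    /* Allocate one slot per local (and remote) rank; the slot for
       this process goes unused, since nprocs is decremented when we
       skip ourselves in the loop below. */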
    procs = (ompi_process_name_t*) calloc(nprocs, sizeof(ompi_process_name_t));
    if (NULL == procs) {
        /* quick clean orte and get out */
        ompi_rte_abort(errno, "Abort: unable to alloc memory to kill procs");
    }

    /* put all the local group procs in the abort list */
    int rank, i, count;
    rank = ompi_comm_rank(comm);
    for (count = i = 0; i < ompi_comm_size(comm); ++i) {
        if (rank == i) {
            /* Don't include this process in the array */
            --nprocs;
        } else {
            assert(count <= nprocs);
            procs[count++] =
                *OMPI_CAST_ORTE_NAME(&ompi_group_get_proc_ptr(comm->c_local_group, i)->super.proc_name);
        }
    }

    /* if requested, kill off remote group procs too */
    for (i = 0; i < ompi_comm_remote_size(comm); ++i) {
        assert(count <= nprocs);
        procs[count++] =
            *OMPI_CAST_ORTE_NAME(&ompi_group_get_proc_ptr(comm->c_remote_group, i)->super.proc_name);
    }

    if (nprocs > 0) {
        ompi_rte_abort_peers(procs, nprocs, errcode);
    }

    /* We could fall through here if ompi_rte_abort_peers() fails, or
       if (nprocs == 0).  Either way, tidy up and let the caller
       handle it. */
    free(procs);
}

int
ompi_mpi_abort(struct ompi_communicator_t* comm,
               int errcode)
{
    char *msg, *host, hostname[MAXHOSTNAMELEN];
    pid_t pid = 0;

    /* Protection for recursive invocation */
    if (have_been_invoked) {
        return OMPI_SUCCESS;
    }
    have_been_invoked = true;

    /* If MPI is initialized, we know we have a runtime nodename, so
       use that.  Otherwise, call gethostname. */
    if (ompi_rte_initialized) {
        host = ompi_process_info.nodename;
    } else {
        gethostname(hostname, sizeof(hostname));
        host = hostname;
    }
    pid = getpid();

    /* Should we print a stack trace?  Not aggregated because they
       might be different on all processes. */
    if (ompi_mpi_abort_print_stack) {
        char **messages;
        int len, i;

        if (OMPI_SUCCESS == opal_backtrace_buffer(&messages, &len)) {
            for (i = 0; i < len; ++i) {
                fprintf(stderr, "[%s:%d] [%d] func:%s\n", host, (int) pid,
                        i, messages[i]);
                fflush(stderr);
            }
            free(messages);
        } else {
            /* This will print a message if it's unable to print the
               backtrace, so we don't need an additional "else" clause
               if opal_backtrace_print() is not supported. */
            opal_backtrace_print(stderr, NULL, 1);
        }
    }

    /* Notify the debugger that we're about to abort */
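    /* Skip building the message for negative (non-representable)
       error codes, and fall back to a NULL message if asprintf()
       fails; ompi_debugger_notify_abort() tolerates a NULL message. */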
    if (errcode < 0 ||
        asprintf(&msg, "[%s:%d] aborting with MPI error %s%s",
                 host, (int) pid, ompi_mpi_errnum_get_string(errcode),
                 ompi_mpi_abort_print_stack ?
                 " (stack trace available on stderr)" : "") < 0) {
        msg = NULL;
    }
    ompi_debugger_notify_abort(msg);
    if (NULL != msg) {
        free(msg);
    }
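
    /* ompi_mpi_abort_delay is set from the mpi_abort_delay MCA
       parameter: 0 means abort immediately, a positive value means
       sleep that many seconds first, and a negative value means loop
       forever (handy for attaching a debugger). */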
    /* Should we wait for a while before aborting? */
    if (0 != ompi_mpi_abort_delay) {
        if (ompi_mpi_abort_delay < 0) {
            fprintf(stderr, "[%s:%d] Looping forever (MCA parameter mpi_abort_delay is < 0)\n",
                    host, (int) pid);
            fflush(stderr);
            while (1) {
                sleep(5);
            }
        } else {
            fprintf(stderr, "[%s:%d] Delaying for %d seconds before aborting\n",
                    host, (int) pid, ompi_mpi_abort_delay);
            do {
                sleep(1);
            } while (--ompi_mpi_abort_delay > 0);
        }
    }

    /* If the RTE isn't setup yet/any more, then don't even try
       killing everyone.  Sorry, Charlie... */
    if (!ompi_rte_initialized) {
        fprintf(stderr, "[%s:%d] Local abort %s completed successfully, but am not able to aggregate error messages, and not able to guarantee that all other processes were killed!\n",
                host, (int) pid, ompi_mpi_finalized ?
                "after MPI_FINALIZE started" : "before MPI_INIT completed");
        exit(errcode == 0 ? 1 : errcode);
    }

    /* If OMPI is initialized and we have a non-NULL communicator,
       then try to kill just that set of processes */
    if (ompi_mpi_initialized && !ompi_mpi_finalized && NULL != comm) {
        try_kill_peers(comm, errcode);
    }

    /* We can fall through to here in a few cases:

       1. The attempt to kill just a subset of peers via
          try_kill_peers() failed (e.g., as of July 2014, ORTE
          returns NOT_IMPLEMENTED from orte_rte_abort_peers()).
       2. MPI wasn't initialized, was already finalized, or we got a
          NULL communicator.

       In all of these cases, the only sensible thing left to do is to
       kill the entire job.  Wah wah. */
    ompi_rte_abort(errcode, NULL);

    /* Does not return */
}