openmpi/ompi/runtime/ompi_mpi_abort.c
Jeff Squyres 852af8b834 ompi_mpi_abort: fix corner cases, simplify logic
I recently found a case where ompi_mpi_abort() segv's:

{{{
$ mpirun --mca btl non_existent_btl_name ...
}}}

In this case, the BML init fails because we have no paths to any
peers.  It calls ompi_mpi_abort(), but this is before ompi_comm_self
has been set up.  ompi_mpi_abort() assumes that if the comm parameter
is != NULL, it can be used.  But since we aborted so early in
MPI_INIT, that's a false assumption.

(note that this isn't happening on v1.8 because the check for
INIT/FINALIZE in ompi_mpi_abort() is a little different.  Hence: this
is a trunk issue -- at least for now)

When fixing this problem, I noticed a few other problems in ompi_mpi_abort():

* the group access was incorrect (it didn't use accessor functions)
* it wasn't clear that ORTE's ompi_rte_abort_peers() returns
  NOT_IMPLEMENTED, and that the caller then falls through to
  ompi_rte_abort()
* the check for my proc in the communicator was a little more
  complicated than necessary
* the logic for checking for aborts early in MPI_INIT wasn't right
* some comments were stale
* the hostname output in error messages would be NULL if MPI_FINALIZE
  had been invoked
* it was possible to abort, but still exit with a 0 status (see the
  sketch below)

This commit fixes all of the above problems, and makes the logic a
little more straightforward.  Thanks to Ralph Castain and George
Bosilca for the assists with this patch.
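
To make the last two bullets concrete, here is a minimal, self-contained
sketch (not the actual patch) of the control flow that the fixed
ompi_mpi_abort() follows.  The sketch_abort() function and the three local
flags are hypothetical stand-ins for the real ompi_rte_initialized /
ompi_mpi_initialized / ompi_mpi_finalized globals and the real RTE calls;
the full fixed file is reproduced below.

{{{
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

/* Stand-in flags; the real code checks the ompi_* globals. */
static bool rte_initialized = false;   /* runtime up?            */
static bool mpi_initialized = false;   /* MPI_INIT completed?    */
static bool mpi_finalized   = false;   /* MPI_FINALIZE started?  */

static void sketch_abort(void *comm, int errcode)
{
    /* Too early or too late: the RTE can't kill anyone, so just
       exit locally -- but never with status 0. */
    if (!rte_initialized) {
        fprintf(stderr, "Local abort %s\n",
                mpi_finalized ? "after MPI_FINALIZE started"
                              : "before MPI_INIT completed");
        exit(errcode == 0 ? 1 : errcode);
    }

    /* Only dereference the communicator when MPI is fully up and
       the pointer is non-NULL; otherwise fall through and abort
       the whole job. */
    if (mpi_initialized && !mpi_finalized && NULL != comm) {
        /* try_kill_peers(comm, errcode); */
    }

    /* Stand-in for the whole-job abort, e.g. ompi_rte_abort() */
    exit(errcode == 0 ? 1 : errcode);
}

int main(void)
{
    sketch_abort(NULL, 0);   /* exits with status 1, not 0 */
    return 0;
}
}}}

The key points are that the communicator is only dereferenced when MPI is
fully initialized and not yet finalized, and that an errcode of 0 is mapped
to a nonzero exit status so an abort can never look like success.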

This commit was SVN r32125.
2014-07-03 02:38:27 +00:00


/*
 * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation. All rights reserved.
 * Copyright (c) 2004-2014 The University of Tennessee and The University
 *                         of Tennessee Research Foundation. All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart. All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * Copyright (c) 2006-2014 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2010-2011 Oak Ridge National Labs. All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */

#include "ompi_config.h"
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#ifdef HAVE_SYS_PARAM_H
#include <sys/param.h>
#endif
#ifdef HAVE_NETDB_H
#include <netdb.h>
#endif
#include <errno.h>

#include "opal/mca/backtrace/backtrace.h"

#include "ompi/communicator/communicator.h"
#include "ompi/runtime/mpiruntime.h"
#include "ompi/runtime/params.h"
#include "ompi/debuggers/debuggers.h"
#include "ompi/errhandler/errcode.h"

static bool have_been_invoked = false;

/*
 * Local helper function to build an array of all the procs in a
 * communicator, excluding this process.
 *
 * Killing just the indicated peers must be implemented for
 * MPI_Abort() to work according to the standard language for
 * a 'high-quality' implementation.
 *
 * It would be nifty if we could differentiate between the
 * abort scenarios (but we don't, currently):
 * - MPI_Abort()
 * - MPI_ERRORS_ARE_FATAL
 * - Victim of MPI_Abort()
 */
static void try_kill_peers(ompi_communicator_t *comm,
                           int errcode)
{
    int nprocs;
    ompi_process_name_t *procs;

    nprocs = ompi_comm_size(comm);
    /* ompi_comm_remote_size() returns 0 if not an intercomm, so
       this is safe */
    nprocs += ompi_comm_remote_size(comm);

    procs = (ompi_process_name_t*) calloc(nprocs, sizeof(ompi_process_name_t));
    if (NULL == procs) {
        /* quick clean orte and get out */
        ompi_rte_abort(errno, "Abort: unable to alloc memory to kill procs");
    }

    /* put all the local group procs in the abort list */
    int rank, i, count;
    rank = ompi_comm_rank(comm);
    for (count = i = 0; i < ompi_comm_size(comm); ++i) {
        if (rank == i) {
            /* Don't include this process in the array */
            --nprocs;
        } else {
            assert(count <= nprocs);
            procs[count++] =
                ompi_group_get_proc_ptr(comm->c_remote_group, i)->proc_name;
        }
    }

    /* if requested, kill off remote group procs too */
    for (i = 0; i < ompi_comm_remote_size(comm); ++i) {
        assert(count <= nprocs);
        procs[count++] =
            ompi_group_get_proc_ptr(comm->c_remote_group, i)->proc_name;
    }

    if (nprocs > 0) {
        ompi_rte_abort_peers(procs, nprocs, errcode);
    }

    /* We could fall through here if ompi_rte_abort_peers() fails, or
       if (nprocs == 0).  Either way, tidy up and let the caller
       handle it. */
    free(procs);
}

int
ompi_mpi_abort(struct ompi_communicator_t* comm,
               int errcode)
{
    char *msg, *host, hostname[MAXHOSTNAMELEN];
    pid_t pid = 0;

    /* Protection for recursive invocation */
    if (have_been_invoked) {
        return OMPI_SUCCESS;
    }
    have_been_invoked = true;

    /* If MPI is initialized, we know we have a runtime nodename, so
       use that.  Otherwise, call gethostname. */
    if (ompi_rte_initialized) {
        host = ompi_process_info.nodename;
    } else {
        gethostname(hostname, sizeof(hostname));
        host = hostname;
    }
    pid = getpid();

    /* Should we print a stack trace?  Not aggregated because they
       might be different on all processes. */
    if (ompi_mpi_abort_print_stack) {
        char **messages;
        int len, i;

        if (OMPI_SUCCESS == opal_backtrace_buffer(&messages, &len)) {
            for (i = 0; i < len; ++i) {
                fprintf(stderr, "[%s:%d] [%d] func:%s\n", host, (int) pid,
                        i, messages[i]);
                fflush(stderr);
            }
            free(messages);
        } else {
            /* This will print a message if it's unable to print the
               backtrace, so we don't need an additional "else" clause
               if opal_backtrace_print() is not supported. */
            opal_backtrace_print(stderr, NULL, 1);
        }
    }

    /* Notify the debugger that we're about to abort */
    if (errcode < 0 ||
        asprintf(&msg, "[%s:%d] aborting with MPI error %s%s",
                 host, (int) pid, ompi_mpi_errnum_get_string(errcode),
                 ompi_mpi_abort_print_stack ?
                 " (stack trace available on stderr)" : "") < 0) {
        msg = NULL;
    }
    ompi_debugger_notify_abort(msg);
    if (NULL != msg) {
        free(msg);
    }

    /* Should we wait for a while before aborting? */
    if (0 != ompi_mpi_abort_delay) {
        if (ompi_mpi_abort_delay < 0) {
            fprintf(stderr, "[%s:%d] Looping forever (MCA parameter mpi_abort_delay is < 0)\n",
                    host, (int) pid);
            fflush(stderr);
            while (1) {
                sleep(5);
            }
        } else {
            fprintf(stderr, "[%s:%d] Delaying for %d seconds before aborting\n",
                    host, (int) pid, ompi_mpi_abort_delay);
            do {
                sleep(1);
            } while (--ompi_mpi_abort_delay > 0);
        }
    }

    /* If the RTE isn't setup yet/any more, then don't even try
       killing everyone.  Sorry, Charlie... */
    if (!ompi_rte_initialized) {
        fprintf(stderr, "[%s:%d] Local abort %s completed successfully, but am not able to aggregate error messages, and not able to guarantee that all other processes were killed!\n",
                host, (int) pid, ompi_mpi_finalized ?
                "after MPI_FINALIZE started" : "before MPI_INIT completed");
        exit(errcode == 0 ? 1 : errcode);
    }

    /* If OMPI is initialized and we have a non-NULL communicator,
       then try to kill just that set of processes */
    if (ompi_mpi_initialized && !ompi_mpi_finalized && NULL != comm) {
        try_kill_peers(comm, errcode);
    }

    /* We can fall through to here in a few cases:

       1. The attempt to kill just a subset of peers via
          try_kill_peers() failed (e.g., as of July 2014, ORTE
          returns NOT_IMPLEMENTED from ompi_rte_abort_peers()).
       2. MPI wasn't initialized, was already finalized, or we got a
          NULL communicator.

       In all of these cases, the only sensible thing left to do is to
       kill the entire job.  Wah wah. */
    ompi_rte_abort(errcode, NULL);

    /* Does not return */
}