583bf425c0
Short version: remove opal_paffinity_alone and restore mpi_paffinity_alone. ORTE makes various information available for the MPI layer to decide what it wants to do in terms of processor affinity. Details: * remove opal_paffinity_alone MCA param; restore mpi_paffinity_alone MCA param * move opal_paffinity_slot_list param registration to paffinity base * ompi_mpi_init() calls opal_paffinity_base_slot_list_set(); if that succeeds use that. If no slot list was set, see if mpi_paffinity_alone was set. If so, bind this process to its Node Local Rank (NLR). The NLR is the ORTE-maintained slot ID; if you COMM_SPAWN to a host in this ORTE universe that already has procs on it, the NLR for the new job will start at N (not 0). So this is slightly better than mpi_paffinity_alone in the v1.2 series. * If a slot list is specified *and* mpi_paffinity_alone is set, we display an error and abort. * Remove calls from rmaps/rank_file component to register and lookup opal_paffinity mca params. * Remove code in orte/odls that set affinities - instead, have them just pass a slot_list if it exists. * Cleanup the orte/odls code that determined oversubscribed/want_processor as these were just opposites of each other. This commit was SVN r18874. The following Trac tickets were found above: Ticket 1383 --> https://svn.open-mpi.org/trac/ompi/ticket/1383
169 строки
5.4 KiB
C
169 строки
5.4 KiB
C
/*
|
|
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
|
|
* University Research and Technology
|
|
* Corporation. All rights reserved.
|
|
* Copyright (c) 2004-2005 The University of Tennessee and The University
|
|
* of Tennessee Research Foundation. All rights
|
|
* reserved.
|
|
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
|
|
* University of Stuttgart. All rights reserved.
|
|
* Copyright (c) 2004-2005 The Regents of the University of California.
|
|
* All rights reserved.
|
|
* Copyright (c) 2007 Los Alamos National Security, LLC. All rights
|
|
* reserved.
|
|
* Copyright (c) 2006-2008 Cisco Systems, Inc. All rights reserved.
|
|
* $COPYRIGHT$
|
|
*
|
|
* Additional copyrights may follow
|
|
*
|
|
* $HEADER$
|
|
*/
|
|
|
|
#ifndef OMPI_RUNTIME_PARAMS_H
|
|
#define OMPI_RUNTIME_PARAMS_H
|
|
|
|
BEGIN_C_DECLS
|
|
|
|
/*
|
|
* Global variables
|
|
*/
|
|
|
|
/**
|
|
* Whether or not to check the parameters of top-level MPI API
|
|
* functions or not.
|
|
*
|
|
* This variable should never be checked directly; the macro
|
|
* MPI_PARAM_CHECK should be used instead. This allows multiple
|
|
* levels of MPI function parameter checking:
|
|
*
|
|
* #- Disable all parameter checking at configure/compile time
|
|
* #- Enable all parameter checking at configure/compile time
|
|
* #- Disable all parameter checking at run time
|
|
* #- Enable all parameter checking at run time
|
|
*
|
|
* Hence, the MPI_PARAM_CHECK macro will either be "0", "1", or
|
|
* "ompi_mpi_param_check".
|
|
*/
|
|
OMPI_DECLSPEC extern bool ompi_mpi_param_check;
|
|
|
|
/**
|
|
* Whether or not to check for MPI handle leaks during MPI_FINALIZE.
|
|
* If enabled, each MPI handle type will display a summary of the
|
|
* handles that are still allocated during MPI_FINALIZE.
|
|
*
|
|
* This is good debugging for user applications to find out if they
|
|
* are inadvertantly orphaning MPI handles.
|
|
*/
|
|
OMPI_DECLSPEC extern bool ompi_debug_show_handle_leaks;
|
|
|
|
/**
|
|
* If > 0, show that many MPI_ALLOC_MEM leaks during MPI_FINALIZE. If
|
|
* enabled, memory that was returned via MPI_ALLOC_MEM but was never
|
|
* freed via MPI_FREE_MEM will be displayed during MPI_FINALIZE.
|
|
*
|
|
* This is good debugging for user applications to find out if they
|
|
* are inadvertantly orphaning MPI "special" memory.
|
|
*/
|
|
OMPI_DECLSPEC extern int ompi_debug_show_mpi_alloc_mem_leaks;
|
|
|
|
/**
|
|
* Whether or not to actually free MPI handles when their
|
|
* corresponding destructor is invoked. If enabled, Open MPI will not
|
|
* free handles, but will rather simply mark them as "freed". Any
|
|
* attempt to use them will result in an MPI exception.
|
|
*
|
|
* This is good debugging for user applications to find out if they
|
|
* are inadvertantly using MPI handles after they have been freed.
|
|
*/
|
|
OMPI_DECLSPEC extern bool ompi_debug_no_free_handles;
|
|
|
|
/**
|
|
* Whether or not to print MCA parameters on MPI_INIT
|
|
*
|
|
* This is good debugging for user applications to see exactly which
|
|
* MCA parameters are being used in the current program execution.
|
|
*/
|
|
OMPI_DECLSPEC extern bool ompi_mpi_show_mca_params;
|
|
|
|
/**
|
|
* Whether or not to print the MCA parameters to a file or to stdout
|
|
*
|
|
* If this argument is set then it is used when parameters are dumped
|
|
* when the mpi_show_mca_params is set.
|
|
*/
|
|
OMPI_DECLSPEC extern char * ompi_mpi_show_mca_params_file;
|
|
|
|
/**
|
|
* If this value is true, assume that this ORTE job is the only job
|
|
* running on the nodes that have been allocated to it, and bind
|
|
* processes to the processor ID corresponding to their node local
|
|
* rank (if you COMM_SPAWN on to empty processors on the same node,
|
|
* the NLR will start at N, not 0).
|
|
*/
|
|
OMPI_DECLSPEC extern bool ompi_mpi_paffinity_alone;
|
|
|
|
/**
|
|
* Whether we should keep the string hostnames of all the MPI
|
|
* process peers around or not (eats up a good bit of memory).
|
|
*/
|
|
OMPI_DECLSPEC extern bool ompi_mpi_keep_peer_hostnames;
|
|
|
|
/**
|
|
* Whether an MPI_ABORT should print out a stack trace or not.
|
|
*/
|
|
OMPI_DECLSPEC extern bool ompi_mpi_abort_print_stack;
|
|
|
|
/**
|
|
* Whether MPI_ABORT should print out an identifying message
|
|
* (e.g., hostname and PID) and loop waiting for a debugger to
|
|
* attach. The value of the integer is how many seconds to wait:
|
|
*
|
|
* 0 = do not print the message and do not loop
|
|
* negative value = print the message and loop forever
|
|
* positive value = print the message and delay for that many seconds
|
|
*/
|
|
OMPI_DECLSPEC extern int ompi_mpi_abort_delay;
|
|
|
|
/**
|
|
* Whether to use the "leave pinned" protocol or not.
|
|
*/
|
|
OMPI_DECLSPEC extern bool ompi_mpi_leave_pinned;
|
|
|
|
/**
|
|
* Whether to use the "leave pinned pipeline" protocol or not.
|
|
*/
|
|
OMPI_DECLSPEC extern bool ompi_mpi_leave_pinned_pipeline;
|
|
|
|
/**
|
|
* Whether sparse MPI group storage formats are supported or not.
|
|
*/
|
|
OMPI_DECLSPEC extern bool ompi_have_sparse_group_storage;
|
|
|
|
/**
|
|
* Whether sparse MPI group storage formats should be used or not.
|
|
*/
|
|
OMPI_DECLSPEC extern bool ompi_use_sparse_group_storage;
|
|
|
|
/**
|
|
* Register MCA parameters used by the MPI layer.
|
|
*
|
|
* @returns OMPI_SUCCESS
|
|
*
|
|
* Registers several MCA parameters and initializes corresponding
|
|
* global variables to the values obtained from the MCA system.
|
|
*/
|
|
OMPI_DECLSPEC int ompi_mpi_register_params(void);
|
|
|
|
/**
|
|
* Display all MCA parameters used
|
|
*
|
|
* @returns OMPI_SUCCESS
|
|
*
|
|
* Displays in key = value format
|
|
*/
|
|
int ompi_show_all_mca_params(int32_t, int, char *);
|
|
|
|
END_C_DECLS
|
|
|
|
#endif /* OMPI_RUNTIME_PARAMS_H */
|