openmpi/ompi/runtime/params.h
Ralph Castain 16c5b30a1f Since the calls to "PMI get" scale by number of procs (not nodes), it makes more sense to have the MCA param be the cutoff based on number of procs. Also, it occurred to me that this shouldn't impact the nidmap process as that is built and circulated when we launch via mpirun, not during direct launch.
So shift the cutoff param to the MPI layer, and have it solely determine whether or not we call modex_recv on the hostname. If comm_world is of size greater than the cutoff, then we don't automatically retrieve the hostname when we build the ompi_proc_t for a process - instead, we fill the hostname entry on first call to modex_recv for that process.

The param is now "ompi_hostname_cutoff=N", where N=number of procs for cutoff.

Refs trac:3729

This commit was SVN r29056.

The following Trac tickets were found above:
  Ticket 3729 --> https://svn.open-mpi.org/trac/ompi/ticket/3729
2013-08-22 03:40:26 +00:00


/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2007 Los Alamos National Security, LLC. All rights
* reserved.
* Copyright (c) 2006-2009 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2013 NVIDIA Corporation. All rights reserved.
* Copyright (c) 2013 Intel, Inc. All rights reserved
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#ifndef OMPI_RUNTIME_PARAMS_H
#define OMPI_RUNTIME_PARAMS_H
#include "ompi_config.h"
BEGIN_C_DECLS
/*
* Global variables
*/
/**
* Whether or not to check the parameters of top-level MPI API
* functions.
*
* This variable should never be checked directly; the macro
* MPI_PARAM_CHECK should be used instead. This allows multiple
* levels of MPI function parameter checking:
*
* #- Disable all parameter checking at configure/compile time
* #- Enable all parameter checking at configure/compile time
* #- Disable all parameter checking at run time
* #- Enable all parameter checking at run time
*
* Hence, the MPI_PARAM_CHECK macro will either be "0", "1", or
* "ompi_mpi_param_check".
*/
OMPI_DECLSPEC extern bool ompi_mpi_param_check;
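/*
* Illustrative sketch (not part of this header) of how a top-level MPI
* binding is expected to consult MPI_PARAM_CHECK rather than reading
* ompi_mpi_param_check directly; "comm", "count", and "FUNC_NAME" are
* placeholder names for this example:
*
*   if (MPI_PARAM_CHECK) {
*       if (count < 0) {
*           return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT, FUNC_NAME);
*       }
*   }
*/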
/**
* Whether or not to check for MPI handle leaks during MPI_FINALIZE.
* If enabled, each MPI handle type will display a summary of the
* handles that are still allocated during MPI_FINALIZE.
*
* This is good debugging for user applications to find out if they
* are inadvertently orphaning MPI handles.
*/
OMPI_DECLSPEC extern bool ompi_debug_show_handle_leaks;
/**
* If > 0, show that many MPI_ALLOC_MEM leaks during MPI_FINALIZE. If
* enabled, memory that was returned via MPI_ALLOC_MEM but was never
* freed via MPI_FREE_MEM will be displayed during MPI_FINALIZE.
*
* This is good debugging for user applications to find out if they
* are inadvertently orphaning MPI "special" memory.
*/
OMPI_DECLSPEC extern int ompi_debug_show_mpi_alloc_mem_leaks;
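/*
* Illustrative sketch (hypothetical user code) of the pattern this
* counter reports on: memory obtained from MPI_ALLOC_MEM but never
* returned via MPI_FREE_MEM before MPI_FINALIZE counts as a leak:
*
*   void *buf;
*   MPI_Alloc_mem(4096, MPI_INFO_NULL, &buf);
*   ...use buf...
*   MPI_Free_mem(buf);        <-- omitting this call produces a reported leak
*/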
/**
* Whether or not to actually free MPI handles when their
* corresponding destructor is invoked. If enabled, Open MPI will not
* free handles, but will rather simply mark them as "freed". Any
* attempt to use them will result in an MPI exception.
*
* This is good debugging for user applications to find out if they
* are inadvertently using MPI handles after they have been freed.
*/
OMPI_DECLSPEC extern bool ompi_debug_no_free_handles;
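/*
* Illustrative sketch (hypothetical user code) of the error this option
* helps catch; "alias" keeps a second copy of the handle because
* MPI_COMM_FREE resets its argument to MPI_COMM_NULL:
*
*   MPI_Comm dup, alias;
*   MPI_Comm_dup(MPI_COMM_WORLD, &dup);
*   alias = dup;
*   MPI_Comm_free(&dup);      <-- object is only marked as freed
*   MPI_Barrier(alias);       <-- use-after-free now raises an MPI exception
*/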
/**
* Whether or not to print MCA parameters on MPI_INIT
*
* This is good debugging for user applications to see exactly which
* MCA parameters are being used in the current program execution.
*/
OMPI_DECLSPEC extern bool ompi_mpi_show_mca_params;
/**
* Filename to print MCA parameters to, instead of stdout
*
* If this value is set, the parameter dump produced when
* mpi_show_mca_params is enabled is written to the named file.
*/
OMPI_DECLSPEC extern char * ompi_mpi_show_mca_params_file;
/**
* Whether an MPI_ABORT should print out a stack trace or not.
*/
OMPI_DECLSPEC extern bool ompi_mpi_abort_print_stack;
/**
* Whether MPI_ABORT should print out an identifying message
* (e.g., hostname and PID) and loop waiting for a debugger to
* attach. The value of the integer is how many seconds to wait:
*
* 0 = do not print the message and do not loop
* negative value = print the message and loop forever
* positive value = print the message and delay for that many seconds
*/
OMPI_DECLSPEC extern int ompi_mpi_abort_delay;
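/*
* Illustrative sketch (simplified; not the actual abort path) of how
* this value might be interpreted:
*
*   if (0 != ompi_mpi_abort_delay) {
*       ...print hostname/PID message...
*       if (ompi_mpi_abort_delay < 0) {
*           while (1) sleep(5);              <-- wait forever for a debugger
*       } else {
*           sleep(ompi_mpi_abort_delay);     <-- wait N seconds, then abort
*       }
*   }
*/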
/**
* Whether to use the "leave pinned" protocol or not (0 = no, 1 = yes,
* -1 = determine at runtime).
*/
OMPI_DECLSPEC extern int ompi_mpi_leave_pinned;
/**
* Whether to use the "leave pinned pipeline" protocol or not.
*/
OMPI_DECLSPEC extern bool ompi_mpi_leave_pinned_pipeline;
/**
* Whether sparse MPI group storage formats are supported or not.
*/
OMPI_DECLSPEC extern bool ompi_have_sparse_group_storage;
/**
* Whether sparse MPI group storage formats should be used or not.
*/
OMPI_DECLSPEC extern bool ompi_use_sparse_group_storage;
/**
* Whether we want to enable CUDA GPU buffer send and receive support.
*/
OMPI_DECLSPEC extern bool ompi_mpi_cuda_support;
/**
* Cutoff point for retrieving hostnames.
*
* If the size of MPI_COMM_WORLD is greater than this value, remote
* hostnames are not retrieved when each ompi_proc_t is built; instead,
* the hostname entry is filled in on the first modex_recv for that
* process.
*/
OMPI_DECLSPEC extern uint32_t ompi_hostname_cutoff;
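/*
* Illustrative sketch (simplified; names such as ompi_process_info are
* assumptions, not guaranteed by this header) of the intended use of
* the cutoff when an ompi_proc_t is created:
*
*   if (ompi_process_info.num_procs < ompi_hostname_cutoff) {
*       ...retrieve proc->proc_hostname eagerly...
*   } else {
*       ...leave proc->proc_hostname NULL until the first modex_recv...
*   }
*/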
/**
* Register MCA parameters used by the MPI layer.
*
* @returns OMPI_SUCCESS
*
* Registers several MCA parameters and initializes corresponding
* global variables to the values obtained from the MCA system.
*/
OMPI_DECLSPEC int ompi_mpi_register_params(void);
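/*
* Illustrative sketch of what one registration inside this function
* might look like, assuming the mca_base_var_register() interface from
* opal/mca/base/mca_base_var.h (argument values are illustrative only):
*
*   ompi_mpi_param_check = true;
*   (void) mca_base_var_register("ompi", "mpi", NULL, "param_check",
*                                "Whether to check MPI API parameters at run time",
*                                MCA_BASE_VAR_TYPE_BOOL, NULL, 0, 0,
*                                OPAL_INFO_LVL_9,
*                                MCA_BASE_VAR_SCOPE_READONLY,
*                                &ompi_mpi_param_check);
*/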
/**
* Display all MCA parameters used
*
* @returns OMPI_SUCCESS
*
* Displays each parameter in "key = value" format
*/
int ompi_show_all_mca_params(int32_t, int, char *);
END_C_DECLS
#endif /* OMPI_RUNTIME_PARAMS_H */