openmpi/ompi/runtime/mpiruntime.h
Ralph Castain d396f0a6fc Per the discussion on the devel list, move the binding of processes to processors from MPI_Init to process start. This involves:
1. Replacing mpi_paffinity_alone with opal_paffinity_alone - for back-compatibility, I have aliased mpi_paffinity_alone to the new param name (see the hypothetical alias sketch after this list). This causes a mild abstraction break in the opal/mca/paffinity framework - per the devel discussion...live with it. :-) I also moved the ompi_xxx global variable that tracked maffinity setup into the opal/mca/maffinity framework, to avoid an abstraction break, so that it can be properly closed in MPI_Finalize.

2. Added code to the odls/default module to perform paffinity binding and maffinity init between process fork and exec (see the fork/exec sketch after this list). This has been tested on IU's odin cluster and works for both MPI and non-MPI apps.

3. Revised MPI_Init to detect whether affinity has already been set, and to attempt to set it if not already done (the detection heuristic is also illustrated in the fork/exec sketch below). I have *not* tested this, as I haven't yet figured out a way to do so - I couldn't get slurm to perform cpu bindings, even though it supposedly supports them.
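
For item 1, a minimal hypothetical sketch of the alias precedence, expressed through the standard OMPI_MCA_<param> environment-variable convention rather than the actual opal/mca/base registration calls; the helper name is made up for illustration:

#include <stdlib.h>

/* Hypothetical sketch: the real alias is registered inside the opal/mca/base
 * parameter system.  This only shows the intended precedence: prefer the new
 * opal-level name, fall back to the deprecated mpi-level one. */
static int lookup_paffinity_alone(void)
{
    const char *val = getenv("OMPI_MCA_opal_paffinity_alone");
    if (NULL == val) {
        /* deprecated alias, kept for back-compatibility */
        val = getenv("OMPI_MCA_mpi_paffinity_alone");
    }
    return (NULL != val) ? atoi(val) : 0;
}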
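
For items 2 and 3, a minimal Linux-only sketch of the fork/bind/exec flow, using sched_setaffinity(2) as a stand-in for the paffinity framework that odls/default actually calls; the pinned cpu, the ./app binary, and the already_bound() heuristic are illustrative assumptions, not the real odls logic:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/wait.h>

/* Illustrative heuristic (mirrors item 3): treat the process as already
 * bound if its affinity mask covers fewer CPUs than are online. */
static int already_bound(void)
{
    cpu_set_t mask;
    long online = sysconf(_SC_NPROCESSORS_ONLN);
    if (0 != sched_getaffinity(0, sizeof(mask), &mask)) {
        return 0;
    }
    return CPU_COUNT(&mask) < online;
}

int main(void)
{
    pid_t pid = fork();
    if (0 == pid) {
        /* Child: bind between fork and exec (item 2), but only if the
         * resource manager has not already done so (item 3's check). */
        if (!already_bound()) {
            cpu_set_t mask;
            CPU_ZERO(&mask);
            CPU_SET(0, &mask);          /* illustrative: pin to CPU 0 */
            if (0 != sched_setaffinity(0, sizeof(mask), &mask)) {
                perror("sched_setaffinity");
                _exit(1);
            }
        }
        execlp("./app", "./app", (char *) NULL);  /* hypothetical binary */
        _exit(127);
    }
    waitpid(pid, NULL, 0);
    return 0;
}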

This has only been lightly tested and would definitely benefit from a wider range of evaluation...

This commit was SVN r21209.
2009-05-12 02:18:35 +00:00

128 lines
4.0 KiB
C

/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2008 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2006-2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2007 Los Alamos National Security, LLC. All rights
* reserved.
* Copyright (c) 2008 Sun Microsystems, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
/**
* @file
*
* Interface into the MPI portion of the Open MPI Run Time Environment
*/
#ifndef OMPI_MPI_MPIRUNTIME_H
#define OMPI_MPI_MPIRUNTIME_H

#include "ompi_config.h"

#include "opal/class/opal_list.h"
#include "opal/class/opal_hash_table.h"

BEGIN_C_DECLS
/** forward type declaration */
struct ompi_communicator_t;
/** forward type declaration */
struct opal_thread_t;
/* Global variables and symbols for the MPI layer */
/** Is mpi initialized? */
OMPI_DECLSPEC extern bool ompi_mpi_initialized;
/** Has mpi been finalized? */
OMPI_DECLSPEC extern bool ompi_mpi_finalized;
/** Do we have multiple threads? */
OMPI_DECLSPEC extern bool ompi_mpi_thread_multiple;
/** Thread level requested to \c MPI_Init_thread() */
OMPI_DECLSPEC extern int ompi_mpi_thread_requested;
/** Thread level provided by Open MPI */
OMPI_DECLSPEC extern int ompi_mpi_thread_provided;
/** Identifier of the main thread */
OMPI_DECLSPEC extern struct opal_thread_t *ompi_mpi_main_thread;
/** Do we want to be warned on fork or not? */
OMPI_DECLSPEC extern bool ompi_warn_on_fork;
/** In ompi_mpi_init: a list of all memory associated with calling
    MPI_REGISTER_DATAREP so that we can free it during
    MPI_FINALIZE. */
OMPI_DECLSPEC extern opal_list_t ompi_registered_datareps;
/** In ompi_mpi_init: the lists of Fortran 90 matching datatypes.
* We need these lists and hashtables in order to satisfy the new
* requirements introduced in MPI-2.1 Sect. 10.2.5,
* MPI_TYPE_CREATE_F90_xxxx, page 295, line 47.
*/
extern opal_hash_table_t ompi_mpi_f90_integer_hashtable;
extern opal_hash_table_t ompi_mpi_f90_real_hashtable;
extern opal_hash_table_t ompi_mpi_f90_complex_hashtable;
/** version string of ompi */
OMPI_DECLSPEC extern const char ompi_version_string[];
/** Display the fork() warning when ompi_warn_on_fork is set */
OMPI_DECLSPEC void ompi_warn_fork(void);
/**
* Initialize the Open MPI MPI environment
*
* @param argc argc, typically from main() (IN)
* @param argv argv, typically from main() (IN)
* @param requested Thread support that is requested (IN)
* @param provided Thread support that is provided (OUT)
*
* @returns MPI_SUCCESS if successful
* @returns Error code if unsuccessful
*
* Initialize all support code needed for MPI applications. This
* function should only be called by MPI applications (including
* singletons). If this function is called, ompi_init() and
* ompi_rte_init() should *not* be called.
*
* It is permissible to pass in (0, NULL) for (argc, argv).
*/
int ompi_mpi_init(int argc, char **argv, int requested, int *provided);
/**
* Finalize the Open MPI MPI environment
*
* @returns MPI_SUCCESS if successful
* @returns Error code if unsuccessful
*
* Should be called after all MPI functionality is complete (usually
* during MPI_FINALIZE).
*/
int ompi_mpi_finalize(void);
/**
* Abort the processes of comm
*/
OMPI_DECLSPEC int ompi_mpi_abort(struct ompi_communicator_t* comm,
                                 int errcode, bool kill_remote_of_intercomm);
/**
* Do a preconnect of MPI connections (i.e., force connections to
* be made if they will be made).
*/
int ompi_init_preconnect_mpi(void);
END_C_DECLS
#endif /* OMPI_MPI_MPIRUNTIME_H */
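
For context on the declarations above, a simplified sketch of how the MPI C bindings might drive this interface; the real MPI_Init/MPI_Finalize (in ompi/mpi/c/) perform considerably more argument checking, and MPI_Init is shown here requesting MPI_THREAD_SINGLE:

/* Simplified sketch of the usual callers of ompi_mpi_init() and
 * ompi_mpi_finalize(); not the actual binding code. */
#include "ompi/runtime/mpiruntime.h"
#include "mpi.h"

int MPI_Init(int *argc, char ***argv)
{
    int provided;
    /* Passing (0, NULL) is also legal, per the ompi_mpi_init() docs. */
    return ompi_mpi_init(argc ? *argc : 0, argv ? *argv : NULL,
                         MPI_THREAD_SINGLE, &provided);
}

int MPI_Finalize(void)
{
    return ompi_mpi_finalize();
}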