/* openmpi/opal/runtime/opal_init.c */

/*
 * Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation. All rights reserved.
 * Copyright (c) 2004-2005 The University of Tennessee and The University
 *                         of Tennessee Research Foundation. All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart. All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * Copyright (c) 2007-2008 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007      Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2009      Oak Ridge National Labs. All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */
/** @file **/
#include "opal_config.h"
#include "opal/util/malloc.h"
#include "opal/util/output.h"
#include "opal/util/trace.h"
#include "opal/util/show_help.h"
#include "opal/memoryhooks/memory.h"
#include "opal/mca/base/base.h"
#include "opal/runtime/opal.h"
#include "opal/util/net.h"
#include "opal/datatype/opal_datatype.h"
#include "opal/mca/installdirs/base/base.h"
#include "opal/mca/memory/base/base.h"
#include "opal/mca/memcpy/base/base.h"
#include "opal/mca/paffinity/base/base.h"
#include "opal/mca/timer/base/base.h"
#include "opal/mca/memchecker/base/base.h"
#include "opal/dss/dss.h"
#include "opal/mca/carto/base/base.h"
#include "opal/runtime/opal_cr.h"
#include "opal/mca/crs/base/base.h"
#include "opal/runtime/opal_progress.h"
#include "opal/event/event.h"
#include "opal/mca/backtrace/base/base.h"
#include "opal/constants.h"
#include "opal/util/error.h"
#include "opal/util/stacktrace.h"
#include "opal/util/keyval_parse.h"
#include "opal/util/sys_limits.h"
#include "opal/util/opal_sos.h"
#if OPAL_CC_USE_PRAGMA_IDENT
#pragma ident OPAL_IDENT_STRING
#elif OPAL_CC_USE_IDENT
#ident OPAL_IDENT_STRING
#endif
const char opal_version_string[] = OPAL_IDENT_STRING;
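
/* Reference counts for opal_init()/opal_init_util(): only the first call
 * performs the actual initialization, later calls just bump the count. */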
int opal_initialized = 0;
int opal_util_initialized = 0;
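
/* Backing storage for the "opal_profile" and "opal_profile_file" MCA
 * parameters: opal_profile enables output of which component each framework
 * selected, and opal_profile_file names the file that receives that
 * profiling data (used for modex-less launch). */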
bool opal_profile = false;
char *opal_profile_file = NULL;

static const char *
opal_err2str(int errnum)
{
    const char *retval;

    switch (OPAL_SOS_GET_ERROR_CODE(errnum)) {
    case OPAL_SUCCESS:
        retval = "Success";
        break;
    case OPAL_ERROR:
        retval = "Error";
        break;
    case OPAL_ERR_OUT_OF_RESOURCE:
        retval = "Out of resource";
        break;
    case OPAL_ERR_TEMP_OUT_OF_RESOURCE:
        retval = "Temporarily out of resource";
        break;
    case OPAL_ERR_RESOURCE_BUSY:
        retval = "Resource busy";
        break;
    case OPAL_ERR_BAD_PARAM:
        retval = "Bad parameter";
        break;
    case OPAL_ERR_FATAL:
        retval = "Fatal";
        break;
    case OPAL_ERR_NOT_IMPLEMENTED:
        retval = "Not implemented";
        break;
    case OPAL_ERR_NOT_SUPPORTED:
        retval = "Not supported";
        break;
    case OPAL_ERR_INTERUPTED:
        retval = "Interrupted";
        break;
    case OPAL_ERR_WOULD_BLOCK:
        retval = "Would block";
        break;
    case OPAL_ERR_IN_ERRNO:
        retval = "In errno";
        break;
    case OPAL_ERR_UNREACH:
        retval = "Unreachable";
        break;
    case OPAL_ERR_NOT_FOUND:
        retval = "Not found";
        break;
    case OPAL_EXISTS:
        retval = "Exists";
        break;
    case OPAL_ERR_TIMEOUT:
        retval = "Timeout";
        break;
    case OPAL_ERR_NOT_AVAILABLE:
        retval = "Not available";
        break;
    case OPAL_ERR_PERM:
        retval = "No permission";
        break;
    case OPAL_ERR_VALUE_OUT_OF_BOUNDS:
        retval = "Value out of bounds";
        break;
    case OPAL_ERR_FILE_READ_FAILURE:
        retval = "File read failure";
        break;
    case OPAL_ERR_FILE_WRITE_FAILURE:
        retval = "File write failure";
        break;
    case OPAL_ERR_FILE_OPEN_FAILURE:
        retval = "File open failure";
        break;
    case OPAL_ERR_PACK_MISMATCH:
        retval = "Pack data mismatch";
        break;
    case OPAL_ERR_PACK_FAILURE:
        retval = "Data pack failed";
        break;
    case OPAL_ERR_UNPACK_FAILURE:
        retval = "Data unpack failed";
        break;
    case OPAL_ERR_UNPACK_INADEQUATE_SPACE:
        retval = "Data unpack had inadequate space";
        break;
    case OPAL_ERR_UNPACK_READ_PAST_END_OF_BUFFER:
        retval = "Data unpack would read past end of buffer";
        break;
    case OPAL_ERR_OPERATION_UNSUPPORTED:
        retval = "Requested operation is not supported on referenced data type";
        break;
    case OPAL_ERR_UNKNOWN_DATA_TYPE:
        retval = "Unknown data type";
        break;
    case OPAL_ERR_BUFFER:
        retval = "Buffer type (described vs non-described) mismatch - operation not allowed";
        break;
    case OPAL_ERR_DATA_TYPE_REDEF:
        retval = "Attempt to redefine an existing data type";
        break;
    case OPAL_ERR_DATA_OVERWRITE_ATTEMPT:
        retval = "Attempt to overwrite a data value";
        break;
    case OPAL_ERR_MODULE_NOT_FOUND:
        retval = "Framework requires at least one active module, but none found";
        break;
    case OPAL_ERR_TOPO_SLOT_LIST_NOT_SUPPORTED:
        retval = "OS topology does not support slot_list process affinity";
        break;
    case OPAL_ERR_TOPO_SOCKET_NOT_SUPPORTED:
        retval = "Could not obtain socket topology information";
        break;
    case OPAL_ERR_TOPO_CORE_NOT_SUPPORTED:
        retval = "Could not obtain core topology information";
        break;
    case OPAL_ERR_NOT_ENOUGH_SOCKETS:
        retval = "Not enough sockets to meet request";
        break;
    case OPAL_ERR_NOT_ENOUGH_CORES:
        retval = "Not enough cores to meet request";
        break;
    case OPAL_ERR_INVALID_PHYS_CPU:
        retval = "Invalid physical cpu number returned";
        break;
    case OPAL_ERR_MULTIPLE_AFFINITIES:
        retval = "Multiple methods for assigning process affinity were specified";
        break;
    case OPAL_ERR_SLOT_LIST_RANGE:
        retval = "Provided slot_list range is invalid";
        break;
    case OPAL_ERR_PAFFINITY_NOT_SUPPORTED:
        retval = "Process affinity is not supported";
        break;
    default:
        retval = NULL;
    }

    return retval;
}
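
/*
 * Illustrative note (not part of the original file): once opal_init_util()
 * below has registered opal_err2str() through opal_error_register(), OPAL
 * error codes can be resolved to the strings above via opal_strerror() from
 * the already-included "opal/util/error.h".  A minimal sketch, assuming an
 * initialized utility layer and a hypothetical helper some_opal_call() that
 * returns an OPAL error code:
 *
 *     int rc = some_opal_call();
 *     if (OPAL_SUCCESS != rc) {
 *         opal_output(0, "operation failed: %s", opal_strerror(rc));
 *     }
 */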

int
opal_init_util(int* pargc, char*** pargv)
{
    int ret;
    char *error = NULL;

    if( ++opal_util_initialized != 1 ) {
        if( opal_util_initialized < 1 ) {
            return OPAL_ERROR;
        }
        return OPAL_SUCCESS;
    }

    /* initialize the memory allocator */
    opal_malloc_init();

    /* initialize the OPAL SOS system */
    opal_sos_init();

    /* initialize the output system */
    opal_output_init();

    /* initialize install dirs code */
    if (OPAL_SUCCESS != (ret = opal_installdirs_base_open())) {
        fprintf(stderr, "opal_installdirs_base_open() failed -- process will likely abort (%s:%d, returned %d instead of OPAL_INIT)\n",
                __FILE__, __LINE__, ret);
        return ret;
    }

    /* initialize the help system */
    opal_show_help_init();

    /* register handler for errnum -> string conversion */
    if (OPAL_SUCCESS !=
        (ret = opal_error_register("OPAL",
                                   OPAL_ERR_BASE, OPAL_ERR_MAX, opal_err2str))) {
        error = "opal_error_register";
        goto return_error;
    }

    /* init the trace function */
    opal_trace_init();

    /* keyval lex-based parser */
    if (OPAL_SUCCESS != (ret = opal_util_keyval_parse_init())) {
        error = "opal_util_keyval_parse_init";
        goto return_error;
    }

    if (OPAL_SUCCESS != (ret = opal_net_init())) {
        error = "opal_net_init";
        goto return_error;
    }

    /* Setup the parameter system */
    if (OPAL_SUCCESS != (ret = mca_base_param_init())) {
        error = "mca_base_param_init";
        goto return_error;
    }

    /* register params for opal */
    if (OPAL_SUCCESS != (ret = opal_register_params())) {
        error = "opal_register_params";
        goto return_error;
    }

    /* pretty-print stack handlers */
    if (OPAL_SUCCESS != (ret = opal_util_register_stackhandlers())) {
        error = "opal_util_register_stackhandlers";
        goto return_error;
    }

    if (OPAL_SUCCESS != (ret = opal_util_init_sys_limits())) {
        error = "opal_util_init_sys_limits";
        goto return_error;
    }

    /* initialize the datatype engine */
    if (OPAL_SUCCESS != (ret = opal_datatype_init ())) {
        error = "opal_datatype_init";
        goto return_error;
    }

    /* Initialize the data storage service. */
    if (OPAL_SUCCESS != (ret = opal_dss_open())) {
        error = "opal_dss_open";
        goto return_error;
    }

    return OPAL_SUCCESS;

 return_error:
    opal_show_help( "help-opal-runtime.txt",
                    "opal_init:startup:internal-failure", true,
                    error, ret );
    return ret;
}
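
/*
 * Illustrative note (not part of the original file): a minimal sketch of how
 * a tool that only needs the OPAL utility layer (output, MCA parameters,
 * datatype engine, DSS) would typically pair this call with
 * opal_finalize_util(), assuming both are declared in the already-included
 * "opal/runtime/opal.h":
 *
 *     #include "opal/runtime/opal.h"
 *     #include "opal/constants.h"
 *     #include "opal/util/output.h"
 *
 *     int main(int argc, char *argv[])
 *     {
 *         if (OPAL_SUCCESS != opal_init_util(&argc, &argv)) {
 *             return 1;
 *         }
 *         opal_output(0, "OPAL utility layer is up");
 *         opal_finalize_util();
 *         return 0;
 *     }
 */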

int
opal_init(int* pargc, char*** pargv)
{
    int ret;
    char *error = NULL;

    if( ++opal_initialized != 1 ) {
        if( opal_initialized < 1 ) {
            return OPAL_ERROR;
        }
        return OPAL_SUCCESS;
    }

    /* initialize util code */
    if (OPAL_SUCCESS != (ret = opal_init_util(pargc, pargv))) {
        return ret;
    }

    /* initialize the mca */
    if (OPAL_SUCCESS != (ret = mca_base_open())) {
        error = "mca_base_open";
        goto return_error;
    }

    /* open the processor affinity base */
    if (OPAL_SUCCESS != (ret = opal_paffinity_base_open())) {
        error = "opal_paffinity_base_open";
        goto return_error;
    }
    if (OPAL_SUCCESS != (ret = opal_paffinity_base_select())) {
        error = "opal_paffinity_base_select";
        goto return_error;
    }

    /* the memcpy component should be one of the first to be loaded, in
     * order to make sure we do have all the available versions of memcpy
     * correctly configured.
     */
    if( OPAL_SUCCESS != (ret = opal_memcpy_base_open()) ) {
        error = "opal_memcpy_base_open";
        goto return_error;
    }

    /* open the memory manager components. Memory hooks may be
       triggered before this (any time after mem_free_init(),
       actually). This is a hook available for memory manager hooks
       without good initialization routine support */
    if (OPAL_SUCCESS != (ret = opal_memory_base_open())) {
        error = "opal_memory_base_open";
        goto return_error;
    }

    /* initialize the memory manager / tracker */
    if (OPAL_SUCCESS != (ret = opal_mem_hooks_init())) {
        error = "opal_mem_hooks_init";
        goto return_error;
    }

    /* initialize the memory checker, to allow early support for annotation */
    if (OPAL_SUCCESS != (ret = opal_memchecker_base_open())) {
        error = "opal_memchecker_base_open";
        goto return_error;
    }

    /* select the memory checker */
    if (OPAL_SUCCESS != (ret = opal_memchecker_base_select())) {
        error = "opal_memchecker_base_select";
        goto return_error;
    }
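
    /* open the backtrace framework so stack traces can be generated when
     * needed (e.g., by the stack trace signal handlers) */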
    if (OPAL_SUCCESS != (ret = opal_backtrace_base_open())) {
        error = "opal_backtrace_base_open";
        goto return_error;
    }
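
    /* open the high-resolution timer framework */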
    if (OPAL_SUCCESS != (ret = opal_timer_base_open())) {
        error = "opal_timer_base_open";
        goto return_error;
    }

    /* setup the carto framework */
    if (OPAL_SUCCESS != (ret = opal_carto_base_open())) {
        error = "opal_carto_base_open";
        goto return_error;
    }
    if (OPAL_SUCCESS != (ret = opal_carto_base_select())) {
        error = "opal_carto_base_select";
        goto return_error;
    }

    /*
     * Need to start the event and progress engines if no one else has.
     * opal_cr_init uses the progress engine, so it is lumped together
     * into this set as well.
     */

    /*
     * Initialize the event library
     */
    if (OPAL_SUCCESS != (ret = opal_event_init())) {
        error = "opal_event_init";
        goto return_error;
    }

    /*
     * Initialize the general progress engine
     */
    if (OPAL_SUCCESS != (ret = opal_progress_init())) {
        error = "opal_progress_init";
        goto return_error;
    }

    /* we want to tick the event library whenever possible */
    opal_progress_event_users_increment();

    /*
     * Initialize the checkpoint/restart functionality.
     * Note: Always do this so we can detect if the user
     * attempts to checkpoint a non-checkpointable job;
     * otherwise the tools may hang or not clean up properly.
     */
    if (OPAL_SUCCESS != (ret = opal_cr_init() ) ) {
        error = "opal_cr_init() failed";
        goto return_error;
    }

    return OPAL_SUCCESS;

 return_error:
    opal_show_help( "help-opal-runtime.txt",
                    "opal_init:startup:internal-failure", true,
                    error, ret );
    return ret;
}
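
/*
 * Illustrative note (not part of the original file): opal_init() is normally
 * driven by the upper layers (ORTE/OMPI) rather than called directly, but a
 * minimal stand-alone sketch, assuming opal_finalize() is declared in
 * "opal/runtime/opal.h", looks like:
 *
 *     #include "opal/runtime/opal.h"
 *     #include "opal/runtime/opal_progress.h"
 *     #include "opal/constants.h"
 *
 *     int main(int argc, char *argv[])
 *     {
 *         if (OPAL_SUCCESS != opal_init(&argc, &argv)) {
 *             return 1;
 *         }
 *         opal_progress();        // tick the event/progress engines
 *         opal_finalize();        // one finalize per successful init
 *         return 0;
 *     }
 *
 * Because opal_initialized is a reference count, a second opal_init() call
 * simply returns OPAL_SUCCESS and must be matched by its own opal_finalize().
 */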