openmpi/orte/runtime/runtime.h

/*
 * Copyright (c) 2004-2005 The Trustees of Indiana University.
 *                         All rights reserved.
 * Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
 *                         All rights reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart.  All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */
/**
* @file
*
* Interface into the Open MPI Run Time Environment
*/
#ifndef ORTE_RUNTIME_H
#define ORTE_RUNTIME_H
#include "orte_config.h"
#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#include "mca/gpr/gpr_types.h"
#include "opal/util/cmd_line.h"
#include "runtime/runtime_types.h"
#include "util/univ_info.h"
#include "mca/ns/ns.h"
/* constants for spawn constraints */
/** Spawn constraint - require multi-cell support. The selected spawn
system must be capable of starting across multiple cells. This
allows multiple pcms to be used to satisfy a single resource
allocation request */
#define OMPI_RTE_SPAWN_MULTI_CELL 0x0001
/** Spawn constraint - require ability to launch daemons.  The
    selected spawn system must be capable of starting daemon
    processes.  Setting this flag will result in a spawn service that
    does not necessarily provide process monitoring or standard I/O
    forwarding.  The calling process may exit before all children have
    exited. */
#define OMPI_RTE_SPAWN_DAEMON 0x0002
/** Spawn constraint - require quality of service support. The
selected spawn system must provide I/O forwarding, quick process
shutdown, and process status monitoring. */
#define OMPI_RTE_SPAWN_HIGH_QOS 0x0004
/** Spawn constraint - caller is an MPI process. The caller is an MPI
application (has called MPI_Init). This should be used only for
MPI_COMM_SPAWN and MPI_COMM_SPAWN_MULTIPLE. The calling process
will follow the semantics of the MPI_COMM_SPAWN_* functions. */
#define OMPI_RTE_SPAWN_FROM_MPI 0x0008
/** Spawn constraint - require ability to launch either MPMD (hence
the name) applications or applications with specific placement of
processes. */
#define OMPI_RTE_SPAWN_MPMD 0x0010
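/*
 * An illustrative sketch (not part of the original header): the spawn
 * constraints above are bit flags and appear intended to be OR'd
 * together when requesting a spawn service.  The variable name below
 * is hypothetical.
 *
 * @code
 *   int constraints = OMPI_RTE_SPAWN_HIGH_QOS | OMPI_RTE_SPAWN_MPMD;
 *   // pass 'constraints' to the spawn service selection logic
 * @endcode
 */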
#if defined(c_plusplus) || defined(__cplusplus)
extern "C" {
#endif
/* globals used by RTE - instanced in orte_init.c */
OMPI_DECLSPEC extern int orte_debug_flag;
/**
 * Abort the current application with a pretty-print error message
 *
 * Aborts the currently running application with \c abort(), pretty
 * printing an error message if possible.  The error message should be
 * specified using the standard \c printf() format.
 */
OMPI_DECLSPEC int orte_abort(int status, char *fmt, ...);
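/*
 * A minimal usage sketch, assuming a standard printf-style format
 * string; the surrounding error-handling context is hypothetical:
 *
 * @code
 *   if (ORTE_SUCCESS != rc) {
 *       orte_abort(rc, "registry lookup failed with status %d", rc);
 *   }
 * @endcode
 */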
/**
* Initialize the Open Run Time Environment
*
 * Initialize the Open Run Time Environment, including process
* control, malloc debugging and threads, and out of band messaging.
* This function should be called exactly once. This function should
* be called by every application using the RTE interface, including
* MPI applications and mpirun.
*
* @param infrastructure Whether we are ORTE infrastructure or an ORTE
* application
*/
OMPI_DECLSPEC int orte_init(bool infrastructure);
OMPI_DECLSPEC int orte_system_init(bool infrastructure);
OMPI_DECLSPEC int orte_init_stage1(bool infrastructure);
OMPI_DECLSPEC int orte_init_stage2(void);
/**
 * Initialize parameters for ORTE.
 *
 * @param infrastructure Whether we are ORTE infrastructure or an ORTE
 * application
 *
 * @retval ORTE_SUCCESS Upon success.
 * @retval ORTE_ERROR Upon failure.
 */
OMPI_DECLSPEC int orte_register_params(bool infrastructure);
/**
* Re-init the Open run time environment.
*
* Restart selected components with a new process name.
*/
OMPI_DECLSPEC int orte_restart(orte_process_name_t* name, const char* uri);
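/*
 * An illustrative call; both arguments are placeholders (the new name
 * would normally come from the name service, the URI from the
 * universe contact info):
 *
 * @code
 *   rc = orte_restart(new_name, replica_uri);
 * @endcode
 */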
/**
 * Finalize the Open run time environment.  Any function calling \c
 * orte_init() should call \c orte_finalize().
 */
OMPI_DECLSPEC int orte_finalize(void);
OMPI_DECLSPEC int orte_system_finalize(void);
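/*
 * A minimal lifecycle sketch, assuming an ORTE application rather
 * than ORTE infrastructure (so 'infrastructure' is false); error
 * handling is elided:
 *
 * @code
 *   int main(int argc, char **argv)
 *   {
 *       if (ORTE_SUCCESS != orte_init(false)) {
 *           return 1;
 *       }
 *       // ... use the RTE: name service, registry, OOB messaging ...
 *       return orte_finalize();
 *   }
 * @endcode
 */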
/*
* Change state as processes complete registration/unregistration
*/
OMPI_DECLSPEC void orte_all_procs_registered(orte_gpr_notify_message_t* match, void* cbdata);
OMPI_DECLSPEC void orte_all_procs_unregistered(orte_gpr_notify_message_t* match, void* cbdata);
OMPI_DECLSPEC int orte_monitor_procs_registered(void);
OMPI_DECLSPEC int orte_monitor_procs_unregistered(void);
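/*
 * A hedged sketch of the apparent pattern, assuming (from the names
 * and the comment above, not from documented semantics) that the
 * monitor calls return once all processes have registered or
 * unregistered:
 *
 * @code
 *   if (ORTE_SUCCESS == orte_monitor_procs_registered()) {
 *       // all processes have checked in; safe to proceed
 *   }
 * @endcode
 */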
/**
* Check for universe existence
*
* Checks to see if a specified universe exists. If so, attempts
* to connect to verify that the universe is accepting connections.
 * If both ns and gpr replicas are provided, first checks for those
* connections. Gets any missing info from the universe contact.
*
* @param univ Pointer to universe info struct where any found info
* is to be stored
*
* @retval OMPI_SUCCESS Universe found and connection accepted
 * @retval OMPI_NO_CONNECTION_ALLOWED Universe found, but it is not
 * persistent or is restricted to local scope
* @retval OMPI_CONNECTION_FAILED Universe found, but connection attempt
* failed. Probably caused by unclean termination of the universe seed
* daemon.
* @retval OMPI_CONNECTION_REFUSED Universe found and contact made, but
* universe refused to allow connection.
*/
OMPI_DECLSPEC int orte_universe_exists(orte_universe_t *univ);
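/*
 * A usage sketch under the assumption that the caller supplies an
 * orte_universe_t (see util/univ_info.h) and dispatches on the return
 * codes documented above; population of 'univ' is elided:
 *
 * @code
 *   orte_universe_t univ;
 *   int rc = orte_universe_exists(&univ);
 *   if (OMPI_SUCCESS == rc) {
 *       // universe found and accepting connections; join it
 *   } else if (OMPI_CONNECTION_REFUSED == rc) {
 *       // contact made, but the universe declined the connection
 *   }
 * @endcode
 */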
/**
* Setup I/O forwarding.
*/
OMPI_DECLSPEC int ompi_rte_init_io(void);
/**
* Establish a Head Node Process on a cluster's front end
*/
OMPI_DECLSPEC int orte_setup_hnp(char *target_cluster, char *headnode, char *username);
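/*
 * An illustrative call; all three string arguments are hypothetical
 * example values, not documented defaults:
 *
 * @code
 *   rc = orte_setup_hnp("clusterA", "frontend01", "someuser");
 * @endcode
 */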
#if defined(c_plusplus) || defined(__cplusplus)
}
#endif
#endif /* ORTE_RUNTIME_H */