Ralph Castain ba5498cdc6 Repair the MPI-2 dynamic operations. This includes:
1. repair of the linear and direct routed modules

2. repair of the ompi/pubsub/orte module to correctly init routes to the ompi-server, and to properly handle failures to parse the provided ompi-server URI

3. modification of orterun to accept both "file" and "FILE" for designating where the ompi-server URI is to be found - purely a convenience feature

4. resolution of a message-ordering problem during the connect/accept handshake that allowed the "send-first" proc to attempt to send to the "recv-first" proc before the HNP had actually updated its routes (see the sketch below).

Let this be a further reminder to all - message ordering is NOT guaranteed in the OOB
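One way to see what "build the ordering yourself" means in practice - a minimal sketch of an explicit ack handshake. xp_send()/xp_recv() are hypothetical blocking transport calls standing in for an unordered transport; they are NOT ORTE RML APIs, and the tags and helper names are illustrative only:

/* Sketch: impose an ordering guarantee on top of an unordered transport.
 * xp_send()/xp_recv() are hypothetical blocking calls - NOT ORTE RML APIs. */
#include <stdbool.h>
#include <stddef.h>

bool xp_send(int peer, int tag, const void *buf, size_t len); /* hypothetical */
bool xp_recv(int peer, int tag, void *buf, size_t len);       /* hypothetical */

enum { TAG_ROUTES_READY = 1,   /* ack: receiver's routes are in place */
       TAG_PAYLOAD      = 2 };

/* "send-first" side: block until the peer confirms its routes exist,
 * and only then send the real payload */
bool ordered_send(int peer, const void *payload, size_t len)
{
    char ack;
    if (!xp_recv(peer, TAG_ROUTES_READY, &ack, 1)) {
        return false;              /* no ack - do not race ahead */
    }
    return xp_send(peer, TAG_PAYLOAD, payload, len);
}

/* "recv-first" side: update routes first, then release the sender */
bool ordered_recv(int peer, void (*update_routes)(void),
                  void *payload, size_t len)
{
    char ack = 1;
    update_routes();               /* e.g. the HNP installs the new routes */
    if (!xp_send(peer, TAG_ROUTES_READY, &ack, 1)) {
        return false;
    }
    return xp_recv(peer, TAG_PAYLOAD, payload, len);
}

The point is only the control flow: the payload send cannot begin until the ack proves the routes exist, regardless of how the transport reorders messages.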

5. repair of the ompi/dpm/orte module to correctly init routes during connect/accept.

Reminder to all: messages sent to procs in another job family (i.e., started by a different mpirun) are ALWAYS routed through the respective HNPs. As per the comments in orte/routed, this is REQUIRED to maintain connect/accept (where only the root proc on each side is capable of init'ing the routes), to allow communication between mpiruns using different routing modules, and to minimize connections on tools such as ompi-server. It is all taken care of "under the covers" by the OOB, which ensures a route back to the sender is maintained even when the mpiruns are using different routed modules. The sketch below shows the job-family check at the heart of this rule.
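A hedged illustration of that check, in C. ORTE_JOB_FAMILY and orte_process_name_t are real ORTE constructs; the includes shown, route_via_hnp(), and route_in_family() are illustrative stand-ins for whatever the active routed module actually does:

/* Sketch of the cross-family routing rule. route_via_hnp() and
 * route_in_family() are hypothetical stand-ins for the active routed
 * module's (linear, direct, ...) real logic. */
#include "orte/util/name_fns.h"    /* ORTE_JOB_FAMILY */

extern orte_process_name_t *route_via_hnp(const orte_process_name_t *target);
extern orte_process_name_t *route_in_family(const orte_process_name_t *target);

orte_process_name_t *next_hop(const orte_process_name_t *me,
                              const orte_process_name_t *target)
{
    /* peers started by a different mpirun live in a different job family:
     * those messages ALWAYS relay through the HNPs */
    if (ORTE_JOB_FAMILY(target->jobid) != ORTE_JOB_FAMILY(me->jobid)) {
        return route_via_hnp(target);
    }
    /* same family: the selected routed module picks the path */
    return route_in_family(target);
}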

6. corrections in the orte/odls to ensure proper identification of daemons participating in a dynamic launch

7. corrections in build/nidmap to support update of an existing nidmap during dynamic launch

8. corrected implementation of the update_arch function in the ESS, along with consolidation of a number of ESS operations into base functions for easier maintenance. The ability to support info from multiple jobs was added, although we don't currently do so - this will come later to support further fault recovery strategies

9. minor updates to several functions to remove unnecessary and/or no-longer-used variables and envars, add some debugging output, etc.

10. addition of a new macro ORTE_PROC_IS_DAEMON that resolves to true if the provided proc is a daemon
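The commit message doesn't show the macro body; a hedged guess at its shape, assuming daemons (and the HNP) occupy local job 0 of their job family. ORTE_LOCAL_JOBID is a real ORTE name-manipulation macro, but treat this definition as illustrative rather than the actual one:

/* Illustrative only - the real definition lives in the ORTE headers.
 * Assumption: daemon procs sit in local job 0 of their job family. */
#include "orte/util/name_fns.h"    /* ORTE_LOCAL_JOBID */

#define ORTE_PROC_IS_DAEMON(proc) \
    (0 == ORTE_LOCAL_JOBID((proc)->jobid))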

There is still more cleanup to be done for efficiency, but this at least works.

Tested on a single-node Mac and on multi-node SLURM via odin. Tests included connect/accept, publish/lookup/unpublish, comm_spawn, comm_spawn_multiple, and singleton comm_spawn.

Fixes ticket #1256

This commit was SVN r18804.
2008-07-03 17:53:37 +00:00


/*
 * Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation.  All rights reserved.
 * Copyright (c) 2004-2005 The University of Tennessee and The University
 *                         of Tennessee Research Foundation.  All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart.  All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */
#ifndef OMPI_MCA_DPM_BASE_H
#define OMPI_MCA_DPM_BASE_H

#include "ompi_config.h"
#include "ompi/constants.h"
#include "ompi/mca/dpm/dpm.h"

/*
 * Global functions for the overall MCA DPM framework
 */

BEGIN_C_DECLS

/* Tracking object for a pending disconnect: disconnect_init() posts an
 * isend/irecv pair per peer on the communicator being torn down */
struct ompi_dpm_base_disconnect_obj {
    ompi_communicator_t *comm;    /* communicator being disconnected */
    int size;                     /* number of peers (two requests each) */
    struct ompi_request_t **reqs; /* the 2*size pending requests */
    int buf;                      /* dummy buffer for the zero-byte syncs */
};
typedef struct ompi_dpm_base_disconnect_obj ompi_dpm_base_disconnect_obj;
/**
 * Initialize the DPM MCA framework
 *
 * @retval OMPI_SUCCESS Upon success
 * @retval OMPI_ERROR   Upon failure
 *
 * This function is invoked during ompi_init().
 */
OMPI_DECLSPEC int ompi_dpm_base_open(void);

/**
 * Select an available component.
 *
 * @retval OMPI_SUCCESS   Upon success
 * @retval OMPI_NOT_FOUND If no component can be selected
 * @retval OMPI_ERROR     Upon other failure
 */
OMPI_DECLSPEC int ompi_dpm_base_select(void);

/**
 * Finalize the DPM MCA framework
 *
 * @retval OMPI_SUCCESS Upon success
 * @retval OMPI_ERROR   Upon failure
 *
 * This function is invoked during ompi_finalize().
 */
OMPI_DECLSPEC int ompi_dpm_base_close(void);
#if !ORTE_DISABLE_FULL_SUPPORT
/* Internal support functions */
OMPI_DECLSPEC char* ompi_dpm_base_dyn_init(void);
OMPI_DECLSPEC int ompi_dpm_base_dyn_finalize(void);
OMPI_DECLSPEC void ompi_dpm_base_mark_dyncomm(ompi_communicator_t *comm);
OMPI_DECLSPEC ompi_dpm_base_disconnect_obj *ompi_dpm_base_disconnect_init(ompi_communicator_t *comm);
OMPI_DECLSPEC void ompi_dpm_base_disconnect_waitall(int count, ompi_dpm_base_disconnect_obj **objs);
#endif
/* NULL component functions */
int ompi_dpm_base_null_connect_accept(ompi_communicator_t *comm, int root,
                                      char *port_string, bool send_first,
                                      ompi_communicator_t **newcomm);
void ompi_dpm_base_null_disconnect(ompi_communicator_t *comm);
int ompi_dpm_base_null_spawn(int count, char **array_of_commands,
                             char ***array_of_argv,
                             int *array_of_maxprocs,
                             MPI_Info *array_of_info,
                             char *port_name);
int ompi_dpm_base_null_dyn_init(void);
int ompi_dpm_base_null_dyn_finalize(void);
void ompi_dpm_base_null_mark_dyncomm(ompi_communicator_t *comm);
int ompi_dpm_base_null_open_port(char *port_name, orte_rml_tag_t given_tag);
int ompi_dpm_base_null_close_port(char *port_name);
/* useful globals */
OMPI_DECLSPEC extern int ompi_dpm_base_output;
OMPI_DECLSPEC extern opal_list_t ompi_dpm_base_components_available;
OMPI_DECLSPEC extern ompi_dpm_base_component_t ompi_dpm_base_selected_component;
OMPI_DECLSPEC extern ompi_dpm_base_module_t ompi_dpm;
END_C_DECLS
#endif /* OMPI_MCA_DPM_BASE_H */
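
For orientation, a minimal sketch of how the two disconnect helpers declared above pair up in a caller, assuming the surrounding dpm module already includes this header. disconnect_all() is a hypothetical name and the error handling is deliberately simplified; this is not the actual module source:

/* Sketch: tear down n dynamic communicators using the helpers above.
 * Simplified - real callers also check what disconnect_init() returns. */
#include <stdlib.h>

static int disconnect_all(ompi_communicator_t **comms, int n)
{
    ompi_dpm_base_disconnect_obj **objs;
    int i;

    objs = (ompi_dpm_base_disconnect_obj **)
        malloc(n * sizeof(ompi_dpm_base_disconnect_obj *));
    if (NULL == objs) {
        return OMPI_ERR_OUT_OF_RESOURCE;
    }

    /* each init posts an isend/irecv pair per peer on that communicator */
    for (i = 0; i < n; i++) {
        objs[i] = ompi_dpm_base_disconnect_init(comms[i]);
    }

    /* wait on every pending request; waitall releases the tracking objects */
    ompi_dpm_base_disconnect_waitall(n, objs);

    free(objs);
    return OMPI_SUCCESS;
}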