openmpi/ompi/proc/proc.h
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
*                         University Research and Technology
*                         Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
*                         of Tennessee Research Foundation. All rights
*                         reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
*                         University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
*                         All rights reserved.
* Copyright (c) 2006      Cisco Systems, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#ifndef OMPI_PROC
#define OMPI_PROC
#include "ompi/types.h"
#include "opal/class/opal_list.h"
#include "orte/dss/dss_types.h"
#include "opal/threads/mutex.h"
#include "orte/mca/ns/ns_types.h"
#if defined(c_plusplus) || defined(__cplusplus)
extern "C" {
#endif
OMPI_DECLSPEC extern opal_class_t ompi_proc_t_class;
struct ompi_proc_t {
    /** allow proc to be placed on a list */
    opal_list_item_t super;
    /** this process' name */
    orte_process_name_t proc_name;
    /** BML specific proc data */
    struct mca_bml_base_endpoint_t* proc_bml;
    /** PML specific proc data */
    struct mca_pml_base_endpoint_t* proc_pml;
    /** MCA module exchange data */
    opal_object_t* proc_modex;
    /** architecture of this process */
    uint32_t proc_arch;
    /** process-wide convertor */
    struct ompi_convertor_t* proc_convertor;
    /** process-wide lock */
    opal_mutex_t proc_lock;
    /** Keep the hostname around for debugging purposes */
    char *proc_hostname;
    /** flags for this proc */
    uint8_t proc_flags;
};
/**
* Convenience typedef
*/
typedef struct ompi_proc_t ompi_proc_t;
OMPI_DECLSPEC extern ompi_proc_t* ompi_proc_local_proc;
/*
* Flags
*/
/**
* Flag to indicate that the proc is on the same node as the local proc
*/
#define OMPI_PROC_FLAG_LOCAL 0x01
/**
* Query the run-time environment and build list of available proc instances.
*/
int ompi_proc_init(void);
/**
* Release the processes at the end of the application
*/
int ompi_proc_finalize(void);
/**
* Returns the list of proc instances associated with this job.
*/
OMPI_DECLSPEC ompi_proc_t** ompi_proc_world(size_t* size);
/**
* Returns the list of all known proc instances.
*/
OMPI_DECLSPEC ompi_proc_t** ompi_proc_all(size_t* size);
/**
* Returns a list containing a single proc instance (the local proc).
*/
ompi_proc_t** ompi_proc_self(size_t* size);
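
/*
* Usage sketch (not part of the original header): a minimal, hedged example of
* walking the procs known for this job and testing OMPI_PROC_FLAG_LOCAL. The
* function name is hypothetical, and it assumes the array returned by
* ompi_proc_world() is owned by the caller and released with free().
*
*   static int count_local_peers(void)
*   {
*       size_t nprocs, i;
*       int nlocal = 0;
*       ompi_proc_t **procs = ompi_proc_world(&nprocs);
*       if (NULL == procs) {
*           return -1;
*       }
*       for (i = 0; i < nprocs; ++i) {
*           if (procs[i]->proc_flags & OMPI_PROC_FLAG_LOCAL) {
*               nlocal++;
*           }
*       }
*       free(procs);
*       return nlocal;
*   }
*/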
/**
* Returns the proc instance corresponding to the local proc.
*/
static inline ompi_proc_t* ompi_proc_local(void)
{
    return ompi_proc_local_proc;
}
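
/*
* Usage sketch (not part of the original header): a hedged example of reading
* fields of the local proc structure. Taking proc_lock via OPAL_THREAD_LOCK /
* OPAL_THREAD_UNLOCK (opal/threads/mutex.h) before touching mutable fields,
* and reporting through opal_output() (opal/util/output.h), are assumptions
* about the intended usage, not rules stated in this header.
*
*   static void log_local_proc(void)
*   {
*       ompi_proc_t *me = ompi_proc_local();
*       OPAL_THREAD_LOCK(&me->proc_lock);
*       opal_output(0, "local proc on host %s, arch 0x%08x",
*                   (NULL == me->proc_hostname) ? "unknown" : me->proc_hostname,
*                   (unsigned int) me->proc_arch);
*       OPAL_THREAD_UNLOCK(&me->proc_lock);
*   }
*/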
/**
* Returns the proc instance for a given name
*/
ompi_proc_t * ompi_proc_find ( const orte_process_name_t* name );
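
/*
* Usage sketch (not part of the original header): a hedged example of mapping
* an ORTE process name (e.g. one carried in a message header) back to its
* ompi_proc_t. The function name is hypothetical; a NULL return is assumed to
* mean the proc is not (yet) on the global list.
*
*   static char *hostname_of(const orte_process_name_t *name)
*   {
*       ompi_proc_t *proc = ompi_proc_find(name);
*       return (NULL == proc) ? NULL : proc->proc_hostname;
*   }
*/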
/**
* INPUT: ompi_proc_t **proclist : list of process pointers
* INPUT: int proclistsize : length of the proclist array
* IN/OUT: orte_buffer_t *buf : an orte_buffer containing the packed names
*
* This function takes a list of ompi_proc_t pointers (e.g. as given
* in groups) and returns an orte buffer containing all information
* needed to add the proc to a remote list. This includes the ORTE
* process name, the architecture, and the hostname. Ordering is
* maintained. The buffer is packed to be sent to a remote node with
* different architecture (endian or word size). The buffer can be
* dss unloaded to be sent using MPI or sent using rml_send_packed.
*
* Return values:
* OMPI_SUCCESS on success
* OMPI_ERROR: other errors
*/
int ompi_proc_pack(ompi_proc_t **proclist, int proclistsize,
                   orte_buffer_t *buf);
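
/*
* Usage sketch (not part of the original header): a hedged example of packing
* a set of procs for transmission. OBJ_NEW/OBJ_RELEASE on orte_buffer_t follow
* the usual OPAL/ORTE object conventions; how the buffer is then sent
* (rml_send_packed, dss unload plus MPI, ...) is left to the caller, and the
* function name is hypothetical.
*
*   static orte_buffer_t *pack_procs(ompi_proc_t **procs, int nprocs)
*   {
*       orte_buffer_t *buf = OBJ_NEW(orte_buffer_t);
*       if (NULL == buf) {
*           return NULL;
*       }
*       if (OMPI_SUCCESS != ompi_proc_pack(procs, nprocs, buf)) {
*           OBJ_RELEASE(buf);
*           return NULL;
*       }
*       return buf;
*   }
*/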
/**
* INPUT: orte_buffer_t *buf : orte_buffer containing the packed names
* INPUT: int proclistsize : number of expected proc pointers
* OUTPUT: ompi_proc_t ***proclist : list of process pointers
*
* This function unpacks a packed list of ompi_proc_t structures and
* returns the ordered list of proc structures. If the given proc is
* already "known", the architecture and hostname information in the
* buffer is ignored. If the proc is "new" to this process, it will
* be added to the global list of known procs, with information
* provided in the buffer. The lookup actions are always entirely
* local. The proclist returned is a list of pointers to all procs in
* the buffer, whether they were previously known or are new to this
* process. PML_ADD_PROCS will be called on the list of new processes
* discovered during this operation.
*
* Return value:
* OMPI_SUCCESS on success
* OMPI_ERROR else
*/
int ompi_proc_unpack(orte_buffer_t *buf, int proclistsize, ompi_proc_t ***proclist);
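
/*
* Usage sketch (not part of the original header): a hedged example of the
* receiving side. It assumes the sender packed exactly nprocs entries with
* ompi_proc_pack() and that the returned proclist array, like the one from
* ompi_proc_world(), is owned by the caller; the function name is
* hypothetical.
*
*   static ompi_proc_t **unpack_procs(orte_buffer_t *buf, int nprocs)
*   {
*       ompi_proc_t **procs = NULL;
*       if (OMPI_SUCCESS != ompi_proc_unpack(buf, nprocs, &procs)) {
*           return NULL;
*       }
*       return procs;
*   }
*/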
#if defined(c_plusplus) || defined(__cplusplus)
}
#endif
#endif /* OMPI_PROC */