* clean up the Doxygen comments in the runtime and llm classes
* re-clarify previous clarification on ompi_list_get_size()

This commit was SVN r2190.
This commit is contained in: parent 29b8a543af, commit d5133eddec
@@ -222,9 +222,12 @@ static inline ompi_list_item_t* ompi_list_get_end(ompi_list_t* list)
  * This is an inlined function in compilers that support inlining, so
  * it's usually a cheap operation.
  *
- * \warning In the future, this may become an O(N) operation. If you
- * only need to check for comparison with 0, please use \c
- * ompi_list_is_empty, which will always be an O(1) operation.
+ * \warning The size of the list is cached as part of the list. In
+ * the future, calling \c ompi_list_splice or \c ompi_list_join may
+ * result in this function recomputing the list size, which would be
+ * an O(N) operation. If \c ompi_list_splice or \c ompi_list_join is
+ * never called on the specified list, this function will always be
+ * O(1).
  */
 static inline size_t ompi_list_get_size(ompi_list_t* list)
 {
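A short usage sketch of the behavior documented above: prefer ompi_list_is_empty when only emptiness matters, and pay for ompi_list_get_size only when the count is really needed. The header path and the report_hosts() helper are assumptions; only the two list calls come from the interface described above.

#include <stdio.h>
#include "class/ompi_list.h"        /* assumed header path for ompi_list_t */

static void report_hosts(ompi_list_t *hostlist)
{
    size_t nhosts;

    if (ompi_list_is_empty(hostlist)) {     /* documented as always O(1) */
        printf("no hosts allocated\n");
        return;
    }

    /* Only compute the size when the actual count is needed; per the
     * warning above this may become O(N) after a splice or join. */
    nhosts = ompi_list_get_size(hostlist);
    printf("allocated %lu host(s)\n", (unsigned long) nhosts);
}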
@@ -43,6 +43,9 @@ extern "C" {
  */
 int mca_llm_base_collapse_resources(ompi_list_t *hostlist);

+/**
+ * Deallocate resources allocated by parse hostfile
+ */
 int mca_llm_base_deallocate(ompi_list_t *nodelist);

 #if defined(c_plusplus) || defined(__cplusplus)
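A hedged sketch of how the two base helpers pair up. Only mca_llm_base_collapse_resources() and mca_llm_base_deallocate() come from the declarations above; the header paths, the example_parse_hostfile() stand-in, and the error code are assumptions.

#include "class/ompi_list.h"        /* assumed header for ompi_list_t      */
#include "mca/llm/base/base.h"      /* assumed header for the base helpers */

/* Hypothetical producer of a host list, e.g. a hostfile parser. */
extern ompi_list_t *example_parse_hostfile(const char *path);

static int example_llm_flow(const char *hostfile)
{
    ompi_list_t *hostlist = example_parse_hostfile(hostfile);
    int rc;

    if (NULL == hostlist) {
        return -1;                           /* stand-in error code */
    }

    /* Presumably merges duplicate host entries produced by parsing. */
    rc = mca_llm_base_collapse_resources(hostlist);

    /* ... hand the collapsed list to whoever needs it ... */

    /* Release everything the parse/collapse steps allocated. */
    mca_llm_base_deallocate(hostlist);
    return rc;
}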
@@ -19,7 +19,9 @@
  * \internal
  */
 typedef struct opened_component_t {
+    /** make us a list item */
     ompi_list_item_t super;
+    /** component that has been opened */
     mca_llm_base_component_t *oc_component;
 } opened_component_t;

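Since opened_component_t exists only so a component pointer can sit on an ompi_list_t, a small sketch of that pattern follows. OBJ_NEW, ompi_list_append(), and the header path are assumptions about the usual Open MPI object/list primitives; the struct fields come from the definition above.

#include "class/ompi_list.h"        /* assumed header for ompi_list_t */

static void remember_component(ompi_list_t *opened_components,
                               mca_llm_base_component_t *component)
{
    /* Wrap the component in the internal list-item container and put
     * it on a bookkeeping list of everything that has been opened. */
    opened_component_t *oc = OBJ_NEW(opened_component_t);           /* assumed macro */
    oc->oc_component = component;
    ompi_list_append(opened_components, (ompi_list_item_t *) oc);   /* assumed call  */
}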
@@ -68,12 +68,18 @@ typedef int (*mca_llm_base_component_finalize_fn_t)(void);
  * types in the future.
  */
 struct mca_llm_base_component_1_0_0_t {
+    /** component version */
     mca_base_component_t llm_version;
+    /** component data */
     mca_base_component_data_1_0_0_t llm_data;
+    /** Function called when component is initialized */
     mca_llm_base_component_init_fn_t llm_init;
+    /** Function called when component is finalized */
     mca_llm_base_component_finalize_fn_t llm_finalize;
 };
+/** shorten mca_llm_base_component_1_0_0_t declaration */
 typedef struct mca_llm_base_component_1_0_0_t mca_llm_base_component_1_0_0_t;
+/** shorten mca_llm_base_component_t declaration */
 typedef mca_llm_base_component_1_0_0_t mca_llm_base_component_t;


@@ -131,10 +137,14 @@ typedef int (*mca_llm_base_deallocate_resources_fn_t)(int jobid,
  * pointers to the calling interface.
  */
 struct mca_llm_base_module_1_0_0_t {
+    /** Function to be called on resource request */
     mca_llm_base_allocate_resources_fn_t llm_allocate_resources;
+    /** Function to be called on resource return */
     mca_llm_base_deallocate_resources_fn_t llm_deallocate_resources;
 };
+/** shorten mca_llm_base_module_1_0_0_t declaration */
 typedef struct mca_llm_base_module_1_0_0_t mca_llm_base_module_1_0_0_t;
+/** shorten mca_llm_base_module_t declaration */
 typedef struct mca_llm_base_module_1_0_0_t mca_llm_base_module_t;


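To make the component/module split concrete, here is a hedged sketch of what a hypothetical "example" llm component might export. Only the field names (llm_version, llm_data, llm_init, llm_finalize) come from the struct above; the header path, the example_* names, and the register-at-startup approach are assumptions, and the function-pointer typedefs are used as-is so no signatures have to be guessed.

#include "mca/llm/llm.h"            /* assumed header for the llm types */

/* Provided elsewhere by the hypothetical component; declaring them with
 * the framework's own function-pointer typedefs avoids restating the
 * underlying signatures, which this diff does not show in full. */
extern mca_llm_base_component_init_fn_t     example_module_init;
extern mca_llm_base_component_finalize_fn_t example_module_finalize;

static mca_llm_base_component_1_0_0_t example_component;

static void example_register(void)
{
    /* llm_version and llm_data are left untouched here because their
     * layout is not part of this diff. */
    example_component.llm_init     = example_module_init;
    example_component.llm_finalize = example_module_finalize;
}

The framework would then call llm_init to obtain a mca_llm_base_module_t whose llm_allocate_resources / llm_deallocate_resources pointers do the actual allocation work described above.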
@@ -17,11 +17,16 @@
  * Global variables and symbols for the MPI layer
  */

+/** Is mpi initialized? */
 extern bool ompi_mpi_initialized;
+/** Has mpi been finalized? */
 extern bool ompi_mpi_finalized;

+/** Do we have multiple threads? */
 extern bool ompi_mpi_thread_multiple;
+/** Thread level requested to \c MPI_Init_thread() */
 extern int ompi_mpi_thread_requested;
+/** Thread level provided by Open MPI */
 extern int ompi_mpi_thread_provided;


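A hedged sketch of the kind of guard these globals make possible. The global names come from the declarations above; the header path, the example function, and the return codes are assumptions.

#include "runtime/runtime.h"        /* assumed header declaring these globals */

static int example_query_thread(int *provided)
{
    /* Reject calls made before MPI_Init or after MPI_Finalize. */
    if (!ompi_mpi_initialized || ompi_mpi_finalized) {
        return -1;                  /* stand-in for a real MPI error code */
    }

    /* Report the thread level Open MPI actually granted. */
    *provided = ompi_mpi_thread_provided;
    return 0;                       /* stand-in for MPI_SUCCESS */
}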
@@ -33,7 +33,7 @@
  * 3) singleton (./a.out)
  *
  * Case 1) If the rte has already been booted, then mpirun will accept
- * an optional command line parameter --universe=<rte universe name>
+ * an optional command line parameter --universe=[rte universe name]
  * which says which universe this application wants to be a part
  * of. mpirun will then package this universe name and send it to the
  * processes it will be starting off (fork/exec) on local or remote
@@ -43,13 +43,13 @@
  *
  * Case 2) When mpirun is done alone and no universe is present, then
  * the mpirun starts off the universe (using rte_boot), then
- * fork/execs the processes, passing along the <universe_name>.
+ * fork/execs the processes, passing along the [universe_name].
  *
  * Case 3) For a singleton, if there is already an existing rte
  * universe which it wants to join, it can specify that using the
  * --universe command line. So it will do
  *
- * $ ./a.out --universe=<universe_name>
+ * $ ./a.out --universe=[universe_name]
  *
  * In this case, MPI_Init will have to be called as MPI_Init(&argc, &argv)
@@ -179,6 +179,7 @@ int ompi_rte_init(bool *allow_multi_user_threads, bool *have_hidden_threads)
  * interface type support
  */

+/** constructor for \c ompi_rte_node_schedule_t */
 static
 void
 ompi_rte_int_node_schedule_construct(ompi_object_t *obj)
@@ -188,6 +189,7 @@ ompi_rte_int_node_schedule_construct(ompi_object_t *obj)
 }


+/** destructor for \c ompi_rte_node_schedule_t */
 static
 void
 ompi_rte_int_node_schedule_destruct(ompi_object_t *obj)
@@ -205,6 +207,7 @@ ompi_rte_int_node_schedule_destruct(ompi_object_t *obj)
 }


+/** constructor for \c ompi_rte_node_allocation_t */
 static
 void
 ompi_rte_int_node_allocation_construct(ompi_object_t *obj)
@@ -214,6 +217,7 @@ ompi_rte_int_node_allocation_construct(ompi_object_t *obj)
 }


+/** destructor for \c ompi_rte_node_allocation_t */
 static
 void
 ompi_rte_int_node_allocation_destruct(ompi_object_t *obj)
@@ -231,6 +235,7 @@ ompi_rte_int_node_allocation_destruct(ompi_object_t *obj)
 }


+/** constructor for \c ompi_rte_valuepair_t */
 static
 void
 ompi_rte_int_valuepair_construct(ompi_object_t *obj)
@@ -241,6 +246,7 @@ ompi_rte_int_valuepair_construct(ompi_object_t *obj)
 }


+/** destructor for \c ompi_rte_valuepair_t */
 static
 void
 ompi_rte_int_valuepair_destruct(ompi_object_t *obj)
@@ -250,12 +256,15 @@ ompi_rte_int_valuepair_destruct(ompi_object_t *obj)
     if (NULL != valpair->value) free(valpair->value);
 }

+/** create instance information for \c ompi_rte_node_schedule_t */
 OBJ_CLASS_INSTANCE(ompi_rte_node_schedule_t, ompi_list_item_t,
                    ompi_rte_int_node_schedule_construct,
                    ompi_rte_int_node_schedule_destruct);
+/** create instance information for \c ompi_rte_node_allocation_t */
 OBJ_CLASS_INSTANCE(ompi_rte_node_allocation_t, ompi_list_item_t,
                    ompi_rte_int_node_allocation_construct,
                    ompi_rte_int_node_allocation_destruct);
+/** create instance information for \c ompi_rte_valuepair_t */
 OBJ_CLASS_INSTANCE(ompi_rte_valuepair_t, ompi_list_item_t,
                    ompi_rte_int_valuepair_construct,
                    ompi_rte_int_valuepair_destruct);
@@ -26,11 +26,16 @@
  * container is destroyed.
  */
 struct ompi_rte_valuepair_t {
+    /** make us an instance of a list item */
     ompi_list_item_t super;
+    /** key string for the info pair */
     char *key;
+    /** value string for the info pair */
     char *value;
 };
+/** shorten ompi_rte_valuepair_t declarations */
 typedef struct ompi_rte_valuepair_t ompi_rte_valuepair_t;
+/** create the required instance information */
 OBJ_CLASS_DECLARATION(ompi_rte_valuepair_t);


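The ownership rule above (key and value are freed when the container is destroyed) pairs with the class machinery registered earlier. In this sketch the OBJ_NEW/OBJ_RELEASE macros and the header paths are assumptions; the field names come from the struct.

#include <string.h>
#include "runtime/runtime_types.h"  /* assumed header for ompi_rte_valuepair_t */

/* Build a key=value pair and hand ownership of both strings to the
 * object; the destructor registered via OBJ_CLASS_INSTANCE frees them,
 * so the caller must not free them itself. */
static ompi_rte_valuepair_t *example_make_pair(const char *key, const char *value)
{
    ompi_rte_valuepair_t *kv = OBJ_NEW(ompi_rte_valuepair_t);   /* assumed macro */
    kv->key = strdup(key);
    kv->value = strdup(value);
    return kv;
}

/* Later, OBJ_RELEASE(kv) would run the destructor and free both strings. */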
@@ -42,28 +47,53 @@ OBJ_CLASS_DECLARATION(ompi_rte_valuepair_t);
  *
  */
 struct ompi_rte_node_allocation_t {
+    /** make us an instance of list item */
     ompi_list_item_t super;
+    /** hostname for this node. Can be used as generic description
+        field if hostnames aren't used on this platform */
     char hostname[MAXHOSTNAMELEN];
+    /** number of MPI processes Open MPI can start on this host */
     int count;
+    /** generic key=value storage mechanism */
     ompi_list_t *info;
 };
+/** shorten ompi_rte_allocation_t declarations */
 typedef struct ompi_rte_node_allocation_t ompi_rte_node_allocation_t;
+/** create the required instance information */
 OBJ_CLASS_DECLARATION(ompi_rte_node_allocation_t);


 /**
  * Container used for process startup information
  *
+ * Container describing a job to be launched. A job consists of a
+ * number of processes started on a number of nodes. Each process
+ * type (a unique argv/envp/cwd) is given its own instance of \c
+ * ompi_rte_node_schedule_t and its own unique list of hosts to start
+ * on.
+ *
+ * All memory associated with \c argv, \c env, \c cwd, and \c nodelist
+ * is given to the instance of \c ompi_rte_node_schedule_t and will be
+ * freed when the instance of \c ompi_rte_node_schedule_t is
+ * destructed.
  */
 struct ompi_rte_node_schedule_t {
+    /** make us an instance of list item */
     ompi_list_item_t super;
+    /** argv array for process to start (NULL terminated array) */
     char **argv;
+    /** length of argv */
     int argc;
+    /** environ array for process to start (NULL terminated array) */
     char **env;
+    /** working directory in which to start the application */
     char *cwd;
+    /** list of nodes to start the process on */
     ompi_list_t *nodelist;
 };
+/** shorten ompi_rte_node_schedule_t declarations */
 typedef struct ompi_rte_node_schedule_t ompi_rte_node_schedule_t;
+/** create the required instance information */
 OBJ_CLASS_DECLARATION(ompi_rte_node_schedule_t);


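Given the memory-ownership note in the comment block above, filling in a schedule might look roughly like the sketch below. The OBJ_NEW macro, the header paths, and every literal value are assumptions; only the field names and the NULL-termination/ownership rules come from the definitions above.

#include <stdlib.h>
#include <string.h>
#include "runtime/runtime_types.h"  /* assumed header for the RTE types */

static ompi_rte_node_schedule_t *example_make_schedule(void)
{
    ompi_rte_node_schedule_t *sched = OBJ_NEW(ompi_rte_node_schedule_t); /* assumed */

    /* argv is NULL terminated, as the struct documentation requires;
     * ownership of all of these allocations passes to the schedule. */
    sched->argv = calloc(3, sizeof(char *));
    sched->argv[0] = strdup("./a.out");
    sched->argv[1] = strdup("--universe=foo");   /* hypothetical universe name */
    sched->argv[2] = NULL;
    sched->argc = 2;

    /* Empty, NULL terminated environment. */
    sched->env = calloc(1, sizeof(char *));

    sched->cwd = strdup("/tmp");

    /* nodelist is left however the class constructor set it up; that
     * constructor is not shown in this diff. */
    return sched;
}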
@@ -6,4 +6,9 @@
  *
  */

+/**
+ * code required to start connection to universe
+ *
+ * \warning Interface may change in the near future
+ */
 int ompi_universe_connect(char *tmpdir);
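Since the interface is explicitly flagged as likely to change, only a minimal call sketch seems appropriate. The session-directory path and the meaning of the return value are assumptions; only the prototype comes from the declaration above.

#include <stdio.h>

extern int ompi_universe_connect(char *tmpdir);   /* prototype shown above */

int main(void)
{
    /* Hypothetical per-user session directory; in practice the runtime
     * determines this path rather than hard-coding it. */
    char tmpdir[] = "/tmp/openmpi-sessions";

    /* Assumed convention: a negative return means no connection. */
    if (ompi_universe_connect(tmpdir) < 0) {
        fprintf(stderr, "could not connect to universe\n");
        return 1;
    }
    return 0;
}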