
* Move the PCM and LLM types into src/runtime/runtime_types.h

* Add more of the mpirun shell - still far from functional
* Expand the src/runtime interface to include the parts of the PCM needed
  for mpirun

This commit was SVN r1998.
This commit is contained in:
Brian Barrett 2004-08-10 03:48:41 +00:00
parent 3fc646ffd7
commit 8e8ef21ae8
16 changed files with 406 additions and 140 deletions


@@ -13,20 +13,20 @@
 static
 bool
-has_conflicts(mca_llm_base_node_t *a, mca_llm_base_node_t *b)
+has_conflicts(ompi_rte_node_allocation_t *a, ompi_rte_node_allocation_t *b)
 {
-    mca_llm_base_valuepair_t *a_val, *b_val;
+    ompi_rte_valuepair_t *a_val, *b_val;
     ompi_list_item_t *a_item, *b_item;

     for (a_item = ompi_list_get_first(&(a->info)) ;
          a_item != ompi_list_get_end(&(a->info)) ;
          a_item = ompi_list_get_next(a_item)) {
-        a_val = (mca_llm_base_valuepair_t*) a_item;
+        a_val = (ompi_rte_valuepair_t*) a_item;

         for (b_item = ompi_list_get_first(&(b->info)) ;
              b_item != ompi_list_get_end(&(b->info)) ;
              b_item = ompi_list_get_next(b_item)) {
-            b_val = (mca_llm_base_valuepair_t*) b_item;
+            b_val = (ompi_rte_valuepair_t*) b_item;

             /* if both a_val and b_val have the same key but different
                values, we have a conflict */
@@ -43,7 +43,7 @@ has_conflicts(mca_llm_base_node_t *a, mca_llm_base_node_t *b)
 static
 void
-keyval_merge(mca_llm_base_node_t *new, mca_llm_base_node_t *old)
+keyval_merge(ompi_rte_node_allocation_t *new, ompi_rte_node_allocation_t *old)
 {
     ompi_list_item_t *old_item;
@@ -56,19 +56,19 @@ keyval_merge(mca_llm_base_node_t *new, mca_llm_base_node_t *old)
 int
 mca_llm_base_collapse_resources(ompi_list_t *hostlist)
 {
-    mca_llm_base_node_t *curr_node, *check_node;
+    ompi_rte_node_allocation_t *curr_node, *check_node;
     ompi_list_item_t *curr_nodeitem, *check_nodeitem, *tmp;

     for (curr_nodeitem = ompi_list_get_first(hostlist) ;
          curr_nodeitem != ompi_list_get_end(hostlist) ;
          curr_nodeitem = ompi_list_get_next(curr_nodeitem)) {
-        curr_node = (mca_llm_base_node_t*) curr_nodeitem;
+        curr_node = (ompi_rte_node_allocation_t*) curr_nodeitem;

         for (check_nodeitem = ompi_list_get_next(curr_nodeitem) ;
              check_nodeitem != ompi_list_get_end(hostlist) ;
              check_nodeitem = ompi_list_get_next(check_nodeitem)) {
-            check_node = (mca_llm_base_node_t*) check_nodeitem;
+            check_node = (ompi_rte_node_allocation_t*) check_nodeitem;

             if ((strcmp(curr_node->hostname, check_node->hostname) == 0) &&
                 (!has_conflicts(curr_node, check_node))) {
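
Editor's note: the collapse pass merges duplicate hostfile entries - two allocations for the same hostname are folded into one as long as their key=value info lists do not disagree on any key. A minimal sketch of driving it, with made-up host and key names (real callers get the list from the hostfile parser, and the header paths are a guess at where the declarations live):

    #include <string.h>
    #include "class/ompi_list.h"
    #include "mca/llm/base/base_internal.h"
    #include "runtime/runtime_types.h"

    ompi_list_t hosts;
    OBJ_CONSTRUCT(&hosts, ompi_list_t);

    /* two entries for the same host, no conflicting key=value pairs */
    ompi_rte_node_allocation_t *a = OBJ_NEW(ompi_rte_node_allocation_t);
    strcpy(a->hostname, "node0");
    a->count = 1;
    ompi_list_append(&hosts, (ompi_list_item_t*) a);

    ompi_rte_node_allocation_t *b = OBJ_NEW(ompi_rte_node_allocation_t);
    strcpy(b->hostname, "node0");
    b->count = 1;
    ompi_list_append(&hosts, (ompi_list_item_t*) b);

    mca_llm_base_collapse_resources(&hosts);  /* hosts now holds one node0 entry */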


@@ -16,7 +16,7 @@ mca_llm_base_map_resources(int nodes,
                            int procs,
                            ompi_list_t *hostlist)
 {
-    mca_llm_base_node_t *node;
+    ompi_rte_node_allocation_t *node;
     ompi_list_item_t *nodeitem, *tmp;

     if (NULL == hostlist) {
@@ -35,7 +35,7 @@ mca_llm_base_map_resources(int nodes,
     for (nodeitem = ompi_list_get_first(hostlist);
          nodeitem != ompi_list_get_end(hostlist);
          nodeitem = ompi_list_get_next(nodeitem)) {
-        node = (mca_llm_base_node_t*) nodeitem;
+        node = (ompi_rte_node_allocation_t*) nodeitem;

         if (alloc_procs >= procs) {
             /* we've allocated enough - release this guy from the
@@ -61,7 +61,7 @@ mca_llm_base_map_resources(int nodes,
     for (nodeitem = ompi_list_get_first(hostlist);
          nodeitem != ompi_list_get_end(hostlist);
          nodeitem = ompi_list_get_next(nodeitem)) {
-        node = (mca_llm_base_node_t*) nodeitem;
+        node = (ompi_rte_node_allocation_t*) nodeitem;
         node->count = 1;
     }


@@ -8,6 +8,7 @@
 #include "mca/base/base.h"
 #include "mca/llm/llm.h"
 #include "mca/llm/base/base.h"
+#include "runtime/runtime_types.h"

 /*
@@ -33,7 +34,7 @@ static
 void
 mca_llm_base_node_construct(ompi_object_t *obj)
 {
-    mca_llm_base_node_t *node = (mca_llm_base_node_t*) obj;
+    ompi_rte_node_allocation_t *node = (ompi_rte_node_allocation_t*) obj;
     OBJ_CONSTRUCT(&(node->info), ompi_list_t);
 }
@@ -41,7 +42,7 @@ static
 void
 mca_llm_base_node_destruct(ompi_object_t *obj)
 {
-    mca_llm_base_node_t *node = (mca_llm_base_node_t*) obj;
+    ompi_rte_node_allocation_t *node = (ompi_rte_node_allocation_t*) obj;
     OBJ_DESTRUCT(&(node->info));
 }
@@ -49,7 +50,7 @@ static
 void
 mca_llm_base_valuepair_construct(ompi_object_t *obj)
 {
-    mca_llm_base_valuepair_t *valpair = (mca_llm_base_valuepair_t*) obj;
+    ompi_rte_valuepair_t *valpair = (ompi_rte_valuepair_t*) obj;
     valpair->key = NULL;
     valpair->value = NULL;
 }
@@ -58,14 +59,14 @@ static
 void
 mca_llm_base_valuepair_destruct(ompi_object_t *obj)
 {
-    mca_llm_base_valuepair_t *valpair = (mca_llm_base_valuepair_t*) obj;
+    ompi_rte_valuepair_t *valpair = (ompi_rte_valuepair_t*) obj;
     if (NULL != valpair->key) free(valpair->key);
     if (NULL != valpair->value) free(valpair->value);
 }

-OBJ_CLASS_INSTANCE(mca_llm_base_node_t, ompi_list_item_t,
+OBJ_CLASS_INSTANCE(ompi_rte_node_allocation_t, ompi_list_item_t,
                    mca_llm_base_node_construct, mca_llm_base_node_destruct);
-OBJ_CLASS_INSTANCE(mca_llm_base_valuepair_t, ompi_list_item_t,
+OBJ_CLASS_INSTANCE(ompi_rte_valuepair_t, ompi_list_item_t,
                    mca_llm_base_valuepair_construct,
                    mca_llm_base_valuepair_destruct);
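
Editor's note: for readers outside the OMPI class system, OBJ_CLASS_INSTANCE ties a type to its constructor/destructor pair, so OBJ_NEW hands back a fully initialized object and OBJ_RELEASE tears it down once the reference count hits zero. A minimal sketch, assuming the instance registration above and hypothetical field values:

    ompi_rte_node_allocation_t *node = OBJ_NEW(ompi_rte_node_allocation_t);
    /* the registered constructor has already set up node->info as a list */
    strcpy(node->hostname, "node0");
    node->count = 2;               /* two processes on this node */
    OBJ_RELEASE(node);             /* destructor runs, destroying node->info */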


@@ -13,9 +13,10 @@
 #include "mca/llm/base/base.h"
 #include "mca/llm/base/base_internal.h"
 #include "mca/llm/base/llm_base_parse_hostfile_lex.h"
+#include "runtime/runtime_types.h"

 static void parse_error(void);
-static int parse_keyval(int, mca_llm_base_node_t*);
+static int parse_keyval(int, ompi_rte_node_allocation_t*);

 static void
 parse_error()
@@ -26,11 +27,11 @@ parse_error()
 static
 int
-parse_keyval(int first, mca_llm_base_node_t *node)
+parse_keyval(int first, ompi_rte_node_allocation_t *node)
 {
     int val;
     char *key, *value;
-    mca_llm_base_valuepair_t *keyval;
+    ompi_rte_valuepair_t *keyval;

     if (MCA_LLM_BASE_STRING != first) {
         return OMPI_ERROR;
@@ -63,7 +64,7 @@ parse_keyval(int first, mca_llm_base_node_t *node)
     }

     /* make a keyval and store it */
-    keyval = OBJ_NEW(mca_llm_base_valuepair_t);
+    keyval = OBJ_NEW(ompi_rte_valuepair_t);
     keyval->key = key;
     keyval->value = value;
@@ -88,7 +89,7 @@ parse_count(void)
 static
 int
-parse_line(int first, mca_llm_base_node_t *node)
+parse_line(int first, ompi_rte_node_allocation_t *node)
 {
     int val;
     int ret;
@@ -138,7 +139,7 @@ parse_line(int first, mca_llm_base_node_t *node)
 ompi_list_t *
 mca_llm_base_parse_hostfile(const char *hostfile)
 {
-    mca_llm_base_node_t *newnode;
+    ompi_rte_node_allocation_t *newnode;
     ompi_list_t *list;
     int val, ret;
@@ -168,7 +169,7 @@ mca_llm_base_parse_hostfile(const char *hostfile)
         break;
     case MCA_LLM_BASE_STRING:
-        newnode = OBJ_NEW(mca_llm_base_node_t);
+        newnode = OBJ_NEW(ompi_rte_node_allocation_t);
         ret = parse_line(val, newnode);
         if (OMPI_SUCCESS != ret) {
             OBJ_RELEASE(newnode);
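
Editor's note: the parser turns each hostfile line into an ompi_rte_node_allocation_t whose info list carries any trailing key=value pairs. A usage sketch; the filename is hypothetical, and the hostfile contents in the comment are a guess at the accepted grammar (the lexer defines the real syntax):

    /* hosts.txt (hypothetical contents):
     *   node0 shell=bash
     *   node1 shell=bash
     */
    ompi_list_t *hosts = mca_llm_base_parse_hostfile("hosts.txt");
    if (NULL == hosts) {
        /* a parse error was already reported via parse_error() */
    }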


@@ -14,11 +14,11 @@ int
 mca_llm_hostfile_deallocate_resources(int jobid,
                                       ompi_list_t *nodelist)
 {
-    mca_llm_base_node_t *node;
+    ompi_rte_node_allocation_t *node;
     ompi_list_item_t *item;

     while (NULL != (item = ompi_list_remove_first(nodelist))) {
-        node = (mca_llm_base_node_t*) item;
+        node = (ompi_rte_node_allocation_t*) item;
         OBJ_RELEASE(node);
     }


@@ -20,8 +20,7 @@
 #include "ompi_config.h"
 #include "mca/mca.h"
 #include "class/ompi_list.h"
-#include <sys/param.h>
+#include "runtime/runtime_types.h"

 /*
  * MCA component management functions
@@ -78,42 +77,6 @@ typedef struct mca_llm_base_component_1_0_0_t mca_llm_base_component_1_0_0_t;
 typedef mca_llm_base_component_1_0_0_t mca_llm_base_component_t;

-/*
- * LLM interface types
- */
-
-/**
- * Container for key = value pairs from the node container.
- *
- * Container used for the \code info member of the \code
- * mca_llm_base_node_t structure.  Ownership of char* strings must be
- * give to the container, who will \code free() them when the
- * container is destroyed.
- */
-struct mca_llm_base_valuepair_t {
-    ompi_list_item_t super;
-    char *key;
-    char *value;
-};
-typedef struct mca_llm_base_valuepair_t mca_llm_base_valuepair_t;
-OBJ_CLASS_DECLARATION(mca_llm_base_valuepair_t);
-
-/**
- * Container for node allocation information.
- *
- * Container used for the allocate and deallocate functions of the
- * LLM.
- */
-struct mca_llm_base_node_t {
-    ompi_list_item_t super;
-    char hostname[MAXHOSTNAMELEN];
-    int count;
-    ompi_list_t info;
-};
-typedef struct mca_llm_base_node_t mca_llm_base_node_t;
-OBJ_CLASS_DECLARATION(mca_llm_base_node_t);

 /*
  * LLM interface functions
  */
@@ -137,8 +100,8 @@ OBJ_CLASS_DECLARATION(mca_llm_base_node_t);
  * <code>nodes</code> nodes
  * @param procs (IN) Number of processors to try to allocate.  See the note
  *                   for <code>nodes</code> for usage.
- * @param nodelist (OUT) List of <code>mca_llm_node_t</code>s describing
- *                  the allocated resources.
+ * @param nodelist (OUT) List of <code>ompi_rte_node_allocation_t</code>s
+ *                  describing the allocated resources.
 *
 * @warning The type for jobid will change in the near future
 */


@@ -33,6 +33,9 @@ int mca_pcm_base_select(bool *allow_multi_user_threads,
     mca_pcm_base_module_t *module, *best_module;
     extern ompi_list_t mca_pcm_base_components_available;

+    ompi_output_verbose(100, mca_pcm_base_output,
+                        "mca_pcm_base_select started");
+
     /* Traverse the list of available components; call their init
        functions. */


@@ -65,8 +65,7 @@
 #include "mca/mca.h"
 #include "mca/ns/ns.h"
 #include "include/types.h"
-#include <sys/param.h>
+#include "runtime/runtime_types.h"

 /*
  * MCA component management functions
@@ -122,72 +121,6 @@ typedef struct mca_pcm_base_component_1_0_0_t mca_pcm_base_component_1_0_0_t;
 typedef mca_pcm_base_component_1_0_0_t mca_pcm_base_component_t;

-/*
- * PCM interface types
- */
-
-/**
- * Container for key = value pairs from the node container.
- *
- * Container used for the \c info member of the \c mca_pcm_base_node_t
- * structure.  Ownership of char* strings must be give to the
- * container, who will \c free() them when the container is destroyed.
- */
-struct mca_pcm_base_valuepair_t {
-    ompi_list_item_t super;
-    char *key;
-    char *value;
-};
-typedef struct mca_pcm_base_valuepair_t mca_pcm_base_valuepair_t;
-OBJ_CLASS_DECLARATION(mca_pcm_base_valuepair_t);
-
-/**
- * Container for node allocation information.
- *
- * Container used for the allocate and deallocate functions of the
- * PCM.
- */
-struct mca_pcm_base_node_t {
-    ompi_list_item_t super;
-    char hostname[MAXHOSTNAMELEN];
-    int count;
-    ompi_list_t info;
-};
-typedef struct mca_pcm_base_node_t mca_pcm_base_node_t;
-OBJ_CLASS_DECLARATION(mca_pcm_base_node_t);
-
-/**
- * Container use for process startup information
- *
- */
-struct mca_pcm_base_schedule_t {
-    ompi_list_item_t super;
-    char **argv;
-    int argc;
-    char **env;
-    char *cwd;
-    ompi_list_t nodelist;
-};
-typedef struct mca_pcm_base_schedule_t mca_pcm_base_schedule_t;
-OBJ_CLASS_DECLARATION(mca_pcm_base_schedule_t);
-
-/**
- * VPID type
- */
-typedef pid_t ompi_vpid_t;
-
-/**
- * Monitor type
- */
-typedef int (*mca_pcm_base_monitor_fn_t)(ompi_process_name_t*,
-                                         int newstate,
-                                         int status);

 /*
  * PCM interface functions
  */
@@ -249,7 +182,7 @@ typedef int
  */
 typedef int
 (*mca_pcm_base_register_monitor_fn_t)(int jobid,
-                                      mca_pcm_base_monitor_fn_t func);
+                                      ompi_rte_monitor_fn_t func);

 /**


@@ -39,7 +39,7 @@ extern "C" {
     int mca_pcm_rsh_allocate_resources(int jobid, int nodes, int procs,
                                        ompi_list_t **nodelist);
     int mca_pcm_rsh_register_monitor(int jobid,
-                                     mca_pcm_base_monitor_fn_t func);
+                                     ompi_rte_monitor_fn_t func);
     bool mca_pcm_rsh_can_spawn(void);
     int mca_pcm_rsh_spawn_procs(int jobid, ompi_list_t schedule_list,
                                 ompi_vpid_t start_vpid);


@@ -12,7 +12,7 @@
 #include <string.h>

 int
-mca_pcm_rsh_register_monitor(int jobid, mca_pcm_base_monitor_fn_t func)
+mca_pcm_rsh_register_monitor(int jobid, ompi_rte_monitor_fn_t func)
 {
     return OMPI_SUCCESS;
 }


@@ -13,6 +13,7 @@ noinst_LTLIBRARIES = libruntime.la
 headers = \
         runtime.h \
+        runtime_types.h \
         universe_connect.h \
         universe_init.h \
         ompi_progress.h
@@ -26,7 +27,9 @@ libruntime_la_SOURCES = \
         universe_init.c \
         ompi_progress.c \
         ompi_rte_finalize.c \
-        ompi_rte_init.c
+        ompi_rte_init.c \
+        ompi_rte_llm.c \
+        ompi_rte_pcm.c

 # Conditionally install the header files

src/runtime/ompi_rte_llm.c (new file, +31)

@@ -0,0 +1,31 @@
+/*
+ * $HEADER$
+ */
+
+#include "ompi_config.h"
+
+#include "runtime/runtime.h"
+#include "runtime/runtime_types.h"
+#include "mca/pcm/pcm.h"
+
+int
+ompi_rte_allocate_resources(int jobid, int nodes, int procs,
+                            ompi_list_t **nodelist)
+{
+    if (NULL == mca_pcm.pcm_allocate_resources) {
+        return OMPI_ERROR;
+    }
+
+    return mca_pcm.pcm_allocate_resources(jobid, nodes, procs, nodelist);
+}
+
+
+int
+ompi_rte_deallocate_resources(int jobid, ompi_list_t *nodelist)
+{
+    if (NULL == mca_pcm.pcm_deallocate_resources) {
+        return OMPI_ERROR;
+    }
+
+    return mca_pcm.pcm_deallocate_resources(jobid, nodelist);
+}
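
Editor's note: these wrappers are deliberately thin. Callers such as mpirun go through ompi_rte_* and never touch the mca_pcm module struct directly, and the NULL checks turn an unimplemented entry point in the selected module into an OMPI_ERROR return rather than a crash. The same guard-then-forward pattern repeats for the process-control calls in the next file.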

src/runtime/ompi_rte_pcm.c (new file, +76)

@@ -0,0 +1,76 @@
+/*
+ * $HEADER$
+ */
+
+#include "ompi_config.h"
+
+#include "runtime/runtime.h"
+#include "runtime/runtime_types.h"
+#include "mca/pcm/pcm.h"
+
+bool
+ompi_rte_can_spawn(void)
+{
+    if (NULL == mca_pcm.pcm_can_spawn) {
+        return OMPI_ERROR;
+    }
+
+    return mca_pcm.pcm_can_spawn();
+}
+
+
+int
+ompi_rte_spawn_procs(int jobid, ompi_list_t schedule_list,
+                     ompi_vpid_t start_vpid)
+{
+    if (NULL == mca_pcm.pcm_spawn_procs) {
+        return OMPI_ERROR;
+    }
+
+    return mca_pcm.pcm_spawn_procs(jobid, schedule_list, start_vpid);
+}
+
+
+ompi_process_name_t*
+ompi_rte_get_self(void)
+{
+    if (NULL == mca_pcm.pcm_self) {
+        return OMPI_ERROR;
+    }
+
+    return mca_pcm.pcm_self();
+}
+
+
+int
+ompi_rte_get_peers(ompi_process_name_t **peers, size_t *npeers)
+{
+    if (NULL == mca_pcm.pcm_peers) {
+        return OMPI_ERROR;
+    }
+
+    return mca_pcm.pcm_peers(peers, npeers);
+}
+
+
+int
+ompi_rte_kill_proc(ompi_process_name_t *name, int flags)
+{
+    if (NULL == mca_pcm.pcm_kill_proc) {
+        return OMPI_ERROR;
+    }
+
+    return mca_pcm.pcm_kill_proc(name, flags);
+}
+
+
+int
+ompi_rte_kill_job(int jobid, int flags)
+{
+    if (NULL == mca_pcm.pcm_kill_job) {
+        return OMPI_ERROR;
+    }
+
+    return mca_pcm.pcm_kill_job(jobid, flags);
+}


@@ -13,6 +13,8 @@
 #include "ompi_config.h"

+#include "runtime/runtime_types.h"
+
 /* For backwards compatibility.  If you only need MPI stuff, please include
    mpiruntime/mpiruntime.h directly */
 #include "mpi/runtime/mpiruntime.h"
@@ -71,6 +73,117 @@ extern "C" {
      */
     int ompi_rte_finalize(void);

+    /**
+     * Allocate requested resources
+     *
+     * Allocate the specified nodes / processes for use in a new job.
+     * Requires a newly created jobid.  The allocation returned may be
+     * smaller than requested - it is up to the caller to proceed as
+     * appropriate should this occur.  This function should only be called
+     * once per jobid.
+     *
+     * @param jobid (IN) Jobid with which to associate the given resources.
+     * @param nodes (IN) Number of nodes to try to allocate. If 0, the
+     *                   LLM will try to allocate <code>procs</code>
+     *                   processes on as many nodes as are needed.  If
+     *                   non-zero, will try to fairly distribute
+     *                   <code>procs</code> processes over the nodes.
+     *                   If <code>procs</code> is 0, will attempt to
+     *                   allocate all cpus on <code>nodes</code> nodes
+     * @param procs (IN) Number of processors to try to allocate.  See the
+     *                   note for <code>nodes</code> for usage.
+     * @param nodelist (OUT) List of <code>ompi_rte_node_allocation_t</code>s
+     *                  describing the allocated resources.
+     *
+     * @warning The type for jobid will change in the near future
+     */
+    int ompi_rte_allocate_resources(int jobid, int nodes, int procs,
+                                    ompi_list_t **nodelist);
+
+    /**
+     * This tells you whether the runtime is capable of spawning new
+     * processes or not
+     *
+     * @return True/False
+     */
+    bool ompi_rte_can_spawn(void);
+
+    /**
+     * Spawn a job
+     *
+     * Start a job with given jobid and starting vpid (should probably be
+     * 0 for the forseeable future).  The job is specified using an array
+     * of \c mca_pcm_base_schedule_t structures, which give both process
+     * and location information.
+     *
+     * @warning Parameter list will probably change in the near future.
+     */
+    int ompi_rte_spawn_procs(int jobid, ompi_list_t schedule_list,
+                             ompi_vpid_t start_vpid);
+
+    /**
+     * Get my name
+     *
+     * @return my name
+     */
+    ompi_process_name_t* ompi_rte_get_self(void);
+
+    /**
+     * Get names of peer processes which have been launched
+     *
+     * @param Nothing
+     * @return An array of peer names, including me
+     */
+    int ompi_rte_get_peers(ompi_process_name_t **peers, size_t *npeers);
+
+    /**
+     * Kill a specific process in this cell
+     *
+     * @param process_name Which process needs to be killed.
+     * @return Error code
+     *
+     * @warning flags is currently ignored, but should be set to 0 for
+     * future compatibility.  Will be used to specify how to kill
+     * processes (0 will be same as a "kill <pid>"
+     */
+    int ompi_rte_kill_proc(ompi_process_name_t *name, int flags);
+
+    /**
+     * Kill all the processes in a job.  This will probably find out all
+     * the processes in the job by contacting the registry and then call
+     * mca_pcm_kill_process for each process in the job (for a cell)
+     *
+     * @param jobid Job id
+     * @return Error code
+     *
+     * @warning flags is currently ignored, but should be set to 0 for
+     * future compatibility.  Will be used to specify how to kill
+     * processes (0 will be same as a "kill <pid>"
+     */
+    int ompi_rte_kill_job(int jobid, int flags);
+
+    /**
+     * Deallocate requested resources
+     *
+     * Return the resources for the given jobid to the system.
+     *
+     * @param jobid (IN) Jobid associated with the resources to be freed.
+     * @param nodes (IN) Nodelist from associated allocate_resource call.
+     *                   All associated memory will be freed as appropriate.
+     *
+     * @warning The type for jobid will change in the near future.
+     */
+    int ompi_rte_deallocate_resources(int jobid, ompi_list_t *nodelist);

 #ifdef __cplusplus
 }
 #endif
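
Editor's note: taken together, the interface above implies a launch lifecycle. A minimal driver sketch under stated assumptions - jobid 0 and the process count of 2 are placeholders mirroring mpirun's hard-coded test values, schedule construction is elided, and error handling is reduced to early returns:

    #include "runtime/runtime.h"
    #include "runtime/runtime_types.h"

    int run_two_procs(void)
    {
        ompi_list_t *nodelist = NULL;
        ompi_list_t schedule_list;
        int jobid = 0;               /* a real jobid allocator is still a todo */

        if (OMPI_SUCCESS != ompi_rte_allocate_resources(jobid, 0, 2, &nodelist)) {
            return OMPI_ERROR;
        }

        if (ompi_rte_can_spawn()) {
            OBJ_CONSTRUCT(&schedule_list, ompi_list_t);
            /* ... append ompi_rte_node_schedule_t entries here ... */
            ompi_rte_spawn_procs(jobid, schedule_list, 0);
            OBJ_DESTRUCT(&schedule_list);
        }

        ompi_rte_deallocate_resources(jobid, nodelist);
        return OMPI_SUCCESS;
    }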

src/runtime/runtime_types.h (new file, +88)

@@ -0,0 +1,88 @@
+/*
+ * $HEADER$
+ */
+
+/**
+ * @file
+ *
+ * Types for interface into the Open MPI Run Time Environment
+ */
+
+#ifndef OMPI_RUNTIME_TYPES_H
+#define OMPI_RUNTIME_TYPES_H
+
+#include "class/ompi_list.h"
+#include "mca/ns/ns.h"
+
+#include <sys/param.h>
+
+/**
+ * Container for key = value pairs from the node allocation container
+ *
+ * Container used for the \c info member of the \c
+ * ompi_rte_node_allocation_t structure.  Ownership of char* strings must be
+ * give to the container, who will \c free() them when the
+ * container is destroyed.
+ */
+struct ompi_rte_valuepair_t {
+    ompi_list_item_t super;
+    char *key;
+    char *value;
+};
+typedef struct ompi_rte_valuepair_t ompi_rte_valuepair_t;
+OBJ_CLASS_DECLARATION(ompi_rte_valuepair_t);
+
+/**
+ * Container for node allocation information.
+ *
+ * Container for allocation and deallocation of resources used to
+ * launch parallel jobs.
+ *
+ */
+struct ompi_rte_node_allocation_t {
+    ompi_list_item_t super;
+    char hostname[MAXHOSTNAMELEN];
+    int count;
+    ompi_list_t info;
+};
+typedef struct ompi_rte_node_allocation_t ompi_rte_node_allocation_t;
+OBJ_CLASS_DECLARATION(ompi_rte_node_allocation_t);
+
+/**
+ * Container use for process startup information
+ *
+ */
+struct ompi_rte_node_schedule_t {
+    ompi_list_item_t super;
+    char **argv;
+    int argc;
+    char **env;
+    char *cwd;
+    ompi_list_t nodelist;
+};
+typedef struct ompi_rte_node_schedule_t ompi_rte_node_schedule_t;
+OBJ_CLASS_DECLARATION(ompi_rte_node_schedule_t);
+
+/**
+ * VPID type
+ */
+typedef pid_t ompi_vpid_t;
+
+/**
+ * Monitor callback type
+ *
+ * Typedef for callback function when there is a state change in any
+ * of the processes that were spawned locally.  This function will
+ * only be called while the library is in its progress function (ie,
+ * not from signal handler context).
+ */
+typedef int (*ompi_rte_monitor_fn_t)(ompi_process_name_t* name,
+                                     int newstate,
+                                     int status);
+
+#endif /* OMPI_RUNTIME_TYPES_H */
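
Editor's note: of these types, ompi_rte_node_schedule_t is the one mpirun will eventually have to fill in before spawning. A hypothetical construction, assuming a class instance gets registered for the type (this commit only declares the class) and using made-up argv/cwd values:

    ompi_rte_node_schedule_t *sched = OBJ_NEW(ompi_rte_node_schedule_t);
    char *argv[] = { "./a.out", NULL };

    sched->argv = argv;
    sched->argc = 1;
    sched->env  = NULL;           /* inherit the launching environment */
    sched->cwd  = "/tmp";         /* working directory for the new procs */
    /* sched->nodelist would be populated from an allocate_resources result */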


@@ -12,6 +12,16 @@
 #include <stdio.h>

+static long num_running_procs;
+
+static int
+mpirun_monitor(ompi_process_name_t *name, int newstate, int status)
+{
+    /* BWB - do state checks and the like... */
+    num_running_procs--;
+    return OMPI_SUCCESS;
+}
+
 int
 main(int argc, char *argv[])
@@ -20,9 +30,10 @@ main(int argc, char *argv[])
     bool hidden_thread = false;
     int ret;
     ompi_cmd_line_t *cmd_line = NULL;
+    ompi_list_t *nodelist = NULL;

     /*
-     * Intialize our environment
+     * Intialize our Open MPI environment
      */
     cmd_line = ompi_cmd_line_create();
@@ -32,12 +43,22 @@ main(int argc, char *argv[])
         return ret;
     }

+    /*
+     * Start command line arguments
+     */
+    if (OMPI_SUCCESS != (ret = mca_base_cmd_line_setup(cmd_line))) {
+        /* BWB show_help */
+        printf("show_help: mca_base_cmd_line_setup failed\n");
+        return ret;
+    }
+
     ompi_cmd_line_make_opt(cmd_line, 'h', "help", 0,
                            "Show this help message");
+    ompi_cmd_line_make_opt(cmd_line, '\0', "np", 1,
+                           "Number of processes to start");
+    ompi_cmd_line_make_opt(cmd_line, 'h', "hostfile", 1,
+                           "Host description file");

     if (OMPI_SUCCESS != ompi_cmd_line_parse(cmd_line, false, argc, argv) ||
         ompi_cmd_line_is_taken(cmd_line, "help") ||
         ompi_cmd_line_is_taken(cmd_line, "h")) {
@@ -51,6 +72,10 @@ main(int argc, char *argv[])
         return ret;
     }

+    /*
+     * Start the Open MPI Run Time Environment
+     */
     if (OMPI_SUCCESS != (ret = mca_base_open())) {
         /* JMS show_help */
         printf("show_help: mca_base_open failed\n");
@@ -64,9 +89,38 @@ main(int argc, char *argv[])
     }

+    /*
+     * Prep for starting a new job
+     */
+
+    /*
+     * BWB: todo:
+     *
+     *   - ompi_rte_get_new_jobid()
+     */
+
+    /* BWB - fix jobid, procs, and nodes */
+    if (OMPI_SUCCESS != ompi_rte_allocate_resources(0, 0, 2, &nodelist)) {
+        /* BWB show_help */
+        printf("show_help: ompi_rte_allocate_resources failed\n");
+        return -1;
+    }
+
+    /*
+     * BWB: todo:
+     *
+     *   MPI process mapping
+     *   - ompi_rte_register_monitor()
+     *   - ompi_rte_spawn()
+     *   - ompi_rte_monitor()
+     *   - ompi_rte_kill_job()
+     */
+
     /*
      * Clean up
      */
+    if (NULL != nodelist) ompi_rte_deallocate_resources(0, nodelist);
     if (NULL != cmd_line) ompi_cmd_line_free(cmd_line);

     ompi_rte_finalize();
     mca_base_close();
     ompi_finalize();
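
Editor's note: the mpirun_monitor callback added at the top of this file is not yet registered anywhere; the todo list above sketches where it will go. One plausible shape of that wait loop, purely as an illustration - ompi_rte_register_monitor does not exist yet, and driving the event loop via ompi_progress() is an assumption based on the monitor documentation in runtime_types.h:

    num_running_procs = 2;                        /* matches the hard-coded test allocation */
    ompi_rte_register_monitor(0, mpirun_monitor); /* hypothetical future call */
    while (num_running_procs > 0) {
        ompi_progress();                          /* monitor fires from the progress loop */
    }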