
* Change from ompi_list_t to ompi_list_t* in the schedule and allocation
  structures to make it easier to swap lists around when doing process ->
  resource mapping (see the sketch after this message)
* Fix the spawn interface to take an ompi_list_t* instead of an
  ompi_list_t, since an ompi_list_t can't be passed by value
* Change allocate_resources to return an ompi_list_t* instead of taking
  an ompi_list_t** argument, since it's a bit cleaner and makes it much
  clearer who should call OBJ_NEW
* Clean up deallocation in error cases for the llm_base_allocate function
* Update the llm test case so that its correctness doesn't depend on the
  current environment

This commit was SVN r2126.
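
For reference, a minimal sketch of how a caller changes under the new
allocation interface. The prototypes match the headers in this diff; the
surrounding error handling is illustrative only:

    /* Old interface: the list came back through an out-parameter, which
     * left it unclear whether the caller or the component should OBJ_NEW
     * the list. */
    ompi_list_t *nodelist;
    if (OMPI_SUCCESS != ompi_rte_allocate_resources(jobid, nodes, procs,
                                                    &nodelist)) {
        return OMPI_ERROR;
    }

    /* New interface: the component creates and returns the list, or NULL
     * on failure, so ownership is unambiguous. */
    ompi_list_t *nodelist = ompi_rte_allocate_resources(jobid, nodes, procs);
    if (NULL == nodelist) {
        return OMPI_ERROR;
    }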
This commit is contained in:
Brian Barrett 2004-08-13 19:39:06 +00:00
parent 03f195d22c
commit 5540dc37bc
26 changed files with 224 additions and 214 deletions

View file

@@ -32,7 +32,8 @@ libmca_llm_base_la_SOURCES = \
llm_base_open.c \
llm_base_parse_hostfile.c \
llm_base_parse_hostfile_lex.l \
llm_base_select.c
llm_base_select.c \
llm_base_util.c
# Conditionally install the header files

View file

@@ -43,6 +43,8 @@ extern "C" {
*/
int mca_llm_base_collapse_resources(ompi_list_t *hostlist);
int mca_llm_base_deallocate(ompi_list_t *nodelist);
#if defined(c_plusplus) || defined(__cplusplus)
}
#endif

View file

@@ -18,13 +18,13 @@ has_conflicts(ompi_rte_node_allocation_t *a, ompi_rte_node_allocation_t *b)
ompi_rte_valuepair_t *a_val, *b_val;
ompi_list_item_t *a_item, *b_item;
for (a_item = ompi_list_get_first(&(a->info)) ;
a_item != ompi_list_get_end(&(a->info)) ;
for (a_item = ompi_list_get_first(a->info) ;
a_item != ompi_list_get_end(a->info) ;
a_item = ompi_list_get_next(a_item)) {
a_val = (ompi_rte_valuepair_t*) a_item;
for (b_item = ompi_list_get_first(&(b->info)) ;
b_item != ompi_list_get_end(&(b->info)) ;
for (b_item = ompi_list_get_first(b->info) ;
b_item != ompi_list_get_end(b->info) ;
b_item = ompi_list_get_next(b_item)) {
b_val = (ompi_rte_valuepair_t*) b_item;
@@ -47,8 +47,8 @@ keyval_merge(ompi_rte_node_allocation_t *new, ompi_rte_node_allocation_t *old)
{
ompi_list_item_t *old_item;
while (NULL != (old_item = ompi_list_remove_first(&(old->info)))) {
ompi_list_append(&(new->info), old_item);
while (NULL != (old_item = ompi_list_remove_first(old->info))) {
ompi_list_append(new->info, old_item);
}
}

View file

@@ -27,51 +27,15 @@ ompi_list_t mca_llm_base_components_available;
mca_llm_base_component_t mca_llm_base_selected_component;
mca_llm_base_module_t mca_llm;
/*
* LLM interface type support
*/
static
void
mca_llm_base_node_construct(ompi_object_t *obj)
{
ompi_rte_node_allocation_t *node = (ompi_rte_node_allocation_t*) obj;
OBJ_CONSTRUCT(&(node->info), ompi_list_t);
}
static
void
mca_llm_base_node_destruct(ompi_object_t *obj)
{
ompi_rte_node_allocation_t *node = (ompi_rte_node_allocation_t*) obj;
OBJ_DESTRUCT(&(node->info));
}
static
void
mca_llm_base_valuepair_construct(ompi_object_t *obj)
{
ompi_rte_valuepair_t *valpair = (ompi_rte_valuepair_t*) obj;
valpair->key = NULL;
valpair->value = NULL;
}
static
void
mca_llm_base_valuepair_destruct(ompi_object_t *obj)
{
ompi_rte_valuepair_t *valpair = (ompi_rte_valuepair_t*) obj;
if (NULL != valpair->key) free(valpair->key);
if (NULL != valpair->value) free(valpair->value);
}
OBJ_CLASS_INSTANCE(ompi_rte_node_allocation_t, ompi_list_item_t,
mca_llm_base_node_construct, mca_llm_base_node_destruct);
OBJ_CLASS_INSTANCE(ompi_rte_valuepair_t, ompi_list_item_t,
mca_llm_base_valuepair_construct,
mca_llm_base_valuepair_destruct);
ompi_mutex_t mca_llm_base_parse_mutex;
/* give us a way to hook in for base unit tests */
void
mca_llm_base_setup(void)
{
/* initialize the internal mutex */
OBJ_CONSTRUCT(&mca_llm_base_parse_mutex, ompi_mutex_t);
}
/**
* Function for finding and opening either all MCA modules, or the one
@@ -79,8 +43,7 @@ ompi_mutex_t mca_llm_base_parse_mutex;
*/
int mca_llm_base_open(void)
{
/* initialize the internal mutex */
OBJ_CONSTRUCT(&mca_llm_base_parse_mutex, ompi_mutex_t);
mca_llm_base_setup();
/* Open up all available components */
if (OMPI_SUCCESS !=

View file

@@ -68,7 +68,7 @@ parse_keyval(int first, ompi_rte_node_allocation_t *node)
keyval->key = key;
keyval->value = value;
ompi_list_append(&(node->info), (ompi_list_item_t *) keyval);
ompi_list_append(node->info, (ompi_list_item_t *) keyval);
return OMPI_SUCCESS;
}

src/mca/llm/base/llm_base_util.c (new file, 27 lines added)
View file

@@ -0,0 +1,27 @@
/* -*- C -*-
*
* $HEADER$
*/
#include "ompi_config.h"
#include "mca/llm/llm.h"
#include "mca/llm/base/base.h"
#include "mca/llm/base/base_internal.h"
int
mca_llm_base_deallocate(ompi_list_t *nodelist)
{
ompi_rte_node_allocation_t *node;
ompi_list_item_t *item;
while (NULL != (item = ompi_list_remove_first(nodelist))) {
node = (ompi_rte_node_allocation_t*) item;
OBJ_RELEASE(node);
}
OBJ_RELEASE(nodelist);
return OMPI_SUCCESS;
}

View file

@@ -38,10 +38,9 @@ extern "C" {
int mca_llm_hostfile_component_finalize(void);
int mca_llm_hostfile_allocate_resources(int jobid,
int nodes,
int procs,
ompi_list_t **nodelist);
ompi_list_t* mca_llm_hostfile_allocate_resources(int jobid,
int nodes,
int procs);
int mca_llm_hostfile_deallocate_resources(int jobid,
ompi_list_t *nodelist);

View file

@@ -14,27 +14,29 @@
extern char *mca_llm_hostfile_filename;
int
mca_llm_hostfile_allocate_resources(int jobid,
int nodes,
int procs,
ompi_list_t **nodelist)
ompi_list_t*
mca_llm_hostfile_allocate_resources(int jobid, int nodes, int procs)
{
ompi_list_t *hostlist = NULL;
ompi_list_t *nodelist = NULL;
int ret;
/* start by getting the full list of available resources */
hostlist = mca_llm_base_parse_hostfile(mca_llm_hostfile_filename);
if (NULL == hostlist) {
return OMPI_ERROR;
nodelist = mca_llm_base_parse_hostfile(mca_llm_hostfile_filename);
if (NULL == nodelist) {
return NULL;
}
ret = mca_llm_base_collapse_resources(hostlist);
ret = mca_llm_base_collapse_resources(nodelist);
if (OMPI_SUCCESS != ret) {
return ret;
mca_llm_base_deallocate(nodelist);
return NULL;
}
ret = mca_llm_base_map_resources(nodes, procs, hostlist);
*nodelist = hostlist;
return ret;
ret = mca_llm_base_map_resources(nodes, procs, nodelist);
if (OMPI_SUCCESS != ret) {
mca_llm_base_deallocate(nodelist);
return NULL;
}
return nodelist;
}

View file

@@ -8,21 +8,12 @@
#include "mca/llm/llm.h"
#include "mca/llm/base/base.h"
#include "mca/llm/base/base_internal.h"
int
mca_llm_hostfile_deallocate_resources(int jobid,
ompi_list_t *nodelist)
{
ompi_rte_node_allocation_t *node;
ompi_list_item_t *item;
while (NULL != (item = ompi_list_remove_first(nodelist))) {
node = (ompi_rte_node_allocation_t*) item;
OBJ_RELEASE(node);
}
OBJ_RELEASE(nodelist);
mca_llm_base_deallocate(nodelist);
return OMPI_SUCCESS;
}

View file

@@ -105,10 +105,8 @@ typedef mca_llm_base_component_1_0_0_t mca_llm_base_component_t;
*
* @warning The type for jobid will change in the near future
*/
typedef int (*mca_llm_base_allocate_resources_fn_t)(int jobid,
int nodes,
int procs,
ompi_list_t **nodelist);
typedef ompi_list_t*
(*mca_llm_base_allocate_resources_fn_t)(int jobid, int nodes,int procs);
/**

View file

@@ -61,9 +61,9 @@ mca_pcm_base_send_schedule(FILE *fp,
fprintf(fp, "%d\n", node->count);
/* INFO */
fprintf(fp, "%d\n", (int) ompi_list_get_size(&(node->info)));
for (info_item = ompi_list_get_first(&(node->info)) ;
info_item != ompi_list_get_end(&(node->info)) ;
fprintf(fp, "%d\n", (int) ompi_list_get_size(node->info));
for (info_item = ompi_list_get_first(node->info) ;
info_item != ompi_list_get_end(node->info) ;
info_item = ompi_list_get_next(info_item)) {
valpair = (ompi_rte_valuepair_t*) info_item;
@@ -79,7 +79,7 @@ mca_pcm_base_send_schedule(FILE *fp,
* check that the other side hasn't dropped our connection yet.
*
* Do this before the last print so we don't get swapped out and
* accidently catch an eof or something
* accidently catch the expected eof or something
*/
if (feof(fp) || ferror(fp)) return OMPI_ERROR;
@@ -101,6 +101,7 @@ get_key(FILE *fp, const char *key)
size_t pos = 0;
size_t len;
int val;
int countdown = 50;
len = strlen(key);
@@ -108,6 +109,8 @@ get_key(FILE *fp, const char *key)
val = fgetc(fp);
if (val == EOF) {
if (feof(fp)) {
countdown--;
if (0 == countdown) return OMPI_ERROR;
/* BWB: probably want to back off at some point */
clearerr(fp);
} else {
@@ -268,7 +271,7 @@ get_keyval(FILE *fp, char **keyp, char **valp)
static int
get_nodeinfo(FILE *fp, ompi_list_t info)
get_nodeinfo(FILE *fp, ompi_list_t *info)
{
ompi_rte_valuepair_t *newinfo;
int ret;
@@ -285,7 +288,7 @@ get_nodeinfo(FILE *fp, ompi_list_t info)
return ret;
}
ompi_list_append(&info, (ompi_list_item_t*) newinfo);
ompi_list_append(info, (ompi_list_item_t*) newinfo);
}
return OMPI_SUCCESS;

View file

@@ -6,7 +6,7 @@
#include "ompi_config.h"
#include "mca/pcm/pcm.h"
#include "mca/pcm/base/base.h"
#include <string.h>

View file

@@ -166,11 +166,10 @@ typedef char *
*
* @warning The type for jobid will change in the near future
*/
typedef int
typedef ompi_list_t*
(*mca_pcm_base_allocate_resources_fn_t)(int jobid,
int nodes,
int procs,
ompi_list_t **nodelist);
int procs);
/**
* Register a watch function for changes in the job status
@@ -208,8 +207,7 @@ typedef bool
*/
typedef int
(*mca_pcm_base_spawn_procs_fn_t)(int jobid,
ompi_list_t schedule_list,
ompi_vpid_t start_vpid);
ompi_list_t *schedule_list);
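
A hedged sketch of what a component now supplies for these two entry
points; the my_pcm_* names are hypothetical, but the signatures follow
the typedefs above (compare the rsh stubs later in this diff):

    /* Allocation: build and return the node list, or NULL on failure.
     * The component, not the caller, does the OBJ_NEW. */
    ompi_list_t *
    my_pcm_allocate_resources(int jobid, int nodes, int procs)
    {
        ompi_list_t *nodelist = OBJ_NEW(ompi_list_t);
        /* ... append ompi_rte_node_allocation_t entries ... */
        return nodelist;
    }

    /* Spawn: the schedule list now arrives by pointer, since an
     * ompi_list_t cannot be passed by value. */
    int
    my_pcm_spawn_procs(int jobid, ompi_list_t *schedule_list)
    {
        return OMPI_SUCCESS;
    }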
/**

View file

@@ -36,13 +36,12 @@ extern "C" {
* Interface
*/
char *mca_pcm_rsh_get_unique_name(void);
int mca_pcm_rsh_allocate_resources(int jobid, int nodes, int procs,
ompi_list_t **nodelist);
ompi_list_t* mca_pcm_rsh_allocate_resources(int jobid, int nodes,
int procs);
int mca_pcm_rsh_register_monitor(int jobid,
ompi_rte_monitor_fn_t func);
bool mca_pcm_rsh_can_spawn(void);
int mca_pcm_rsh_spawn_procs(int jobid, ompi_list_t schedule_list,
ompi_vpid_t start_vpid);
int mca_pcm_rsh_spawn_procs(int jobid, ompi_list_t *schedule_list);
ompi_process_name_t* mca_pcm_rsh_get_self(void);
int mca_pcm_rsh_get_peers(ompi_process_name_t **peers,
size_t *npeers);

View file

@@ -9,9 +9,8 @@
#include "mca/pcm/pcm.h"
#include "mca/pcm/rsh/src/pcm_rsh.h"
int
mca_pcm_rsh_allocate_resources(int jobid, int nodes, int procs,
ompi_list_t **nodelist)
ompi_list_t*
mca_pcm_rsh_allocate_resources(int jobid, int nodes, int procs)
{
return OMPI_SUCCESS;
return NULL;
}

View file

@@ -10,8 +10,7 @@
#include "mca/pcm/rsh/src/pcm_rsh.h"
int
mca_pcm_rsh_spawn_procs(int jobid, ompi_list_t schedule_list,
ompi_vpid_t start_vpid)
mca_pcm_rsh_spawn_procs(int jobid, ompi_list_t *schedule_list)
{
return OMPI_SUCCESS;
}

View file

@@ -172,3 +172,90 @@ int ompi_rte_init(bool *allow_multi_user_threads, bool *have_hidden_threads)
*/
return OMPI_SUCCESS;
}
/*
* interface type support
*/
static
void
ompi_rte_int_node_schedule_construct(ompi_object_t *obj)
{
ompi_rte_node_schedule_t *sched = (ompi_rte_node_schedule_t*) obj;
sched->nodelist = OBJ_NEW(ompi_list_t);
}
static
void
ompi_rte_int_node_schedule_destruct(ompi_object_t *obj)
{
ompi_rte_node_schedule_t *sched = (ompi_rte_node_schedule_t*) obj;
ompi_rte_node_allocation_t *node;
ompi_list_item_t *item;
while (NULL != (item = ompi_list_remove_first(sched->nodelist))) {
node = (ompi_rte_node_allocation_t*) item;
OBJ_RELEASE(node);
}
OBJ_RELEASE(sched->nodelist);
}
static
void
ompi_rte_int_node_allocation_construct(ompi_object_t *obj)
{
ompi_rte_node_allocation_t *node = (ompi_rte_node_allocation_t*) obj;
node->info = OBJ_NEW(ompi_list_t);
}
static
void
ompi_rte_int_node_allocation_destruct(ompi_object_t *obj)
{
ompi_rte_node_allocation_t *node = (ompi_rte_node_allocation_t*) obj;
ompi_rte_valuepair_t *valpair;
ompi_list_item_t *item;
while (NULL != (item = ompi_list_remove_first(node->info))) {
valpair = (ompi_rte_valuepair_t*) item;
OBJ_RELEASE(valpair);
}
OBJ_RELEASE(node->info);
}
static
void
ompi_rte_int_valuepair_construct(ompi_object_t *obj)
{
ompi_rte_valuepair_t *valpair = (ompi_rte_valuepair_t*) obj;
valpair->key = NULL;
valpair->value = NULL;
}
static
void
ompi_rte_int_valuepair_destruct(ompi_object_t *obj)
{
ompi_rte_valuepair_t *valpair = (ompi_rte_valuepair_t*) obj;
if (NULL != valpair->key) free(valpair->key);
if (NULL != valpair->value) free(valpair->value);
}
OBJ_CLASS_INSTANCE(ompi_rte_node_schedule_t, ompi_list_item_t,
ompi_rte_int_node_schedule_construct,
ompi_rte_int_node_schedule_destruct);
OBJ_CLASS_INSTANCE(ompi_rte_node_allocation_t, ompi_list_item_t,
ompi_rte_int_node_allocation_construct,
ompi_rte_int_node_allocation_destruct);
OBJ_CLASS_INSTANCE(ompi_rte_valuepair_t, ompi_list_item_t,
ompi_rte_int_valuepair_construct,
ompi_rte_int_valuepair_destruct);
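
With these class instances registered, OBJ_NEW on a schedule or an
allocation also builds its embedded list, and OBJ_RELEASE tears the
whole structure down recursively; a short lifecycle sketch based on the
constructors and destructors above:

    ompi_rte_node_schedule_t *sched = OBJ_NEW(ompi_rte_node_schedule_t);
    /* sched->nodelist was OBJ_NEWed by the constructor */
    ompi_rte_node_allocation_t *node = OBJ_NEW(ompi_rte_node_allocation_t);
    /* node->info was OBJ_NEWed likewise */
    ompi_list_append(sched->nodelist, (ompi_list_item_t*) node);
    OBJ_RELEASE(sched);  /* destructor releases each node, then the list */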

View file

@@ -8,15 +8,15 @@
#include "runtime/runtime_types.h"
#include "mca/pcm/pcm.h"
int
ompi_rte_allocate_resources(int jobid, int nodes, int procs,
ompi_list_t **nodelist)
ompi_list_t*
ompi_rte_allocate_resources(int jobid, int nodes, int procs)
{
if (NULL == mca_pcm.pcm_allocate_resources) {
return OMPI_ERROR;
}
return mca_pcm.pcm_allocate_resources(jobid, nodes, procs, nodelist);
return mca_pcm.pcm_allocate_resources(jobid, nodes, procs);
}

View file

@@ -21,14 +21,13 @@ ompi_rte_can_spawn(void)
int
ompi_rte_spawn_procs(int jobid, ompi_list_t schedule_list,
ompi_vpid_t start_vpid)
ompi_rte_spawn_procs(int jobid, ompi_list_t *schedule_list)
{
if (NULL == mca_pcm.pcm_spawn_procs) {
return OMPI_ERROR;
}
return mca_pcm.pcm_spawn_procs(jobid, schedule_list, start_vpid);
return mca_pcm.pcm_spawn_procs(jobid, schedule_list);
}
@@ -36,7 +35,7 @@ ompi_process_name_t*
ompi_rte_get_self(void)
{
if (NULL == mca_pcm.pcm_self) {
return OMPI_ERROR;
return NULL;
}
return mca_pcm.pcm_self();

View file

@@ -93,13 +93,12 @@ extern "C" {
* allocate all cpus on <code>nodes</code> nodes
* @param procs (IN) Number of processors to try to allocate. See the note
* for <code>nodes</code> for usage.
* @param nodelist (OUT) List of <code>ompi_rte_node_allocation_t</code>s
* @return List of <code>ompi_rte_node_allocation_t</code>s
* describing the allocated resources.
*
* @warning The type for jobid will change in the near future
*/
int ompi_rte_allocate_resources(int jobid, int nodes, int procs,
ompi_list_t **nodelist);
ompi_list_t* ompi_rte_allocate_resources(int jobid, int nodes, int procs);
/**
@@ -121,8 +120,7 @@ extern "C" {
*
* @warning Parameter list will probably change in the near future.
*/
int ompi_rte_spawn_procs(int jobid, ompi_list_t schedule_list,
ompi_vpid_t start_vpid);
int ompi_rte_spawn_procs(int jobid, ompi_list_t *schedule_list);
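
Putting the two calls together, a hedged caller-side sketch of the new
prototypes (jobid, the counts, and the schedule fields are illustrative;
compare the sched_comm test changes below):

    ompi_list_t *nodelist = ompi_rte_allocate_resources(jobid, nodes, procs);
    if (NULL == nodelist) {
        return OMPI_ERROR;
    }

    ompi_list_t *schedule_list = OBJ_NEW(ompi_list_t);
    ompi_rte_node_schedule_t *sched = OBJ_NEW(ompi_rte_node_schedule_t);
    /* ... fill in sched->argv, argc, env, cwd, and sched->nodelist ... */
    ompi_list_append(schedule_list, (ompi_list_item_t*) sched);

    if (OMPI_SUCCESS != ompi_rte_spawn_procs(jobid, schedule_list)) {
        return OMPI_ERROR;
    }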
/**

View file

@@ -45,7 +45,7 @@ struct ompi_rte_node_allocation_t {
ompi_list_item_t super;
char hostname[MAXHOSTNAMELEN];
int count;
ompi_list_t info;
ompi_list_t *info;
};
typedef struct ompi_rte_node_allocation_t ompi_rte_node_allocation_t;
OBJ_CLASS_DECLARATION(ompi_rte_node_allocation_t);
@@ -61,7 +61,7 @@ struct ompi_rte_node_schedule_t {
int argc;
char **env;
char *cwd;
ompi_list_t nodelist;
ompi_list_t *nodelist;
};
typedef struct ompi_rte_node_schedule_t ompi_rte_node_schedule_t;
OBJ_CLASS_DECLARATION(ompi_rte_node_schedule_t);
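
These pointer fields are what makes the "swap lists around" note in the
commit message cheap; a hedged illustration of the intended pattern when
handing an allocated nodelist to a schedule:

    /* drop the empty list the constructor made, then swap in ours */
    OBJ_RELEASE(sched->nodelist);
    sched->nodelist = nodelist;  /* pointer assignment, no list copying */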

View file

@@ -99,7 +99,8 @@ main(int argc, char *argv[])
*/
/* BWB - fix jobid, procs, and nodes */
if (OMPI_SUCCESS != ompi_rte_allocate_resources(0, 0, 2, &nodelist)) {
nodelist = ompi_rte_allocate_resources(0, 0, 2);
if (NULL == nodelist) {
/* BWB show_help */
printf("show_help: ompi_rte_allocate_resources failed\n");
return -1;

View file

@@ -14,6 +14,9 @@
static char *cmd1_str="diff ./test1_out ./test1_out_std";
static char *cmd2_str="diff ./test2_out ./test2_out_std";
extern void mca_llm_base_setup(void);
int
main(int argc, char *argv[])
{
@@ -27,6 +30,9 @@ main(int argc, char *argv[])
test_init("parse_hostfile_t");
/* setup the llm */
mca_llm_base_setup();
/* Open output files for the tests */
test1_out = fopen("./test1_out", "w+" );
if( test1_out == NULL ) {
@@ -52,8 +58,8 @@ main(int argc, char *argv[])
node = (ompi_rte_node_allocation_t*) nodeitem;
fprintf(test1_out, "\t%s %d\n", node->hostname, node->count);
for (valpairitem = ompi_list_get_first(&(node->info));
valpairitem != ompi_list_get_end(&(node->info));
for (valpairitem = ompi_list_get_first(node->info);
valpairitem != ompi_list_get_end(node->info);
valpairitem = ompi_list_get_next(valpairitem)) {
valpair = (ompi_rte_valuepair_t*) valpairitem;
fprintf(test1_out, "\t\t%s = %s\n", valpair->key, valpair->value);
@@ -81,8 +87,8 @@ main(int argc, char *argv[])
node = (ompi_rte_node_allocation_t*) nodeitem;
fprintf(test2_out, "\t%s %d\n", node->hostname, node->count);
for (valpairitem = ompi_list_get_first(&(node->info));
valpairitem != ompi_list_get_end(&(node->info));
for (valpairitem = ompi_list_get_first(node->info);
valpairitem != ompi_list_get_end(node->info);
valpairitem = ompi_list_get_next(valpairitem)) {
valpair = (ompi_rte_valuepair_t*) valpairitem;
fprintf(test2_out, "\t\t%s = %s\n", valpair->key, valpair->value);

View file

@@ -11,11 +11,16 @@
#include "runtime/runtime_types.h"
#include "mca/pcm/base/base.h"
extern char **environ;
static char *cmd1_str="diff ./test1_out ./test1_out_std";
static char *cmd2_str="diff ./test2_out ./test2_out_std";
char *env[] = {
"ENV1=blah blah blah",
"ENV2=foo bar is fun",
"ENV3=123",
NULL
};
int
main(int argc, char *argv[])
{
@@ -39,15 +44,14 @@ main(int argc, char *argv[])
exit(-1);
}
schedout = malloc(sizeof(ompi_rte_node_schedule_t));
OBJ_CONSTRUCT(&(schedout->nodelist), ompi_list_t);
schedout = OBJ_NEW(ompi_rte_node_schedule_t);
schedout->argv = argv;
schedout->argc = argc;
schedout->env = environ;
schedout->env = env;
schedout->cwd = "/foo/bar/baz";
result = mca_pcm_base_send_schedule(test1_out, schedout,
&(schedout->nodelist));
schedout->nodelist);
if (result != OMPI_SUCCESS) {
test_failure("send_schedule failed");
exit(1);
@@ -65,18 +69,17 @@ main(int argc, char *argv[])
}
/* test 2 */
schedin = malloc(sizeof(ompi_rte_node_schedule_t));
OBJ_CONSTRUCT(&(schedin->nodelist), ompi_list_t);
schedin = OBJ_NEW(ompi_rte_node_schedule_t);
test2_in = fopen("./test1_out", "r");
result = mca_pcm_base_recv_schedule(test2_in, schedin,
&(schedin->nodelist));
schedin->nodelist);
if (result != OMPI_SUCCESS) {
test_failure("recv_schedule failed");
exit(1);
}
mca_pcm_base_send_schedule(test2_out, schedin, &(schedin->nodelist));
mca_pcm_base_send_schedule(test2_out, schedin, schedin->nodelist);
if (result != OMPI_SUCCESS) {
test_failure("send_schedule (2) failed");
exit(1);
@@ -92,6 +95,9 @@ main(int argc, char *argv[])
else {
test_failure( "sched_comm test2 failed" );
}
OBJ_RELEASE(schedin);
OBJ_RELEASE(schedout);
test_finalize();
return 0;

View file

@@ -2,44 +2,10 @@
1
1
12 ./sched_comm
37
24 SECURITYSESSIONID=20f500
20 HOME=/Users/bbarrett
15 SHELL=/bin/tcsh
13 USER=bbarrett
192 PATH=/Users/bbarrett/research/Software/powerpc-apple-darwin7/bin:/Users/bbarrett/Software/powerpc-apple-darwin7/bin:/sw/bin:/sw/sbin:/bin:/sbin:/usr/bin:/usr/sbin:/usr/X11R6/bin:/usr/local/bin
33 __CF_USER_TEXT_ENCODING=0x1F5:0:0
27 TERM_PROGRAM=Apple_Terminal
24 TERM_PROGRAM_VERSION=100
16 TERM=xterm-color
16 LOGNAME=bbarrett
17 HOSTTYPE=powermac
12 VENDOR=apple
13 OSTYPE=darwin
16 MACHTYPE=powerpc
7 SHLVL=1
57 PWD=/Users/bbarrett/research/ompi/trunk/test/mca/pcm/base
14 GROUP=bbarrett
17 HOST=fluffy.local
27 OMPI_MCA_verbose=level:1000
25 BWB_HAVE_RUN_TCSHRC_PRE=1
164 MANPATH=::/Users/bbarrett/research/Software/powerpc-apple-darwin7/man:/Users/bbarrett/Software/powerpc-apple-darwin7/man:/sw/share/man:/usr/share/man:/usr/X11R6/man
48 INFOPATH=/sw/share/info:/sw/info:/usr/share/info
43 PERL5LIB=/sw/lib/perl5:/sw/lib/perl5/darwin
33 XAPPLRESDIR=/sw/etc/app-defaults/
10 DISPLAY=:0
30 BWB_ARCH=powerpc-apple-darwin7
26 ARCH=powerpc-apple-darwin7
21 BWB_HAVE_RUN_TCSHRC=1
50 MSP=/Users/bbarrett/Software/powerpc-apple-darwin7
126 LD_LIBRARY_PATH=/Users/bbarrett/research/Software/powerpc-apple-darwin7/lib:/Users/bbarrett/Software/powerpc-apple-darwin7/lib
60 DDIR=/Users/bbarrett/research/Software/powerpc-apple-darwin7
11 CVS_RSH=ssh
12 CVSEDITOR=vi
13 SVN_EDITOR=vi
12 EDITOR=emacs
10 LAMRSH=ssh
443 LS_COLORS=no=00:fi=00:di=01;34:ln=01;36:pi=40;33:so=01;35:bd=40;33;01:cd=40;33;01:or=41;37;01:ex=01;32:*.tar=01;31:*.tgz=01;31:*.arj=01;31:*.taz=01;31:*.lzh=01;31:*.zip=01;31:*.z=01;31:*.Z=01;31:*.gz=01;31:*.deb=01;31:*.bz2=01;31:*.jpg=01;35:*.JPG=01;35:*.gif=01;35:*.bmp=01;35:*.ppm=01;35:*.tga=01;35:*.xbm=01;35:*.xpm=01;35:*.tif=01;35:*.mpg=01;37:*.avi=01;37:*.gl=01;37:*.dl=01;37:*.cc=01;35:*.c=01;35:*.h=01;35:*.C=01;35:*.hh=01;35:*.o=35:
3
19 ENV1=blah blah blah
19 ENV2=foo bar is fun
8 ENV3=123
12 /foo/bar/baz
0
@MCA_PCM_END@

View file

@@ -2,44 +2,10 @@
1
1
12 ./sched_comm
37
24 SECURITYSESSIONID=20f500
20 HOME=/Users/bbarrett
15 SHELL=/bin/tcsh
13 USER=bbarrett
192 PATH=/Users/bbarrett/research/Software/powerpc-apple-darwin7/bin:/Users/bbarrett/Software/powerpc-apple-darwin7/bin:/sw/bin:/sw/sbin:/bin:/sbin:/usr/bin:/usr/sbin:/usr/X11R6/bin:/usr/local/bin
33 __CF_USER_TEXT_ENCODING=0x1F5:0:0
27 TERM_PROGRAM=Apple_Terminal
24 TERM_PROGRAM_VERSION=100
16 TERM=xterm-color
16 LOGNAME=bbarrett
17 HOSTTYPE=powermac
12 VENDOR=apple
13 OSTYPE=darwin
16 MACHTYPE=powerpc
7 SHLVL=1
57 PWD=/Users/bbarrett/research/ompi/trunk/test/mca/pcm/base
14 GROUP=bbarrett
17 HOST=fluffy.local
27 OMPI_MCA_verbose=level:1000
25 BWB_HAVE_RUN_TCSHRC_PRE=1
164 MANPATH=::/Users/bbarrett/research/Software/powerpc-apple-darwin7/man:/Users/bbarrett/Software/powerpc-apple-darwin7/man:/sw/share/man:/usr/share/man:/usr/X11R6/man
48 INFOPATH=/sw/share/info:/sw/info:/usr/share/info
43 PERL5LIB=/sw/lib/perl5:/sw/lib/perl5/darwin
33 XAPPLRESDIR=/sw/etc/app-defaults/
10 DISPLAY=:0
30 BWB_ARCH=powerpc-apple-darwin7
26 ARCH=powerpc-apple-darwin7
21 BWB_HAVE_RUN_TCSHRC=1
50 MSP=/Users/bbarrett/Software/powerpc-apple-darwin7
126 LD_LIBRARY_PATH=/Users/bbarrett/research/Software/powerpc-apple-darwin7/lib:/Users/bbarrett/Software/powerpc-apple-darwin7/lib
60 DDIR=/Users/bbarrett/research/Software/powerpc-apple-darwin7
11 CVS_RSH=ssh
12 CVSEDITOR=vi
13 SVN_EDITOR=vi
12 EDITOR=emacs
10 LAMRSH=ssh
443 LS_COLORS=no=00:fi=00:di=01;34:ln=01;36:pi=40;33:so=01;35:bd=40;33;01:cd=40;33;01:or=41;37;01:ex=01;32:*.tar=01;31:*.tgz=01;31:*.arj=01;31:*.taz=01;31:*.lzh=01;31:*.zip=01;31:*.z=01;31:*.Z=01;31:*.gz=01;31:*.deb=01;31:*.bz2=01;31:*.jpg=01;35:*.JPG=01;35:*.gif=01;35:*.bmp=01;35:*.ppm=01;35:*.tga=01;35:*.xbm=01;35:*.xpm=01;35:*.tif=01;35:*.mpg=01;37:*.avi=01;37:*.gl=01;37:*.dl=01;37:*.cc=01;35:*.c=01;35:*.h=01;35:*.C=01;35:*.hh=01;35:*.o=35:
3
19 ENV1=blah blah blah
19 ENV2=foo bar is fun
8 ENV3=123
12 /foo/bar/baz
0
@MCA_PCM_END@