
Revert r21097, as it results in multiple instantiations of global variables. Instead, fix the problem by including orte_globals.h in orte_init.c.

Since I already had some changes in there, add in the rmaps rank_file changes as well - they should work okay, but are not fully tested.

This commit was SVN r21099.

The following SVN revision numbers were found above:
  r21097 --> open-mpi/ompi@88ae934c26
This commit is contained in:
Ralph Castain 2009-04-29 02:13:14 +00:00
parent 48e4574907
commit 5fa3b38d3c
13 changed files with 450 additions and 360 deletions
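For context, the bug reverted here is the classic header-definition mistake: a header that defines a global (rather than declaring it extern) instantiates a fresh copy in every translation unit that includes it. A minimal sketch of the pattern and its fix, with hypothetical names (the real change is to orte_name_wildcard/orte_name_invalid in orte_globals.h, in the second-to-last file below):

    /* globals.h - WRONG: this is a definition, so every .c file that
     * includes the header creates its own instance of the variable */
    int my_global;

    /* globals.h - RIGHT: the header only declares the variable... */
    extern int my_global;

    /* globals.c - ...and exactly one translation unit defines it */
    int my_global = 0;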

View file

@@ -256,6 +256,7 @@ PROCESS:
int orte_rmaps_base_claim_slot(orte_job_t *jdata,
orte_node_t *current_node,
orte_vpid_t vpid,
char *slot_list,
orte_std_cntr_t app_idx,
opal_list_t *nodes,
bool oversubscribe,
@@ -278,10 +279,9 @@ int orte_rmaps_base_claim_slot(orte_job_t *jdata,
proc->app_idx = app_idx;
OBJ_RETAIN(current_node); /* maintain accounting on object */
if ( NULL != current_node->slot_list) {
proc->slot_list = strdup(current_node->slot_list);
if ( NULL != slot_list) {
proc->slot_list = strdup(slot_list);
}
current_node->slot_list = NULL;
proc->node = current_node;
proc->nodename = current_node->name;
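The net effect of this hunk: the slot list now arrives as an explicit slot_list argument and is strdup'd onto the proc, rather than being smuggled through current_node->slot_list and NULLed afterward. A sketch of the resulting call-site pattern (argument values illustrative; mappers that have no slot list simply pass NULL):

    /* the slot list travels as a parameter now; NULL means "none" */
    rc = orte_rmaps_base_claim_slot(jdata, node,
                                    vpid_start + num_alloc,
                                    orte_rmaps_base.slot_list, /* or NULL */
                                    app->idx, &node_list,
                                    jdata->map->oversubscribe, true);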

View file

@@ -73,6 +73,7 @@ ORTE_DECLSPEC int orte_rmaps_base_get_mapped_targets(opal_list_t *mapped_node_li
ORTE_DECLSPEC int orte_rmaps_base_claim_slot(orte_job_t *jdata,
orte_node_t *current_node,
orte_vpid_t vpid,
char *slot_list,
orte_std_cntr_t app_idx,
opal_list_t *nodes,
bool oversubscribe,

View file

@@ -99,8 +99,3 @@ request to launch on a %d process-per-node basis - only %d slots/node were
available.
Either request fewer processes/node, or obtain a larger allocation.
[orte-rmaps-rf:no-np-and-user-map]
You have specified a rank-to-node/slot mapping, but failed to provide
the number of processes to be executed. This information is critical
for the rank mapping component.

View file

@@ -33,9 +33,9 @@
#endif /* HAVE_STRING_H */
#include "opal/mca/base/mca_base_param.h"
#include "opal/util/trace.h"
#include "opal/util/argv.h"
#include "opal/util/if.h"
#include "opal/class/opal_pointer_array.h"
#include "orte/mca/errmgr/errmgr.h"
#include "orte/util/show_help.h"
@@ -46,95 +46,24 @@
#include "orte/runtime/orte_globals.h"
#include "orte/mca/ras/ras_types.h"
static int orte_rmaps_rank_file_parse(const char *, int);
static int orte_rmaps_rank_file_parse(const char *);
static char *orte_rmaps_rank_file_parse_string_or_int(void);
char *orte_rmaps_rank_file_path = NULL;
static const char *orte_rmaps_rank_file_name_cur = NULL;
static opal_mutex_t orte_rmaps_rank_file_mutex;
char *orte_rmaps_rank_file_slot_list;
/*
* Local variable
*/
static opal_list_item_t *cur_node_item = NULL;
orte_rmaps_rank_file_map_t *rankmap = NULL;
static int map_app_by_user_map(
orte_app_context_t* app,
orte_job_t* jdata,
orte_vpid_t vpid_start,
opal_list_t* nodes,
opal_list_t* procs)
{
int rc = ORTE_SUCCESS;
opal_list_item_t *next;
orte_node_t *node;
orte_std_cntr_t round_cnt, num_alloc = 0;
OPAL_TRACE(2);
if ( NULL == orte_rmaps_rank_file_path ) {
return ORTE_SUCCESS;
}
while (num_alloc < app->num_procs) {
/** see if any nodes remain unused and available. We need to do this check
* each time since we may remove nodes from the list (as they become fully
* used) as we cycle through the loop */
if(0 >= opal_list_get_size(nodes) ) {
/* No more nodes to allocate :( */
orte_show_help("help-rmaps_rank_file.txt", "orte-rmaps-rf:alloc-error",
true, app->num_procs, app->app);
return ORTE_ERR_SILENT;
}
/* Save the next node we can use before claiming slots, since
* we may need to prune the nodes list removing overused nodes.
* Wrap around to beginning if we are at the end of the list */
round_cnt=0;
if ( -1 != rankmap[vpid_start + num_alloc].rank) {
do {
if (opal_list_get_end(nodes) == opal_list_get_next(cur_node_item)) {
next = opal_list_get_first(nodes);
round_cnt++;
} else {
next = opal_list_get_next(cur_node_item);
}
/* Allocate a slot on this node */
node = (orte_node_t*) cur_node_item;
cur_node_item = next;
if ( round_cnt == 2 ) {
orte_show_help("help-rmaps_rank_file.txt","bad-host", true,rankmap[num_alloc+vpid_start].node_name);
ORTE_ERROR_LOG(ORTE_ERR_BAD_PARAM);
return ORTE_ERR_BAD_PARAM;
}
} while ( strcmp(node->name, rankmap[num_alloc + vpid_start].node_name));
node->slot_list = strdup(rankmap[num_alloc+vpid_start].slot_list);
if (ORTE_SUCCESS != (rc = orte_rmaps_base_claim_slot(jdata, node, rankmap[num_alloc+vpid_start].rank, app->idx,
nodes, jdata->map->oversubscribe, true))) {
/** if the code is ORTE_ERR_NODE_FULLY_USED, then we know this
* really isn't an error - we just need to break from the loop
* since the node is fully used up. For now, just don't report
* an error
*/
if (ORTE_ERR_NODE_FULLY_USED != rc) {
ORTE_ERROR_LOG(rc);
return rc;
}
}
}
++num_alloc;
}
return ORTE_SUCCESS;
}
static opal_pointer_array_t rankmap;
/*
* Create a default mapping for the application, mapping rank by rank_file and
* by node.
*/
static int map_app_by_node(
orte_app_context_t* app,
static int map_app_by_node(orte_app_context_t* app,
orte_job_t* jdata,
orte_vpid_t vpid_start,
opal_list_t* nodes )
@@ -144,8 +73,6 @@ static int map_app_by_node(
orte_node_t *node;
orte_std_cntr_t num_alloc = 0;
OPAL_TRACE(2);
/* This loop continues until all procs have been mapped or we run
out of resources. We determine that we have "run out of
resources" when all nodes have slots_max processes mapped to them,
@@ -164,7 +91,8 @@ static int map_app_by_node(
*/
while (num_alloc < app->num_procs) {
if ( -1 != rankmap[num_alloc + vpid_start].rank) {
if (NULL != opal_pointer_array_get_item(&rankmap, vpid_start+num_alloc)) {
/* this rank was already mapped */
++num_alloc;
continue;
}
@@ -189,10 +117,8 @@ static int map_app_by_node(
}
/* Allocate a slot on this node */
node = (orte_node_t*) cur_node_item;
if ( NULL != orte_rmaps_base.slot_list ) {
node->slot_list = strdup(orte_rmaps_base.slot_list);
}
if (ORTE_SUCCESS != (rc = orte_rmaps_base_claim_slot(jdata, node, vpid_start + num_alloc, app->idx,
/* pass the base slot list in case it was provided */
if (ORTE_SUCCESS != (rc = orte_rmaps_base_claim_slot(jdata, node, vpid_start+num_alloc, orte_rmaps_base.slot_list, app->idx,
nodes, jdata->map->oversubscribe, true))) {
/** if the code is ORTE_ERR_NODE_FULLY_USED, then we know this
* really isn't an error - we just need to break from the loop
@@ -215,8 +141,7 @@ static int map_app_by_node(
* Create a default mapping for the application, scheduling ranks by rank_file
* and by slot.
*/
static int map_app_by_slot(
orte_app_context_t* app,
static int map_app_by_slot(orte_app_context_t* app,
orte_job_t* jdata,
orte_vpid_t vpid_start,
opal_list_t* nodes )
@@ -226,7 +151,6 @@ static int map_app_by_slot(
orte_node_t *node;
opal_list_item_t *next;
OPAL_TRACE(2);
/* This loop continues until all procs have been mapped or we run
out of resources. We determine that we have "run out of
resources" when either all nodes have slots_max processes mapped to them,
@@ -292,14 +216,13 @@ static int map_app_by_slot(
}
for( i = 0; i < num_slots_to_take; ++i) {
if ( -1 != rankmap[num_alloc + vpid_start].rank) {
if (NULL != opal_pointer_array_get_item(&rankmap, vpid_start+num_alloc)) {
/* this rank was already mapped */
++num_alloc;
continue;
}
if ( NULL != orte_rmaps_base.slot_list ) {
node->slot_list = strdup(orte_rmaps_base.slot_list);
}
if (ORTE_SUCCESS != (rc = orte_rmaps_base_claim_slot(jdata, node, vpid_start + num_alloc, app->idx,
/* pass the base slot list in case it was provided */
if (ORTE_SUCCESS != (rc = orte_rmaps_base_claim_slot(jdata, node, vpid_start+num_alloc, orte_rmaps_base.slot_list, app->idx,
nodes, jdata->map->oversubscribe, true))) {
/** if the code is ORTE_ERR_NODE_FULLY_USED, then we know this
* really isn't an error - we just need to break from the loop
@@ -311,7 +234,7 @@ static int map_app_by_slot(
return rc;
}
}
/* Update the number of procs allocated */
/* Update the rank */
++num_alloc;
/** if all the procs have been mapped OR we have fully used up this node, then
@@ -336,7 +259,6 @@ static int map_app_by_slot(
return ORTE_SUCCESS;
}
/*
* Create a rank_file mapping for the job.
*/
@@ -344,41 +266,189 @@ static int orte_rmaps_rf_map(orte_job_t *jdata)
{
orte_job_map_t *map;
orte_app_context_t *app=NULL, **apps;
orte_std_cntr_t i, j;
opal_list_t node_list, procs;
orte_std_cntr_t i, k;
orte_vpid_t total_procs;
opal_list_t node_list;
opal_list_item_t *item;
orte_node_t *node;
orte_vpid_t vpid_start;
orte_node_t *node, *nd;
orte_vpid_t rank, vpid_start;
orte_std_cntr_t num_nodes, num_slots;
int rc;
orte_rmaps_rank_file_map_t *rfmap;
orte_std_cntr_t slots_per_node;
int rc;
OPAL_TRACE(1);
/* conveniece def */
/* convenience def */
map = jdata->map;
apps = (orte_app_context_t**)jdata->apps->addr;
/* SANITY CHECKS */
/* if the number of processes wasn't specified, then we know there can be only
* one app_context allowed in the launch, and that we are to launch it across
* all available slots. We'll double-check the single app_context rule first
*/
if (0 == apps[0]->num_procs && 1 < jdata->num_apps) {
orte_show_help("help-rmaps_rank_file.txt", "orte-rmaps-rf:multi-apps-and-zero-np",
true, jdata->num_apps, NULL);
rc = ORTE_ERR_SILENT;
goto error;
}
/* likewise, we only support pernode options for a single app_context */
if (map->pernode && 1 < jdata->num_apps) {
orte_show_help("help-rmaps_rank_file.txt", "orte-rmaps-rf:multi-apps-and-zero-np",
true, jdata->num_apps, NULL);
rc = ORTE_ERR_SILENT;
goto error;
}
/* END SANITY CHECKS */
/* flag the map as containing cpu_lists */
map->cpu_lists = true;
/* start at the beginning... */
vpid_start = 0;
jdata->num_procs = 0;
total_procs = 0;
OBJ_CONSTRUCT(&node_list, opal_list_t);
OBJ_CONSTRUCT(&rankmap, opal_pointer_array_t);
/* parse the rankfile, storing its results in the rankmap */
if ( NULL != orte_rmaps_rank_file_path ) {
if ( ORTE_SUCCESS != (rc = orte_rmaps_rank_file_parse(orte_rmaps_rank_file_path))) {
ORTE_ERROR_LOG(rc);
goto error;
}
}
/* cycle through the app_contexts, mapping them sequentially */
for(i=0; i < jdata->num_apps; i++) {
app = apps[i];
/* if the number of processes wasn't specified, then we know there can be only
* one app_context allowed in the launch, and that we are to launch it across
* all available slots. We'll double-check the single app_context rule first
/* for each app_context, we have to get the list of nodes that it can
* use since that can now be modified with a hostfile and/or -host
* option
*/
if (0 == app->num_procs && 1 < jdata->num_apps) {
orte_show_help("help-rmaps_rank_file.txt", "orte-rmaps-rf:multi-apps-and-zero-np",
true, jdata->num_apps, NULL);
if(ORTE_SUCCESS != (rc = orte_rmaps_base_get_target_nodes(&node_list, &num_slots, app,
map->policy))) {
ORTE_ERROR_LOG(rc);
goto error;
}
num_nodes = (orte_std_cntr_t)opal_list_get_size(&node_list);
/* we already checked for sanity, so these are okay to just do here */
if (map->pernode && map->npernode == 1) {
/* there are three use-cases that we need to deal with:
* (a) if -np was not provided, then we just use the number of nodes
* (b) if -np was provided AND #procs > #nodes, then error out
* (c) if -np was provided AND #procs <= #nodes, then launch
* the specified #procs one/node. In this case, we just
* leave app->num_procs alone
*/
if (0 == app->num_procs) {
app->num_procs = num_nodes;
} else if (app->num_procs > num_nodes) {
orte_show_help("help-rmaps_rank_file.txt", "orte-rmaps-rf:per-node-and-too-many-procs",
true, app->num_procs, num_nodes, NULL);
rc = ORTE_ERR_SILENT;
goto error;
}
} else if (map->pernode && map->npernode > 1) {
/* first, let's check to see if there are enough slots/node to
* meet the request - error out if not
*/
slots_per_node = num_slots / num_nodes;
if (map->npernode > slots_per_node) {
orte_show_help("help-rmaps_rank_file.txt", "orte-rmaps-rf:n-per-node-and-not-enough-slots",
true, map->npernode, slots_per_node, NULL);
rc = ORTE_ERR_SILENT;
goto error;
}
/* there are three use-cases that we need to deal with:
* (a) if -np was not provided, then we just use the n/node * #nodes
* (b) if -np was provided AND #procs > (n/node * #nodes), then error out
* (c) if -np was provided AND #procs <= (n/node * #nodes), then launch
* the specified #procs n/node. In this case, we just
* leave app->num_procs alone
*/
if (0 == app->num_procs) {
/* set the num_procs to equal the specified num/node * the number of nodes */
app->num_procs = map->npernode * num_nodes;
} else if (app->num_procs > (map->npernode * num_nodes)) {
orte_show_help("help-rmaps_rank_file.txt", "orte-rmaps-rf:n-per-node-and-too-many-procs",
true, app->num_procs, map->npernode, num_nodes, num_slots, NULL);
rc = ORTE_ERR_SILENT;
goto error;
}
} else if (0 == app->num_procs) {
/** set the num_procs to equal the number of slots on these mapped nodes */
app->num_procs = num_slots;
}
/* keep track of the total #procs in this job */
total_procs += app->num_procs;
for (k=0; k < app->num_procs; k++) {
rank = vpid_start + k;
/* get the rankfile entry for this rank */
if (NULL == (rfmap = opal_pointer_array_get_item(&rankmap, rank))) {
/* no entry for this rank */
continue;
}
/* find the node where this proc was assigned */
node = NULL;
for (item = opal_list_get_first(&node_list);
item != opal_list_get_end(&node_list);
item = opal_list_get_next(item)) {
nd = (orte_node_t*)item;
if (NULL != rfmap->node_name &&
0 == strcmp(nd->name, rfmap->node_name)) {
node = nd;
break;
}
}
if (NULL == node) {
orte_show_help("help-rmaps_rank_file.txt","bad-host", true, rfmap->node_name);
return ORTE_ERR_SILENT;
}
if (NULL == rfmap->slot_list) {
/* rank was specified but no slot list given - that's an error */
orte_show_help("help-rmaps_rank_file.txt","no-slot-list", true, rank, rfmap->node_name);
return ORTE_ERR_SILENT;
}
if (ORTE_SUCCESS != (rc = orte_rmaps_base_claim_slot(jdata, node, rank, rfmap->slot_list,
app->idx, &node_list, jdata->map->oversubscribe, false))) {
ORTE_ERROR_LOG(rc);
return rc;
}
jdata->num_procs++;
}
/* update the starting point */
vpid_start += app->num_procs;
/* cleanup the node list - it can differ from one app_context
* to another, so we have to get it every time
*/
while(NULL != (item = opal_list_remove_first(&node_list))) {
OBJ_RELEASE(item);
}
OBJ_DESTRUCT(&node_list);
OBJ_CONSTRUCT(&node_list, opal_list_t);
}
OBJ_DESTRUCT(&node_list);
/* did we map all the procs, or did the user's rankfile not contain
* a specification for every rank?
*/
if (jdata->num_procs < total_procs) {
/* we need to map the remainder of the procs according to the
* mapping policy
*/
vpid_start = 0;
for(i=0; i < jdata->num_apps; i++) {
app = apps[i];
/* for each app_context, we have to get the list of nodes that it can
* use since that can now be modified with a hostfile and/or -host
@@ -389,20 +459,6 @@ static int orte_rmaps_rf_map(orte_job_t *jdata)
map->policy))) {
ORTE_ERROR_LOG(rc);
goto error;
}
num_nodes = (orte_std_cntr_t)opal_list_get_size(&node_list);
rankmap = (orte_rmaps_rank_file_map_t *) malloc ( app->num_procs * sizeof(orte_rmaps_rank_file_map_t));
for ( j=0; j<app->num_procs; j++) {
rankmap[j].rank = -1;
rankmap[j].slot_list = (char *)malloc(64*sizeof(char));
}
if ( NULL != orte_rmaps_rank_file_path ) {
if ( ORTE_SUCCESS != (rc = orte_rmaps_rank_file_parse(orte_rmaps_rank_file_path, app->num_procs))) {
ORTE_ERROR_LOG(rc);
goto error;
}
}
/* if a bookmark exists from some prior mapping, set us to start there */
if (NULL != jdata->bookmark) {
@@ -426,55 +482,16 @@ static int orte_rmaps_rf_map(orte_job_t *jdata)
/* if no bookmark, then just start at the beginning of the list */
cur_node_item = opal_list_get_first(&node_list);
}
if (map->pernode && map->npernode == 1) {
if (app->num_procs > num_nodes) {
orte_show_help("help-rmaps_rank_file.txt", "orte-rmaps-rf:per-node-and-too-many-procs",
true, app->num_procs, num_nodes, NULL);
rc = ORTE_ERR_SILENT;
goto error;
}
} else if (map->pernode && map->npernode > 1) {
/* first, let's check to see if there are enough slots/node to
* meet the request - error out if not
*/
slots_per_node = num_slots / num_nodes;
if (map->npernode > slots_per_node) {
orte_show_help("help-rmaps_rank_file.txt", "orte-rmaps-rf:n-per-node-and-not-enough-slots",
true, map->npernode, slots_per_node, NULL);
rc = ORTE_ERR_SILENT;
goto error;
}
if (app->num_procs > (map->npernode * num_nodes)) {
orte_show_help("help-rmaps_rank_file.txt", "orte-rmaps-rf:n-per-node-and-too-many-procs",
true, app->num_procs, map->npernode, num_nodes, num_slots, NULL);
rc = ORTE_ERR_SILENT;
goto error;
}
}
/** track the total number of processes we mapped */
jdata->num_procs += app->num_procs;
/* Make assignments */
if ( ORTE_SUCCESS != (rc = map_app_by_user_map(app, jdata, vpid_start, &node_list, &procs))) {
goto error;
}
/* assign unassigned ranks by map policy */
if (map->policy == ORTE_RMAPS_BYNODE) {
rc = map_app_by_node(app, jdata, vpid_start, &node_list);
} else {
rc = map_app_by_slot(app, jdata, vpid_start, &node_list);
}
if (ORTE_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
goto error;
}
/* save the bookmark */
jdata->bookmark = (orte_node_t*)cur_node_item;
vpid_start += app->num_procs;
/* cleanup the node list - it can differ from one app_context
* to another, so we have to get it every time
*/
@@ -483,6 +500,9 @@ static int orte_rmaps_rf_map(orte_job_t *jdata)
}
OBJ_DESTRUCT(&node_list);
}
/* save the bookmark */
jdata->bookmark = (orte_node_t*)cur_node_item;
}
/* compute and save convenience values */
if (ORTE_SUCCESS != (rc = orte_rmaps_base_compute_usage(jdata))) {
@@ -495,14 +515,14 @@ static int orte_rmaps_rf_map(orte_job_t *jdata)
ORTE_ERROR_LOG(rc);
return rc;
}
for (j=0; j<app->num_procs; j++) {
if (NULL != rankmap[j].slot_list) {
free (rankmap[j].slot_list );
/* cleanup the rankmap */
for (i=0; i < rankmap.size; i++) {
if (NULL != (rfmap = opal_pointer_array_get_item(&rankmap, i))) {
OBJ_RELEASE(rfmap);
}
}
if (NULL != rankmap) {
free(rankmap);
}
OBJ_DESTRUCT(&rankmap);
return ORTE_SUCCESS;
error:
@@ -510,66 +530,90 @@ error:
OBJ_RELEASE(item);
}
OBJ_DESTRUCT(&node_list);
for (j=0; j<app->num_procs; j++) {
if (NULL != rankmap[j].slot_list) {
free (rankmap[j].slot_list );
}
}
if (NULL != rankmap) {
free(rankmap);
}
return rc;
}
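Note the data-structure swap in the function above: the old fixed-size malloc'd rankmap (with rank = -1 as the "unset" sentinel) becomes an opal_pointer_array_t, which grows on demand and leaves unreferenced ranks NULL, so "was this rank pinned by the rankfile?" reduces to a NULL check. A minimal standalone sketch of that pattern, assuming only the public opal_pointer_array API:

    #include "opal/class/opal_pointer_array.h"

    static void rankmap_sketch(void *entry)
    {
        opal_pointer_array_t map;

        OBJ_CONSTRUCT(&map, opal_pointer_array_t);
        /* the array grows as needed, so ranks can be set sparsely
         * and out of order */
        opal_pointer_array_set_item(&map, 3, entry);   /* rank 3 pinned */

        if (NULL == opal_pointer_array_get_item(&map, 0)) {
            /* rank 0 was not named in the rankfile - leave it to the
             * byslot/bynode fallback policy */
        }
        OBJ_DESTRUCT(&map);
    }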
orte_rmaps_base_module_t orte_rmaps_rank_file_module = {
orte_rmaps_rf_map
orte_rmaps_rf_map
};
static int orte_rmaps_rank_file_parse(const char *rankfile, int np)
static int orte_rmaps_rank_file_parse(const char *rankfile)
{
int token;
int rc = ORTE_SUCCESS;
int line_number = 1;
int cnt;
char* node_name = NULL;
char* username = NULL;
char** argv;
char buff[64];
char* value;
int ival=-1;
int rank=-1;
int i;
orte_node_t *hnp_node;
orte_rmaps_rank_file_map_t *rfmap=NULL;
OPAL_THREAD_LOCK(&orte_rmaps_rank_file_mutex);
/* get the hnp node's info */
hnp_node = (orte_node_t*)(orte_node_pool->addr[0]);
orte_rmaps_rank_file_name_cur = rankfile;
orte_rmaps_rank_file_done = false;
orte_rmaps_rank_file_in = fopen(rankfile, "r");
if (NULL == orte_rmaps_rank_file_in) {
orte_show_help("help-rmaps_rank_file.txt", "no-rankfile", true, rankfile, np);
orte_show_help("help-rmaps_rank_file.txt", "no-rankfile", true, rankfile);
rc = OPAL_ERR_NOT_FOUND;
ORTE_ERROR_LOG(rc);
goto unlock;
}
if ( 0 == np ) {
orte_show_help("help-rmaps_rank_file.txt", "orte-rmaps-rf:no-np-and-user-map", true, NULL);
return ORTE_ERR_BAD_PARAM;
}
while (!orte_rmaps_rank_file_done) {
token = orte_rmaps_rank_file_lex();
switch (token) {
case ORTE_RANKFILE_DONE:
orte_rmaps_rank_file_done = true;
case ORTE_RANKFILE_ERROR:
opal_output(0, "Got an error!");
break;
case ORTE_RANKFILE_QUOTED_STRING:
orte_show_help("help-rmaps_rank_file.txt", "not-supported-rankfile", true, "QUOTED_STRING", rankfile);
rc = ORTE_ERR_BAD_PARAM;
ORTE_ERROR_LOG(rc);
goto unlock;
case ORTE_RANKFILE_NEWLINE:
line_number++;
rank = -1;
if (NULL != node_name) {
free(node_name);
}
node_name = NULL;
rfmap = NULL;
break;
case ORTE_RANKFILE_RANK:
token = orte_rmaps_rank_file_lex();
if (ORTE_RANKFILE_INT == token) {
rank = orte_rmaps_rank_file_value.ival;
rfmap = OBJ_NEW(orte_rmaps_rank_file_map_t);
opal_pointer_array_set_item(&rankmap, rank, rfmap);
} else {
orte_show_help("help-rmaps_rank_file.txt", "bad-syntax", true, rankfile);
rc = ORTE_ERR_BAD_PARAM;
ORTE_ERROR_LOG(rc);
goto unlock;
}
break;
case ORTE_RANKFILE_USERNAME:
orte_show_help("help-rmaps_rank_file.txt", "not-supported-rankfile", true, "USERNAME", rankfile);
rc = ORTE_ERR_BAD_PARAM;
ORTE_ERROR_LOG(rc);
goto unlock;
break;
case ORTE_RANKFILE_EQUAL:
ival = orte_rmaps_rank_file_value.ival;
if ( ival > (np-1) ) {
orte_show_help("help-rmaps_rank_file.txt", "bad-rankfile", true, ival, rankfile);
if (rank < 0) {
orte_show_help("help-rmaps_rank_file.txt", "bad-syntax", true, rankfile);
rc = ORTE_ERR_BAD_PARAM;
ORTE_ERROR_LOG(rc);
goto unlock;
}
token = orte_rmaps_rank_file_lex();
@@ -596,25 +640,44 @@ static int orte_rmaps_rank_file_parse(const char *rankfile, int np)
else {
orte_show_help("help-rmaps_rank_file.txt", "bad-syntax", true, rankfile);
rc = ORTE_ERR_BAD_PARAM;
ORTE_ERROR_LOG(rc);
goto unlock;
}
opal_argv_free (argv);
rankmap[ival].rank = ival;
rankmap[ival].node_name = strdup(node_name);
/* convert this into something globally unique */
if (strcmp(node_name, "localhost") == 0 || opal_ifislocal(node_name)) {
/* Nodename has been allocated, that is for sure */
free (node_name);
/* check the rank item */
if (NULL == rfmap) {
orte_show_help("help-rmaps_rank_file.txt", "bad-syntax", true, rankfile);
rc = ORTE_ERR_BAD_PARAM;
ORTE_ERROR_LOG(rc);
goto unlock;
}
/* check if this is the local node */
if (0 == strcmp(node_name, hnp_node->name) ||
opal_ifislocal(node_name)) {
rfmap->node_name = strdup(hnp_node->name);
} else {
rfmap->node_name = strdup(node_name);
}
}
break;
case ORTE_RANKFILE_SLOT:
if ( NULL == (value = orte_rmaps_rank_file_parse_string_or_int())) {
if (NULL == node_name || rank < 0 ||
NULL == (value = orte_rmaps_rank_file_parse_string_or_int())) {
orte_show_help("help-rmaps_rank_file.txt", "bad-syntax", true, rankfile);
rc = ORTE_ERR_BAD_PARAM;
ORTE_ERROR_LOG(rc);
goto unlock;
}
rankmap[ival].slot_list = strdup(value);
/* check the rank item */
if (NULL == rfmap) {
orte_show_help("help-rmaps_rank_file.txt", "bad-syntax", true, rankfile);
rc = ORTE_ERR_BAD_PARAM;
ORTE_ERROR_LOG(rc);
goto unlock;
}
for (i=0; i < 64 && '\0' != value[i]; i++) {
rfmap->slot_list[i] = value[i];
}
break;
}
}
@@ -622,7 +685,11 @@ static int orte_rmaps_rank_file_parse(const char *rankfile, int np)
orte_rmaps_rank_file_in = NULL;
unlock:
if (NULL != node_name) {
free(node_name);
}
orte_rmaps_rank_file_name_cur = NULL;
OPAL_THREAD_UNLOCK(&orte_rmaps_rank_file_mutex);
return rc;
}
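For reference, the grammar this parser accepts is the usual Open MPI rankfile syntax - one rank per line, naming a host and a slot list (socket:core pairs or plain cpu ranges). An illustrative file (hostnames hypothetical):

    rank 0=node0 slot=0
    rank 1=node0 slot=1:0-1
    rank 2=node1 slot=0-3

With the np argument gone, a rank number beyond the job size is no longer rejected at parse time; the mapping loop simply never looks it up.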

View file

@@ -24,17 +24,17 @@
*/
#include "orte_config.h"
#include "opal_config.h"
#include "opal/mca/paffinity/paffinity.h"
#ifndef ORTE_RMAPS_RF_H
#define ORTE_RMAPS_RF_H
#include "orte_config.h"
#include "opal/class/opal_object.h"
#include "opal/mca/paffinity/paffinity.h"
#include "orte/mca/rmaps/rmaps.h"
#if defined(c_plusplus) || defined(__cplusplus)
extern "C" {
#endif
BEGIN_C_DECLS
/**
* RMGR Component
@@ -56,16 +56,14 @@ extern char *orte_rmaps_rank_file_path;
typedef struct cpu_socket_t cpu_socket_t;
struct orte_rmaps_rank_file_map_t {
int rank;
opal_object_t super;
char* node_name;
char* slot_list;
char slot_list[64];
};
typedef struct orte_rmaps_rank_file_map_t orte_rmaps_rank_file_map_t;
ORTE_DECLSPEC OBJ_CLASS_DECLARATION(orte_rmaps_rank_file_map_t);
#if defined(c_plusplus) || defined(__cplusplus)
}
#endif
END_C_DECLS
#endif

View file

@@ -106,3 +106,16 @@ static int orte_rmaps_rank_file_close(void)
return ORTE_SUCCESS;
}
static void rf_map_construct(orte_rmaps_rank_file_map_t *ptr)
{
ptr->node_name = NULL;
memset(ptr->slot_list, (char)0x00, 64);
}
static void rf_map_destruct(orte_rmaps_rank_file_map_t *ptr)
{
if (NULL != ptr->node_name) free(ptr->node_name);
}
OBJ_CLASS_INSTANCE(orte_rmaps_rank_file_map_t,
opal_object_t,
rf_map_construct,
rf_map_destruct);
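Registering the constructor/destructor with OPAL's object system is what lets the mapper manage entries with OBJ_NEW/OBJ_RELEASE instead of bare malloc/free. A minimal lifecycle sketch (hostname hypothetical):

    orte_rmaps_rank_file_map_t *e = OBJ_NEW(orte_rmaps_rank_file_map_t);
    /* rf_map_construct has run: node_name is NULL, slot_list zeroed */
    e->node_name = strdup("node0");
    strncpy(e->slot_list, "0:0", sizeof(e->slot_list) - 1);
    OBJ_RELEASE(e);   /* refcount hits zero: rf_map_destruct frees node_name */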

View file

@@ -24,24 +24,19 @@
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#include "orte/mca/rmaps/rank_file/rmaps_rank_file_lex.h"
#include "opal/util/output.h"
#if defined(c_plusplus) || defined(__cplusplus)
extern "C" {
#endif
BEGIN_C_DECLS
/*
* local functions
*/
int orte_rmaps_rank_file_wrap(void)
{
orte_rmaps_rank_file_done = true;
return 1;
}
static int finish_parsing(void);
int orte_rmaps_rank_file_yywrap(void);
#if defined(c_plusplus) || defined(__cplusplus)
}
#endif
END_C_DECLS
#define yyterminate() \
return finish_parsing()
/*
* global variables
@@ -121,3 +116,25 @@ username { orte_rmaps_rank_file_value.sval = yytext;
%%
/*
* This cleans up at the end of the parse (since, in this case, we
* always parse the entire file) and prevents a memory leak.
*/
static int finish_parsing(void)
{
if (NULL != YY_CURRENT_BUFFER) {
yy_delete_buffer(YY_CURRENT_BUFFER);
#if defined(YY_CURRENT_BUFFER_LVALUE)
YY_CURRENT_BUFFER_LVALUE = NULL;
#else
YY_CURRENT_BUFFER = NULL;
#endif /* YY_CURRENT_BUFFER_LVALUE */
}
return YY_NULL;
}
int orte_rmaps_rank_file_wrap(void)
{
orte_rmaps_rank_file_done = true;
return 1;
}

View file

@@ -104,7 +104,7 @@ static int map_app_by_node(
/* Allocate a slot on this node */
node = (orte_node_t*) cur_node_item;
if (ORTE_SUCCESS != (rc = orte_rmaps_base_claim_slot(jdata, node, vpid_start + num_alloc, app->idx,
if (ORTE_SUCCESS != (rc = orte_rmaps_base_claim_slot(jdata, node, vpid_start + num_alloc, NULL, app->idx,
nodes, jdata->map->oversubscribe, true))) {
/** if the code is ORTE_ERR_NODE_FULLY_USED, then we know this
* really isn't an error - we just need to break from the loop
@@ -217,7 +217,7 @@ static int map_app_by_slot(
}
for( i = 0; i < num_slots_to_take; ++i) {
if (ORTE_SUCCESS != (rc = orte_rmaps_base_claim_slot(jdata, node, vpid_start + num_alloc, app->idx,
if (ORTE_SUCCESS != (rc = orte_rmaps_base_claim_slot(jdata, node, vpid_start + num_alloc, NULL, app->idx,
nodes, jdata->map->oversubscribe, true))) {
/** if the code is ORTE_ERR_NODE_FULLY_USED, then we know this
* really isn't an error - we just need to break from the loop

View file

@@ -210,7 +210,7 @@ static int orte_rmaps_seq_map(orte_job_t *jdata)
* an oversubscribed node from the list!
*/
if (ORTE_SUCCESS != (rc = orte_rmaps_base_claim_slot(jdata, node,
vpid, app->idx,
vpid, NULL, app->idx,
node_list,
jdata->map->oversubscribe,
false))) {

View file

@@ -110,7 +110,7 @@ static int map_app_by_node(
/* Allocate a slot on this node */
node = (orte_node_t*) cur_node_item;
if (ORTE_SUCCESS != (rc = orte_rmaps_base_claim_slot(jdata, node, vpid_start + num_alloc, app->idx,
if (ORTE_SUCCESS != (rc = orte_rmaps_base_claim_slot(jdata, node, vpid_start + num_alloc, NULL, app->idx,
nodes, jdata->map->oversubscribe, true))) {
/** if the code is ORTE_ERR_NODE_FULLY_USED, then we know this
* really isn't an error - we just need to break from the loop
@@ -217,7 +217,7 @@ static int map_app_by_slot(
}
for( i = 0; i < num_slots_to_take; ++i) {
if (ORTE_SUCCESS != (rc = orte_rmaps_base_claim_slot(jdata, node, vpid_start + num_alloc, app->idx,
if (ORTE_SUCCESS != (rc = orte_rmaps_base_claim_slot(jdata, node, vpid_start + num_alloc, NULL, app->idx,
nodes, jdata->map->oversubscribe, true))) {
/** if the code is ORTE_ERR_NODE_FULLY_USED, then we know this
* really isn't an error - we just need to break from the loop

View file

@@ -603,7 +603,6 @@ static void orte_node_construct(orte_node_t* node)
node->slots_alloc = 0;
node->slots_max = 0;
node->username = NULL;
node->slot_list = NULL;
}
static void orte_node_destruct(orte_node_t* node)

View file

@@ -53,9 +53,9 @@ ORTE_DECLSPEC extern bool orte_help_want_aggregate; /* instantiated in orte/uti
/* Shortcut for some commonly used names */
#define ORTE_NAME_WILDCARD (&orte_name_wildcard)
ORTE_DECLSPEC orte_process_name_t orte_name_wildcard; /** instantiated in orte/runtime/orte_init.c */
ORTE_DECLSPEC extern orte_process_name_t orte_name_wildcard; /** instantiated in orte/runtime/orte_init.c */
#define ORTE_NAME_INVALID (&orte_name_invalid)
ORTE_DECLSPEC orte_process_name_t orte_name_invalid; /** instantiated in orte/runtime/orte_init.c */
ORTE_DECLSPEC extern orte_process_name_t orte_name_invalid; /** instantiated in orte/runtime/orte_init.c */
#define ORTE_PROC_MY_NAME (&orte_process_info.my_name)
@@ -242,7 +242,6 @@ typedef struct {
orte_std_cntr_t slots_max;
/** Username on this node, if specified */
char *username;
char *slot_list;
} orte_node_t;
ORTE_DECLSPEC OBJ_CLASS_DECLARATION(orte_node_t);

View file

@@ -42,6 +42,7 @@
#include "orte/util/proc_info.h"
#include "orte/runtime/runtime.h"
#include "orte/runtime/orte_globals.h"
#include "orte/runtime/orte_locks.h"
/*