1
1

A few updates due to RMAPS rank_file component changes

1. applied prefix rule to functions and variables of RMAPS rank_file component
2. cleaned ompi_mpi_init.c from paffinity code
3. paffinity code moved to new opal/mca/paffinity/base/paffinity_base_service.c file
4. added opal_paffinity_slot_list mca parameter

This commit was SVN r18019.
This commit is contained in:
Lenny Verkhovsky 2008-03-30 11:52:11 +00:00
parent cb83a1287d
commit 7e45d7e134
7 changed files with 46 additions and 477 deletions

View file

@ -88,7 +88,6 @@
#endif
#include "ompi/runtime/ompi_cr.h"
static int slot_list_to_cpu_set(char *slot_str);
#include "orte/runtime/orte_globals.h"
/*
* Global variables and symbols for the MPI layer
@ -310,45 +309,15 @@ int ompi_mpi_init(int argc, char **argv, int requested, int *provided)
}
/* Setup process affinity */
if ( NULL != ( slot_list = getenv("slot_list"))) {
if (ORTE_SUCCESS != (ret = slot_list_to_cpu_set(slot_list))){
error = "ompi_mpi_init: error slot_list assigning";
goto error;
}
}
if (ompi_mpi_paffinity_alone) {
bool set = false;
param = mca_base_param_find("mpi", NULL, "paffinity_processor");
if (param >= 0) {
if (OMPI_SUCCESS == mca_base_param_lookup_int(param, &value)) {
if (value >= 0) {
opal_paffinity_base_cpu_set_t mpi_cpumask;
OPAL_PAFFINITY_CPU_ZERO(mpi_cpumask);
OPAL_PAFFINITY_CPU_SET(value,mpi_cpumask);
if (OPAL_SUCCESS == opal_paffinity_base_set(mpi_cpumask)) {
set = true;
}
}
}
if (!set) {
char *vpid;
orte_util_convert_vpid_to_string(&vpid, ORTE_PROC_MY_NAME->vpid);
opal_show_help("help-mpi-runtime",
"mpi_init:startup:paffinity-unavailable",
true, vpid);
free(vpid);
}
/* If we were able to set processor affinity, try setting
up memory affinity */
else {
if (OPAL_SUCCESS == opal_maffinity_base_open() &&
OPAL_SUCCESS == opal_maffinity_base_select()) {
ompi_mpi_maffinity_setup = true;
}
}
if (OMPI_SUCCESS != (ret = opal_paffinity_base_slot_list_set((long)ORTE_PROC_MY_NAME->vpid))) {
error = "opal_paffinity_base_slot_list_set: error slot_list assigning";
goto error;
} else {
/* If we were able to set processor affinity, try setting
up memory affinity */
if (OPAL_SUCCESS == opal_maffinity_base_open() &&
OPAL_SUCCESS == opal_maffinity_base_select()) {
ompi_mpi_maffinity_setup = true;
}
}
@ -801,384 +770,3 @@ int ompi_mpi_init(int argc, char **argv, int requested, int *provided)
return MPI_SUCCESS;
}
/**
* This function receives a slot string ant translate it to
* cpu_set (long bitmap) using the PLPA module.
*/
static int socket_to_cpu_set(char **socket_list, int socket_cnt)
{
int i;
char **range;
int range_cnt;
int lower_range, upper_range;
int processor_id, num_processors;
int max_processor_id;
int rc;
opal_paffinity_base_cpu_set_t cpumask;
if (OPAL_SUCCESS != (rc = opal_paffinity_base_get_processor_info(&num_processors, &max_processor_id))) {
ORTE_ERROR_LOG(rc);
return ORTE_ERROR;
}
OPAL_PAFFINITY_CPU_ZERO(cpumask);
for (i=0; i<socket_cnt; i++) {
if (0 == strcmp("*", socket_list[i])) {
for ( processor_id=0; processor_id<=max_processor_id; processor_id++) {
OPAL_PAFFINITY_CPU_SET(processor_id, cpumask);
if (OPAL_SUCCESS != ( rc = opal_paffinity_base_set(cpumask))) {
ORTE_ERROR_LOG(rc);
return ORTE_ERROR;
}
if (rmaps_rank_file_debug) {
opal_output(0,"rank %ld runon cpu #%d (any socket)",(long)ORTE_PROC_MY_NAME->vpid, processor_id);
}
}
continue;
}
range = opal_argv_split(socket_list[i],'-');
range_cnt = opal_argv_count(range);
switch (range_cnt) {
case 1:
processor_id = atoi(range[0]);
if (max_processor_id < processor_id) {
opal_output(0, "ERROR !!! max_processor_id (%d) < processor_id(%d), modify rankfile and run again\n",max_processor_id, processor_id);
ORTE_ERROR_LOG(OPAL_ERR_BAD_PARAM);
return ORTE_ERROR;
}
OPAL_PAFFINITY_CPU_SET(processor_id, cpumask);
if (OPAL_SUCCESS != ( rc = opal_paffinity_base_set(cpumask))) {
ORTE_ERROR_LOG(rc);
return ORTE_ERROR;
}
if (rmaps_rank_file_debug) {
opal_output(0,"rank %ld runs on cpu #%d", (long)ORTE_PROC_MY_NAME->vpid, processor_id);
}
break;
case 2:
lower_range = atoi(range[0]);
upper_range = atoi(range[1]);
if (max_processor_id < upper_range || lower_range >= upper_range ) {
opal_output(0,"Error !!! Check your boundaries %d < %d(max_cpu) < %d , modify rankfile and run again\n",lower_range, max_processor_id, upper_range);
ORTE_ERROR_LOG(OPAL_ERR_BAD_PARAM);
return ORTE_ERROR;
}
for (processor_id=lower_range; processor_id<=upper_range; processor_id++) {
OPAL_PAFFINITY_CPU_SET(processor_id, cpumask);
if (OPAL_SUCCESS != (rc = opal_paffinity_base_set(cpumask))) {
ORTE_ERROR_LOG(rc);
return ORTE_ERROR;
}
if (rmaps_rank_file_debug) {
opal_output(0,"rank %ld runs on cpu #%d (%d-%d)",
(long)ORTE_PROC_MY_NAME->vpid, processor_id, lower_range, upper_range);
}
}
break;
default:
opal_argv_free(range);
ORTE_ERROR_LOG(ORTE_ERROR);
return ORTE_ERROR;
}
opal_argv_free(range);
}
return ORTE_SUCCESS;
}
/**
 * Bind the calling process to a set of socket:core slot items.
 *
 * socket_core_list[0] must be a "socket:core" item and fixes the
 * initial socket.  Each later item is either a bare core / core-range
 * (which reuses the socket from the previous item) or a full
 * "socket:core" pair.  The core part may be a single number, an
 * inclusive "lower-upper" range, or "*" for all cores on the socket.
 * Every socket:core pair is mapped to a physical processor id, added
 * to a cumulative CPU mask, and the mask is applied after each
 * addition.
 *
 * @param socket_core_list      argv-style array of slot items
 * @param socket_core_list_cnt  number of entries in socket_core_list
 * @return ORTE_SUCCESS on success, ORTE_ERROR on any failure
 *
 * NOTE(review): several error paths below return without freeing
 * `range` and/or `socket_core`; and `max_core_num` is queried only for
 * the first socket but reused when later items name a different
 * socket — verify both are intended.
 */
static int socket_core_to_cpu_set(char **socket_core_list, int socket_core_list_cnt)
{
    int rc, i;
    char **socket_core;
    int socket_core_cnt;
    char **range;
    int range_cnt;
    int lower_range, upper_range;
    int socket, core, processor_id ;
    int max_socket_num, max_core_num;
    int num_sockets, num_cores;
    opal_paffinity_base_cpu_set_t cpumask;

    /* Parse the first item; it establishes the initial socket number */
    socket_core = opal_argv_split (socket_core_list[0], ':');
    socket_core_cnt = opal_argv_count(socket_core);
    OPAL_PAFFINITY_CPU_ZERO(cpumask);
    socket = atoi(socket_core[0]);
    /* Validate the socket against the reported topology */
    if ( OPAL_SUCCESS != ( rc = opal_paffinity_base_get_socket_info(&num_sockets, &max_socket_num))) {
        ORTE_ERROR_LOG(rc);
        return ORTE_ERROR;
    }
    if ( max_socket_num < socket) {
        opal_output(0,"ERROR !!! socket(%d) > max_socket_num(%d), modify rankfile and run again", socket, max_socket_num);
        return ORTE_ERROR;
    }
    /* Query core count for the first socket (reused for later items) */
    if ( OPAL_SUCCESS != ( rc = opal_paffinity_base_get_core_info(socket, &num_cores, &max_core_num))) {
        opal_output(0,"Error !!! Invalid socket number (%d) in rankfile, modify rankfile and run again\n", socket);
        ORTE_ERROR_LOG(OPAL_ERR_BAD_PARAM);
        return ORTE_ERROR;
    }
    if (0 == strcmp("*",socket_core[1])) {
        /* "*" = every core on this socket */
        for (core = 0; core <= max_core_num; core++) {
            if ( OPAL_SUCCESS != (rc = opal_paffinity_base_map_to_processor_id (socket, core, &processor_id))) {
                ORTE_ERROR_LOG(rc);
                return ORTE_ERROR;
            }
            OPAL_PAFFINITY_CPU_SET(processor_id, cpumask);
            if (OPAL_SUCCESS != (rc = opal_paffinity_base_set(cpumask))) {
                ORTE_ERROR_LOG(rc);
                return ORTE_ERROR;
            }
            if (rmaps_rank_file_debug) {
                opal_output(0,"rank %ld runs on pair %d:%d (cpu #%d)",
                            (long)ORTE_PROC_MY_NAME->vpid, socket, core, processor_id);
            }
        }
    } else {
        range = opal_argv_split(socket_core[1], '-');
        range_cnt = opal_argv_count(range);
        switch (range_cnt) {
        case 1:
            /* single core number */
            core = atoi(range[0]);
            if ( max_core_num < core ) {
                opal_output(0,"Error !!! core(%d) > max_core (%d) on socket %d, modify rankfile and run again\n",
                            core, max_core_num, socket);
                ORTE_ERROR_LOG(OPAL_ERR_BAD_PARAM);
                return ORTE_ERROR;
            }
            if ( OPAL_SUCCESS != (rc = opal_paffinity_base_map_to_processor_id (socket, core, &processor_id))) {
                ORTE_ERROR_LOG(rc);
                return ORTE_ERROR;
            }
            OPAL_PAFFINITY_CPU_SET(processor_id, cpumask);
            if (OPAL_SUCCESS != (rc = opal_paffinity_base_set(cpumask))) {
                ORTE_ERROR_LOG(rc);
                return ORTE_ERROR;
            }
            if (rmaps_rank_file_debug) {
                opal_output(0,"rank %ld runs on pair %d:%d (cpu #%d)",
                            (long)ORTE_PROC_MY_NAME->vpid, socket, core, processor_id);
            }
            break;
        case 2:
            /* inclusive "lower-upper" core range */
            lower_range = atoi(range[0]);
            upper_range = atoi(range[1]);
            if ( 0 > lower_range || max_core_num < upper_range || lower_range >= upper_range ) {
                opal_output(0,"Error !!! Check your boundaries %d < %d(max_core) < %d ,modify rankfile and run again\n",
                            lower_range, max_core_num, upper_range);
                ORTE_ERROR_LOG(OPAL_ERR_BAD_PARAM);
                return ORTE_ERROR;
            }
            for (core=lower_range; core<=upper_range; core++) {
                if ( OPAL_SUCCESS != (rc = opal_paffinity_base_map_to_processor_id (socket, core, &processor_id))) {
                    ORTE_ERROR_LOG(rc);
                    return ORTE_ERROR;
                }
                OPAL_PAFFINITY_CPU_SET(processor_id, cpumask);
                if ( OPAL_SUCCESS != (rc = opal_paffinity_base_set(cpumask))) {
                    ORTE_ERROR_LOG(rc);
                    return ORTE_ERROR;
                }
                if (rmaps_rank_file_debug) {
                    opal_output(0,"rank %ld runs on pair %d:%d (cpu #%d)",
                                (long)ORTE_PROC_MY_NAME->vpid, socket, core, processor_id);
                }
            }
            break;
        default:
            /* malformed core spec */
            opal_argv_free(range);
            opal_argv_free(socket_core);
            ORTE_ERROR_LOG(ORTE_ERROR);
            return ORTE_ERROR;
        }
        opal_argv_free(range);
        opal_argv_free(socket_core);
    }
    /* Remaining items: bare core(-range) reuses the current socket,
     * "socket:core" switches sockets */
    for (i=1; i<socket_core_list_cnt; i++) {
        socket_core = opal_argv_split (socket_core_list[i], ':');
        socket_core_cnt = opal_argv_count(socket_core);
        switch (socket_core_cnt) {
        case 1:
            /* no ':' — core or core-range on the previous socket */
            range = opal_argv_split(socket_core[0], '-');
            range_cnt = opal_argv_count(range);
            switch (range_cnt) {
            case 1:
                core = atoi(range[0]);
                /* use PLPA to construct the child->cpu_set */
                if ( max_core_num < core ) {
                    opal_output(0,"Error !!! max_core(%d) < core(%d), modify rankfile and run again\n",max_core_num, core);
                    ORTE_ERROR_LOG(OPAL_ERR_BAD_PARAM);
                    return ORTE_ERROR;
                }
                if ( OPAL_SUCCESS != (rc = opal_paffinity_base_map_to_processor_id (socket, core, &processor_id))) {
                    opal_output(0,"Error !!! Invalid socket : core pair ( #%d : %d), modify rankfile and run again\n",socket, core);
                    ORTE_ERROR_LOG(rc);
                    return ORTE_ERROR;
                }
                OPAL_PAFFINITY_CPU_SET(processor_id, cpumask);
                if ( OPAL_SUCCESS != (rc = opal_paffinity_base_set(cpumask))) {
                    ORTE_ERROR_LOG(rc);
                    return ORTE_ERROR;
                }
                if (rmaps_rank_file_debug) {
                    opal_output(0,"rank %ld runs on pair %d:%d (cpu #%d)",
                                (long)ORTE_PROC_MY_NAME->vpid, socket, core, processor_id);
                }
                break;
            case 2:
                lower_range = atoi(range[0]);
                upper_range = atoi(range[1]);
                if ( 0 > lower_range || max_core_num < upper_range || lower_range >= upper_range) {
                    opal_output(0,"Error !!! Check your boundaries %d < %d(max_core) < %d, modify rankfile and run again\n",
                                lower_range, max_core_num, upper_range);
                    ORTE_ERROR_LOG(OPAL_ERR_BAD_PARAM);
                    return ORTE_ERROR;
                }
                for (core=lower_range; core<=upper_range; core++) {
                    if ( OPAL_SUCCESS != (rc = opal_paffinity_base_map_to_processor_id (socket, core, &processor_id))) {
                        ORTE_ERROR_LOG(rc);
                        return ORTE_ERROR;
                    }
                    OPAL_PAFFINITY_CPU_SET(processor_id, cpumask);
                    if ( OPAL_SUCCESS != (rc = opal_paffinity_base_set(cpumask))) {
                        ORTE_ERROR_LOG(rc);
                        return ORTE_ERROR;
                    }
                    if (rmaps_rank_file_debug) {
                        opal_output(0,"rank %ld runs on pair %d:%d (cpu #%d)",
                                    (long)ORTE_PROC_MY_NAME->vpid, socket, core, processor_id);
                    }
                }
                break;
            default:
                opal_argv_free(range);
                opal_argv_free(socket_core);
                ORTE_ERROR_LOG(ORTE_ERROR);
                return ORTE_ERROR;
            }
            opal_argv_free(range);
            break;
        case 2:
            /* explicit "socket:core" item — switch sockets */
            socket = atoi(socket_core[0]);
            if (0 == strcmp("*",socket_core[1])) {
                /* "*" = every core on the (new) socket */
                for (core=0; core<=max_core_num; core++) {
                    if ( OPAL_SUCCESS != (rc = opal_paffinity_base_map_to_processor_id ( socket, core, &processor_id))) {
                        ORTE_ERROR_LOG(rc);
                        return ORTE_ERROR;
                    }
                    OPAL_PAFFINITY_CPU_SET(processor_id, cpumask);
                    if ( OPAL_SUCCESS != (rc = opal_paffinity_base_set(cpumask))) {
                        ORTE_ERROR_LOG(rc);
                        return ORTE_ERROR;
                    }
                    if (rmaps_rank_file_debug) {
                        opal_output(0,"rank %ld runs on pair %d:%d (cpu #%d)",
                                    (long)ORTE_PROC_MY_NAME->vpid, socket, core, processor_id);
                    }
                }
            } else {
                range = opal_argv_split(socket_core[1], '-');
                range_cnt = opal_argv_count(range);
                socket = atoi(socket_core[0]);
                switch (range_cnt) {
                case 1:
                    core = atoi(range[0]);
                    if ( max_core_num < core ) {
                        opal_output(0,"Error !!! max_core(%d) < core(%d), modify rankfile and run again\n", max_core_num, core);
                        ORTE_ERROR_LOG(OPAL_ERR_BAD_PARAM);
                        return ORTE_ERROR;
                    }
                    if ( OPAL_SUCCESS != (rc = opal_paffinity_base_map_to_processor_id (socket, core, &processor_id))) {
                        ORTE_ERROR_LOG(rc);
                        return ORTE_ERROR;
                    }
                    OPAL_PAFFINITY_CPU_SET(processor_id, cpumask);
                    if ( OPAL_SUCCESS != (rc = opal_paffinity_base_set(cpumask))) {
                        ORTE_ERROR_LOG(rc);
                        return ORTE_ERROR;
                    }
                    if (rmaps_rank_file_debug) {
                        opal_output(0,"rank %ld runs on pair %d:%d (cpu #%d)",
                                    (long)ORTE_PROC_MY_NAME->vpid, socket, core, processor_id);
                    }
                    break;
                case 2:
                    lower_range = atoi(range[0]);
                    upper_range = atoi(range[1]);
                    /* NOTE(review): uses '>' here while the other range
                     * checks use '>=' — confirm which is intended */
                    if ( 0 > lower_range || max_core_num < upper_range || lower_range > upper_range) {
                        opal_output(0,"Error !!! Check your boundaries %d < %d(max_core) < %d, modify rankfile and run again\n",
                                    lower_range, max_core_num, upper_range);
                        ORTE_ERROR_LOG(OPAL_ERR_BAD_PARAM);
                        return ORTE_ERROR;
                    }
                    for ( core = lower_range; core <= upper_range; core++) {
                        if ( OPAL_SUCCESS != (rc = opal_paffinity_base_map_to_processor_id (socket, core, &processor_id))) {
                            ORTE_ERROR_LOG(rc);
                            return ORTE_ERROR;
                        }
                        OPAL_PAFFINITY_CPU_SET(processor_id, cpumask);
                        if ( OPAL_SUCCESS != (rc = opal_paffinity_base_set(cpumask))) {
                            ORTE_ERROR_LOG(rc);
                            return ORTE_ERROR;
                        }
                        if (rmaps_rank_file_debug) {
                            opal_output(0,"rank %ld runs on pair %d:%d (cpu #%d)",
                                        (long)ORTE_PROC_MY_NAME->vpid, socket, core, processor_id);
                        }
                    }
                    break;
                default:
                    opal_argv_free(range);
                    opal_argv_free(socket_core);
                    ORTE_ERROR_LOG(ORTE_ERROR);
                    return ORTE_ERROR;
                }
                opal_argv_free(range);
            }
            break;
        default:
            /* more than one ':' — malformed item */
            opal_argv_free(socket_core);
            ORTE_ERROR_LOG(ORTE_ERROR);
            return ORTE_ERROR;
        }
        opal_argv_free(socket_core);
    }
    return ORTE_SUCCESS;
}
static int slot_list_to_cpu_set(char *slot_str)
{
char **item;
char **socket_core;
orte_std_cntr_t item_cnt, socket_core_cnt;
int rc;
item = opal_argv_split (slot_str, ',');
item_cnt = opal_argv_count (item);
socket_core = opal_argv_split (item[0], ':');
socket_core_cnt = opal_argv_count(socket_core);
opal_argv_free(socket_core);
switch (socket_core_cnt) {
case 1:
if (ORTE_SUCCESS != (rc = socket_to_cpu_set(item, item_cnt))) {
opal_argv_free(item);
ORTE_ERROR_LOG(rc);
return ORTE_ERROR;
}
break;
case 2:
if (ORTE_SUCCESS != (rc = socket_core_to_cpu_set(item, item_cnt))) {
opal_argv_free(item);
ORTE_ERROR_LOG(rc);
return ORTE_ERROR;
}
break;
default:
opal_argv_free(item);
return ORTE_ERROR;
}
opal_argv_free(item);
return ORTE_SUCCESS;
}

View file

@ -48,8 +48,6 @@ int ompi_debug_show_mpi_alloc_mem_leaks = 0;
bool ompi_debug_no_free_handles = false;
bool ompi_mpi_show_mca_params = false;
char *ompi_mpi_show_mca_params_file = NULL;
bool ompi_mpi_paffinity_alone = false;
bool rmaps_rank_file_debug = false;
bool ompi_mpi_abort_print_stack = false;
int ompi_mpi_abort_delay = 0;
bool ompi_mpi_keep_peer_hostnames = true;
@ -59,7 +57,6 @@ bool ompi_mpi_leave_pinned_pipeline = false;
bool ompi_have_sparse_group_storage = OPAL_INT_TO_BOOL(OMPI_GROUP_SPARSE);
bool ompi_use_sparse_group_storage = OPAL_INT_TO_BOOL(OMPI_GROUP_SPARSE);
int ompi_mpi_register_params(void)
{
int value;
@ -148,33 +145,7 @@ int ompi_mpi_register_params(void)
"", &ompi_mpi_show_mca_params_file);
/* User-level process pinning controls */
mca_base_param_reg_int_name("mpi", "paffinity_alone",
"If nonzero, assume that this job is the only (set of) process(es) running on each node and bind processes to processors, starting with processor ID 0",
false, false,
(int) ompi_mpi_paffinity_alone, &value);
ompi_mpi_paffinity_alone = OPAL_INT_TO_BOOL(value);
if ( ompi_mpi_paffinity_alone ){
char *rank_file_path;
mca_base_param_reg_string_name("rmaps","rank_file_path",
"The path to the rank mapping file",
false, false, NULL, &rank_file_path);
if (NULL != rank_file_path) {
opal_output(0, "WARNING: Rankfile component can't be set with paffinity_alone, paffinity_alone set to 0");
ompi_mpi_paffinity_alone = 0;
}
}
mca_base_param_reg_int_name("mpi", "paffinity_processor",
"If set, pin this process to the processor number indicated by the value",
true, false,
-1, NULL);
mca_base_param_reg_int_name("rmaps", "rank_file_debug",
"If nonzero, prints binding to processors ",
false, false,
(int) rmaps_rank_file_debug, &value);
rmaps_rank_file_debug = OPAL_INT_TO_BOOL(value);
/* Do we want to save hostnames for debugging messages? This can
eat quite a bit of memory... */
@ -286,7 +257,6 @@ int ompi_mpi_register_params(void)
}
/* The ddt engine has a few parameters */
return ompi_ddt_register_params();
}

View file

@ -93,17 +93,9 @@ OMPI_DECLSPEC extern bool ompi_mpi_show_mca_params;
*/
OMPI_DECLSPEC extern char * ompi_mpi_show_mca_params_file;
/**
* If this value is true, assume that this ORTE job is the only job
* running on the nodes that have been allocated to it, and bind
* processes to processors (starting with processor 0).
*/
OMPI_DECLSPEC extern bool ompi_mpi_paffinity_alone;
/**
* If this value is true, we can check process binding to CPU
*/
OMPI_DECLSPEC extern bool rmaps_rank_file_debug;
/**
* Whether we should keep the string hostnames of all the MPI
@ -165,7 +157,6 @@ OMPI_DECLSPEC extern bool ompi_use_sparse_group_storage;
*/
OMPI_DECLSPEC int ompi_mpi_register_params(void);
/**
* Display all MCA parameters used
*

View file

@ -23,4 +23,5 @@ libmca_paffinity_la_SOURCES += \
base/paffinity_base_close.c \
base/paffinity_base_select.c \
base/paffinity_base_open.c \
base/paffinity_base_wrappers.c
base/paffinity_base_wrappers.c \
base/paffinity_base_service.c

View file

@ -206,7 +206,6 @@ extern "C" {
*/
OPAL_DECLSPEC extern const opal_paffinity_base_module_1_1_0_t
*opal_paffinity_base_module;
/**
* Indicator as to whether the list of opened paffinity components
* is valid or not.
@ -218,6 +217,10 @@ extern "C" {
* to all available paffinity components.
*/
OPAL_DECLSPEC extern opal_list_t opal_paffinity_base_components_opened;
/**
* Assigning slot_list to proccess
*/
OPAL_DECLSPEC int opal_paffinity_base_slot_list_set(long rank);
/**
* Debugging output stream

View file

@ -70,6 +70,14 @@ int opal_register_params(void)
mca_base_param_reg_string_name("opal", "signal",
"If a signal is received, display the stack trace frame",
false, false, string, NULL);
mca_base_param_reg_string_name("opal","paffinity_slot_list",
"Used to set list of slots to be bind to",
false,false, NULL, NULL);
mca_base_param_reg_int_name("opal", "paffinity_alone",
"If nonzero, assume that this job is the only (set of) process(es) running on each node and bind processes to processors, starting with processor ID 0",
false, false, (int)false, 0);
free(string);
}

View file

@ -699,7 +699,7 @@ int orte_odls_base_default_launch_local(orte_jobid_t job,
opal_list_item_t *item;
orte_app_context_t *app;
orte_odls_child_t *child=NULL;
int i, num_processors;
int i, num_processors, int_value;
bool want_processor, oversubscribed;
int rc=ORTE_SUCCESS, ret;
bool launch_failed=true;
@ -957,25 +957,33 @@ int orte_odls_base_default_launch_local(orte_jobid_t job,
*/
opal_setenv("OMPI_COMM_WORLD_LOCAL_RANK", value, true, &app->env);
free(value);
if (want_processor) {
param = mca_base_param_environ_variable("mpi", NULL,
"paffinity_processor");
asprintf(&value, "%lu", (unsigned long) proc_rank);
opal_setenv(param, value, true, &app->env);
free(param);
free(value);
} else {
param = mca_base_param_environ_variable("mpi", NULL,
"paffinity_processor");
{ /* unset paffinity_slot_list environment */
param = mca_base_param_environ_variable("opal", NULL, "paffinity_slot_list");
opal_unsetenv(param, &app->env);
free(param);
}
if ( NULL != child->slot_list ) {
opal_setenv("slot_list", child->slot_list, true, &app->env);
}else{
opal_unsetenv("slot_list", &app->env);
param = mca_base_param_environ_variable("opal", NULL, "paffinity_slot_list");
asprintf(&value, "%s", child->slot_list);
opal_setenv(param, value, true, &app->env);
free(param);
free(value);
} else if (want_processor) { /* setting paffinity_alone */
param = mca_base_param_find("opal", NULL, "paffinity_alone");
if ( param >=0 ) {
int_value = 0;
mca_base_param_lookup_int(param, &int_value);
if ( int_value ){
param = mca_base_param_environ_variable("opal", NULL, "paffinity_slot_list");
asprintf(&value, "%lu", (unsigned long) proc_rank);
opal_setenv(param, value, true, &app->env);
free(value);
free(param);
}
}
}
/* must unlock prior to fork to keep things clean in the
* event library
*/