At long last, the fabled revision to the affinity system has arrived. A more detailed explanation of how this all works will be presented here:
https://svn.open-mpi.org/trac/ompi/wiki/ProcessPlacement The wiki page is incomplete at the moment, but I hope to complete it over the next few days. I will provide updates on the devel list. As the wiki page states, the default and most commonly used options remain unchanged (except as noted below). New, esoteric and complex options have been added, but unless you are a true masochist, you are unlikely to use many of them beyond perhaps an initial curiosity-motivated experimentation. In a nutshell, this commit revamps the map/rank/bind procedure to take into account topology info on the compute nodes. I have, for the most part, preserved the default behaviors, with three notable exceptions: 1. I have at long last bowed my head in submission to the system admins of managed clusters. For years, they have complained about our default of allowing users to oversubscribe nodes - i.e., to run more processes on a node than allocated slots. Accordingly, I have modified the default behavior: if you are running off of hostfile/dash-host allocated nodes, then the default is to allow oversubscription. If you are running off of RM-allocated nodes, then the default is to NOT allow oversubscription. Flags to override these behaviors are provided, so this only affects the default behavior. 2. Both cpus/rank and stride have been removed. The latter was demanded by those who didn't understand the purpose behind it - and I agreed as the users who requested it are no longer using it. The former was removed temporarily pending implementation. 3. VM launch is now the sole method for starting OMPI. It was just too darned hard to maintain multiple launch procedures - maybe someday, provided someone can demonstrate a reason to do so. As Jeff stated, it is impossible to fully test a change of this size. I have tested it on Linux and Mac, covering all the default and simple options, singletons, and comm_spawn. 
That said, I'm sure others will find problems, so I'll be watching MTT results until this stabilizes. This commit was SVN r25476.
This commit is contained in:
parent
c8e105bd8c
commit
6310361532
@ -3,7 +3,7 @@ enable_multicast=no
|
||||
enable_dlopen=no
|
||||
enable_pty_support=no
|
||||
with_blcr=no
|
||||
with_openib=yes
|
||||
with_openib=no
|
||||
with_memory_manager=no
|
||||
enable_mem_debug=yes
|
||||
enable_mem_profile=no
|
||||
|
@ -62,6 +62,7 @@
|
||||
mca_component_show_load_errors = 0
|
||||
mpi_param_check = 0
|
||||
orte_abort_timeout = 10
|
||||
hwloc_base_mem_bind_failure_action = silent
|
||||
|
||||
## Protect the shared file systems
|
||||
|
||||
@ -72,22 +73,13 @@ oob_tcp_disable_family = IPv6
|
||||
#oob_tcp_connect_timeout=600
|
||||
|
||||
## Define the MPI interconnects
|
||||
btl = sm,openib,self
|
||||
#mpi_leave_pinned = 1
|
||||
btl = sm,tcp,self
|
||||
|
||||
## Setup shared memory
|
||||
btl_sm_free_list_max = 768
|
||||
|
||||
## Setup OpenIB
|
||||
btl_openib_want_fork_support = 0
|
||||
btl_openib_cpc_include = oob
|
||||
#btl_openib_receive_queues = P,128,256,64,32,32:S,2048,1024,128,32:S,12288,1024,128,32:S,65536,1024,128,32
|
||||
|
||||
## Setup TCP
|
||||
btl_tcp_if_include = ib0
|
||||
|
||||
## Configure the PML
|
||||
pml_ob1_use_early_completion = 0
|
||||
|
||||
## Enable cpu affinity
|
||||
opal_paffinity_alone = 1
|
||||
|
@ -58,6 +58,8 @@ enum {
|
||||
OMPI_ERR_DATA_OVERWRITE_ATTEMPT = OPAL_ERR_DATA_OVERWRITE_ATTEMPT,
|
||||
|
||||
OMPI_ERR_BUFFER = OPAL_ERR_BUFFER,
|
||||
OMPI_ERR_SILENT = OPAL_ERR_SILENT,
|
||||
|
||||
OMPI_ERR_REQUEST = OMPI_ERR_BASE - 1
|
||||
};
|
||||
|
||||
|
@ -568,7 +568,7 @@ static int spawn(int count, char **array_of_commands,
|
||||
char stdin_target[OPAL_PATH_MAX];
|
||||
char params[OPAL_PATH_MAX];
|
||||
char mapper[OPAL_PATH_MAX];
|
||||
int nperxxx;
|
||||
int npernode;
|
||||
char slot_list[OPAL_PATH_MAX];
|
||||
|
||||
orte_job_t *jdata;
|
||||
@ -735,7 +735,7 @@ static int spawn(int count, char **array_of_commands,
|
||||
}
|
||||
|
||||
/* check for 'mapper' */
|
||||
ompi_info_get (array_of_info[i], "mapper", sizeof(mapper) - 1, mapper, &flag);
|
||||
ompi_info_get(array_of_info[i], "mapper", sizeof(mapper) - 1, mapper, &flag);
|
||||
if ( flag ) {
|
||||
if (NULL == jdata->map) {
|
||||
jdata->map = OBJ_NEW(orte_job_map_t);
|
||||
@ -743,20 +743,27 @@ static int spawn(int count, char **array_of_commands,
|
||||
ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
|
||||
return ORTE_ERR_OUT_OF_RESOURCE;
|
||||
}
|
||||
/* load it with the system defaults */
|
||||
jdata->map->policy = orte_default_mapping_policy;
|
||||
jdata->map->cpus_per_rank = orte_rmaps_base.cpus_per_rank;
|
||||
jdata->map->stride = orte_rmaps_base.stride;
|
||||
jdata->map->oversubscribe = orte_rmaps_base.oversubscribe;
|
||||
jdata->map->display_map = orte_rmaps_base.display_map;
|
||||
}
|
||||
jdata->map->req_mapper = strdup(mapper);
|
||||
}
|
||||
|
||||
/* check for 'npernode' */
|
||||
/* check for 'display_map' */
|
||||
ompi_info_get_bool(array_of_info[i], "display_map", &local_spawn, &flag);
|
||||
if ( flag ) {
|
||||
if (NULL == jdata->map) {
|
||||
jdata->map = OBJ_NEW(orte_job_map_t);
|
||||
if (NULL == jdata->map) {
|
||||
ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
|
||||
return ORTE_ERR_OUT_OF_RESOURCE;
|
||||
}
|
||||
}
|
||||
jdata->map->display_map = true;
|
||||
}
|
||||
|
||||
/* check for 'npernode' and 'ppr' */
|
||||
ompi_info_get (array_of_info[i], "npernode", sizeof(slot_list) - 1, slot_list, &flag);
|
||||
if ( flag ) {
|
||||
if (ORTE_SUCCESS != ompi_info_value_to_int(slot_list, &nperxxx)) {
|
||||
if (ORTE_SUCCESS != ompi_info_value_to_int(slot_list, &npernode)) {
|
||||
ORTE_ERROR_LOG(ORTE_ERR_BAD_PARAM);
|
||||
return ORTE_ERR_BAD_PARAM;
|
||||
}
|
||||
@ -766,18 +773,14 @@ static int spawn(int count, char **array_of_commands,
|
||||
ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
|
||||
return ORTE_ERR_OUT_OF_RESOURCE;
|
||||
}
|
||||
/* load it with the system defaults */
|
||||
jdata->map->policy = orte_default_mapping_policy;
|
||||
jdata->map->cpus_per_rank = orte_rmaps_base.cpus_per_rank;
|
||||
jdata->map->stride = orte_rmaps_base.stride;
|
||||
jdata->map->oversubscribe = orte_rmaps_base.oversubscribe;
|
||||
jdata->map->display_map = orte_rmaps_base.display_map;
|
||||
}
|
||||
jdata->map->npernode = nperxxx;
|
||||
if (ORTE_MAPPING_POLICY_IS_SET(jdata->map->mapping)) {
|
||||
return OMPI_ERROR;
|
||||
}
|
||||
jdata->map->mapping |= ORTE_MAPPING_PPR;
|
||||
asprintf(&(jdata->map->ppr), "%d:n", npernode);
|
||||
}
|
||||
|
||||
/* check for 'map_bynode' */
|
||||
ompi_info_get_bool(array_of_info[i], "map_bynode", &local_bynode, &flag);
|
||||
ompi_info_get (array_of_info[i], "pernode", sizeof(slot_list) - 1, slot_list, &flag);
|
||||
if ( flag ) {
|
||||
if (NULL == jdata->map) {
|
||||
jdata->map = OBJ_NEW(orte_job_map_t);
|
||||
@ -785,20 +788,438 @@ static int spawn(int count, char **array_of_commands,
|
||||
ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
|
||||
return ORTE_ERR_OUT_OF_RESOURCE;
|
||||
}
|
||||
/* load it with the system defaults */
|
||||
jdata->map->policy = orte_default_mapping_policy;
|
||||
jdata->map->cpus_per_rank = orte_rmaps_base.cpus_per_rank;
|
||||
jdata->map->stride = orte_rmaps_base.stride;
|
||||
jdata->map->oversubscribe = orte_rmaps_base.oversubscribe;
|
||||
jdata->map->display_map = orte_rmaps_base.display_map;
|
||||
}
|
||||
if( local_bynode ) {
|
||||
jdata->map->policy = ORTE_MAPPING_BYNODE;
|
||||
}
|
||||
else {
|
||||
jdata->map->policy = ORTE_MAPPING_BYSLOT;
|
||||
if (ORTE_MAPPING_POLICY_IS_SET(jdata->map->mapping)) {
|
||||
return OMPI_ERROR;
|
||||
}
|
||||
jdata->map->mapping |= ORTE_MAPPING_PPR;
|
||||
jdata->map->ppr = strdup("1:n");
|
||||
}
|
||||
ompi_info_get (array_of_info[i], "ppr", sizeof(slot_list) - 1, slot_list, &flag);
|
||||
if ( flag ) {
|
||||
if (NULL == jdata->map) {
|
||||
jdata->map = OBJ_NEW(orte_job_map_t);
|
||||
if (NULL == jdata->map) {
|
||||
ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
|
||||
return ORTE_ERR_OUT_OF_RESOURCE;
|
||||
}
|
||||
}
|
||||
if (ORTE_MAPPING_POLICY_IS_SET(jdata->map->mapping)) {
|
||||
return OMPI_ERROR;
|
||||
}
|
||||
jdata->map->mapping |= ORTE_MAPPING_PPR;
|
||||
jdata->map->ppr = strdup(slot_list);
|
||||
}
|
||||
|
||||
/* check for 'map_byxxx' */
|
||||
ompi_info_get_bool(array_of_info[i], "map_by_node", &local_bynode, &flag);
|
||||
if ( flag ) {
|
||||
if (NULL == jdata->map) {
|
||||
jdata->map = OBJ_NEW(orte_job_map_t);
|
||||
if (NULL == jdata->map) {
|
||||
ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
|
||||
return ORTE_ERR_OUT_OF_RESOURCE;
|
||||
}
|
||||
}
|
||||
if (ORTE_MAPPING_POLICY_IS_SET(jdata->map->mapping)) {
|
||||
return OMPI_ERROR;
|
||||
}
|
||||
jdata->map->mapping |= ORTE_MAPPING_BYNODE;
|
||||
}
|
||||
#if OPAL_HAVE_HWLOC
|
||||
ompi_info_get_bool(array_of_info[i], "map_by_board", &local_bynode, &flag);
|
||||
if ( flag ) {
|
||||
if (NULL == jdata->map) {
|
||||
jdata->map = OBJ_NEW(orte_job_map_t);
|
||||
if (NULL == jdata->map) {
|
||||
ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
|
||||
return ORTE_ERR_OUT_OF_RESOURCE;
|
||||
}
|
||||
}
|
||||
if (ORTE_MAPPING_POLICY_IS_SET(jdata->map->mapping)) {
|
||||
return OMPI_ERROR;
|
||||
}
|
||||
jdata->map->mapping |= ORTE_MAPPING_BYBOARD;
|
||||
}
|
||||
ompi_info_get_bool(array_of_info[i], "map_by_numa", &local_bynode, &flag);
|
||||
if ( flag ) {
|
||||
if (NULL == jdata->map) {
|
||||
jdata->map = OBJ_NEW(orte_job_map_t);
|
||||
if (NULL == jdata->map) {
|
||||
ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
|
||||
return ORTE_ERR_OUT_OF_RESOURCE;
|
||||
}
|
||||
}
|
||||
if (ORTE_MAPPING_POLICY_IS_SET(jdata->map->mapping)) {
|
||||
return OMPI_ERROR;
|
||||
}
|
||||
jdata->map->mapping |= ORTE_MAPPING_BYNUMA;
|
||||
}
|
||||
ompi_info_get_bool(array_of_info[i], "map_by_socket", &local_bynode, &flag);
|
||||
if ( flag ) {
|
||||
if (NULL == jdata->map) {
|
||||
jdata->map = OBJ_NEW(orte_job_map_t);
|
||||
if (NULL == jdata->map) {
|
||||
ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
|
||||
return ORTE_ERR_OUT_OF_RESOURCE;
|
||||
}
|
||||
}
|
||||
if (ORTE_MAPPING_POLICY_IS_SET(jdata->map->mapping)) {
|
||||
return OMPI_ERROR;
|
||||
}
|
||||
jdata->map->mapping |= ORTE_MAPPING_BYSOCKET;
|
||||
}
|
||||
ompi_info_get_bool(array_of_info[i], "map_by_l3cache", &local_bynode, &flag);
|
||||
if ( flag ) {
|
||||
if (NULL == jdata->map) {
|
||||
jdata->map = OBJ_NEW(orte_job_map_t);
|
||||
if (NULL == jdata->map) {
|
||||
ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
|
||||
return ORTE_ERR_OUT_OF_RESOURCE;
|
||||
}
|
||||
}
|
||||
if (ORTE_MAPPING_POLICY_IS_SET(jdata->map->mapping)) {
|
||||
return OMPI_ERROR;
|
||||
}
|
||||
jdata->map->mapping |= ORTE_MAPPING_BYL3CACHE;
|
||||
}
|
||||
ompi_info_get_bool(array_of_info[i], "map_by_l2cache", &local_bynode, &flag);
|
||||
if ( flag ) {
|
||||
if (NULL == jdata->map) {
|
||||
jdata->map = OBJ_NEW(orte_job_map_t);
|
||||
if (NULL == jdata->map) {
|
||||
ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
|
||||
return ORTE_ERR_OUT_OF_RESOURCE;
|
||||
}
|
||||
}
|
||||
if (ORTE_MAPPING_POLICY_IS_SET(jdata->map->mapping)) {
|
||||
return OMPI_ERROR;
|
||||
}
|
||||
jdata->map->mapping |= ORTE_MAPPING_BYL2CACHE;
|
||||
}
|
||||
ompi_info_get_bool(array_of_info[i], "map_by_l1cache", &local_bynode, &flag);
|
||||
if ( flag ) {
|
||||
if (NULL == jdata->map) {
|
||||
jdata->map = OBJ_NEW(orte_job_map_t);
|
||||
if (NULL == jdata->map) {
|
||||
ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
|
||||
return ORTE_ERR_OUT_OF_RESOURCE;
|
||||
}
|
||||
}
|
||||
if (ORTE_MAPPING_POLICY_IS_SET(jdata->map->mapping)) {
|
||||
return OMPI_ERROR;
|
||||
}
|
||||
jdata->map->mapping |= ORTE_MAPPING_BYL1CACHE;
|
||||
}
|
||||
ompi_info_get_bool(array_of_info[i], "map_by_core", &local_bynode, &flag);
|
||||
if ( flag ) {
|
||||
if (NULL == jdata->map) {
|
||||
jdata->map = OBJ_NEW(orte_job_map_t);
|
||||
if (NULL == jdata->map) {
|
||||
ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
|
||||
return ORTE_ERR_OUT_OF_RESOURCE;
|
||||
}
|
||||
}
|
||||
if (ORTE_MAPPING_POLICY_IS_SET(jdata->map->mapping)) {
|
||||
return OMPI_ERROR;
|
||||
}
|
||||
jdata->map->mapping |= ORTE_MAPPING_BYCORE;
|
||||
}
|
||||
ompi_info_get_bool(array_of_info[i], "map_by_hwthread", &local_bynode, &flag);
|
||||
if ( flag ) {
|
||||
if (NULL == jdata->map) {
|
||||
jdata->map = OBJ_NEW(orte_job_map_t);
|
||||
if (NULL == jdata->map) {
|
||||
ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
|
||||
return ORTE_ERR_OUT_OF_RESOURCE;
|
||||
}
|
||||
}
|
||||
if (ORTE_MAPPING_POLICY_IS_SET(jdata->map->mapping)) {
|
||||
return OMPI_ERROR;
|
||||
}
|
||||
jdata->map->mapping |= ORTE_MAPPING_BYHWTHREAD;
|
||||
}
|
||||
#endif
|
||||
|
||||
/* check for 'rank_byxxx' */
|
||||
ompi_info_get_bool(array_of_info[i], "rank_by_node", &local_bynode, &flag);
|
||||
if ( flag ) {
|
||||
if (NULL == jdata->map) {
|
||||
jdata->map = OBJ_NEW(orte_job_map_t);
|
||||
if (NULL == jdata->map) {
|
||||
ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
|
||||
return ORTE_ERR_OUT_OF_RESOURCE;
|
||||
}
|
||||
}
|
||||
if (0 != jdata->map->ranking) {
|
||||
return OMPI_ERROR;
|
||||
}
|
||||
jdata->map->ranking = ORTE_RANK_BY_NODE;
|
||||
}
|
||||
#if OPAL_HAVE_HWLOC
|
||||
ompi_info_get_bool(array_of_info[i], "rank_by_board", &local_bynode, &flag);
|
||||
if ( flag ) {
|
||||
if (NULL == jdata->map) {
|
||||
jdata->map = OBJ_NEW(orte_job_map_t);
|
||||
if (NULL == jdata->map) {
|
||||
ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
|
||||
return ORTE_ERR_OUT_OF_RESOURCE;
|
||||
}
|
||||
}
|
||||
if (0 != jdata->map->ranking) {
|
||||
return OMPI_ERROR;
|
||||
}
|
||||
jdata->map->ranking = ORTE_RANK_BY_BOARD;
|
||||
}
|
||||
ompi_info_get_bool(array_of_info[i], "rank_by_numa", &local_bynode, &flag);
|
||||
if ( flag ) {
|
||||
if (NULL == jdata->map) {
|
||||
jdata->map = OBJ_NEW(orte_job_map_t);
|
||||
if (NULL == jdata->map) {
|
||||
ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
|
||||
return ORTE_ERR_OUT_OF_RESOURCE;
|
||||
}
|
||||
}
|
||||
if (0 != jdata->map->ranking) {
|
||||
return OMPI_ERROR;
|
||||
}
|
||||
jdata->map->ranking = ORTE_RANK_BY_NUMA;
|
||||
}
|
||||
ompi_info_get_bool(array_of_info[i], "rank_by_socket", &local_bynode, &flag);
|
||||
if ( flag ) {
|
||||
if (NULL == jdata->map) {
|
||||
jdata->map = OBJ_NEW(orte_job_map_t);
|
||||
if (NULL == jdata->map) {
|
||||
ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
|
||||
return ORTE_ERR_OUT_OF_RESOURCE;
|
||||
}
|
||||
}
|
||||
if (0 != jdata->map->ranking) {
|
||||
return OMPI_ERROR;
|
||||
}
|
||||
jdata->map->ranking = ORTE_RANK_BY_SOCKET;
|
||||
}
|
||||
ompi_info_get_bool(array_of_info[i], "rank_by_l3cache", &local_bynode, &flag);
|
||||
if ( flag ) {
|
||||
if (NULL == jdata->map) {
|
||||
jdata->map = OBJ_NEW(orte_job_map_t);
|
||||
if (NULL == jdata->map) {
|
||||
ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
|
||||
return ORTE_ERR_OUT_OF_RESOURCE;
|
||||
}
|
||||
}
|
||||
if (0 != jdata->map->ranking) {
|
||||
return OMPI_ERROR;
|
||||
}
|
||||
jdata->map->ranking = ORTE_RANK_BY_L3CACHE;
|
||||
}
|
||||
ompi_info_get_bool(array_of_info[i], "rank_by_l2cache", &local_bynode, &flag);
|
||||
if ( flag ) {
|
||||
if (NULL == jdata->map) {
|
||||
jdata->map = OBJ_NEW(orte_job_map_t);
|
||||
if (NULL == jdata->map) {
|
||||
ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
|
||||
return ORTE_ERR_OUT_OF_RESOURCE;
|
||||
}
|
||||
}
|
||||
if (0 != jdata->map->ranking) {
|
||||
return OMPI_ERROR;
|
||||
}
|
||||
jdata->map->ranking = ORTE_RANK_BY_L2CACHE;
|
||||
}
|
||||
ompi_info_get_bool(array_of_info[i], "rank_by_l1cache", &local_bynode, &flag);
|
||||
if ( flag ) {
|
||||
if (NULL == jdata->map) {
|
||||
jdata->map = OBJ_NEW(orte_job_map_t);
|
||||
if (NULL == jdata->map) {
|
||||
ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
|
||||
return ORTE_ERR_OUT_OF_RESOURCE;
|
||||
}
|
||||
}
|
||||
if (0 != jdata->map->ranking) {
|
||||
return OMPI_ERROR;
|
||||
}
|
||||
jdata->map->ranking = ORTE_RANK_BY_L1CACHE;
|
||||
}
|
||||
ompi_info_get_bool(array_of_info[i], "rank_by_core", &local_bynode, &flag);
|
||||
if ( flag ) {
|
||||
if (NULL == jdata->map) {
|
||||
jdata->map = OBJ_NEW(orte_job_map_t);
|
||||
if (NULL == jdata->map) {
|
||||
ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
|
||||
return ORTE_ERR_OUT_OF_RESOURCE;
|
||||
}
|
||||
}
|
||||
if (0 != jdata->map->ranking) {
|
||||
return OMPI_ERROR;
|
||||
}
|
||||
jdata->map->ranking = ORTE_RANK_BY_CORE;
|
||||
}
|
||||
ompi_info_get_bool(array_of_info[i], "rank_by_hwthread", &local_bynode, &flag);
|
||||
if ( flag ) {
|
||||
if (NULL == jdata->map) {
|
||||
jdata->map = OBJ_NEW(orte_job_map_t);
|
||||
if (NULL == jdata->map) {
|
||||
ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
|
||||
return ORTE_ERR_OUT_OF_RESOURCE;
|
||||
}
|
||||
}
|
||||
if (0 != jdata->map->ranking) {
|
||||
return OMPI_ERROR;
|
||||
}
|
||||
jdata->map->ranking = ORTE_RANK_BY_HWTHREAD;
|
||||
}
|
||||
|
||||
/* check for 'bind_toxxx' */
|
||||
ompi_info_get_bool(array_of_info[i], "bind_if_supported", &local_bynode, &flag);
|
||||
if ( flag ) {
|
||||
if (NULL == jdata->map) {
|
||||
jdata->map = OBJ_NEW(orte_job_map_t);
|
||||
if (NULL == jdata->map) {
|
||||
ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
|
||||
return ORTE_ERR_OUT_OF_RESOURCE;
|
||||
}
|
||||
}
|
||||
jdata->map->binding |= OPAL_BIND_IF_SUPPORTED;
|
||||
}
|
||||
ompi_info_get_bool(array_of_info[i], "bind_overload_allowed", &local_bynode, &flag);
|
||||
if ( flag ) {
|
||||
if (NULL == jdata->map) {
|
||||
jdata->map = OBJ_NEW(orte_job_map_t);
|
||||
if (NULL == jdata->map) {
|
||||
ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
|
||||
return ORTE_ERR_OUT_OF_RESOURCE;
|
||||
}
|
||||
}
|
||||
jdata->map->binding |= OPAL_BIND_ALLOW_OVERLOAD;
|
||||
}
|
||||
ompi_info_get_bool(array_of_info[i], "bind_to_none", &local_bynode, &flag);
|
||||
if ( flag ) {
|
||||
if (NULL == jdata->map) {
|
||||
jdata->map = OBJ_NEW(orte_job_map_t);
|
||||
if (NULL == jdata->map) {
|
||||
ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
|
||||
return ORTE_ERR_OUT_OF_RESOURCE;
|
||||
}
|
||||
}
|
||||
if (OPAL_BINDING_POLICY_IS_SET(jdata->map->binding)) {
|
||||
return OMPI_ERROR;
|
||||
}
|
||||
jdata->map->binding |= OPAL_BIND_TO_NONE;
|
||||
}
|
||||
ompi_info_get_bool(array_of_info[i], "bind_to_board", &local_bynode, &flag);
|
||||
if ( flag ) {
|
||||
if (NULL == jdata->map) {
|
||||
jdata->map = OBJ_NEW(orte_job_map_t);
|
||||
if (NULL == jdata->map) {
|
||||
ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
|
||||
return ORTE_ERR_OUT_OF_RESOURCE;
|
||||
}
|
||||
}
|
||||
if (OPAL_BINDING_POLICY_IS_SET(jdata->map->binding)) {
|
||||
return OMPI_ERROR;
|
||||
}
|
||||
jdata->map->binding |= OPAL_BIND_TO_BOARD;
|
||||
}
|
||||
ompi_info_get_bool(array_of_info[i], "bind_to_numa", &local_bynode, &flag);
|
||||
if ( flag ) {
|
||||
if (NULL == jdata->map) {
|
||||
jdata->map = OBJ_NEW(orte_job_map_t);
|
||||
if (NULL == jdata->map) {
|
||||
ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
|
||||
return ORTE_ERR_OUT_OF_RESOURCE;
|
||||
}
|
||||
}
|
||||
if (OPAL_BINDING_POLICY_IS_SET(jdata->map->binding)) {
|
||||
return OMPI_ERROR;
|
||||
}
|
||||
jdata->map->binding |= OPAL_BIND_TO_NUMA;
|
||||
}
|
||||
ompi_info_get_bool(array_of_info[i], "bind_to_socket", &local_bynode, &flag);
|
||||
if ( flag ) {
|
||||
if (NULL == jdata->map) {
|
||||
jdata->map = OBJ_NEW(orte_job_map_t);
|
||||
if (NULL == jdata->map) {
|
||||
ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
|
||||
return ORTE_ERR_OUT_OF_RESOURCE;
|
||||
}
|
||||
}
|
||||
if (OPAL_BINDING_POLICY_IS_SET(jdata->map->binding)) {
|
||||
return OMPI_ERROR;
|
||||
}
|
||||
jdata->map->binding |= OPAL_BIND_TO_SOCKET;
|
||||
}
|
||||
ompi_info_get_bool(array_of_info[i], "bind_to_l3cache", &local_bynode, &flag);
|
||||
if ( flag ) {
|
||||
if (NULL == jdata->map) {
|
||||
jdata->map = OBJ_NEW(orte_job_map_t);
|
||||
if (NULL == jdata->map) {
|
||||
ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
|
||||
return ORTE_ERR_OUT_OF_RESOURCE;
|
||||
}
|
||||
}
|
||||
if (OPAL_BINDING_POLICY_IS_SET(jdata->map->binding)) {
|
||||
return OMPI_ERROR;
|
||||
}
|
||||
jdata->map->binding |= OPAL_BIND_TO_L3CACHE;
|
||||
}
|
||||
ompi_info_get_bool(array_of_info[i], "bind_to_l2cache", &local_bynode, &flag);
|
||||
if ( flag ) {
|
||||
if (NULL == jdata->map) {
|
||||
jdata->map = OBJ_NEW(orte_job_map_t);
|
||||
if (NULL == jdata->map) {
|
||||
ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
|
||||
return ORTE_ERR_OUT_OF_RESOURCE;
|
||||
}
|
||||
}
|
||||
if (OPAL_BINDING_POLICY_IS_SET(jdata->map->binding)) {
|
||||
return OMPI_ERROR;
|
||||
}
|
||||
jdata->map->binding |= OPAL_BIND_TO_L2CACHE;
|
||||
}
|
||||
ompi_info_get_bool(array_of_info[i], "bind_to_l1cache", &local_bynode, &flag);
|
||||
if ( flag ) {
|
||||
if (NULL == jdata->map) {
|
||||
jdata->map = OBJ_NEW(orte_job_map_t);
|
||||
if (NULL == jdata->map) {
|
||||
ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
|
||||
return ORTE_ERR_OUT_OF_RESOURCE;
|
||||
}
|
||||
}
|
||||
if (OPAL_BINDING_POLICY_IS_SET(jdata->map->binding)) {
|
||||
return OMPI_ERROR;
|
||||
}
|
||||
jdata->map->binding |= OPAL_BIND_TO_L1CACHE;
|
||||
}
|
||||
ompi_info_get_bool(array_of_info[i], "bind_to_core", &local_bynode, &flag);
|
||||
if ( flag ) {
|
||||
if (NULL == jdata->map) {
|
||||
jdata->map = OBJ_NEW(orte_job_map_t);
|
||||
if (NULL == jdata->map) {
|
||||
ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
|
||||
return ORTE_ERR_OUT_OF_RESOURCE;
|
||||
}
|
||||
}
|
||||
if (OPAL_BINDING_POLICY_IS_SET(jdata->map->binding)) {
|
||||
return OMPI_ERROR;
|
||||
}
|
||||
jdata->map->binding |= OPAL_BIND_TO_CORE;
|
||||
}
|
||||
ompi_info_get_bool(array_of_info[i], "bind_to_hwthread", &local_bynode, &flag);
|
||||
if ( flag ) {
|
||||
if (NULL == jdata->map) {
|
||||
jdata->map = OBJ_NEW(orte_job_map_t);
|
||||
if (NULL == jdata->map) {
|
||||
ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
|
||||
return ORTE_ERR_OUT_OF_RESOURCE;
|
||||
}
|
||||
}
|
||||
if (OPAL_BINDING_POLICY_IS_SET(jdata->map->binding)) {
|
||||
return OMPI_ERROR;
|
||||
}
|
||||
jdata->map->binding |= OPAL_BIND_TO_HWTHREAD;
|
||||
}
|
||||
#endif
|
||||
|
||||
/* check for 'preload_binary' */
|
||||
ompi_info_get_bool(array_of_info[i], "ompi_preload_binary", &local_spawn, &flag);
|
||||
|
@ -287,14 +287,7 @@ int ompi_mpi_init(int argc, char **argv, int requested, int *provided)
|
||||
int param, value;
|
||||
struct timeval ompistart, ompistop;
|
||||
char *event_val = NULL;
|
||||
opal_paffinity_base_cpu_set_t mask;
|
||||
bool proc_bound;
|
||||
#if 0
|
||||
/* see comment below about sched_yield */
|
||||
int num_processors;
|
||||
#endif
|
||||
bool orte_setup = false;
|
||||
bool paffinity_enabled = false;
|
||||
|
||||
/* bitflag of the thread level support provided. To be used
|
||||
* for the modex in order to work in heterogeneous environments. */
|
||||
@ -371,6 +364,18 @@ int ompi_mpi_init(int argc, char **argv, int requested, int *provided)
|
||||
gettimeofday(&ompistart, NULL);
|
||||
}
|
||||
|
||||
#if OPAL_HAVE_HWLOC
|
||||
/* if hwloc is available but didn't get setup for some
|
||||
* reason, do so now
|
||||
*/
|
||||
if (NULL == opal_hwloc_topology) {
|
||||
if (OPAL_SUCCESS != (ret = opal_hwloc_base_get_topology())) {
|
||||
error = "Topology init";
|
||||
goto error;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
/* Register errhandler callback with orte errmgr */
|
||||
if (NULL != orte_errmgr.set_fault_callback) {
|
||||
orte_errmgr.set_fault_callback(ompi_errhandler_runtime_callback);
|
||||
@ -412,17 +417,6 @@ int ompi_mpi_init(int argc, char **argv, int requested, int *provided)
|
||||
goto error;
|
||||
}
|
||||
|
||||
#if OPAL_HAVE_HWLOC
|
||||
/* If orte_init() didn't fill in opal_hwloc_topology, then we need
|
||||
to go fill it in ourselves. */
|
||||
if (NULL == opal_hwloc_topology) {
|
||||
if (0 != hwloc_topology_init(&opal_hwloc_topology) ||
|
||||
0 != hwloc_topology_load(opal_hwloc_topology)) {
|
||||
return OPAL_ERR_NOT_SUPPORTED;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
/* Once we've joined the RTE, see if any MCA parameters were
|
||||
passed to the MPI level */
|
||||
|
||||
@ -442,106 +436,217 @@ int ompi_mpi_init(int argc, char **argv, int requested, int *provided)
|
||||
}
|
||||
#endif
|
||||
|
||||
/* if it hasn't already been done, setup process affinity.
|
||||
* First check to see if a slot list was
|
||||
* specified. If so, use it. If no slot list was specified,
|
||||
* that's not an error -- just fall through and try the next
|
||||
* paffinity scheme.
|
||||
*/
|
||||
ret = opal_paffinity_base_get(&mask);
|
||||
if (OPAL_SUCCESS == ret) {
|
||||
/* paffinity is supported - check for binding */
|
||||
OPAL_PAFFINITY_PROCESS_IS_BOUND(mask, &proc_bound);
|
||||
if (proc_bound || opal_paffinity_base_bound) {
|
||||
/* someone external set it - indicate it is set
|
||||
* so that we know
|
||||
*/
|
||||
paffinity_enabled = true;
|
||||
} else {
|
||||
/* the system is capable of doing processor affinity, but it
|
||||
* has not yet been set - see if a slot_list was given
|
||||
*/
|
||||
if (NULL != opal_paffinity_base_slot_list) {
|
||||
/* It's an error if multiple paffinity schemes were specified */
|
||||
if (opal_paffinity_alone) {
|
||||
ret = OMPI_ERR_BAD_PARAM;
|
||||
error = "Multiple processor affinity schemes specified (can only specify one)";
|
||||
goto error;
|
||||
}
|
||||
ret = opal_paffinity_base_slot_list_set((long)ORTE_PROC_MY_NAME->vpid, opal_paffinity_base_slot_list, &mask);
|
||||
if (OPAL_SUCCESS != ret && OPAL_ERR_NOT_FOUND != OPAL_SOS_GET_ERROR_CODE(ret)) {
|
||||
error = "opal_paffinity_base_slot_list_set() returned an error";
|
||||
goto error;
|
||||
}
|
||||
#if !ORTE_DISABLE_FULL_SUPPORT
|
||||
/* print out a warning if result is no-op, if not suppressed */
|
||||
OPAL_PAFFINITY_PROCESS_IS_BOUND(mask, &proc_bound);
|
||||
if (!proc_bound && orte_odls_base.warn_if_not_bound) {
|
||||
orte_show_help("help-orte-odls-base.txt",
|
||||
"orte-odls-base:warn-not-bound",
|
||||
true, "slot-list",
|
||||
"Request resulted in binding to all available processors",
|
||||
orte_process_info.nodename,
|
||||
"bind-to-slot-list", opal_paffinity_base_slot_list, argv[0]);
|
||||
}
|
||||
#endif
|
||||
paffinity_enabled = true;
|
||||
} else if (opal_paffinity_alone) {
|
||||
/* no slot_list, but they asked for paffinity */
|
||||
int phys_cpu;
|
||||
orte_node_rank_t nrank;
|
||||
if (ORTE_NODE_RANK_INVALID == (nrank = orte_ess.get_node_rank(ORTE_PROC_MY_NAME))) {
|
||||
/* this is okay - we probably were direct-launched, which means
|
||||
* we won't get our node rank until the modex. So just ignore
|
||||
#if OPAL_HAVE_HWLOC
|
||||
{
|
||||
hwloc_obj_t node, obj;
|
||||
hwloc_cpuset_t cpus, nodeset;
|
||||
bool paffinity_enabled=false;
|
||||
orte_node_rank_t nrank;
|
||||
hwloc_obj_type_t target;
|
||||
unsigned cache_level;
|
||||
struct hwloc_topology_support *support;
|
||||
|
||||
/* see if we were bound when launched */
|
||||
if (NULL == getenv("OMPI_MCA_opal_bound_at_launch")) {
|
||||
/* we were not bound at launch */
|
||||
if (NULL != opal_hwloc_topology) {
|
||||
support = (struct hwloc_topology_support*)hwloc_topology_get_support(opal_hwloc_topology);
|
||||
/* get our node object */
|
||||
node = hwloc_get_root_obj(opal_hwloc_topology);
|
||||
nodeset = hwloc_bitmap_alloc();
|
||||
hwloc_bitmap_and(nodeset, node->online_cpuset, node->allowed_cpuset);
|
||||
/* get our cpuset */
|
||||
cpus = hwloc_bitmap_alloc();
|
||||
hwloc_get_cpubind(opal_hwloc_topology, cpus, HWLOC_CPUBIND_PROCESS);
|
||||
/* we are bound if the two cpusets are not equal */
|
||||
if (0 != hwloc_bitmap_compare(cpus, nodeset)) {
|
||||
/* someone external set it - indicate it is set
|
||||
* so that we know
|
||||
*/
|
||||
goto MOVEON;
|
||||
paffinity_enabled = true;
|
||||
hwloc_bitmap_free(nodeset);
|
||||
hwloc_bitmap_free(cpus);
|
||||
} else if (support->cpubind->set_thisproc_cpubind &&
|
||||
OPAL_BINDING_POLICY_IS_SET(opal_hwloc_binding_policy) &&
|
||||
OPAL_BIND_TO_NONE != OPAL_GET_BINDING_POLICY(opal_hwloc_binding_policy)) {
|
||||
/* the system is capable of doing processor affinity, but it
|
||||
* has not yet been set - see if a slot_list was given
|
||||
*/
|
||||
hwloc_bitmap_zero(cpus);
|
||||
if (OPAL_BIND_TO_CPUSET == OPAL_GET_BINDING_POLICY(opal_hwloc_binding_policy)) {
|
||||
if (ORTE_SUCCESS != (ret = opal_hwloc_base_slot_list_parse(opal_hwloc_base_slot_list,
|
||||
opal_hwloc_topology, cpus))) {
|
||||
error = "Setting processor affinity failed";
|
||||
hwloc_bitmap_free(nodeset);
|
||||
hwloc_bitmap_free(cpus);
|
||||
goto error;
|
||||
}
|
||||
if (0 > hwloc_set_cpubind(opal_hwloc_topology, cpus, 0)) {
|
||||
error = "Setting processor affinity failed";
|
||||
hwloc_bitmap_free(nodeset);
|
||||
hwloc_bitmap_free(cpus);
|
||||
goto error;
|
||||
}
|
||||
/* try to find a level and index for this location */
|
||||
opal_hwloc_base_get_level_and_index(cpus, &orte_process_info.bind_level, &orte_process_info.bind_idx);
|
||||
/* cleanup */
|
||||
hwloc_bitmap_free(nodeset);
|
||||
hwloc_bitmap_free(cpus);
|
||||
paffinity_enabled = true;
|
||||
} else {
|
||||
/* cleanup */
|
||||
hwloc_bitmap_free(nodeset);
|
||||
hwloc_bitmap_free(cpus);
|
||||
/* get the node rank */
|
||||
if (ORTE_NODE_RANK_INVALID == (nrank = orte_ess.get_node_rank(ORTE_PROC_MY_NAME))) {
|
||||
/* this is not an error - could be due to being
|
||||
* direct launched - so just ignore and leave
|
||||
* us unbound
|
||||
*/
|
||||
goto MOVEON;
|
||||
}
|
||||
/* if the binding policy is hwthread, then we bind to the nrank-th
|
||||
* hwthread on this node
|
||||
*/
|
||||
if (OPAL_BIND_TO_HWTHREAD == OPAL_GET_BINDING_POLICY(opal_hwloc_binding_policy)) {
|
||||
if (NULL == (obj = opal_hwloc_base_get_obj_by_type(opal_hwloc_topology, HWLOC_OBJ_PU,
|
||||
0, nrank, OPAL_HWLOC_LOGICAL))) {
|
||||
ret = OMPI_ERR_NOT_FOUND;
|
||||
error = "Getting hwthread object";
|
||||
goto error;
|
||||
}
|
||||
cpus = hwloc_bitmap_alloc();
|
||||
hwloc_bitmap_and(cpus, obj->online_cpuset, obj->allowed_cpuset);
|
||||
if (0 > hwloc_set_cpubind(opal_hwloc_topology, cpus, 0)) {
|
||||
ret = OMPI_ERROR;
|
||||
error = "Setting processor affinity failed";
|
||||
hwloc_bitmap_free(cpus);
|
||||
goto error;
|
||||
}
|
||||
hwloc_bitmap_free(cpus);
|
||||
orte_process_info.bind_level = OPAL_HWLOC_L1CACHE_LEVEL;
|
||||
orte_process_info.bind_idx = nrank;
|
||||
} else if (OPAL_BIND_TO_CORE == OPAL_GET_BINDING_POLICY(opal_hwloc_binding_policy)) {
|
||||
/* if the binding policy is core, then we bind to the nrank-th
|
||||
* core on this node
|
||||
*/
|
||||
if (NULL == (obj = opal_hwloc_base_get_obj_by_type(opal_hwloc_topology, HWLOC_OBJ_CORE,
|
||||
0, nrank, OPAL_HWLOC_LOGICAL))) {
|
||||
ret = OMPI_ERR_NOT_FOUND;
|
||||
error = "Getting core object";
|
||||
goto error;
|
||||
}
|
||||
cpus = hwloc_bitmap_alloc();
|
||||
hwloc_bitmap_and(cpus, obj->online_cpuset, obj->allowed_cpuset);
|
||||
if (0 > hwloc_set_cpubind(opal_hwloc_topology, cpus, 0)) {
|
||||
error = "Setting processor affinity failed";
|
||||
hwloc_bitmap_free(cpus);
|
||||
ret = OMPI_ERROR;
|
||||
goto error;
|
||||
}
|
||||
hwloc_bitmap_free(cpus);
|
||||
orte_process_info.bind_level = OPAL_HWLOC_CORE_LEVEL;
|
||||
orte_process_info.bind_idx = nrank;
|
||||
} else {
|
||||
/* for all higher binding policies, we bind to the specified
|
||||
* object that the nrank-th core belongs to
|
||||
*/
|
||||
if (NULL == (obj = opal_hwloc_base_get_obj_by_type(opal_hwloc_topology, HWLOC_OBJ_CORE,
|
||||
0, nrank, OPAL_HWLOC_LOGICAL))) {
|
||||
ret = OMPI_ERR_NOT_FOUND;
|
||||
error = "Getting core object";
|
||||
goto error;
|
||||
}
|
||||
if (OPAL_BIND_TO_L1CACHE == OPAL_GET_BINDING_POLICY(opal_hwloc_binding_policy)) {
|
||||
target = HWLOC_OBJ_CACHE;
|
||||
cache_level = 1;
|
||||
orte_process_info.bind_level = OPAL_HWLOC_L1CACHE_LEVEL;
|
||||
} else if (OPAL_BIND_TO_L2CACHE == OPAL_GET_BINDING_POLICY(opal_hwloc_binding_policy)) {
|
||||
target = HWLOC_OBJ_CACHE;
|
||||
cache_level = 2;
|
||||
orte_process_info.bind_level = OPAL_HWLOC_L2CACHE_LEVEL;
|
||||
} else if (OPAL_BIND_TO_L3CACHE == OPAL_GET_BINDING_POLICY(opal_hwloc_binding_policy)) {
|
||||
target = HWLOC_OBJ_CACHE;
|
||||
cache_level = 3;
|
||||
orte_process_info.bind_level = OPAL_HWLOC_L3CACHE_LEVEL;
|
||||
} else if (OPAL_BIND_TO_SOCKET == OPAL_GET_BINDING_POLICY(opal_hwloc_binding_policy)) {
|
||||
target = HWLOC_OBJ_SOCKET;
|
||||
orte_process_info.bind_level = OPAL_HWLOC_SOCKET_LEVEL;
|
||||
} else if (OPAL_BIND_TO_NUMA == OPAL_GET_BINDING_POLICY(opal_hwloc_binding_policy)) {
|
||||
target = HWLOC_OBJ_NODE;
|
||||
orte_process_info.bind_level = OPAL_HWLOC_NUMA_LEVEL;
|
||||
} else {
|
||||
ret = OMPI_ERR_NOT_FOUND;
|
||||
error = "Binding policy not known";
|
||||
goto error;
|
||||
}
|
||||
for (obj = obj->parent; NULL != obj; obj = obj->parent) {
|
||||
if (target == obj->type) {
|
||||
if (HWLOC_OBJ_CACHE == target && cache_level != obj->attr->cache.depth) {
|
||||
continue;
|
||||
}
|
||||
/* this is the place! */
|
||||
cpus = hwloc_bitmap_alloc();
|
||||
hwloc_bitmap_and(cpus, obj->online_cpuset, obj->allowed_cpuset);
|
||||
if (0 > hwloc_set_cpubind(opal_hwloc_topology, cpus, 0)) {
|
||||
ret = OMPI_ERROR;
|
||||
error = "Setting processor affinity failed";
|
||||
hwloc_bitmap_free(cpus);
|
||||
goto error;
|
||||
}
|
||||
hwloc_bitmap_free(cpus);
|
||||
orte_process_info.bind_idx = opal_hwloc_base_get_obj_idx(opal_hwloc_topology,
|
||||
obj, OPAL_HWLOC_LOGICAL);
|
||||
paffinity_enabled = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (!paffinity_enabled) {
|
||||
ret = OMPI_ERROR;
|
||||
error = "Setting processor affinity failed";
|
||||
goto error;
|
||||
}
|
||||
}
|
||||
paffinity_enabled = true;
|
||||
}
|
||||
}
|
||||
OPAL_PAFFINITY_CPU_ZERO(mask);
|
||||
ret = opal_paffinity_base_get_physical_processor_id(nrank, &phys_cpu);
|
||||
if (OPAL_SUCCESS != ret) {
|
||||
error = "Could not get physical processor id - cannot set processor affinity";
|
||||
goto error;
|
||||
/* If we were able to set processor affinity, try setting up
|
||||
memory affinity */
|
||||
if (!opal_maffinity_setup && paffinity_enabled) {
|
||||
if (OPAL_SUCCESS == opal_maffinity_base_open() &&
|
||||
OPAL_SUCCESS == opal_maffinity_base_select()) {
|
||||
opal_maffinity_setup = true;
|
||||
}
|
||||
}
|
||||
OPAL_PAFFINITY_CPU_SET(phys_cpu, mask);
|
||||
ret = opal_paffinity_base_set(mask);
|
||||
if (OPAL_SUCCESS != ret) {
|
||||
error = "Setting processor affinity failed";
|
||||
goto error;
|
||||
}
|
||||
#if !ORTE_DISABLE_FULL_SUPPORT
|
||||
/* print out a warning if result is no-op, if not suppressed */
|
||||
OPAL_PAFFINITY_PROCESS_IS_BOUND(mask, &proc_bound);
|
||||
if (!proc_bound && orte_odls_base.warn_if_not_bound) {
|
||||
orte_show_help("help-orte-odls-base.txt",
|
||||
"orte-odls-base:warn-not-bound",
|
||||
true, "cpu",
|
||||
"Request resulted in binding to all available processors",
|
||||
orte_process_info.nodename,
|
||||
"[opal|mpi]_paffinity_alone set non-zero", "n/a", argv[0]);
|
||||
}
|
||||
#endif
|
||||
paffinity_enabled = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
MOVEON:
|
||||
#if OPAL_HAVE_HWLOC
|
||||
/* get or update our local cpuset - it will get used multiple
|
||||
* times, so it's more efficient to keep a global copy
|
||||
*/
|
||||
opal_hwloc_base_get_local_cpuset();
|
||||
#endif
|
||||
|
||||
/* If we were able to set processor affinity, try setting up
|
||||
memory affinity */
|
||||
if (!opal_maffinity_setup && paffinity_enabled) {
|
||||
if (OPAL_SUCCESS == opal_maffinity_base_open() &&
|
||||
OPAL_SUCCESS == opal_maffinity_base_select()) {
|
||||
opal_maffinity_setup = true;
|
||||
/* report bindings, if requested */
|
||||
if (opal_hwloc_report_bindings) {
|
||||
char bindings[64];
|
||||
hwloc_obj_t root;
|
||||
hwloc_cpuset_t cpus;
|
||||
/* get the root object for this node */
|
||||
root = hwloc_get_root_obj(opal_hwloc_topology);
|
||||
cpus = opal_hwloc_base_get_available_cpus(opal_hwloc_topology, root);
|
||||
if (0 == hwloc_bitmap_compare(cpus, opal_hwloc_my_cpuset)) {
|
||||
opal_output(0, "%s is not bound",
|
||||
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME));
|
||||
} else {
|
||||
hwloc_bitmap_list_snprintf(bindings, 64, opal_hwloc_my_cpuset);
|
||||
opal_output(0, "%s is bound to cpus %s",
|
||||
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
|
||||
bindings);
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
/* initialize datatypes. This step should be done early as it will
|
||||
* create the local convertor and local arch used in the proc
|
||||
* init.
|
||||
@ -649,7 +754,7 @@ MOVEON:
|
||||
|
||||
if (OMPI_SUCCESS !=
|
||||
(ret = ompi_osc_base_find_available(OMPI_ENABLE_PROGRESS_THREADS,
|
||||
OMPI_ENABLE_THREAD_MULTIPLE))) {
|
||||
OMPI_ENABLE_THREAD_MULTIPLE))) {
|
||||
error = "ompi_osc_base_find_available() failed";
|
||||
goto error;
|
||||
}
|
||||
@ -801,16 +906,16 @@ MOVEON:
|
||||
* Dump all MCA parameters if requested
|
||||
*/
|
||||
if (ompi_mpi_show_mca_params) {
|
||||
ompi_show_all_mca_params(ompi_mpi_comm_world.comm.c_my_rank,
|
||||
nprocs,
|
||||
orte_process_info.nodename);
|
||||
ompi_show_all_mca_params(ompi_mpi_comm_world.comm.c_my_rank,
|
||||
nprocs,
|
||||
orte_process_info.nodename);
|
||||
}
|
||||
|
||||
/* Do we need to wait for a debugger? */
|
||||
ompi_wait_for_debugger();
|
||||
|
||||
/* check for timing request - get stop time and report elapsed
|
||||
time if so, then start the clock again */
|
||||
time if so, then start the clock again */
|
||||
if (timing && 0 == ORTE_PROC_MY_NAME->vpid) {
|
||||
gettimeofday(&ompistop, NULL);
|
||||
opal_output(0, "ompi_mpi_init[%ld]: time from modex to first barrier %ld usec",
|
||||
|
@ -68,7 +68,8 @@ enum {
|
||||
OPAL_ERR_INVALID_PHYS_CPU = (OPAL_ERR_BASE - 39),
|
||||
OPAL_ERR_MULTIPLE_AFFINITIES = (OPAL_ERR_BASE - 40),
|
||||
OPAL_ERR_SLOT_LIST_RANGE = (OPAL_ERR_BASE - 41),
|
||||
OPAL_ERR_NETWORK_NOT_PARSEABLE = (OPAL_ERR_BASE - 42)
|
||||
OPAL_ERR_NETWORK_NOT_PARSEABLE = (OPAL_ERR_BASE - 42),
|
||||
OPAL_ERR_SILENT = (OPAL_ERR_BASE - 43)
|
||||
};
|
||||
|
||||
#define OPAL_ERR_MAX (OPAL_ERR_BASE - 100)
|
||||
|
@ -9,6 +9,8 @@
|
||||
|
||||
EXTRA_DIST = base/.windows
|
||||
|
||||
dist_pkgdata_DATA = base/help-opal-hwloc-base.txt
|
||||
|
||||
headers += \
|
||||
base/base.h
|
||||
|
||||
|
@ -77,7 +77,111 @@ OPAL_DECLSPEC extern bool opal_hwloc_base_inited;
|
||||
OPAL_DECLSPEC extern bool opal_hwloc_topology_inited;
|
||||
|
||||
#if OPAL_HAVE_HWLOC
|
||||
OPAL_DECLSPEC extern char *opal_hwloc_base_slot_list;
|
||||
OPAL_DECLSPEC extern char *opal_hwloc_base_cpu_set;
|
||||
OPAL_DECLSPEC extern hwloc_cpuset_t opal_hwloc_base_given_cpus;
|
||||
|
||||
/**
|
||||
* Report a bind failure using the normal mechanisms if a component
|
||||
* fails to bind memory -- according to the value of the
|
||||
* hwloc_base_bind_failure_action MCA parameter.
|
||||
*/
|
||||
OPAL_DECLSPEC int opal_hwloc_base_report_bind_failure(const char *file,
|
||||
int line,
|
||||
const char *msg,
|
||||
int rc);
|
||||
|
||||
OPAL_DECLSPEC opal_paffinity_locality_t opal_hwloc_base_get_relative_locality(hwloc_topology_t topo,
|
||||
opal_hwloc_level_t level1,
|
||||
unsigned int peer1,
|
||||
opal_hwloc_level_t level2,
|
||||
unsigned int peer2);
|
||||
|
||||
OPAL_DECLSPEC void opal_hwloc_base_get_local_cpuset(void);
|
||||
|
||||
/**
|
||||
* Enum for what memory allocation policy we want for user allocations.
|
||||
* MAP = memory allocation policy.
|
||||
*/
|
||||
typedef enum {
|
||||
OPAL_HWLOC_BASE_MAP_NONE,
|
||||
OPAL_HWLOC_BASE_MAP_LOCAL_ONLY
|
||||
} opal_hwloc_base_map_t;
|
||||
|
||||
/**
|
||||
* Global reflecting the MAP (set by MCA param).
|
||||
*/
|
||||
OPAL_DECLSPEC extern opal_hwloc_base_map_t opal_hwloc_base_map;
|
||||
|
||||
/**
|
||||
* Enum for what to do if the hwloc framework tries to bind memory
|
||||
* and fails. BFA = bind failure action.
|
||||
*/
|
||||
typedef enum {
|
||||
OPAL_HWLOC_BASE_MBFA_SILENT,
|
||||
OPAL_HWLOC_BASE_MBFA_WARN,
|
||||
OPAL_HWLOC_BASE_MBFA_ERROR
|
||||
} opal_hwloc_base_mbfa_t;
|
||||
|
||||
/**
|
||||
* Global reflecting the BFA (set by MCA param).
|
||||
*/
|
||||
OPAL_DECLSPEC extern opal_hwloc_base_mbfa_t opal_hwloc_base_mbfa;
|
||||
|
||||
/* some critical helper functions */
|
||||
OPAL_DECLSPEC int opal_hwloc_base_filter_cpus(hwloc_topology_t topo);
|
||||
OPAL_DECLSPEC int opal_hwloc_base_get_topology(void);
|
||||
OPAL_DECLSPEC void opal_hwloc_base_free_topology(hwloc_topology_t topo);
|
||||
OPAL_DECLSPEC hwloc_cpuset_t opal_hwloc_base_get_available_cpus(hwloc_topology_t topo,
|
||||
hwloc_obj_t obj);
|
||||
OPAL_DECLSPEC unsigned int opal_hwloc_base_get_nbobjs_by_type(hwloc_topology_t topo,
|
||||
hwloc_obj_type_t target,
|
||||
unsigned cache_level,
|
||||
opal_hwloc_resource_type_t rtype);
|
||||
OPAL_DECLSPEC hwloc_obj_t opal_hwloc_base_get_obj_by_type(hwloc_topology_t topo,
|
||||
hwloc_obj_type_t target,
|
||||
unsigned cache_level,
|
||||
unsigned int instance,
|
||||
opal_hwloc_resource_type_t rtype);
|
||||
OPAL_DECLSPEC unsigned int opal_hwloc_base_get_obj_idx(hwloc_topology_t topo,
|
||||
hwloc_obj_t obj,
|
||||
opal_hwloc_resource_type_t rtype);
|
||||
OPAL_DECLSPEC void opal_hwloc_base_get_level_and_index(hwloc_cpuset_t cpus,
|
||||
opal_hwloc_level_t *bind_level,
|
||||
unsigned int *bind_idx);
|
||||
OPAL_DECLSPEC unsigned int opal_hwloc_base_get_npus(hwloc_topology_t topo,
|
||||
hwloc_obj_t target);
|
||||
OPAL_DECLSPEC char* opal_hwloc_base_print_binding(opal_binding_policy_t binding);
|
||||
OPAL_DECLSPEC char* opal_hwloc_base_print_locality(opal_paffinity_locality_t locality);
|
||||
OPAL_DECLSPEC char* opal_hwloc_base_print_level(opal_hwloc_level_t level);
|
||||
|
||||
/**
|
||||
* Provide a utility to parse a slot list against the local
|
||||
* logical cpus, and produce a cpuset for the described binding
|
||||
*/
|
||||
OPAL_DECLSPEC int opal_hwloc_base_slot_list_parse(const char *slot_str,
|
||||
hwloc_topology_t topo,
|
||||
hwloc_cpuset_t cpumask);
|
||||
|
||||
/**
|
||||
* Report a bind failure using the normal mechanisms if a component
|
||||
* fails to bind memory -- according to the value of the
|
||||
* hwloc_base_bind_failure_action MCA parameter.
|
||||
*/
|
||||
OPAL_DECLSPEC int opal_hwloc_base_report_bind_failure(const char *file,
|
||||
int line,
|
||||
const char *msg,
|
||||
int rc);
|
||||
|
||||
/**
|
||||
* This function sets the process-wide memory affinity policy
|
||||
* according to opal_hwloc_base_map and opal_hwloc_base_mbfa. It needs
|
||||
* to be a separate, standalone function (as opposed to being done
|
||||
* during opal_hwloc_base_open()) because opal_hwloc_topology is not
|
||||
* loaded by opal_hwloc_base_open(). Hence, an upper layer needs to
|
||||
* invoke this function after opal_hwloc_topology has been loaded.
|
||||
*/
|
||||
OPAL_DECLSPEC int opal_hwloc_base_set_process_membind_policy(void);
|
||||
|
||||
/* datatype support */
|
||||
OPAL_DECLSPEC int opal_hwloc_pack(opal_buffer_t *buffer, const void *src,
|
||||
@ -100,80 +204,8 @@ OPAL_DECLSPEC int opal_hwloc_size(size_t *size,
|
||||
opal_data_type_t type);
|
||||
OPAL_DECLSPEC void opal_hwloc_release(opal_dss_value_t *value);
|
||||
|
||||
/**
|
||||
* Report a bind failure using the normal mechanisms if a component
|
||||
* fails to bind memory -- according to the value of the
|
||||
* hwloc_base_bind_failure_action MCA parameter.
|
||||
*/
|
||||
OPAL_DECLSPEC int opal_hwloc_base_report_bind_failure(const char *file,
|
||||
int line,
|
||||
const char *msg,
|
||||
int rc);
|
||||
|
||||
OPAL_DECLSPEC opal_paffinity_locality_t opal_hwloc_base_get_relative_locality(hwloc_topology_t topo,
|
||||
hwloc_cpuset_t peer1,
|
||||
hwloc_cpuset_t peer2);
|
||||
|
||||
OPAL_DECLSPEC void opal_hwloc_base_get_local_cpuset(void);
|
||||
|
||||
/* some critical helper functions */
|
||||
OPAL_DECLSPEC int opal_hwloc_base_filter_cpus(hwloc_topology_t topo);
|
||||
OPAL_DECLSPEC int opal_hwloc_base_get_topology(void);
|
||||
OPAL_DECLSPEC void opal_hwloc_base_free_topology(hwloc_topology_t topo);
|
||||
OPAL_DECLSPEC hwloc_cpuset_t opal_hwloc_base_get_available_cpus(hwloc_topology_t topo,
|
||||
hwloc_obj_t obj);
|
||||
OPAL_DECLSPEC unsigned int opal_hwloc_base_get_nbobjs_by_type(hwloc_topology_t topo,
|
||||
hwloc_obj_type_t target,
|
||||
unsigned cache_level,
|
||||
opal_hwloc_resource_type_t rtype);
|
||||
OPAL_DECLSPEC hwloc_obj_t opal_hwloc_base_get_obj_by_type(hwloc_topology_t topo,
|
||||
hwloc_obj_type_t target,
|
||||
unsigned cache_level,
|
||||
unsigned int instance,
|
||||
opal_hwloc_resource_type_t rtype);
|
||||
OPAL_DECLSPEC unsigned int opal_hwloc_base_get_npus(hwloc_topology_t topo,
|
||||
hwloc_obj_t target);
|
||||
|
||||
#endif
|
||||
|
||||
/**
|
||||
* Enum for what memory allocation policy we want for user allocations.
|
||||
* MAP = memory allocation policy.
|
||||
*/
|
||||
typedef enum {
|
||||
OPAL_HWLOC_BASE_MAP_NONE,
|
||||
OPAL_HWLOC_BASE_MAP_LOCAL_ONLY
|
||||
} opal_hwloc_base_map_t;
|
||||
|
||||
/**
|
||||
* Global reflecting the MAP (set by MCA param).
|
||||
*/
|
||||
OPAL_DECLSPEC extern opal_hwloc_base_map_t opal_hwloc_base_map;
|
||||
|
||||
/**
|
||||
* Enum for what to do if the hwloc framework tries to bind memory
|
||||
* and fails. BFA = bind failure action.
|
||||
*/
|
||||
typedef enum {
|
||||
OPAL_HWLOC_BASE_MBFA_WARN,
|
||||
OPAL_HWLOC_BASE_MBFA_ERROR
|
||||
} opal_hwloc_base_mbfa_t;
|
||||
|
||||
/**
|
||||
* Global reflecting the BFA (set by MCA param).
|
||||
*/
|
||||
OPAL_DECLSPEC extern opal_hwloc_base_mbfa_t opal_hwloc_base_mbfa;
|
||||
|
||||
/**
|
||||
* This function sets the process-wide memory affinity policy
|
||||
* according to opal_hwloc_base_map and opal_hwloc_base_mbfa. It needs
|
||||
* to be a separate, standalone function (as opposed to being done
|
||||
* during opal_hwloc_base_open()) because opal_hwloc_topology is not
|
||||
* loaded by opal_hwloc_base_open(). Hence, an upper layer needs to
|
||||
* invoke this function after opal_hwloc_topology has been loaded.
|
||||
*/
|
||||
OPAL_DECLSPEC int opal_hwloc_base_set_process_membind_policy(void);
|
||||
|
||||
END_C_DECLS
|
||||
|
||||
#endif /* OPAL_HWLOC_BASE_H */
|
||||
|
@ -39,3 +39,25 @@ message will only be reported at most once per process.
|
||||
File: %s:%d
|
||||
Message: %s
|
||||
Severity: %s
|
||||
#
|
||||
[unrecognized-policy]
|
||||
The specified %s policy is not recognized:
|
||||
|
||||
Policy: %s
|
||||
|
||||
Please check for a typo or ensure that the option is a supported
|
||||
one.
|
||||
#
|
||||
[logical-cpu-not-found]
|
||||
A specified logical processor does not exist in this topology:
|
||||
|
||||
Cpu set given: %s
|
||||
#
|
||||
[redefining-policy]
|
||||
Conflicting directives for binding policy are causing the policy
|
||||
to be redefined:
|
||||
|
||||
New policy: %s
|
||||
Prior policy: %s
|
||||
|
||||
Please check that only one policy is defined.
|
||||
|
@ -12,6 +12,7 @@
|
||||
|
||||
#include "opal/constants.h"
|
||||
#include "opal/dss/dss.h"
|
||||
#include "opal/util/argv.h"
|
||||
#include "opal/util/output.h"
|
||||
#include "opal/util/show_help.h"
|
||||
#include "opal/mca/mca.h"
|
||||
@ -39,7 +40,13 @@ bool opal_hwloc_base_inited = false;
|
||||
#if OPAL_HAVE_HWLOC
|
||||
hwloc_topology_t opal_hwloc_topology=NULL;
|
||||
hwloc_cpuset_t opal_hwloc_my_cpuset=NULL;
|
||||
hwloc_cpuset_t opal_hwloc_base_given_cpus=NULL;
|
||||
opal_hwloc_base_map_t opal_hwloc_base_map = OPAL_HWLOC_BASE_MAP_NONE;
|
||||
opal_hwloc_base_mbfa_t opal_hwloc_base_mbfa = OPAL_HWLOC_BASE_MBFA_WARN;
|
||||
opal_binding_policy_t opal_hwloc_binding_policy=0;
|
||||
char *opal_hwloc_base_slot_list=NULL;
|
||||
char *opal_hwloc_base_cpu_set=NULL;
|
||||
bool opal_hwloc_report_bindings=false;
|
||||
hwloc_obj_type_t opal_hwloc_levels[] = {
|
||||
HWLOC_OBJ_MACHINE,
|
||||
HWLOC_OBJ_NODE,
|
||||
@ -50,10 +57,8 @@ hwloc_obj_type_t opal_hwloc_levels[] = {
|
||||
HWLOC_OBJ_CORE,
|
||||
HWLOC_OBJ_PU
|
||||
};
|
||||
|
||||
bool opal_hwloc_use_hwthreads_as_cpus = false;
|
||||
#endif
|
||||
opal_hwloc_base_map_t opal_hwloc_base_map = OPAL_HWLOC_BASE_MAP_NONE;
|
||||
opal_hwloc_base_mbfa_t opal_hwloc_base_mbfa = OPAL_HWLOC_BASE_MBFA_ERROR;
|
||||
|
||||
|
||||
int opal_hwloc_base_open(void)
|
||||
@ -65,10 +70,11 @@ int opal_hwloc_base_open(void)
|
||||
|
||||
#if OPAL_HAVE_HWLOC
|
||||
{
|
||||
int value;
|
||||
int value, i;
|
||||
opal_data_type_t tmp;
|
||||
char *str_value;
|
||||
|
||||
char **tmpvals, **quals;
|
||||
|
||||
/* Debugging / verbose output */
|
||||
mca_base_param_reg_int_name("hwloc", "base_verbose",
|
||||
"Verbosity level of the hwloc framework",
|
||||
@ -102,12 +108,16 @@ int opal_hwloc_base_open(void)
|
||||
gethostname(hostname, sizeof(hostname));
|
||||
opal_show_help("help-opal-hwloc-base.txt", "invalid policy",
|
||||
true, hostname, getpid(), str_value);
|
||||
free(str_value);
|
||||
return OPAL_ERR_BAD_PARAM;
|
||||
}
|
||||
free(str_value);
|
||||
|
||||
/* hwloc_base_bind_failure_action */
|
||||
switch (opal_hwloc_base_mbfa) {
|
||||
case OPAL_HWLOC_BASE_MBFA_SILENT:
|
||||
str_value = "silent";
|
||||
break;
|
||||
case OPAL_HWLOC_BASE_MBFA_WARN:
|
||||
str_value = "warn";
|
||||
break;
|
||||
@ -116,9 +126,11 @@ int opal_hwloc_base_open(void)
|
||||
break;
|
||||
}
|
||||
mca_base_param_reg_string_name("hwloc", "base_mem_bind_failure_action",
|
||||
"What Open MPI will do if it explicitly tries to bind memory to a specific NUMA location, and fails. Note that this is a different case than the general allocation policy described by hwloc_base_alloc_policy. A value of \"warn\" means that Open MPI will warn the first time this happens, but allow the job to continue (possibly with degraded performance). A value of \"error\" means that Open MPI will abort the job if this happens.",
|
||||
"What Open MPI will do if it explicitly tries to bind memory to a specific NUMA location, and fails. Note that this is a different case than the general allocation policy described by hwloc_base_alloc_policy. A value of \"silent\" means that Open MPI will proceed without comment. A value of \"warn\" means that Open MPI will warn the first time this happens, but allow the job to continue (possibly with degraded performance). A value of \"error\" means that Open MPI will abort the job if this happens.",
|
||||
false, false, str_value, &str_value);
|
||||
if (strcasecmp(str_value, "warn") == 0) {
|
||||
if (strcasecmp(str_value, "silent") == 0) {
|
||||
opal_hwloc_base_mbfa = OPAL_HWLOC_BASE_MBFA_SILENT;
|
||||
} else if (strcasecmp(str_value, "warn") == 0) {
|
||||
opal_hwloc_base_mbfa = OPAL_HWLOC_BASE_MBFA_WARN;
|
||||
} else if (strcasecmp(str_value, "error") == 0) {
|
||||
opal_hwloc_base_mbfa = OPAL_HWLOC_BASE_MBFA_ERROR;
|
||||
@ -127,14 +139,123 @@ int opal_hwloc_base_open(void)
|
||||
gethostname(hostname, sizeof(hostname));
|
||||
opal_show_help("help-opal-hwloc-base.txt", "invalid error action",
|
||||
true, hostname, getpid(), str_value);
|
||||
free(str_value);
|
||||
return OPAL_ERR_BAD_PARAM;
|
||||
}
|
||||
free(str_value);
|
||||
|
||||
/* binding specification */
|
||||
mca_base_param_reg_string_name("hwloc", "base_binding_policy",
|
||||
"Policy for binding processes [none (default) | hwthread | core | l1cache | l2cache | l3cache | socket | numa | board] (supported qualifiers: overload-allowed,if-supported)",
|
||||
false, false, NULL, &str_value);
|
||||
if (NULL == str_value) {
|
||||
opal_hwloc_binding_policy = OPAL_BIND_TO_NONE;
|
||||
/* mark that no binding policy was specified */
|
||||
opal_hwloc_binding_policy &= ~OPAL_BIND_GIVEN;
|
||||
} else if (0 == strncasecmp(str_value, "none", strlen("none"))) {
|
||||
opal_hwloc_binding_policy = OPAL_BIND_TO_NONE;
|
||||
opal_hwloc_binding_policy |= OPAL_BIND_GIVEN;
|
||||
} else {
|
||||
opal_hwloc_binding_policy |= OPAL_BIND_GIVEN;
|
||||
tmpvals = opal_argv_split(str_value, ':');
|
||||
if (1 < opal_argv_count(tmpvals)) {
|
||||
quals = opal_argv_split(tmpvals[1], ',');
|
||||
for (i=0; NULL != quals[i]; i++) {
|
||||
if (0 == strcasecmp(quals[i], "if-supported")) {
|
||||
opal_hwloc_binding_policy |= OPAL_BIND_IF_SUPPORTED;
|
||||
} else if (0 == strcasecmp(quals[i], "overload-allowed")) {
|
||||
opal_hwloc_binding_policy |= OPAL_BIND_ALLOW_OVERLOAD;
|
||||
} else {
|
||||
/* unknown option */
|
||||
opal_output(0, "Unknown qualifier to orte_process_binding: %s", str_value);
|
||||
return OPAL_ERR_BAD_PARAM;
|
||||
}
|
||||
}
|
||||
opal_argv_free(quals);
|
||||
}
|
||||
if (0 == strcasecmp(tmpvals[0], "hwthread")) {
|
||||
OPAL_SET_BINDING_POLICY(opal_hwloc_binding_policy, OPAL_BIND_TO_HWTHREAD);
|
||||
} else if (0 == strcasecmp(tmpvals[0], "core")) {
|
||||
OPAL_SET_BINDING_POLICY(opal_hwloc_binding_policy, OPAL_BIND_TO_CORE);
|
||||
} else if (0 == strcasecmp(tmpvals[0], "l1cache")) {
|
||||
OPAL_SET_BINDING_POLICY(opal_hwloc_binding_policy, OPAL_BIND_TO_L1CACHE);
|
||||
} else if (0 == strcasecmp(tmpvals[0], "l2cache")) {
|
||||
OPAL_SET_BINDING_POLICY(opal_hwloc_binding_policy, OPAL_BIND_TO_L2CACHE);
|
||||
} else if (0 == strcasecmp(tmpvals[0], "l3cache")) {
|
||||
OPAL_SET_BINDING_POLICY(opal_hwloc_binding_policy, OPAL_BIND_TO_L3CACHE);
|
||||
} else if (0 == strcasecmp(tmpvals[0], "socket")) {
|
||||
OPAL_SET_BINDING_POLICY(opal_hwloc_binding_policy, OPAL_BIND_TO_SOCKET);
|
||||
} else if (0 == strcasecmp(tmpvals[0], "numa")) {
|
||||
OPAL_SET_BINDING_POLICY(opal_hwloc_binding_policy, OPAL_BIND_TO_NUMA);
|
||||
} else if (0 == |