
hwloc: since WHOLE_SYSTEM is no longer used, remove useless
checks related to offline and disallowed elements

Signed-off-by: Gilles Gouaillardet <gilles@rist.or.jp>
This commit is contained in:
Gilles Gouaillardet 2017-07-19 15:23:52 +09:00
parent 1a34224948
commit 9f29f3bff4
11 changed files with 48 additions and 302 deletions
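All the hunks below apply one pattern: since the topology is no longer loaded with hwloc's WHOLE_SYSTEM flag, objects only contain online, allowed PUs, so opal_hwloc_base_get_available_cpus() / opal_hwloc_base_filter_cpus() and their online/allowed intersections are redundant and plain obj->cpuset is used instead. A minimal standalone sketch of that pattern against the plain hwloc 1.x API (illustration only, not Open MPI code):

#include <hwloc.h>
#include <stdio.h>

int main(void)
{
    hwloc_topology_t topo;
    hwloc_obj_t root;
    char buf[256];

    hwloc_topology_init(&topo);
    /* note: no hwloc_topology_set_flags(topo, HWLOC_TOPOLOGY_FLAG_WHOLE_SYSTEM) */
    hwloc_topology_load(topo);

    root = hwloc_get_root_obj(topo);

    /* old pattern (what this commit removes): available = online & allowed */
    /*   hwloc_bitmap_and(avail, root->online_cpuset, root->allowed_cpuset); */

    /* new pattern: the object's cpuset already is the available set */
    hwloc_bitmap_snprintf(buf, sizeof(buf), root->cpuset);
    printf("available cpus: %s\n", buf);

    hwloc_topology_destroy(topo);
    return 0;
}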


@ -131,7 +131,7 @@ static int get_rsrc_current_binding(char str[OMPI_AFFINITY_STRING_MAX])
/* get our root object */
root = hwloc_get_root_obj(opal_hwloc_topology);
rootset = opal_hwloc_base_get_available_cpus(opal_hwloc_topology, root);
rootset = root->cpuset;
/* get our bindings */
boundset = hwloc_bitmap_alloc();
@ -324,7 +324,7 @@ static int get_layout_current_binding(char str[OMPI_AFFINITY_STRING_MAX])
/* get our root object */
root = hwloc_get_root_obj(opal_hwloc_topology);
rootset = opal_hwloc_base_get_available_cpus(opal_hwloc_topology, root);
rootset = root->cpuset;
/* get our bindings */
boundset = hwloc_bitmap_alloc();
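The two hunks above feed an "are we bound?" comparison against the root cpuset, which now stands in for the old available set. A hedged standalone sketch of that check with plain hwloc (the printed messages are illustrative):

#include <hwloc.h>
#include <stdio.h>

int main(void)
{
    hwloc_topology_t topo;
    hwloc_cpuset_t boundset;
    hwloc_obj_t root;
    char buf[256];

    hwloc_topology_init(&topo);
    hwloc_topology_load(topo);

    root = hwloc_get_root_obj(topo);
    boundset = hwloc_bitmap_alloc();

    /* compare our binding against the root cpuset */
    if (hwloc_get_cpubind(topo, boundset, HWLOC_CPUBIND_PROCESS) < 0 ||
        hwloc_bitmap_isequal(boundset, root->cpuset)) {
        printf("not bound (binding covers the whole machine)\n");
    } else {
        hwloc_bitmap_list_snprintf(buf, sizeof(buf), boundset);
        printf("bound to cpus %s\n", buf);
    }

    hwloc_bitmap_free(boundset);
    hwloc_topology_destroy(topo);
    return 0;
}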


@ -16,7 +16,7 @@
* reserved.
* Copyright (c) 2012-2015 NVIDIA Corporation. All rights reserved.
* Copyright (c) 2012 Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014 Research Organization for Information Science
* Copyright (c) 2014-2017 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* Copyright (c) 2015-2016 Intel, Inc. All rights reserved.
* $COPYRIGHT$
@ -296,7 +296,6 @@ smcuda_btl_first_time_init(mca_btl_smcuda_t *smcuda_btl,
num_mem_nodes > 0 && NULL != opal_process_info.cpuset) {
int numa=0, w;
unsigned n_bound=0;
hwloc_cpuset_t avail;
hwloc_obj_t obj;
/* count the number of NUMA nodes to which we are bound */
@ -306,10 +305,8 @@ smcuda_btl_first_time_init(mca_btl_smcuda_t *smcuda_btl,
OPAL_HWLOC_AVAILABLE))) {
continue;
}
/* get that NUMA node's available cpus */
avail = opal_hwloc_base_get_available_cpus(opal_hwloc_topology, obj);
/* see if we intersect */
if (hwloc_bitmap_intersects(avail, opal_hwloc_my_cpuset)) {
/* see if we intersect with that NUMA node's cpus */
if (hwloc_bitmap_intersects(obj->cpuset, opal_hwloc_my_cpuset)) {
n_bound++;
numa = w;
}
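A sketch of the NUMA-node counting this hunk simplifies, written against plain hwloc 1.x (HWLOC_OBJ_NODE is the 1.x name for NUMA nodes; count_bound_numa_nodes is a made-up helper name): count the NUMA nodes whose cpuset intersects the caller's binding.

#include <hwloc.h>

static unsigned count_bound_numa_nodes(hwloc_topology_t topo,
                                       hwloc_const_cpuset_t mycpus)
{
    unsigned nbound = 0;
    int w, total = hwloc_get_nbobjs_by_type(topo, HWLOC_OBJ_NODE);

    for (w = 0; w < total; w++) {
        hwloc_obj_t obj = hwloc_get_obj_by_type(topo, HWLOC_OBJ_NODE, (unsigned)w);
        /* see if we intersect with that NUMA node's cpus */
        if (NULL != obj && hwloc_bitmap_intersects(obj->cpuset, mycpus)) {
            nbound++;
        }
    }
    return nbound;
}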


@ -1,6 +1,8 @@
/*
* Copyright (c) 2011-2017 Cisco Systems, Inc. All rights reserved
* Copyright (c) 2013-2017 Intel, Inc. All rights reserved.
* Copyright (c) 2017 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -132,9 +134,6 @@ typedef enum {
*/
OPAL_DECLSPEC extern opal_hwloc_base_mbfa_t opal_hwloc_base_mbfa;
/* some critical helper functions */
OPAL_DECLSPEC int opal_hwloc_base_filter_cpus(hwloc_topology_t topo);
/**
* Discover / load the hwloc topology (i.e., call hwloc_topology_init() and
* hwloc_topology_load()).
@ -150,8 +149,6 @@ OPAL_DECLSPEC int opal_hwloc_base_set_topology(char *topofile);
* Free the hwloc topology.
*/
OPAL_DECLSPEC void opal_hwloc_base_free_topology(hwloc_topology_t topo);
OPAL_DECLSPEC hwloc_cpuset_t opal_hwloc_base_get_available_cpus(hwloc_topology_t topo,
hwloc_obj_t obj);
OPAL_DECLSPEC unsigned int opal_hwloc_base_get_nbobjs_by_type(hwloc_topology_t topo,
hwloc_obj_type_t target,
unsigned cache_level,


@ -136,11 +136,6 @@ int opal_hwloc_unpack(opal_buffer_t *buffer, void *dest,
goto cleanup;
}
/* filter the cpus thru any default cpu set */
if (OPAL_SUCCESS != (rc = opal_hwloc_base_filter_cpus(t))) {
goto cleanup;
}
/* pass it back */
tarray[i] = t;
@ -268,18 +263,6 @@ static void print_hwloc_obj(char **output, char *prefix,
free(tmp);
tmp = tmp2;
}
if (NULL != obj->online_cpuset) {
hwloc_bitmap_snprintf(string, OPAL_HWLOC_MAX_STRING, obj->online_cpuset);
asprintf(&tmp2, "%s%sOnline: %s", tmp, pfx, string);
free(tmp);
tmp = tmp2;
}
if (NULL != obj->allowed_cpuset) {
hwloc_bitmap_snprintf(string, OPAL_HWLOC_MAX_STRING, obj->allowed_cpuset);
asprintf(&tmp2, "%s%sAllowed: %s", tmp, pfx, string);
free(tmp);
tmp = tmp2;
}
if (HWLOC_OBJ_MACHINE == obj->type) {
/* root level object - add support values */
support = (struct hwloc_topology_support*)hwloc_topology_get_support(topo);


@ -110,100 +110,6 @@ hwloc_obj_t opal_hwloc_base_get_pu(hwloc_topology_t topo,
return obj;
}
/* determine the node-level available cpuset based on
* online vs allowed vs user-specified cpus
*/
int opal_hwloc_base_filter_cpus(hwloc_topology_t topo)
{
hwloc_obj_t root, pu;
hwloc_cpuset_t avail = NULL, pucpus, res;
opal_hwloc_topo_data_t *sum;
opal_hwloc_obj_data_t *data;
char **ranges=NULL, **range=NULL;
int idx, cpu, start, end;
root = hwloc_get_root_obj(topo);
if (NULL == root->userdata) {
root->userdata = (void*)OBJ_NEW(opal_hwloc_topo_data_t);
}
sum = (opal_hwloc_topo_data_t*)root->userdata;
/* should only ever enter here once, but check anyway */
if (NULL != sum->available) {
return OPAL_SUCCESS;
}
/* process any specified default cpu set against this topology */
if (NULL == opal_hwloc_base_cpu_list) {
/* get the root available cpuset */
avail = hwloc_bitmap_alloc();
hwloc_bitmap_and(avail, root->online_cpuset, root->allowed_cpuset);
OPAL_OUTPUT_VERBOSE((5, opal_hwloc_base_framework.framework_output,
"hwloc:base: no cpus specified - using root available cpuset"));
} else {
OPAL_OUTPUT_VERBOSE((5, opal_hwloc_base_framework.framework_output,
"hwloc:base: filtering cpuset"));
/* find the specified logical cpus */
ranges = opal_argv_split(opal_hwloc_base_cpu_list, ',');
avail = hwloc_bitmap_alloc();
hwloc_bitmap_zero(avail);
res = hwloc_bitmap_alloc();
pucpus = hwloc_bitmap_alloc();
for (idx=0; idx < opal_argv_count(ranges); idx++) {
range = opal_argv_split(ranges[idx], '-');
switch (opal_argv_count(range)) {
case 1:
/* only one cpu given - get that object */
cpu = strtoul(range[0], NULL, 10);
if (NULL != (pu = opal_hwloc_base_get_pu(topo, cpu, OPAL_HWLOC_LOGICAL))) {
hwloc_bitmap_and(pucpus, pu->online_cpuset, pu->allowed_cpuset);
hwloc_bitmap_or(res, avail, pucpus);
hwloc_bitmap_copy(avail, res);
data = (opal_hwloc_obj_data_t*)pu->userdata;
if (NULL == data) {
pu->userdata = (void*)OBJ_NEW(opal_hwloc_obj_data_t);
data = (opal_hwloc_obj_data_t*)pu->userdata;
}
data->npus++;
}
break;
case 2:
/* range given */
start = strtoul(range[0], NULL, 10);
end = strtoul(range[1], NULL, 10);
for (cpu=start; cpu <= end; cpu++) {
if (NULL != (pu = opal_hwloc_base_get_pu(topo, cpu, OPAL_HWLOC_LOGICAL))) {
hwloc_bitmap_and(pucpus, pu->online_cpuset, pu->allowed_cpuset);
hwloc_bitmap_or(res, avail, pucpus);
hwloc_bitmap_copy(avail, res);
data = (opal_hwloc_obj_data_t*)pu->userdata;
if (NULL == data) {
pu->userdata = (void*)OBJ_NEW(opal_hwloc_obj_data_t);
data = (opal_hwloc_obj_data_t*)pu->userdata;
}
data->npus++;
}
}
break;
default:
break;
}
opal_argv_free(range);
}
if (NULL != ranges) {
opal_argv_free(ranges);
}
hwloc_bitmap_free(res);
hwloc_bitmap_free(pucpus);
}
/* cache this info */
sum->available = avail;
return OPAL_SUCCESS;
}
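The removed opal_hwloc_base_filter_cpus() hand-parsed a comma/dash list of logical cpu indexes and and-ed each PU's online and allowed cpusets into an available mask. For illustration only, a much smaller sketch of the same idea using hwloc_bitmap_list_sscanf; note that it treats the numbers as raw bit indexes rather than looking PUs up by logical index as the removed code did, and "0-3,8" is just an example list:

#include <hwloc.h>
#include <stdio.h>

int main(void)
{
    hwloc_topology_t topo;
    hwloc_bitmap_t wanted;
    char buf[256];

    hwloc_topology_init(&topo);
    hwloc_topology_load(topo);

    wanted = hwloc_bitmap_alloc();
    hwloc_bitmap_list_sscanf(wanted, "0-3,8");   /* hypothetical default cpu list */

    /* restrict the request to what the topology actually offers */
    hwloc_bitmap_and(wanted, wanted, hwloc_get_root_obj(topo)->cpuset);

    hwloc_bitmap_list_snprintf(buf, sizeof(buf), wanted);
    printf("usable cpus from the list: %s\n", buf);

    hwloc_bitmap_free(wanted);
    hwloc_topology_destroy(topo);
    return 0;
}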
static void fill_cache_line_size(void)
{
int i = 0, cache_level = 2;
@ -297,11 +203,6 @@ int opal_hwloc_base_get_topology(void)
return OPAL_ERROR;
}
free(val);
/* filter the cpus thru any default cpu set */
if (OPAL_SUCCESS != (rc = opal_hwloc_base_filter_cpus(opal_hwloc_topology))) {
hwloc_topology_destroy(opal_hwloc_topology);
return rc;
}
} else if (NULL == opal_hwloc_base_topo_file) {
if (0 != hwloc_topology_init(&opal_hwloc_topology) ||
0 != hwloc_topology_set_flags(opal_hwloc_topology,
@ -309,9 +210,6 @@ int opal_hwloc_base_get_topology(void)
0 != hwloc_topology_load(opal_hwloc_topology)) {
return OPAL_ERR_NOT_SUPPORTED;
}
if (OPAL_SUCCESS != (rc = opal_hwloc_base_filter_cpus(opal_hwloc_topology))) {
return rc;
}
} else {
if (OPAL_SUCCESS != (rc = opal_hwloc_base_set_topology(opal_hwloc_base_topo_file))) {
return rc;
@ -333,7 +231,6 @@ int opal_hwloc_base_get_topology(void)
int opal_hwloc_base_set_topology(char *topofile)
{
struct hwloc_topology_support *support;
int rc;
OPAL_OUTPUT_VERBOSE((5, opal_hwloc_base_framework.framework_output,
"hwloc:base:set_topology %s", topofile));
@ -375,12 +272,6 @@ int opal_hwloc_base_set_topology(char *topofile)
support->cpubind->set_thisproc_cpubind = true;
support->membind->set_thisproc_membind = true;
/* filter the cpus thru any default cpu set */
rc = opal_hwloc_base_filter_cpus(opal_hwloc_topology);
if (OPAL_SUCCESS != rc) {
return rc;
}
/* fill opal_cache_line_size global with the smallest L1 cache
line size */
fill_cache_line_size();
@ -432,7 +323,6 @@ void opal_hwloc_base_free_topology(hwloc_topology_t topo)
void opal_hwloc_base_get_local_cpuset(void)
{
hwloc_obj_t root;
hwloc_cpuset_t base_cpus;
if (NULL != opal_hwloc_topology) {
if (NULL == opal_hwloc_my_cpuset) {
@ -445,8 +335,7 @@ void opal_hwloc_base_get_local_cpuset(void)
HWLOC_CPUBIND_PROCESS) < 0) {
/* we are not bound - use the root's available cpuset */
root = hwloc_get_root_obj(opal_hwloc_topology);
base_cpus = opal_hwloc_base_get_available_cpus(opal_hwloc_topology, root);
hwloc_bitmap_copy(opal_hwloc_my_cpuset, base_cpus);
hwloc_bitmap_copy(opal_hwloc_my_cpuset, root->cpuset);
}
}
}
@ -474,72 +363,6 @@ int opal_hwloc_base_report_bind_failure(const char *file,
return OPAL_SUCCESS;
}
hwloc_cpuset_t opal_hwloc_base_get_available_cpus(hwloc_topology_t topo,
hwloc_obj_t obj)
{
hwloc_obj_t root;
hwloc_cpuset_t avail, specd=NULL;
opal_hwloc_topo_data_t *rdata;
opal_hwloc_obj_data_t *data;
OPAL_OUTPUT_VERBOSE((10, opal_hwloc_base_framework.framework_output,
"hwloc:base: get available cpus"));
/* get the node-level information */
root = hwloc_get_root_obj(topo);
rdata = (opal_hwloc_topo_data_t*)root->userdata;
/* bozo check */
if (NULL == rdata) {
rdata = OBJ_NEW(opal_hwloc_topo_data_t);
root->userdata = (void*)rdata;
OPAL_OUTPUT_VERBOSE((5, opal_hwloc_base_framework.framework_output,
"hwloc:base:get_available_cpus first time - filtering cpus"));
}
/* are we asking about the root object? */
if (obj == root) {
OPAL_OUTPUT_VERBOSE((5, opal_hwloc_base_framework.framework_output,
"hwloc:base:get_available_cpus root object"));
return rdata->available;
}
/* some hwloc object types don't have cpus */
if (NULL == obj->online_cpuset || NULL == obj->allowed_cpuset) {
return NULL;
}
/* see if we already have this info */
if (NULL == (data = (opal_hwloc_obj_data_t*)obj->userdata)) {
/* nope - create the object */
data = OBJ_NEW(opal_hwloc_obj_data_t);
obj->userdata = (void*)data;
}
/* do we have the cpuset */
if (NULL != data->available) {
return data->available;
}
/* find the available processors on this object */
avail = hwloc_bitmap_alloc();
hwloc_bitmap_and(avail, obj->online_cpuset, obj->allowed_cpuset);
/* filter this against the node-available processors */
if (NULL == rdata->available) {
hwloc_bitmap_free(avail);
return NULL;
}
specd = hwloc_bitmap_alloc();
hwloc_bitmap_and(specd, avail, rdata->available);
/* cache the info */
data->available = specd;
/* cleanup */
hwloc_bitmap_free(avail);
return specd;
}
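The rationale for deleting opal_hwloc_base_get_available_cpus() outright: without WHOLE_SYSTEM, hwloc never inserts offline or disallowed PUs, so every object's cpuset is already contained in the topology's allowed set and the cached per-object "available" mask adds nothing. A small self-check sketch of that assumption:

#include <assert.h>
#include <hwloc.h>

int main(void)
{
    hwloc_topology_t topo;
    hwloc_obj_t pu;

    hwloc_topology_init(&topo);
    hwloc_topology_load(topo);

    /* every PU's cpuset should already be a subset of the allowed set */
    for (pu = hwloc_get_obj_by_type(topo, HWLOC_OBJ_PU, 0);
         pu != NULL; pu = pu->next_cousin) {
        assert(hwloc_bitmap_isincluded(pu->cpuset,
                                       hwloc_topology_get_allowed_cpuset(topo)));
    }

    hwloc_topology_destroy(topo);
    return 0;
}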
static void df_search_cores(hwloc_obj_t obj, unsigned int *cnt)
{
unsigned k;
@ -552,13 +375,6 @@ static void df_search_cores(hwloc_obj_t obj, unsigned int *cnt)
obj->userdata = (void*)data;
}
if (NULL == opal_hwloc_base_cpu_list) {
if (!hwloc_bitmap_intersects(obj->cpuset, obj->allowed_cpuset)) {
/*
* do not count not allowed cores (e.g. cores with zero allowed PU)
* if SMT is enabled, do count cores with at least one allowed hwthread
*/
return;
}
data->npus = 1;
}
*cnt += data->npus;
@ -605,7 +421,6 @@ unsigned int opal_hwloc_base_get_npus(hwloc_topology_t topo,
{
opal_hwloc_obj_data_t *data;
unsigned int cnt = 0;
hwloc_cpuset_t cpuset;
data = (opal_hwloc_obj_data_t*)obj->userdata;
if (NULL == data || !data->npus_calculated) {
@ -629,12 +444,13 @@ unsigned int opal_hwloc_base_get_npus(hwloc_topology_t topo,
df_search_cores(obj, &cnt);
}
} else {
hwloc_cpuset_t cpuset;
/* if we are treating cores as cpus, or the system can't detect
* "cores", then get the available cpuset for this object - this will
* create and store the data
*/
if (NULL == (cpuset = opal_hwloc_base_get_available_cpus(topo, obj))) {
if (NULL == (cpuset = obj->cpuset)) {
return 0;
}
/* count the number of bits that are set - there is
@ -795,7 +611,7 @@ static hwloc_obj_t df_search(hwloc_topology_t topo,
}
/* see if we already know our available cpuset */
if (NULL == data->available) {
data->available = opal_hwloc_base_get_available_cpus(topo, start);
data->available = hwloc_bitmap_dup(start->cpuset);
}
if (NULL != data->available && !hwloc_bitmap_iszero(data->available)) {
if (NULL != num_objs) {
@ -1092,7 +908,6 @@ static int socket_to_cpu_set(char *cpus,
int lower_range, upper_range;
int socket_id;
hwloc_obj_t obj;
hwloc_bitmap_t res;
if ('*' == cpus[0]) {
/* requesting cpumask for ALL sockets */
@ -1100,8 +915,7 @@ static int socket_to_cpu_set(char *cpus,
/* set to all available processors - essentially,
* this specification equates to unbound
*/
res = opal_hwloc_base_get_available_cpus(topo, obj);
hwloc_bitmap_or(cpumask, cpumask, res);
hwloc_bitmap_or(cpumask, cpumask, obj->cpuset);
return OPAL_SUCCESS;
}
@ -1112,8 +926,7 @@ static int socket_to_cpu_set(char *cpus,
socket_id = atoi(range[0]);
obj = opal_hwloc_base_get_obj_by_type(topo, HWLOC_OBJ_SOCKET, 0, socket_id, rtype);
/* get the available cpus for this socket */
res = opal_hwloc_base_get_available_cpus(topo, obj);
hwloc_bitmap_or(cpumask, cpumask, res);
hwloc_bitmap_or(cpumask, cpumask, obj->cpuset);
break;
case 2: /* range of sockets was given */
@ -1122,10 +935,8 @@ static int socket_to_cpu_set(char *cpus,
/* cycle across the range of sockets */
for (socket_id=lower_range; socket_id<=upper_range; socket_id++) {
obj = opal_hwloc_base_get_obj_by_type(topo, HWLOC_OBJ_SOCKET, 0, socket_id, rtype);
/* get the available cpus for this socket */
res = opal_hwloc_base_get_available_cpus(topo, obj);
/* set the corresponding bits in the bitmask */
hwloc_bitmap_or(cpumask, cpumask, res);
/* set the available cpus for this socket in the bitmask */
hwloc_bitmap_or(cpumask, cpumask, obj->cpuset);
}
break;
default:
@ -1149,7 +960,6 @@ static int socket_core_to_cpu_set(char *socket_core_list,
int lower_range, upper_range;
int socket_id, core_id;
hwloc_obj_t socket, core;
hwloc_cpuset_t res;
unsigned int idx;
hwloc_obj_type_t obj_type = HWLOC_OBJ_CORE;
@ -1179,9 +989,8 @@ static int socket_core_to_cpu_set(char *socket_core_list,
corestr = socket_core[i];
}
if ('*' == corestr[0]) {
/* set to all available cpus on this socket */
res = opal_hwloc_base_get_available_cpus(topo, socket);
hwloc_bitmap_or(cpumask, cpumask, res);
/* set to all cpus on this socket */
hwloc_bitmap_or(cpumask, cpumask, socket->cpuset);
/* we are done - already assigned all cores! */
rc = OPAL_SUCCESS;
break;
@ -1205,8 +1014,7 @@ static int socket_core_to_cpu_set(char *socket_core_list,
return OPAL_ERR_NOT_FOUND;
}
/* get the cpus */
res = opal_hwloc_base_get_available_cpus(topo, core);
hwloc_bitmap_or(cpumask, cpumask, res);
hwloc_bitmap_or(cpumask, cpumask, core->cpuset);
}
opal_argv_free(list);
break;
@ -1227,10 +1035,8 @@ static int socket_core_to_cpu_set(char *socket_core_list,
opal_argv_free(socket_core);
return OPAL_ERR_NOT_FOUND;
}
/* get the cpus */
res = opal_hwloc_base_get_available_cpus(topo, core);
/* add them into the result */
hwloc_bitmap_or(cpumask, cpumask, res);
/* get the cpus and add them into the result */
hwloc_bitmap_or(cpumask, cpumask, core->cpuset);
}
break;
@ -1255,7 +1061,6 @@ int opal_hwloc_base_cpu_list_parse(const char *slot_str,
char **item, **rngs;
int rc, i, j, k;
hwloc_obj_t pu;
hwloc_cpuset_t pucpus;
char **range, **list;
size_t range_cnt;
int core_id, lower_range, upper_range;
@ -1349,10 +1154,8 @@ int opal_hwloc_base_cpu_list_parse(const char *slot_str,
opal_argv_free(list);
return OPAL_ERR_SILENT;
}
/* get the available cpus for that object */
pucpus = opal_hwloc_base_get_available_cpus(topo, pu);
/* set that in the mask */
hwloc_bitmap_or(cpumask, cpumask, pucpus);
/* get the cpus for that object and set them in the mask */
hwloc_bitmap_or(cpumask, cpumask, pu->cpuset);
}
opal_argv_free(list);
break;
@ -1368,10 +1171,8 @@ int opal_hwloc_base_cpu_list_parse(const char *slot_str,
opal_argv_free(rngs);
return OPAL_ERR_SILENT;
}
/* get the available cpus for that object */
pucpus = opal_hwloc_base_get_available_cpus(topo, pu);
/* set that in the mask */
hwloc_bitmap_or(cpumask, cpumask, pucpus);
/* get the cpus for that object and set them in the mask */
hwloc_bitmap_or(cpumask, cpumask, pu->cpuset);
}
break;
@ -1396,7 +1197,6 @@ opal_hwloc_locality_t opal_hwloc_base_get_relative_locality(hwloc_topology_t top
opal_hwloc_locality_t locality;
hwloc_obj_t obj;
unsigned depth, d, width, w;
hwloc_cpuset_t avail;
bool shared;
hwloc_obj_type_t type;
int sect1, sect2;
@ -1444,11 +1244,9 @@ opal_hwloc_locality_t opal_hwloc_base_get_relative_locality(hwloc_topology_t top
for (w=0; w < width; w++) {
/* get the object at this depth/index */
obj = hwloc_get_obj_by_depth(topo, d, w);
/* get the available cpuset for this obj */
avail = opal_hwloc_base_get_available_cpus(topo, obj);
/* see if our locations intersect with it */
sect1 = hwloc_bitmap_intersects(avail, loc1);
sect2 = hwloc_bitmap_intersects(avail, loc2);
/* see if our locations intersect with the cpuset for this obj */
sect1 = hwloc_bitmap_intersects(obj->cpuset, loc1);
sect2 = hwloc_bitmap_intersects(obj->cpuset, loc2);
/* if both intersect, then we share this level */
if (sect1 && sect2) {
shared = true;
@ -1864,9 +1662,7 @@ int opal_hwloc_base_cset2str(char *str, int len,
/* if the cpuset includes all available cpus, then we are unbound */
root = hwloc_get_root_obj(topo);
if (NULL == root->userdata) {
opal_hwloc_base_filter_cpus(topo);
} else {
if (NULL != root->userdata) {
sum = (opal_hwloc_topo_data_t*)root->userdata;
if (NULL == sum->available) {
return OPAL_ERROR;
@ -1934,9 +1730,7 @@ int opal_hwloc_base_cset2mapstr(char *str, int len,
/* if the cpuset includes all available cpus, then we are unbound */
root = hwloc_get_root_obj(topo);
if (NULL == root->userdata) {
opal_hwloc_base_filter_cpus(topo);
} else {
if (NULL != root->userdata) {
sum = (opal_hwloc_topo_data_t*)root->userdata;
if (NULL == sum->available) {
return OPAL_ERROR;
@ -2201,7 +1995,7 @@ char* opal_hwloc_base_get_locality_string(hwloc_topology_t topo,
hwloc_obj_t obj;
char *locality=NULL, *tmp, *t2;
unsigned depth, d, width, w;
hwloc_cpuset_t cpuset, avail, result;
hwloc_cpuset_t cpuset, result;
hwloc_obj_type_t type;
/* if this proc is not bound, then there is no locality. We
@ -2249,10 +2043,8 @@ char* opal_hwloc_base_get_locality_string(hwloc_topology_t topo,
for (w=0; w < width; w++) {
/* get the object at this depth/index */
obj = hwloc_get_obj_by_depth(topo, d, w);
/* get the available cpuset for this obj */
avail = opal_hwloc_base_get_available_cpus(topo, obj);
/* see if the location intersects with it */
if (hwloc_bitmap_intersects(avail, cpuset)) {
if (hwloc_bitmap_intersects(obj->cpuset, cpuset)) {
hwloc_bitmap_set(result, w);
}
}
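In the same file, opal_hwloc_base_get_npus() now just counts the bits set in obj->cpuset. A sketch of that counting with plain hwloc 1.x (socket 0 is an arbitrary example object):

#include <hwloc.h>
#include <stdio.h>

int main(void)
{
    hwloc_topology_t topo;
    hwloc_obj_t sock;

    hwloc_topology_init(&topo);
    hwloc_topology_load(topo);

    sock = hwloc_get_obj_by_type(topo, HWLOC_OBJ_SOCKET, 0);
    if (NULL != sock) {
        /* number of usable PUs = number of bits set in the cpuset */
        printf("PUs under socket 0: %d\n", hwloc_bitmap_weight(sock->cpuset));
    }

    hwloc_topology_destroy(topo);
    return 0;
}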


@ -13,7 +13,7 @@
* Copyright (c) 2011-2012 Los Alamos National Security, LLC.
* All rights reserved.
* Copyright (c) 2014-2017 Intel, Inc. All rights reserved.
* Copyright (c) 2014 Research Organization for Information Science
* Copyright (c) 2014-2017 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* $COPYRIGHT$
*
@ -113,7 +113,7 @@ int orte_ess_base_proc_binding(void)
support = (struct hwloc_topology_support*)hwloc_topology_get_support(opal_hwloc_topology);
/* get our node object */
node = hwloc_get_root_obj(opal_hwloc_topology);
nodeset = opal_hwloc_base_get_available_cpus(opal_hwloc_topology, node);
nodeset = node->cpuset;
/* get our bindings */
cpus = hwloc_bitmap_alloc();
if (hwloc_get_cpubind(opal_hwloc_topology, cpus, HWLOC_CPUBIND_PROCESS) < 0) {
@ -191,14 +191,13 @@ int orte_ess_base_proc_binding(void)
error = "Getting hwthread object";
goto error;
}
cpus = opal_hwloc_base_get_available_cpus(opal_hwloc_topology, obj);
cpus = obj->cpuset;
if (0 > hwloc_set_cpubind(opal_hwloc_topology, cpus, 0)) {
ret = ORTE_ERROR;
error = "Setting processor affinity failed";
goto error;
}
hwloc_bitmap_list_asprintf(&orte_process_info.cpuset, cpus);
hwloc_bitmap_free(cpus);
OPAL_OUTPUT_VERBOSE((5, orte_ess_base_framework.framework_output,
"%s Process bound to hwthread",
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME)));
@ -212,7 +211,7 @@ int orte_ess_base_proc_binding(void)
error = "Getting core object";
goto error;
}
cpus = opal_hwloc_base_get_available_cpus(opal_hwloc_topology, obj);
cpus = obj->cpuset;
if (0 > hwloc_set_cpubind(opal_hwloc_topology, cpus, 0)) {
error = "Setting processor affinity failed";
ret = ORTE_ERROR;
@ -256,7 +255,7 @@ int orte_ess_base_proc_binding(void)
continue;
}
/* this is the place! */
cpus = opal_hwloc_base_get_available_cpus(opal_hwloc_topology, obj);
cpus = obj->cpuset;
if (0 > hwloc_set_cpubind(opal_hwloc_topology, cpus, 0)) {
ret = ORTE_ERROR;
error = "Setting processor affinity failed";


@ -944,8 +944,6 @@ void orte_plm_base_daemon_topology(int status, orte_process_name_t* sender,
orted_failed_launch = true;
goto CLEANUP;
}
/* filter the topology as we'll need it that way later */
opal_hwloc_base_filter_cpus(topo);
/* record the final topology */
t->topo = topo;


@ -1,7 +1,7 @@
/*
* Copyright (c) 2011-2017 Cisco Systems, Inc. All rights reserved
* Copyright (c) 2012 Los Alamos National Security, LLC. All rights reserved
* Copyright (c) 2015 Research Organization for Information Science
* Copyright (c) 2015-2017 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* Copyright (c) 2015-2017 Intel, Inc. All rights reserved.
*
@ -205,13 +205,6 @@ static int allocate(orte_job_t *jdata, opal_list_t *nodes)
hwloc_topology_destroy(topo);
goto error_silent;
}
if (OPAL_SUCCESS != opal_hwloc_base_filter_cpus(topo)) {
orte_show_help("help-ras-simulator.txt",
"hwloc API fail", true,
__FILE__, __LINE__, "opal_hwloc_base_filter_cpus");
hwloc_topology_destroy(topo);
goto error_silent;
}
/* remove the hostname from the topology. Unfortunately, hwloc
* decided to add the source hostname to the "topology", thus
* rendering it unusable as a pure topological description. So


@ -13,7 +13,7 @@
* Copyright (c) 2011-2012 Los Alamos National Security, LLC.
* All rights reserved.
* Copyright (c) 2013-2017 Intel, Inc. All rights reserved.
* Copyright (c) 2015 Research Organization for Information Science
* Copyright (c) 2015-2017 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* $COPYRIGHT$
*
@ -133,7 +133,6 @@ static int bind_upwards(orte_job_t *jdata,
orte_job_map_t *map;
orte_proc_t *proc;
hwloc_obj_t obj;
hwloc_cpuset_t cpus;
unsigned int idx, ncpus;
opal_hwloc_obj_data_t *data;
hwloc_obj_t locale;
@ -210,8 +209,7 @@ static int bind_upwards(orte_job_t *jdata,
}
}
/* bind it here */
cpus = opal_hwloc_base_get_available_cpus(node->topology->topo, obj);
hwloc_bitmap_list_asprintf(&cpu_bitmap, cpus);
hwloc_bitmap_list_asprintf(&cpu_bitmap, obj->cpuset);
orte_set_attribute(&proc->attributes, ORTE_PROC_CPU_BITMAP, ORTE_ATTR_GLOBAL, cpu_bitmap, OPAL_STRING);
/* record the location */
orte_set_attribute(&proc->attributes, ORTE_PROC_HWLOC_BOUND, ORTE_ATTR_LOCAL, obj, OPAL_PTR);
@ -250,7 +248,6 @@ static int bind_downwards(orte_job_t *jdata,
orte_job_map_t *map;
orte_proc_t *proc;
hwloc_obj_t trg_obj, nxt_obj;
hwloc_cpuset_t cpus;
unsigned int ncpus;
opal_hwloc_obj_data_t *data;
int total_cpus;
@ -344,8 +341,7 @@ static int bind_downwards(orte_job_t *jdata,
}
}
/* bind the proc here */
cpus = opal_hwloc_base_get_available_cpus(node->topology->topo, trg_obj);
hwloc_bitmap_or(totalcpuset, totalcpuset, cpus);
hwloc_bitmap_or(totalcpuset, totalcpuset, trg_obj->cpuset);
/* track total #cpus */
total_cpus += ncpus;
/* move to the next location, in case we need it */
@ -395,7 +391,6 @@ static int bind_in_place(orte_job_t *jdata,
orte_job_map_t *map;
orte_node_t *node;
orte_proc_t *proc;
hwloc_cpuset_t cpus;
unsigned int idx, ncpus;
struct hwloc_topology_support *support;
opal_hwloc_obj_data_t *data;
@ -566,8 +561,7 @@ static int bind_in_place(orte_job_t *jdata,
ORTE_NAME_PRINT(&proc->name),
hwloc_obj_type_string(locale->type), idx);
/* bind the proc here */
cpus = opal_hwloc_base_get_available_cpus(node->topology->topo, locale);
hwloc_bitmap_list_asprintf(&cpu_bitmap, cpus);
hwloc_bitmap_list_asprintf(&cpu_bitmap, locale->cpuset);
orte_set_attribute(&proc->attributes, ORTE_PROC_CPU_BITMAP, ORTE_ATTR_GLOBAL, cpu_bitmap, OPAL_STRING);
/* update the location, in case it changed */
orte_set_attribute(&proc->attributes, ORTE_PROC_HWLOC_BOUND, ORTE_ATTR_LOCAL, locale, OPAL_PTR);


@ -3,7 +3,7 @@
* Copyright (c) 2011 Los Alamos National Security, LLC.
* All rights reserved.
* Copyright (c) 2014-2017 Intel, Inc. All rights reserved.
* Copyright (c) 2015 Research Organization for Information Science
* Copyright (c) 2015-2017 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* $COPYRIGHT$
*
@ -441,7 +441,7 @@ static void prune(orte_jobid_t jobid,
hwloc_obj_type_t lvl;
unsigned cache_level = 0, k;
int nprocs;
hwloc_cpuset_t avail, cpus, childcpus;
hwloc_cpuset_t avail;
int n, limit, nmax, nunder, idx, idxmax = 0;
orte_proc_t *proc, *pptr, *procmax;
opal_hwloc_level_t ll;
@ -492,7 +492,7 @@ static void prune(orte_jobid_t jobid,
lvl, cache_level,
i, OPAL_HWLOC_AVAILABLE);
/* get the available cpuset */
avail = opal_hwloc_base_get_available_cpus(node->topology->topo, obj);
avail = obj->cpuset;
/* look at the intersection of this object's cpuset and that
* of each proc in the job/app - if they intersect, then count this proc
@ -512,8 +512,7 @@ static void prune(orte_jobid_t jobid,
ORTE_ERROR_LOG(ORTE_ERR_NOT_FOUND);
return;
}
cpus = opal_hwloc_base_get_available_cpus(node->topology->topo, locale);
if (hwloc_bitmap_intersects(avail, cpus)) {
if (hwloc_bitmap_intersects(avail, locale->cpuset)) {
nprocs++;
}
}
@ -550,7 +549,6 @@ static void prune(orte_jobid_t jobid,
/* find the child with the most procs underneath it */
for (k=0; k < top->arity && limit < nprocs; k++) {
/* get this object's available cpuset */
childcpus = opal_hwloc_base_get_available_cpus(node->topology->topo, top->children[k]);
nunder = 0;
pptr = NULL;
for (n=0; n < node->procs->size; n++) {
@ -566,8 +564,7 @@ static void prune(orte_jobid_t jobid,
ORTE_ERROR_LOG(ORTE_ERR_NOT_FOUND);
return;
}
cpus = opal_hwloc_base_get_available_cpus(node->topology->topo, locale);
if (hwloc_bitmap_intersects(childcpus, cpus)) {
if (hwloc_bitmap_intersects(top->children[k]->cpuset, locale->cpuset)) {
nunder++;
if (NULL == pptr) {
/* save the location of the first proc under this object */
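A sketch of the child scan in prune() above: among an object's children, pick the one whose cpuset intersects the most of a set of per-proc locations (busiest_child and the proc_locations array are hypothetical stand-ins for the orte_proc_t bookkeeping):

#include <hwloc.h>

static hwloc_obj_t busiest_child(hwloc_obj_t top,
                                 hwloc_const_cpuset_t *proc_locations,
                                 int nprocs)
{
    hwloc_obj_t best = NULL;
    int best_count = -1;
    unsigned k;

    for (k = 0; k < top->arity; k++) {
        int count = 0, n;
        /* count the procs whose location intersects this child's cpuset */
        for (n = 0; n < nprocs; n++) {
            if (hwloc_bitmap_intersects(top->children[k]->cpuset,
                                        proc_locations[n])) {
                count++;
            }
        }
        if (count > best_count) {
            best_count = count;
            best = top->children[k];
        }
    }
    return best;
}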


@ -362,7 +362,7 @@ int orte_daemon(int argc, char *argv[])
if (NULL != orte_daemon_cores) {
char **cores=NULL, tmp[128];
hwloc_obj_t pu;
hwloc_cpuset_t ours, pucpus, res;
hwloc_cpuset_t ours, res;
int core;
/* could be a collection of comma-delimited ranges, so
@ -372,7 +372,6 @@ int orte_daemon(int argc, char *argv[])
if (NULL != cores) {
ours = hwloc_bitmap_alloc();
hwloc_bitmap_zero(ours);
pucpus = hwloc_bitmap_alloc();
res = hwloc_bitmap_alloc();
for (i=0; NULL != cores[i]; i++) {
core = strtoul(cores[i], NULL, 10);
@ -387,12 +386,10 @@ int orte_daemon(int argc, char *argv[])
orte_daemon_cores);
ret = ORTE_ERR_NOT_SUPPORTED;
hwloc_bitmap_free(ours);
hwloc_bitmap_free(pucpus);
hwloc_bitmap_free(res);
goto DONE;
}
hwloc_bitmap_and(pucpus, pu->online_cpuset, pu->allowed_cpuset);
hwloc_bitmap_or(res, ours, pucpus);
hwloc_bitmap_or(res, ours, pu->cpuset);
hwloc_bitmap_copy(ours, res);
}
/* if the result is all zeros, then don't bind */
@ -406,7 +403,6 @@ int orte_daemon(int argc, char *argv[])
}
/* cleanup */
hwloc_bitmap_free(ours);
hwloc_bitmap_free(pucpus);
hwloc_bitmap_free(res);
opal_argv_free(cores);
}
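Finally, a hedged sketch of the simplified orte_daemon_cores handling: OR the requested PUs' cpusets into one mask and bind only if the result is non-empty (the {0, 1} core list stands in for a parsed orte_daemon_cores value):

#include <hwloc.h>

static int bind_daemon_to_cores(hwloc_topology_t topo)
{
    const unsigned cores[] = { 0, 1 };   /* hypothetical parsed core list */
    hwloc_bitmap_t ours = hwloc_bitmap_alloc();
    unsigned i;
    int rc = 0;

    hwloc_bitmap_zero(ours);
    for (i = 0; i < sizeof(cores) / sizeof(cores[0]); i++) {
        hwloc_obj_t pu = hwloc_get_obj_by_type(topo, HWLOC_OBJ_PU, cores[i]);
        if (NULL == pu) {
            rc = -1;
            goto done;
        }
        hwloc_bitmap_or(ours, ours, pu->cpuset);
    }
    /* if the result is all zeros, then don't bind */
    if (!hwloc_bitmap_iszero(ours)) {
        rc = hwloc_set_cpubind(topo, ours, HWLOC_CPUBIND_PROCESS);
    }
done:
    hwloc_bitmap_free(ours);
    return rc;
}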