author    Alexander Gordeev <agordeev@linux.ibm.com>    2020-08-13 13:32:48 +0200
committer Arnaldo Carvalho de Melo <acme@redhat.com>    2020-08-13 10:02:27 -0300
commit    2db13a9b30f7e438777eb1a462c4b055ba948b89 (patch)
tree      5ee599d325ed854e4306db262293ca6fcaa85b0e /tools
parent    509f68e327d0c87e9bc93cb138e445c506ae9ce9 (diff)
perf bench numa: Use numa_node_to_cpus() to bind tasks to nodes
It is currently assumed that each node contains at most nr_cpus/nr_nodes
CPUs and that the nodes' CPU ranges do not overlap.

That assumption is generally incorrect, as there are archs where a CPU
number does not depend on its node number.

This update removes the described assumption by simply calling the
numa_node_to_cpus() interface and using the returned mask for binding
CPUs to nodes.

Also, variable types and names are made consistent in the functions
using a cpumask.
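
For illustration, a minimal sketch of the pattern the patch adopts (not
part of the patch itself; it assumes libnuma is available, linking with
-lnuma, and the helper name cpu_set_for_node() is hypothetical):

#define _GNU_SOURCE     /* for CPU_ZERO()/CPU_SET() in <sched.h> */
#include <numa.h>       /* numa_allocate_cpumask(), numa_node_to_cpus() */
#include <sched.h>      /* cpu_set_t */
#include <stdbool.h>

/* Hypothetical helper: translate a node's libnuma CPU mask into a
 * cpu_set_t, mirroring the loop the patch adds to bind_to_node().
 * Returns false if the node's CPU mask cannot be obtained. */
static bool cpu_set_for_node(int node, cpu_set_t *mask)
{
	struct bitmask *cpumask = numa_allocate_cpumask();
	bool ok = false;
	int cpu;

	if (!cpumask)
		return false;

	CPU_ZERO(mask);
	if (!numa_node_to_cpus(node, cpumask)) {
		for (cpu = 0; cpu < (int)cpumask->size; cpu++) {
			if (numa_bitmask_isbitset(cpumask, cpu))
				CPU_SET(cpu, mask);
		}
		ok = true;
	}
	numa_free_cpumask(cpumask);
	return ok;
}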

Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
Reviewed-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Balamuruhan S <bala24@linux.vnet.ibm.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Satheesh Rajendran <sathnaga@linux.vnet.ibm.com>
Link: http://lore.kernel.org/lkml/20200813113247.GA2014@oc3871087118.ibm.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Diffstat (limited to 'tools')
 tools/perf/bench/numa.c | 33 ++++++++++++++++-----------------
 1 file changed, 16 insertions(+), 17 deletions(-)
diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
index 9066511aed47..6d5c890478cb 100644
--- a/tools/perf/bench/numa.c
+++ b/tools/perf/bench/numa.c
@@ -247,20 +247,20 @@ static int is_node_present(int node)
  */
 static bool node_has_cpus(int node)
 {
-	struct bitmask *cpu = numa_allocate_cpumask();
+	struct bitmask *cpumask = numa_allocate_cpumask();
 	bool ret = false; /* fall back to nocpus */
-	unsigned int i;
+	int cpu;
 
-	BUG_ON(!cpu);
-	if (!numa_node_to_cpus(node, cpu)) {
-		for (i = 0; i < cpu->size; i++) {
-			if (numa_bitmask_isbitset(cpu, i)) {
+	BUG_ON(!cpumask);
+	if (!numa_node_to_cpus(node, cpumask)) {
+		for (cpu = 0; cpu < (int)cpumask->size; cpu++) {
+			if (numa_bitmask_isbitset(cpumask, cpu)) {
 				ret = true;
 				break;
 			}
 		}
 	}
-	numa_free_cpumask(cpu);
+	numa_free_cpumask(cpumask);
 
 	return ret;
 }
@@ -293,14 +293,10 @@ static cpu_set_t bind_to_cpu(int target_cpu)
 
 static cpu_set_t bind_to_node(int target_node)
 {
-	int cpus_per_node = g->p.nr_cpus / nr_numa_nodes();
 	cpu_set_t orig_mask, mask;
 	int cpu;
 	int ret;
 
-	BUG_ON(cpus_per_node * nr_numa_nodes() != g->p.nr_cpus);
-	BUG_ON(!cpus_per_node);
-
 	ret = sched_getaffinity(0, sizeof(orig_mask), &orig_mask);
 	BUG_ON(ret);
 
@@ -310,13 +306,16 @@ static cpu_set_t bind_to_node(int target_node)
 		for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
 			CPU_SET(cpu, &mask);
 	} else {
-		int cpu_start = (target_node + 0) * cpus_per_node;
-		int cpu_stop  = (target_node + 1) * cpus_per_node;
-
-		BUG_ON(cpu_stop > g->p.nr_cpus);
+		struct bitmask *cpumask = numa_allocate_cpumask();
 
-		for (cpu = cpu_start; cpu < cpu_stop; cpu++)
-			CPU_SET(cpu, &mask);
+		BUG_ON(!cpumask);
+		if (!numa_node_to_cpus(target_node, cpumask)) {
+			for (cpu = 0; cpu < (int)cpumask->size; cpu++) {
+				if (numa_bitmask_isbitset(cpumask, cpu))
+					CPU_SET(cpu, &mask);
+			}
+		}
+		numa_free_cpumask(cpumask);
 	}
 
 	ret = sched_setaffinity(0, sizeof(mask), &mask);
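
As a usage sketch (a fragment in the context of numa.c, relying on the
fact shown above that bind_to_node() saves and returns the caller's
previous affinity mask; the surrounding caller is hypothetical):

	/* Pin the current task to the CPUs of NUMA node 0. */
	cpu_set_t orig_mask = bind_to_node(0);

	/* ... run the node-local benchmark work here ... */

	/* Restore the affinity that was in effect before binding. */
	BUG_ON(sched_setaffinity(0, sizeof(orig_mask), &orig_mask));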