author    Heiko Carstens <heiko.carstens@de.ibm.com>  2012-11-12 10:03:25 +0100
committer Martin Schwidefsky <schwidefsky@de.ibm.com> 2012-11-23 11:14:31 +0100
commit    d1e57508fbd63b340788afe6f2c74a608603e714 (patch)
tree      a4f32399d06ea644f3a6cb6f02b87ad9082b3bb6 /arch/s390/kernel
parent    0a4ccc992978ef552dc86ac68bc1ec62cf268e2a (diff)
download  linux-d1e57508fbd63b340788afe6f2c74a608603e714.tar.gz
s390/topology: cleanup topology code
Mainly merge the various per-cpu arrays into a single array which
holds all topology information per logical cpu.
Also fix the broken core vs. socket variable naming and simplify the
locking a bit.
When running in environments without topology information, also
invent book, socket and core ids so that not all ids are zero.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'arch/s390/kernel')
-rw-r--r--  arch/s390/kernel/topology.c | 113
1 file changed, 54 insertions(+), 59 deletions(-)
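
Note: the per-cpu arrays removed below (cpu_core_map, cpu_core_id, cpu_socket_id,
cpu_book_map, cpu_book_id) are folded into the single cpu_topology[] array. The
struct itself is declared in arch/s390/include/asm/topology.h, which is outside
this diffstat; the sketch below is inferred only from the fields this patch
touches, so the exact types and field order are assumptions, not the
authoritative definition.

	/*
	 * Inferred layout of the consolidated per-cpu topology entry.
	 * Field widths follow the removed per-cpu arrays; the real header
	 * may use wider types.
	 */
	struct cpu_topology_s390 {
		unsigned char core_id;		/* replaces cpu_core_id[] */
		unsigned char socket_id;	/* replaces cpu_socket_id[] */
		unsigned char book_id;		/* replaces cpu_book_id[] */
		cpumask_t core_mask;		/* replaces cpu_core_map[] */
		cpumask_t book_mask;		/* replaces cpu_book_map[] */
	};
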
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index dd55f7c20104..f1aba87cceb8 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -29,48 +29,38 @@ struct mask_info {
 	cpumask_t mask;
 };
 
-static int topology_enabled = 1;
+static void set_topology_timer(void);
 static void topology_work_fn(struct work_struct *work);
 static struct sysinfo_15_1_x *tl_info;
-static void set_topology_timer(void);
-static DECLARE_WORK(topology_work, topology_work_fn);
-/* topology_lock protects the core linked list */
-static DEFINE_SPINLOCK(topology_lock);
 
-static struct mask_info core_info;
-cpumask_t cpu_core_map[NR_CPUS];
-unsigned char cpu_core_id[NR_CPUS];
-unsigned char cpu_socket_id[NR_CPUS];
+static int topology_enabled = 1;
+static DECLARE_WORK(topology_work, topology_work_fn);
 
+/* topology_lock protects the socket and book linked lists */
+static DEFINE_SPINLOCK(topology_lock);
+static struct mask_info socket_info;
 static struct mask_info book_info;
-cpumask_t cpu_book_map[NR_CPUS];
-unsigned char cpu_book_id[NR_CPUS];
+
+struct cpu_topology_s390 cpu_topology[NR_CPUS];
 
 static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
 {
 	cpumask_t mask;
 
-	cpumask_clear(&mask);
-	if (!topology_enabled || !MACHINE_HAS_TOPOLOGY) {
-		cpumask_copy(&mask, cpumask_of(cpu));
+	cpumask_copy(&mask, cpumask_of(cpu));
+	if (!topology_enabled || !MACHINE_HAS_TOPOLOGY)
 		return mask;
+	for (; info; info = info->next) {
+		if (cpumask_test_cpu(cpu, &info->mask))
+			return info->mask;
 	}
-	while (info) {
-		if (cpumask_test_cpu(cpu, &info->mask)) {
-			mask = info->mask;
-			break;
-		}
-		info = info->next;
-	}
-	if (cpumask_empty(&mask))
-		cpumask_copy(&mask, cpumask_of(cpu));
 	return mask;
 }
 
 static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
 					  struct mask_info *book,
-					  struct mask_info *core,
-					  int one_core_per_cpu)
+					  struct mask_info *socket,
+					  int one_socket_per_cpu)
 {
 	unsigned int cpu;
 
@@ -80,28 +70,28 @@ static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
 
 		rcpu = TOPOLOGY_CPU_BITS - 1 - cpu + tl_cpu->origin;
 		lcpu = smp_find_processor_id(rcpu);
-		if (lcpu >= 0) {
-			cpumask_set_cpu(lcpu, &book->mask);
-			cpu_book_id[lcpu] = book->id;
-			cpumask_set_cpu(lcpu, &core->mask);
-			cpu_core_id[lcpu] = rcpu;
-			if (one_core_per_cpu) {
-				cpu_socket_id[lcpu] = rcpu;
-				core = core->next;
-			} else {
-				cpu_socket_id[lcpu] = core->id;
-			}
-			smp_cpu_set_polarization(lcpu, tl_cpu->pp);
+		if (lcpu < 0)
+			continue;
+		cpumask_set_cpu(lcpu, &book->mask);
+		cpu_topology[lcpu].book_id = book->id;
+		cpumask_set_cpu(lcpu, &socket->mask);
+		cpu_topology[lcpu].core_id = rcpu;
+		if (one_socket_per_cpu) {
+			cpu_topology[lcpu].socket_id = rcpu;
+			socket = socket->next;
+		} else {
+			cpu_topology[lcpu].socket_id = socket->id;
 		}
+		smp_cpu_set_polarization(lcpu, tl_cpu->pp);
 	}
-	return core;
+	return socket;
 }
 
 static void clear_masks(void)
 {
 	struct mask_info *info;
 
-	info = &core_info;
+	info = &socket_info;
 	while (info) {
 		cpumask_clear(&info->mask);
 		info = info->next;
@@ -120,9 +110,9 @@ static union topology_entry *next_tle(union topology_entry *tle)
 	return (union topology_entry *)((struct topology_container *)tle + 1);
 }
 
-static void __tl_to_cores_generic(struct sysinfo_15_1_x *info)
+static void __tl_to_masks_generic(struct sysinfo_15_1_x *info)
 {
-	struct mask_info *core = &core_info;
+	struct mask_info *socket = &socket_info;
 	struct mask_info *book = &book_info;
 	union topology_entry *tle, *end;
 
@@ -135,11 +125,11 @@ static void __tl_to_cores_generic(struct sysinfo_15_1_x *info)
 			book->id = tle->container.id;
 			break;
 		case 1:
-			core = core->next;
-			core->id = tle->container.id;
+			socket = socket->next;
+			socket->id = tle->container.id;
 			break;
 		case 0:
-			add_cpus_to_mask(&tle->cpu, book, core, 0);
+			add_cpus_to_mask(&tle->cpu, book, socket, 0);
 			break;
 		default:
 			clear_masks();
@@ -149,9 +139,9 @@ static void __tl_to_cores_generic(struct sysinfo_15_1_x *info)
 	}
 }
 
-static void __tl_to_cores_z10(struct sysinfo_15_1_x *info)
+static void __tl_to_masks_z10(struct sysinfo_15_1_x *info)
 {
-	struct mask_info *core = &core_info;
+	struct mask_info *socket = &socket_info;
 	struct mask_info *book = &book_info;
 	union topology_entry *tle, *end;
 
@@ -164,7 +154,7 @@ static void __tl_to_cores_z10(struct sysinfo_15_1_x *info)
 			book->id = tle->container.id;
 			break;
 		case 0:
-			core = add_cpus_to_mask(&tle->cpu, book, core, 1);
+			socket = add_cpus_to_mask(&tle->cpu, book, socket, 1);
 			break;
 		default:
 			clear_masks();
@@ -174,20 +164,20 @@ static void __tl_to_cores_z10(struct sysinfo_15_1_x *info)
 	}
 }
 
-static void tl_to_cores(struct sysinfo_15_1_x *info)
+static void tl_to_masks(struct sysinfo_15_1_x *info)
 {
 	struct cpuid cpu_id;
 
-	get_cpu_id(&cpu_id);
 	spin_lock_irq(&topology_lock);
+	get_cpu_id(&cpu_id);
 	clear_masks();
 	switch (cpu_id.machine) {
 	case 0x2097:
 	case 0x2098:
-		__tl_to_cores_z10(info);
+		__tl_to_masks_z10(info);
 		break;
 	default:
-		__tl_to_cores_generic(info);
+		__tl_to_masks_generic(info);
 	}
 	spin_unlock_irq(&topology_lock);
 }
@@ -232,15 +222,20 @@ int topology_set_cpu_management(int fc)
 	return rc;
 }
 
-static void update_cpu_core_map(void)
+static void update_cpu_masks(void)
 {
 	unsigned long flags;
 	int cpu;
 
 	spin_lock_irqsave(&topology_lock, flags);
 	for_each_possible_cpu(cpu) {
-		cpu_core_map[cpu] = cpu_group_map(&core_info, cpu);
-		cpu_book_map[cpu] = cpu_group_map(&book_info, cpu);
+		cpu_topology[cpu].core_mask = cpu_group_map(&socket_info, cpu);
+		cpu_topology[cpu].book_mask = cpu_group_map(&book_info, cpu);
+		if (!MACHINE_HAS_TOPOLOGY) {
+			cpu_topology[cpu].core_id = cpu;
+			cpu_topology[cpu].socket_id = cpu;
+			cpu_topology[cpu].book_id = cpu;
+		}
 	}
 	spin_unlock_irqrestore(&topology_lock, flags);
 }
@@ -260,13 +255,13 @@ int arch_update_cpu_topology(void)
 	int cpu;
 
 	if (!MACHINE_HAS_TOPOLOGY) {
-		update_cpu_core_map();
+		update_cpu_masks();
 		topology_update_polarization_simple();
 		return 0;
 	}
 	store_topology(info);
-	tl_to_cores(info);
-	update_cpu_core_map();
+	tl_to_masks(info);
+	update_cpu_masks();
 	for_each_online_cpu(cpu) {
 		dev = get_cpu_device(cpu);
 		kobject_uevent(&dev->kobj, KOBJ_CHANGE);
@@ -355,7 +350,7 @@ void __init s390_init_cpu_topology(void)
 	for (i = 0; i < TOPOLOGY_NR_MAG; i++)
 		printk(KERN_CONT " %d", info->mag[i]);
 	printk(KERN_CONT " / %d\n", info->mnest);
-	alloc_masks(info, &core_info, 1);
+	alloc_masks(info, &socket_info, 1);
 	alloc_masks(info, &book_info, 2);
 }
 
@@ -454,7 +449,7 @@ static int __init topology_init(void)
 	}
 	set_topology_timer();
 out:
-	update_cpu_core_map();
+	update_cpu_masks();
 	return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching);
 }
 device_initcall(topology_init);
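
With all per-cpu topology state in one array, the accessors used by the
scheduler and the topology sysfs code can simply index cpu_topology[]. The
macros below only illustrate how such accessors typically look on s390; the
corresponding header change is not part of this diffstat, so the exact names
and definitions are an assumption rather than a quote from the patch.

	/* Illustration only: assumed accessor macros on top of cpu_topology[] */
	#define topology_physical_package_id(cpu)  (cpu_topology[cpu].socket_id)
	#define topology_core_id(cpu)              (cpu_topology[cpu].core_id)
	#define topology_core_cpumask(cpu)         (&cpu_topology[cpu].core_mask)
	#define topology_book_id(cpu)              (cpu_topology[cpu].book_id)
	#define topology_book_cpumask(cpu)         (&cpu_topology[cpu].book_mask)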