/*
 * Copyright IBM Corp. 2007, 2011
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/workqueue.h>
#include <linux/bootmem.h>
#include <linux/uaccess.h>
#include <linux/sysctl.h>
#include <linux/cpuset.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/nodemask.h>
#include <linux/node.h>
#include <asm/sysinfo.h>
#include <asm/numa.h>

#define PTF_HORIZONTAL	(0UL)
#define PTF_VERTICAL	(1UL)
#define PTF_CHECK	(2UL)

enum {
	TOPOLOGY_MODE_HW,
	TOPOLOGY_MODE_SINGLE,
	TOPOLOGY_MODE_PACKAGE,
	TOPOLOGY_MODE_UNINITIALIZED
};

struct mask_info {
	struct mask_info *next;
	unsigned char id;
	cpumask_t mask;
};

static int topology_mode = TOPOLOGY_MODE_UNINITIALIZED;
static void set_topology_timer(void);
static void topology_work_fn(struct work_struct *work);
static struct sysinfo_15_1_x *tl_info;

static DECLARE_WORK(topology_work, topology_work_fn);

/*
 * Socket/Book linked lists and cpu_topology updates are
 * protected by "sched_domains_mutex".
 */
static struct mask_info socket_info;
static struct mask_info book_info;
static struct mask_info drawer_info;

struct cpu_topology_s390 cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);

cpumask_t cpus_with_topology;

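/*
 * Return the cpumask of all cpus that share the container (socket,
 * book or drawer) described by @info with @cpu. Falls back to the cpu
 * itself in TOPOLOGY_MODE_SINGLE mode and to all present cpus in
 * TOPOLOGY_MODE_PACKAGE mode.
 */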
static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
{
	cpumask_t mask;

	cpumask_copy(&mask, cpumask_of(cpu));
	switch (topology_mode) {
	case TOPOLOGY_MODE_HW:
		while (info) {
			if (cpumask_test_cpu(cpu, &info->mask)) {
				mask = info->mask;
				break;
			}
			info = info->next;
		}
		if (cpumask_empty(&mask))
			cpumask_copy(&mask, cpumask_of(cpu));
		break;
	case TOPOLOGY_MODE_PACKAGE:
		cpumask_copy(&mask, cpu_present_mask);
		break;
	default:
		/* fallthrough */
	case TOPOLOGY_MODE_SINGLE:
		cpumask_copy(&mask, cpumask_of(cpu));
		break;
	}
	return mask;
}

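/*
 * Return the mask of all hardware threads that share a core with @cpu.
 * With multithreading enabled, the logical cpus of one core occupy a
 * contiguous block of smp_cpu_mtid + 1 cpu numbers.
 */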
static cpumask_t cpu_thread_map(unsigned int cpu)
{
	cpumask_t mask;
	int i;

	cpumask_copy(&mask, cpumask_of(cpu));
	if (topology_mode != TOPOLOGY_MODE_HW)
		return mask;
	cpu -= cpu % (smp_cpu_mtid + 1);
	for (i = 0; i <= smp_cpu_mtid; i++)
		if (cpu_present(cpu + i))
			cpumask_set_cpu(cpu + i, &mask);
	return mask;
}

#define TOPOLOGY_CORE_BITS	64

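/*
 * Record drawer, book, socket, core and thread ids plus the initial
 * polarization for every logical cpu covered by one core topology-list
 * entry.
 */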
static void add_cpus_to_mask(struct topology_core *tl_core,
			     struct mask_info *drawer,
			     struct mask_info *book,
			     struct mask_info *socket)
{
	struct cpu_topology_s390 *topo;
	unsigned int core;

	for_each_set_bit(core, &tl_core->mask, TOPOLOGY_CORE_BITS) {
		unsigned int rcore;
		int lcpu, i;

		rcore = TOPOLOGY_CORE_BITS - 1 - core + tl_core->origin;
		lcpu = smp_find_processor_id(rcore << smp_cpu_mt_shift);
		if (lcpu < 0)
			continue;
		for (i = 0; i <= smp_cpu_mtid; i++) {
			topo = &cpu_topology[lcpu + i];
			topo->drawer_id = drawer->id;
			topo->book_id = book->id;
			topo->socket_id = socket->id;
			topo->core_id = rcore;
			topo->thread_id = lcpu + i;
			cpumask_set_cpu(lcpu + i, &drawer->mask);
			cpumask_set_cpu(lcpu + i, &book->mask);
			cpumask_set_cpu(lcpu + i, &socket->mask);
			cpumask_set_cpu(lcpu + i, &cpus_with_topology);
			smp_cpu_set_polarization(lcpu + i, tl_core->pp);
		}
	}
}

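/* Clear the cpumasks of the socket, book and drawer mask_info lists. */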
static void clear_masks(void)
{
	struct mask_info *info;

	info = &socket_info;
	while (info) {
		cpumask_clear(&info->mask);
		info = info->next;
	}
	info = &book_info;
	while (info) {
		cpumask_clear(&info->mask);
		info = info->next;
	}
	info = &drawer_info;
	while (info) {
		cpumask_clear(&info->mask);
		info = info->next;
	}
}

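/*
 * Advance to the next topology-list entry: nesting level 0 entries are
 * core entries, all other levels are (smaller) container entries.
 */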
static union topology_entry *next_tle(union topology_entry *tle)
{
	if (!tle->nl)
		return (union topology_entry *)((struct topology_core *)tle + 1);
	return (union topology_entry *)((struct topology_container *)tle + 1);
}

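/*
 * Convert the SYSIB 15.1.x topology information into the socket, book
 * and drawer mask_info lists. Container entries precede the core
 * entries they contain, so a core entry always belongs to the most
 * recently seen containers.
 */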
static void tl_to_masks(struct sysinfo_15_1_x *info)
{
	struct mask_info *socket = &socket_info;
	struct mask_info *book = &book_info;
	struct mask_info *drawer = &drawer_info;
	union topology_entry *tle, *end;

	clear_masks();
	tle = info->tle;
	end = (union topology_entry *)((unsigned long)info + info->length);
	while (tle < end) {
		switch (tle->nl) {
		case 3:
			drawer = drawer->next;
			drawer->id = tle->container.id;
			break;
		case 2:
			book = book->next;
			book->id = tle->container.id;
			break;
		case 1:
			socket = socket->next;
			socket->id = tle->container.id;
			break;
		case 0:
			add_cpus_to_mask(&tle->cpu, drawer, book, socket);
			break;
		default:
			clear_masks();
			return;
		}
		tle = next_tle(tle);
	}
}

static void topology_update_polarization_simple(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		smp_cpu_set_polarization(cpu, POLARIZATION_HRZ);
}

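/*
 * Issue the PTF (perform topology function) instruction with function
 * code @fc and return its condition code. PTF_HORIZONTAL/PTF_VERTICAL
 * request a polarization change, PTF_CHECK polls for a pending
 * topology change report.
 */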
static int ptf(unsigned long fc)
{
	int rc;

	asm volatile(
		"	.insn	rre,0xb9a20000,%1,%1\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (rc)
		: "d" (fc) : "cc");
	return rc;
}

int topology_set_cpu_management(int fc)
{
	int cpu, rc;

	if (!MACHINE_HAS_TOPOLOGY)
		return -EOPNOTSUPP;
	if (fc)
		rc = ptf(PTF_VERTICAL);
	else
		rc = ptf(PTF_HORIZONTAL);
	if (rc)
		return -EBUSY;
	for_each_possible_cpu(cpu)
		smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
	return rc;
}

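/*
 * Recompute the per-cpu thread, core, book and drawer masks and, for
 * the non-hardware topology modes, synthesize the corresponding ids.
 */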
static void update_cpu_masks(void)
{
	struct cpu_topology_s390 *topo;
	int cpu, id;

	for_each_possible_cpu(cpu) {
		topo = &cpu_topology[cpu];
		topo->thread_mask = cpu_thread_map(cpu);
		topo->core_mask = cpu_group_map(&socket_info, cpu);
		topo->book_mask = cpu_group_map(&book_info, cpu);
		topo->drawer_mask = cpu_group_map(&drawer_info, cpu);
		if (topology_mode != TOPOLOGY_MODE_HW) {
			id = topology_mode == TOPOLOGY_MODE_PACKAGE ? 0 : cpu;
			topo->thread_id = cpu;
			topo->core_id = cpu;
			topo->socket_id = id;
			topo->book_id = id;
			topo->drawer_id = id;
			if (cpu_present(cpu))
				cpumask_set_cpu(cpu, &cpus_with_topology);
		}
	}
	numa_update_cpu_topology();
}

void store_topology(struct sysinfo_15_1_x *info)
{
	stsi(info, 15, 1, topology_mnest_limit());
}

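/*
 * Re-read the machine topology, if available, and rebuild all masks.
 * Returns 1 if hardware topology information was used, 0 otherwise.
 */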
static int __arch_update_cpu_topology(void)
{
	struct sysinfo_15_1_x *info = tl_info;
	int rc = 0;

	mutex_lock(&smp_cpu_state_mutex);
	cpumask_clear(&cpus_with_topology);
	if (MACHINE_HAS_TOPOLOGY) {
		rc = 1;
		store_topology(info);
		tl_to_masks(info);
	}
	update_cpu_masks();
	if (!MACHINE_HAS_TOPOLOGY)
		topology_update_polarization_simple();
	mutex_unlock(&smp_cpu_state_mutex);
	return rc;
}

int arch_update_cpu_topology(void)
{
	struct device *dev;
	int cpu, rc;

	rc = __arch_update_cpu_topology();
	for_each_online_cpu(cpu) {
		dev = get_cpu_device(cpu);
		kobject_uevent(&dev->kobj, KOBJ_CHANGE);
	}
	return rc;
}

static void topology_work_fn(struct work_struct *work)
{
	rebuild_sched_domains();
}

void topology_schedule_update(void)
{
	schedule_work(&topology_work);
}

static void topology_flush_work(void)
{
	flush_work(&topology_work);
}

static void topology_timer_fn(unsigned long ignored)
{
	if (ptf(PTF_CHECK))
		topology_schedule_update();
	set_topology_timer();
}

static struct timer_list topology_timer =
	TIMER_DEFERRED_INITIALIZER(topology_timer_fn, 0, 0);

static atomic_t topology_poll = ATOMIC_INIT(0);

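/*
 * Poll for topology changes every 100 ms while topology_poll is
 * positive (a change was recently requested, see
 * topology_expect_change()), otherwise once a minute.
 */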
static void set_topology_timer(void)
{
	if (atomic_add_unless(&topology_poll, -1, 0))
		mod_timer(&topology_timer, jiffies + HZ / 10);
	else
		mod_timer(&topology_timer, jiffies + HZ * 60);
}

void topology_expect_change(void)
{
	if (!MACHINE_HAS_TOPOLOGY)
		return;
	/* This is racy, but it doesn't matter since it is just a heuristic.
	 * Worst case is that we poll in a higher frequency for a bit longer.
	 */
	if (atomic_read(&topology_poll) > 60)
		return;
	atomic_add(60, &topology_poll);
	set_topology_timer();
}

static int cpu_management;

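/*
 * The "dispatching" attribute at /sys/devices/system/cpu/dispatching:
 * reading returns the current mode, writing 0 requests horizontal and
 * 1 vertical cpu polarization.
 */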
static ssize_t dispatching_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", cpu_management);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

static ssize_t dispatching_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t count)
{
	int val, rc;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	rc = 0;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	if (cpu_management == val)
		goto out;
	rc = topology_set_cpu_management(val);
	if (rc)
		goto out;
	cpu_management = val;
	topology_expect_change();
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return rc ? rc : count;
}
static DEVICE_ATTR(dispatching, 0644, dispatching_show,
		   dispatching_store);

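/*
 * Per-cpu "polarization" attribute: reports horizontal,
 * vertical:{low,medium,high} or unknown for each cpu.
 */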
static ssize_t cpu_polarization_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	int cpu = dev->id;
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	switch (smp_cpu_get_polarization(cpu)) {
	case POLARIZATION_HRZ:
		count = sprintf(buf, "horizontal\n");
		break;
	case POLARIZATION_VL:
		count = sprintf(buf, "vertical:low\n");
		break;
	case POLARIZATION_VM:
		count = sprintf(buf, "vertical:medium\n");
		break;
	case POLARIZATION_VH:
		count = sprintf(buf, "vertical:high\n");
		break;
	default:
		count = sprintf(buf, "unknown\n");
		break;
	}
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}
static DEVICE_ATTR(polarization, 0444, cpu_polarization_show, NULL);

static struct attribute *topology_cpu_attrs[] = {
	&dev_attr_polarization.attr,
	NULL,
};

static struct attribute_group topology_cpu_attr_group = {
	.attrs = topology_cpu_attrs,
};

int topology_cpu_init(struct cpu *cpu)
{
	return sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group);
}

static const struct cpumask *cpu_thread_mask(int cpu)
{
	return &cpu_topology[cpu].thread_mask;
}

const struct cpumask *cpu_coregroup_mask(int cpu)
{
	return &cpu_topology[cpu].core_mask;
}

static const struct cpumask *cpu_book_mask(int cpu)
{
	return &cpu_topology[cpu].book_mask;
}

static const struct cpumask *cpu_drawer_mask(int cpu)
{
	return &cpu_topology[cpu].drawer_mask;
}

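/*
 * Scheduling domain hierarchy, smallest unit first: SMT threads,
 * cores (MC), books, drawers and finally all cpus (DIE).
 */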
static struct sched_domain_topology_level s390_topology[] = {
	{ cpu_thread_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
	{ cpu_book_mask, SD_INIT_NAME(BOOK) },
	{ cpu_drawer_mask, SD_INIT_NAME(DRAWER) },
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};

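/*
 * Allocate one mask_info structure per possible container at the
 * nesting level selected by @offset; the worst-case count is the
 * product of the relevant SYSIB 15.1.x magnitude (mag) fields.
 */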
static void __init alloc_masks(struct sysinfo_15_1_x *info,
			       struct mask_info *mask, int offset)
{
	int i, nr_masks;

	nr_masks = info->mag[TOPOLOGY_NR_MAG - offset];
	for (i = 0; i < info->mnest - offset; i++)
		nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
	nr_masks = max(nr_masks, 1);
	for (i = 0; i < nr_masks; i++) {
		mask->next = memblock_virt_alloc(sizeof(*mask->next), 8);
		mask = mask->next;
	}
}

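/*
 * Early initialization: register the scheduling domain hierarchy,
 * select the topology mode and read the initial machine topology.
 */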
void __init topology_init_early(void)
{
	struct sysinfo_15_1_x *info;

	set_sched_topology(s390_topology);
	if (topology_mode == TOPOLOGY_MODE_UNINITIALIZED) {
		if (MACHINE_HAS_TOPOLOGY)
			topology_mode = TOPOLOGY_MODE_HW;
		else
			topology_mode = TOPOLOGY_MODE_SINGLE;
	}
	if (!MACHINE_HAS_TOPOLOGY)
		goto out;
	tl_info = memblock_virt_alloc(PAGE_SIZE, PAGE_SIZE);
	info = tl_info;
	store_topology(info);
	pr_info("The CPU configuration topology of the machine is: %d %d %d %d %d %d / %d\n",
		info->mag[0], info->mag[1], info->mag[2], info->mag[3],
		info->mag[4], info->mag[5], info->mnest);
	alloc_masks(info, &socket_info, 1);
	alloc_masks(info, &book_info, 2);
	alloc_masks(info, &drawer_info, 3);
out:
	__arch_update_cpu_topology();
}

static inline int topology_get_mode(int enabled)
{
	if (!enabled)
		return TOPOLOGY_MODE_SINGLE;
	return MACHINE_HAS_TOPOLOGY ? TOPOLOGY_MODE_HW : TOPOLOGY_MODE_PACKAGE;
}

static inline int topology_is_enabled(void)
{
	return topology_mode != TOPOLOGY_MODE_SINGLE;
}

static int __init topology_setup(char *str)
{
	bool enabled;
	int rc;

	rc = kstrtobool(str, &enabled);
	if (rc)
		return rc;
	topology_mode = topology_get_mode(enabled);
	return 0;
}
early_param("topology", topology_setup);

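/*
 * Handler for the /proc/sys/s390/topology sysctl (see the tables
 * below): reads return "0" or "1", writing "0" or "1" switches the
 * topology mode and schedules an update, e.g.:
 *	echo 1 > /proc/sys/s390/topology
 */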
static int topology_ctl_handler(struct ctl_table *ctl, int write,
				void __user *buffer, size_t *lenp, loff_t *ppos)
{
	unsigned int len;
	int new_mode;
	char buf[2];

	if (!*lenp || *ppos) {
		*lenp = 0;
		return 0;
	}
	if (!write) {
		strncpy(buf, topology_is_enabled() ? "1\n" : "0\n",
			ARRAY_SIZE(buf));
		len = strnlen(buf, ARRAY_SIZE(buf));
		if (len > *lenp)
			len = *lenp;
		if (copy_to_user(buffer, buf, len))
			return -EFAULT;
		goto out;
	}
	len = *lenp;
	if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len))
		return -EFAULT;
	if (buf[0] != '0' && buf[0] != '1')
		return -EINVAL;
	mutex_lock(&smp_cpu_state_mutex);
	new_mode = topology_get_mode(buf[0] == '1');
	if (topology_mode != new_mode) {
		topology_mode = new_mode;
		topology_schedule_update();
	}
	mutex_unlock(&smp_cpu_state_mutex);
	topology_flush_work();
out:
	*lenp = len;
	*ppos += len;
	return 0;
}

static struct ctl_table topology_ctl_table[] = {
	{
		.procname	= "topology",
		.mode		= 0644,
		.proc_handler	= topology_ctl_handler,
	},
	{ },
};

static struct ctl_table topology_dir_table[] = {
	{
		.procname	= "s390",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= topology_ctl_table,
	},
	{ },
};

static int __init topology_init(void)
{
	if (MACHINE_HAS_TOPOLOGY)
		set_topology_timer();
	else
		topology_update_polarization_simple();
	register_sysctl_table(topology_dir_table);
	return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching);
}
device_initcall(topology_init);