// SPDX-License-Identifier: GPL-2.0
/*
 * Arch specific cpu topology information
 *
 * Copyright (C) 2016, ARM Ltd.
 * Written by: Juri Lelli, ARM Ltd.
 */

#include <linux/acpi.h>
#include <linux/cacheinfo.h>
#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/sched/topology.h>
#include <linux/cpuset.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/units.h>
#define CREATE_TRACE_POINTS
#include <trace/events/thermal_pressure.h>

static DEFINE_PER_CPU(struct scale_freq_data __rcu *, sft_data);
static struct cpumask scale_freq_counters_mask;
static bool scale_freq_invariant;
DEFINE_PER_CPU(unsigned long, capacity_freq_ref) = 1;
EXPORT_PER_CPU_SYMBOL_GPL(capacity_freq_ref);

static bool supports_scale_freq_counters(const struct cpumask *cpus)
{
	return cpumask_subset(cpus, &scale_freq_counters_mask);
}

bool topology_scale_freq_invariant(void)
{
	return cpufreq_supports_freq_invariance() ||
	       supports_scale_freq_counters(cpu_online_mask);
}
static void update_scale_freq_invariant(bool status)
{
	if (scale_freq_invariant == status)
		return;

	/*
	 * Task scheduler behavior depends on frequency invariance support,
	 * either cpufreq or counter driven. If the support status changes as
	 * a result of counter initialisation and use, retrigger the build of
	 * scheduling domains to ensure the information is propagated properly.
	 */
	if (topology_scale_freq_invariant() == status) {
		scale_freq_invariant = status;
		rebuild_sched_domains_energy();
	}
}
void topology_set_scale_freq_source(struct scale_freq_data *data,
				    const struct cpumask *cpus)
{
	struct scale_freq_data *sfd;
	int cpu;

	/*
	 * Avoid calling rebuild_sched_domains() unnecessarily if FIE is
	 * supported by cpufreq.
	 */
	if (cpumask_empty(&scale_freq_counters_mask))
		scale_freq_invariant = topology_scale_freq_invariant();

	rcu_read_lock();

	for_each_cpu(cpu, cpus) {
		sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu));

		/* Use ARCH provided counters whenever possible */
		if (!sfd || sfd->source != SCALE_FREQ_SOURCE_ARCH) {
			rcu_assign_pointer(per_cpu(sft_data, cpu), data);
			cpumask_set_cpu(cpu, &scale_freq_counters_mask);
		}
	}

	rcu_read_unlock();

	update_scale_freq_invariant(true);
}
EXPORT_SYMBOL_GPL(topology_set_scale_freq_source);
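/*
 * Illustrative sketch (not part of this file): an architecture with per-CPU
 * activity counters could hook into the FIE machinery roughly as below. The
 * hypothetical amu_set_freq_scale() helper and the counter_cpus mask are
 * assumptions for the example only; the helper would derive the current/max
 * frequency ratio from the counters and update arch_freq_scale.
 *
 *	static struct scale_freq_data amu_sfd = {
 *		.source		= SCALE_FREQ_SOURCE_ARCH,
 *		.set_freq_scale	= amu_set_freq_scale,
 *	};
 *
 *	topology_set_scale_freq_source(&amu_sfd, counter_cpus);
 *	...
 *	topology_clear_scale_freq_source(SCALE_FREQ_SOURCE_ARCH, counter_cpus);
 *
 * topology_scale_freq_tick() then calls ->set_freq_scale() on each scheduler
 * tick for CPUs present in scale_freq_counters_mask.
 */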
void topology_clear_scale_freq_source(enum scale_freq_source source,
				      const struct cpumask *cpus)
{
	struct scale_freq_data *sfd;
	int cpu;

	rcu_read_lock();

	for_each_cpu(cpu, cpus) {
		sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu));

		if (sfd && sfd->source == source) {
			rcu_assign_pointer(per_cpu(sft_data, cpu), NULL);
			cpumask_clear_cpu(cpu, &scale_freq_counters_mask);
		}
	}

	rcu_read_unlock();

	/*
	 * Make sure all references to previous sft_data are dropped to avoid
	 * use-after-free races.
	 */
	synchronize_rcu();

	update_scale_freq_invariant(false);
}
EXPORT_SYMBOL_GPL(topology_clear_scale_freq_source);
void topology_scale_freq_tick(void)
{
	struct scale_freq_data *sfd = rcu_dereference_sched(*this_cpu_ptr(&sft_data));

	if (sfd)
		sfd->set_freq_scale();
}

DEFINE_PER_CPU(unsigned long, arch_freq_scale) = SCHED_CAPACITY_SCALE;
EXPORT_PER_CPU_SYMBOL_GPL(arch_freq_scale);
void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq,
			     unsigned long max_freq)
{
	unsigned long scale;
	int i;

	if (WARN_ON_ONCE(!cur_freq || !max_freq))
		return;

	/*
	 * If the use of counters for FIE is enabled, just return as we don't
	 * want to update the scale factor with information from CPUFREQ.
	 * Instead the scale factor will be updated from arch_scale_freq_tick.
	 */
	if (supports_scale_freq_counters(cpus))
		return;

	scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq;

	for_each_cpu(i, cpus)
		per_cpu(arch_freq_scale, i) = scale;
}
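/*
 * Worked example for the scale computed by topology_set_freq_scale() above
 * (illustrative numbers): with cur_freq = 1200000 kHz and
 * max_freq = 2000000 kHz, scale = (1200000 << SCHED_CAPACITY_SHIFT) / 2000000
 * = 614, i.e. roughly 60% of SCHED_CAPACITY_SCALE (1024).
 */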
DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
EXPORT_PER_CPU_SYMBOL_GPL(cpu_scale);

void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
{
	per_cpu(cpu_scale, cpu) = capacity;
}

DEFINE_PER_CPU(unsigned long, thermal_pressure);
/**
 * topology_update_thermal_pressure() - Update thermal pressure for CPUs
 * @cpus        : The related CPUs for which capacity has been reduced
 * @capped_freq : The maximum allowed frequency that CPUs can run at
 *
 * Update the value of thermal pressure for all @cpus in the mask. The
 * cpumask should include all (online+offline) affected CPUs, to avoid
 * operating on stale data when hot-plug is used for some CPUs. The
 * @capped_freq reflects the currently allowed max CPUs frequency due to
 * thermal capping. It might also be a boost frequency value, which is bigger
 * than the internal 'capacity_freq_ref' max frequency. In such a case the
 * pressure value should simply be removed, since this is an indication that
 * there is no thermal throttling. The @capped_freq must be provided in kHz.
 */
void topology_update_thermal_pressure(const struct cpumask *cpus,
				      unsigned long capped_freq)
{
	unsigned long max_capacity, capacity, th_pressure;
	u32 max_freq;
	int cpu;

	cpu = cpumask_first(cpus);
	max_capacity = arch_scale_cpu_capacity(cpu);
	max_freq = arch_scale_freq_ref(cpu);

	/*
	 * Handle boost frequencies properly: they should simply clear the
	 * thermal pressure value.
	 */
	if (max_freq <= capped_freq)
		capacity = max_capacity;
	else
		capacity = mult_frac(max_capacity, capped_freq, max_freq);

	th_pressure = max_capacity - capacity;

	trace_thermal_pressure_update(cpu, th_pressure);

	for_each_cpu(cpu, cpus)
		WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure);
}
EXPORT_SYMBOL_GPL(topology_update_thermal_pressure);
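/*
 * Worked example (illustrative numbers): for a CPU with
 * arch_scale_cpu_capacity() == 1024 and arch_scale_freq_ref() == 2000000 kHz,
 * a thermal cap of capped_freq = 1500000 kHz gives
 * capacity = mult_frac(1024, 1500000, 2000000) = 768 and therefore
 * th_pressure = 1024 - 768 = 256. Any capped_freq at or above 2000000 kHz
 * clears the pressure (th_pressure = 0).
 */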
static ssize_t cpu_capacity_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);

	return sysfs_emit(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id));
}

static void update_topology_flags_workfn(struct work_struct *work);
static DECLARE_WORK(update_topology_flags_work, update_topology_flags_workfn);

static DEVICE_ATTR_RO(cpu_capacity);
static int cpu_capacity_sysctl_add(unsigned int cpu)
{
	struct device *cpu_dev = get_cpu_device(cpu);

	if (!cpu_dev)
		return -ENOENT;

	device_create_file(cpu_dev, &dev_attr_cpu_capacity);

	return 0;
}

static int cpu_capacity_sysctl_remove(unsigned int cpu)
{
	struct device *cpu_dev = get_cpu_device(cpu);

	if (!cpu_dev)
		return -ENOENT;

	device_remove_file(cpu_dev, &dev_attr_cpu_capacity);

	return 0;
}

static int register_cpu_capacity_sysctl(void)
{
	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "topology/cpu-capacity",
			  cpu_capacity_sysctl_add, cpu_capacity_sysctl_remove);

	return 0;
}
subsys_initcall(register_cpu_capacity_sysctl);
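/*
 * The attribute registered above is expected to show up as a per-CPU sysfs
 * file, e.g. (illustrative path and value):
 *
 *	$ cat /sys/devices/system/cpu/cpu4/cpu_capacity
 *	1024
 */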
static int update_topology;

int topology_update_cpu_topology(void)
{
	return update_topology;
}

/*
 * Updating the sched_domains can't be done directly from cpufreq callbacks
 * due to locking, so queue the work for later.
 */
static void update_topology_flags_workfn(struct work_struct *work)
{
	update_topology = 1;
	rebuild_sched_domains();
	pr_debug("sched_domain hierarchy rebuilt, flags updated\n");
	update_topology = 0;
}

static u32 *raw_capacity;

static int free_raw_capacity(void)
{
	kfree(raw_capacity);
	raw_capacity = NULL;

	return 0;
}
void topology_normalize_cpu_scale(void)
{
	u64 capacity;
	u64 capacity_scale;
	int cpu;

	if (!raw_capacity)
		return;

	capacity_scale = 1;
	for_each_possible_cpu(cpu) {
		capacity = raw_capacity[cpu] * per_cpu(capacity_freq_ref, cpu);
		capacity_scale = max(capacity, capacity_scale);
	}

	pr_debug("cpu_capacity: capacity_scale=%llu\n", capacity_scale);
	for_each_possible_cpu(cpu) {
		capacity = raw_capacity[cpu] * per_cpu(capacity_freq_ref, cpu);
		capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT,
				     capacity_scale);
		topology_set_cpu_scale(cpu, capacity);
		pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
			 cpu, topology_get_cpu_scale(cpu));
	}
}
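/*
 * Worked example (illustrative numbers): a big CPU with
 * capacity-dmips-mhz = 1024 and capacity_freq_ref = 2000000 kHz next to a
 * little CPU with capacity-dmips-mhz = 512 and capacity_freq_ref = 1500000 kHz
 * gives capacity_scale = 1024 * 2000000. The little CPU then gets
 * (512 * 1500000 << SCHED_CAPACITY_SHIFT) / capacity_scale = 384, while the
 * big CPU keeps SCHED_CAPACITY_SCALE (1024).
 */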
bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
{
	struct clk *cpu_clk;
	static bool cap_parsing_failed;
	int ret;
	u32 cpu_capacity;

	if (cap_parsing_failed)
		return false;

	ret = of_property_read_u32(cpu_node, "capacity-dmips-mhz",
				   &cpu_capacity);
	if (!ret) {
		if (!raw_capacity) {
			raw_capacity = kcalloc(num_possible_cpus(),
					       sizeof(*raw_capacity),
					       GFP_KERNEL);
			if (!raw_capacity) {
				cap_parsing_failed = true;
				return false;
			}
		}
		raw_capacity[cpu] = cpu_capacity;
		pr_debug("cpu_capacity: %pOF cpu_capacity=%u (raw)\n",
			cpu_node, raw_capacity[cpu]);

		/*
		 * Update capacity_freq_ref for calculating early boot CPU capacities.
		 * For non-clk CPU DVFS mechanism, there's no way to get the
		 * frequency value now, assuming they are running at the same
		 * frequency (by keeping the initial capacity_freq_ref value).
		 */
		cpu_clk = of_clk_get(cpu_node, 0);
		if (!PTR_ERR_OR_ZERO(cpu_clk)) {
			per_cpu(capacity_freq_ref, cpu) =
				clk_get_rate(cpu_clk) / HZ_PER_KHZ;
			clk_put(cpu_clk);
		}
	} else if (raw_capacity) {
		pr_err("cpu_capacity: missing %pOF raw capacity\n",
			cpu_node);
		pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
		cap_parsing_failed = true;
		free_raw_capacity();
	}

	return !ret;
}
void __weak freq_inv_set_max_ratio(int cpu, u64 max_rate)
{
}

#ifdef CONFIG_ACPI_CPPC_LIB
#include <acpi/cppc_acpi.h>
void topology_init_cpu_capacity_cppc(void)
{
	u64 capacity, capacity_scale = 0;
	struct cppc_perf_caps perf_caps;
	int cpu;

	if (likely(!acpi_cpc_valid()))
		return;

	raw_capacity = kcalloc(num_possible_cpus(), sizeof(*raw_capacity),
			       GFP_KERNEL);
	if (!raw_capacity)
		return;

	for_each_possible_cpu(cpu) {
		if (!cppc_get_perf_caps(cpu, &perf_caps) &&
		    (perf_caps.highest_perf >= perf_caps.nominal_perf) &&
		    (perf_caps.highest_perf >= perf_caps.lowest_perf)) {
			raw_capacity[cpu] = perf_caps.highest_perf;
			capacity_scale = max_t(u64, capacity_scale, raw_capacity[cpu]);

			per_cpu(capacity_freq_ref, cpu) = cppc_perf_to_khz(&perf_caps, raw_capacity[cpu]);

			pr_debug("cpu_capacity: CPU%d cpu_capacity=%u (raw).\n",
				 cpu, raw_capacity[cpu]);
			continue;
		}

		pr_err("cpu_capacity: CPU%d missing/invalid highest performance.\n", cpu);
		pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
		goto exit;
	}

	for_each_possible_cpu(cpu) {
		freq_inv_set_max_ratio(cpu,
				       per_cpu(capacity_freq_ref, cpu) * HZ_PER_KHZ);

		capacity = raw_capacity[cpu];
		capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT,
				     capacity_scale);
		topology_set_cpu_scale(cpu, capacity);
		pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
			 cpu, topology_get_cpu_scale(cpu));
	}

	schedule_work(&update_topology_flags_work);
	pr_debug("cpu_capacity: cpu_capacity initialization done\n");

exit:
	free_raw_capacity();
}
#endif
#ifdef CONFIG_CPU_FREQ
static cpumask_var_t cpus_to_visit;
static void parsing_done_workfn(struct work_struct *work);
static DECLARE_WORK(parsing_done_work, parsing_done_workfn);

static int
init_cpu_capacity_callback(struct notifier_block *nb,
			   unsigned long val,
			   void *data)
{
	struct cpufreq_policy *policy = data;
	int cpu;

	if (val != CPUFREQ_CREATE_POLICY)
		return 0;

	pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
		 cpumask_pr_args(policy->related_cpus),
		 cpumask_pr_args(cpus_to_visit));

	cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->related_cpus);

	for_each_cpu(cpu, policy->related_cpus) {
		per_cpu(capacity_freq_ref, cpu) = policy->cpuinfo.max_freq;
		freq_inv_set_max_ratio(cpu,
				       per_cpu(capacity_freq_ref, cpu) * HZ_PER_KHZ);
	}

	if (cpumask_empty(cpus_to_visit)) {
		if (raw_capacity) {
			topology_normalize_cpu_scale();
			schedule_work(&update_topology_flags_work);
			free_raw_capacity();
		}
		pr_debug("cpu_capacity: parsing done\n");
		schedule_work(&parsing_done_work);
	}

	return 0;
}
static struct notifier_block init_cpu_capacity_notifier = {
	.notifier_call = init_cpu_capacity_callback,
};

static int __init register_cpufreq_notifier(void)
{
	int ret;

	/*
	 * On ACPI-based systems skip registering cpufreq notifier as cpufreq
	 * information is not needed for cpu capacity initialization.
	 */
	if (!acpi_disabled)
		return -EINVAL;

	if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(cpus_to_visit, cpu_possible_mask);

	ret = cpufreq_register_notifier(&init_cpu_capacity_notifier,
					CPUFREQ_POLICY_NOTIFIER);

	if (ret)
		free_cpumask_var(cpus_to_visit);

	return ret;
}
core_initcall(register_cpufreq_notifier);

static void parsing_done_workfn(struct work_struct *work)
{
	cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
				    CPUFREQ_POLICY_NOTIFIER);
	free_cpumask_var(cpus_to_visit);
}

#else
core_initcall(free_raw_capacity);
#endif
#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
/*
 * This function returns the logical cpu number of the node.
 * There are basically three kinds of return values:
 * (1) logical cpu number which is >= 0.
 * (2) -ENODEV when the device tree (DT) node is valid and found in the DT but
 * there is no possible logical CPU in the kernel to match. This happens
 * when CONFIG_NR_CPUS is configured to be smaller than the number of
 * CPU nodes in DT. We need to just ignore this case.
 * (3) -1 if the node does not exist in the device tree
 */
static int __init get_cpu_for_node(struct device_node *node)
{
	struct device_node *cpu_node;
	int cpu;

	cpu_node = of_parse_phandle(node, "cpu", 0);
	if (!cpu_node)
		return -1;

	cpu = of_cpu_node_to_id(cpu_node);
	if (cpu >= 0)
		topology_parse_cpu_capacity(cpu_node, cpu);
	else
		pr_info("CPU node for %pOF exists but the possible cpu range is: %*pbl\n",
			cpu_node, cpumask_pr_args(cpu_possible_mask));

	of_node_put(cpu_node);
	return cpu;
}
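/*
 * The parse_socket()/parse_cluster()/parse_core() helpers below walk a
 * cpu-map device tree node. Minimal sketch of such a node for a
 * single-socket, two-cluster system (node names follow the
 * socket%d/cluster%d/core%d/thread%d scheme matched by the code; the cpuN
 * phandles are placeholders):
 *
 *	cpu-map {
 *		socket0 {
 *			cluster0 {
 *				core0 { cpu = <&cpu0>; };
 *				core1 { cpu = <&cpu1>; };
 *			};
 *			cluster1 {
 *				core0 { cpu = <&cpu2>; };
 *				core1 { cpu = <&cpu3>; };
 *			};
 *		};
 *	};
 */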
static int __init parse_core(struct device_node *core, int package_id,
			     int cluster_id, int core_id)
{
	char name[20];
	bool leaf = true;
	int i = 0;
	int cpu;
	struct device_node *t;

	do {
		snprintf(name, sizeof(name), "thread%d", i);
		t = of_get_child_by_name(core, name);
		if (t) {
			leaf = false;
			cpu = get_cpu_for_node(t);
			if (cpu >= 0) {
				cpu_topology[cpu].package_id = package_id;
				cpu_topology[cpu].cluster_id = cluster_id;
				cpu_topology[cpu].core_id = core_id;
				cpu_topology[cpu].thread_id = i;
			} else if (cpu != -ENODEV) {
				pr_err("%pOF: Can't get CPU for thread\n", t);
				of_node_put(t);
				return -EINVAL;
			}
			of_node_put(t);
		}
		i++;
	} while (t);

	cpu = get_cpu_for_node(core);
	if (cpu >= 0) {
		if (!leaf) {
			pr_err("%pOF: Core has both threads and CPU\n",
			       core);
			return -EINVAL;
		}

		cpu_topology[cpu].package_id = package_id;
		cpu_topology[cpu].cluster_id = cluster_id;
		cpu_topology[cpu].core_id = core_id;
	} else if (leaf && cpu != -ENODEV) {
		pr_err("%pOF: Can't get CPU for leaf core\n", core);
		return -EINVAL;
	}

	return 0;
}
static int __init parse_cluster(struct device_node *cluster, int package_id,
				int cluster_id, int depth)
{
	char name[20];
	bool leaf = true;
	bool has_cores = false;
	struct device_node *c;
	int core_id = 0;
	int i, ret;

	/*
	 * First check for child clusters; we currently ignore any
	 * information about the nesting of clusters and present the
	 * scheduler with a flat list of them.
	 */
	i = 0;
	do {
		snprintf(name, sizeof(name), "cluster%d", i);
		c = of_get_child_by_name(cluster, name);
		if (c) {
			leaf = false;
			ret = parse_cluster(c, package_id, i, depth + 1);
			if (depth > 0)
				pr_warn("Topology for clusters of clusters not yet supported\n");
			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		i++;
	} while (c);

	/* Now check for cores */
	i = 0;
	do {
		snprintf(name, sizeof(name), "core%d", i);
		c = of_get_child_by_name(cluster, name);
		if (c) {
			has_cores = true;

			if (depth == 0) {
				pr_err("%pOF: cpu-map children should be clusters\n",
				       c);
				of_node_put(c);
				return -EINVAL;
			}

			if (leaf) {
				ret = parse_core(c, package_id, cluster_id,
						 core_id++);
			} else {
				pr_err("%pOF: Non-leaf cluster with core %s\n",
				       cluster, name);
				ret = -EINVAL;
			}

			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		i++;
	} while (c);

	if (leaf && !has_cores)
		pr_warn("%pOF: empty cluster\n", cluster);

	return 0;
}
static int __init parse_socket(struct device_node *socket)
{
	char name[20];
	struct device_node *c;
	bool has_socket = false;
	int package_id = 0, ret;

	do {
		snprintf(name, sizeof(name), "socket%d", package_id);
		c = of_get_child_by_name(socket, name);
		if (c) {
			has_socket = true;
			ret = parse_cluster(c, package_id, -1, 0);
			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		package_id++;
	} while (c);

	if (!has_socket)
		ret = parse_cluster(socket, 0, -1, 0);

	return ret;
}
static int __init parse_dt_topology(void)
{
	struct device_node *cn, *map;
	int ret = 0;
	int cpu;

	cn = of_find_node_by_path("/cpus");
	if (!cn) {
		pr_err("No CPU information found in DT\n");
		return 0;
	}

	/*
	 * When topology is provided cpu-map is essentially a root
	 * cluster with restricted subnodes.
	 */
	map = of_get_child_by_name(cn, "cpu-map");
	if (!map)
		goto out;

	ret = parse_socket(map);
	if (ret != 0)
		goto out_map;

	topology_normalize_cpu_scale();

	/*
	 * Check that all cores are in the topology; the SMP code will
	 * only mark cores described in the DT as possible.
	 */
	for_each_possible_cpu(cpu)
		if (cpu_topology[cpu].package_id < 0) {
			ret = -EINVAL;
			break;
		}

out_map:
	of_node_put(map);
out:
	of_node_put(cn);
	return ret;
}
#endif
struct cpu_topology cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);

const struct cpumask *cpu_coregroup_mask(int cpu)
{
	const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu));

	/* Find the smaller of NUMA, core or LLC siblings */
	if (cpumask_subset(&cpu_topology[cpu].core_sibling, core_mask)) {
		/* not numa in package, let's use the package siblings */
		core_mask = &cpu_topology[cpu].core_sibling;
	}

	if (last_level_cache_is_valid(cpu)) {
		if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask))
			core_mask = &cpu_topology[cpu].llc_sibling;
	}

	/*
	 * For systems with no shared cpu-side LLC but with clusters defined,
	 * extend core_mask to cluster_siblings. The sched domain builder will
	 * then remove MC as redundant with CLS if SCHED_CLUSTER is enabled.
	 */
	if (IS_ENABLED(CONFIG_SCHED_CLUSTER) &&
	    cpumask_subset(core_mask, &cpu_topology[cpu].cluster_sibling))
		core_mask = &cpu_topology[cpu].cluster_sibling;

	return core_mask;
}

const struct cpumask *cpu_clustergroup_mask(int cpu)
{
	/*
	 * Prevent cpu_clustergroup_mask() from spanning more CPUs than, or
	 * the same CPUs as, cpu_coregroup_mask().
	 */
	if (cpumask_subset(cpu_coregroup_mask(cpu),
			   &cpu_topology[cpu].cluster_sibling))
		return topology_sibling_cpumask(cpu);

	return &cpu_topology[cpu].cluster_sibling;
}
void update_siblings_masks(unsigned int cpuid)
{
	struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
	int cpu, ret;

	ret = detect_cache_attributes(cpuid);
	if (ret && ret != -ENOENT)
		pr_info("Early cacheinfo allocation failed, ret = %d\n", ret);

	/* update core and thread sibling masks */
	for_each_online_cpu(cpu) {
		cpu_topo = &cpu_topology[cpu];

		if (last_level_cache_is_shared(cpu, cpuid)) {
			cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling);
			cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling);
		}

		if (cpuid_topo->package_id != cpu_topo->package_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
		cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);

		if (cpuid_topo->cluster_id != cpu_topo->cluster_id)
			continue;

		if (cpuid_topo->cluster_id >= 0) {
			cpumask_set_cpu(cpu, &cpuid_topo->cluster_sibling);
			cpumask_set_cpu(cpuid, &cpu_topo->cluster_sibling);
		}

		if (cpuid_topo->core_id != cpu_topo->core_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
		cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
	}
}
static void clear_cpu_topology(int cpu)
{
	struct cpu_topology *cpu_topo = &cpu_topology[cpu];

	cpumask_clear(&cpu_topo->llc_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->llc_sibling);

	cpumask_clear(&cpu_topo->cluster_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->cluster_sibling);

	cpumask_clear(&cpu_topo->core_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
	cpumask_clear(&cpu_topo->thread_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
}

void __init reset_cpu_topology(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu_topology *cpu_topo = &cpu_topology[cpu];

		cpu_topo->thread_id = -1;
		cpu_topo->core_id = -1;
		cpu_topo->cluster_id = -1;
		cpu_topo->package_id = -1;

		clear_cpu_topology(cpu);
	}
}

void remove_cpu_topology(unsigned int cpu)
{
	int sibling;

	for_each_cpu(sibling, topology_core_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
	for_each_cpu(sibling, topology_sibling_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
	for_each_cpu(sibling, topology_cluster_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_cluster_cpumask(sibling));
	for_each_cpu(sibling, topology_llc_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling));

	clear_cpu_topology(cpu);
}

__weak int __init parse_acpi_topology(void)
{
	return 0;
}
#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
void __init init_cpu_topology(void)
{
	int cpu, ret;

	reset_cpu_topology();
	ret = parse_acpi_topology();
	if (!ret)
		ret = of_have_populated_dt() && parse_dt_topology();

	if (ret) {
		/*
		 * Discard anything that was parsed if we hit an error so we
		 * don't use partial information. But do not return yet to give
		 * arch-specific early cache level detection a chance to run.
		 */
		reset_cpu_topology();
	}

	for_each_possible_cpu(cpu) {
		ret = fetch_cache_info(cpu);
		if (!ret)
			continue;
		else if (ret != -ENOENT)
			pr_err("Early cacheinfo failed, ret = %d\n", ret);
		return;
	}
}

void store_cpu_topology(unsigned int cpuid)
{
	struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];

	if (cpuid_topo->package_id != -1)
		goto topology_populated;

	cpuid_topo->thread_id = -1;
	cpuid_topo->core_id = cpuid;
	cpuid_topo->package_id = cpu_to_node(cpuid);

	pr_debug("CPU%u: package %d core %d thread %d\n",
		 cpuid, cpuid_topo->package_id, cpuid_topo->core_id,
		 cpuid_topo->thread_id);

topology_populated:
	update_siblings_masks(cpuid);
}
#endif