/*
 * ACPI probing code for ARM performance counters.
 *
 * Copyright (C) 2017 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/acpi.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/perf/arm_pmu.h>

#include <asm/cputype.h>
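/*
 * Per-CPU bookkeeping: probed_pmus tracks the logical PMU each CPU has been
 * associated with (CPUs with the same MIDR share one struct arm_pmu), and
 * pmu_irqs caches the Linux IRQ parsed from each CPU's MADT GICC entry.
 */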
static DEFINE_PER_CPU(struct arm_pmu *, probed_pmus);
static DEFINE_PER_CPU(int, pmu_irqs);
static int arm_pmu_acpi_register_irq(int cpu)
{
	struct acpi_madt_generic_interrupt *gicc;
	int gsi, trigger;

	gicc = acpi_cpu_get_madt_gicc(cpu);
	if (WARN_ON(!gicc))
		return -EINVAL;

	gsi = gicc->performance_interrupt;

	/*
	 * Per the ACPI spec, the MADT cannot describe a PMU that doesn't
	 * have an interrupt. QEMU advertises this by using a GSI of zero,
	 * which is not known to be valid on any hardware despite being
	 * valid per the spec. Take the pragmatic approach and reject a
	 * GSI of zero for now.
	 */
	if (!gsi)
		return 0;

	if (gicc->flags & ACPI_MADT_PERFORMANCE_IRQ_MODE)
		trigger = ACPI_EDGE_SENSITIVE;
	else
		trigger = ACPI_LEVEL_SENSITIVE;

	/*
	 * Helpfully, the MADT GICC doesn't have a polarity flag for the
	 * "performance interrupt". Luckily, on compliant GICs the polarity is
	 * a fixed value in HW (for both SPIs and PPIs) that we cannot change
	 * from SW.
	 *
	 * Here we pass in ACPI_ACTIVE_HIGH to keep the core code happy. This
	 * may not match the real polarity, but that should not matter.
	 *
	 * Other interrupt controllers are not supported with ACPI.
	 */
	return acpi_register_gsi(NULL, gsi, trigger, ACPI_ACTIVE_HIGH);
}
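/*
 * Undo arm_pmu_acpi_register_irq() for one CPU: look up the GSI in the CPU's
 * MADT GICC entry again and release the GSI-to-Linux-IRQ mapping.
 */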
static void arm_pmu_acpi_unregister_irq(int cpu)
{
	struct acpi_madt_generic_interrupt *gicc;

	gicc = acpi_cpu_get_madt_gicc(cpu);
	if (!gicc)
		return;

	acpi_unregister_gsi(gicc->performance_interrupt);
}
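/*
 * Map each possible CPU's PMU GSI to a Linux IRQ and stash it in pmu_irqs.
 * On failure, unwind any registrations made so far, taking care to release
 * each GSI only once even when several CPUs share it (as with PPIs).
 */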
static int arm_pmu_acpi_parse_irqs(void)
{
	int irq, cpu, irq_cpu, err;

	for_each_possible_cpu(cpu) {
		irq = arm_pmu_acpi_register_irq(cpu);
		if (irq < 0) {
			err = irq;
			pr_warn("Unable to parse ACPI PMU IRQ for CPU%d: %d\n",
				cpu, err);
			goto out_err;
		} else if (irq == 0) {
			pr_warn("No ACPI PMU IRQ for CPU%d\n", cpu);
		}
		per_cpu(pmu_irqs, cpu) = irq;
	}

	return 0;

out_err:
	for_each_possible_cpu(cpu) {
		irq = per_cpu(pmu_irqs, cpu);
		if (!irq)
			continue;

		arm_pmu_acpi_unregister_irq(cpu);

		/*
		 * Blat all copies of the IRQ so that we only unregister the
		 * corresponding GSI once (e.g. when we have PPIs).
		 */
		for_each_possible_cpu(irq_cpu) {
			if (per_cpu(pmu_irqs, irq_cpu) == irq)
				per_cpu(pmu_irqs, irq_cpu) = 0;
		}
	}
	return err;
}
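/*
 * Find the logical PMU matching the current CPU's MIDR, allocating a new
 * struct arm_pmu if this is the first CPU of its type to come up. Must run
 * on the CPU being probed, since read_cpuid_id() reads that CPU's MIDR.
 */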
static struct arm_pmu *arm_pmu_acpi_find_alloc_pmu(void)
{
	unsigned long cpuid = read_cpuid_id();
	struct arm_pmu *pmu;
	int cpu;

	for_each_possible_cpu(cpu) {
		pmu = per_cpu(probed_pmus, cpu);
		if (!pmu || pmu->acpi_cpuid != cpuid)
			continue;
		return pmu;
	}

	pmu = armpmu_alloc();
	if (!pmu) {
		pr_warn("Unable to allocate PMU for CPU%d\n", smp_processor_id());
		return NULL;
	}

	pmu->acpi_cpuid = cpuid;
	return pmu;
}
/*
 * This must run before the common arm_pmu hotplug logic, so that we can
 * associate a CPU and its interrupt before the common code tries to manage
 * the affinity and so on.
 *
 * Note that hotplug events are serialized, so we cannot race with another CPU
 * coming up. The perf core won't open events while a hotplug event is in
 * progress.
 */
151 struct pmu_hw_events __percpu *hw_events;
154 /* If we've already probed this CPU, we have nothing to do */
155 if (per_cpu(probed_pmus, cpu))
158 irq = per_cpu(pmu_irqs, cpu);
160 pmu = arm_pmu_acpi_find_alloc_pmu();
164 cpumask_set_cpu(cpu, &pmu->supported_cpus);
166 per_cpu(probed_pmus, cpu) = pmu;
169 * Log and request the IRQ so the core arm_pmu code can manage it. In
170 * some situations (e.g. mismatched PPIs), we may fail to request the
171 * IRQ. However, it may be too late for us to do anything about it.
172 * The common ARM PMU code will log a warning in this case.
174 hw_events = pmu->hw_events;
175 per_cpu(hw_events->irq, cpu) = irq;
176 armpmu_request_irq(pmu, cpu);
179 * Ideally, we'd probe the PMU here when we find the first matching
180 * CPU. We can't do that for several reasons; see the comment in
181 * arm_pmu_acpi_init().
183 * So for the time being, we're done.
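/*
 * Entry point for the CPU PMU driver (e.g. the ARMv8 PMUv3 driver): init_fn
 * fills in the implementation-specific details of each logical PMU we have
 * discovered so far, and each PMU is then registered with perf under a
 * unique name.
 */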
int arm_pmu_acpi_probe(armpmu_init_fn init_fn)
{
	int pmu_idx = 0;
	int cpu, ret;

	/*
	 * Initialise and register the set of PMUs which we know about right
	 * now. Ideally we'd do this in arm_pmu_acpi_cpu_starting() so that we
	 * could handle late hotplug, but this may lead to deadlock since we
	 * might try to register a hotplug notifier instance from within a
	 * hotplug notifier.
	 *
	 * There's also the problem of having access to the right init_fn,
	 * without tying this too deeply into the "real" PMU driver.
	 *
	 * For the moment, as with the platform/DT case, we need at least one
	 * of a PMU's CPUs to be online at probe time.
	 */
	for_each_possible_cpu(cpu) {
		struct arm_pmu *pmu = per_cpu(probed_pmus, cpu);
		char *base_name;

		if (!pmu || pmu->name)
			continue;

		ret = init_fn(pmu);
		if (ret == -ENODEV) {
			/* PMU not handled by this driver, or not present */
			continue;
		} else if (ret) {
			pr_warn("Unable to initialise PMU for CPU%d\n", cpu);
			return ret;
		}

		base_name = pmu->name;
		pmu->name = kasprintf(GFP_KERNEL, "%s_%d", base_name, pmu_idx++);
		if (!pmu->name) {
			pr_warn("Unable to allocate PMU name for CPU%d\n", cpu);
			return -ENOMEM;
		}

		ret = armpmu_register(pmu);
		if (ret) {
			pr_warn("Failed to register PMU for CPU%d\n", cpu);
			return ret;
		}
	}

	return 0;
}
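/*
 * Early init: parse the PMU interrupts from the MADT and register the
 * CPU-starting hotplug callback that associates each CPU with a logical PMU
 * as it comes online.
 */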
static int arm_pmu_acpi_init(void)
{
	int ret;

	if (acpi_disabled)
		return 0;

	/*
	 * We can't request IRQs yet, since we don't know the cookie value
	 * until we know which CPUs share the same logical PMU. We'll handle
	 * that in arm_pmu_acpi_cpu_starting().
	 */
	ret = arm_pmu_acpi_parse_irqs();
	if (ret)
		return ret;

	ret = cpuhp_setup_state(CPUHP_AP_PERF_ARM_ACPI_STARTING,
				"perf/arm/pmu_acpi:starting",
				arm_pmu_acpi_cpu_starting, NULL);
	return ret;
}
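/*
 * Registered at subsys_initcall level so that the IRQ parsing and the
 * hotplug callback are in place before device initcalls run the CPU PMU
 * driver, which in turn calls arm_pmu_acpi_probe().
 */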
subsys_initcall(arm_pmu_acpi_init)