GNU Linux-libre 5.4.274-gnu1
arch/x86/kvm/pmu_amd.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM PMU support for AMD
 *
 * Copyright 2015, Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *   Wei Huang <wei@redhat.com>
 *
 * Implementation is based on the pmu_intel.c file
 */
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"

enum pmu_type {
        PMU_TYPE_COUNTER = 0,
        PMU_TYPE_EVNTSEL,
};

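/* Indices into pmu->gp_counters[]; counters 4 and 5 need PERFCTR_CORE. */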
enum index {
        INDEX_ZERO = 0,
        INDEX_ONE,
        INDEX_TWO,
        INDEX_THREE,
        INDEX_FOUR,
        INDEX_FIVE,
        INDEX_ERROR,
};

/* duplicated from amd_perfmon_event_map, K7 and above should work. */
static struct kvm_event_hw_type_mapping amd_event_mapping[] = {
        [0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
        [1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
        [2] = { 0x7d, 0x07, PERF_COUNT_HW_CACHE_REFERENCES },
        [3] = { 0x7e, 0x07, PERF_COUNT_HW_CACHE_MISSES },
        [4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
        [5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
        [6] = { 0xd0, 0x00, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
        [7] = { 0xd1, 0x00, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
};

/* duplicated from amd_f17h_perfmon_event_map. */
static struct kvm_event_hw_type_mapping amd_f17h_event_mapping[] = {
        [0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
        [1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
        [2] = { 0x60, 0xff, PERF_COUNT_HW_CACHE_REFERENCES },
        [3] = { 0x64, 0x09, PERF_COUNT_HW_CACHE_MISSES },
        [4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
        [5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
        [6] = { 0x87, 0x02, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
        [7] = { 0x87, 0x01, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
};

/* amd_pmc_perf_hw_id depends on these being the same size */
static_assert(ARRAY_SIZE(amd_event_mapping) ==
             ARRAY_SIZE(amd_f17h_event_mapping));

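/*
 * Return the base MSR of the requested bank: the F15H MSRs when the guest
 * advertises the PERFCTR_CORE extension, the legacy K7 MSRs otherwise.
 */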
static unsigned int get_msr_base(struct kvm_pmu *pmu, enum pmu_type type)
{
        struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

        if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
                if (type == PMU_TYPE_COUNTER)
                        return MSR_F15H_PERF_CTR;
                else
                        return MSR_F15H_PERF_CTL;
        } else {
                if (type == PMU_TYPE_COUNTER)
                        return MSR_K7_PERFCTR0;
                else
                        return MSR_K7_EVNTSEL0;
        }
}

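/* Map a counter or event-select MSR to its slot in pmu->gp_counters[]. */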
static enum index msr_to_index(u32 msr)
{
        switch (msr) {
        case MSR_F15H_PERF_CTL0:
        case MSR_F15H_PERF_CTR0:
        case MSR_K7_EVNTSEL0:
        case MSR_K7_PERFCTR0:
                return INDEX_ZERO;
        case MSR_F15H_PERF_CTL1:
        case MSR_F15H_PERF_CTR1:
        case MSR_K7_EVNTSEL1:
        case MSR_K7_PERFCTR1:
                return INDEX_ONE;
        case MSR_F15H_PERF_CTL2:
        case MSR_F15H_PERF_CTR2:
        case MSR_K7_EVNTSEL2:
        case MSR_K7_PERFCTR2:
                return INDEX_TWO;
        case MSR_F15H_PERF_CTL3:
        case MSR_F15H_PERF_CTR3:
        case MSR_K7_EVNTSEL3:
        case MSR_K7_PERFCTR3:
                return INDEX_THREE;
        case MSR_F15H_PERF_CTL4:
        case MSR_F15H_PERF_CTR4:
                return INDEX_FOUR;
        case MSR_F15H_PERF_CTL5:
        case MSR_F15H_PERF_CTR5:
                return INDEX_FIVE;
        default:
                return INDEX_ERROR;
        }
}

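/*
 * Return the general-purpose PMC backing @msr, or NULL if the MSR is not a
 * PMU register of the requested type.
 */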
static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
                                             enum pmu_type type)
{
        switch (msr) {
        case MSR_F15H_PERF_CTL0:
        case MSR_F15H_PERF_CTL1:
        case MSR_F15H_PERF_CTL2:
        case MSR_F15H_PERF_CTL3:
        case MSR_F15H_PERF_CTL4:
        case MSR_F15H_PERF_CTL5:
        case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
                if (type != PMU_TYPE_EVNTSEL)
                        return NULL;
                break;
        case MSR_F15H_PERF_CTR0:
        case MSR_F15H_PERF_CTR1:
        case MSR_F15H_PERF_CTR2:
        case MSR_F15H_PERF_CTR3:
        case MSR_F15H_PERF_CTR4:
        case MSR_F15H_PERF_CTR5:
        case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
                if (type != PMU_TYPE_COUNTER)
                        return NULL;
                break;
        default:
                return NULL;
        }

        return &pmu->gp_counters[msr_to_index(msr)];
}

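/*
 * Translate a guest event select and unit mask into a generic perf hardware
 * event id, using the family 17h table for family 17h and newer guests.
 */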
static unsigned int amd_pmc_perf_hw_id(struct kvm_pmc *pmc)
{
        struct kvm_event_hw_type_mapping *event_mapping;
        u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
        u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
        int i;

        if (guest_cpuid_family(pmc->vcpu) >= 0x17)
                event_mapping = amd_f17h_event_mapping;
        else
                event_mapping = amd_event_mapping;

        for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++)
                if (event_mapping[i].eventsel == event_select
                    && event_mapping[i].unit_mask == unit_mask)
                        break;

        if (i == ARRAY_SIZE(amd_event_mapping))
                return PERF_COUNT_HW_MAX;

        return event_mapping[i].event_type;
}

/* return PERF_COUNT_HW_MAX as AMD doesn't have fixed events */
static unsigned amd_find_fixed_event(int idx)
{
        return PERF_COUNT_HW_MAX;
}

/*
 * Check if a PMC is enabled by comparing it against global_ctrl bits.
 * AMD CPUs don't have a global_ctrl MSR, so all PMCs are always enabled
 * (return TRUE).
 */
static bool amd_pmc_is_enabled(struct kvm_pmc *pmc)
{
        return true;
}

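/* Map a contiguous counter index back to its PMC via the counter MSRs. */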
static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
        unsigned int base = get_msr_base(pmu, PMU_TYPE_COUNTER);
        struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

        if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
                /*
                 * The idx is contiguous. The MSRs are not. The counter MSRs
                 * are interleaved with the event select MSRs.
                 */
                pmc_idx *= 2;
        }

        return get_gp_pmc_amd(pmu, base + pmc_idx, PMU_TYPE_COUNTER);
}

/* returns 0 if idx's corresponding MSR exists; otherwise returns 1. */
static int amd_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

        /* strip the flag bits (ECX[31:30]) from the RDPMC index */
        idx &= ~(3u << 30);

        return (idx >= pmu->nr_arch_gp_counters);
}

/* idx is the ECX register of RDPMC instruction */
static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *mask)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *counters;

        /* strip the flag bits (ECX[31:30]) from the RDPMC index */
        idx &= ~(3u << 30);
        if (idx >= pmu->nr_arch_gp_counters)
                return NULL;
        counters = pmu->gp_counters;

        return &counters[idx];
}

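/* An MSR is valid iff it belongs to the counter or the event-select bank. */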
static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

        return get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER) ||
               get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
}

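/* Handle reads of the PMU MSRs: counter values and event-select registers. */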
static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;

        /* MSR_PERFCTRn */
        pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
        if (pmc) {
                *data = pmc_read_counter(pmc);
                return 0;
        }
        /* MSR_EVNTSELn */
        pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
        if (pmc) {
                *data = pmc->eventsel;
                return 0;
        }

        return 1;
}

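/* Handle writes to the PMU MSRs; a changed event select reprograms the PMC. */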
static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;
        u32 msr = msr_info->index;
        u64 data = msr_info->data;

        /* MSR_PERFCTRn */
        pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
        if (pmc) {
                pmc->counter += data - pmc_read_counter(pmc);
                return 0;
        }
        /* MSR_EVNTSELn */
        pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
        if (pmc) {
                data &= ~pmu->reserved_bits;
                if (data != pmc->eventsel)
                        reprogram_gp_counter(pmc, data);
                return 0;
        }

        return 1;
}

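/*
 * Recompute the PMU configuration (counter count, width, reserved bits) from
 * the guest's CPUID.
 */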
static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

        if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
                pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS_CORE;
        else
                pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS;

        pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
        pmu->reserved_bits = 0xfffffff000280000ull;
        pmu->version = 1;
        /* not applicable to AMD, but clear them to prevent any fallout */
        pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
        pmu->nr_arch_fixed_counters = 0;
        pmu->global_status = 0;
}

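/* One-time initialization of the per-vCPU general-purpose counter array. */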
static void amd_pmu_init(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        int i;

        BUILD_BUG_ON(AMD64_NUM_COUNTERS_CORE > INTEL_PMC_MAX_GENERIC);

        for (i = 0; i < AMD64_NUM_COUNTERS_CORE; i++) {
                pmu->gp_counters[i].type = KVM_PMC_GP;
                pmu->gp_counters[i].vcpu = vcpu;
                pmu->gp_counters[i].idx = i;
        }
}

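/* Stop every counter and zero its value and event select. */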
static void amd_pmu_reset(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        int i;

        for (i = 0; i < AMD64_NUM_COUNTERS_CORE; i++) {
                struct kvm_pmc *pmc = &pmu->gp_counters[i];

                pmc_stop_counter(pmc);
                pmc->counter = pmc->eventsel = 0;
        }
}

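/* AMD implementation of the arch-specific kvm_pmu_ops callbacks. */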
struct kvm_pmu_ops amd_pmu_ops = {
        .pmc_perf_hw_id = amd_pmc_perf_hw_id,
        .find_fixed_event = amd_find_fixed_event,
        .pmc_is_enabled = amd_pmc_is_enabled,
        .pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
        .msr_idx_to_pmc = amd_msr_idx_to_pmc,
        .is_valid_msr_idx = amd_is_valid_msr_idx,
        .is_valid_msr = amd_is_valid_msr,
        .get_msr = amd_pmu_get_msr,
        .set_msr = amd_pmu_set_msr,
        .refresh = amd_pmu_refresh,
        .init = amd_pmu_init,
        .reset = amd_pmu_reset,
};