// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM PMU support for AMD
 *
 * Copyright 2015, Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *   Wei Huang <wei@redhat.com>
 *
 * Implementation is based on the pmu_intel.c file
 */
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"

enum pmu_type {
	PMU_TYPE_COUNTER = 0,
	PMU_TYPE_EVNTSEL,
};

enum index {
	INDEX_ZERO = 0,
	INDEX_ONE,
	INDEX_TWO,
	INDEX_THREE,
	INDEX_FOUR,
	INDEX_FIVE,
	INDEX_ERROR,
};
/* duplicated from amd_perfmon_event_map, K7 and above should work. */
static struct kvm_event_hw_type_mapping amd_event_mapping[] = {
	[0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
	[2] = { 0x7d, 0x07, PERF_COUNT_HW_CACHE_REFERENCES },
	[3] = { 0x7e, 0x07, PERF_COUNT_HW_CACHE_MISSES },
	[4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
	[5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
	[6] = { 0xd0, 0x00, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
	[7] = { 0xd1, 0x00, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
};
/* duplicated from amd_f17h_perfmon_event_map. */
static struct kvm_event_hw_type_mapping amd_f17h_event_mapping[] = {
	[0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
	[2] = { 0x60, 0xff, PERF_COUNT_HW_CACHE_REFERENCES },
	[3] = { 0x64, 0x09, PERF_COUNT_HW_CACHE_MISSES },
	[4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
	[5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
	[6] = { 0x87, 0x02, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
	[7] = { 0x87, 0x01, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
};
/* amd_pmc_perf_hw_id depends on these being the same size */
static_assert(ARRAY_SIZE(amd_event_mapping) ==
	      ARRAY_SIZE(amd_f17h_event_mapping));
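
/*
 * Illustrative note (not in the original file): amd_pmc_perf_hw_id() below
 * uses these tables to translate the event select / unit mask pair that the
 * guest programs into an EVNTSEL MSR into a generic perf event.  For example,
 * event 0x76 with unit mask 0x00 maps to PERF_COUNT_HW_CPU_CYCLES on every
 * family, while cache references use 0x7d/0x07 on pre-Zen parts and
 * 0x60/0xff on family 0x17 and later.
 */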
static unsigned int get_msr_base(struct kvm_pmu *pmu, enum pmu_type type)
{
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
		if (type == PMU_TYPE_COUNTER)
			return MSR_F15H_PERF_CTR;
		else
			return MSR_F15H_PERF_CTL;
	} else {
		if (type == PMU_TYPE_COUNTER)
			return MSR_K7_PERFCTR0;
		else
			return MSR_K7_EVNTSEL0;
	}
}
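
/*
 * Illustrative note (not in the original file): on the legacy K7 interface
 * the event selects and counters live in two separate MSR blocks
 * (MSR_K7_EVNTSEL0..3 at 0xc0010000-0xc0010003, MSR_K7_PERFCTR0..3 at
 * 0xc0010004-0xc0010007), whereas the PERFCTR_CORE extension interleaves
 * them pairwise starting at MSR_F15H_PERF_CTL0/CTR0 (0xc0010200/0xc0010201).
 */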
static enum index msr_to_index(u32 msr)
{
	switch (msr) {
	case MSR_F15H_PERF_CTL0:
	case MSR_F15H_PERF_CTR0:
	case MSR_K7_EVNTSEL0:
	case MSR_K7_PERFCTR0:
		return INDEX_ZERO;
	case MSR_F15H_PERF_CTL1:
	case MSR_F15H_PERF_CTR1:
	case MSR_K7_EVNTSEL1:
	case MSR_K7_PERFCTR1:
		return INDEX_ONE;
	case MSR_F15H_PERF_CTL2:
	case MSR_F15H_PERF_CTR2:
	case MSR_K7_EVNTSEL2:
	case MSR_K7_PERFCTR2:
		return INDEX_TWO;
	case MSR_F15H_PERF_CTL3:
	case MSR_F15H_PERF_CTR3:
	case MSR_K7_EVNTSEL3:
	case MSR_K7_PERFCTR3:
		return INDEX_THREE;
	case MSR_F15H_PERF_CTL4:
	case MSR_F15H_PERF_CTR4:
		return INDEX_FOUR;
	case MSR_F15H_PERF_CTL5:
	case MSR_F15H_PERF_CTR5:
		return INDEX_FIVE;
	default:
		return INDEX_ERROR;
	}
}
static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
					     enum pmu_type type)
{
	switch (msr) {
	case MSR_F15H_PERF_CTL0:
	case MSR_F15H_PERF_CTL1:
	case MSR_F15H_PERF_CTL2:
	case MSR_F15H_PERF_CTL3:
	case MSR_F15H_PERF_CTL4:
	case MSR_F15H_PERF_CTL5:
	case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
		if (type != PMU_TYPE_EVNTSEL)
			return NULL;
		break;
	case MSR_F15H_PERF_CTR0:
	case MSR_F15H_PERF_CTR1:
	case MSR_F15H_PERF_CTR2:
	case MSR_F15H_PERF_CTR3:
	case MSR_F15H_PERF_CTR4:
	case MSR_F15H_PERF_CTR5:
	case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
		if (type != PMU_TYPE_COUNTER)
			return NULL;
		break;
	default:
		return NULL;
	}

	return &pmu->gp_counters[msr_to_index(msr)];
}
static unsigned int amd_pmc_perf_hw_id(struct kvm_pmc *pmc)
{
	struct kvm_event_hw_type_mapping *event_mapping;
	u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
	u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
	int i;

	if (guest_cpuid_family(pmc->vcpu) >= 0x17)
		event_mapping = amd_f17h_event_mapping;
	else
		event_mapping = amd_event_mapping;

	for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++)
		if (event_mapping[i].eventsel == event_select
		    && event_mapping[i].unit_mask == unit_mask)
			break;

	if (i == ARRAY_SIZE(amd_event_mapping))
		return PERF_COUNT_HW_MAX;

	return event_mapping[i].event_type;
}
/* return PERF_COUNT_HW_MAX as AMD doesn't have fixed events */
static unsigned amd_find_fixed_event(int idx)
{
	return PERF_COUNT_HW_MAX;
}

/*
 * A PMC is normally enabled or disabled via the global_ctrl MSR bits. Because
 * AMD CPUs don't have a global_ctrl MSR, all PMCs are always enabled
 * (return true).
 */
static bool amd_pmc_is_enabled(struct kvm_pmc *pmc)
{
	return true;
}
static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
	unsigned int base = get_msr_base(pmu, PMU_TYPE_COUNTER);
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
		/*
		 * The idx is contiguous. The MSRs are not. The counter MSRs
		 * are interleaved with the event select MSRs.
		 */
		pmc_idx *= 2;
	}

	return get_gp_pmc_amd(pmu, base + pmc_idx, PMU_TYPE_COUNTER);
}
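
/*
 * Illustrative note (not in the original file): because of the interleaving
 * described above, a contiguous counter index has to be scaled before it is
 * added to the PERFCTR_CORE base, e.g. pmc_idx 1 becomes
 * MSR_F15H_PERF_CTR + 2, which is MSR_F15H_PERF_CTR1.
 */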
/* returns 0 if idx's corresponding MSR exists; otherwise returns 1. */
static int amd_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	idx &= ~(3u << 30);

	return (idx >= pmu->nr_arch_gp_counters);
}
/* idx is the ECX register of the RDPMC instruction */
static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *mask)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *counters;

	idx &= ~(3u << 30);
	if (idx >= pmu->nr_arch_gp_counters)
		return NULL;
	counters = pmu->gp_counters;

	return &counters[idx];
}
static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	return get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER) ||
		get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
}
static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;

	/* MSR_PERFCTRn: read the current counter value */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	if (pmc) {
		*data = pmc_read_counter(pmc);
		return 0;
	}
	/* MSR_EVNTSELn: read back the event selector */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
	if (pmc) {
		*data = pmc->eventsel;
		return 0;
	}

	return 1;
}
static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;
	u64 data = msr_info->data;

	/* MSR_PERFCTRn: writes adjust the running counter */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	if (pmc) {
		pmc->counter += data - pmc_read_counter(pmc);
		return 0;
	}
	/* MSR_EVNTSELn: drop reserved bits and reprogram on change */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
	if (pmc) {
		data &= ~pmu->reserved_bits;
		if (data != pmc->eventsel)
			reprogram_gp_counter(pmc, data);
		return 0;
	}

	return 1;
}
static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS_CORE;
	else
		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS;

	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
	pmu->reserved_bits = 0xfffffff000280000ull;
	pmu->version = 1;
	/* not applicable to AMD; but clean them to prevent any fallout */
	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
	pmu->nr_arch_fixed_counters = 0;
	pmu->global_status = 0;
}
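
/*
 * Illustrative note (not in the original file): the counter bitmask above
 * advertises 48-bit wide counters, and the reserved_bits value marks
 * EVNTSEL bits 19, 21 and 63:36 as reserved, so amd_pmu_set_msr() strips
 * those bits before reprogramming a counter.
 */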
static void amd_pmu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int i;

	BUILD_BUG_ON(AMD64_NUM_COUNTERS_CORE > INTEL_PMC_MAX_GENERIC);

	for (i = 0; i < AMD64_NUM_COUNTERS_CORE; i++) {
		pmu->gp_counters[i].type = KVM_PMC_GP;
		pmu->gp_counters[i].vcpu = vcpu;
		pmu->gp_counters[i].idx = i;
	}
}
static void amd_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int i;

	for (i = 0; i < AMD64_NUM_COUNTERS_CORE; i++) {
		struct kvm_pmc *pmc = &pmu->gp_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = pmc->eventsel = 0;
	}
}
struct kvm_pmu_ops amd_pmu_ops = {
	.pmc_perf_hw_id = amd_pmc_perf_hw_id,
	.find_fixed_event = amd_find_fixed_event,
	.pmc_is_enabled = amd_pmc_is_enabled,
	.pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
	.msr_idx_to_pmc = amd_msr_idx_to_pmc,
	.is_valid_msr_idx = amd_is_valid_msr_idx,
	.is_valid_msr = amd_is_valid_msr,
	.get_msr = amd_pmu_get_msr,
	.set_msr = amd_pmu_set_msr,
	.refresh = amd_pmu_refresh,
	.init = amd_pmu_init,
	.reset = amd_pmu_reset,
};