// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM PMU support for Intel CPUs
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 *
 * Authors: Avi Kivity <avi@redhat.com>
 *          Gleb Natapov <gleb@redhat.com>
 */
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <asm/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "nested.h"
#include "pmu.h"
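
/*
 * MSR_IA32_PMC0 is the "full-width" alias of MSR_IA32_PERFCTR0: writes
 * through the alias can set every implemented counter bit, while writes
 * to the legacy MSR are sign-extended from bit 31.  The fixed offset
 * between the two ranges is used below to tell the access types apart.
 */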
#define MSR_PMC_FULL_WIDTH_BIT      (MSR_IA32_PMC0 - MSR_IA32_PERFCTR0)

static struct kvm_event_hw_type_mapping intel_arch_events[] = {
	/* Index must match CPUID 0x0A.EBX bit vector */
	[0] = { 0x3c, 0x00, PERF_COUNT_HW_CPU_CYCLES },
	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
	[2] = { 0x3c, 0x01, PERF_COUNT_HW_BUS_CYCLES },
	[3] = { 0x2e, 0x4f, PERF_COUNT_HW_CACHE_REFERENCES },
	[4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
	[5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
	[6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
	[7] = { 0x00, 0x03, PERF_COUNT_HW_REF_CPU_CYCLES },
};

/* Map fixed pmc index into intel_arch_events[]: fixed counter 0 counts
 * instructions, 1 counts core cycles, 2 counts reference cycles. */
static int fixed_pmc_events[] = {1, 0, 7};

static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
{
	int i;

	for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
		u8 new_ctrl = fixed_ctrl_field(data, i);
		u8 old_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, i);
		struct kvm_pmc *pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);

		if (old_ctrl == new_ctrl)
			continue;

		__set_bit(INTEL_PMC_IDX_FIXED + i, pmu->pmc_in_use);
		reprogram_fixed_counter(pmc, new_ctrl, i);
	}

	pmu->fixed_ctr_ctrl = data;
}

/* Called when the global control register has been updated. */
static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
{
	int bit;
	u64 diff = pmu->global_ctrl ^ data;

	pmu->global_ctrl = data;

	for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
		reprogram_counter(pmu, bit);
}
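
/*
 * Map the event_select/unit_mask pair programmed into a counter to a
 * generic perf hardware event id, but only if the guest's CPUID leaf
 * 0xA mask (pmu->available_event_types) reports the architectural
 * event as available.
 */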
static unsigned int intel_pmc_perf_hw_id(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
	u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
	u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
	int i;

	for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++)
		if (intel_arch_events[i].eventsel == event_select &&
		    intel_arch_events[i].unit_mask == unit_mask &&
		    (pmu->available_event_types & (1 << i)))
			break;

	if (i == ARRAY_SIZE(intel_arch_events))
		return PERF_COUNT_HW_MAX;

	return intel_arch_events[i].event_type;
}

static unsigned intel_find_fixed_event(int idx)
{
	u32 event;
	size_t size = ARRAY_SIZE(fixed_pmc_events);

	if (idx >= size)
		return PERF_COUNT_HW_MAX;
	event = fixed_pmc_events[array_index_nospec(idx, size)];
	return intel_arch_events[event].event_type;
}

/* Check if a PMC is enabled by comparing it with global_ctrl bits. */
static bool intel_pmc_is_enabled(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	if (pmu->version < 2)
		return true;
	return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
}
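
/*
 * Translate a global counter index to its kvm_pmc: general-purpose
 * counters occupy indices 0..nr_arch_gp_counters-1, fixed counters
 * start at INTEL_PMC_IDX_FIXED.
 */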
static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
	if (pmc_idx < INTEL_PMC_IDX_FIXED)
		return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + pmc_idx,
				  MSR_P6_EVNTSEL0);
	else {
		u32 idx = pmc_idx - INTEL_PMC_IDX_FIXED;
		return get_fixed_pmc(pmu, idx + MSR_CORE_PERF_FIXED_CTR0);
	}
}

/* Returns 0 if the counter selected by idx exists; otherwise returns 1. */
static int intel_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	bool fixed = idx & (1u << 30);

	idx &= ~(3u << 30);

	return (!fixed && idx >= pmu->nr_arch_gp_counters) ||
		(fixed && idx >= pmu->nr_arch_fixed_counters);
}
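
/*
 * For RDPMC, bit 30 of the guest's ECX selects the fixed-counter set
 * and the low bits index into it; *mask is narrowed to the emulated
 * width of the selected counter type.
 */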
static struct kvm_pmc *intel_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
					      unsigned int idx, u64 *mask)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	bool fixed = idx & (1u << 30);
	struct kvm_pmc *counters;
	unsigned int num_counters;

	idx &= ~(3u << 30);
	if (fixed) {
		counters = pmu->fixed_counters;
		num_counters = pmu->nr_arch_fixed_counters;
	} else {
		counters = pmu->gp_counters;
		num_counters = pmu->nr_arch_gp_counters;
	}
	if (idx >= num_counters)
		return NULL;
	*mask &= pmu->counter_bitmask[fixed ? KVM_PMC_FIXED : KVM_PMC_GP];
	return &counters[array_index_nospec(idx, num_counters)];
}
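
/*
 * IA32_PERF_CAPABILITIES is only visible to the guest when CPUID
 * enumerates PDCM; its FW_WRITES bit in turn gates access to the
 * full-width MSR_IA32_PMCx aliases looked up by get_fw_gp_pmc().
 */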
static inline u64 vcpu_get_perf_capabilities(struct kvm_vcpu *vcpu)
{
	return guest_cpuid_has(vcpu, X86_FEATURE_PDCM) ?
	       vcpu->arch.perf_capabilities : 0;
}

static inline bool fw_writes_is_enabled(struct kvm_vcpu *vcpu)
{
	return (vcpu_get_perf_capabilities(vcpu) & PMU_CAP_FW_WRITES) != 0;
}

static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr)
{
	return fw_writes_is_enabled(pmu_to_vcpu(pmu)) ?
	       get_gp_pmc(pmu, msr, MSR_IA32_PMC0) : NULL;
}

static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	bool ret;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
	case MSR_CORE_PERF_GLOBAL_STATUS:
	case MSR_CORE_PERF_GLOBAL_CTRL:
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		ret = pmu->version > 1;
		break;
	default:
		ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
		      get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
		      get_fixed_pmc(pmu, msr) || get_fw_gp_pmc(pmu, msr);
	}
	return ret;
}

static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc = get_fixed_pmc(pmu, msr);

	pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0);
	pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0);
	return pmc;
}

static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		msr_info->data = pmu->fixed_ctr_ctrl;
		return 0;
	case MSR_CORE_PERF_GLOBAL_STATUS:
		msr_info->data = pmu->global_status;
		return 0;
	case MSR_CORE_PERF_GLOBAL_CTRL:
		msr_info->data = pmu->global_ctrl;
		return 0;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		msr_info->data = pmu->global_ovf_ctrl;
		return 0;
	default:
		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
		    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
			u64 val = pmc_read_counter(pmc);

			msr_info->data = val & pmu->counter_bitmask[KVM_PMC_GP];
			return 0;
		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
			u64 val = pmc_read_counter(pmc);

			msr_info->data = val & pmu->counter_bitmask[KVM_PMC_FIXED];
			return 0;
		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
			msr_info->data = pmc->eventsel;
			return 0;
		}
	}

	return 1;
}
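
/*
 * Guest writes to the legacy counter MSRs are sign-extended from bit 31,
 * as on hardware; only host-initiated writes and writes through the
 * full-width aliases can set all counter bits directly.
 */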
static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;
	u64 data = msr_info->data;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		if (pmu->fixed_ctr_ctrl == data)
			return 0;
		if (!(data & pmu->fixed_ctr_ctrl_mask)) {
			reprogram_fixed_counters(pmu, data);
			return 0;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_STATUS:
		if (msr_info->host_initiated) {
			pmu->global_status = data;
			return 0;
		}
		break; /* RO MSR */
	case MSR_CORE_PERF_GLOBAL_CTRL:
		if (pmu->global_ctrl == data)
			return 0;
		if (kvm_valid_perf_global_ctrl(pmu, data)) {
			global_ctrl_changed(pmu, data);
			return 0;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		if (!(data & pmu->global_ovf_ctrl_mask)) {
			if (!msr_info->host_initiated)
				pmu->global_status &= ~data;
			pmu->global_ovf_ctrl = data;
			return 0;
		}
		break;
	default:
		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
		    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
			if ((msr & MSR_PMC_FULL_WIDTH_BIT) &&
			    (data & ~pmu->counter_bitmask[KVM_PMC_GP]))
				return 1;
			if (!msr_info->host_initiated &&
			    !(msr & MSR_PMC_FULL_WIDTH_BIT))
				data = (s64)(s32)data;
			pmc->counter += data - pmc_read_counter(pmc);
			if (pmc->perf_event && !pmc->is_paused)
				perf_event_period(pmc->perf_event,
						  get_sample_period(pmc, data));
			return 0;
		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
			pmc->counter += data - pmc_read_counter(pmc);
			if (pmc->perf_event && !pmc->is_paused)
				perf_event_period(pmc->perf_event,
						  get_sample_period(pmc, data));
			return 0;
		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
			if (data == pmc->eventsel)
				return 0;
			if (!(data & pmu->reserved_bits)) {
				reprogram_gp_counter(pmc, data);
				return 0;
			}
		}
	}

	return 1;
}
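
/*
 * Derive the vPMU configuration from the guest's CPUID leaf 0xA,
 * capping counter counts and widths at what the host PMU supports.
 */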
static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct x86_pmu_capability x86_pmu;
	struct kvm_cpuid_entry2 *entry;
	union cpuid10_eax eax;
	union cpuid10_edx edx;
	int i;

	pmu->nr_arch_gp_counters = 0;
	pmu->nr_arch_fixed_counters = 0;
	pmu->counter_bitmask[KVM_PMC_GP] = 0;
	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
	pmu->version = 0;
	pmu->reserved_bits = 0xffffffff00200000ull;
	pmu->raw_event_mask = X86_RAW_EVENT_MASK;
	pmu->global_ctrl_mask = ~0ull;
	pmu->global_ovf_ctrl_mask = ~0ull;
	pmu->fixed_ctr_ctrl_mask = ~0ull;

	entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
	if (!entry)
		return;
	eax.full = entry->eax;
	edx.full = entry->edx;

	pmu->version = eax.split.version_id;
	if (!pmu->version)
		return;

	perf_get_x86_pmu_capability(&x86_pmu);

	pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
					 x86_pmu.num_counters_gp);
	eax.split.bit_width = min_t(int, eax.split.bit_width, x86_pmu.bit_width_gp);
	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
	eax.split.mask_length = min_t(int, eax.split.mask_length, x86_pmu.events_mask_len);
	pmu->available_event_types = ~entry->ebx &
					((1ull << eax.split.mask_length) - 1);

	if (pmu->version == 1) {
		pmu->nr_arch_fixed_counters = 0;
	} else {
		pmu->nr_arch_fixed_counters =
			min_t(int, edx.split.num_counters_fixed,
			      x86_pmu.num_counters_fixed);
		edx.split.bit_width_fixed = min_t(int,
			edx.split.bit_width_fixed, x86_pmu.bit_width_fixed);
		pmu->counter_bitmask[KVM_PMC_FIXED] =
			((u64)1 << edx.split.bit_width_fixed) - 1;
	}

	for (i = 0; i < pmu->nr_arch_fixed_counters; i++)
		pmu->fixed_ctr_ctrl_mask &= ~(0xbull << (i * 4));
	pmu->global_ctrl = ((1ull << pmu->nr_arch_gp_counters) - 1) |
		(((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
	pmu->global_ctrl_mask = ~pmu->global_ctrl;
	pmu->global_ovf_ctrl_mask = pmu->global_ctrl_mask
			& ~(MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF |
			    MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD);
	if (vmx_pt_mode_is_host_guest())
		pmu->global_ovf_ctrl_mask &=
				~MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI;

	entry = kvm_find_cpuid_entry(vcpu, 7, 0);
	if (entry &&
	    (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
	    (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM)))
		pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;

	bitmap_set(pmu->all_valid_pmc_idx,
		0, pmu->nr_arch_gp_counters);
	bitmap_set(pmu->all_valid_pmc_idx,
		INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters);

	nested_vmx_pmu_entry_exit_ctls_update(vcpu);
}
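
/*
 * One-time setup of the per-counter fields that never change; the
 * CPUID-derived state is (re)computed by intel_pmu_refresh().
 */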
static void intel_pmu_init(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
		pmu->gp_counters[i].type = KVM_PMC_GP;
		pmu->gp_counters[i].vcpu = vcpu;
		pmu->gp_counters[i].idx = i;
		pmu->gp_counters[i].current_config = 0;
	}

	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
		pmu->fixed_counters[i].type = KVM_PMC_FIXED;
		pmu->fixed_counters[i].vcpu = vcpu;
		pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
		pmu->fixed_counters[i].current_config = 0;
	}

	vcpu->arch.perf_capabilities = vmx_get_perf_capabilities();
}

static void intel_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc = NULL;
	int i;

	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
		pmc = &pmu->gp_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = pmc->eventsel = 0;
	}

	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
		pmc = &pmu->fixed_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = 0;
	}

	pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status =
		pmu->global_ovf_ctrl = 0;
}

struct kvm_pmu_ops intel_pmu_ops = {
	.pmc_perf_hw_id = intel_pmc_perf_hw_id,
	.find_fixed_event = intel_find_fixed_event,
	.pmc_is_enabled = intel_pmc_is_enabled,
	.pmc_idx_to_pmc = intel_pmc_idx_to_pmc,
	.rdpmc_ecx_to_pmc = intel_rdpmc_ecx_to_pmc,
	.msr_idx_to_pmc = intel_msr_idx_to_pmc,
	.is_valid_rdpmc_ecx = intel_is_valid_rdpmc_ecx,
	.is_valid_msr = intel_is_valid_msr,
	.get_msr = intel_pmu_get_msr,
	.set_msr = intel_pmu_set_msr,
	.refresh = intel_pmu_refresh,
	.init = intel_pmu_init,
	.reset = intel_pmu_reset,
};