1 #include <linux/perf_event.h>
2 #include <linux/export.h>
3 #include <linux/types.h>
4 #include <linux/init.h>
5 #include <linux/slab.h>
6 #include <asm/apicdef.h>
8 #include "../perf_event.h"
10 static __initconst const u64 amd_hw_cache_event_ids
11 [PERF_COUNT_HW_CACHE_MAX]
12 [PERF_COUNT_HW_CACHE_OP_MAX]
13 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
17 [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
18 [ C(RESULT_MISS) ] = 0x0141, /* Data Cache Misses */
21 [ C(RESULT_ACCESS) ] = 0,
22 [ C(RESULT_MISS) ] = 0,
24 [ C(OP_PREFETCH) ] = {
25 [ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher: attempts */
26 [ C(RESULT_MISS) ] = 0x0167, /* Data Prefetcher: cancelled */
31 [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches */
32 [ C(RESULT_MISS) ] = 0x0081, /* Instruction cache misses */
35 [ C(RESULT_ACCESS) ] = -1,
36 [ C(RESULT_MISS) ] = -1,
38 [ C(OP_PREFETCH) ] = {
39 [ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
40 [ C(RESULT_MISS) ] = 0,
45 [ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache: IC+DC */
46 [ C(RESULT_MISS) ] = 0x037E, /* L2 Cache Misses: IC+DC */
49 [ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback */
50 [ C(RESULT_MISS) ] = 0,
52 [ C(OP_PREFETCH) ] = {
53 [ C(RESULT_ACCESS) ] = 0,
54 [ C(RESULT_MISS) ] = 0,
59 [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
60 [ C(RESULT_MISS) ] = 0x0746, /* L1_DTLB_AND_L2_DTLB_MISS.ALL */
63 [ C(RESULT_ACCESS) ] = 0,
64 [ C(RESULT_MISS) ] = 0,
66 [ C(OP_PREFETCH) ] = {
67 [ C(RESULT_ACCESS) ] = 0,
68 [ C(RESULT_MISS) ] = 0,
73 [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches */
74 [ C(RESULT_MISS) ] = 0x0385, /* L1_ITLB_AND_L2_ITLB_MISS.ALL */
77 [ C(RESULT_ACCESS) ] = -1,
78 [ C(RESULT_MISS) ] = -1,
80 [ C(OP_PREFETCH) ] = {
81 [ C(RESULT_ACCESS) ] = -1,
82 [ C(RESULT_MISS) ] = -1,
87 [ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr. */
88 [ C(RESULT_MISS) ] = 0x00c3, /* Retired Mispredicted BI */
91 [ C(RESULT_ACCESS) ] = -1,
92 [ C(RESULT_MISS) ] = -1,
94 [ C(OP_PREFETCH) ] = {
95 [ C(RESULT_ACCESS) ] = -1,
96 [ C(RESULT_MISS) ] = -1,
101 [ C(RESULT_ACCESS) ] = 0xb8e9, /* CPU Request to Memory, l+r */
102 [ C(RESULT_MISS) ] = 0x98e9, /* CPU Request to Memory, r */
105 [ C(RESULT_ACCESS) ] = -1,
106 [ C(RESULT_MISS) ] = -1,
108 [ C(OP_PREFETCH) ] = {
109 [ C(RESULT_ACCESS) ] = -1,
110 [ C(RESULT_MISS) ] = -1,
115 static __initconst const u64 amd_hw_cache_event_ids_f17h
116 [PERF_COUNT_HW_CACHE_MAX]
117 [PERF_COUNT_HW_CACHE_OP_MAX]
118 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
121 [C(RESULT_ACCESS)] = 0x0040, /* Data Cache Accesses */
122 [C(RESULT_MISS)] = 0xc860, /* L2$ access from DC Miss */
125 [C(RESULT_ACCESS)] = 0,
126 [C(RESULT_MISS)] = 0,
129 [C(RESULT_ACCESS)] = 0xff5a, /* h/w prefetch DC Fills */
130 [C(RESULT_MISS)] = 0,
135 [C(RESULT_ACCESS)] = 0x0080, /* Instruction cache fetches */
136 [C(RESULT_MISS)] = 0x0081, /* Instruction cache misses */
139 [C(RESULT_ACCESS)] = -1,
140 [C(RESULT_MISS)] = -1,
143 [C(RESULT_ACCESS)] = 0,
144 [C(RESULT_MISS)] = 0,
149 [C(RESULT_ACCESS)] = 0,
150 [C(RESULT_MISS)] = 0,
153 [C(RESULT_ACCESS)] = 0,
154 [C(RESULT_MISS)] = 0,
157 [C(RESULT_ACCESS)] = 0,
158 [C(RESULT_MISS)] = 0,
163 [C(RESULT_ACCESS)] = 0xff45, /* All L2 DTLB accesses */
164 [C(RESULT_MISS)] = 0xf045, /* L2 DTLB misses (PT walks) */
167 [C(RESULT_ACCESS)] = 0,
168 [C(RESULT_MISS)] = 0,
171 [C(RESULT_ACCESS)] = 0,
172 [C(RESULT_MISS)] = 0,
177 [C(RESULT_ACCESS)] = 0x0084, /* L1 ITLB misses, L2 ITLB hits */
178 [C(RESULT_MISS)] = 0xff85, /* L1 ITLB misses, L2 misses */
181 [C(RESULT_ACCESS)] = -1,
182 [C(RESULT_MISS)] = -1,
185 [C(RESULT_ACCESS)] = -1,
186 [C(RESULT_MISS)] = -1,
191 [C(RESULT_ACCESS)] = 0x00c2, /* Retired Branch Instr. */
192 [C(RESULT_MISS)] = 0x00c3, /* Retired Mispredicted BI */
195 [C(RESULT_ACCESS)] = -1,
196 [C(RESULT_MISS)] = -1,
199 [C(RESULT_ACCESS)] = -1,
200 [C(RESULT_MISS)] = -1,
205 [C(RESULT_ACCESS)] = 0,
206 [C(RESULT_MISS)] = 0,
209 [C(RESULT_ACCESS)] = -1,
210 [C(RESULT_MISS)] = -1,
213 [C(RESULT_ACCESS)] = -1,
214 [C(RESULT_MISS)] = -1,
220 * AMD Performance Monitor K7 and later, up to and including Family 16h:
222 static const u64 amd_perfmon_event_map[PERF_COUNT_HW_MAX] =
224 [PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
225 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
226 [PERF_COUNT_HW_CACHE_REFERENCES] = 0x077d,
227 [PERF_COUNT_HW_CACHE_MISSES] = 0x077e,
228 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2,
229 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3,
230 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00d0, /* "Decoder empty" event */
231 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x00d1, /* "Dispatch stalls" event */
235 * AMD Performance Monitor Family 17h and later:
237 static const u64 amd_f17h_perfmon_event_map[PERF_COUNT_HW_MAX] =
239 [PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
240 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
241 [PERF_COUNT_HW_CACHE_REFERENCES] = 0xff60,
242 [PERF_COUNT_HW_CACHE_MISSES] = 0x0964,
243 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2,
244 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3,
245 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x0287,
246 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x0187,
249 static u64 amd_pmu_event_map(int hw_event)
251 if (boot_cpu_data.x86 >= 0x17)
252 return amd_f17h_perfmon_event_map[hw_event];
254 return amd_perfmon_event_map[hw_event];
258 * Previously calculated offsets
260 static unsigned int event_offsets[X86_PMC_IDX_MAX] __read_mostly;
261 static unsigned int count_offsets[X86_PMC_IDX_MAX] __read_mostly;
265 * 4 counters starting at 0xc0010000 each offset by 1
267 * CPUs with core performance counter extensions:
268 * 6 counters starting at 0xc0010200 each offset by 2
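 *
 * Illustrative example (not from any manual, just the arithmetic
 * above): counter index 3 uses event select MSR 0xc0010003 and
 * counter MSR 0xc0010007 on legacy CPUs, but event select MSR
 * 0xc0010206 and counter MSR 0xc0010207 (offset = index << 1) once
 * the core extensions are in use.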
270 static inline int amd_pmu_addr_offset(int index, bool eventsel)
278 offset = event_offsets[index];
280 offset = count_offsets[index];
285 if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
291 event_offsets[index] = offset;
293 count_offsets[index] = offset;
298 static int amd_core_hw_config(struct perf_event *event)
300 if (event->attr.exclude_host && event->attr.exclude_guest)
302 * When HO == GO == 1 the hardware treats that as GO == HO == 0
303 * and will count in both modes. We don't want to count in that
304 * case so we emulate no-counting by setting US = OS = 0.
306 event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR |
307 ARCH_PERFMON_EVENTSEL_OS);
308 else if (event->attr.exclude_host)
309 event->hw.config |= AMD64_EVENTSEL_GUESTONLY;
310 else if (event->attr.exclude_guest)
311 event->hw.config |= AMD64_EVENTSEL_HOSTONLY;
317 * AMD64 events are detected based on their event codes.
319 static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc)
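	/*
	 * The 12-bit AMD event code is split inside the event select:
	 * bits [7:0] live in config[7:0] and bits [11:8] in config[35:32],
	 * so shift the high nibble down and merge the two pieces.
	 */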
321 return ((hwc->config >> 24) & 0x0f00) | (hwc->config & 0x00ff);
324 static inline int amd_is_nb_event(struct hw_perf_event *hwc)
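	/* NB events use event select codes 0xE0 and above (low byte). */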
326 return (hwc->config & 0xe0) == 0xe0;
329 static inline int amd_has_nb(struct cpu_hw_events *cpuc)
331 struct amd_nb *nb = cpuc->amd_nb;
333 return nb && nb->nb_id != -1;
336 static int amd_pmu_hw_config(struct perf_event *event)
340 /* pass precise event sampling to ibs: */
341 if (event->attr.precise_ip && get_ibs_caps())
344 if (has_branch_stack(event))
347 ret = x86_pmu_hw_config(event);
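	/*
	 * For raw events, keep the AMD-specific bits of the user-supplied
	 * config as well; AMD64_RAW_EVENT_MASK additionally covers the
	 * extended event select bits [35:32] used by 12-bit event codes.
	 */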
351 if (event->attr.type == PERF_TYPE_RAW)
352 event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK;
354 return amd_core_hw_config(event);
357 static void __amd_put_nb_event_constraints(struct cpu_hw_events *cpuc,
358 struct perf_event *event)
360 struct amd_nb *nb = cpuc->amd_nb;
364 * need to scan the whole list because the event may not have
365 * been assigned during scheduling
367 * no race condition is possible because the event can only
368 * be removed on one CPU at a time AND the PMU is disabled
371 for (i = 0; i < x86_pmu.num_counters; i++) {
372 if (cmpxchg(nb->owners + i, event, NULL) == event)
378 * AMD64 NorthBridge events need special treatment because
379 * counter access needs to be synchronized across all cores
380 * of a package. Refer to BKDG section 3.12
382 * NB events are events measuring L3 cache and HyperTransport
383 * traffic. They are identified by an event code >= 0xe0.
384 * They measure events on the NorthBridge, which is shared
385 * by all cores on a package. NB events are counted on a
386 * shared set of counters. When a NB event is programmed
387 * in a counter, the data actually comes from a shared
388 * counter. Thus, access to those counters needs to be
391 * We implement the synchronization such that no two cores
392 * can be measuring NB events using the same counters. Thus,
393 * we maintain a per-NB allocation table. The available slot
394 * is propagated using the event_constraint structure.
396 * We provide only one choice for each NB event based on
397 * the fact that only NB events have restrictions. Consequently,
398 * if a counter is available, there is a guarantee the NB event
399 * will be assigned to it. If no slot is available, an empty
400 * constraint is returned and scheduling will eventually fail
403 * Note that all cores attached to the same NB compete for the same
404 * counters to host NB events; this is why we use atomic ops. Some
405 * multi-chip CPUs may have more than one NB.
407 * Given that resources are allocated (cmpxchg), they must be
408 * eventually freed for others to use. This is accomplished by
409 * calling __amd_put_nb_event_constraints()
411 * Non-NB events are not impacted by this restriction.
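 *
 * Worked example (hypothetical): if one core of a node has already
 * claimed NB counter 2 for an NB event, owners[2] is non-NULL, so a
 * second core scheduling an NB event must win the cmpxchg() on some
 * other free slot; if every slot is owned, it gets the empty
 * constraint and the event eventually fails to schedule.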
413 static struct event_constraint *
414 __amd_get_nb_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
415 struct event_constraint *c)
417 struct hw_perf_event *hwc = &event->hw;
418 struct amd_nb *nb = cpuc->amd_nb;
419 struct perf_event *old;
429 * detect if already present, if so reuse
431 * cannot merge with actual allocation
432 * because of possible holes
434 * event can already be present yet not assigned (in hwc->idx)
435 * because of successive calls to x86_schedule_events() from
436 * hw_perf_group_sched_in() without hw_perf_enable()
438 for_each_set_bit(idx, c->idxmsk, x86_pmu.num_counters) {
439 if (new == -1 || hwc->idx == idx)
440 /* assign free slot, prefer hwc->idx */
441 old = cmpxchg(nb->owners + idx, NULL, event);
442 else if (nb->owners[idx] == event)
443 /* event already present */
448 if (old && old != event)
451 /* reassign to this slot */
453 cmpxchg(nb->owners + new, event, NULL);
456 /* already present, reuse */
462 return &emptyconstraint;
464 return &nb->event_constraints[new];
467 static struct amd_nb *amd_alloc_nb(int cpu)
472 nb = kzalloc_node(sizeof(struct amd_nb), GFP_KERNEL, cpu_to_node(cpu));
479 * initialize all possible NB constraints
481 for (i = 0; i < x86_pmu.num_counters; i++) {
482 __set_bit(i, nb->event_constraints[i].idxmsk);
483 nb->event_constraints[i].weight = 1;
488 static int amd_pmu_cpu_prepare(int cpu)
490 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
492 WARN_ON_ONCE(cpuc->amd_nb);
494 if (!x86_pmu.amd_nb_constraints)
497 cpuc->amd_nb = amd_alloc_nb(cpu);
504 static void amd_pmu_cpu_starting(int cpu)
506 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
507 void **onln = &cpuc->kfree_on_online[X86_PERF_KFREE_SHARED];
511 cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
513 if (!x86_pmu.amd_nb_constraints)
516 nb_id = amd_get_nb_id(cpu);
517 WARN_ON_ONCE(nb_id == BAD_APICID);
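	/*
	 * Look for another online CPU on the same node that already owns
	 * an amd_nb; if one exists, adopt it and queue our freshly
	 * allocated copy for kfree once onlining completes.
	 */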
519 for_each_online_cpu(i) {
520 nb = per_cpu(cpu_hw_events, i).amd_nb;
521 if (WARN_ON_ONCE(!nb))
524 if (nb->nb_id == nb_id) {
525 *onln = cpuc->amd_nb;
531 cpuc->amd_nb->nb_id = nb_id;
532 cpuc->amd_nb->refcnt++;
535 static void amd_pmu_cpu_dead(int cpu)
537 struct cpu_hw_events *cpuhw;
539 if (!x86_pmu.amd_nb_constraints)
542 cpuhw = &per_cpu(cpu_hw_events, cpu);
545 struct amd_nb *nb = cpuhw->amd_nb;
547 if (nb->nb_id == -1 || --nb->refcnt == 0)
550 cpuhw->amd_nb = NULL;
554 static struct event_constraint *
555 amd_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
556 struct perf_event *event)
559 * if not an NB event or no NB, then no constraints
561 if (!(amd_has_nb(cpuc) && amd_is_nb_event(&event->hw)))
562 return &unconstrained;
564 return __amd_get_nb_event_constraints(cpuc, event, NULL);
567 static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
568 struct perf_event *event)
570 if (amd_has_nb(cpuc) && amd_is_nb_event(&event->hw))
571 __amd_put_nb_event_constraints(cpuc, event);
574 PMU_FORMAT_ATTR(event, "config:0-7,32-35");
575 PMU_FORMAT_ATTR(umask, "config:8-15" );
576 PMU_FORMAT_ATTR(edge, "config:18" );
577 PMU_FORMAT_ATTR(inv, "config:23" );
578 PMU_FORMAT_ATTR(cmask, "config:24-31" );
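/*
 * Example (illustrative): the raw value 0x98e9 used in the cache map
 * above decodes as event select 0xe9 (config bits 0-7) with unit mask
 * 0x98 (config bits 8-15); a 12-bit event code such as 0x1d6 would put
 * its high nibble into config bits 32-35.
 */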
580 static struct attribute *amd_format_attr[] = {
581 &format_attr_event.attr,
582 &format_attr_umask.attr,
583 &format_attr_edge.attr,
584 &format_attr_inv.attr,
585 &format_attr_cmask.attr,
591 #define AMD_EVENT_TYPE_MASK 0x000000F0ULL
593 #define AMD_EVENT_FP 0x00000000ULL ... 0x00000010ULL
594 #define AMD_EVENT_LS 0x00000020ULL ... 0x00000030ULL
595 #define AMD_EVENT_DC 0x00000040ULL ... 0x00000050ULL
596 #define AMD_EVENT_CU 0x00000060ULL ... 0x00000070ULL
597 #define AMD_EVENT_IC_DE 0x00000080ULL ... 0x00000090ULL
598 #define AMD_EVENT_EX_LS 0x000000C0ULL
599 #define AMD_EVENT_DE 0x000000D0ULL
600 #define AMD_EVENT_NB 0x000000E0ULL ... 0x000000F0ULL
603 * AMD family 15h event code/PMC mappings:
605 * type = event_code & 0x0F0:
607 * 0x000 FP PERF_CTL[5:3]
608 * 0x010 FP PERF_CTL[5:3]
609 * 0x020 LS PERF_CTL[5:0]
610 * 0x030 LS PERF_CTL[5:0]
611 * 0x040 DC PERF_CTL[5:0]
612 * 0x050 DC PERF_CTL[5:0]
613 * 0x060 CU PERF_CTL[2:0]
614 * 0x070 CU PERF_CTL[2:0]
615 * 0x080 IC/DE PERF_CTL[2:0]
616 * 0x090 IC/DE PERF_CTL[2:0]
619 * 0x0C0 EX/LS PERF_CTL[5:0]
620 * 0x0D0 DE PERF_CTL[2:0]
621 * 0x0E0 NB NB_PERF_CTL[3:0]
622 * 0x0F0 NB NB_PERF_CTL[3:0]
626 * 0x000 FP PERF_CTL[3], PERF_CTL[5:3] (*)
627 * 0x003 FP PERF_CTL[3]
628 * 0x004 FP PERF_CTL[3], PERF_CTL[5:3] (*)
629 * 0x00B FP PERF_CTL[3]
630 * 0x00D FP PERF_CTL[3]
631 * 0x023 DE PERF_CTL[2:0]
632 * 0x02D LS PERF_CTL[3]
633 * 0x02E LS PERF_CTL[3,0]
634 * 0x031 LS PERF_CTL[2:0] (**)
635 * 0x043 CU PERF_CTL[2:0]
636 * 0x045 CU PERF_CTL[2:0]
637 * 0x046 CU PERF_CTL[2:0]
638 * 0x054 CU PERF_CTL[2:0]
639 * 0x055 CU PERF_CTL[2:0]
640 * 0x08F IC PERF_CTL[0]
641 * 0x187 DE PERF_CTL[0]
642 * 0x188 DE PERF_CTL[0]
643 * 0x0DB EX PERF_CTL[5:0]
644 * 0x0DC LS PERF_CTL[5:0]
645 * 0x0DD LS PERF_CTL[5:0]
646 * 0x0DE LS PERF_CTL[5:0]
647 * 0x0DF LS PERF_CTL[5:0]
648 * 0x1C0 EX PERF_CTL[5:3]
649 * 0x1D6 EX PERF_CTL[5:0]
650 * 0x1D8 EX PERF_CTL[5:0]
652 * (*) depending on the umask all FPU counters may be used
653 * (**) only one unitmask enabled at a time
656 static struct event_constraint amd_f15_PMC0 = EVENT_CONSTRAINT(0, 0x01, 0);
657 static struct event_constraint amd_f15_PMC20 = EVENT_CONSTRAINT(0, 0x07, 0);
658 static struct event_constraint amd_f15_PMC3 = EVENT_CONSTRAINT(0, 0x08, 0);
659 static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
660 static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0);
661 static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);
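/*
 * The counter masks above encode the PERF_CTL sets from the table:
 * 0x01 = PERF_CTL[0], 0x07 = PERF_CTL[2:0], 0x08 = PERF_CTL[3],
 * 0x09 = PERF_CTL[3,0] (overlapping), 0x3F = PERF_CTL[5:0] and
 * 0x38 = PERF_CTL[5:3].
 */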
663 static struct event_constraint *
664 amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, int idx,
665 struct perf_event *event)
667 struct hw_perf_event *hwc = &event->hw;
668 unsigned int event_code = amd_get_event_code(hwc);
670 switch (event_code & AMD_EVENT_TYPE_MASK) {
672 switch (event_code) {
674 if (!(hwc->config & 0x0000F000ULL))
676 if (!(hwc->config & 0x00000F00ULL))
678 return &amd_f15_PMC3;
680 if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
682 return &amd_f15_PMC3;
686 return &amd_f15_PMC3;
688 return &amd_f15_PMC53;
691 case AMD_EVENT_EX_LS:
692 switch (event_code) {
699 return &amd_f15_PMC20;
701 return &amd_f15_PMC3;
703 return &amd_f15_PMC30;
705 if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
706 return &amd_f15_PMC20;
707 return &emptyconstraint;
709 return &amd_f15_PMC53;
711 return &amd_f15_PMC50;
714 case AMD_EVENT_IC_DE:
716 switch (event_code) {
720 return &amd_f15_PMC0;
721 case 0x0DB ... 0x0DF:
724 return &amd_f15_PMC50;
726 return &amd_f15_PMC20;
729 /* moved to amd/uncore.c */
730 return &emptyconstraint;
732 return &emptyconstraint;
736 static ssize_t amd_event_sysfs_show(char *page, u64 config)
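	/*
	 * Reassemble the 12-bit event code for sysfs display: bits [7:0]
	 * come straight from the config, bits [11:8] from config[35:32].
	 */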
738 u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT) |
739 (config & AMD64_EVENTSEL_EVENT) >> 24;
741 return x86_event_sysfs_show(page, config, event);
744 static __initconst const struct x86_pmu amd_pmu = {
746 .handle_irq = x86_pmu_handle_irq,
747 .disable_all = x86_pmu_disable_all,
748 .enable_all = x86_pmu_enable_all,
749 .enable = x86_pmu_enable_event,
750 .disable = x86_pmu_disable_event,
751 .hw_config = amd_pmu_hw_config,
752 .schedule_events = x86_schedule_events,
753 .eventsel = MSR_K7_EVNTSEL0,
754 .perfctr = MSR_K7_PERFCTR0,
755 .addr_offset = amd_pmu_addr_offset,
756 .event_map = amd_pmu_event_map,
757 .max_events = ARRAY_SIZE(amd_perfmon_event_map),
758 .num_counters = AMD64_NUM_COUNTERS,
760 .cntval_mask = (1ULL << 48) - 1,
762 /* use highest bit to detect overflow */
763 .max_period = (1ULL << 47) - 1,
764 .get_event_constraints = amd_get_event_constraints,
765 .put_event_constraints = amd_put_event_constraints,
767 .format_attrs = amd_format_attr,
768 .events_sysfs_show = amd_event_sysfs_show,
770 .cpu_prepare = amd_pmu_cpu_prepare,
771 .cpu_starting = amd_pmu_cpu_starting,
772 .cpu_dead = amd_pmu_cpu_dead,
774 .amd_nb_constraints = 1,
777 static int __init amd_core_pmu_init(void)
779 if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
782 switch (boot_cpu_data.x86) {
785 x86_pmu.get_event_constraints = amd_get_event_constraints_f15h;
790 * In family 17h, there are no event constraints in the PMC hardware.
791 * We fall back to using the default amd_get_event_constraints.
795 pr_err("core perfctr but no constraints; unknown hardware!\n");
800 * If core performance counter extensions exist, we must use
801 * MSR_F15H_PERF_CTL/MSR_F15H_PERF_CTR msrs. See also
802 * amd_pmu_addr_offset().
804 x86_pmu.eventsel = MSR_F15H_PERF_CTL;
805 x86_pmu.perfctr = MSR_F15H_PERF_CTR;
806 x86_pmu.num_counters = AMD64_NUM_COUNTERS_CORE;
808 * AMD Core perfctr has separate MSRs for the NB events; see
809 * the amd/uncore.c driver.
811 x86_pmu.amd_nb_constraints = 0;
813 pr_cont("core perfctr, ");
817 __init int amd_pmu_init(void)
821 /* Performance-monitoring supported from K7 and later: */
822 if (boot_cpu_data.x86 < 6)
827 ret = amd_core_pmu_init();
831 if (num_possible_cpus() == 1) {
833 * No point in allocating data structures to serialize
834 * against other CPUs when there is only one CPU.
836 x86_pmu.amd_nb_constraints = 0;
839 if (boot_cpu_data.x86 >= 0x17)
840 memcpy(hw_cache_event_ids, amd_hw_cache_event_ids_f17h, sizeof(hw_cache_event_ids));
842 memcpy(hw_cache_event_ids, amd_hw_cache_event_ids, sizeof(hw_cache_event_ids));
847 void amd_pmu_enable_virt(void)
849 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
851 cpuc->perf_ctr_virt_mask = 0;
853 /* Reload all events */
854 x86_pmu_disable_all();
855 x86_pmu_enable_all(0);
857 EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);
859 void amd_pmu_disable_virt(void)
861 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
864 * We only mask out the Host-only bit so that host-only counting works
865 * when SVM is disabled. If someone sets up a guest-only counter when
866 * SVM is disabled, the Guest-only bit still gets set and the counter
867 * will not count anything.
869 cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
871 /* Reload all events */
872 x86_pmu_disable_all();
873 x86_pmu_enable_all(0);
875 EXPORT_SYMBOL_GPL(amd_pmu_disable_virt);