GNU Linux-libre 4.9.315-gnu1
[releases.git] / arch/x86/events/amd/core.c
1 #include <linux/perf_event.h>
2 #include <linux/export.h>
3 #include <linux/types.h>
4 #include <linux/init.h>
5 #include <linux/slab.h>
6 #include <asm/apicdef.h>
7
8 #include "../perf_event.h"
9
10 static __initconst const u64 amd_hw_cache_event_ids
11                                 [PERF_COUNT_HW_CACHE_MAX]
12                                 [PERF_COUNT_HW_CACHE_OP_MAX]
13                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
14 {
15  [ C(L1D) ] = {
16         [ C(OP_READ) ] = {
17                 [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses        */
18                 [ C(RESULT_MISS)   ] = 0x0141, /* Data Cache Misses          */
19         },
20         [ C(OP_WRITE) ] = {
21                 [ C(RESULT_ACCESS) ] = 0,
22                 [ C(RESULT_MISS)   ] = 0,
23         },
24         [ C(OP_PREFETCH) ] = {
25                 [ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts  */
26                 [ C(RESULT_MISS)   ] = 0x0167, /* Data Prefetcher :cancelled */
27         },
28  },
29  [ C(L1I ) ] = {
30         [ C(OP_READ) ] = {
31                 [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches  */
32                 [ C(RESULT_MISS)   ] = 0x0081, /* Instruction cache misses   */
33         },
34         [ C(OP_WRITE) ] = {
35                 [ C(RESULT_ACCESS) ] = -1,
36                 [ C(RESULT_MISS)   ] = -1,
37         },
38         [ C(OP_PREFETCH) ] = {
39                 [ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
40                 [ C(RESULT_MISS)   ] = 0,
41         },
42  },
43  [ C(LL  ) ] = {
44         [ C(OP_READ) ] = {
45                 [ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
46                 [ C(RESULT_MISS)   ] = 0x037E, /* L2 Cache Misses : IC+DC     */
47         },
48         [ C(OP_WRITE) ] = {
49                 [ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback           */
50                 [ C(RESULT_MISS)   ] = 0,
51         },
52         [ C(OP_PREFETCH) ] = {
53                 [ C(RESULT_ACCESS) ] = 0,
54                 [ C(RESULT_MISS)   ] = 0,
55         },
56  },
57  [ C(DTLB) ] = {
58         [ C(OP_READ) ] = {
59                 [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses        */
60                 [ C(RESULT_MISS)   ] = 0x0746, /* L1_DTLB_AND_L2_DTLB_MISS.ALL */
61         },
62         [ C(OP_WRITE) ] = {
63                 [ C(RESULT_ACCESS) ] = 0,
64                 [ C(RESULT_MISS)   ] = 0,
65         },
66         [ C(OP_PREFETCH) ] = {
67                 [ C(RESULT_ACCESS) ] = 0,
68                 [ C(RESULT_MISS)   ] = 0,
69         },
70  },
71  [ C(ITLB) ] = {
72         [ C(OP_READ) ] = {
73                 [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches        */
74                 [ C(RESULT_MISS)   ] = 0x0385, /* L1_ITLB_AND_L2_ITLB_MISS.ALL */
75         },
76         [ C(OP_WRITE) ] = {
77                 [ C(RESULT_ACCESS) ] = -1,
78                 [ C(RESULT_MISS)   ] = -1,
79         },
80         [ C(OP_PREFETCH) ] = {
81                 [ C(RESULT_ACCESS) ] = -1,
82                 [ C(RESULT_MISS)   ] = -1,
83         },
84  },
85  [ C(BPU ) ] = {
86         [ C(OP_READ) ] = {
87                 [ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr.      */
88                 [ C(RESULT_MISS)   ] = 0x00c3, /* Retired Mispredicted BI    */
89         },
90         [ C(OP_WRITE) ] = {
91                 [ C(RESULT_ACCESS) ] = -1,
92                 [ C(RESULT_MISS)   ] = -1,
93         },
94         [ C(OP_PREFETCH) ] = {
95                 [ C(RESULT_ACCESS) ] = -1,
96                 [ C(RESULT_MISS)   ] = -1,
97         },
98  },
99  [ C(NODE) ] = {
100         [ C(OP_READ) ] = {
101                 [ C(RESULT_ACCESS) ] = 0xb8e9, /* CPU Request to Memory, l+r */
102                 [ C(RESULT_MISS)   ] = 0x98e9, /* CPU Request to Memory, r   */
103         },
104         [ C(OP_WRITE) ] = {
105                 [ C(RESULT_ACCESS) ] = -1,
106                 [ C(RESULT_MISS)   ] = -1,
107         },
108         [ C(OP_PREFETCH) ] = {
109                 [ C(RESULT_ACCESS) ] = -1,
110                 [ C(RESULT_MISS)   ] = -1,
111         },
112  },
113 };
114
115 static __initconst const u64 amd_hw_cache_event_ids_f17h
116                                 [PERF_COUNT_HW_CACHE_MAX]
117                                 [PERF_COUNT_HW_CACHE_OP_MAX]
118                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
119 [C(L1D)] = {
120         [C(OP_READ)] = {
121                 [C(RESULT_ACCESS)] = 0x0040, /* Data Cache Accesses */
122                 [C(RESULT_MISS)]   = 0xc860, /* L2$ access from DC Miss */
123         },
124         [C(OP_WRITE)] = {
125                 [C(RESULT_ACCESS)] = 0,
126                 [C(RESULT_MISS)]   = 0,
127         },
128         [C(OP_PREFETCH)] = {
129                 [C(RESULT_ACCESS)] = 0xff5a, /* h/w prefetch DC Fills */
130                 [C(RESULT_MISS)]   = 0,
131         },
132 },
133 [C(L1I)] = {
134         [C(OP_READ)] = {
135                 [C(RESULT_ACCESS)] = 0x0080, /* Instruction cache fetches  */
136                 [C(RESULT_MISS)]   = 0x0081, /* Instruction cache misses   */
137         },
138         [C(OP_WRITE)] = {
139                 [C(RESULT_ACCESS)] = -1,
140                 [C(RESULT_MISS)]   = -1,
141         },
142         [C(OP_PREFETCH)] = {
143                 [C(RESULT_ACCESS)] = 0,
144                 [C(RESULT_MISS)]   = 0,
145         },
146 },
147 [C(LL)] = {
148         [C(OP_READ)] = {
149                 [C(RESULT_ACCESS)] = 0,
150                 [C(RESULT_MISS)]   = 0,
151         },
152         [C(OP_WRITE)] = {
153                 [C(RESULT_ACCESS)] = 0,
154                 [C(RESULT_MISS)]   = 0,
155         },
156         [C(OP_PREFETCH)] = {
157                 [C(RESULT_ACCESS)] = 0,
158                 [C(RESULT_MISS)]   = 0,
159         },
160 },
161 [C(DTLB)] = {
162         [C(OP_READ)] = {
163                 [C(RESULT_ACCESS)] = 0xff45, /* All L2 DTLB accesses */
164                 [C(RESULT_MISS)]   = 0xf045, /* L2 DTLB misses (PT walks) */
165         },
166         [C(OP_WRITE)] = {
167                 [C(RESULT_ACCESS)] = 0,
168                 [C(RESULT_MISS)]   = 0,
169         },
170         [C(OP_PREFETCH)] = {
171                 [C(RESULT_ACCESS)] = 0,
172                 [C(RESULT_MISS)]   = 0,
173         },
174 },
175 [C(ITLB)] = {
176         [C(OP_READ)] = {
177                 [C(RESULT_ACCESS)] = 0x0084, /* L1 ITLB misses, L2 ITLB hits */
178                 [C(RESULT_MISS)]   = 0xff85, /* L1 ITLB misses, L2 misses */
179         },
180         [C(OP_WRITE)] = {
181                 [C(RESULT_ACCESS)] = -1,
182                 [C(RESULT_MISS)]   = -1,
183         },
184         [C(OP_PREFETCH)] = {
185                 [C(RESULT_ACCESS)] = -1,
186                 [C(RESULT_MISS)]   = -1,
187         },
188 },
189 [C(BPU)] = {
190         [C(OP_READ)] = {
191                 [C(RESULT_ACCESS)] = 0x00c2, /* Retired Branch Instr.      */
192                 [C(RESULT_MISS)]   = 0x00c3, /* Retired Mispredicted BI    */
193         },
194         [C(OP_WRITE)] = {
195                 [C(RESULT_ACCESS)] = -1,
196                 [C(RESULT_MISS)]   = -1,
197         },
198         [C(OP_PREFETCH)] = {
199                 [C(RESULT_ACCESS)] = -1,
200                 [C(RESULT_MISS)]   = -1,
201         },
202 },
203 [C(NODE)] = {
204         [C(OP_READ)] = {
205                 [C(RESULT_ACCESS)] = 0,
206                 [C(RESULT_MISS)]   = 0,
207         },
208         [C(OP_WRITE)] = {
209                 [C(RESULT_ACCESS)] = -1,
210                 [C(RESULT_MISS)]   = -1,
211         },
212         [C(OP_PREFETCH)] = {
213                 [C(RESULT_ACCESS)] = -1,
214                 [C(RESULT_MISS)]   = -1,
215         },
216 },
217 };
218
219 /*
220  * AMD Performance Monitor K7 and later, up to and including Family 16h:
221  */
222 static const u64 amd_perfmon_event_map[PERF_COUNT_HW_MAX] =
223 {
224         [PERF_COUNT_HW_CPU_CYCLES]              = 0x0076,
225         [PERF_COUNT_HW_INSTRUCTIONS]            = 0x00c0,
226         [PERF_COUNT_HW_CACHE_REFERENCES]        = 0x077d,
227         [PERF_COUNT_HW_CACHE_MISSES]            = 0x077e,
228         [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = 0x00c2,
229         [PERF_COUNT_HW_BRANCH_MISSES]           = 0x00c3,
230         [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00d0, /* "Decoder empty" event */
231         [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]  = 0x00d1, /* "Dispatch stalls" event */
232 };
233
234 /*
235  * AMD Performance Monitor Family 17h and later:
236  */
237 static const u64 amd_f17h_perfmon_event_map[PERF_COUNT_HW_MAX] =
238 {
239         [PERF_COUNT_HW_CPU_CYCLES]              = 0x0076,
240         [PERF_COUNT_HW_INSTRUCTIONS]            = 0x00c0,
241         [PERF_COUNT_HW_CACHE_REFERENCES]        = 0xff60,
242         [PERF_COUNT_HW_CACHE_MISSES]            = 0x0964,
243         [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = 0x00c2,
244         [PERF_COUNT_HW_BRANCH_MISSES]           = 0x00c3,
245         [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x0287,
246         [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]  = 0x0187,
247 };
248
249 static u64 amd_pmu_event_map(int hw_event)
250 {
251         if (boot_cpu_data.x86 >= 0x17)
252                 return amd_f17h_perfmon_event_map[hw_event];
253
254         return amd_perfmon_event_map[hw_event];
255 }
256
257 /*
258  * Previously calculated offsets
259  */
260 static unsigned int event_offsets[X86_PMC_IDX_MAX] __read_mostly;
261 static unsigned int count_offsets[X86_PMC_IDX_MAX] __read_mostly;
262
263 /*
264  * Legacy CPUs:
265  *   4 counters starting at 0xc0010000 each offset by 1
266  *
267  * CPUs with core performance counter extensions:
268  *   6 counters starting at 0xc0010200 each offset by 2
269  */
270 static inline int amd_pmu_addr_offset(int index, bool eventsel)
271 {
272         int offset;
273
274         if (!index)
275                 return index;
276
277         if (eventsel)
278                 offset = event_offsets[index];
279         else
280                 offset = count_offsets[index];
281
282         if (offset)
283                 return offset;
284
285         if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
286                 offset = index;
287         else
288                 offset = index << 1;
289
290         if (eventsel)
291                 event_offsets[index] = offset;
292         else
293                 count_offsets[index] = offset;
294
295         return offset;
296 }
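
/*
 * Worked example, for illustration only, using the usual base MSR values
 * (MSR_K7_EVNTSEL0 = 0xc0010000, MSR_K7_PERFCTR0 = 0xc0010004,
 * MSR_F15H_PERF_CTL = 0xc0010200, MSR_F15H_PERF_CTR = 0xc0010201):
 *
 *   Legacy CPU, index 3:        offset = 3
 *     eventsel = 0xc0010000 + 3 = 0xc0010003
 *     perfctr  = 0xc0010004 + 3 = 0xc0010007
 *
 *   PERFCTR_CORE CPU, index 3:  offset = 3 << 1 = 6
 *     eventsel = 0xc0010200 + 6 = 0xc0010206
 *     perfctr  = 0xc0010201 + 6 = 0xc0010207
 */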
297
298 static int amd_core_hw_config(struct perf_event *event)
299 {
300         if (event->attr.exclude_host && event->attr.exclude_guest)
301                 /*
302                  * When HO == GO == 1 the hardware treats that as GO == HO == 0
303                  * and will count in both modes. We don't want to count in that
304                  * case so we emulate no-counting by setting US = OS = 0.
305                  */
306                 event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR |
307                                       ARCH_PERFMON_EVENTSEL_OS);
308         else if (event->attr.exclude_host)
309                 event->hw.config |= AMD64_EVENTSEL_GUESTONLY;
310         else if (event->attr.exclude_guest)
311                 event->hw.config |= AMD64_EVENTSEL_HOSTONLY;
312
313         return 0;
314 }
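
/*
 * Summary of the guest/host filtering above, for illustration (GuestOnly
 * and HostOnly are bits 40 and 41 of the event select):
 *
 *   exclude_guest  exclude_host   resulting event select
 *   0              0              neither bit set -> count in host and guest
 *   1              0              HostOnly set    -> count in host only
 *   0              1              GuestOnly set   -> count in guest only
 *   1              1              USR/OS cleared  -> count nothing
 */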
315
316 /*
317  * AMD64 events are detected based on their event codes.
318  */
319 static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc)
320 {
321         return ((hwc->config >> 24) & 0x0f00) | (hwc->config & 0x00ff);
322 }
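
/*
 * For illustration: the 12-bit AMD event code is split between config
 * bits 7:0 and 35:32 (see the "event" format attribute below,
 * "config:0-7,32-35").  E.g. raw event 0x1d6, ignoring the unit mask:
 *
 *   config                  = 0x00000001000000d6
 *   (config >> 24) & 0x0f00 = 0x0100
 *   config & 0x00ff         = 0x00d6
 *   amd_get_event_code()    = 0x01d6
 */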
323
324 static inline int amd_is_nb_event(struct hw_perf_event *hwc)
325 {
326         return (hwc->config & 0xe0) == 0xe0;
327 }
328
329 static inline int amd_has_nb(struct cpu_hw_events *cpuc)
330 {
331         struct amd_nb *nb = cpuc->amd_nb;
332
333         return nb && nb->nb_id != -1;
334 }
335
336 static int amd_pmu_hw_config(struct perf_event *event)
337 {
338         int ret;
339
340         /* pass precise event sampling to ibs: */
341         if (event->attr.precise_ip && get_ibs_caps())
342                 return -ENOENT;
343
344         if (has_branch_stack(event))
345                 return -EOPNOTSUPP;
346
347         ret = x86_pmu_hw_config(event);
348         if (ret)
349                 return ret;
350
351         if (event->attr.type == PERF_TYPE_RAW)
352                 event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK;
353
354         return amd_core_hw_config(event);
355 }
356
357 static void __amd_put_nb_event_constraints(struct cpu_hw_events *cpuc,
358                                            struct perf_event *event)
359 {
360         struct amd_nb *nb = cpuc->amd_nb;
361         int i;
362
363         /*
364          * need to scan whole list because event may not have
365          * been assigned during scheduling
366          *
367          * no race condition possible because event can only
368          * be removed on one CPU at a time AND PMU is disabled
369          * when we come here
370          */
371         for (i = 0; i < x86_pmu.num_counters; i++) {
372                 if (cmpxchg(nb->owners + i, event, NULL) == event)
373                         break;
374         }
375 }
376
377  /*
378   * AMD64 NorthBridge events need special treatment because
379   * counter access needs to be synchronized across all cores
380   * of a package. Refer to BKDG section 3.12
381   *
382   * NB events are events measuring L3 cache and HyperTransport
383   * traffic. They are identified by an event code >= 0xe00.
384   * They measure events on the NorthBridge which is shared
385   * by all cores on a package. NB events are counted on a
386   * shared set of counters. When a NB event is programmed
387   * in a counter, the data actually comes from a shared
388   * counter. Thus, access to those counters needs to be
389   * synchronized.
390   *
391   * We implement the synchronization such that no two cores
392   * can be measuring NB events using the same counters. Thus,
393   * we maintain a per-NB allocation table. The available slot
394   * is propagated using the event_constraint structure.
395   *
396   * We provide only one choice for each NB event based on
397   * the fact that only NB events have restrictions. Consequently,
398   * if a counter is available, there is a guarantee the NB event
399   * will be assigned to it. If no slot is available, an empty
400   * constraint is returned and scheduling will eventually fail
401   * for this event.
402   *
403   * Note that all cores attached to the same NB compete for the same
404   * counters to host NB events, which is why we use atomic ops. Some
405   * multi-chip CPUs may have more than one NB.
406   *
407   * Given that resources are allocated (cmpxchg), they must be
408   * eventually freed for others to use. This is accomplished by
409   * calling __amd_put_nb_event_constraints()
410   *
411   * Non-NB events are not impacted by this restriction.
412   */
413 static struct event_constraint *
414 __amd_get_nb_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
415                                struct event_constraint *c)
416 {
417         struct hw_perf_event *hwc = &event->hw;
418         struct amd_nb *nb = cpuc->amd_nb;
419         struct perf_event *old;
420         int idx, new = -1;
421
422         if (!c)
423                 c = &unconstrained;
424
425         if (cpuc->is_fake)
426                 return c;
427
428         /*
429          * detect if already present, if so reuse
430          *
431          * cannot merge with actual allocation
432          * because of possible holes
433          *
434          * event can already be present yet not assigned (in hwc->idx)
435          * because of successive calls to x86_schedule_events() from
436          * hw_perf_group_sched_in() without hw_perf_enable()
437          */
438         for_each_set_bit(idx, c->idxmsk, x86_pmu.num_counters) {
439                 if (new == -1 || hwc->idx == idx)
440                         /* assign free slot, prefer hwc->idx */
441                         old = cmpxchg(nb->owners + idx, NULL, event);
442                 else if (nb->owners[idx] == event)
443                         /* event already present */
444                         old = event;
445                 else
446                         continue;
447
448                 if (old && old != event)
449                         continue;
450
451                 /* reassign to this slot */
452                 if (new != -1)
453                         cmpxchg(nb->owners + new, event, NULL);
454                 new = idx;
455
456                 /* already present, reuse */
457                 if (old == event)
458                         break;
459         }
460
461         if (new == -1)
462                 return &emptyconstraint;
463
464         return &nb->event_constraints[new];
465 }
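
/*
 * Rough sketch of the slot allocation above: with two cores on the same
 * node both scheduling an NB event, the shared nb->owners[] array
 * arbitrates which counter each event may use, e.g.:
 *
 *   core 0: cmpxchg(&nb->owners[0], NULL, evA) == NULL -> evA owns slot 0
 *   core 1: cmpxchg(&nb->owners[0], NULL, evB) == evA  -> taken, try next
 *   core 1: cmpxchg(&nb->owners[1], NULL, evB) == NULL -> evB owns slot 1
 *
 * Each event is then constrained to exactly the counter whose slot it
 * owns, so the two cores never program NB events into the same counter.
 */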
466
467 static struct amd_nb *amd_alloc_nb(int cpu)
468 {
469         struct amd_nb *nb;
470         int i;
471
472         nb = kzalloc_node(sizeof(struct amd_nb), GFP_KERNEL, cpu_to_node(cpu));
473         if (!nb)
474                 return NULL;
475
476         nb->nb_id = -1;
477
478         /*
479          * initialize all possible NB constraints
480          */
481         for (i = 0; i < x86_pmu.num_counters; i++) {
482                 __set_bit(i, nb->event_constraints[i].idxmsk);
483                 nb->event_constraints[i].weight = 1;
484         }
485         return nb;
486 }
487
488 static int amd_pmu_cpu_prepare(int cpu)
489 {
490         struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
491
492         WARN_ON_ONCE(cpuc->amd_nb);
493
494         if (!x86_pmu.amd_nb_constraints)
495                 return 0;
496
497         cpuc->amd_nb = amd_alloc_nb(cpu);
498         if (!cpuc->amd_nb)
499                 return -ENOMEM;
500
501         return 0;
502 }
503
504 static void amd_pmu_cpu_starting(int cpu)
505 {
506         struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
507         void **onln = &cpuc->kfree_on_online[X86_PERF_KFREE_SHARED];
508         struct amd_nb *nb;
509         int i, nb_id;
510
511         cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
512
513         if (!x86_pmu.amd_nb_constraints)
514                 return;
515
516         nb_id = amd_get_nb_id(cpu);
517         WARN_ON_ONCE(nb_id == BAD_APICID);
518
519         for_each_online_cpu(i) {
520                 nb = per_cpu(cpu_hw_events, i).amd_nb;
521                 if (WARN_ON_ONCE(!nb))
522                         continue;
523
524                 if (nb->nb_id == nb_id) {
525                         *onln = cpuc->amd_nb;
526                         cpuc->amd_nb = nb;
527                         break;
528                 }
529         }
530
531         cpuc->amd_nb->nb_id = nb_id;
532         cpuc->amd_nb->refcnt++;
533 }
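
/*
 * In other words: every CPU pre-allocates an amd_nb in
 * amd_pmu_cpu_prepare().  The first CPU to come online on a node keeps
 * its allocation and stamps it with nb_id; every later CPU on that node
 * finds the existing structure in the loop above, queues its own unused
 * allocation for freeing via kfree_on_online[] and takes a reference on
 * the shared one.
 */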
534
535 static void amd_pmu_cpu_dead(int cpu)
536 {
537         struct cpu_hw_events *cpuhw;
538
539         if (!x86_pmu.amd_nb_constraints)
540                 return;
541
542         cpuhw = &per_cpu(cpu_hw_events, cpu);
543
544         if (cpuhw->amd_nb) {
545                 struct amd_nb *nb = cpuhw->amd_nb;
546
547                 if (nb->nb_id == -1 || --nb->refcnt == 0)
548                         kfree(nb);
549
550                 cpuhw->amd_nb = NULL;
551         }
552 }
553
554 static struct event_constraint *
555 amd_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
556                           struct perf_event *event)
557 {
558         /*
559          * if not NB event or no NB, then no constraints
560          */
561         if (!(amd_has_nb(cpuc) && amd_is_nb_event(&event->hw)))
562                 return &unconstrained;
563
564         return __amd_get_nb_event_constraints(cpuc, event, NULL);
565 }
566
567 static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
568                                       struct perf_event *event)
569 {
570         if (amd_has_nb(cpuc) && amd_is_nb_event(&event->hw))
571                 __amd_put_nb_event_constraints(cpuc, event);
572 }
573
574 PMU_FORMAT_ATTR(event,  "config:0-7,32-35");
575 PMU_FORMAT_ATTR(umask,  "config:8-15"   );
576 PMU_FORMAT_ATTR(edge,   "config:18"     );
577 PMU_FORMAT_ATTR(inv,    "config:23"     );
578 PMU_FORMAT_ATTR(cmask,  "config:24-31"  );
579
580 static struct attribute *amd_format_attr[] = {
581         &format_attr_event.attr,
582         &format_attr_umask.attr,
583         &format_attr_edge.attr,
584         &format_attr_inv.attr,
585         &format_attr_cmask.attr,
586         NULL,
587 };
588
589 /* AMD Family 15h */
590
591 #define AMD_EVENT_TYPE_MASK     0x000000F0ULL
592
593 #define AMD_EVENT_FP            0x00000000ULL ... 0x00000010ULL
594 #define AMD_EVENT_LS            0x00000020ULL ... 0x00000030ULL
595 #define AMD_EVENT_DC            0x00000040ULL ... 0x00000050ULL
596 #define AMD_EVENT_CU            0x00000060ULL ... 0x00000070ULL
597 #define AMD_EVENT_IC_DE         0x00000080ULL ... 0x00000090ULL
598 #define AMD_EVENT_EX_LS         0x000000C0ULL
599 #define AMD_EVENT_DE            0x000000D0ULL
600 #define AMD_EVENT_NB            0x000000E0ULL ... 0x000000F0ULL
601
602 /*
603  * AMD family 15h event code/PMC mappings:
604  *
605  * type = event_code & 0x0F0:
606  *
607  * 0x000        FP      PERF_CTL[5:3]
608  * 0x010        FP      PERF_CTL[5:3]
609  * 0x020        LS      PERF_CTL[5:0]
610  * 0x030        LS      PERF_CTL[5:0]
611  * 0x040        DC      PERF_CTL[5:0]
612  * 0x050        DC      PERF_CTL[5:0]
613  * 0x060        CU      PERF_CTL[2:0]
614  * 0x070        CU      PERF_CTL[2:0]
615  * 0x080        IC/DE   PERF_CTL[2:0]
616  * 0x090        IC/DE   PERF_CTL[2:0]
617  * 0x0A0        ---
618  * 0x0B0        ---
619  * 0x0C0        EX/LS   PERF_CTL[5:0]
620  * 0x0D0        DE      PERF_CTL[2:0]
621  * 0x0E0        NB      NB_PERF_CTL[3:0]
622  * 0x0F0        NB      NB_PERF_CTL[3:0]
623  *
624  * Exceptions:
625  *
626  * 0x000        FP      PERF_CTL[3], PERF_CTL[5:3] (*)
627  * 0x003        FP      PERF_CTL[3]
628  * 0x004        FP      PERF_CTL[3], PERF_CTL[5:3] (*)
629  * 0x00B        FP      PERF_CTL[3]
630  * 0x00D        FP      PERF_CTL[3]
631  * 0x023        DE      PERF_CTL[2:0]
632  * 0x02D        LS      PERF_CTL[3]
633  * 0x02E        LS      PERF_CTL[3,0]
634  * 0x031        LS      PERF_CTL[2:0] (**)
635  * 0x043        CU      PERF_CTL[2:0]
636  * 0x045        CU      PERF_CTL[2:0]
637  * 0x046        CU      PERF_CTL[2:0]
638  * 0x054        CU      PERF_CTL[2:0]
639  * 0x055        CU      PERF_CTL[2:0]
640  * 0x08F        IC      PERF_CTL[0]
641  * 0x187        DE      PERF_CTL[0]
642  * 0x188        DE      PERF_CTL[0]
643  * 0x0DB        EX      PERF_CTL[5:0]
644  * 0x0DC        LS      PERF_CTL[5:0]
645  * 0x0DD        LS      PERF_CTL[5:0]
646  * 0x0DE        LS      PERF_CTL[5:0]
647  * 0x0DF        LS      PERF_CTL[5:0]
648  * 0x1C0        EX      PERF_CTL[5:3]
649  * 0x1D6        EX      PERF_CTL[5:0]
650  * 0x1D8        EX      PERF_CTL[5:0]
651  *
652  * (*)  depending on the umask all FPU counters may be used
653  * (**) only one unitmask enabled at a time
654  */
655
656 static struct event_constraint amd_f15_PMC0  = EVENT_CONSTRAINT(0, 0x01, 0);
657 static struct event_constraint amd_f15_PMC20 = EVENT_CONSTRAINT(0, 0x07, 0);
658 static struct event_constraint amd_f15_PMC3  = EVENT_CONSTRAINT(0, 0x08, 0);
659 static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
660 static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0);
661 static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);
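
/*
 * For reference, the counter masks above decode as:
 *
 *   amd_f15_PMC0   0x01 -> PERF_CTL[0]
 *   amd_f15_PMC20  0x07 -> PERF_CTL[2:0]
 *   amd_f15_PMC3   0x08 -> PERF_CTL[3]
 *   amd_f15_PMC30  0x09 -> PERF_CTL[3,0]   (overlapping counter sets)
 *   amd_f15_PMC50  0x3f -> PERF_CTL[5:0]
 *   amd_f15_PMC53  0x38 -> PERF_CTL[5:3]
 */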
662
663 static struct event_constraint *
664 amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, int idx,
665                                struct perf_event *event)
666 {
667         struct hw_perf_event *hwc = &event->hw;
668         unsigned int event_code = amd_get_event_code(hwc);
669
670         switch (event_code & AMD_EVENT_TYPE_MASK) {
671         case AMD_EVENT_FP:
672                 switch (event_code) {
673                 case 0x000:
674                         if (!(hwc->config & 0x0000F000ULL))
675                                 break;
676                         if (!(hwc->config & 0x00000F00ULL))
677                                 break;
678                         return &amd_f15_PMC3;
679                 case 0x004:
680                         if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
681                                 break;
682                         return &amd_f15_PMC3;
683                 case 0x003:
684                 case 0x00B:
685                 case 0x00D:
686                         return &amd_f15_PMC3;
687                 }
688                 return &amd_f15_PMC53;
689         case AMD_EVENT_LS:
690         case AMD_EVENT_DC:
691         case AMD_EVENT_EX_LS:
692                 switch (event_code) {
693                 case 0x023:
694                 case 0x043:
695                 case 0x045:
696                 case 0x046:
697                 case 0x054:
698                 case 0x055:
699                         return &amd_f15_PMC20;
700                 case 0x02D:
701                         return &amd_f15_PMC3;
702                 case 0x02E:
703                         return &amd_f15_PMC30;
704                 case 0x031:
705                         if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
706                                 return &amd_f15_PMC20;
707                         return &emptyconstraint;
708                 case 0x1C0:
709                         return &amd_f15_PMC53;
710                 default:
711                         return &amd_f15_PMC50;
712                 }
713         case AMD_EVENT_CU:
714         case AMD_EVENT_IC_DE:
715         case AMD_EVENT_DE:
716                 switch (event_code) {
717                 case 0x08F:
718                 case 0x187:
719                 case 0x188:
720                         return &amd_f15_PMC0;
721                 case 0x0DB ... 0x0DF:
722                 case 0x1D6:
723                 case 0x1D8:
724                         return &amd_f15_PMC50;
725                 default:
726                         return &amd_f15_PMC20;
727                 }
728         case AMD_EVENT_NB:
729                 /* moved to perf_event_amd_uncore.c */
730                 return &emptyconstraint;
731         default:
732                 return &emptyconstraint;
733         }
734 }
735
736 static ssize_t amd_event_sysfs_show(char *page, u64 config)
737 {
738         u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT) |
739                     (config & AMD64_EVENTSEL_EVENT) >> 24;
740
741         return x86_event_sysfs_show(page, config, event);
742 }
743
744 static __initconst const struct x86_pmu amd_pmu = {
745         .name                   = "AMD",
746         .handle_irq             = x86_pmu_handle_irq,
747         .disable_all            = x86_pmu_disable_all,
748         .enable_all             = x86_pmu_enable_all,
749         .enable                 = x86_pmu_enable_event,
750         .disable                = x86_pmu_disable_event,
751         .hw_config              = amd_pmu_hw_config,
752         .schedule_events        = x86_schedule_events,
753         .eventsel               = MSR_K7_EVNTSEL0,
754         .perfctr                = MSR_K7_PERFCTR0,
755         .addr_offset            = amd_pmu_addr_offset,
756         .event_map              = amd_pmu_event_map,
757         .max_events             = ARRAY_SIZE(amd_perfmon_event_map),
758         .num_counters           = AMD64_NUM_COUNTERS,
759         .cntval_bits            = 48,
760         .cntval_mask            = (1ULL << 48) - 1,
761         .apic                   = 1,
762         /* use highest bit to detect overflow */
763         .max_period             = (1ULL << 47) - 1,
764         .get_event_constraints  = amd_get_event_constraints,
765         .put_event_constraints  = amd_put_event_constraints,
766
767         .format_attrs           = amd_format_attr,
768         .events_sysfs_show      = amd_event_sysfs_show,
769
770         .cpu_prepare            = amd_pmu_cpu_prepare,
771         .cpu_starting           = amd_pmu_cpu_starting,
772         .cpu_dead               = amd_pmu_cpu_dead,
773
774         .amd_nb_constraints     = 1,
775 };
776
777 static int __init amd_core_pmu_init(void)
778 {
779         if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
780                 return 0;
781
782         switch (boot_cpu_data.x86) {
783         case 0x15:
784                 pr_cont("Fam15h ");
785                 x86_pmu.get_event_constraints = amd_get_event_constraints_f15h;
786                 break;
787         case 0x17:
788                 pr_cont("Fam17h ");
789                 /*
790                  * In family 17h, there are no event constraints in the PMC hardware.
791          * We fall back to using the default amd_get_event_constraints().
792                  */
793                 break;
794         default:
795                 pr_err("core perfctr but no constraints; unknown hardware!\n");
796                 return -ENODEV;
797         }
798
799         /*
800          * If core performance counter extensions exist, we must use
801          * the MSR_F15H_PERF_CTL/MSR_F15H_PERF_CTR MSRs. See also
802          * amd_pmu_addr_offset().
803          */
804         x86_pmu.eventsel        = MSR_F15H_PERF_CTL;
805         x86_pmu.perfctr         = MSR_F15H_PERF_CTR;
806         x86_pmu.num_counters    = AMD64_NUM_COUNTERS_CORE;
807         /*
808          * AMD Core perfctr has separate MSRs for the NB events, see
809          * the amd/uncore.c driver.
810          */
811         x86_pmu.amd_nb_constraints = 0;
812
813         pr_cont("core perfctr, ");
814         return 0;
815 }
816
817 __init int amd_pmu_init(void)
818 {
819         int ret;
820
821         /* Performance-monitoring supported from K7 and later: */
822         if (boot_cpu_data.x86 < 6)
823                 return -ENODEV;
824
825         x86_pmu = amd_pmu;
826
827         ret = amd_core_pmu_init();
828         if (ret)
829                 return ret;
830
831         if (num_possible_cpus() == 1) {
832                 /*
833                  * No point in allocating data structures to serialize
834                  * against other CPUs, when there is only the one CPU.
835                  */
836                 x86_pmu.amd_nb_constraints = 0;
837         }
838
839         if (boot_cpu_data.x86 >= 0x17)
840                 memcpy(hw_cache_event_ids, amd_hw_cache_event_ids_f17h, sizeof(hw_cache_event_ids));
841         else
842                 memcpy(hw_cache_event_ids, amd_hw_cache_event_ids, sizeof(hw_cache_event_ids));
843
844         return 0;
845 }
846
847 void amd_pmu_enable_virt(void)
848 {
849         struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
850
851         cpuc->perf_ctr_virt_mask = 0;
852
853         /* Reload all events */
854         x86_pmu_disable_all();
855         x86_pmu_enable_all(0);
856 }
857 EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);
858
859 void amd_pmu_disable_virt(void)
860 {
861         struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
862
863         /*
864          * We only mask out the Host-only bit so that host-only counting works
865          * when SVM is disabled. If someone sets up a guest-only counter when
866          * SVM is disabled, the Guest-only bit still gets set and the counter
867          * will not count anything.
868          */
869         cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
870
871         /* Reload all events */
872         x86_pmu_disable_all();
873         x86_pmu_enable_all(0);
874 }
875 EXPORT_SYMBOL_GPL(amd_pmu_disable_virt);