/* Nehalem/Sandy Bridge/Haswell/Broadwell/Skylake uncore support */
#include "uncore.h"

/* Uncore IMC PCI IDs */
#define PCI_DEVICE_ID_INTEL_SNB_IMC     0x0100
#define PCI_DEVICE_ID_INTEL_IVB_IMC     0x0154
#define PCI_DEVICE_ID_INTEL_IVB_E3_IMC  0x0150
#define PCI_DEVICE_ID_INTEL_HSW_IMC     0x0c00
#define PCI_DEVICE_ID_INTEL_HSW_U_IMC   0x0a04
#define PCI_DEVICE_ID_INTEL_BDW_IMC     0x1604
#define PCI_DEVICE_ID_INTEL_SKL_U_IMC   0x1904
#define PCI_DEVICE_ID_INTEL_SKL_Y_IMC   0x190c
#define PCI_DEVICE_ID_INTEL_SKL_HD_IMC  0x1900
#define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC  0x1910
#define PCI_DEVICE_ID_INTEL_SKL_SD_IMC  0x190f
#define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC  0x191f
#define PCI_DEVICE_ID_INTEL_KBL_Y_IMC   0x590c
#define PCI_DEVICE_ID_INTEL_KBL_U_IMC   0x5904
#define PCI_DEVICE_ID_INTEL_KBL_UQ_IMC  0x5914
#define PCI_DEVICE_ID_INTEL_KBL_SD_IMC  0x590f
#define PCI_DEVICE_ID_INTEL_KBL_SQ_IMC  0x591f
#define PCI_DEVICE_ID_INTEL_CFL_2U_IMC  0x3ecc
#define PCI_DEVICE_ID_INTEL_CFL_4U_IMC  0x3ed0
#define PCI_DEVICE_ID_INTEL_CFL_4H_IMC  0x3e10
#define PCI_DEVICE_ID_INTEL_CFL_6H_IMC  0x3ec4
#define PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC        0x3e0f
#define PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC        0x3e1f
#define PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC        0x3ec2
#define PCI_DEVICE_ID_INTEL_CFL_8S_D_IMC        0x3e30
#define PCI_DEVICE_ID_INTEL_CFL_4S_W_IMC        0x3e18
#define PCI_DEVICE_ID_INTEL_CFL_6S_W_IMC        0x3ec6
#define PCI_DEVICE_ID_INTEL_CFL_8S_W_IMC        0x3e31
#define PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC        0x3e33
#define PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC        0x3eca
#define PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC        0x3e32

/* SNB event control */
#define SNB_UNC_CTL_EV_SEL_MASK                 0x000000ff
#define SNB_UNC_CTL_UMASK_MASK                  0x0000ff00
#define SNB_UNC_CTL_EDGE_DET                    (1 << 18)
#define SNB_UNC_CTL_EN                          (1 << 22)
#define SNB_UNC_CTL_INVERT                      (1 << 23)
#define SNB_UNC_CTL_CMASK_MASK                  0x1f000000
#define NHM_UNC_CTL_CMASK_MASK                  0xff000000
#define NHM_UNC_FIXED_CTR_CTL_EN                (1 << 0)

#define SNB_UNC_RAW_EVENT_MASK                  (SNB_UNC_CTL_EV_SEL_MASK | \
                                                 SNB_UNC_CTL_UMASK_MASK | \
                                                 SNB_UNC_CTL_EDGE_DET | \
                                                 SNB_UNC_CTL_INVERT | \
                                                 SNB_UNC_CTL_CMASK_MASK)

#define NHM_UNC_RAW_EVENT_MASK                  (SNB_UNC_CTL_EV_SEL_MASK | \
                                                 SNB_UNC_CTL_UMASK_MASK | \
                                                 SNB_UNC_CTL_EDGE_DET | \
                                                 SNB_UNC_CTL_INVERT | \
                                                 NHM_UNC_CTL_CMASK_MASK)
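
/*
 * For illustration: a raw uncore event config is assembled from the
 * fields masked above.  For example, event 0x80 with umask 0x01 and
 * edge detect set encodes as
 *
 *      0x80 | (0x01 << 8) | SNB_UNC_CTL_EDGE_DET == 0x40180
 *
 * which perf expresses as "event=0x80,umask=0x1,edge=1".
 */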

/* SNB global control register */
#define SNB_UNC_PERF_GLOBAL_CTL                 0x391
#define SNB_UNC_FIXED_CTR_CTRL                  0x394
#define SNB_UNC_FIXED_CTR                       0x395

/* SNB uncore global control */
#define SNB_UNC_GLOBAL_CTL_CORE_ALL             ((1 << 4) - 1)
#define SNB_UNC_GLOBAL_CTL_EN                   (1 << 29)

/* SNB Cbo register */
#define SNB_UNC_CBO_0_PERFEVTSEL0               0x700
#define SNB_UNC_CBO_0_PER_CTR0                  0x706
#define SNB_UNC_CBO_MSR_OFFSET                  0x10

/* SNB ARB register */
#define SNB_UNC_ARB_PER_CTR0                    0x3b0
#define SNB_UNC_ARB_PERFEVTSEL0                 0x3b2
#define SNB_UNC_ARB_MSR_OFFSET                  0x10

/* NHM global control register */
#define NHM_UNC_PERF_GLOBAL_CTL                 0x391
#define NHM_UNC_FIXED_CTR                       0x394
#define NHM_UNC_FIXED_CTR_CTRL                  0x395

/* NHM uncore global control */
#define NHM_UNC_GLOBAL_CTL_EN_PC_ALL            ((1ULL << 8) - 1)
#define NHM_UNC_GLOBAL_CTL_EN_FC                (1ULL << 32)

/* NHM uncore register */
#define NHM_UNC_PERFEVTSEL0                     0x3c0
#define NHM_UNC_UNCORE_PMC0                     0x3b0

/* SKL uncore global control */
#define SKL_UNC_PERF_GLOBAL_CTL                 0xe01
#define SKL_UNC_GLOBAL_CTL_CORE_ALL             ((1 << 5) - 1)

DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");
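
/*
 * These attributes surface as files under the PMU's sysfs "format"
 * directory, e.g. (path shown for illustration; the exact PMU name
 * depends on the platform and box index):
 *
 *      $ cat /sys/bus/event_source/devices/uncore_cbox_0/format/event
 *      config:0-7
 */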

/* Sandy Bridge uncore support */
static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        if (hwc->idx < UNCORE_PMC_IDX_FIXED)
                wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
        else
                wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
}
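
/*
 * Note: the fixed counter (index UNCORE_PMC_IDX_FIXED and above) has no
 * event select field, so the else branch above programs only the enable
 * bit SNB_UNC_CTL_EN.
 */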

static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
        wrmsrl(event->hw.config_base, 0);
}

static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
{
        if (box->pmu->pmu_idx == 0) {
                wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
                        SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
        }
}

static void snb_uncore_msr_enable_box(struct intel_uncore_box *box)
{
        wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
                SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
}

static void snb_uncore_msr_exit_box(struct intel_uncore_box *box)
{
        if (box->pmu->pmu_idx == 0)
                wrmsrl(SNB_UNC_PERF_GLOBAL_CTL, 0);
}

static struct uncore_event_desc snb_uncore_events[] = {
        INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
        { /* end: all zeroes */ },
};

static struct attribute *snb_uncore_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_cmask5.attr,
        NULL,
};

static struct attribute_group snb_uncore_format_group = {
        .name           = "format",
        .attrs          = snb_uncore_formats_attr,
};

static struct intel_uncore_ops snb_uncore_msr_ops = {
        .init_box       = snb_uncore_msr_init_box,
        .enable_box     = snb_uncore_msr_enable_box,
        .exit_box       = snb_uncore_msr_exit_box,
        .disable_event  = snb_uncore_msr_disable_event,
        .enable_event   = snb_uncore_msr_enable_event,
        .read_counter   = uncore_msr_read_counter,
};

static struct event_constraint snb_uncore_arb_constraints[] = {
        UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
        EVENT_CONSTRAINT_END
};
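
/*
 * In UNCORE_EVENT_CONSTRAINT(event, cntmask) the second argument is a
 * counter bitmask, so the table above restricts ARB events 0x80 and 0x83
 * to counter 0 (mask 0x1).
 */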

static struct intel_uncore_type snb_uncore_cbox = {
        .name           = "cbox",
        .num_counters   = 2,
        .num_boxes      = 4,
        .perf_ctr_bits  = 44,
        .fixed_ctr_bits = 48,
        .perf_ctr       = SNB_UNC_CBO_0_PER_CTR0,
        .event_ctl      = SNB_UNC_CBO_0_PERFEVTSEL0,
        .fixed_ctr      = SNB_UNC_FIXED_CTR,
        .fixed_ctl      = SNB_UNC_FIXED_CTR_CTRL,
        .single_fixed   = 1,
        .event_mask     = SNB_UNC_RAW_EVENT_MASK,
        .msr_offset     = SNB_UNC_CBO_MSR_OFFSET,
        .ops            = &snb_uncore_msr_ops,
        .format_group   = &snb_uncore_format_group,
        .event_descs    = snb_uncore_events,
};

static struct intel_uncore_type snb_uncore_arb = {
        .name           = "arb",
        .num_counters   = 2,
        .num_boxes      = 1,
        .perf_ctr_bits  = 44,
        .perf_ctr       = SNB_UNC_ARB_PER_CTR0,
        .event_ctl      = SNB_UNC_ARB_PERFEVTSEL0,
        .event_mask     = SNB_UNC_RAW_EVENT_MASK,
        .msr_offset     = SNB_UNC_ARB_MSR_OFFSET,
        .constraints    = snb_uncore_arb_constraints,
        .ops            = &snb_uncore_msr_ops,
        .format_group   = &snb_uncore_format_group,
};

static struct intel_uncore_type *snb_msr_uncores[] = {
        &snb_uncore_cbox,
        &snb_uncore_arb,
        NULL,
};

void snb_uncore_cpu_init(void)
{
        uncore_msr_uncores = snb_msr_uncores;
        if (snb_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
                snb_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
}

static void skl_uncore_msr_init_box(struct intel_uncore_box *box)
{
        if (box->pmu->pmu_idx == 0) {
                wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
                        SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
        }
}

static void skl_uncore_msr_enable_box(struct intel_uncore_box *box)
{
        wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
                SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
}

static void skl_uncore_msr_exit_box(struct intel_uncore_box *box)
{
        if (box->pmu->pmu_idx == 0)
                wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, 0);
}

static struct intel_uncore_ops skl_uncore_msr_ops = {
        .init_box       = skl_uncore_msr_init_box,
        .enable_box     = skl_uncore_msr_enable_box,
        .exit_box       = skl_uncore_msr_exit_box,
        .disable_event  = snb_uncore_msr_disable_event,
        .enable_event   = snb_uncore_msr_enable_event,
        .read_counter   = uncore_msr_read_counter,
};

static struct intel_uncore_type skl_uncore_cbox = {
        .name           = "cbox",
        .num_counters   = 4,
        .num_boxes      = 5,
        .perf_ctr_bits  = 44,
        .fixed_ctr_bits = 48,
        .perf_ctr       = SNB_UNC_CBO_0_PER_CTR0,
        .event_ctl      = SNB_UNC_CBO_0_PERFEVTSEL0,
        .fixed_ctr      = SNB_UNC_FIXED_CTR,
        .fixed_ctl      = SNB_UNC_FIXED_CTR_CTRL,
        .single_fixed   = 1,
        .event_mask     = SNB_UNC_RAW_EVENT_MASK,
        .msr_offset     = SNB_UNC_CBO_MSR_OFFSET,
        .ops            = &skl_uncore_msr_ops,
        .format_group   = &snb_uncore_format_group,
        .event_descs    = snb_uncore_events,
};

static struct intel_uncore_type *skl_msr_uncores[] = {
        &skl_uncore_cbox,
        &snb_uncore_arb,
        NULL,
};

void skl_uncore_cpu_init(void)
{
        uncore_msr_uncores = skl_msr_uncores;
        if (skl_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
                skl_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
        snb_uncore_arb.ops = &skl_uncore_msr_ops;
}

enum {
        SNB_PCI_UNCORE_IMC,
};

static struct uncore_event_desc snb_uncore_imc_events[] = {
        INTEL_UNCORE_EVENT_DESC(data_reads,  "event=0x01"),
        INTEL_UNCORE_EVENT_DESC(data_reads.scale, "6.103515625e-5"),
        INTEL_UNCORE_EVENT_DESC(data_reads.unit, "MiB"),

        INTEL_UNCORE_EVENT_DESC(data_writes, "event=0x02"),
        INTEL_UNCORE_EVENT_DESC(data_writes.scale, "6.103515625e-5"),
        INTEL_UNCORE_EVENT_DESC(data_writes.unit, "MiB"),

        { /* end: all zeroes */ },
};
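
/*
 * The IMC counters count 64-byte cache lines; the scale string above,
 * 6.103515625e-5 == 64 / 2^20, converts the raw count to MiB.  Usage
 * sketch (illustrative command line; PMU and event names as exposed by
 * this file):
 *
 *      $ perf stat -a -e uncore_imc/data_reads/,uncore_imc/data_writes/ sleep 1
 */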

#define SNB_UNCORE_PCI_IMC_EVENT_MASK           0xff
#define SNB_UNCORE_PCI_IMC_BAR_OFFSET           0x48

/* page size multiple covering all config regs */
#define SNB_UNCORE_PCI_IMC_MAP_SIZE             0x6000

#define SNB_UNCORE_PCI_IMC_DATA_READS           0x1
#define SNB_UNCORE_PCI_IMC_DATA_READS_BASE      0x5050
#define SNB_UNCORE_PCI_IMC_DATA_WRITES          0x2
#define SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE     0x5054
#define SNB_UNCORE_PCI_IMC_CTR_BASE             SNB_UNCORE_PCI_IMC_DATA_READS_BASE

static struct attribute *snb_uncore_imc_formats_attr[] = {
        &format_attr_event.attr,
        NULL,
};

static struct attribute_group snb_uncore_imc_format_group = {
        .name = "format",
        .attrs = snb_uncore_imc_formats_attr,
};

static void snb_uncore_imc_init_box(struct intel_uncore_box *box)
{
        struct pci_dev *pdev = box->pci_dev;
        int where = SNB_UNCORE_PCI_IMC_BAR_OFFSET;
        resource_size_t addr;
        u32 pci_dword;

        pci_read_config_dword(pdev, where, &pci_dword);
        addr = pci_dword;

#ifdef CONFIG_PHYS_ADDR_T_64BIT
        pci_read_config_dword(pdev, where + 4, &pci_dword);
        addr |= ((resource_size_t)pci_dword << 32);
#endif

        addr &= ~(PAGE_SIZE - 1);

        box->io_addr = ioremap(addr, SNB_UNCORE_PCI_IMC_MAP_SIZE);
        box->hrtimer_duration = UNCORE_SNB_IMC_HRTIMER_INTERVAL;
}
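
/*
 * Sketch of the BAR decode above: the low dword at config offset 0x48
 * holds the base address (plus low flag bits), the high dword at 0x4c
 * supplies bits 63:32 on 64-bit physical address builds, and the final
 * mask drops the sub-page bits before the region is ioremap()ed.
 */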

static void snb_uncore_imc_exit_box(struct intel_uncore_box *box)
{
        iounmap(box->io_addr);
}

static void snb_uncore_imc_enable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_disable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{}

static void snb_uncore_imc_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{}

static u64 snb_uncore_imc_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        return (u64)*(unsigned int *)(box->io_addr + hwc->event_base);
}
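
/*
 * The IMC counters are free-running 32-bit MMIO registers (fixed_ctr_bits
 * is 32 in snb_uncore_imc below), so the hrtimer armed in
 * snb_uncore_imc_init_box() polls them often enough to observe wraparound;
 * the enable/disable callbacks above are intentionally empty.
 */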

/*
 * Custom event_init() function: this PMU defines its own fixed, free
 * running counters, so we do not want to conflict with the generic
 * uncore logic.  This also simplifies processing.
 */
static int snb_uncore_imc_event_init(struct perf_event *event)
{
        struct intel_uncore_pmu *pmu;
        struct intel_uncore_box *box;
        struct hw_perf_event *hwc = &event->hw;
        u64 cfg = event->attr.config & SNB_UNCORE_PCI_IMC_EVENT_MASK;
        int idx, base;

        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        pmu = uncore_event_to_pmu(event);
        /* no device found for this pmu */
        if (pmu->func_id < 0)
                return -ENOENT;

        /* Sampling not supported yet */
        if (hwc->sample_period)
                return -EINVAL;

        /* unsupported modes and filters */
        if (event->attr.exclude_user   ||
            event->attr.exclude_kernel ||
            event->attr.exclude_hv     ||
            event->attr.exclude_idle   ||
            event->attr.exclude_host   ||
            event->attr.exclude_guest  ||
            event->attr.sample_period) /* no sampling */
                return -EINVAL;

        /*
         * Place all uncore events for a particular physical package
         * onto a single cpu
         */
        if (event->cpu < 0)
                return -EINVAL;

        /* check only supported bits are set */
        if (event->attr.config & ~SNB_UNCORE_PCI_IMC_EVENT_MASK)
                return -EINVAL;

        box = uncore_pmu_to_box(pmu, event->cpu);
        if (!box || box->cpu < 0)
                return -EINVAL;

        event->cpu = box->cpu;
        event->pmu_private = box;

        event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;

        event->hw.idx = -1;
        event->hw.last_tag = ~0ULL;
        event->hw.extra_reg.idx = EXTRA_REG_NONE;
        event->hw.branch_reg.idx = EXTRA_REG_NONE;
        /*
         * check event is known (whitelist, determines counter)
         */
        switch (cfg) {
        case SNB_UNCORE_PCI_IMC_DATA_READS:
                base = SNB_UNCORE_PCI_IMC_DATA_READS_BASE;
                idx = UNCORE_PMC_IDX_FIXED;
                break;
        case SNB_UNCORE_PCI_IMC_DATA_WRITES:
                base = SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE;
                idx = UNCORE_PMC_IDX_FIXED + 1;
                break;
        default:
                return -EINVAL;
        }

        /* must be done before validate_group */
        event->hw.event_base = base;
        event->hw.config = cfg;
        event->hw.idx = idx;

        /* no group validation needed, we have free running counters */

        return 0;
}
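
/*
 * Usage sketch for the raw interface accepted above (illustrative
 * command line): "event=0x01" selects the data_reads free-running
 * counter, so
 *
 *      $ perf stat -a -e uncore_imc/event=0x01/ sleep 1
 *
 * is equivalent to the named data_reads event.
 */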

static int snb_uncore_imc_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
        return 0;
}

static void snb_uncore_imc_event_start(struct perf_event *event, int flags)
{
        struct intel_uncore_box *box = uncore_event_to_box(event);
        u64 count;

        if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
                return;

        event->hw.state = 0;
        box->n_active++;

        list_add_tail(&event->active_entry, &box->active_list);

        count = snb_uncore_imc_read_counter(box, event);
        local64_set(&event->hw.prev_count, count);

        if (box->n_active == 1)
                uncore_pmu_start_hrtimer(box);
}

static void snb_uncore_imc_event_stop(struct perf_event *event, int flags)
{
        struct intel_uncore_box *box = uncore_event_to_box(event);
        struct hw_perf_event *hwc = &event->hw;

        if (!(hwc->state & PERF_HES_STOPPED)) {
                box->n_active--;

                WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
                hwc->state |= PERF_HES_STOPPED;

                list_del(&event->active_entry);

                if (box->n_active == 0)
                        uncore_pmu_cancel_hrtimer(box);
        }

        if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
                /*
                 * Drain the remaining delta count out of an event
                 * that we are disabling:
                 */
                uncore_perf_event_update(box, event);
                hwc->state |= PERF_HES_UPTODATE;
        }
}

static int snb_uncore_imc_event_add(struct perf_event *event, int flags)
{
        struct intel_uncore_box *box = uncore_event_to_box(event);
        struct hw_perf_event *hwc = &event->hw;

        if (!box)
                return -ENODEV;

        hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
        if (!(flags & PERF_EF_START))
                hwc->state |= PERF_HES_ARCH;

        snb_uncore_imc_event_start(event, 0);

        return 0;
}

static void snb_uncore_imc_event_del(struct perf_event *event, int flags)
{
        snb_uncore_imc_event_stop(event, PERF_EF_UPDATE);
}

int snb_pci2phy_map_init(int devid)
{
        struct pci_dev *dev = NULL;
        struct pci2phy_map *map;
        int bus, segment;

        dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, dev);
        if (!dev)
                return -ENOTTY;

        bus = dev->bus->number;
        segment = pci_domain_nr(dev->bus);

        raw_spin_lock(&pci2phy_map_lock);
        map = __find_pci2phy_map(segment);
        if (!map) {
                raw_spin_unlock(&pci2phy_map_lock);
                pci_dev_put(dev);
                return -ENOMEM;
        }
        map->pbus_to_physid[bus] = 0;
        raw_spin_unlock(&pci2phy_map_lock);

        pci_dev_put(dev);

        return 0;
}
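
/*
 * The client parts handled here are single-package, so the bus hosting
 * the IMC device is simply mapped to physical package id 0 above.
 * Failure to find the device (-ENOTTY) lets imc_uncore_find_dev() below
 * try the next PCI id in its table.
 */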

static struct pmu snb_uncore_imc_pmu = {
        .task_ctx_nr    = perf_invalid_context,
        .event_init     = snb_uncore_imc_event_init,
        .add            = snb_uncore_imc_event_add,
        .del            = snb_uncore_imc_event_del,
        .start          = snb_uncore_imc_event_start,
        .stop           = snb_uncore_imc_event_stop,
        .read           = uncore_pmu_event_read,
};

static struct intel_uncore_ops snb_uncore_imc_ops = {
        .init_box       = snb_uncore_imc_init_box,
        .exit_box       = snb_uncore_imc_exit_box,
        .enable_box     = snb_uncore_imc_enable_box,
        .disable_box    = snb_uncore_imc_disable_box,
        .disable_event  = snb_uncore_imc_disable_event,
        .enable_event   = snb_uncore_imc_enable_event,
        .hw_config      = snb_uncore_imc_hw_config,
        .read_counter   = snb_uncore_imc_read_counter,
};

static struct intel_uncore_type snb_uncore_imc = {
        .name           = "imc",
        .num_counters   = 2,
        .num_boxes      = 1,
        .fixed_ctr_bits = 32,
        .fixed_ctr      = SNB_UNCORE_PCI_IMC_CTR_BASE,
        .event_descs    = snb_uncore_imc_events,
        .format_group   = &snb_uncore_imc_format_group,
        .perf_ctr       = SNB_UNCORE_PCI_IMC_DATA_READS_BASE,
        .event_mask     = SNB_UNCORE_PCI_IMC_EVENT_MASK,
        .ops            = &snb_uncore_imc_ops,
        .pmu            = &snb_uncore_imc_pmu,
};

static struct intel_uncore_type *snb_pci_uncores[] = {
        [SNB_PCI_UNCORE_IMC]    = &snb_uncore_imc,
        NULL,
};

static const struct pci_device_id snb_uncore_pci_ids[] = {
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SNB_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* end: all zeroes */ },
};

static const struct pci_device_id ivb_uncore_pci_ids[] = {
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_E3_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* end: all zeroes */ },
};

static const struct pci_device_id hsw_uncore_pci_ids[] = {
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_U_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* end: all zeroes */ },
};

static const struct pci_device_id bdw_uncore_pci_ids[] = {
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BDW_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* end: all zeroes */ },
};

static const struct pci_device_id skl_uncore_pci_ids[] = {
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_Y_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_U_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HD_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HQ_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SD_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SQ_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_Y_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_U_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_UQ_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_SD_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_SQ_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_2U_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4U_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4H_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6H_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_D_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_W_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_W_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_W_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* end: all zeroes */ },
};

static struct pci_driver snb_uncore_pci_driver = {
        .name           = "snb_uncore",
        .id_table       = snb_uncore_pci_ids,
};

static struct pci_driver ivb_uncore_pci_driver = {
        .name           = "ivb_uncore",
        .id_table       = ivb_uncore_pci_ids,
};

static struct pci_driver hsw_uncore_pci_driver = {
        .name           = "hsw_uncore",
        .id_table       = hsw_uncore_pci_ids,
};

static struct pci_driver bdw_uncore_pci_driver = {
        .name           = "bdw_uncore",
        .id_table       = bdw_uncore_pci_ids,
};

static struct pci_driver skl_uncore_pci_driver = {
        .name           = "skl_uncore",
        .id_table       = skl_uncore_pci_ids,
};

struct imc_uncore_pci_dev {
        __u32 pci_id;
        struct pci_driver *driver;
};
#define IMC_DEV(a, d) \
        { .pci_id = PCI_DEVICE_ID_INTEL_##a, .driver = (d) }

static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
        IMC_DEV(SNB_IMC, &snb_uncore_pci_driver),
        IMC_DEV(IVB_IMC, &ivb_uncore_pci_driver),    /* 3rd Gen Core processor */
        IMC_DEV(IVB_E3_IMC, &ivb_uncore_pci_driver), /* Xeon E3-1200 v2/3rd Gen Core processor */
        IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver),    /* 4th Gen Core Processor */
        IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver),  /* 4th Gen Core ULT Mobile Processor */
        IMC_DEV(BDW_IMC, &bdw_uncore_pci_driver),    /* 5th Gen Core U */
        IMC_DEV(SKL_Y_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core Y */
        IMC_DEV(SKL_U_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core U */
        IMC_DEV(SKL_HD_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core H Dual Core */
        IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core H Quad Core */
        IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core S Dual Core */
        IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core S Quad Core */
        IMC_DEV(KBL_Y_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core Y */
        IMC_DEV(KBL_U_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core U */
        IMC_DEV(KBL_UQ_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core U Quad Core */
        IMC_DEV(KBL_SD_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core S Dual Core */
        IMC_DEV(KBL_SQ_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core S Quad Core */
        IMC_DEV(CFL_2U_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core U 2 Cores */
        IMC_DEV(CFL_4U_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core U 4 Cores */
        IMC_DEV(CFL_4H_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core H 4 Cores */
        IMC_DEV(CFL_6H_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core H 6 Cores */
        IMC_DEV(CFL_2S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 2 Cores Desktop */
        IMC_DEV(CFL_4S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 4 Cores Desktop */
        IMC_DEV(CFL_6S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 6 Cores Desktop */
        IMC_DEV(CFL_8S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 8 Cores Desktop */
        IMC_DEV(CFL_4S_W_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 4 Cores Work Station */
        IMC_DEV(CFL_6S_W_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 6 Cores Work Station */
        IMC_DEV(CFL_8S_W_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 8 Cores Work Station */
        IMC_DEV(CFL_4S_S_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 4 Cores Server */
        IMC_DEV(CFL_6S_S_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 6 Cores Server */
        IMC_DEV(CFL_8S_S_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 8 Cores Server */
        {  /* end marker */ }
};

#define for_each_imc_pci_id(x, t) \
        for (x = (t); (x)->pci_id; x++)
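
/*
 * for_each_imc_pci_id() walks the table until the all-zero end marker;
 * for example, for_each_imc_pci_id(p, desktop_imc_pci_ids) expands to
 *
 *      for (p = desktop_imc_pci_ids; p->pci_id; p++)
 */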

static struct pci_driver *imc_uncore_find_dev(void)
{
        const struct imc_uncore_pci_dev *p;
        int ret;

        for_each_imc_pci_id(p, desktop_imc_pci_ids) {
                ret = snb_pci2phy_map_init(p->pci_id);
                if (ret == 0)
                        return p->driver;
        }
        return NULL;
}

static int imc_uncore_pci_init(void)
{
        struct pci_driver *imc_drv = imc_uncore_find_dev();

        if (!imc_drv)
                return -ENODEV;

        uncore_pci_uncores = snb_pci_uncores;
        uncore_pci_driver = imc_drv;

        return 0;
}

int snb_uncore_pci_init(void)
{
        return imc_uncore_pci_init();
}

int ivb_uncore_pci_init(void)
{
        return imc_uncore_pci_init();
}

int hsw_uncore_pci_init(void)
{
        return imc_uncore_pci_init();
}

int bdw_uncore_pci_init(void)
{
        return imc_uncore_pci_init();
}

int skl_uncore_pci_init(void)
{
        return imc_uncore_pci_init();
}

/* end of Sandy Bridge uncore support */

/* Nehalem uncore support */
static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
{
        wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
}

static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
{
        wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
}

static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        if (hwc->idx < UNCORE_PMC_IDX_FIXED)
                wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
        else
                wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
}

static struct attribute *nhm_uncore_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_cmask8.attr,
        NULL,
};

static struct attribute_group nhm_uncore_format_group = {
        .name = "format",
        .attrs = nhm_uncore_formats_attr,
};

static struct uncore_event_desc nhm_uncore_events[] = {
        INTEL_UNCORE_EVENT_DESC(clockticks,                "event=0xff,umask=0x00"),
        INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any,       "event=0x2f,umask=0x0f"),
        INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any,      "event=0x2c,umask=0x0f"),
        INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads,     "event=0x20,umask=0x01"),
        INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes,    "event=0x20,umask=0x02"),
        INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads,  "event=0x20,umask=0x04"),
        INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
        INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads,   "event=0x20,umask=0x10"),
        INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes,  "event=0x20,umask=0x20"),
        { /* end: all zeroes */ },
};

static struct intel_uncore_ops nhm_uncore_msr_ops = {
        .disable_box    = nhm_uncore_msr_disable_box,
        .enable_box     = nhm_uncore_msr_enable_box,
        .disable_event  = snb_uncore_msr_disable_event,
        .enable_event   = nhm_uncore_msr_enable_event,
        .read_counter   = uncore_msr_read_counter,
};

static struct intel_uncore_type nhm_uncore = {
        .name           = "",
        .num_counters   = 8,
        .num_boxes      = 1,
        .perf_ctr_bits  = 48,
        .fixed_ctr_bits = 48,
        .event_ctl      = NHM_UNC_PERFEVTSEL0,
        .perf_ctr       = NHM_UNC_UNCORE_PMC0,
        .fixed_ctr      = NHM_UNC_FIXED_CTR,
        .fixed_ctl      = NHM_UNC_FIXED_CTR_CTRL,
        .event_mask     = NHM_UNC_RAW_EVENT_MASK,
        .event_descs    = nhm_uncore_events,
        .ops            = &nhm_uncore_msr_ops,
        .format_group   = &nhm_uncore_format_group,
};
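
/*
 * With the empty .name above the PMU registers as plain "uncore" on
 * Nehalem, so (illustrative command line) the events above are used as:
 *
 *      $ perf stat -a -e uncore/qmc_writes_full_any/ sleep 1
 */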

static struct intel_uncore_type *nhm_msr_uncores[] = {
        &nhm_uncore,
        NULL,
};

void nhm_uncore_cpu_init(void)
{
        uncore_msr_uncores = nhm_msr_uncores;
}

/* end of Nehalem uncore support */