1 // SPDX-License-Identifier: GPL-2.0
2 /* Nehalem/Sandy Bridge/Haswell/Broadwell/Skylake uncore support */
3 #include "uncore.h"
4
5 /* Uncore IMC PCI IDs */
6 #define PCI_DEVICE_ID_INTEL_SNB_IMC             0x0100
7 #define PCI_DEVICE_ID_INTEL_IVB_IMC             0x0154
8 #define PCI_DEVICE_ID_INTEL_IVB_E3_IMC          0x0150
9 #define PCI_DEVICE_ID_INTEL_HSW_IMC             0x0c00
10 #define PCI_DEVICE_ID_INTEL_HSW_U_IMC           0x0a04
11 #define PCI_DEVICE_ID_INTEL_BDW_IMC             0x1604
12 #define PCI_DEVICE_ID_INTEL_SKL_U_IMC           0x1904
13 #define PCI_DEVICE_ID_INTEL_SKL_Y_IMC           0x190c
14 #define PCI_DEVICE_ID_INTEL_SKL_HD_IMC          0x1900
15 #define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC          0x1910
16 #define PCI_DEVICE_ID_INTEL_SKL_SD_IMC          0x190f
17 #define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC          0x191f
18 #define PCI_DEVICE_ID_INTEL_SKL_E3_IMC          0x1918
19 #define PCI_DEVICE_ID_INTEL_KBL_Y_IMC           0x590c
20 #define PCI_DEVICE_ID_INTEL_KBL_U_IMC           0x5904
21 #define PCI_DEVICE_ID_INTEL_KBL_UQ_IMC          0x5914
22 #define PCI_DEVICE_ID_INTEL_KBL_SD_IMC          0x590f
23 #define PCI_DEVICE_ID_INTEL_KBL_SQ_IMC          0x591f
24 #define PCI_DEVICE_ID_INTEL_KBL_HQ_IMC          0x5910
25 #define PCI_DEVICE_ID_INTEL_KBL_WQ_IMC          0x5918
26 #define PCI_DEVICE_ID_INTEL_CFL_2U_IMC          0x3ecc
27 #define PCI_DEVICE_ID_INTEL_CFL_4U_IMC          0x3ed0
28 #define PCI_DEVICE_ID_INTEL_CFL_4H_IMC          0x3e10
29 #define PCI_DEVICE_ID_INTEL_CFL_6H_IMC          0x3ec4
30 #define PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC        0x3e0f
31 #define PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC        0x3e1f
32 #define PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC        0x3ec2
33 #define PCI_DEVICE_ID_INTEL_CFL_8S_D_IMC        0x3e30
34 #define PCI_DEVICE_ID_INTEL_CFL_4S_W_IMC        0x3e18
35 #define PCI_DEVICE_ID_INTEL_CFL_6S_W_IMC        0x3ec6
36 #define PCI_DEVICE_ID_INTEL_CFL_8S_W_IMC        0x3e31
37 #define PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC        0x3e33
38 #define PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC        0x3eca
39 #define PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC        0x3e32
40 #define PCI_DEVICE_ID_INTEL_AML_YD_IMC          0x590c
41 #define PCI_DEVICE_ID_INTEL_AML_YQ_IMC          0x590d
42 #define PCI_DEVICE_ID_INTEL_WHL_UQ_IMC          0x3ed0
43 #define PCI_DEVICE_ID_INTEL_WHL_4_UQ_IMC        0x3e34
44 #define PCI_DEVICE_ID_INTEL_WHL_UD_IMC          0x3e35
45 #define PCI_DEVICE_ID_INTEL_CML_H1_IMC          0x9b44
46 #define PCI_DEVICE_ID_INTEL_CML_H2_IMC          0x9b54
47 #define PCI_DEVICE_ID_INTEL_CML_H3_IMC          0x9b64
48 #define PCI_DEVICE_ID_INTEL_CML_U1_IMC          0x9b51
49 #define PCI_DEVICE_ID_INTEL_CML_U2_IMC          0x9b61
50 #define PCI_DEVICE_ID_INTEL_CML_U3_IMC          0x9b71
51 #define PCI_DEVICE_ID_INTEL_CML_S1_IMC          0x9b33
52 #define PCI_DEVICE_ID_INTEL_CML_S2_IMC          0x9b43
53 #define PCI_DEVICE_ID_INTEL_CML_S3_IMC          0x9b53
54 #define PCI_DEVICE_ID_INTEL_CML_S4_IMC          0x9b63
55 #define PCI_DEVICE_ID_INTEL_CML_S5_IMC          0x9b73
56 #define PCI_DEVICE_ID_INTEL_ICL_U_IMC           0x8a02
57 #define PCI_DEVICE_ID_INTEL_ICL_U2_IMC          0x8a12
58 #define PCI_DEVICE_ID_INTEL_TGL_U1_IMC          0x9a02
59 #define PCI_DEVICE_ID_INTEL_TGL_U2_IMC          0x9a04
60 #define PCI_DEVICE_ID_INTEL_TGL_U3_IMC          0x9a12
61 #define PCI_DEVICE_ID_INTEL_TGL_U4_IMC          0x9a14
62 #define PCI_DEVICE_ID_INTEL_TGL_H_IMC           0x9a36
63 #define PCI_DEVICE_ID_INTEL_RKL_1_IMC           0x4c43
64 #define PCI_DEVICE_ID_INTEL_RKL_2_IMC           0x4c53
65 #define PCI_DEVICE_ID_INTEL_ADL_1_IMC           0x4660
66 #define PCI_DEVICE_ID_INTEL_ADL_2_IMC           0x4641
67
68 /* SNB event control */
69 #define SNB_UNC_CTL_EV_SEL_MASK                 0x000000ff
70 #define SNB_UNC_CTL_UMASK_MASK                  0x0000ff00
71 #define SNB_UNC_CTL_EDGE_DET                    (1 << 18)
72 #define SNB_UNC_CTL_EN                          (1 << 22)
73 #define SNB_UNC_CTL_INVERT                      (1 << 23)
74 #define SNB_UNC_CTL_CMASK_MASK                  0x1f000000
75 #define NHM_UNC_CTL_CMASK_MASK                  0xff000000
76 #define NHM_UNC_FIXED_CTR_CTL_EN                (1 << 0)
77
78 #define SNB_UNC_RAW_EVENT_MASK                  (SNB_UNC_CTL_EV_SEL_MASK | \
79                                                  SNB_UNC_CTL_UMASK_MASK | \
80                                                  SNB_UNC_CTL_EDGE_DET | \
81                                                  SNB_UNC_CTL_INVERT | \
82                                                  SNB_UNC_CTL_CMASK_MASK)
83
84 #define NHM_UNC_RAW_EVENT_MASK                  (SNB_UNC_CTL_EV_SEL_MASK | \
85                                                  SNB_UNC_CTL_UMASK_MASK | \
86                                                  SNB_UNC_CTL_EDGE_DET | \
87                                                  SNB_UNC_CTL_INVERT | \
88                                                  NHM_UNC_CTL_CMASK_MASK)
89
90 /* SNB global control register */
91 #define SNB_UNC_PERF_GLOBAL_CTL                 0x391
92 #define SNB_UNC_FIXED_CTR_CTRL                  0x394
93 #define SNB_UNC_FIXED_CTR                       0x395
94
95 /* SNB uncore global control */
96 #define SNB_UNC_GLOBAL_CTL_CORE_ALL             ((1 << 4) - 1)
97 #define SNB_UNC_GLOBAL_CTL_EN                   (1 << 29)
98
99 /* SNB Cbo register */
100 #define SNB_UNC_CBO_0_PERFEVTSEL0               0x700
101 #define SNB_UNC_CBO_0_PER_CTR0                  0x706
102 #define SNB_UNC_CBO_MSR_OFFSET                  0x10
103
104 /* SNB ARB register */
105 #define SNB_UNC_ARB_PER_CTR0                    0x3b0
106 #define SNB_UNC_ARB_PERFEVTSEL0                 0x3b2
107 #define SNB_UNC_ARB_MSR_OFFSET                  0x10
108
109 /* NHM global control register */
110 #define NHM_UNC_PERF_GLOBAL_CTL                 0x391
111 #define NHM_UNC_FIXED_CTR                       0x394
112 #define NHM_UNC_FIXED_CTR_CTRL                  0x395
113
114 /* NHM uncore global control */
115 #define NHM_UNC_GLOBAL_CTL_EN_PC_ALL            ((1ULL << 8) - 1)
116 #define NHM_UNC_GLOBAL_CTL_EN_FC                (1ULL << 32)
117
118 /* NHM uncore register */
119 #define NHM_UNC_PERFEVTSEL0                     0x3c0
120 #define NHM_UNC_UNCORE_PMC0                     0x3b0
121
122 /* SKL uncore global control */
123 #define SKL_UNC_PERF_GLOBAL_CTL                 0xe01
124 #define SKL_UNC_GLOBAL_CTL_CORE_ALL             ((1 << 5) - 1)
125
126 /* ICL Cbo register */
127 #define ICL_UNC_CBO_CONFIG                      0x396
128 #define ICL_UNC_NUM_CBO_MASK                    0xf
129 #define ICL_UNC_CBO_0_PER_CTR0                  0x702
130 #define ICL_UNC_CBO_MSR_OFFSET                  0x8
131
132 /* ICL ARB register */
133 #define ICL_UNC_ARB_PER_CTR                     0x3b1
134 #define ICL_UNC_ARB_PERFEVTSEL                  0x3b3
135
136 /* ADL uncore global control */
137 #define ADL_UNC_PERF_GLOBAL_CTL                 0x2ff0
138 #define ADL_UNC_FIXED_CTR_CTRL                  0x2fde
139 #define ADL_UNC_FIXED_CTR                       0x2fdf
140
141 /* ADL Cbo register */
142 #define ADL_UNC_CBO_0_PER_CTR0                  0x2002
143 #define ADL_UNC_CBO_0_PERFEVTSEL0               0x2000
144 #define ADL_UNC_CTL_THRESHOLD                   0x3f000000
145 #define ADL_UNC_RAW_EVENT_MASK                  (SNB_UNC_CTL_EV_SEL_MASK | \
146                                                  SNB_UNC_CTL_UMASK_MASK | \
147                                                  SNB_UNC_CTL_EDGE_DET | \
148                                                  SNB_UNC_CTL_INVERT | \
149                                                  ADL_UNC_CTL_THRESHOLD)
150
151 /* ADL ARB register */
152 #define ADL_UNC_ARB_PER_CTR0                    0x2FD2
153 #define ADL_UNC_ARB_PERFEVTSEL0                 0x2FD0
154 #define ADL_UNC_ARB_MSR_OFFSET                  0x8
155
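/*
 * These macros generate the sysfs "format" attributes that tell userspace
 * which bits of perf_event_attr::config carry the event select, umask,
 * edge, invert and cmask/threshold fields.  With the usual
 * uncore_<type>[_<box>] PMU naming this lets an event be requested as,
 * for example:
 *
 *   perf stat -a -e 'uncore_cbox_0/event=0xff/' -- sleep 1
 *
 * (illustrative command line only; the exact PMU names depend on the
 * platform).
 */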
156 DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
157 DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
158 DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
159 DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
160 DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
161 DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");
162 DEFINE_UNCORE_FORMAT_ATTR(threshold, threshold, "config:24-29");
163
164 /* Sandy Bridge uncore support */
165 static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
166 {
167         struct hw_perf_event *hwc = &event->hw;
168
169         if (hwc->idx < UNCORE_PMC_IDX_FIXED)
170                 wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
171         else
172                 wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
173 }
174
175 static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
176 {
177         wrmsrl(event->hw.config_base, 0);
178 }
179
180 static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
181 {
182         if (box->pmu->pmu_idx == 0) {
183                 wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
184                         SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
185         }
186 }
187
188 static void snb_uncore_msr_enable_box(struct intel_uncore_box *box)
189 {
190         wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
191                 SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
192 }
193
194 static void snb_uncore_msr_exit_box(struct intel_uncore_box *box)
195 {
196         if (box->pmu->pmu_idx == 0)
197                 wrmsrl(SNB_UNC_PERF_GLOBAL_CTL, 0);
198 }
199
200 static struct uncore_event_desc snb_uncore_events[] = {
201         INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
202         { /* end: all zeroes */ },
203 };
204
205 static struct attribute *snb_uncore_formats_attr[] = {
206         &format_attr_event.attr,
207         &format_attr_umask.attr,
208         &format_attr_edge.attr,
209         &format_attr_inv.attr,
210         &format_attr_cmask5.attr,
211         NULL,
212 };
213
214 static const struct attribute_group snb_uncore_format_group = {
215         .name           = "format",
216         .attrs          = snb_uncore_formats_attr,
217 };
218
219 static struct intel_uncore_ops snb_uncore_msr_ops = {
220         .init_box       = snb_uncore_msr_init_box,
221         .enable_box     = snb_uncore_msr_enable_box,
222         .exit_box       = snb_uncore_msr_exit_box,
223         .disable_event  = snb_uncore_msr_disable_event,
224         .enable_event   = snb_uncore_msr_enable_event,
225         .read_counter   = uncore_msr_read_counter,
226 };
227
228 static struct event_constraint snb_uncore_arb_constraints[] = {
229         UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
230         UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
231         EVENT_CONSTRAINT_END
232 };
233
234 static struct intel_uncore_type snb_uncore_cbox = {
235         .name           = "cbox",
236         .num_counters   = 2,
237         .num_boxes      = 4,
238         .perf_ctr_bits  = 44,
239         .fixed_ctr_bits = 48,
240         .perf_ctr       = SNB_UNC_CBO_0_PER_CTR0,
241         .event_ctl      = SNB_UNC_CBO_0_PERFEVTSEL0,
242         .fixed_ctr      = SNB_UNC_FIXED_CTR,
243         .fixed_ctl      = SNB_UNC_FIXED_CTR_CTRL,
244         .single_fixed   = 1,
245         .event_mask     = SNB_UNC_RAW_EVENT_MASK,
246         .msr_offset     = SNB_UNC_CBO_MSR_OFFSET,
247         .ops            = &snb_uncore_msr_ops,
248         .format_group   = &snb_uncore_format_group,
249         .event_descs    = snb_uncore_events,
250 };
251
252 static struct intel_uncore_type snb_uncore_arb = {
253         .name           = "arb",
254         .num_counters   = 2,
255         .num_boxes      = 1,
256         .perf_ctr_bits  = 44,
257         .perf_ctr       = SNB_UNC_ARB_PER_CTR0,
258         .event_ctl      = SNB_UNC_ARB_PERFEVTSEL0,
259         .event_mask     = SNB_UNC_RAW_EVENT_MASK,
260         .msr_offset     = SNB_UNC_ARB_MSR_OFFSET,
261         .constraints    = snb_uncore_arb_constraints,
262         .ops            = &snb_uncore_msr_ops,
263         .format_group   = &snb_uncore_format_group,
264 };
265
266 static struct intel_uncore_type *snb_msr_uncores[] = {
267         &snb_uncore_cbox,
268         &snb_uncore_arb,
269         NULL,
270 };
271
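/*
 * Client parts have one C-box slice per core, so cap the number of cbox
 * PMUs at the core count reported for the boot CPU.
 */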
272 void snb_uncore_cpu_init(void)
273 {
274         uncore_msr_uncores = snb_msr_uncores;
275         if (snb_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
276                 snb_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
277 }
278
279 static void skl_uncore_msr_init_box(struct intel_uncore_box *box)
280 {
281         if (box->pmu->pmu_idx == 0) {
282                 wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
283                         SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
284         }
285
286         /* The 8th CBOX has different MSR space */
287         if (box->pmu->pmu_idx == 7)
288                 __set_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags);
289 }
290
291 static void skl_uncore_msr_enable_box(struct intel_uncore_box *box)
292 {
293         wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
294                 SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
295 }
296
297 static void skl_uncore_msr_exit_box(struct intel_uncore_box *box)
298 {
299         if (box->pmu->pmu_idx == 0)
300                 wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, 0);
301 }
302
303 static struct intel_uncore_ops skl_uncore_msr_ops = {
304         .init_box       = skl_uncore_msr_init_box,
305         .enable_box     = skl_uncore_msr_enable_box,
306         .exit_box       = skl_uncore_msr_exit_box,
307         .disable_event  = snb_uncore_msr_disable_event,
308         .enable_event   = snb_uncore_msr_enable_event,
309         .read_counter   = uncore_msr_read_counter,
310 };
311
312 static struct intel_uncore_type skl_uncore_cbox = {
313         .name           = "cbox",
314         .num_counters   = 4,
315         .num_boxes      = 8,
316         .perf_ctr_bits  = 44,
317         .fixed_ctr_bits = 48,
318         .perf_ctr       = SNB_UNC_CBO_0_PER_CTR0,
319         .event_ctl      = SNB_UNC_CBO_0_PERFEVTSEL0,
320         .fixed_ctr      = SNB_UNC_FIXED_CTR,
321         .fixed_ctl      = SNB_UNC_FIXED_CTR_CTRL,
322         .single_fixed   = 1,
323         .event_mask     = SNB_UNC_RAW_EVENT_MASK,
324         .msr_offset     = SNB_UNC_CBO_MSR_OFFSET,
325         .ops            = &skl_uncore_msr_ops,
326         .format_group   = &snb_uncore_format_group,
327         .event_descs    = snb_uncore_events,
328 };
329
330 static struct intel_uncore_type *skl_msr_uncores[] = {
331         &skl_uncore_cbox,
332         &snb_uncore_arb,
333         NULL,
334 };
335
336 void skl_uncore_cpu_init(void)
337 {
338         uncore_msr_uncores = skl_msr_uncores;
339         if (skl_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
340                 skl_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
341         snb_uncore_arb.ops = &skl_uncore_msr_ops;
342 }
343
344 static struct intel_uncore_ops icl_uncore_msr_ops = {
345         .disable_event  = snb_uncore_msr_disable_event,
346         .enable_event   = snb_uncore_msr_enable_event,
347         .read_counter   = uncore_msr_read_counter,
348 };
349
350 static struct intel_uncore_type icl_uncore_cbox = {
351         .name           = "cbox",
352         .num_counters   = 2,
353         .perf_ctr_bits  = 44,
354         .perf_ctr       = ICL_UNC_CBO_0_PER_CTR0,
355         .event_ctl      = SNB_UNC_CBO_0_PERFEVTSEL0,
356         .event_mask     = SNB_UNC_RAW_EVENT_MASK,
357         .msr_offset     = ICL_UNC_CBO_MSR_OFFSET,
358         .ops            = &icl_uncore_msr_ops,
359         .format_group   = &snb_uncore_format_group,
360 };
361
362 static struct uncore_event_desc icl_uncore_events[] = {
363         INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff"),
364         { /* end: all zeroes */ },
365 };
366
367 static struct attribute *icl_uncore_clock_formats_attr[] = {
368         &format_attr_event.attr,
369         NULL,
370 };
371
372 static struct attribute_group icl_uncore_clock_format_group = {
373         .name = "format",
374         .attrs = icl_uncore_clock_formats_attr,
375 };
376
377 static struct intel_uncore_type icl_uncore_clockbox = {
378         .name           = "clock",
379         .num_counters   = 1,
380         .num_boxes      = 1,
381         .fixed_ctr_bits = 48,
382         .fixed_ctr      = SNB_UNC_FIXED_CTR,
383         .fixed_ctl      = SNB_UNC_FIXED_CTR_CTRL,
384         .single_fixed   = 1,
385         .event_mask     = SNB_UNC_CTL_EV_SEL_MASK,
386         .format_group   = &icl_uncore_clock_format_group,
387         .ops            = &icl_uncore_msr_ops,
388         .event_descs    = icl_uncore_events,
389 };
390
391 static struct intel_uncore_type icl_uncore_arb = {
392         .name           = "arb",
393         .num_counters   = 1,
394         .num_boxes      = 1,
395         .perf_ctr_bits  = 44,
396         .perf_ctr       = ICL_UNC_ARB_PER_CTR,
397         .event_ctl      = ICL_UNC_ARB_PERFEVTSEL,
398         .event_mask     = SNB_UNC_RAW_EVENT_MASK,
399         .ops            = &icl_uncore_msr_ops,
400         .format_group   = &snb_uncore_format_group,
401 };
402
403 static struct intel_uncore_type *icl_msr_uncores[] = {
404         &icl_uncore_cbox,
405         &icl_uncore_arb,
406         &icl_uncore_clockbox,
407         NULL,
408 };
409
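/*
 * The number of C-boxes varies between SKUs; the low bits of the
 * CBO_CONFIG MSR report how many are present, so probe it at init time
 * instead of hard-coding num_boxes.
 */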
410 static int icl_get_cbox_num(void)
411 {
412         u64 num_boxes;
413
414         rdmsrl(ICL_UNC_CBO_CONFIG, num_boxes);
415
416         return num_boxes & ICL_UNC_NUM_CBO_MASK;
417 }
418
419 void icl_uncore_cpu_init(void)
420 {
421         uncore_msr_uncores = icl_msr_uncores;
422         icl_uncore_cbox.num_boxes = icl_get_cbox_num();
423 }
424
425 static struct intel_uncore_type *tgl_msr_uncores[] = {
426         &icl_uncore_cbox,
427         &snb_uncore_arb,
428         &icl_uncore_clockbox,
429         NULL,
430 };
431
432 static void rkl_uncore_msr_init_box(struct intel_uncore_box *box)
433 {
434         if (box->pmu->pmu_idx == 0)
435                 wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN);
436 }
437
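/*
 * TGL (and RKL, which uses the same init path) reuses the ICL
 * cbox/clockbox types and the SKL MSR ops, but overrides init_box with
 * rkl_uncore_msr_init_box() above, which sets only the global enable bit
 * and no per-core enable mask.
 */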
438 void tgl_uncore_cpu_init(void)
439 {
440         uncore_msr_uncores = tgl_msr_uncores;
441         icl_uncore_cbox.num_boxes = icl_get_cbox_num();
442         icl_uncore_cbox.ops = &skl_uncore_msr_ops;
443         icl_uncore_clockbox.ops = &skl_uncore_msr_ops;
444         snb_uncore_arb.ops = &skl_uncore_msr_ops;
445         skl_uncore_msr_ops.init_box = rkl_uncore_msr_init_box;
446 }
447
448 static void adl_uncore_msr_init_box(struct intel_uncore_box *box)
449 {
450         if (box->pmu->pmu_idx == 0)
451                 wrmsrl(ADL_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN);
452 }
453
454 static void adl_uncore_msr_enable_box(struct intel_uncore_box *box)
455 {
456         wrmsrl(ADL_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN);
457 }
458
459 static void adl_uncore_msr_disable_box(struct intel_uncore_box *box)
460 {
461         if (box->pmu->pmu_idx == 0)
462                 wrmsrl(ADL_UNC_PERF_GLOBAL_CTL, 0);
463 }
464
465 static void adl_uncore_msr_exit_box(struct intel_uncore_box *box)
466 {
467         if (box->pmu->pmu_idx == 0)
468                 wrmsrl(ADL_UNC_PERF_GLOBAL_CTL, 0);
469 }
470
471 static struct intel_uncore_ops adl_uncore_msr_ops = {
472         .init_box       = adl_uncore_msr_init_box,
473         .enable_box     = adl_uncore_msr_enable_box,
474         .disable_box    = adl_uncore_msr_disable_box,
475         .exit_box       = adl_uncore_msr_exit_box,
476         .disable_event  = snb_uncore_msr_disable_event,
477         .enable_event   = snb_uncore_msr_enable_event,
478         .read_counter   = uncore_msr_read_counter,
479 };
480
481 static struct attribute *adl_uncore_formats_attr[] = {
482         &format_attr_event.attr,
483         &format_attr_umask.attr,
484         &format_attr_edge.attr,
485         &format_attr_inv.attr,
486         &format_attr_threshold.attr,
487         NULL,
488 };
489
490 static const struct attribute_group adl_uncore_format_group = {
491         .name           = "format",
492         .attrs          = adl_uncore_formats_attr,
493 };
494
495 static struct intel_uncore_type adl_uncore_cbox = {
496         .name           = "cbox",
497         .num_counters   = 2,
498         .perf_ctr_bits  = 44,
499         .perf_ctr       = ADL_UNC_CBO_0_PER_CTR0,
500         .event_ctl      = ADL_UNC_CBO_0_PERFEVTSEL0,
501         .event_mask     = ADL_UNC_RAW_EVENT_MASK,
502         .msr_offset     = ICL_UNC_CBO_MSR_OFFSET,
503         .ops            = &adl_uncore_msr_ops,
504         .format_group   = &adl_uncore_format_group,
505 };
506
507 static struct intel_uncore_type adl_uncore_arb = {
508         .name           = "arb",
509         .num_counters   = 2,
510         .num_boxes      = 2,
511         .perf_ctr_bits  = 44,
512         .perf_ctr       = ADL_UNC_ARB_PER_CTR0,
513         .event_ctl      = ADL_UNC_ARB_PERFEVTSEL0,
514         .event_mask     = SNB_UNC_RAW_EVENT_MASK,
515         .msr_offset     = ADL_UNC_ARB_MSR_OFFSET,
516         .constraints    = snb_uncore_arb_constraints,
517         .ops            = &adl_uncore_msr_ops,
518         .format_group   = &snb_uncore_format_group,
519 };
520
521 static struct intel_uncore_type adl_uncore_clockbox = {
522         .name           = "clock",
523         .num_counters   = 1,
524         .num_boxes      = 1,
525         .fixed_ctr_bits = 48,
526         .fixed_ctr      = ADL_UNC_FIXED_CTR,
527         .fixed_ctl      = ADL_UNC_FIXED_CTR_CTRL,
528         .single_fixed   = 1,
529         .event_mask     = SNB_UNC_CTL_EV_SEL_MASK,
530         .format_group   = &icl_uncore_clock_format_group,
531         .ops            = &adl_uncore_msr_ops,
532         .event_descs    = icl_uncore_events,
533 };
534
535 static struct intel_uncore_type *adl_msr_uncores[] = {
536         &adl_uncore_cbox,
537         &adl_uncore_arb,
538         &adl_uncore_clockbox,
539         NULL,
540 };
541
542 void adl_uncore_cpu_init(void)
543 {
544         adl_uncore_cbox.num_boxes = icl_get_cbox_num();
545         uncore_msr_uncores = adl_msr_uncores;
546 }
547
548 enum {
549         SNB_PCI_UNCORE_IMC,
550 };
551
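/*
 * The .scale strings below are 64 / 2^20 = 6.103515625e-5: each counter
 * increment corresponds to one 64-byte cache line, so the scaled output
 * reads directly in MiB.
 */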
552 static struct uncore_event_desc snb_uncore_imc_events[] = {
553         INTEL_UNCORE_EVENT_DESC(data_reads,  "event=0x01"),
554         INTEL_UNCORE_EVENT_DESC(data_reads.scale, "6.103515625e-5"),
555         INTEL_UNCORE_EVENT_DESC(data_reads.unit, "MiB"),
556
557         INTEL_UNCORE_EVENT_DESC(data_writes, "event=0x02"),
558         INTEL_UNCORE_EVENT_DESC(data_writes.scale, "6.103515625e-5"),
559         INTEL_UNCORE_EVENT_DESC(data_writes.unit, "MiB"),
560
561         INTEL_UNCORE_EVENT_DESC(gt_requests, "event=0x03"),
562         INTEL_UNCORE_EVENT_DESC(gt_requests.scale, "6.103515625e-5"),
563         INTEL_UNCORE_EVENT_DESC(gt_requests.unit, "MiB"),
564
565         INTEL_UNCORE_EVENT_DESC(ia_requests, "event=0x04"),
566         INTEL_UNCORE_EVENT_DESC(ia_requests.scale, "6.103515625e-5"),
567         INTEL_UNCORE_EVENT_DESC(ia_requests.unit, "MiB"),
568
569         INTEL_UNCORE_EVENT_DESC(io_requests, "event=0x05"),
570         INTEL_UNCORE_EVENT_DESC(io_requests.scale, "6.103515625e-5"),
571         INTEL_UNCORE_EVENT_DESC(io_requests.unit, "MiB"),
572
573         { /* end: all zeroes */ },
574 };
575
576 #define SNB_UNCORE_PCI_IMC_EVENT_MASK           0xff
577 #define SNB_UNCORE_PCI_IMC_BAR_OFFSET           0x48
578
579 /* page size multiple covering all config regs */
580 #define SNB_UNCORE_PCI_IMC_MAP_SIZE             0x6000
581
582 #define SNB_UNCORE_PCI_IMC_DATA_READS           0x1
583 #define SNB_UNCORE_PCI_IMC_DATA_READS_BASE      0x5050
584 #define SNB_UNCORE_PCI_IMC_DATA_WRITES          0x2
585 #define SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE     0x5054
586 #define SNB_UNCORE_PCI_IMC_CTR_BASE             SNB_UNCORE_PCI_IMC_DATA_READS_BASE
587
588 /* BW breakdown - legacy counters */
589 #define SNB_UNCORE_PCI_IMC_GT_REQUESTS          0x3
590 #define SNB_UNCORE_PCI_IMC_GT_REQUESTS_BASE     0x5040
591 #define SNB_UNCORE_PCI_IMC_IA_REQUESTS          0x4
592 #define SNB_UNCORE_PCI_IMC_IA_REQUESTS_BASE     0x5044
593 #define SNB_UNCORE_PCI_IMC_IO_REQUESTS          0x5
594 #define SNB_UNCORE_PCI_IMC_IO_REQUESTS_BASE     0x5048
595
596 enum perf_snb_uncore_imc_freerunning_types {
597         SNB_PCI_UNCORE_IMC_DATA_READS           = 0,
598         SNB_PCI_UNCORE_IMC_DATA_WRITES,
599         SNB_PCI_UNCORE_IMC_GT_REQUESTS,
600         SNB_PCI_UNCORE_IMC_IA_REQUESTS,
601         SNB_PCI_UNCORE_IMC_IO_REQUESTS,
602
603         SNB_PCI_UNCORE_IMC_FREERUNNING_TYPE_MAX,
604 };
605
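/*
 * Each entry below is { counter_base, counter_offset, box_offset,
 * num_counters, bits } (struct freerunning_counters in uncore.h): a single
 * 32-bit free-running MMIO counter at a fixed offset from the IMC BAR.
 */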
606 static struct freerunning_counters snb_uncore_imc_freerunning[] = {
607         [SNB_PCI_UNCORE_IMC_DATA_READS]         = { SNB_UNCORE_PCI_IMC_DATA_READS_BASE,
608                                                         0x0, 0x0, 1, 32 },
609         [SNB_PCI_UNCORE_IMC_DATA_WRITES]        = { SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE,
610                                                         0x0, 0x0, 1, 32 },
611         [SNB_PCI_UNCORE_IMC_GT_REQUESTS]        = { SNB_UNCORE_PCI_IMC_GT_REQUESTS_BASE,
612                                                         0x0, 0x0, 1, 32 },
613         [SNB_PCI_UNCORE_IMC_IA_REQUESTS]        = { SNB_UNCORE_PCI_IMC_IA_REQUESTS_BASE,
614                                                         0x0, 0x0, 1, 32 },
615         [SNB_PCI_UNCORE_IMC_IO_REQUESTS]        = { SNB_UNCORE_PCI_IMC_IO_REQUESTS_BASE,
616                                                         0x0, 0x0, 1, 32 },
617 };
618
619 static struct attribute *snb_uncore_imc_formats_attr[] = {
620         &format_attr_event.attr,
621         NULL,
622 };
623
624 static const struct attribute_group snb_uncore_imc_format_group = {
625         .name = "format",
626         .attrs = snb_uncore_imc_formats_attr,
627 };
628
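/*
 * The client IMC exposes its counters through MMIO rather than PCI config
 * space: the BAR is read from config offset SNB_UNCORE_PCI_IMC_BAR_OFFSET,
 * its low flag bits disappear with the page alignment below, and counters
 * are then read with readl() at fixed offsets.  The shorter hrtimer
 * interval is presumably there so the 32-bit free-running counters are
 * folded into the 64-bit perf count well before they can wrap.
 */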
629 static void snb_uncore_imc_init_box(struct intel_uncore_box *box)
630 {
631         struct intel_uncore_type *type = box->pmu->type;
632         struct pci_dev *pdev = box->pci_dev;
633         int where = SNB_UNCORE_PCI_IMC_BAR_OFFSET;
634         resource_size_t addr;
635         u32 pci_dword;
636
637         pci_read_config_dword(pdev, where, &pci_dword);
638         addr = pci_dword;
639
640 #ifdef CONFIG_PHYS_ADDR_T_64BIT
641         pci_read_config_dword(pdev, where + 4, &pci_dword);
642         addr |= ((resource_size_t)pci_dword << 32);
643 #endif
644
645         addr &= ~(PAGE_SIZE - 1);
646
647         box->io_addr = ioremap(addr, type->mmio_map_size);
648         if (!box->io_addr)
649                 pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);
650
651         box->hrtimer_duration = UNCORE_SNB_IMC_HRTIMER_INTERVAL;
652 }
653
654 static void snb_uncore_imc_enable_box(struct intel_uncore_box *box)
655 {}
656
657 static void snb_uncore_imc_disable_box(struct intel_uncore_box *box)
658 {}
659
660 static void snb_uncore_imc_enable_event(struct intel_uncore_box *box, struct perf_event *event)
661 {}
662
663 static void snb_uncore_imc_disable_event(struct intel_uncore_box *box, struct perf_event *event)
664 {}
665
666 /*
667  * Keep the custom event_init() function compatible with old event
668  * encoding for free running counters.
669  */
670 static int snb_uncore_imc_event_init(struct perf_event *event)
671 {
672         struct intel_uncore_pmu *pmu;
673         struct intel_uncore_box *box;
674         struct hw_perf_event *hwc = &event->hw;
675         u64 cfg = event->attr.config & SNB_UNCORE_PCI_IMC_EVENT_MASK;
676         int idx, base;
677
678         if (event->attr.type != event->pmu->type)
679                 return -ENOENT;
680
681         pmu = uncore_event_to_pmu(event);
682         /* no device found for this pmu */
683         if (pmu->func_id < 0)
684                 return -ENOENT;
685
686         /* Sampling not supported yet */
687         if (hwc->sample_period)
688                 return -EINVAL;
689
690         /* unsupported modes and filters */
691         if (event->attr.sample_period) /* no sampling */
692                 return -EINVAL;
693
694         /*
695          * Place all uncore events for a particular physical package
696          * onto a single cpu
697          */
698         if (event->cpu < 0)
699                 return -EINVAL;
700
701         /* check only supported bits are set */
702         if (event->attr.config & ~SNB_UNCORE_PCI_IMC_EVENT_MASK)
703                 return -EINVAL;
704
705         box = uncore_pmu_to_box(pmu, event->cpu);
706         if (!box || box->cpu < 0)
707                 return -EINVAL;
708
709         event->cpu = box->cpu;
710         event->pmu_private = box;
711
712         event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;
713
714         event->hw.idx = -1;
715         event->hw.last_tag = ~0ULL;
716         event->hw.extra_reg.idx = EXTRA_REG_NONE;
717         event->hw.branch_reg.idx = EXTRA_REG_NONE;
718         /*
719          * check event is known (whitelist, determines counter)
720          */
721         switch (cfg) {
722         case SNB_UNCORE_PCI_IMC_DATA_READS:
723                 base = SNB_UNCORE_PCI_IMC_DATA_READS_BASE;
724                 idx = UNCORE_PMC_IDX_FREERUNNING;
725                 break;
726         case SNB_UNCORE_PCI_IMC_DATA_WRITES:
727                 base = SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE;
728                 idx = UNCORE_PMC_IDX_FREERUNNING;
729                 break;
730         case SNB_UNCORE_PCI_IMC_GT_REQUESTS:
731                 base = SNB_UNCORE_PCI_IMC_GT_REQUESTS_BASE;
732                 idx = UNCORE_PMC_IDX_FREERUNNING;
733                 break;
734         case SNB_UNCORE_PCI_IMC_IA_REQUESTS:
735                 base = SNB_UNCORE_PCI_IMC_IA_REQUESTS_BASE;
736                 idx = UNCORE_PMC_IDX_FREERUNNING;
737                 break;
738         case SNB_UNCORE_PCI_IMC_IO_REQUESTS:
739                 base = SNB_UNCORE_PCI_IMC_IO_REQUESTS_BASE;
740                 idx = UNCORE_PMC_IDX_FREERUNNING;
741                 break;
742         default:
743                 return -EINVAL;
744         }
745
746         /* must be done before validate_group */
747         event->hw.event_base = base;
748         event->hw.idx = idx;
749
750         /* Convert to standard encoding format for freerunning counters */
751         event->hw.config = ((cfg - 1) << 8) | 0x10ff;
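        /*
         * i.e. legacy configs 0x1-0x5 become 0x10ff-0x14ff: event 0xff with
         * a umask derived from the old event number, in line with the
         * generic free-running encoding.  The counter itself was already
         * selected above via event_base/idx.
         */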
752
753         /* no group validation needed, we have free running counters */
754
755         return 0;
756 }
757
758 static int snb_uncore_imc_hw_config(struct intel_uncore_box *box, struct perf_event *event)
759 {
760         return 0;
761 }
762
763 int snb_pci2phy_map_init(int devid)
764 {
765         struct pci_dev *dev = NULL;
766         struct pci2phy_map *map;
767         int bus, segment;
768
769         dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, dev);
770         if (!dev)
771                 return -ENOTTY;
772
773         bus = dev->bus->number;
774         segment = pci_domain_nr(dev->bus);
775
776         raw_spin_lock(&pci2phy_map_lock);
777         map = __find_pci2phy_map(segment);
778         if (!map) {
779                 raw_spin_unlock(&pci2phy_map_lock);
780                 pci_dev_put(dev);
781                 return -ENOMEM;
782         }
783         map->pbus_to_dieid[bus] = 0;
784         raw_spin_unlock(&pci2phy_map_lock);
785
786         pci_dev_put(dev);
787
788         return 0;
789 }
790
791 static u64 snb_uncore_imc_read_counter(struct intel_uncore_box *box, struct perf_event *event)
792 {
793         struct hw_perf_event *hwc = &event->hw;
794
 795         /*
 796          * SNB IMC counters are 32-bit and are laid out back to back
 797          * in MMIO space. Therefore we must use a 32-bit accessor function.
 798          * Using readq() from uncore_mmio_read_counter() causes problems
 799          * because it reads 64 bits at a time. That is fine for
 800          * uncore_perf_event_update(), which drops the upper 32 bits, but
 801          * not for plain uncore_read_counter() as invoked in
 802          * uncore_pmu_event_start().
 803          */
804         return (u64)readl(box->io_addr + hwc->event_base);
805 }
806
807 static struct pmu snb_uncore_imc_pmu = {
808         .task_ctx_nr    = perf_invalid_context,
809         .event_init     = snb_uncore_imc_event_init,
810         .add            = uncore_pmu_event_add,
811         .del            = uncore_pmu_event_del,
812         .start          = uncore_pmu_event_start,
813         .stop           = uncore_pmu_event_stop,
814         .read           = uncore_pmu_event_read,
815         .capabilities   = PERF_PMU_CAP_NO_EXCLUDE,
816 };
817
818 static struct intel_uncore_ops snb_uncore_imc_ops = {
819         .init_box       = snb_uncore_imc_init_box,
820         .exit_box       = uncore_mmio_exit_box,
821         .enable_box     = snb_uncore_imc_enable_box,
822         .disable_box    = snb_uncore_imc_disable_box,
823         .disable_event  = snb_uncore_imc_disable_event,
824         .enable_event   = snb_uncore_imc_enable_event,
825         .hw_config      = snb_uncore_imc_hw_config,
826         .read_counter   = snb_uncore_imc_read_counter,
827 };
828
829 static struct intel_uncore_type snb_uncore_imc = {
830         .name           = "imc",
831         .num_counters   = 5,
832         .num_boxes      = 1,
833         .num_freerunning_types  = SNB_PCI_UNCORE_IMC_FREERUNNING_TYPE_MAX,
834         .mmio_map_size  = SNB_UNCORE_PCI_IMC_MAP_SIZE,
835         .freerunning    = snb_uncore_imc_freerunning,
836         .event_descs    = snb_uncore_imc_events,
837         .format_group   = &snb_uncore_imc_format_group,
838         .ops            = &snb_uncore_imc_ops,
839         .pmu            = &snb_uncore_imc_pmu,
840 };
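/*
 * The type above registers as a standalone "uncore_imc" PMU.  Assuming the
 * standard sysfs event aliases, memory traffic can be observed with e.g.:
 *
 *   perf stat -a -e 'uncore_imc/data_reads/,uncore_imc/data_writes/' sleep 1
 *
 * perf applies the scale/unit attributes and reports the result in MiB.
 */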
841
842 static struct intel_uncore_type *snb_pci_uncores[] = {
843         [SNB_PCI_UNCORE_IMC]    = &snb_uncore_imc,
844         NULL,
845 };
846
847 static const struct pci_device_id snb_uncore_pci_ids[] = {
848         { /* IMC */
849                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SNB_IMC),
850                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
851         },
852         { /* end: all zeroes */ },
853 };
854
855 static const struct pci_device_id ivb_uncore_pci_ids[] = {
856         { /* IMC */
857                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_IMC),
858                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
859         },
860         { /* IMC */
861                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_E3_IMC),
862                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
863         },
864         { /* end: all zeroes */ },
865 };
866
867 static const struct pci_device_id hsw_uncore_pci_ids[] = {
868         { /* IMC */
869                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_IMC),
870                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
871         },
872         { /* IMC */
873                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_U_IMC),
874                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
875         },
876         { /* end: all zeroes */ },
877 };
878
879 static const struct pci_device_id bdw_uncore_pci_ids[] = {
880         { /* IMC */
881                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BDW_IMC),
882                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
883         },
884         { /* end: all zeroes */ },
885 };
886
887 static const struct pci_device_id skl_uncore_pci_ids[] = {
888         { /* IMC */
889                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_Y_IMC),
890                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
891         },
892         { /* IMC */
893                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_U_IMC),
894                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
895         },
896         { /* IMC */
897                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HD_IMC),
898                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
899         },
900         { /* IMC */
901                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HQ_IMC),
902                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
903         },
904         { /* IMC */
905                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SD_IMC),
906                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
907         },
908         { /* IMC */
909                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SQ_IMC),
910                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
911         },
912         { /* IMC */
913                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_E3_IMC),
914                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
915         },
916         { /* IMC */
917                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_Y_IMC),
918                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
919         },
920         { /* IMC */
921                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_U_IMC),
922                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
923         },
924         { /* IMC */
925                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_UQ_IMC),
926                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
927         },
928         { /* IMC */
929                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_SD_IMC),
930                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
931         },
932         { /* IMC */
933                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_SQ_IMC),
934                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
935         },
936         { /* IMC */
937                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_HQ_IMC),
938                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
939         },
940         { /* IMC */
941                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_WQ_IMC),
942                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
943         },
944         { /* IMC */
945                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_2U_IMC),
946                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
947         },
948         { /* IMC */
949                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4U_IMC),
950                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
951         },
952         { /* IMC */
953                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4H_IMC),
954                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
955         },
956         { /* IMC */
957                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6H_IMC),
958                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
959         },
960         { /* IMC */
961                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC),
962                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
963         },
964         { /* IMC */
965                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC),
966                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
967         },
968         { /* IMC */
969                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC),
970                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
971         },
972         { /* IMC */
973                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_D_IMC),
974                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
975         },
976         { /* IMC */
977                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_W_IMC),
978                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
979         },
980         { /* IMC */
981                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_W_IMC),
982                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
983         },
984         { /* IMC */
985                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_W_IMC),
986                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
987         },
988         { /* IMC */
989                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC),
990                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
991         },
992         { /* IMC */
993                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC),
994                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
995         },
996         { /* IMC */
997                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC),
998                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
999         },
1000         { /* IMC */
1001                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_AML_YD_IMC),
1002                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
1003         },
1004         { /* IMC */
1005                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_AML_YQ_IMC),
1006                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
1007         },
1008         { /* IMC */
1009                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WHL_UQ_IMC),
1010                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
1011         },
1012         { /* IMC */
1013                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WHL_4_UQ_IMC),
1014                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
1015         },
1016         { /* IMC */
1017                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WHL_UD_IMC),
1018                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
1019         },
1020         { /* IMC */
1021                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_H1_IMC),
1022                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
1023         },
1024         { /* IMC */
1025                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_H2_IMC),
1026                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
1027         },
1028         { /* IMC */
1029                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_H3_IMC),
1030                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
1031         },
1032         { /* IMC */
1033                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_U1_IMC),
1034                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
1035         },
1036         { /* IMC */
1037                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_U2_IMC),
1038                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
1039         },
1040         { /* IMC */
1041                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_U3_IMC),
1042                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
1043         },
1044         { /* IMC */
1045                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_S1_IMC),
1046                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
1047         },
1048         { /* IMC */
1049                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_S2_IMC),
1050                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
1051         },
1052         { /* IMC */
1053                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_S3_IMC),
1054                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
1055         },
1056         { /* IMC */
1057                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_S4_IMC),
1058                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
1059         },
1060         { /* IMC */
1061                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_S5_IMC),
1062                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
1063         },
1064         { /* end: all zeroes */ },
1065 };
1066
1067 static const struct pci_device_id icl_uncore_pci_ids[] = {
1068         { /* IMC */
1069                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICL_U_IMC),
1070                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
1071         },
1072         { /* IMC */
1073                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICL_U2_IMC),
1074                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
1075         },
1076         { /* IMC */
1077                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_RKL_1_IMC),
1078                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
1079         },
1080         { /* IMC */
1081                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_RKL_2_IMC),
1082                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
1083         },
1084         { /* end: all zeroes */ },
1085 };
1086
1087 static struct pci_driver snb_uncore_pci_driver = {
1088         .name           = "snb_uncore",
1089         .id_table       = snb_uncore_pci_ids,
1090 };
1091
1092 static struct pci_driver ivb_uncore_pci_driver = {
1093         .name           = "ivb_uncore",
1094         .id_table       = ivb_uncore_pci_ids,
1095 };
1096
1097 static struct pci_driver hsw_uncore_pci_driver = {
1098         .name           = "hsw_uncore",
1099         .id_table       = hsw_uncore_pci_ids,
1100 };
1101
1102 static struct pci_driver bdw_uncore_pci_driver = {
1103         .name           = "bdw_uncore",
1104         .id_table       = bdw_uncore_pci_ids,
1105 };
1106
1107 static struct pci_driver skl_uncore_pci_driver = {
1108         .name           = "skl_uncore",
1109         .id_table       = skl_uncore_pci_ids,
1110 };
1111
1112 static struct pci_driver icl_uncore_pci_driver = {
1113         .name           = "icl_uncore",
1114         .id_table       = icl_uncore_pci_ids,
1115 };
1116
1117 struct imc_uncore_pci_dev {
1118         __u32 pci_id;
1119         struct pci_driver *driver;
1120 };
1121 #define IMC_DEV(a, d) \
1122         { .pci_id = PCI_DEVICE_ID_INTEL_##a, .driver = (d) }
1123
1124 static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
1125         IMC_DEV(SNB_IMC, &snb_uncore_pci_driver),
1126         IMC_DEV(IVB_IMC, &ivb_uncore_pci_driver),    /* 3rd Gen Core processor */
1127         IMC_DEV(IVB_E3_IMC, &ivb_uncore_pci_driver), /* Xeon E3-1200 v2/3rd Gen Core processor */
1128         IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver),    /* 4th Gen Core Processor */
1129         IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver),  /* 4th Gen Core ULT Mobile Processor */
1130         IMC_DEV(BDW_IMC, &bdw_uncore_pci_driver),    /* 5th Gen Core U */
1131         IMC_DEV(SKL_Y_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core Y */
1132         IMC_DEV(SKL_U_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core U */
1133         IMC_DEV(SKL_HD_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core H Dual Core */
1134         IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core H Quad Core */
1135         IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core S Dual Core */
1136         IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core S Quad Core */
1137         IMC_DEV(SKL_E3_IMC, &skl_uncore_pci_driver),  /* Xeon E3 V5 Gen Core processor */
1138         IMC_DEV(KBL_Y_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core Y */
1139         IMC_DEV(KBL_U_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core U */
1140         IMC_DEV(KBL_UQ_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core U Quad Core */
1141         IMC_DEV(KBL_SD_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core S Dual Core */
1142         IMC_DEV(KBL_SQ_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core S Quad Core */
1143         IMC_DEV(KBL_HQ_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core H Quad Core */
1144         IMC_DEV(KBL_WQ_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core S 4 cores Work Station */
1145         IMC_DEV(CFL_2U_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core U 2 Cores */
1146         IMC_DEV(CFL_4U_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core U 4 Cores */
1147         IMC_DEV(CFL_4H_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core H 4 Cores */
1148         IMC_DEV(CFL_6H_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core H 6 Cores */
1149         IMC_DEV(CFL_2S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 2 Cores Desktop */
1150         IMC_DEV(CFL_4S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 4 Cores Desktop */
1151         IMC_DEV(CFL_6S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 6 Cores Desktop */
1152         IMC_DEV(CFL_8S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 8 Cores Desktop */
1153         IMC_DEV(CFL_4S_W_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 4 Cores Work Station */
1154         IMC_DEV(CFL_6S_W_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 6 Cores Work Station */
1155         IMC_DEV(CFL_8S_W_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 8 Cores Work Station */
1156         IMC_DEV(CFL_4S_S_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 4 Cores Server */
1157         IMC_DEV(CFL_6S_S_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 6 Cores Server */
1158         IMC_DEV(CFL_8S_S_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 8 Cores Server */
1159         IMC_DEV(AML_YD_IMC, &skl_uncore_pci_driver),    /* 8th Gen Core Y Mobile Dual Core */
1160         IMC_DEV(AML_YQ_IMC, &skl_uncore_pci_driver),    /* 8th Gen Core Y Mobile Quad Core */
1161         IMC_DEV(WHL_UQ_IMC, &skl_uncore_pci_driver),    /* 8th Gen Core U Mobile Quad Core */
1162         IMC_DEV(WHL_4_UQ_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core U Mobile Quad Core */
1163         IMC_DEV(WHL_UD_IMC, &skl_uncore_pci_driver),    /* 8th Gen Core U Mobile Dual Core */
1164         IMC_DEV(CML_H1_IMC, &skl_uncore_pci_driver),
1165         IMC_DEV(CML_H2_IMC, &skl_uncore_pci_driver),
1166         IMC_DEV(CML_H3_IMC, &skl_uncore_pci_driver),
1167         IMC_DEV(CML_U1_IMC, &skl_uncore_pci_driver),
1168         IMC_DEV(CML_U2_IMC, &skl_uncore_pci_driver),
1169         IMC_DEV(CML_U3_IMC, &skl_uncore_pci_driver),
1170         IMC_DEV(CML_S1_IMC, &skl_uncore_pci_driver),
1171         IMC_DEV(CML_S2_IMC, &skl_uncore_pci_driver),
1172         IMC_DEV(CML_S3_IMC, &skl_uncore_pci_driver),
1173         IMC_DEV(CML_S4_IMC, &skl_uncore_pci_driver),
1174         IMC_DEV(CML_S5_IMC, &skl_uncore_pci_driver),
1175         IMC_DEV(ICL_U_IMC, &icl_uncore_pci_driver),     /* 10th Gen Core Mobile */
1176         IMC_DEV(ICL_U2_IMC, &icl_uncore_pci_driver),    /* 10th Gen Core Mobile */
1177         IMC_DEV(RKL_1_IMC, &icl_uncore_pci_driver),
1178         IMC_DEV(RKL_2_IMC, &icl_uncore_pci_driver),
1179         {  /* end marker */ }
1180 };
1181
1182
1183 #define for_each_imc_pci_id(x, t) \
1184         for (x = (t); (x)->pci_id; x++)
1185
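/*
 * Walk desktop_imc_pci_ids and return the driver entry for the first IMC
 * device ID actually present in the system; snb_pci2phy_map_init() doubles
 * as the presence probe and records the bus -> die mapping as it goes.
 */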
1186 static struct pci_driver *imc_uncore_find_dev(void)
1187 {
1188         const struct imc_uncore_pci_dev *p;
1189         int ret;
1190
1191         for_each_imc_pci_id(p, desktop_imc_pci_ids) {
1192                 ret = snb_pci2phy_map_init(p->pci_id);
1193                 if (ret == 0)
1194                         return p->driver;
1195         }
1196         return NULL;
1197 }
1198
1199 static int imc_uncore_pci_init(void)
1200 {
1201         struct pci_driver *imc_drv = imc_uncore_find_dev();
1202
1203         if (!imc_drv)
1204                 return -ENODEV;
1205
1206         uncore_pci_uncores = snb_pci_uncores;
1207         uncore_pci_driver = imc_drv;
1208
1209         return 0;
1210 }
1211
1212 int snb_uncore_pci_init(void)
1213 {
1214         return imc_uncore_pci_init();
1215 }
1216
1217 int ivb_uncore_pci_init(void)
1218 {
1219         return imc_uncore_pci_init();
1220 }
1221 int hsw_uncore_pci_init(void)
1222 {
1223         return imc_uncore_pci_init();
1224 }
1225
1226 int bdw_uncore_pci_init(void)
1227 {
1228         return imc_uncore_pci_init();
1229 }
1230
1231 int skl_uncore_pci_init(void)
1232 {
1233         return imc_uncore_pci_init();
1234 }
1235
1236 /* end of Sandy Bridge uncore support */
1237
1238 /* Nehalem uncore support */
1239 static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
1240 {
1241         wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
1242 }
1243
1244 static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
1245 {
1246         wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
1247 }
1248
1249 static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1250 {
1251         struct hw_perf_event *hwc = &event->hw;
1252
1253         if (hwc->idx < UNCORE_PMC_IDX_FIXED)
1254                 wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
1255         else
1256                 wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
1257 }
1258
1259 static struct attribute *nhm_uncore_formats_attr[] = {
1260         &format_attr_event.attr,
1261         &format_attr_umask.attr,
1262         &format_attr_edge.attr,
1263         &format_attr_inv.attr,
1264         &format_attr_cmask8.attr,
1265         NULL,
1266 };
1267
1268 static const struct attribute_group nhm_uncore_format_group = {
1269         .name = "format",
1270         .attrs = nhm_uncore_formats_attr,
1271 };
1272
1273 static struct uncore_event_desc nhm_uncore_events[] = {
1274         INTEL_UNCORE_EVENT_DESC(clockticks,                "event=0xff,umask=0x00"),
1275         INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any,       "event=0x2f,umask=0x0f"),
1276         INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any,      "event=0x2c,umask=0x0f"),
1277         INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads,     "event=0x20,umask=0x01"),
1278         INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes,    "event=0x20,umask=0x02"),
1279         INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads,  "event=0x20,umask=0x04"),
1280         INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
1281         INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads,   "event=0x20,umask=0x10"),
1282         INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes,  "event=0x20,umask=0x20"),
1283         { /* end: all zeroes */ },
1284 };
1285
1286 static struct intel_uncore_ops nhm_uncore_msr_ops = {
1287         .disable_box    = nhm_uncore_msr_disable_box,
1288         .enable_box     = nhm_uncore_msr_enable_box,
1289         .disable_event  = snb_uncore_msr_disable_event,
1290         .enable_event   = nhm_uncore_msr_enable_event,
1291         .read_counter   = uncore_msr_read_counter,
1292 };
1293
1294 static struct intel_uncore_type nhm_uncore = {
1295         .name           = "",
1296         .num_counters   = 8,
1297         .num_boxes      = 1,
1298         .perf_ctr_bits  = 48,
1299         .fixed_ctr_bits = 48,
1300         .event_ctl      = NHM_UNC_PERFEVTSEL0,
1301         .perf_ctr       = NHM_UNC_UNCORE_PMC0,
1302         .fixed_ctr      = NHM_UNC_FIXED_CTR,
1303         .fixed_ctl      = NHM_UNC_FIXED_CTR_CTRL,
1304         .event_mask     = NHM_UNC_RAW_EVENT_MASK,
1305         .event_descs    = nhm_uncore_events,
1306         .ops            = &nhm_uncore_msr_ops,
1307         .format_group   = &nhm_uncore_format_group,
1308 };
1309
1310 static struct intel_uncore_type *nhm_msr_uncores[] = {
1311         &nhm_uncore,
1312         NULL,
1313 };
1314
1315 void nhm_uncore_cpu_init(void)
1316 {
1317         uncore_msr_uncores = nhm_msr_uncores;
1318 }
1319
1320 /* end of Nehalem uncore support */
1321
1322 /* Tiger Lake MMIO uncore support */
1323
1324 static const struct pci_device_id tgl_uncore_pci_ids[] = {
1325         { /* IMC */
1326                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGL_U1_IMC),
1327                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
1328         },
1329         { /* IMC */
1330                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGL_U2_IMC),
1331                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
1332         },
1333         { /* IMC */
1334                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGL_U3_IMC),
1335                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
1336         },
1337         { /* IMC */
1338                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGL_U4_IMC),
1339                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
1340         },
1341         { /* IMC */
1342                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGL_H_IMC),
1343                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
1344         },
1345         { /* IMC */
1346                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_1_IMC),
1347                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
1348         },
1349         { /* IMC */
1350                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_2_IMC),
1351                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
1352         },
1353         { /* end: all zeroes */ }
1354 };
1355
1356 enum perf_tgl_uncore_imc_freerunning_types {
1357         TGL_MMIO_UNCORE_IMC_DATA_TOTAL,
1358         TGL_MMIO_UNCORE_IMC_DATA_READ,
1359         TGL_MMIO_UNCORE_IMC_DATA_WRITE,
1360         TGL_MMIO_UNCORE_IMC_FREERUNNING_TYPE_MAX
1361 };
1362
1363 static struct freerunning_counters tgl_l_uncore_imc_freerunning[] = {
1364         [TGL_MMIO_UNCORE_IMC_DATA_TOTAL]        = { 0x5040, 0x0, 0x0, 1, 64 },
1365         [TGL_MMIO_UNCORE_IMC_DATA_READ]         = { 0x5058, 0x0, 0x0, 1, 64 },
1366         [TGL_MMIO_UNCORE_IMC_DATA_WRITE]        = { 0x50A0, 0x0, 0x0, 1, 64 },
1367 };
1368
1369 static struct freerunning_counters tgl_uncore_imc_freerunning[] = {
1370         [TGL_MMIO_UNCORE_IMC_DATA_TOTAL]        = { 0xd840, 0x0, 0x0, 1, 64 },
1371         [TGL_MMIO_UNCORE_IMC_DATA_READ]         = { 0xd858, 0x0, 0x0, 1, 64 },
1372         [TGL_MMIO_UNCORE_IMC_DATA_WRITE]        = { 0xd8A0, 0x0, 0x0, 1, 64 },
1373 };
1374
1375 static struct uncore_event_desc tgl_uncore_imc_events[] = {
1376         INTEL_UNCORE_EVENT_DESC(data_total,         "event=0xff,umask=0x10"),
1377         INTEL_UNCORE_EVENT_DESC(data_total.scale,   "6.103515625e-5"),
1378         INTEL_UNCORE_EVENT_DESC(data_total.unit,    "MiB"),
1379
1380         INTEL_UNCORE_EVENT_DESC(data_read,         "event=0xff,umask=0x20"),
1381         INTEL_UNCORE_EVENT_DESC(data_read.scale,   "6.103515625e-5"),
1382         INTEL_UNCORE_EVENT_DESC(data_read.unit,    "MiB"),
1383
1384         INTEL_UNCORE_EVENT_DESC(data_write,        "event=0xff,umask=0x30"),
1385         INTEL_UNCORE_EVENT_DESC(data_write.scale,  "6.103515625e-5"),
1386         INTEL_UNCORE_EVENT_DESC(data_write.unit,   "MiB"),
1387
1388         { /* end: all zeroes */ }
1389 };
1390
1391 static struct pci_dev *tgl_uncore_get_mc_dev(void)
1392 {
1393         const struct pci_device_id *ids = tgl_uncore_pci_ids;
1394         struct pci_dev *mc_dev = NULL;
1395
1396         while (ids && ids->vendor) {
1397                 mc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, ids->device, NULL);
1398                 if (mc_dev)
1399                         return mc_dev;
1400                 ids++;
1401         }
1402
1403         return mc_dev;
1404 }
1405
1406 #define TGL_UNCORE_MMIO_IMC_MEM_OFFSET          0x10000
1407 #define TGL_UNCORE_PCI_IMC_MAP_SIZE             0xe000
1408
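/*
 * The TGL free-running IMC counters sit in MCHBAR space.  Bit 0 of the BAR
 * register doubles as an enable flag, so it is checked and masked off
 * before forming the physical address; each box is a further
 * TGL_UNCORE_MMIO_IMC_MEM_OFFSET apart.
 */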
1409 static void tgl_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
1410 {
1411         struct pci_dev *pdev = tgl_uncore_get_mc_dev();
1412         struct intel_uncore_pmu *pmu = box->pmu;
1413         struct intel_uncore_type *type = pmu->type;
1414         resource_size_t addr;
1415         u32 mch_bar;
1416
1417         if (!pdev) {
1418                 pr_warn("perf uncore: Cannot find matched IMC device.\n");
1419                 return;
1420         }
1421
1422         pci_read_config_dword(pdev, SNB_UNCORE_PCI_IMC_BAR_OFFSET, &mch_bar);
1423         /* MCHBAR is disabled */
1424         if (!(mch_bar & BIT(0))) {
1425                 pr_warn("perf uncore: MCHBAR is disabled. Failed to map IMC free-running counters.\n");
1426                 pci_dev_put(pdev);
1427                 return;
1428         }
1429         mch_bar &= ~BIT(0);
1430         addr = (resource_size_t)(mch_bar + TGL_UNCORE_MMIO_IMC_MEM_OFFSET * pmu->pmu_idx);
1431
1432 #ifdef CONFIG_PHYS_ADDR_T_64BIT
1433         pci_read_config_dword(pdev, SNB_UNCORE_PCI_IMC_BAR_OFFSET + 4, &mch_bar);
1434         addr |= ((resource_size_t)mch_bar << 32);
1435 #endif
1436
1437         box->io_addr = ioremap(addr, type->mmio_map_size);
1438         if (!box->io_addr)
1439                 pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);
1440
1441         pci_dev_put(pdev);
1442 }
1443
1444 static struct intel_uncore_ops tgl_uncore_imc_freerunning_ops = {
1445         .init_box       = tgl_uncore_imc_freerunning_init_box,
1446         .exit_box       = uncore_mmio_exit_box,
1447         .read_counter   = uncore_mmio_read_counter,
1448         .hw_config      = uncore_freerunning_hw_config,
1449 };
1450
1451 static struct attribute *tgl_uncore_imc_formats_attr[] = {
1452         &format_attr_event.attr,
1453         &format_attr_umask.attr,
1454         NULL
1455 };
1456
1457 static const struct attribute_group tgl_uncore_imc_format_group = {
1458         .name = "format",
1459         .attrs = tgl_uncore_imc_formats_attr,
1460 };
1461
1462 static struct intel_uncore_type tgl_uncore_imc_free_running = {
1463         .name                   = "imc_free_running",
1464         .num_counters           = 3,
1465         .num_boxes              = 2,
1466         .num_freerunning_types  = TGL_MMIO_UNCORE_IMC_FREERUNNING_TYPE_MAX,
1467         .mmio_map_size          = TGL_UNCORE_PCI_IMC_MAP_SIZE,
1468         .freerunning            = tgl_uncore_imc_freerunning,
1469         .ops                    = &tgl_uncore_imc_freerunning_ops,
1470         .event_descs            = tgl_uncore_imc_events,
1471         .format_group           = &tgl_uncore_imc_format_group,
1472 };
1473
1474 static struct intel_uncore_type *tgl_mmio_uncores[] = {
1475         &tgl_uncore_imc_free_running,
1476         NULL
1477 };
1478
1479 void tgl_l_uncore_mmio_init(void)
1480 {
1481         tgl_uncore_imc_free_running.freerunning = tgl_l_uncore_imc_freerunning;
1482         uncore_mmio_uncores = tgl_mmio_uncores;
1483 }
1484
1485 void tgl_uncore_mmio_init(void)
1486 {
1487         uncore_mmio_uncores = tgl_mmio_uncores;
1488 }
1489
1490 /* end of Tiger Lake MMIO uncore support */