GNU Linux-libre 5.4.274-gnu1
[releases.git] / arch / x86 / events / intel / uncore_snbep.c
1 // SPDX-License-Identifier: GPL-2.0
2 /* SandyBridge-EP/IvyTown uncore support */
3 #include "uncore.h"
4
5 /* SNB-EP pci bus to socket mapping */
6 #define SNBEP_CPUNODEID                 0x40
7 #define SNBEP_GIDNIDMAP                 0x54
8
9 /* SNB-EP Box level control */
10 #define SNBEP_PMON_BOX_CTL_RST_CTRL     (1 << 0)
11 #define SNBEP_PMON_BOX_CTL_RST_CTRS     (1 << 1)
12 #define SNBEP_PMON_BOX_CTL_FRZ          (1 << 8)
13 #define SNBEP_PMON_BOX_CTL_FRZ_EN       (1 << 16)
14 #define SNBEP_PMON_BOX_CTL_INT          (SNBEP_PMON_BOX_CTL_RST_CTRL | \
15                                          SNBEP_PMON_BOX_CTL_RST_CTRS | \
16                                          SNBEP_PMON_BOX_CTL_FRZ_EN)
17 /* SNB-EP event control */
18 #define SNBEP_PMON_CTL_EV_SEL_MASK      0x000000ff
19 #define SNBEP_PMON_CTL_UMASK_MASK       0x0000ff00
20 #define SNBEP_PMON_CTL_RST              (1 << 17)
21 #define SNBEP_PMON_CTL_EDGE_DET         (1 << 18)
22 #define SNBEP_PMON_CTL_EV_SEL_EXT       (1 << 21)
23 #define SNBEP_PMON_CTL_EN               (1 << 22)
24 #define SNBEP_PMON_CTL_INVERT           (1 << 23)
25 #define SNBEP_PMON_CTL_TRESH_MASK       0xff000000
26 #define SNBEP_PMON_RAW_EVENT_MASK       (SNBEP_PMON_CTL_EV_SEL_MASK | \
27                                          SNBEP_PMON_CTL_UMASK_MASK | \
28                                          SNBEP_PMON_CTL_EDGE_DET | \
29                                          SNBEP_PMON_CTL_INVERT | \
30                                          SNBEP_PMON_CTL_TRESH_MASK)
31
32 /* SNB-EP Ubox event control */
33 #define SNBEP_U_MSR_PMON_CTL_TRESH_MASK         0x1f000000
34 #define SNBEP_U_MSR_PMON_RAW_EVENT_MASK         \
35                                 (SNBEP_PMON_CTL_EV_SEL_MASK | \
36                                  SNBEP_PMON_CTL_UMASK_MASK | \
37                                  SNBEP_PMON_CTL_EDGE_DET | \
38                                  SNBEP_PMON_CTL_INVERT | \
39                                  SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
40
41 #define SNBEP_CBO_PMON_CTL_TID_EN               (1 << 19)
42 #define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK       (SNBEP_PMON_RAW_EVENT_MASK | \
43                                                  SNBEP_CBO_PMON_CTL_TID_EN)
44
45 /* SNB-EP PCU event control */
46 #define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK     0x0000c000
47 #define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK       0x1f000000
48 #define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT       (1 << 30)
49 #define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET     (1 << 31)
50 #define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK       \
51                                 (SNBEP_PMON_CTL_EV_SEL_MASK | \
52                                  SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
53                                  SNBEP_PMON_CTL_EDGE_DET | \
54                                  SNBEP_PMON_CTL_INVERT | \
55                                  SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
56                                  SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
57                                  SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
58
59 #define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK       \
60                                 (SNBEP_PMON_RAW_EVENT_MASK | \
61                                  SNBEP_PMON_CTL_EV_SEL_EXT)
62
63 /* SNB-EP pci control register */
64 #define SNBEP_PCI_PMON_BOX_CTL                  0xf4
65 #define SNBEP_PCI_PMON_CTL0                     0xd8
66 /* SNB-EP pci counter register */
67 #define SNBEP_PCI_PMON_CTR0                     0xa0
68
69 /* SNB-EP home agent register */
70 #define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0        0x40
71 #define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1        0x44
72 #define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH       0x48
73 /* SNB-EP memory controller register */
74 #define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL         0xf0
75 #define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR         0xd0
76 /* SNB-EP QPI register */
77 #define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0          0x228
78 #define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1          0x22c
79 #define SNBEP_Q_Py_PCI_PMON_PKT_MASK0           0x238
80 #define SNBEP_Q_Py_PCI_PMON_PKT_MASK1           0x23c
81
82 /* SNB-EP Ubox register */
83 #define SNBEP_U_MSR_PMON_CTR0                   0xc16
84 #define SNBEP_U_MSR_PMON_CTL0                   0xc10
85
86 #define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL         0xc08
87 #define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR         0xc09
88
89 /* SNB-EP Cbo register */
90 #define SNBEP_C0_MSR_PMON_CTR0                  0xd16
91 #define SNBEP_C0_MSR_PMON_CTL0                  0xd10
92 #define SNBEP_C0_MSR_PMON_BOX_CTL               0xd04
93 #define SNBEP_C0_MSR_PMON_BOX_FILTER            0xd14
94 #define SNBEP_CBO_MSR_OFFSET                    0x20
95
96 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID       0x1f
97 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID       0x3fc00
98 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE     0x7c0000
99 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC       0xff800000
100
101 #define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) {    \
102         .event = (e),                           \
103         .msr = SNBEP_C0_MSR_PMON_BOX_FILTER,    \
104         .config_mask = (m),                     \
105         .idx = (i)                              \
106 }
107
108 /* SNB-EP PCU register */
109 #define SNBEP_PCU_MSR_PMON_CTR0                 0xc36
110 #define SNBEP_PCU_MSR_PMON_CTL0                 0xc30
111 #define SNBEP_PCU_MSR_PMON_BOX_CTL              0xc24
112 #define SNBEP_PCU_MSR_PMON_BOX_FILTER           0xc34
113 #define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK      0xffffffff
114 #define SNBEP_PCU_MSR_CORE_C3_CTR               0x3fc
115 #define SNBEP_PCU_MSR_CORE_C6_CTR               0x3fd
116
117 /* IVBEP event control */
118 #define IVBEP_PMON_BOX_CTL_INT          (SNBEP_PMON_BOX_CTL_RST_CTRL | \
119                                          SNBEP_PMON_BOX_CTL_RST_CTRS)
120 #define IVBEP_PMON_RAW_EVENT_MASK               (SNBEP_PMON_CTL_EV_SEL_MASK | \
121                                          SNBEP_PMON_CTL_UMASK_MASK | \
122                                          SNBEP_PMON_CTL_EDGE_DET | \
123                                          SNBEP_PMON_CTL_TRESH_MASK)
124 /* IVBEP Ubox */
125 #define IVBEP_U_MSR_PMON_GLOBAL_CTL             0xc00
126 #define IVBEP_U_PMON_GLOBAL_FRZ_ALL             (1 << 31)
127 #define IVBEP_U_PMON_GLOBAL_UNFRZ_ALL           (1 << 29)
128
129 #define IVBEP_U_MSR_PMON_RAW_EVENT_MASK \
130                                 (SNBEP_PMON_CTL_EV_SEL_MASK | \
131                                  SNBEP_PMON_CTL_UMASK_MASK | \
132                                  SNBEP_PMON_CTL_EDGE_DET | \
133                                  SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
134 /* IVBEP Cbo */
135 #define IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK               (IVBEP_PMON_RAW_EVENT_MASK | \
136                                                  SNBEP_CBO_PMON_CTL_TID_EN)
137
138 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_TID               (0x1fULL << 0)
139 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK      (0xfULL << 5)
140 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE     (0x3fULL << 17)
141 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_NID               (0xffffULL << 32)
142 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC               (0x1ffULL << 52)
143 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_C6                (0x1ULL << 61)
144 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_NC                (0x1ULL << 62)
145 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC      (0x1ULL << 63)
146
147 /* IVBEP home agent */
148 #define IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST         (1 << 16)
149 #define IVBEP_HA_PCI_PMON_RAW_EVENT_MASK                \
150                                 (IVBEP_PMON_RAW_EVENT_MASK | \
151                                  IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST)
152 /* IVBEP PCU */
153 #define IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK       \
154                                 (SNBEP_PMON_CTL_EV_SEL_MASK | \
155                                  SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
156                                  SNBEP_PMON_CTL_EDGE_DET | \
157                                  SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
158                                  SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
159                                  SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
160 /* IVBEP QPI */
161 #define IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK       \
162                                 (IVBEP_PMON_RAW_EVENT_MASK | \
163                                  SNBEP_PMON_CTL_EV_SEL_EXT)
164
165 #define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
166                                 ((1ULL << (n)) - 1)))
167
168 /* Haswell-EP Ubox */
169 #define HSWEP_U_MSR_PMON_CTR0                   0x709
170 #define HSWEP_U_MSR_PMON_CTL0                   0x705
171 #define HSWEP_U_MSR_PMON_FILTER                 0x707
172
173 #define HSWEP_U_MSR_PMON_UCLK_FIXED_CTL         0x703
174 #define HSWEP_U_MSR_PMON_UCLK_FIXED_CTR         0x704
175
176 #define HSWEP_U_MSR_PMON_BOX_FILTER_TID         (0x1 << 0)
177 #define HSWEP_U_MSR_PMON_BOX_FILTER_CID         (0x1fULL << 1)
178 #define HSWEP_U_MSR_PMON_BOX_FILTER_MASK \
179                                         (HSWEP_U_MSR_PMON_BOX_FILTER_TID | \
180                                          HSWEP_U_MSR_PMON_BOX_FILTER_CID)
181
182 /* Haswell-EP CBo */
183 #define HSWEP_C0_MSR_PMON_CTR0                  0xe08
184 #define HSWEP_C0_MSR_PMON_CTL0                  0xe01
185 #define HSWEP_C0_MSR_PMON_BOX_CTL                       0xe00
186 #define HSWEP_C0_MSR_PMON_BOX_FILTER0           0xe05
187 #define HSWEP_CBO_MSR_OFFSET                    0x10
188
189
190 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_TID               (0x3fULL << 0)
191 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK      (0xfULL << 6)
192 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE     (0x7fULL << 17)
193 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_NID               (0xffffULL << 32)
194 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC               (0x1ffULL << 52)
195 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_C6                (0x1ULL << 61)
196 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_NC                (0x1ULL << 62)
197 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC      (0x1ULL << 63)
198
199
200 /* Haswell-EP Sbox */
201 #define HSWEP_S0_MSR_PMON_CTR0                  0x726
202 #define HSWEP_S0_MSR_PMON_CTL0                  0x721
203 #define HSWEP_S0_MSR_PMON_BOX_CTL                       0x720
204 #define HSWEP_SBOX_MSR_OFFSET                   0xa
205 #define HSWEP_S_MSR_PMON_RAW_EVENT_MASK         (SNBEP_PMON_RAW_EVENT_MASK | \
206                                                  SNBEP_CBO_PMON_CTL_TID_EN)
207
208 /* Haswell-EP PCU */
209 #define HSWEP_PCU_MSR_PMON_CTR0                 0x717
210 #define HSWEP_PCU_MSR_PMON_CTL0                 0x711
211 #define HSWEP_PCU_MSR_PMON_BOX_CTL              0x710
212 #define HSWEP_PCU_MSR_PMON_BOX_FILTER           0x715
213
214 /* KNL Ubox */
215 #define KNL_U_MSR_PMON_RAW_EVENT_MASK \
216                                         (SNBEP_U_MSR_PMON_RAW_EVENT_MASK | \
217                                                 SNBEP_CBO_PMON_CTL_TID_EN)
218 /* KNL CHA */
219 #define KNL_CHA_MSR_OFFSET                      0xc
220 #define KNL_CHA_MSR_PMON_CTL_QOR                (1 << 16)
221 #define KNL_CHA_MSR_PMON_RAW_EVENT_MASK \
222                                         (SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK | \
223                                          KNL_CHA_MSR_PMON_CTL_QOR)
224 #define KNL_CHA_MSR_PMON_BOX_FILTER_TID         0x1ff
225 #define KNL_CHA_MSR_PMON_BOX_FILTER_STATE       (7 << 18)
226 #define KNL_CHA_MSR_PMON_BOX_FILTER_OP          (0xfffffe2aULL << 32)
227 #define KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE (0x1ULL << 32)
228 #define KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE  (0x1ULL << 33)
229 #define KNL_CHA_MSR_PMON_BOX_FILTER_NNC         (0x1ULL << 37)
230
231 /* KNL EDC/MC UCLK */
232 #define KNL_UCLK_MSR_PMON_CTR0_LOW              0x400
233 #define KNL_UCLK_MSR_PMON_CTL0                  0x420
234 #define KNL_UCLK_MSR_PMON_BOX_CTL               0x430
235 #define KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW        0x44c
236 #define KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL        0x454
237 #define KNL_PMON_FIXED_CTL_EN                   0x1
238
239 /* KNL EDC */
240 #define KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW         0xa00
241 #define KNL_EDC0_ECLK_MSR_PMON_CTL0             0xa20
242 #define KNL_EDC0_ECLK_MSR_PMON_BOX_CTL          0xa30
243 #define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW   0xa3c
244 #define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL   0xa44
245
246 /* KNL MC */
247 #define KNL_MC0_CH0_MSR_PMON_CTR0_LOW           0xb00
248 #define KNL_MC0_CH0_MSR_PMON_CTL0               0xb20
249 #define KNL_MC0_CH0_MSR_PMON_BOX_CTL            0xb30
250 #define KNL_MC0_CH0_MSR_PMON_FIXED_LOW          0xb3c
251 #define KNL_MC0_CH0_MSR_PMON_FIXED_CTL          0xb44
252
253 /* KNL IRP */
254 #define KNL_IRP_PCI_PMON_BOX_CTL                0xf0
255 #define KNL_IRP_PCI_PMON_RAW_EVENT_MASK         (SNBEP_PMON_RAW_EVENT_MASK | \
256                                                  KNL_CHA_MSR_PMON_CTL_QOR)
257 /* KNL PCU */
258 #define KNL_PCU_PMON_CTL_EV_SEL_MASK            0x0000007f
259 #define KNL_PCU_PMON_CTL_USE_OCC_CTR            (1 << 7)
260 #define KNL_PCU_MSR_PMON_CTL_TRESH_MASK         0x3f000000
261 #define KNL_PCU_MSR_PMON_RAW_EVENT_MASK \
262                                 (KNL_PCU_PMON_CTL_EV_SEL_MASK | \
263                                  KNL_PCU_PMON_CTL_USE_OCC_CTR | \
264                                  SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
265                                  SNBEP_PMON_CTL_EDGE_DET | \
266                                  SNBEP_CBO_PMON_CTL_TID_EN | \
267                                  SNBEP_PMON_CTL_INVERT | \
268                                  KNL_PCU_MSR_PMON_CTL_TRESH_MASK | \
269                                  SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
270                                  SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
271
272 /* SKX pci bus to socket mapping */
273 #define SKX_CPUNODEID                   0xc0
274 #define SKX_GIDNIDMAP                   0xd4
275
276 /* SKX CHA */
277 #define SKX_CHA_MSR_PMON_BOX_FILTER_TID         (0x1ffULL << 0)
278 #define SKX_CHA_MSR_PMON_BOX_FILTER_LINK        (0xfULL << 9)
279 #define SKX_CHA_MSR_PMON_BOX_FILTER_STATE       (0x3ffULL << 17)
280 #define SKX_CHA_MSR_PMON_BOX_FILTER_REM         (0x1ULL << 32)
281 #define SKX_CHA_MSR_PMON_BOX_FILTER_LOC         (0x1ULL << 33)
282 #define SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC     (0x1ULL << 35)
283 #define SKX_CHA_MSR_PMON_BOX_FILTER_NM          (0x1ULL << 36)
284 #define SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM      (0x1ULL << 37)
285 #define SKX_CHA_MSR_PMON_BOX_FILTER_OPC0        (0x3ffULL << 41)
286 #define SKX_CHA_MSR_PMON_BOX_FILTER_OPC1        (0x3ffULL << 51)
287 #define SKX_CHA_MSR_PMON_BOX_FILTER_C6          (0x1ULL << 61)
288 #define SKX_CHA_MSR_PMON_BOX_FILTER_NC          (0x1ULL << 62)
289 #define SKX_CHA_MSR_PMON_BOX_FILTER_ISOC        (0x1ULL << 63)
290
291 /* SKX IIO */
292 #define SKX_IIO0_MSR_PMON_CTL0          0xa48
293 #define SKX_IIO0_MSR_PMON_CTR0          0xa41
294 #define SKX_IIO0_MSR_PMON_BOX_CTL       0xa40
295 #define SKX_IIO_MSR_OFFSET              0x20
296
297 #define SKX_PMON_CTL_TRESH_MASK         (0xff << 24)
298 #define SKX_PMON_CTL_TRESH_MASK_EXT     (0xf)
299 #define SKX_PMON_CTL_CH_MASK            (0xff << 4)
300 #define SKX_PMON_CTL_FC_MASK            (0x7 << 12)
301 #define SKX_IIO_PMON_RAW_EVENT_MASK     (SNBEP_PMON_CTL_EV_SEL_MASK | \
302                                          SNBEP_PMON_CTL_UMASK_MASK | \
303                                          SNBEP_PMON_CTL_EDGE_DET | \
304                                          SNBEP_PMON_CTL_INVERT | \
305                                          SKX_PMON_CTL_TRESH_MASK)
306 #define SKX_IIO_PMON_RAW_EVENT_MASK_EXT (SKX_PMON_CTL_TRESH_MASK_EXT | \
307                                          SKX_PMON_CTL_CH_MASK | \
308                                          SKX_PMON_CTL_FC_MASK)
309
310 /* SKX IRP */
311 #define SKX_IRP0_MSR_PMON_CTL0          0xa5b
312 #define SKX_IRP0_MSR_PMON_CTR0          0xa59
313 #define SKX_IRP0_MSR_PMON_BOX_CTL       0xa58
314 #define SKX_IRP_MSR_OFFSET              0x20
315
316 /* SKX UPI */
317 #define SKX_UPI_PCI_PMON_CTL0           0x350
318 #define SKX_UPI_PCI_PMON_CTR0           0x318
319 #define SKX_UPI_PCI_PMON_BOX_CTL        0x378
320 #define SKX_UPI_CTL_UMASK_EXT           0xffefff
321
322 /* SKX M2M */
323 #define SKX_M2M_PCI_PMON_CTL0           0x228
324 #define SKX_M2M_PCI_PMON_CTR0           0x200
325 #define SKX_M2M_PCI_PMON_BOX_CTL        0x258
326
327 /* SNR Ubox */
328 #define SNR_U_MSR_PMON_CTR0                     0x1f98
329 #define SNR_U_MSR_PMON_CTL0                     0x1f91
330 #define SNR_U_MSR_PMON_UCLK_FIXED_CTL           0x1f93
331 #define SNR_U_MSR_PMON_UCLK_FIXED_CTR           0x1f94
332
333 /* SNR CHA */
334 #define SNR_CHA_RAW_EVENT_MASK_EXT              0x3ffffff
335 #define SNR_CHA_MSR_PMON_CTL0                   0x1c01
336 #define SNR_CHA_MSR_PMON_CTR0                   0x1c08
337 #define SNR_CHA_MSR_PMON_BOX_CTL                0x1c00
338 #define SNR_C0_MSR_PMON_BOX_FILTER0             0x1c05
339
340
341 /* SNR IIO */
342 #define SNR_IIO_MSR_PMON_CTL0                   0x1e08
343 #define SNR_IIO_MSR_PMON_CTR0                   0x1e01
344 #define SNR_IIO_MSR_PMON_BOX_CTL                0x1e00
345 #define SNR_IIO_MSR_OFFSET                      0x10
346 #define SNR_IIO_PMON_RAW_EVENT_MASK_EXT         0x7ffff
347
348 /* SNR IRP */
349 #define SNR_IRP0_MSR_PMON_CTL0                  0x1ea8
350 #define SNR_IRP0_MSR_PMON_CTR0                  0x1ea1
351 #define SNR_IRP0_MSR_PMON_BOX_CTL               0x1ea0
352 #define SNR_IRP_MSR_OFFSET                      0x10
353
354 /* SNR M2PCIE */
355 #define SNR_M2PCIE_MSR_PMON_CTL0                0x1e58
356 #define SNR_M2PCIE_MSR_PMON_CTR0                0x1e51
357 #define SNR_M2PCIE_MSR_PMON_BOX_CTL             0x1e50
358 #define SNR_M2PCIE_MSR_OFFSET                   0x10
359
360 /* SNR PCU */
361 #define SNR_PCU_MSR_PMON_CTL0                   0x1ef1
362 #define SNR_PCU_MSR_PMON_CTR0                   0x1ef8
363 #define SNR_PCU_MSR_PMON_BOX_CTL                0x1ef0
364 #define SNR_PCU_MSR_PMON_BOX_FILTER             0x1efc
365
366 /* SNR M2M */
367 #define SNR_M2M_PCI_PMON_CTL0                   0x468
368 #define SNR_M2M_PCI_PMON_CTR0                   0x440
369 #define SNR_M2M_PCI_PMON_BOX_CTL                0x438
370 #define SNR_M2M_PCI_PMON_UMASK_EXT              0xff
371
372 /* SNR IMC */
373 #define SNR_IMC_MMIO_PMON_FIXED_CTL             0x54
374 #define SNR_IMC_MMIO_PMON_FIXED_CTR             0x38
375 #define SNR_IMC_MMIO_PMON_CTL0                  0x40
376 #define SNR_IMC_MMIO_PMON_CTR0                  0x8
377 #define SNR_IMC_MMIO_PMON_BOX_CTL               0x22800
378 #define SNR_IMC_MMIO_OFFSET                     0x4000
379 #define SNR_IMC_MMIO_SIZE                       0x4000
380 #define SNR_IMC_MMIO_BASE_OFFSET                0xd0
381 #define SNR_IMC_MMIO_BASE_MASK                  0x1FFFFFFF
382 #define SNR_IMC_MMIO_MEM0_OFFSET                0xd8
383 #define SNR_IMC_MMIO_MEM0_MASK                  0x7FF
384
385 DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
386 DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6");
387 DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
388 DEFINE_UNCORE_FORMAT_ATTR(use_occ_ctr, use_occ_ctr, "config:7");
389 DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
390 DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-43,45-55");
391 DEFINE_UNCORE_FORMAT_ATTR(umask_ext2, umask, "config:8-15,32-57");
392 DEFINE_UNCORE_FORMAT_ATTR(umask_ext3, umask, "config:8-15,32-39");
393 DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
394 DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
395 DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
396 DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
397 DEFINE_UNCORE_FORMAT_ATTR(thresh9, thresh, "config:24-35");
398 DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
399 DEFINE_UNCORE_FORMAT_ATTR(thresh6, thresh, "config:24-29");
400 DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
401 DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
402 DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
403 DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
404 DEFINE_UNCORE_FORMAT_ATTR(occ_edge_det, occ_edge_det, "config:31");
405 DEFINE_UNCORE_FORMAT_ATTR(ch_mask, ch_mask, "config:36-43");
406 DEFINE_UNCORE_FORMAT_ATTR(ch_mask2, ch_mask, "config:36-47");
407 DEFINE_UNCORE_FORMAT_ATTR(fc_mask, fc_mask, "config:44-46");
408 DEFINE_UNCORE_FORMAT_ATTR(fc_mask2, fc_mask, "config:48-50");
409 DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
410 DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0");
411 DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5");
412 DEFINE_UNCORE_FORMAT_ATTR(filter_tid4, filter_tid, "config1:0-8");
413 DEFINE_UNCORE_FORMAT_ATTR(filter_tid5, filter_tid, "config1:0-9");
414 DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5");
415 DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
416 DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8");
417 DEFINE_UNCORE_FORMAT_ATTR(filter_link3, filter_link, "config1:12");
418 DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
419 DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
420 DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
421 DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
422 DEFINE_UNCORE_FORMAT_ATTR(filter_state3, filter_state, "config1:17-23");
423 DEFINE_UNCORE_FORMAT_ATTR(filter_state4, filter_state, "config1:18-20");
424 DEFINE_UNCORE_FORMAT_ATTR(filter_state5, filter_state, "config1:17-26");
425 DEFINE_UNCORE_FORMAT_ATTR(filter_rem, filter_rem, "config1:32");
426 DEFINE_UNCORE_FORMAT_ATTR(filter_loc, filter_loc, "config1:33");
427 DEFINE_UNCORE_FORMAT_ATTR(filter_nm, filter_nm, "config1:36");
428 DEFINE_UNCORE_FORMAT_ATTR(filter_not_nm, filter_not_nm, "config1:37");
429 DEFINE_UNCORE_FORMAT_ATTR(filter_local, filter_local, "config1:33");
430 DEFINE_UNCORE_FORMAT_ATTR(filter_all_op, filter_all_op, "config1:35");
431 DEFINE_UNCORE_FORMAT_ATTR(filter_nnm, filter_nnm, "config1:37");
432 DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
433 DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
434 DEFINE_UNCORE_FORMAT_ATTR(filter_opc3, filter_opc, "config1:41-60");
435 DEFINE_UNCORE_FORMAT_ATTR(filter_opc_0, filter_opc0, "config1:41-50");
436 DEFINE_UNCORE_FORMAT_ATTR(filter_opc_1, filter_opc1, "config1:51-60");
437 DEFINE_UNCORE_FORMAT_ATTR(filter_nc, filter_nc, "config1:62");
438 DEFINE_UNCORE_FORMAT_ATTR(filter_c6, filter_c6, "config1:61");
439 DEFINE_UNCORE_FORMAT_ATTR(filter_isoc, filter_isoc, "config1:63");
440 DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
441 DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
442 DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
443 DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
444 DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
445 DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
446 DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
447 DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
448 DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
449 DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
450 DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
451 DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
452 DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
453 DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
454 DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
455 DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
456 DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
457 DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
458 DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
459 DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
460 DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
461 DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");
462
463 static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
464 {
465         struct pci_dev *pdev = box->pci_dev;
466         int box_ctl = uncore_pci_box_ctl(box);
467         u32 config = 0;
468
469         if (!pci_read_config_dword(pdev, box_ctl, &config)) {
470                 config |= SNBEP_PMON_BOX_CTL_FRZ;
471                 pci_write_config_dword(pdev, box_ctl, config);
472         }
473 }
474
475 static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
476 {
477         struct pci_dev *pdev = box->pci_dev;
478         int box_ctl = uncore_pci_box_ctl(box);
479         u32 config = 0;
480
481         if (!pci_read_config_dword(pdev, box_ctl, &config)) {
482                 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
483                 pci_write_config_dword(pdev, box_ctl, config);
484         }
485 }
486
487 static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
488 {
489         struct pci_dev *pdev = box->pci_dev;
490         struct hw_perf_event *hwc = &event->hw;
491
492         pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
493 }
494
495 static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
496 {
497         struct pci_dev *pdev = box->pci_dev;
498         struct hw_perf_event *hwc = &event->hw;
499
500         pci_write_config_dword(pdev, hwc->config_base, hwc->config);
501 }
502
/*
 * Read a 48-bit PMON counter that is exposed as two consecutive 32-bit
 * PCI config dwords (low half at event_base, high half at event_base + 4).
 *
 * The two halves are stored directly into the u64 via pointer casts,
 * which relies on little-endian byte order — fine here since this code
 * is x86-only.  Read errors are ignored; the corresponding half then
 * stays zero.
 */
static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
	pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);

	return count;
}
514
515 static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
516 {
517         struct pci_dev *pdev = box->pci_dev;
518         int box_ctl = uncore_pci_box_ctl(box);
519
520         pci_write_config_dword(pdev, box_ctl, SNBEP_PMON_BOX_CTL_INT);
521 }
522
523 static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
524 {
525         u64 config;
526         unsigned msr;
527
528         msr = uncore_msr_box_ctl(box);
529         if (msr) {
530                 rdmsrl(msr, config);
531                 config |= SNBEP_PMON_BOX_CTL_FRZ;
532                 wrmsrl(msr, config);
533         }
534 }
535
536 static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
537 {
538         u64 config;
539         unsigned msr;
540
541         msr = uncore_msr_box_ctl(box);
542         if (msr) {
543                 rdmsrl(msr, config);
544                 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
545                 wrmsrl(msr, config);
546         }
547 }
548
549 static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
550 {
551         struct hw_perf_event *hwc = &event->hw;
552         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
553
554         if (reg1->idx != EXTRA_REG_NONE)
555                 wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));
556
557         wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
558 }
559
560 static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
561                                         struct perf_event *event)
562 {
563         struct hw_perf_event *hwc = &event->hw;
564
565         wrmsrl(hwc->config_base, hwc->config);
566 }
567
568 static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
569 {
570         unsigned msr = uncore_msr_box_ctl(box);
571
572         if (msr)
573                 wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
574 }
575
/* Generic SNB-EP PMON sysfs format: event, umask, edge, inv, 8-bit thresh. */
static struct attribute *snbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};
584
/* Ubox format: same as generic but only a 5-bit threshold field. */
static struct attribute *snbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};
593
/* Cbox format: adds tid_en and the box filter fields (tid/nid/state/opc). */
static struct attribute *snbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_nid.attr,
	&format_attr_filter_state.attr,
	&format_attr_filter_opc.attr,
	NULL,
};
607
/* PCU format: occupancy select/invert/edge and the four band filters. */
static struct attribute *snbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};
622
/*
 * QPI format: extended event select plus the packet match (config1) and
 * packet mask (config2) fields.
 */
static struct attribute *snbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};
649
/*
 * Predefined IMC events.  The CAS count scale (6.103515625e-5) converts
 * a 64-byte cache-line count into MiB (64 / 2^20).
 */
static struct uncore_event_desc snbep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};
660
/* Predefined QPI events (drs_data/ncb_data use the extended event select). */
static struct uncore_event_desc snbep_uncore_qpi_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,       "event=0x14"),
	INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
	INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x102,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x103,umask=0x04"),
	{ /* end: all zeroes */ },
};
668
/* Generic format group shared by the SNB-EP PCI boxes (HA/IMC/R2PCIe/R3QPI). */
static const struct attribute_group snbep_uncore_format_group = {
	.name = "format",
	.attrs = snbep_uncore_formats_attr,
};
673
/* Format group for the Ubox (5-bit threshold field). */
static const struct attribute_group snbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_ubox_formats_attr,
};
678
/* Format group for the Cbox (adds the box filter fields). */
static const struct attribute_group snbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_cbox_formats_attr,
};
683
/* Format group for the PCU (occupancy select/invert/edge, band filters). */
static const struct attribute_group snbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = snbep_uncore_pcu_formats_attr,
};
688
/* Format group for the QPI boxes (packet match/mask filters). */
static const struct attribute_group snbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = snbep_uncore_qpi_formats_attr,
};
693
/*
 * Common MSR-based box callbacks, without ->init_box so later platforms
 * (e.g. IvyTown) can substitute their own init routine.
 */
#define __SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

/* Full MSR callback set including the SNB-EP init routine. */
#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),			\
	.init_box	= snbep_uncore_msr_init_box		\

/* Default ops for SNB-EP MSR-based boxes (Ubox uses these as-is). */
static struct intel_uncore_ops snbep_uncore_msr_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};
708
/*
 * Common PCI box callbacks; ->enable_event is deliberately left out so
 * the QPI boxes can install their filter-aware variant.
 */
#define SNBEP_UNCORE_PCI_OPS_COMMON_INIT()			\
	.init_box	= snbep_uncore_pci_init_box,		\
	.disable_box	= snbep_uncore_pci_disable_box,		\
	.enable_box	= snbep_uncore_pci_enable_box,		\
	.disable_event	= snbep_uncore_pci_disable_event,	\
	.read_counter	= snbep_uncore_pci_read_counter
715
716 static struct intel_uncore_ops snbep_uncore_pci_ops = {
717         SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
718         .enable_event   = snbep_uncore_pci_enable_event,        \
719 };
720
/*
 * Cbox event constraints: each entry maps an event code to the bitmask
 * of counters (bit n = counter n) that may count it.
 */
static struct event_constraint snbep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0xe),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	EVENT_CONSTRAINT_END
};
750
/* R2PCIe event constraints (event code -> allowed counter bitmask). */
static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	EVENT_CONSTRAINT_END
};
764
/* R3QPI event constraints (event code -> allowed counter bitmask). */
static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};
796
/* SNB-EP Ubox: single box, 2 general counters plus a fixed UCLK counter. */
static struct intel_uncore_type snbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &snbep_uncore_msr_ops,
	.format_group	= &snbep_uncore_ubox_format_group,
};
811
/*
 * Cbox extra-register table: maps (event,umask) selections onto the
 * bitmask of filter fields (->idx) they require in the box filter MSR.
 * snbep_cbox_hw_config() ORs together the ->idx of all matching rows.
 */
static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
	EVENT_EXTRA_END
};
840
841 static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
842 {
843         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
844         struct intel_uncore_extra_reg *er = &box->shared_regs[0];
845         int i;
846
847         if (uncore_box_is_fake(box))
848                 return;
849
850         for (i = 0; i < 5; i++) {
851                 if (reg1->alloc & (0x1 << i))
852                         atomic_sub(1 << (i * 6), &er->ref);
853         }
854         reg1->alloc = 0;
855 }
856
/*
 * Try to claim the Cbox filter-register fields this event needs.
 *
 * reg1->idx is a bitmask of the (up to 5) filter fields required; each
 * field has a 6-bit reference counter packed into er->ref.  A field may
 * be shared when its current value already matches ours.  Returns NULL
 * on success, or the empty constraint when a field is held with a
 * conflicting value.
 */
static struct event_constraint *
__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
			    u64 (*cbox_filter_mask)(int fields))
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i, alloc = 0;
	unsigned long flags;
	u64 mask;

	/* no filter fields requested: nothing to arbitrate */
	if (reg1->idx == EXTRA_REG_NONE)
		return NULL;

	raw_spin_lock_irqsave(&er->lock, flags);
	for (i = 0; i < 5; i++) {
		if (!(reg1->idx & (0x1 << i)))
			continue;
		/* real boxes keep fields across calls; skip ones we already own */
		if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
			continue;

		mask = cbox_filter_mask(0x1 << i);
		/* field is free, or already programmed with the same value */
		if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
		    !((reg1->config ^ er->config) & mask)) {
			atomic_add(1 << (i * 6), &er->ref);
			er->config &= ~mask;
			er->config |= reg1->config & mask;
			alloc |= (0x1 << i);
		} else {
			break;
		}
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);
	/* a conflicting field stopped the loop early: roll back */
	if (i < 5)
		goto fail;

	/* record ownership so snbep_cbox_put_constraint() can release it */
	if (!uncore_box_is_fake(box))
		reg1->alloc |= alloc;

	return NULL;
fail:
	for (; i >= 0; i--) {
		if (alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	return &uncore_constraint_empty;
}
903
904 static u64 snbep_cbox_filter_mask(int fields)
905 {
906         u64 mask = 0;
907
908         if (fields & 0x1)
909                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
910         if (fields & 0x2)
911                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
912         if (fields & 0x4)
913                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
914         if (fields & 0x8)
915                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
916
917         return mask;
918 }
919
/* Cbox constraint hook: arbitrate filter fields with the SNB-EP mask. */
static struct event_constraint *
snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
}
925
926 static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
927 {
928         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
929         struct extra_reg *er;
930         int idx = 0;
931
932         for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
933                 if (er->event != (event->hw.config & er->config_mask))
934                         continue;
935                 idx |= er->idx;
936         }
937
938         if (idx) {
939                 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
940                         SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
941                 reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
942                 reg1->idx = idx;
943         }
944         return 0;
945 }
946
/* Cbox ops: common MSR callbacks plus filter-register arbitration. */
static struct intel_uncore_ops snbep_uncore_cbox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_cbox_hw_config,
	.get_constraint		= snbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
953
/*
 * SNB-EP Cbox: up to 8 boxes (trimmed to the core count at init time in
 * snbep_uncore_cpu_init()), one shared filter register per box.
 */
static struct intel_uncore_type snbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &snbep_uncore_cbox_ops,
	.format_group		= &snbep_uncore_cbox_format_group,
};
969
970 static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
971 {
972         struct hw_perf_event *hwc = &event->hw;
973         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
974         u64 config = reg1->config;
975
976         if (new_idx > reg1->idx)
977                 config <<= 8 * (new_idx - reg1->idx);
978         else
979                 config >>= 8 * (reg1->idx - new_idx);
980
981         if (modify) {
982                 hwc->config += new_idx - reg1->idx;
983                 reg1->config = config;
984                 reg1->idx = new_idx;
985         }
986         return config;
987 }
988
/*
 * Claim one of the four 8-bit band-filter slots in the shared PCU filter
 * register.  If the preferred slot (reg1->idx) is taken with a different
 * value, try the remaining slots in turn, relocating the filter byte via
 * snbep_pcu_alter_er().  Returns NULL on success or the empty constraint
 * when every slot conflicts.
 */
static struct event_constraint *
snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	unsigned long flags;
	int idx = reg1->idx;
	u64 mask, config1 = reg1->config;
	bool ok = false;

	/* no filter needed, or a real box that already holds its slot */
	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;
again:
	mask = 0xffULL << (idx * 8);
	raw_spin_lock_irqsave(&er->lock, flags);
	/* slot is free, or already programmed with the same byte: share it */
	if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
	    !((config1 ^ er->config) & mask)) {
		atomic_add(1 << (idx * 8), &er->ref);
		er->config &= ~mask;
		er->config |= config1 & mask;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (!ok) {
		/* try the next slot; give up once we wrap around */
		idx = (idx + 1) % 4;
		if (idx != reg1->idx) {
			config1 = snbep_pcu_alter_er(event, idx, false);
			goto again;
		}
		return &uncore_constraint_empty;
	}

	if (!uncore_box_is_fake(box)) {
		/* commit the slot move and remember we hold a reference */
		if (idx != reg1->idx)
			snbep_pcu_alter_er(event, idx, true);
		reg1->alloc = 1;
	}
	return NULL;
}
1030
1031 static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
1032 {
1033         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1034         struct intel_uncore_extra_reg *er = &box->shared_regs[0];
1035
1036         if (uncore_box_is_fake(box) || !reg1->alloc)
1037                 return;
1038
1039         atomic_sub(1 << (reg1->idx * 8), &er->ref);
1040         reg1->alloc = 0;
1041 }
1042
1043 static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1044 {
1045         struct hw_perf_event *hwc = &event->hw;
1046         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1047         int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
1048
1049         if (ev_sel >= 0xb && ev_sel <= 0xe) {
1050                 reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
1051                 reg1->idx = ev_sel - 0xb;
1052                 reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
1053         }
1054         return 0;
1055 }
1056
/* PCU ops: common MSR callbacks plus band-filter slot arbitration. */
static struct intel_uncore_ops snbep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};
1063
/* SNB-EP PCU: single box with one shared band-filter register. */
static struct intel_uncore_type snbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};
1077
/* NULL-terminated list of all MSR-based SNB-EP uncore PMU types. */
static struct intel_uncore_type *snbep_msr_uncores[] = {
	&snbep_uncore_ubox,
	&snbep_uncore_cbox,
	&snbep_uncore_pcu,
	NULL,
};
1084
1085 void snbep_uncore_cpu_init(void)
1086 {
1087         if (snbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
1088                 snbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
1089         uncore_msr_uncores = snbep_msr_uncores;
1090 }
1091
/* Indices into uncore_extra_pci_dev[die].dev[] for the QPI filter devices. */
enum {
	SNBEP_PCI_QPI_PORT0_FILTER,
	SNBEP_PCI_QPI_PORT1_FILTER,
	BDX_PCI_QPI_PORT2_FILTER,
};
1097
1098 static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1099 {
1100         struct hw_perf_event *hwc = &event->hw;
1101         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1102         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1103
1104         if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
1105                 reg1->idx = 0;
1106                 reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
1107                 reg1->config = event->attr.config1;
1108                 reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
1109                 reg2->config = event->attr.config2;
1110         }
1111         return 0;
1112 }
1113
/*
 * Enable a QPI event.  If a packet filter was configured (reg1->idx set
 * by snbep_qpi_hw_config()), first program the 64-bit match and mask
 * values into the separate per-port filter PCI device, then enable the
 * counter on the main QPI device.
 */
static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
		int die = box->dieid;
		struct pci_dev *filter_pdev = uncore_extra_pci_dev[die].dev[idx];

		/* the filter device may be absent; skip filtering then */
		if (filter_pdev) {
			/* 64-bit match/mask written as two 32-bit halves */
			pci_write_config_dword(filter_pdev, reg1->reg,
						(u32)reg1->config);
			pci_write_config_dword(filter_pdev, reg1->reg + 4,
						(u32)(reg1->config >> 32));
			pci_write_config_dword(filter_pdev, reg2->reg,
						(u32)reg2->config);
			pci_write_config_dword(filter_pdev, reg2->reg + 4,
						(u32)(reg2->config >> 32));
		}
	}

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
1140
/* QPI ops: common PCI callbacks with the filter-aware enable_event. */
static struct intel_uncore_ops snbep_uncore_qpi_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event		= snbep_qpi_enable_event,
	.hw_config		= snbep_qpi_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};
1148
/* Common register layout/ops/format fields shared by SNB-EP PCI box types. */
#define SNBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &snbep_uncore_pci_ops,		\
	.format_group	= &snbep_uncore_format_group
1156
/* SNB-EP Home Agent box. */
static struct intel_uncore_type snbep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
1164
/* SNB-EP IMC: 4 channels, each with a fixed DCLK counter. */
static struct intel_uncore_type snbep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
1176
/* SNB-EP QPI: two ports, with packet match/mask filtering support. */
static struct intel_uncore_type snbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.event_descs		= snbep_uncore_qpi_events,
	.format_group		= &snbep_uncore_qpi_format_group,
};
1191
1192
/* SNB-EP R2PCIe ring-to-PCIe interface box. */
static struct intel_uncore_type snbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
1201
/* SNB-EP R3QPI ring-to-QPI interface box (two links). */
static struct intel_uncore_type snbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
1210
/* Indices into snbep_pci_uncores[], used by the PCI id driver_data. */
enum {
	SNBEP_PCI_UNCORE_HA,
	SNBEP_PCI_UNCORE_IMC,
	SNBEP_PCI_UNCORE_QPI,
	SNBEP_PCI_UNCORE_R2PCIE,
	SNBEP_PCI_UNCORE_R3QPI,
};
1218
/* NULL-terminated list of all PCI-based SNB-EP uncore PMU types. */
static struct intel_uncore_type *snbep_pci_uncores[] = {
	[SNBEP_PCI_UNCORE_HA]		= &snbep_uncore_ha,
	[SNBEP_PCI_UNCORE_IMC]		= &snbep_uncore_imc,
	[SNBEP_PCI_UNCORE_QPI]		= &snbep_uncore_qpi,
	[SNBEP_PCI_UNCORE_R2PCIE]	= &snbep_uncore_r2pcie,
	[SNBEP_PCI_UNCORE_R3QPI]	= &snbep_uncore_r3qpi,
	NULL,
};
1227
/*
 * PCI device table for the SNB-EP uncore units; driver_data encodes the
 * uncore type index and box/filter instance number.  The 0x3c86/0x3c96
 * devices are the QPI port filter devices, registered as extra devices
 * rather than PMU boxes.
 */
static const struct pci_device_id snbep_uncore_pci_ids[] = {
	{ /* Home Agent */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
	},
	{ /* MC Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* QPI Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};
1281
/* PCI driver stub: id table only, probing is done by the uncore core. */
static struct pci_driver snbep_uncore_pci_driver = {
	.name		= "snbep_uncore",
	.id_table	= snbep_uncore_pci_ids,
};
1286
/* Node ID occupies the low 3 bits of the CPUNODEID register. */
#define NODE_ID_MASK	0x7

/*
 * Build the PCI bus number -> physical package id mapping by walking all
 * UBOX devices with PCI id @devid: read the local node id at @nodeid_loc
 * and look it up in the node-id mapping register at @idmap_loc (3 bits
 * per node).  Buses without a UBOX inherit the id of a neighbouring bus,
 * scanning downwards when @reverse is set, upwards otherwise.
 */
static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool reverse)
{
	struct pci_dev *ubox_dev = NULL;
	int i, bus, nodeid, segment;
	struct pci2phy_map *map;
	int err = 0;
	u32 config = 0;

	while (1) {
		/* find the UBOX device */
		ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
		if (!ubox_dev)
			break;
		bus = ubox_dev->bus->number;
		/* get the Node ID of the local register */
		err = pci_read_config_dword(ubox_dev, nodeid_loc, &config);
		if (err)
			break;
		nodeid = config & NODE_ID_MASK;
		/* get the Node ID mapping */
		err = pci_read_config_dword(ubox_dev, idmap_loc, &config);
		if (err)
			break;

		segment = pci_domain_nr(ubox_dev->bus);
		raw_spin_lock(&pci2phy_map_lock);
		map = __find_pci2phy_map(segment);
		if (!map) {
			raw_spin_unlock(&pci2phy_map_lock);
			err = -ENOMEM;
			break;
		}

		/*
		 * every three bits in the Node ID mapping register maps
		 * to a particular node.
		 */
		for (i = 0; i < 8; i++) {
			if (nodeid == ((config >> (3 * i)) & 0x7)) {
				map->pbus_to_physid[bus] = i;
				break;
			}
		}
		raw_spin_unlock(&pci2phy_map_lock);
	}

	if (!err) {
		/*
		 * For PCI bus with no UBOX device, find the next bus
		 * that has UBOX device and use its mapping.
		 */
		raw_spin_lock(&pci2phy_map_lock);
		list_for_each_entry(map, &pci2phy_map_head, list) {
			i = -1;
			if (reverse) {
				for (bus = 255; bus >= 0; bus--) {
					if (map->pbus_to_physid[bus] >= 0)
						i = map->pbus_to_physid[bus];
					else
						map->pbus_to_physid[bus] = i;
				}
			} else {
				for (bus = 0; bus <= 255; bus++) {
					if (map->pbus_to_physid[bus] >= 0)
						i = map->pbus_to_physid[bus];
					else
						map->pbus_to_physid[bus] = i;
				}
			}
		}
		raw_spin_unlock(&pci2phy_map_lock);
	}

	/* drop the reference still held after a mid-loop break (NULL-safe) */
	pci_dev_put(ubox_dev);

	/* pci_read_config_dword() returns PCIBIOS_* codes; convert them */
	return err ? pcibios_err_to_errno(err) : 0;
}
1369
1370 int snbep_uncore_pci_init(void)
1371 {
1372         int ret = snbep_pci2phy_map_init(0x3ce0, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
1373         if (ret)
1374                 return ret;
1375         uncore_pci_uncores = snbep_pci_uncores;
1376         uncore_pci_driver = &snbep_uncore_pci_driver;
1377         return 0;
1378 }
1379 /* end of Sandy Bridge-EP uncore support */
1380
1381 /* IvyTown uncore support */
1382 static void ivbep_uncore_msr_init_box(struct intel_uncore_box *box)
1383 {
1384         unsigned msr = uncore_msr_box_ctl(box);
1385         if (msr)
1386                 wrmsrl(msr, IVBEP_PMON_BOX_CTL_INT);
1387 }
1388
1389 static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box)
1390 {
1391         struct pci_dev *pdev = box->pci_dev;
1392
1393         pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
1394 }
1395
/* SNB-EP MSR callbacks reused with the IvyTown-specific init routine. */
#define IVBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.init_box	= ivbep_uncore_msr_init_box,		\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

/* Default ops for IvyTown MSR-based boxes. */
static struct intel_uncore_ops ivbep_uncore_msr_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};
1407
/* Default ops for IvyTown PCI-based boxes (IVT init, SNB-EP callbacks). */
static struct intel_uncore_ops ivbep_uncore_pci_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};
1416
/* Common register layout/ops/format fields for IvyTown PCI box types. */
#define IVBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= IVBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &ivbep_uncore_pci_ops,			\
	.format_group	= &ivbep_uncore_format_group
1424
/* Generic IVT uncore sysfs format attributes (8-bit threshold) */
static struct attribute *ivbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

/* Ubox format attributes: the Ubox only has a 5-bit threshold field */
static struct attribute *ivbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};
1442
/*
 * Cbox format attributes, including the config1-based filter fields
 * (tid/link/state/nid/opcode/nc/c6/isoc) handled via the shared
 * filter register.
 */
static struct attribute *ivbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_link.attr,
	&format_attr_filter_state2.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};
1459
/* PCU format attributes: occupancy-based events plus four band filters */
static struct attribute *ivbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};
1473
/*
 * QPI format attributes: basic event fields plus the match/mask pairs
 * that program the QPI packet match registers on the companion
 * "filter" PCI devices.
 */
static struct attribute *ivbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};
1499
/* sysfs "format" attribute groups for the IvyTown uncore PMU types */
static const struct attribute_group ivbep_uncore_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_formats_attr,
};

static const struct attribute_group ivbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_ubox_formats_attr,
};

static const struct attribute_group ivbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_cbox_formats_attr,
};

static const struct attribute_group ivbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_pcu_formats_attr,
};

static const struct attribute_group ivbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_qpi_formats_attr,
};
1524
/* IVT Ubox: 2 general counters plus a fixed UCLK counter, SNB-EP offsets */
static struct intel_uncore_type ivbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= IVBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_ubox_format_group,
};
1539
/*
 * Cbox events that require the shared filter register.  Each entry maps
 * an event/umask encoding (matched under config_mask) to the set of
 * filter fields it uses; the idx values are the field-select bits
 * consumed by ivbep_cbox_filter_mask()/ivbep_cbox_hw_config().
 * Hardware-derived table — do not reorder or edit without the SDM.
 */
static struct extra_reg ivbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	EVENT_EXTRA_END
};
1580
1581 static u64 ivbep_cbox_filter_mask(int fields)
1582 {
1583         u64 mask = 0;
1584
1585         if (fields & 0x1)
1586                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_TID;
1587         if (fields & 0x2)
1588                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK;
1589         if (fields & 0x4)
1590                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
1591         if (fields & 0x8)
1592                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NID;
1593         if (fields & 0x10) {
1594                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
1595                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NC;
1596                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_C6;
1597                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
1598         }
1599
1600         return mask;
1601 }
1602
/* Cbox constraint lookup: common SNB-EP logic with the IVT filter mask */
static struct event_constraint *
ivbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, ivbep_cbox_filter_mask);
}
1608
1609 static int ivbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1610 {
1611         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1612         struct extra_reg *er;
1613         int idx = 0;
1614
1615         for (er = ivbep_uncore_cbox_extra_regs; er->msr; er++) {
1616                 if (er->event != (event->hw.config & er->config_mask))
1617                         continue;
1618                 idx |= er->idx;
1619         }
1620
1621         if (idx) {
1622                 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
1623                         SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1624                 reg1->config = event->attr.config1 & ivbep_cbox_filter_mask(idx);
1625                 reg1->idx = idx;
1626         }
1627         return 0;
1628 }
1629
/*
 * Enable a Cbox event.  If the event uses the shared filter, program
 * the 64-bit filter value as two 32-bit MSR writes before setting the
 * enable bit in the event control register.
 */
static void ivbep_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		u64 filter = uncore_shared_reg_config(box, 0);
		wrmsrl(reg1->reg, filter & 0xffffffff);
		/*
		 * NOTE(review): upper half goes to a companion filter MSR
		 * at reg + 6 — presumably the IVT MSR layout; confirm
		 * against the uncore PMU guide before changing.
		 */
		wrmsrl(reg1->reg + 6, filter >> 32);
	}

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
1643
/* Cbox ops: IVT-specific enable (filter programming) and constraints */
static struct intel_uncore_ops ivbep_uncore_cbox_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= ivbep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= ivbep_cbox_hw_config,
	.get_constraint		= ivbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

/* IVT Cbox: up to 15 boxes (one per core), one shared filter register */
static struct intel_uncore_type ivbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 15,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &ivbep_uncore_cbox_ops,
	.format_group		= &ivbep_uncore_cbox_format_group,
};
1671
/* PCU ops: common MSR ops plus SNB-EP band-filter constraint handling */
static struct intel_uncore_ops ivbep_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

/* IVT power control unit: one box, shared register for band filters */
static struct intel_uncore_type ivbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_pcu_ops,
	.format_group		= &ivbep_uncore_pcu_format_group,
};
1692
/* NULL-terminated list of IVT MSR-based uncore types */
static struct intel_uncore_type *ivbep_msr_uncores[] = {
	&ivbep_uncore_ubox,
	&ivbep_uncore_cbox,
	&ivbep_uncore_pcu,
	NULL,
};
1699
1700 void ivbep_uncore_cpu_init(void)
1701 {
1702         if (ivbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
1703                 ivbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
1704         uncore_msr_uncores = ivbep_msr_uncores;
1705 }
1706
/* IVT home agent: two boxes, common PCI layout */
static struct intel_uncore_type ivbep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

/* IVT memory controller: 8 channels, fixed DCLK counter per channel */
static struct intel_uncore_type ivbep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};
1726
/*
 * Registers in IRP boxes are not properly aligned, so the per-counter
 * control and counter offsets are listed explicitly instead of being
 * derived from a base + stride.
 */
static unsigned ivbep_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
static unsigned ivbep_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};
1730
/* Enable an IRP event: set the enable bit in its per-counter control reg */
static void ivbep_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx],
			       hwc->config | SNBEP_PMON_CTL_EN);
}

/* Disable an IRP event: rewrite the control reg without the enable bit */
static void ivbep_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx], hwc->config);
}
1747
/*
 * Read a 64-bit IRP counter as two 32-bit PCI config reads.  The low
 * and high dwords are deposited directly into the two halves of
 * 'count' via the (u32 *) casts; this relies on little-endian layout,
 * which always holds on x86.
 */
static u64 ivbep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
	pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);

	return count;
}
1759
/* IRP ops: custom event enable/disable/read due to unaligned registers */
static struct intel_uncore_ops ivbep_uncore_irp_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= ivbep_uncore_irp_disable_event,
	.enable_event	= ivbep_uncore_irp_enable_event,
	.read_counter	= ivbep_uncore_irp_read_counter,
};

/* IVT IRP: no perf_ctr/event_ctl bases — offsets come from the tables above */
static struct intel_uncore_type ivbep_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= IVBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &ivbep_uncore_irp_ops,
	.format_group		= &ivbep_uncore_format_group,
};
1779
/* QPI ops: SNB-EP match/mask register programming via the filter devices */
static struct intel_uncore_ops ivbep_uncore_qpi_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_qpi_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
	.hw_config	= snbep_qpi_hw_config,
	.get_constraint	= uncore_get_constraint,
	.put_constraint	= uncore_put_constraint,
};

/* IVT QPI: three ports, shared register for the match/mask state */
static struct intel_uncore_type ivbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_qpi_ops,
	.format_group		= &ivbep_uncore_qpi_format_group,
};
1805
/* IVT R2PCIe ring-to-PCIe agent: SNB-EP counter constraints apply */
static struct intel_uncore_type ivbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

/* IVT R3QPI ring-to-QPI agent: two boxes, SNB-EP counter constraints */
static struct intel_uncore_type ivbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};
1823
/* Indices into ivbep_pci_uncores[], also encoded in PCI driver_data */
enum {
	IVBEP_PCI_UNCORE_HA,
	IVBEP_PCI_UNCORE_IMC,
	IVBEP_PCI_UNCORE_IRP,
	IVBEP_PCI_UNCORE_QPI,
	IVBEP_PCI_UNCORE_R2PCIE,
	IVBEP_PCI_UNCORE_R3QPI,
};

/* NULL-terminated list of IVT PCI-based uncore types, indexed as above */
static struct intel_uncore_type *ivbep_pci_uncores[] = {
	[IVBEP_PCI_UNCORE_HA]	= &ivbep_uncore_ha,
	[IVBEP_PCI_UNCORE_IMC]	= &ivbep_uncore_imc,
	[IVBEP_PCI_UNCORE_IRP]	= &ivbep_uncore_irp,
	[IVBEP_PCI_UNCORE_QPI]	= &ivbep_uncore_qpi,
	[IVBEP_PCI_UNCORE_R2PCIE]	= &ivbep_uncore_r2pcie,
	[IVBEP_PCI_UNCORE_R3QPI]	= &ivbep_uncore_r3qpi,
	NULL,
};
1842
/*
 * IVT uncore PCI device table.  driver_data packs the uncore type
 * index and box index; UNCORE_EXTRA_PCI_DEV entries are the QPI port
 * filter devices used for match/mask programming, not PMON boxes.
 */
static const struct pci_device_id ivbep_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 4 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 4 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};
1928
/* PCI driver stub: device table only, probed by the generic uncore core */
static struct pci_driver ivbep_uncore_pci_driver = {
	.name		= "ivbep_uncore",
	.id_table	= ivbep_uncore_pci_ids,
};
1933
1934 int ivbep_uncore_pci_init(void)
1935 {
1936         int ret = snbep_pci2phy_map_init(0x0e1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
1937         if (ret)
1938                 return ret;
1939         uncore_pci_uncores = ivbep_pci_uncores;
1940         uncore_pci_driver = &ivbep_uncore_pci_driver;
1941         return 0;
1942 }
1943 /* end of IvyTown uncore support */
1944
1945 /* KNL uncore support */
/* KNL Ubox sysfs format attributes (5-bit threshold, tid enable) */
static struct attribute *knl_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

static const struct attribute_group knl_uncore_ubox_format_group = {
	.name = "format",
	.attrs = knl_uncore_ubox_formats_attr,
};
1960
/* KNL Ubox: reuses the Haswell-EP register layout and SNB-EP MSR ops */
static struct intel_uncore_type knl_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= KNL_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops			= &snbep_uncore_msr_ops,
	.format_group		= &knl_uncore_ubox_format_group,
};
1975
/* KNL CHA sysfs format attributes, including the config1 filter fields */
static struct attribute *knl_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_qor.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid4.attr,
	&format_attr_filter_link3.attr,
	&format_attr_filter_state4.attr,
	&format_attr_filter_local.attr,
	&format_attr_filter_all_op.attr,
	&format_attr_filter_nnm.attr,
	&format_attr_filter_opc3.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static const struct attribute_group knl_uncore_cha_format_group = {
	.name = "format",
	.attrs = knl_uncore_cha_formats_attr,
};
2000
/* KNL CHA events restricted to counter 0 */
static struct event_constraint knl_uncore_cha_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	EVENT_CONSTRAINT_END
};

/*
 * KNL CHA events that need the filter register; idx values are the
 * field-select bits consumed by knl_cha_filter_mask().
 */
static struct extra_reg knl_uncore_cha_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x3d, 0xff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x4),
	EVENT_EXTRA_END
};
2016
2017 static u64 knl_cha_filter_mask(int fields)
2018 {
2019         u64 mask = 0;
2020
2021         if (fields & 0x1)
2022                 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_TID;
2023         if (fields & 0x2)
2024                 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_STATE;
2025         if (fields & 0x4)
2026                 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_OP;
2027         return mask;
2028 }
2029
/* CHA constraint lookup: common SNB-EP logic with the KNL filter mask */
static struct event_constraint *
knl_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, knl_cha_filter_mask);
}
2035
/*
 * Set up the extra (filter) register for a KNL CHA event.  Collects the
 * filter fields needed by every matching extra-reg entry; if any are
 * needed, targets this box's FILTER0 MSR and masks config1 to the
 * usable bits.  The remote/local-node and NNC bits are always forced
 * on so that, by default, traffic from all sources is counted.
 * Always returns 0.
 */
static int knl_cha_hw_config(struct intel_uncore_box *box,
			     struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = knl_uncore_cha_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
			    KNL_CHA_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & knl_cha_filter_mask(idx);

		/* Count remote node, local node and NearMemCache accesses */
		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE;
		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE;
		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_NNC;
		reg1->idx = idx;
	}
	return 0;
}
2061
/* Defined later in this file; KNL reuses the HSX Cbox enable path. */
static void hswep_cbox_enable_event(struct intel_uncore_box *box,
				    struct perf_event *event);

/* KNL CHA ops: SNB-EP base handlers with KNL filter/constraint hooks */
static struct intel_uncore_ops knl_uncore_cha_ops = {
	.init_box		= snbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= knl_cha_hw_config,
	.get_constraint		= knl_cha_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

/* KNL caching/home agent: up to 38 tiles, one shared filter register */
static struct intel_uncore_type knl_uncore_cha = {
	.name			= "cha",
	.num_counters		= 4,
	.num_boxes		= 38,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= KNL_CHA_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= KNL_CHA_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= knl_uncore_cha_constraints,
	.ops			= &knl_uncore_cha_ops,
	.format_group		= &knl_uncore_cha_format_group,
};
2092
/* KNL PCU sysfs format attributes (2-bit event, occupancy sub-events) */
static struct attribute *knl_uncore_pcu_formats_attr[] = {
	&format_attr_event2.attr,
	&format_attr_use_occ_ctr.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh6.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge_det.attr,
	NULL,
};

static const struct attribute_group knl_uncore_pcu_format_group = {
	.name = "format",
	.attrs = knl_uncore_pcu_formats_attr,
};
2110
/* KNL power control unit: HSX register layout, plain SNB-EP MSR ops */
static struct intel_uncore_type knl_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= KNL_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.ops			= &snbep_uncore_msr_ops,
	.format_group		= &knl_uncore_pcu_format_group,
};

/* NULL-terminated list of KNL MSR-based uncore types */
static struct intel_uncore_type *knl_msr_uncores[] = {
	&knl_uncore_ubox,
	&knl_uncore_cha,
	&knl_uncore_pcu,
	NULL,
};

/* Register the KNL MSR-based uncore PMUs with the generic uncore core */
void knl_uncore_cpu_init(void)
{
	uncore_msr_uncores = knl_msr_uncores;
}
2135
/*
 * Enable a KNL IMC/EDC box by clearing the box control register
 * (writing 0 releases the freeze; there is no explicit enable bit).
 */
static void knl_uncore_imc_enable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	pci_write_config_dword(pdev, box_ctl, 0);
}
2143
2144 static void knl_uncore_imc_enable_event(struct intel_uncore_box *box,
2145                                         struct perf_event *event)
2146 {
2147         struct pci_dev *pdev = box->pci_dev;
2148         struct hw_perf_event *hwc = &event->hw;
2149
2150         if ((event->attr.config & SNBEP_PMON_CTL_EV_SEL_MASK)
2151                                                         == UNCORE_FIXED_EVENT)
2152                 pci_write_config_dword(pdev, hwc->config_base,
2153                                        hwc->config | KNL_PMON_FIXED_CTL_EN);
2154         else
2155                 pci_write_config_dword(pdev, hwc->config_base,
2156                                        hwc->config | SNBEP_PMON_CTL_EN);
2157 }
2158
2159 static struct intel_uncore_ops knl_uncore_imc_ops = {
2160         .init_box       = snbep_uncore_pci_init_box,
2161         .disable_box    = snbep_uncore_pci_disable_box,
2162         .enable_box     = knl_uncore_imc_enable_box,
2163         .read_counter   = snbep_uncore_pci_read_counter,
2164         .enable_event   = knl_uncore_imc_enable_event,
2165         .disable_event  = snbep_uncore_pci_disable_event,
2166 };
2167
/* KNL MC UClk: 2 boxes, 4 x 48-bit GP counters plus a 48-bit fixed counter. */
static struct intel_uncore_type knl_uncore_imc_uclk = {
	.name			= "imc_uclk",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};
2183
/* KNL MC DClk: 6 boxes (3 channels x 2 MCs), same counter layout as UClk. */
static struct intel_uncore_type knl_uncore_imc_dclk = {
	.name			= "imc",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_MC0_CH0_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_MC0_CH0_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_MC0_CH0_MSR_PMON_FIXED_LOW,
	.fixed_ctl		= KNL_MC0_CH0_MSR_PMON_FIXED_CTL,
	.box_ctl		= KNL_MC0_CH0_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};
2199
/* KNL EDC UClk: 8 boxes (EDC0-7), shares register layout with MC UClk. */
static struct intel_uncore_type knl_uncore_edc_uclk = {
	.name			= "edc_uclk",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};
2215
/* KNL EDC EClk: 8 boxes (EDC0-7) with their own ECLK register block. */
static struct intel_uncore_type knl_uncore_edc_eclk = {
	.name			= "edc_eclk",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_EDC0_ECLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW,
	.fixed_ctl		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL,
	.box_ctl		= KNL_EDC0_ECLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};
2231
/* Event 0x23 may only run on counters 0-1 of the KNL M2PCIe box. */
static struct event_constraint knl_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	EVENT_CONSTRAINT_END
};

/* KNL M2PCIe: single box using the common SNB-EP PCI register layout. */
static struct intel_uncore_type knl_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= knl_uncore_m2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2245
/* sysfs "format" attributes for the KNL IRP box (adds the "qor" field). */
static struct attribute *knl_uncore_irp_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_qor.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

/* Exported under <pmu>/format in sysfs. */
static const struct attribute_group knl_uncore_irp_format_group = {
	.name = "format",
	.attrs = knl_uncore_irp_formats_attr,
};
2260
/* KNL IRP: one box, two counters, IRP-specific event mask and box control. */
static struct intel_uncore_type knl_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= KNL_IRP_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= KNL_IRP_PCI_PMON_BOX_CTL,
	.ops			= &snbep_uncore_pci_ops,
	.format_group		= &knl_uncore_irp_format_group,
};
2273
/* Indices into knl_pci_uncores[], referenced by the PCI id table below. */
enum {
	KNL_PCI_UNCORE_MC_UCLK,
	KNL_PCI_UNCORE_MC_DCLK,
	KNL_PCI_UNCORE_EDC_UCLK,
	KNL_PCI_UNCORE_EDC_ECLK,
	KNL_PCI_UNCORE_M2PCIE,
	KNL_PCI_UNCORE_IRP,
};

/* PCI-based PMON units on KNL, ordered to match the enum above. */
static struct intel_uncore_type *knl_pci_uncores[] = {
	[KNL_PCI_UNCORE_MC_UCLK]	= &knl_uncore_imc_uclk,
	[KNL_PCI_UNCORE_MC_DCLK]	= &knl_uncore_imc_dclk,
	[KNL_PCI_UNCORE_EDC_UCLK]	= &knl_uncore_edc_uclk,
	[KNL_PCI_UNCORE_EDC_ECLK]	= &knl_uncore_edc_eclk,
	[KNL_PCI_UNCORE_M2PCIE]		= &knl_uncore_m2pcie,
	[KNL_PCI_UNCORE_IRP]		= &knl_uncore_irp,
	NULL,
};
2292
/*
 * KNL uses a common PCI device ID for multiple instances of an Uncore PMU
 * device type. Prior to KNL, each instance of a PMU device type had a unique
 * device ID.
 *
 *	PCI Device ID	Uncore PMU Devices
 *	----------------------------------
 *	0x7841		MC0 UClk, MC1 UClk
 *	0x7843		MC0 DClk CH 0, MC0 DClk CH 1, MC0 DClk CH 2,
 *			MC1 DClk CH 0, MC1 DClk CH 1, MC1 DClk CH 2
 *	0x7833		EDC0 UClk, EDC1 UClk, EDC2 UClk, EDC3 UClk,
 *			EDC4 UClk, EDC5 UClk, EDC6 UClk, EDC7 UClk
 *	0x7835		EDC0 EClk, EDC1 EClk, EDC2 EClk, EDC3 EClk,
 *			EDC4 EClk, EDC5 EClk, EDC6 EClk, EDC7 EClk
 *	0x7817		M2PCIe
 *	0x7814		IRP
 */
2310
/*
 * Shared-ID KNL devices are disambiguated by UNCORE_PCI_DEV_FULL_DATA(),
 * whose first two arguments appear to be the PCI device/function the unit
 * lives at (NOTE(review): confirm against the macro definition), followed
 * by the knl_pci_uncores[] index and the box number.
 */
static const struct pci_device_id knl_uncore_pci_ids[] = {
	{ /* MC0 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 0, KNL_PCI_UNCORE_MC_UCLK, 0),
	},
	{ /* MC1 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 0, KNL_PCI_UNCORE_MC_UCLK, 1),
	},
	{ /* MC0 DClk CH 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 2, KNL_PCI_UNCORE_MC_DCLK, 0),
	},
	{ /* MC0 DClk CH 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 3, KNL_PCI_UNCORE_MC_DCLK, 1),
	},
	{ /* MC0 DClk CH 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 4, KNL_PCI_UNCORE_MC_DCLK, 2),
	},
	{ /* MC1 DClk CH 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 2, KNL_PCI_UNCORE_MC_DCLK, 3),
	},
	{ /* MC1 DClk CH 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 3, KNL_PCI_UNCORE_MC_DCLK, 4),
	},
	{ /* MC1 DClk CH 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 4, KNL_PCI_UNCORE_MC_DCLK, 5),
	},
	{ /* EDC0 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, KNL_PCI_UNCORE_EDC_UCLK, 0),
	},
	{ /* EDC1 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, KNL_PCI_UNCORE_EDC_UCLK, 1),
	},
	{ /* EDC2 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(17, 0, KNL_PCI_UNCORE_EDC_UCLK, 2),
	},
	{ /* EDC3 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, KNL_PCI_UNCORE_EDC_UCLK, 3),
	},
	{ /* EDC4 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(19, 0, KNL_PCI_UNCORE_EDC_UCLK, 4),
	},
	{ /* EDC5 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(20, 0, KNL_PCI_UNCORE_EDC_UCLK, 5),
	},
	{ /* EDC6 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 0, KNL_PCI_UNCORE_EDC_UCLK, 6),
	},
	{ /* EDC7 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 0, KNL_PCI_UNCORE_EDC_UCLK, 7),
	},
	{ /* EDC0 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(24, 2, KNL_PCI_UNCORE_EDC_ECLK, 0),
	},
	{ /* EDC1 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(25, 2, KNL_PCI_UNCORE_EDC_ECLK, 1),
	},
	{ /* EDC2 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(26, 2, KNL_PCI_UNCORE_EDC_ECLK, 2),
	},
	{ /* EDC3 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(27, 2, KNL_PCI_UNCORE_EDC_ECLK, 3),
	},
	{ /* EDC4 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(28, 2, KNL_PCI_UNCORE_EDC_ECLK, 4),
	},
	{ /* EDC5 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(29, 2, KNL_PCI_UNCORE_EDC_ECLK, 5),
	},
	{ /* EDC6 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(30, 2, KNL_PCI_UNCORE_EDC_ECLK, 6),
	},
	{ /* EDC7 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(31, 2, KNL_PCI_UNCORE_EDC_ECLK, 7),
	},
	{ /* M2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7817),
		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_M2PCIE, 0),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7814),
		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_IRP, 0),
	},
	{ /* end: all zeroes */ }
};
2418
/*
 * PCI driver shell for the KNL uncore devices; no .probe here — presumably
 * the generic uncore PCI code fills in the callbacks (TODO confirm).
 */
static struct pci_driver knl_uncore_pci_driver = {
	.name		= "knl_uncore",
	.id_table	= knl_uncore_pci_ids,
};
2423
2424 int knl_uncore_pci_init(void)
2425 {
2426         int ret;
2427
2428         /* All KNL PCI based PMON units are on the same PCI bus except IRP */
2429         ret = snb_pci2phy_map_init(0x7814); /* IRP */
2430         if (ret)
2431                 return ret;
2432         ret = snb_pci2phy_map_init(0x7817); /* M2PCIe */
2433         if (ret)
2434                 return ret;
2435         uncore_pci_uncores = knl_pci_uncores;
2436         uncore_pci_driver = &knl_uncore_pci_driver;
2437         return 0;
2438 }
2439
2440 /* end of KNL uncore support */
2441
2442 /* Haswell-EP uncore support */
/* sysfs "format" attributes for the HSW-EP Ubox (5-bit thresh + filters). */
static struct attribute *hswep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_filter_tid2.attr,
	&format_attr_filter_cid.attr,
	NULL,
};

/* Exported under <pmu>/format in sysfs. */
static const struct attribute_group hswep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_ubox_formats_attr,
};
2458
2459 static int hswep_ubox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2460 {
2461         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2462         reg1->reg = HSWEP_U_MSR_PMON_FILTER;
2463         reg1->config = event->attr.config1 & HSWEP_U_MSR_PMON_BOX_FILTER_MASK;
2464         reg1->idx = 0;
2465         return 0;
2466 }
2467
/* Common SNB-EP MSR ops plus Ubox filter handling and shared-reg arbitration. */
static struct intel_uncore_ops hswep_uncore_ubox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_ubox_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};
2474
/* HSW-EP Ubox: one box, 2 x 44-bit GP counters, 48-bit fixed UCLK counter. */
static struct intel_uncore_type hswep_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 44,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.num_shared_regs	= 1,	/* the single Ubox filter register */
	.ops			= &hswep_uncore_ubox_ops,
	.format_group		= &hswep_uncore_ubox_format_group,
};
2490
/* sysfs "format" attributes for the HSW-EP CBox and its filter fields. */
static struct attribute *hswep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid3.attr,
	&format_attr_filter_link2.attr,
	&format_attr_filter_state3.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

/* Exported under <pmu>/format in sysfs. */
static const struct attribute_group hswep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_cbox_formats_attr,
};
2512
/* Counter restrictions for HSW-EP CBox events (event code, counter mask). */
static struct event_constraint hswep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
	EVENT_CONSTRAINT_END
};
2523
/*
 * Map CBox event+umask encodings to the filter-field bits (the third
 * argument) they require; hswep_cbox_hw_config() ORs these together and
 * hswep_cbox_filter_mask() turns them into the filter-register mask.
 */
static struct extra_reg hswep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4028, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4032, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4029, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4033, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x402A, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x12),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	EVENT_EXTRA_END
};
2565
2566 static u64 hswep_cbox_filter_mask(int fields)
2567 {
2568         u64 mask = 0;
2569         if (fields & 0x1)
2570                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_TID;
2571         if (fields & 0x2)
2572                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK;
2573         if (fields & 0x4)
2574                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE;
2575         if (fields & 0x8)
2576                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NID;
2577         if (fields & 0x10) {
2578                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC;
2579                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NC;
2580                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_C6;
2581                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
2582         }
2583         return mask;
2584 }
2585
/* CBox constraint lookup using the HSW-EP filter-mask translation. */
static struct event_constraint *
hswep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, hswep_cbox_filter_mask);
}
2591
2592 static int hswep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2593 {
2594         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2595         struct extra_reg *er;
2596         int idx = 0;
2597
2598         for (er = hswep_uncore_cbox_extra_regs; er->msr; er++) {
2599                 if (er->event != (event->hw.config & er->config_mask))
2600                         continue;
2601                 idx |= er->idx;
2602         }
2603
2604         if (idx) {
2605                 reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
2606                             HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
2607                 reg1->config = event->attr.config1 & hswep_cbox_filter_mask(idx);
2608                 reg1->idx = idx;
2609         }
2610         return 0;
2611 }
2612
/*
 * Enable a CBox event.  The shared filter register is written first (as
 * two 32-bit MSR halves: low dword at reg, high dword at reg + 1), then
 * the event control MSR is armed with the enable bit.
 */
static void hswep_cbox_enable_event(struct intel_uncore_box *box,
				  struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		/* Shared-reg value arbitrated across events on this box. */
		u64 filter = uncore_shared_reg_config(box, 0);
		wrmsrl(reg1->reg, filter & 0xffffffff);
		wrmsrl(reg1->reg + 1, filter >> 32);
	}

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
2627
/* CBox ops: SNB-EP box handling with HSW-EP filter config/enable hooks. */
static struct intel_uncore_ops hswep_uncore_cbox_ops = {
	.init_box		= snbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= hswep_cbox_hw_config,
	.get_constraint		= hswep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
2639
/*
 * HSW-EP CBox: up to 18 boxes (trimmed to the core count at init time by
 * hswep_uncore_cpu_init()), 4 x 48-bit counters per box.
 */
static struct intel_uncore_type hswep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 18,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,	/* the per-box filter register */
	.constraints		= hswep_uncore_cbox_constraints,
	.ops			= &hswep_uncore_cbox_ops,
	.format_group		= &hswep_uncore_cbox_format_group,
};
2655
/*
 * Write SBOX Initialization register bit by bit to avoid spurious #GPs
 */
static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);

	if (msr) {
		u64 init = SNBEP_PMON_BOX_CTL_INT;
		u64 flags = 0;
		int i;

		/*
		 * Accumulate the init bits one at a time, writing the MSR
		 * after each, so no single write sets a bit combination
		 * the hardware would reject with #GP.
		 */
		for_each_set_bit(i, (unsigned long *)&init, 64) {
			flags |= (1ULL << i);
			wrmsrl(msr, flags);
		}
	}
}
2674
/* Common SNB-EP MSR ops, but with the #GP-safe SBox init sequence above. */
static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = {
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.init_box		= hswep_uncore_sbox_msr_init_box
};
2679
/* sysfs "format" attributes for the HSW-EP SBox. */
static struct attribute *hswep_uncore_sbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

/* Exported under <pmu>/format in sysfs. */
static const struct attribute_group hswep_uncore_sbox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_sbox_formats_attr,
};
2694
/*
 * HSW-EP SBox: 4 boxes by default; hswep_uncore_cpu_init() drops this to
 * 2 on parts detected by hswep_has_limit_sbox().  44-bit counters.
 */
static struct intel_uncore_type hswep_uncore_sbox = {
	.name			= "sbox",
	.num_counters		= 4,
	.num_boxes		= 4,
	.perf_ctr_bits		= 44,
	.event_ctl		= HSWEP_S0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_S0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_S0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_SBOX_MSR_OFFSET,
	.ops			= &hswep_uncore_sbox_msr_ops,
	.format_group		= &hswep_uncore_sbox_format_group,
};
2708
/*
 * Stage the PCU band filter for event selects 0xb-0xe; other events need
 * no filter.  Always succeeds.
 */
static int hswep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;

	if (ev_sel >= 0xb && ev_sel <= 0xe) {
		reg1->reg = HSWEP_PCU_MSR_PMON_BOX_FILTER;
		reg1->idx = ev_sel - 0xb;	/* band index 0..3 */
		/*
		 * NOTE(review): the mask shifts by idx, not idx * 8 —
		 * confirm against the SNB-EP/IVB-EP PCU variants whether
		 * the HSW-EP filter bands really are one bit apart.
		 */
		reg1->config = event->attr.config1 & (0xff << reg1->idx);
	}
	return 0;
}
2722
/* PCU ops: common SNB-EP MSR handling with HSW-EP band-filter config. */
static struct intel_uncore_ops hswep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};
2729
/* HSW-EP PCU: one box, 4 x 48-bit counters, one shared filter register. */
static struct intel_uncore_type hswep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &hswep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};
2743
/* MSR-based PMON units on Haswell-EP; NULL-terminated for the core. */
static struct intel_uncore_type *hswep_msr_uncores[] = {
	&hswep_uncore_ubox,
	&hswep_uncore_cbox,
	&hswep_uncore_sbox,
	&hswep_uncore_pcu,
	NULL,
};
2751
/* PCU PCI device ID used to read CAPID4 for SBox-count detection. */
#define HSWEP_PCU_DID			0x2fc0
/* NOTE(review): "OFFET" is a typo for "OFFSET"; name kept to avoid churn. */
#define HSWEP_PCU_CAPID4_OFFET		0x94
/* Extract the 2-bit chop field (bits 7:6) from CAPID4. */
#define hswep_get_chop(_cap)		(((_cap) >> 6) & 0x3)
2755
2756 static bool hswep_has_limit_sbox(unsigned int device)
2757 {
2758         struct pci_dev *dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
2759         u32 capid4;
2760
2761         if (!dev)
2762                 return false;
2763
2764         pci_read_config_dword(dev, HSWEP_PCU_CAPID4_OFFET, &capid4);
2765         pci_dev_put(dev);
2766         if (!hswep_get_chop(capid4))
2767                 return true;
2768
2769         return false;
2770 }
2771
2772 void hswep_uncore_cpu_init(void)
2773 {
2774         if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
2775                 hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
2776
2777         /* Detect 6-8 core systems with only two SBOXes */
2778         if (hswep_has_limit_sbox(HSWEP_PCU_DID))
2779                 hswep_uncore_sbox.num_boxes = 2;
2780
2781         uncore_msr_uncores = hswep_msr_uncores;
2782 }
2783
/* HSW-EP Home Agent: 2 boxes, common SNB-EP PCI register layout. */
static struct intel_uncore_type hswep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2791
/*
 * Pre-defined HSW-EP IMC events.  CAS count scale 6.103515625e-5 is
 * 64 / 2^20, i.e. counts are converted to MiB in 64-byte units.
 */
static struct uncore_event_desc hswep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};
2802
/* HSW-EP IMC: 8 channels, fixed DCLK counter plus the common PCI layout. */
static struct intel_uncore_type hswep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2814
/* PCI config-space offsets of the four IRP counters (low dword of each). */
static unsigned hswep_uncore_irp_ctrs[] = {0xa0, 0xa8, 0xb0, 0xb8};
2816
2817 static u64 hswep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
2818 {
2819         struct pci_dev *pdev = box->pci_dev;
2820         struct hw_perf_event *hwc = &event->hw;
2821         u64 count = 0;
2822
2823         pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
2824         pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
2825
2826         return count;
2827 }
2828
/* IRP ops: IVB-EP event enable/disable with the split-dword counter read. */
static struct intel_uncore_ops hswep_uncore_irp_ops = {
	.init_box	= snbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= ivbep_uncore_irp_disable_event,
	.enable_event	= ivbep_uncore_irp_enable_event,
	.read_counter	= hswep_uncore_irp_read_counter,
};
2837
/* HSW-EP IRP: one box; counter offsets come from hswep_uncore_irp_ctrs. */
static struct intel_uncore_type hswep_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &hswep_uncore_irp_ops,
	.format_group		= &snbep_uncore_format_group,
};
2848
/* HSW-EP QPI: 3 link boxes, QPI event mask, shared match/mask registers. */
static struct intel_uncore_type hswep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.format_group		= &snbep_uncore_qpi_format_group,
};
2862
/* Counter restrictions for HSW-EP R2PCIe events (event code, counter mask). */
static struct event_constraint hswep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x27, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	EVENT_CONSTRAINT_END
};

/* HSW-EP R2PCIe: one box using the common SNB-EP PCI register layout. */
static struct intel_uncore_type hswep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= hswep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2893
/*
 * Haswell-EP R3QPI counter constraints: bitmask of counters each event code
 * is allowed to use (0x7 = counters 0-2, 0x3 = 0-1, 0x1 = counter 0 only).
 */
static struct event_constraint hswep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};
2930
/*
 * Haswell-EP R3QPI ring-to-QPI uncore: 3 boxes with three 44-bit counters
 * each, standard SNB-EP PCI PMON register layout.
 */
static struct intel_uncore_type hswep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters	= 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 44,
	.constraints	= hswep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2939
/* Indices into hswep_pci_uncores[], also encoded in pci_device_id driver_data. */
enum {
	HSWEP_PCI_UNCORE_HA,
	HSWEP_PCI_UNCORE_IMC,
	HSWEP_PCI_UNCORE_IRP,
	HSWEP_PCI_UNCORE_QPI,
	HSWEP_PCI_UNCORE_R2PCIE,
	HSWEP_PCI_UNCORE_R3QPI,
};
2948
/* NULL-terminated list of Haswell-EP PCI uncore types, indexed by the enum above. */
static struct intel_uncore_type *hswep_pci_uncores[] = {
	[HSWEP_PCI_UNCORE_HA]	= &hswep_uncore_ha,
	[HSWEP_PCI_UNCORE_IMC]	= &hswep_uncore_imc,
	[HSWEP_PCI_UNCORE_IRP]	= &hswep_uncore_irp,
	[HSWEP_PCI_UNCORE_QPI]	= &hswep_uncore_qpi,
	[HSWEP_PCI_UNCORE_R2PCIE]	= &hswep_uncore_r2pcie,
	[HSWEP_PCI_UNCORE_R3QPI]	= &hswep_uncore_r3qpi,
	NULL,
};
2958
/*
 * PCI match table for Haswell-EP uncore PMON devices.  driver_data packs
 * the uncore type index (HSWEP_PCI_UNCORE_*) together with the box number;
 * UNCORE_EXTRA_PCI_DEV entries are auxiliary QPI filter devices rather
 * than PMON boxes themselves.
 */
static const struct pci_device_id hswep_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f30),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f38),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb0),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb1),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb4),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb5),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd0),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd1),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd4),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd5),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f39),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f32),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f33),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3a),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f34),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f36),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f37),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3e),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};
3044
/* PCI driver stub for the HSW-EP uncore devices (no probe; matched manually). */
static struct pci_driver hswep_uncore_pci_driver = {
	.name		= "hswep_uncore",
	.id_table	= hswep_uncore_pci_ids,
};
3049
3050 int hswep_uncore_pci_init(void)
3051 {
3052         int ret = snbep_pci2phy_map_init(0x2f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
3053         if (ret)
3054                 return ret;
3055         uncore_pci_uncores = hswep_pci_uncores;
3056         uncore_pci_driver = &hswep_uncore_pci_driver;
3057         return 0;
3058 }
3059 /* end of Haswell-EP uncore support */
3060
3061 /* BDX uncore support */
3062
/*
 * Broadwell-EP/EX UBox uncore: one box with two 48-bit generic MSR counters
 * plus the fixed UCLK counter.  Reuses the IvyBridge-EP MSR ops and UBox
 * format group.
 */
static struct intel_uncore_type bdx_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_ubox_format_group,
};
3078
/* BDX CBox counter constraints: event code -> bitmask of allowed counters. */
static struct event_constraint bdx_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
	EVENT_CONSTRAINT_END
};
3086
/*
 * Broadwell-EP/EX CBox uncore: up to 24 per-core boxes (trimmed to the
 * actual core count in bdx_uncore_cpu_init()), per-box MSR stride
 * HSWEP_CBO_MSR_OFFSET, with the shared filter register support from the
 * Haswell-EP CBox ops.
 */
static struct intel_uncore_type bdx_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 24,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= bdx_uncore_cbox_constraints,
	.ops			= &hswep_uncore_cbox_ops,
	.format_group		= &hswep_uncore_cbox_format_group,
};
3102
/*
 * Broadwell-EP/EX SBox uncore: 4 boxes.  May be removed from
 * bdx_msr_uncores[] at runtime on parts that have no SBOXes
 * (see bdx_uncore_cpu_init()).
 */
static struct intel_uncore_type bdx_uncore_sbox = {
	.name			= "sbox",
	.num_counters		= 4,
	.num_boxes		= 4,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_S0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_S0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_S0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_SBOX_MSR_OFFSET,
	.ops			= &hswep_uncore_sbox_msr_ops,
	.format_group		= &hswep_uncore_sbox_format_group,
};
3116
3117 #define BDX_MSR_UNCORE_SBOX     3
3118
/*
 * NULL-terminated list of BDX MSR-based uncore types.  The SBox entry must
 * stay at index BDX_MSR_UNCORE_SBOX so bdx_uncore_cpu_init() can clear it
 * on SBox-less systems.
 */
static struct intel_uncore_type *bdx_msr_uncores[] = {
	&bdx_uncore_ubox,
	&bdx_uncore_cbox,
	&hswep_uncore_pcu,
	&bdx_uncore_sbox,
	NULL,
};
3126
/* Bit 7 'Use Occupancy' is not available for counter 0 on BDX */
static struct event_constraint bdx_uncore_pcu_constraints[] = {
	/* Event 0x80 with cmask 0xe: restricted to counters 1-3 (mask 0xe). */
	EVENT_CONSTRAINT(0x80, 0xe, 0x80),
	EVENT_CONSTRAINT_END
};
3132
3133 #define BDX_PCU_DID                     0x6fc0
3134
/*
 * Runtime setup for the BDX MSR uncore types: clamp the CBox count to the
 * actual number of cores, drop the SBox entry on parts without SBOXes, and
 * install the BDX-specific PCU constraint.
 */
void bdx_uncore_cpu_init(void)
{
	/* Never expose more CBox PMUs than cores in the package. */
	if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	uncore_msr_uncores = bdx_msr_uncores;

	/* Detect systems with no SBOXes */
	if ((boot_cpu_data.x86_model == 86) || hswep_has_limit_sbox(BDX_PCU_DID))
		uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;

	/* Counter 0 cannot use the 'Use Occupancy' bit on BDX (see above). */
	hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints;
}
3147
/* Broadwell-EP/EX Home Agent uncore: 2 boxes, standard SNB-EP PCI PMON layout. */
static struct intel_uncore_type bdx_uncore_ha = {
	.name		= "ha",
	.num_counters	= 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3155
/*
 * Broadwell-EP/EX IMC uncore: 8 memory-channel boxes with a 48-bit fixed
 * (DCLK) counter in addition to the generic counters; reuses the HSW-EP
 * IMC event descriptions.
 */
static struct intel_uncore_type bdx_uncore_imc = {
	.name		= "imc",
	.num_counters	= 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3167
/*
 * Broadwell-EP/EX IRP uncore: single box.  No perf_ctr/event_ctl bases are
 * set here because the HSW-EP IRP ops address the counters directly.
 */
static struct intel_uncore_type bdx_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &hswep_uncore_irp_ops,
	.format_group		= &snbep_uncore_format_group,
};
3178
/*
 * Broadwell-EP/EX QPI uncore: 3 boxes, identical register layout to the
 * HSW-EP QPI type; one shared register set for the QPI match/mask filters.
 */
static struct intel_uncore_type bdx_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.format_group		= &snbep_uncore_qpi_format_group,
};
3192
/* BDX R2PCIe counter constraints: event code -> bitmask of allowed counters. */
static struct event_constraint bdx_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	EVENT_CONSTRAINT_END
};
3205
/* Broadwell-EP/EX R2PCIe uncore: single box, standard SNB-EP PCI PMON layout. */
static struct intel_uncore_type bdx_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= bdx_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3214
/*
 * BDX R3QPI counter constraints: bitmask of counters each event code may
 * use (0x7 = counters 0-2, 0x3 = 0-1, 0x1 = counter 0 only).
 */
static struct event_constraint bdx_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};
3248
/* Broadwell-EP/EX R3QPI uncore: 3 boxes with three 48-bit counters each. */
static struct intel_uncore_type bdx_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters	= 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.constraints	= bdx_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3257
/* Indices into bdx_pci_uncores[], also encoded in pci_device_id driver_data. */
enum {
	BDX_PCI_UNCORE_HA,
	BDX_PCI_UNCORE_IMC,
	BDX_PCI_UNCORE_IRP,
	BDX_PCI_UNCORE_QPI,
	BDX_PCI_UNCORE_R2PCIE,
	BDX_PCI_UNCORE_R3QPI,
};
3266
/* NULL-terminated list of BDX PCI uncore types, indexed by the enum above. */
static struct intel_uncore_type *bdx_pci_uncores[] = {
	[BDX_PCI_UNCORE_HA]	= &bdx_uncore_ha,
	[BDX_PCI_UNCORE_IMC]	= &bdx_uncore_imc,
	[BDX_PCI_UNCORE_IRP]	= &bdx_uncore_irp,
	[BDX_PCI_UNCORE_QPI]	= &bdx_uncore_qpi,
	[BDX_PCI_UNCORE_R2PCIE]	= &bdx_uncore_r2pcie,
	[BDX_PCI_UNCORE_R3QPI]	= &bdx_uncore_r3qpi,
	NULL,
};
3276
/*
 * PCI match table for Broadwell-EP/EX uncore PMON devices.  driver_data
 * packs the uncore type index (BDX_PCI_UNCORE_*) together with the box
 * number; UNCORE_EXTRA_PCI_DEV entries are auxiliary QPI filter devices.
 * Unlike HSW-EP, BDX also has a QPI Port 2 filter device.
 */
static const struct pci_device_id bdx_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f30),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f38),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb0),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb1),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb4),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb5),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd0),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd1),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd4),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd5),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f39),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f32),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f33),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3a),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f34),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f36),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f37),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3e),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* QPI Port 2 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   BDX_PCI_QPI_PORT2_FILTER),
	},
	{ /* end: all zeroes */ }
};
3367
/* PCI driver stub for the BDX uncore devices (no probe; matched manually). */
static struct pci_driver bdx_uncore_pci_driver = {
	.name		= "bdx_uncore",
	.id_table	= bdx_uncore_pci_ids,
};
3372
3373 int bdx_uncore_pci_init(void)
3374 {
3375         int ret = snbep_pci2phy_map_init(0x6f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
3376
3377         if (ret)
3378                 return ret;
3379         uncore_pci_uncores = bdx_pci_uncores;
3380         uncore_pci_driver = &bdx_uncore_pci_driver;
3381         return 0;
3382 }
3383
3384 /* end of BDX uncore support */
3385
3386 /* SKX uncore support */
3387
/*
 * Skylake-SP UBox uncore: one box with two 48-bit generic MSR counters plus
 * the fixed UCLK counter; reuses the IvyBridge-EP MSR ops and UBox format
 * group (same MSR layout as HSW-EP, but no shared registers).
 */
static struct intel_uncore_type skx_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_ubox_format_group,
};
3402
/*
 * sysfs "format" attributes for the SKX CHA PMU: the generic event fields
 * plus the CHA filter-register fields exposed via config1.
 */
static struct attribute *skx_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid4.attr,
	&format_attr_filter_state5.attr,
	&format_attr_filter_rem.attr,
	&format_attr_filter_loc.attr,
	&format_attr_filter_nm.attr,
	&format_attr_filter_all_op.attr,
	&format_attr_filter_not_nm.attr,
	&format_attr_filter_opc_0.attr,
	&format_attr_filter_opc_1.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};
3423
/* sysfs attribute group exposing the CHA format attributes under "format". */
static const struct attribute_group skx_uncore_chabox_format_group = {
	.name = "format",
	.attrs = skx_uncore_cha_formats_attr,
};
3428
/* SKX CHA counter constraints: events 0x11 and 0x36 may only use counter 0. */
static struct event_constraint skx_uncore_chabox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	EVENT_CONSTRAINT_END
};
3434
/*
 * Extra-register table for SKX CHA events: each entry maps an event+umask
 * encoding (matched under config_mask) to the filter-field index bits that
 * skx_cha_hw_config() ORs into reg1->idx.
 */
static struct extra_reg skx_uncore_cha_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x3134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x9134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x38, 0xff, 0x3),
	EVENT_EXTRA_END
};
3447
3448 static u64 skx_cha_filter_mask(int fields)
3449 {
3450         u64 mask = 0;
3451
3452         if (fields & 0x1)
3453                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_TID;
3454         if (fields & 0x2)
3455                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LINK;
3456         if (fields & 0x4)
3457                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_STATE;
3458         if (fields & 0x8) {
3459                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_REM;
3460                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LOC;
3461                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC;
3462                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NM;
3463                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM;
3464                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC0;
3465                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC1;
3466                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NC;
3467                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ISOC;
3468         }
3469         return mask;
3470 }
3471
/* Delegate CHA constraint lookup to the common CBox helper with the SKX filter mask. */
static struct event_constraint *
skx_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, skx_cha_filter_mask);
}
3477
/*
 * Program the extra (filter) register state for an SKX CHA event: collect
 * the filter-field index bits the event needs and, if any, point the extra
 * reg at this box's FILTER0 MSR with the user's config1 value masked down
 * to the valid filter bits.  Always returns 0.
 */
static int skx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;
	/* Any of the CHA events may be filtered by Thread/Core-ID.*/
	if (event->hw.config & SNBEP_CBO_PMON_CTL_TID_EN)
		idx = SKX_CHA_MSR_PMON_BOX_FILTER_TID;

	/* Accumulate filter-field bits from every matching extra-reg entry. */
	for (er = skx_uncore_cha_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		/* Per-box FILTER0 MSR: base address plus this box's MSR stride. */
		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
			    HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & skx_cha_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}
3501
/* SKX CHA box operations: mostly shared SNB-EP/HSW-EP MSR helpers plus the
 * SKX-specific filter-register configuration and constraint hooks. */
static struct intel_uncore_ops skx_uncore_chabox_ops = {
	/* There is no frz_en for chabox ctl */
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= skx_cha_hw_config,
	.get_constraint		= skx_cha_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
3514
/*
 * Skylake-SP CHA (caching/home agent) uncore.  num_boxes is not set here;
 * presumably it is filled in at runtime from the detected CHA count —
 * NOTE(review): confirm against skx_uncore_cpu_init() elsewhere in this file.
 */
static struct intel_uncore_type skx_uncore_chabox = {
	.name			= "cha",
	.num_counters		= 4,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= skx_uncore_chabox_constraints,
	.ops			= &skx_uncore_chabox_ops,
	.format_group		= &skx_uncore_chabox_format_group,
};
3529
/* sysfs "format" attributes for the SKX IIO PMU (incl. channel/FC masks). */
static struct attribute *skx_uncore_iio_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh9.attr,
	&format_attr_ch_mask.attr,
	&format_attr_fc_mask.attr,
	NULL,
};
3540
/* sysfs attribute group exposing the IIO format attributes under "format". */
static const struct attribute_group skx_uncore_iio_format_group = {
	.name = "format",
	.attrs = skx_uncore_iio_formats_attr,
};
3545
/* SKX IIO counter constraints: event code -> bitmask of allowed counters. */
static struct event_constraint skx_uncore_iio_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x88, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x95, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xd4, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xd5, 0xc),
	EVENT_CONSTRAINT_END
};
3556
3557 static void skx_iio_enable_event(struct intel_uncore_box *box,
3558                                  struct perf_event *event)
3559 {
3560         struct hw_perf_event *hwc = &event->hw;
3561
3562         wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
3563 }
3564
/* SKX IIO box operations: shared MSR helpers plus the IIO enable hook above. */
static struct intel_uncore_ops skx_uncore_iio_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= skx_iio_enable_event,
	.read_counter		= uncore_msr_read_counter,
};
3573
/*
 * Skylake-SP IIO uncore: 6 stacks with four 48-bit MSR counters each; the
 * raw event config extends into a second MSR word (event_mask_ext).
 */
static struct intel_uncore_type skx_uncore_iio = {
	.name			= "iio",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= SKX_IIO0_MSR_PMON_CTL0,
	.perf_ctr		= SKX_IIO0_MSR_PMON_CTR0,
	.event_mask		= SKX_IIO_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
	.box_ctl		= SKX_IIO0_MSR_PMON_BOX_CTL,
	.msr_offset		= SKX_IIO_MSR_OFFSET,
	.constraints		= skx_uncore_iio_constraints,
	.ops			= &skx_uncore_iio_ops,
	.format_group		= &skx_uncore_iio_format_group,
};
3589
/* Free-running counter groups in the SKX IIO: I/O clock, bandwidth, utilization. */
enum perf_uncore_iio_freerunning_type_id {
	SKX_IIO_MSR_IOCLK			= 0,
	SKX_IIO_MSR_BW				= 1,
	SKX_IIO_MSR_UTIL			= 2,

	SKX_IIO_FREERUNNING_TYPE_MAX,
};
3597
3598
/*
 * Free-running counter descriptions per group.  Initializer order follows
 * struct freerunning_counters — presumably { MSR base, counter offset, box
 * offset, number of counters, counter width in bits }; confirm against the
 * struct definition in uncore.h.
 */
static struct freerunning_counters skx_iio_freerunning[] = {
	[SKX_IIO_MSR_IOCLK]	= { 0xa45, 0x1, 0x20, 1, 36 },
	[SKX_IIO_MSR_BW]	= { 0xb00, 0x1, 0x10, 8, 36 },
	[SKX_IIO_MSR_UTIL]	= { 0xb08, 0x1, 0x10, 8, 36 },
};
3604
/*
 * perf aliases for the SKX IIO free-running counters.  The bandwidth
 * events carry .scale = 3.814697266e-6 (= 4 / 2^20), i.e. the raw
 * count is presumably in 4-byte units and is reported in MiB.
 */
static struct uncore_event_desc skx_uncore_iio_freerunning_events[] = {
	/* Free-Running IO CLOCKS Counter */
	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
	/* Free-Running IIO BANDWIDTH Counters */
	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port0,		"event=0xff,umask=0x24"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1,		"event=0xff,umask=0x25"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2,		"event=0xff,umask=0x26"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3,		"event=0xff,umask=0x27"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit,	"MiB"),
	/* Free-running IIO UTILIZATION Counters */
	INTEL_UNCORE_EVENT_DESC(util_in_port0,		"event=0xff,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(util_out_port0,		"event=0xff,umask=0x31"),
	INTEL_UNCORE_EVENT_DESC(util_in_port1,		"event=0xff,umask=0x32"),
	INTEL_UNCORE_EVENT_DESC(util_out_port1,		"event=0xff,umask=0x33"),
	INTEL_UNCORE_EVENT_DESC(util_in_port2,		"event=0xff,umask=0x34"),
	INTEL_UNCORE_EVENT_DESC(util_out_port2,		"event=0xff,umask=0x35"),
	INTEL_UNCORE_EVENT_DESC(util_in_port3,		"event=0xff,umask=0x36"),
	INTEL_UNCORE_EVENT_DESC(util_out_port3,		"event=0xff,umask=0x37"),
	{ /* end: all zeroes */ },
};
3644
/*
 * Free-running counters cannot be started, stopped or reprogrammed,
 * so only read and hw_config callbacks are provided.
 */
static struct intel_uncore_ops skx_uncore_iio_freerunning_ops = {
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= uncore_freerunning_hw_config,
};
3649
/* Only event/umask are meaningful for free-running counters. */
static struct attribute *skx_uncore_iio_freerunning_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	NULL,
};

static const struct attribute_group skx_uncore_iio_freerunning_format_group = {
	.name = "format",
	.attrs = skx_uncore_iio_freerunning_formats_attr,
};
3660
/*
 * Pseudo PMU for the SKX IIO free-running counters: 17 counters per
 * box (1 ioclk + 8 bandwidth + 8 utilization), one box per IIO stack.
 */
static struct intel_uncore_type skx_uncore_iio_free_running = {
	.name			= "iio_free_running",
	.num_counters		= 17,
	.num_boxes		= 6,
	.num_freerunning_types	= SKX_IIO_FREERUNNING_TYPE_MAX,
	.freerunning		= skx_iio_freerunning,
	.ops			= &skx_uncore_iio_freerunning_ops,
	.event_descs		= skx_uncore_iio_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
3671
/* Generic SKX PMON event format: event/umask/edge/inv + 8-bit threshold. */
static struct attribute *skx_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group skx_uncore_format_group = {
	.name = "format",
	.attrs = skx_uncore_formats_attr,
};
3685
/* IRP (IIO ring port) boxes: one per IIO stack, MSR based. */
static struct intel_uncore_type skx_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= SKX_IRP0_MSR_PMON_CTL0,
	.perf_ctr		= SKX_IRP0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SKX_IRP0_MSR_PMON_BOX_CTL,
	.msr_offset		= SKX_IRP_MSR_OFFSET,
	.ops			= &skx_uncore_iio_ops,	/* shares the IIO MSR ops */
	.format_group		= &skx_uncore_format_group,
};
3699
/*
 * PCU events additionally expose occupancy invert/edge controls and
 * four frequency-band filter fields programmed via the filter register.
 */
static struct attribute *skx_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge_det.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

/*
 * NOTE(review): unlike the other format groups here this one is not
 * const; it is also reused by SNR below -- confirm nothing modifies it
 * before constifying.
 */
static struct attribute_group skx_uncore_pcu_format_group = {
	.name = "format",
	.attrs = skx_uncore_pcu_formats_attr,
};
3719
/* PCU ops: common IVB-EP MSR ops plus HSW-EP filter handling. */
static struct intel_uncore_ops skx_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};
3726
/* Power Control Unit: single box, one shared filter register. */
static struct intel_uncore_type skx_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,	/* the band filter register */
	.ops			= &skx_uncore_pcu_ops,
	.format_group		= &skx_uncore_pcu_format_group,
};
3740
/* All MSR-based SKX uncore PMUs, registered by skx_uncore_cpu_init(). */
static struct intel_uncore_type *skx_msr_uncores[] = {
	&skx_uncore_ubox,
	&skx_uncore_chabox,
	&skx_uncore_iio,
	&skx_uncore_iio_free_running,
	&skx_uncore_irp,
	&skx_uncore_pcu,
	NULL,
};
3750
/*
 * To determine the number of CHAs, read bits 27:0 of the CAPID6
 * register, which is located at Device 30, Function 3, Offset 0x9C,
 * PCI ID 0x2083.  Each set bit corresponds to one enabled CHA.
 */
#define SKX_CAPID6		0x9c
#define SKX_CHA_BIT_MASK	GENMASK(27, 0)
3757
3758 static int skx_count_chabox(void)
3759 {
3760         struct pci_dev *dev = NULL;
3761         u32 val = 0;
3762
3763         dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2083, dev);
3764         if (!dev)
3765                 goto out;
3766
3767         pci_read_config_dword(dev, SKX_CAPID6, &val);
3768         val &= SKX_CHA_BIT_MASK;
3769 out:
3770         pci_dev_put(dev);
3771         return hweight32(val);
3772 }
3773
/*
 * Register the SKX MSR-based uncore PMUs.  The CHA box count varies
 * per part, so it is probed from the CAPID6 fuses at init time.
 */
void skx_uncore_cpu_init(void)
{
	skx_uncore_chabox.num_boxes = skx_count_chabox();
	uncore_msr_uncores = skx_msr_uncores;
}
3779
/* Integrated Memory Controller channels: PCI based, one box per channel. */
static struct intel_uncore_type skx_uncore_imc = {
	.name		= "imc",
	.num_counters	= 4,
	.num_boxes	= 6,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,	/* reuses HSW-EP aliases */
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
3796
/* UPI events use an extended umask field (umask_ext). */
static struct attribute *skx_upi_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group skx_upi_uncore_format_group = {
	.name = "format",
	.attrs = skx_upi_uncore_formats_attr,
};
3810
/*
 * Box init for the UPI PCI PMON: flag the box as using 8-byte control
 * register offsets (UNCORE_BOX_FLAG_CTL_OFFS8), then reset it.
 */
static void skx_upi_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;

	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
	pci_write_config_dword(pdev, SKX_UPI_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
}
3818
/* Standard SNB-EP PCI PMON ops with the UPI-specific box init. */
static struct intel_uncore_ops skx_upi_uncore_pci_ops = {
	.init_box	= skx_upi_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};
3827
/* UPI (inter-socket link) boxes: up to three links per socket. */
static struct intel_uncore_type skx_uncore_upi = {
	.name		= "upi",
	.num_counters	= 4,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SKX_UPI_PCI_PMON_CTR0,
	.event_ctl	= SKX_UPI_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext	= SKX_UPI_CTL_UMASK_EXT,
	.box_ctl	= SKX_UPI_PCI_PMON_BOX_CTL,
	.ops		= &skx_upi_uncore_pci_ops,
	.format_group	= &skx_upi_uncore_format_group,
};
3841
3842 static void skx_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
3843 {
3844         struct pci_dev *pdev = box->pci_dev;
3845
3846         __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
3847         pci_write_config_dword(pdev, SKX_M2M_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
3848 }
3849
/* Standard SNB-EP PCI PMON ops with the M2M-specific box init. */
static struct intel_uncore_ops skx_m2m_uncore_pci_ops = {
	.init_box	= skx_m2m_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};
3858
/* M2M (mesh-to-memory) boxes: one per memory controller. */
static struct intel_uncore_type skx_uncore_m2m = {
	.name		= "m2m",
	.num_counters	= 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SKX_M2M_PCI_PMON_CTR0,
	.event_ctl	= SKX_M2M_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SKX_M2M_PCI_PMON_BOX_CTL,
	.ops		= &skx_m2m_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
3871
/* Event 0x23 may only be scheduled on counters 0-1. */
static struct event_constraint skx_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	EVENT_CONSTRAINT_END
};

/* M2PCIe (mesh-to-PCIe) boxes: PCI based, one per IIO stack in use. */
static struct intel_uncore_type skx_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters	= 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.constraints	= skx_uncore_m2pcie_constraints,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
3890
/* Counter-scheduling restrictions for specific M3UPI events. */
static struct event_constraint skx_uncore_m3upi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x51, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x52, 0x7),
	EVENT_CONSTRAINT_END
};

/* M3UPI (mesh-to-UPI) boxes: one per UPI link. */
static struct intel_uncore_type skx_uncore_m3upi = {
	.name		= "m3upi",
	.num_counters	= 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.constraints	= skx_uncore_m3upi_constraints,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
3916
/* Indices into skx_pci_uncores[], referenced by the PCI ID table below. */
enum {
	SKX_PCI_UNCORE_IMC,
	SKX_PCI_UNCORE_M2M,
	SKX_PCI_UNCORE_UPI,
	SKX_PCI_UNCORE_M2PCIE,
	SKX_PCI_UNCORE_M3UPI,
};

static struct intel_uncore_type *skx_pci_uncores[] = {
	[SKX_PCI_UNCORE_IMC]	= &skx_uncore_imc,
	[SKX_PCI_UNCORE_M2M]	= &skx_uncore_m2m,
	[SKX_PCI_UNCORE_UPI]	= &skx_uncore_upi,
	[SKX_PCI_UNCORE_M2PCIE]	= &skx_uncore_m2pcie,
	[SKX_PCI_UNCORE_M3UPI]	= &skx_uncore_m3upi,
	NULL,
};
3933
/*
 * PCI devices hosting the SKX uncore PMON blocks.  driver_data encodes
 * the fixed (device, function) location, the skx_pci_uncores[] index
 * and the box number via UNCORE_PCI_DEV_FULL_DATA().
 */
static const struct pci_device_id skx_uncore_pci_ids[] = {
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 2, SKX_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 6, SKX_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 2, SKX_PCI_UNCORE_IMC, 2),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 2, SKX_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 6, SKX_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 2, SKX_PCI_UNCORE_IMC, 5),
	},
	{ /* M2M0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 0, SKX_PCI_UNCORE_M2M, 0),
	},
	{ /* M2M1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 0, SKX_PCI_UNCORE_M2M, 1),
	},
	{ /* UPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, SKX_PCI_UNCORE_UPI, 0),
	},
	{ /* UPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, SKX_PCI_UNCORE_UPI, 1),
	},
	{ /* UPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, SKX_PCI_UNCORE_UPI, 2),
	},
	{ /* M2PCIe 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 1, SKX_PCI_UNCORE_M2PCIE, 0),
	},
	{ /* M2PCIe 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 1, SKX_PCI_UNCORE_M2PCIE, 1),
	},
	{ /* M2PCIe 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(23, 1, SKX_PCI_UNCORE_M2PCIE, 2),
	},
	{ /* M2PCIe 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 5, SKX_PCI_UNCORE_M2PCIE, 3),
	},
	{ /* M3UPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 0),
	},
	{ /* M3UPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204E),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 2, SKX_PCI_UNCORE_M3UPI, 1),
	},
	{ /* M3UPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 5, SKX_PCI_UNCORE_M3UPI, 2),
	},
	{ /* end: all zeroes */ }
};
4009
4010
/* No probe/remove: the uncore core walks the ID table itself. */
static struct pci_driver skx_uncore_pci_driver = {
	.name		= "skx_uncore",
	.id_table	= skx_uncore_pci_ids,
};
4015
4016 int skx_uncore_pci_init(void)
4017 {
4018         /* need to double check pci address */
4019         int ret = snbep_pci2phy_map_init(0x2014, SKX_CPUNODEID, SKX_GIDNIDMAP, false);
4020
4021         if (ret)
4022                 return ret;
4023
4024         uncore_pci_uncores = skx_pci_uncores;
4025         uncore_pci_driver = &skx_uncore_pci_driver;
4026         return 0;
4027 }
4028
4029 /* end of SKX uncore support */
4030
4031 /* SNR uncore support */
4032
/* SNR UBOX: two general counters plus a fixed UCLK counter. */
static struct intel_uncore_type snr_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= SNR_U_MSR_PMON_CTR0,
	.event_ctl		= SNR_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= SNR_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= SNR_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_format_group,
};
4047
/* SNR CHA format: extended umask plus a TID filter (filter_tid5). */
static struct attribute *snr_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext2.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid5.attr,
	NULL,
};
static const struct attribute_group snr_uncore_chabox_format_group = {
	.name = "format",
	.attrs = snr_uncore_cha_formats_attr,
};
4062
4063 static int snr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
4064 {
4065         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
4066
4067         reg1->reg = SNR_C0_MSR_PMON_BOX_FILTER0 +
4068                     box->pmu->type->msr_offset * box->pmu->pmu_idx;
4069         reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID;
4070         reg1->idx = 0;
4071
4072         return 0;
4073 }
4074
4075 static void snr_cha_enable_event(struct intel_uncore_box *box,
4076                                    struct perf_event *event)
4077 {
4078         struct hw_perf_event *hwc = &event->hw;
4079         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
4080
4081         if (reg1->idx != EXTRA_REG_NONE)
4082                 wrmsrl(reg1->reg, reg1->config);
4083
4084         wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
4085 }
4086
/* CHA ops: common MSR ops plus TID-filter aware enable/config. */
static struct intel_uncore_ops snr_uncore_chabox_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= snr_cha_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= snr_cha_hw_config,
};
4096
/* SNR CHA (caching/home agent) boxes. */
static struct intel_uncore_type snr_uncore_chabox = {
	.name			= "cha",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= SNR_CHA_MSR_PMON_CTL0,
	.perf_ctr		= SNR_CHA_MSR_PMON_CTR0,
	.box_ctl		= SNR_CHA_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SNR_CHA_RAW_EVENT_MASK_EXT,
	.ops			= &snr_uncore_chabox_ops,
	.format_group		= &snr_uncore_chabox_format_group,
};
4111
/* SNR IIO format: 9-bit threshold plus channel/FC mask filters. */
static struct attribute *snr_uncore_iio_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh9.attr,
	&format_attr_ch_mask2.attr,
	&format_attr_fc_mask2.attr,
	NULL,
};

static const struct attribute_group snr_uncore_iio_format_group = {
	.name = "format",
	.attrs = snr_uncore_iio_formats_attr,
};
4127
/* SNR IIO boxes: one per IIO stack. */
static struct intel_uncore_type snr_uncore_iio = {
	.name			= "iio",
	.num_counters		= 4,
	.num_boxes		= 5,
	.perf_ctr_bits		= 48,
	.event_ctl		= SNR_IIO_MSR_PMON_CTL0,
	.perf_ctr		= SNR_IIO_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
	.box_ctl		= SNR_IIO_MSR_PMON_BOX_CTL,
	.msr_offset		= SNR_IIO_MSR_OFFSET,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &snr_uncore_iio_format_group,
};
4142
/* SNR IRP (IIO ring port) boxes. */
static struct intel_uncore_type snr_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 5,
	.perf_ctr_bits		= 48,
	.event_ctl		= SNR_IRP0_MSR_PMON_CTL0,
	.perf_ctr		= SNR_IRP0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNR_IRP0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNR_IRP_MSR_OFFSET,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_format_group,
};
4156
/* SNR M2PCIe boxes: MSR based (unlike the PCI-based SKX variant). */
static struct intel_uncore_type snr_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters	= 4,
	.num_boxes	= 5,
	.perf_ctr_bits	= 48,
	.event_ctl	= SNR_M2PCIE_MSR_PMON_CTL0,
	.perf_ctr	= SNR_M2PCIE_MSR_PMON_CTR0,
	.box_ctl	= SNR_M2PCIE_MSR_PMON_BOX_CTL,
	.msr_offset	= SNR_M2PCIE_MSR_OFFSET,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_format_group,
};
4170
4171 static int snr_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
4172 {
4173         struct hw_perf_event *hwc = &event->hw;
4174         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
4175         int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
4176
4177         if (ev_sel >= 0xb && ev_sel <= 0xe) {
4178                 reg1->reg = SNR_PCU_MSR_PMON_BOX_FILTER;
4179                 reg1->idx = ev_sel - 0xb;
4180                 reg1->config = event->attr.config1 & (0xff << reg1->idx);
4181         }
4182         return 0;
4183 }
4184
/* PCU ops: common IVB-EP MSR ops plus SNR band-filter handling. */
static struct intel_uncore_ops snr_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snr_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};
4191
/* SNR Power Control Unit: single box, shares the SKX PCU format group. */
static struct intel_uncore_type snr_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNR_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNR_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNR_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,	/* the band filter register */
	.ops			= &snr_uncore_pcu_ops,
	.format_group		= &skx_uncore_pcu_format_group,
};
4205
/* Free-running counter groups in the SNR IIO boxes. */
enum perf_uncore_snr_iio_freerunning_type_id {
	SNR_IIO_MSR_IOCLK,
	SNR_IIO_MSR_BW_IN,

	SNR_IIO_FREERUNNING_TYPE_MAX,
};

/*
 * Geometry per type; initializer order is presumably { counter_base,
 * counter_offset, box_offset, num_counters, bits } -- confirm against
 * struct freerunning_counters in uncore.h.
 */
static struct freerunning_counters snr_iio_freerunning[] = {
	[SNR_IIO_MSR_IOCLK]	= { 0x1eac, 0x1, 0x10, 1, 48 },
	[SNR_IIO_MSR_BW_IN]	= { 0x1f00, 0x1, 0x10, 8, 48 },
};
4217
/*
 * perf aliases for the SNR IIO free-running counters.  As on SKX, the
 * bandwidth .scale of 3.814697266e-6 (= 4 / 2^20) presumably converts
 * 4-byte-granular counts to MiB.
 */
static struct uncore_event_desc snr_uncore_iio_freerunning_events[] = {
	/* Free-Running IIO CLOCKS Counter */
	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
	/* Free-Running IIO BANDWIDTH IN Counters */
	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4,		"event=0xff,umask=0x24"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5,		"event=0xff,umask=0x25"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6,		"event=0xff,umask=0x26"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7,		"event=0xff,umask=0x27"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit,	"MiB"),
	{ /* end: all zeroes */ },
};
4248
/*
 * Pseudo PMU for the SNR IIO free-running counters: 9 counters per box
 * (1 ioclk + 8 inbound bandwidth); reuses the SKX free-running ops.
 */
static struct intel_uncore_type snr_uncore_iio_free_running = {
	.name			= "iio_free_running",
	.num_counters		= 9,
	.num_boxes		= 5,
	.num_freerunning_types	= SNR_IIO_FREERUNNING_TYPE_MAX,
	.freerunning		= snr_iio_freerunning,
	.ops			= &skx_uncore_iio_freerunning_ops,
	.event_descs		= snr_uncore_iio_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
4259
/* All MSR-based SNR uncore PMUs, registered by snr_uncore_cpu_init(). */
static struct intel_uncore_type *snr_msr_uncores[] = {
	&snr_uncore_ubox,
	&snr_uncore_chabox,
	&snr_uncore_iio,
	&snr_uncore_irp,
	&snr_uncore_m2pcie,
	&snr_uncore_pcu,
	&snr_uncore_iio_free_running,
	NULL,
};
4270
/* Register the SNR MSR-based uncore PMUs. */
void snr_uncore_cpu_init(void)
{
	uncore_msr_uncores = snr_msr_uncores;
}
4275
/*
 * Box init for the SNR M2M PCI PMON: flag 8-byte control offsets
 * (UNCORE_BOX_FLAG_CTL_OFFS8), then reset the box via its control
 * register, looked up generically from the uncore type.
 */
static void snr_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
	pci_write_config_dword(pdev, box_ctl, IVBEP_PMON_BOX_CTL_INT);
}
4284
/* Standard SNB-EP PCI PMON ops with the SNR M2M box init. */
static struct intel_uncore_ops snr_m2m_uncore_pci_ops = {
	.init_box	= snr_m2m_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};
4293
/* SNR M2M format: uses the third extended-umask layout (umask_ext3). */
static struct attribute *snr_m2m_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext3.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group snr_m2m_uncore_format_group = {
	.name = "format",
	.attrs = snr_m2m_uncore_formats_attr,
};
4307
/* SNR M2M (mesh-to-memory) box: single box, PCI based. */
static struct intel_uncore_type snr_uncore_m2m = {
	.name		= "m2m",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SNR_M2M_PCI_PMON_CTR0,
	.event_ctl	= SNR_M2M_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext	= SNR_M2M_PCI_PMON_UMASK_EXT,
	.box_ctl	= SNR_M2M_PCI_PMON_BOX_CTL,
	.ops		= &snr_m2m_uncore_pci_ops,
	.format_group	= &snr_m2m_uncore_format_group,
};
4321
/* Indices into snr_pci_uncores[], referenced by the PCI ID table below. */
enum {
	SNR_PCI_UNCORE_M2M,
};

static struct intel_uncore_type *snr_pci_uncores[] = {
	[SNR_PCI_UNCORE_M2M]		= &snr_uncore_m2m,
	NULL,
};
4330
/*
 * PCI devices hosting the SNR uncore PMON blocks; driver_data encodes
 * (device, function), type index and box number.
 */
static const struct pci_device_id snr_uncore_pci_ids[] = {
	{ /* M2M */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, SNR_PCI_UNCORE_M2M, 0),
	},
	{ /* end: all zeroes */ }
};
4338
/* No probe/remove: the uncore core walks the ID table itself. */
static struct pci_driver snr_uncore_pci_driver = {
	.name		= "snr_uncore",
	.id_table	= snr_uncore_pci_ids,
};
4343
4344 int snr_uncore_pci_init(void)
4345 {
4346         /* SNR UBOX DID */
4347         int ret = snbep_pci2phy_map_init(0x3460, SKX_CPUNODEID,
4348                                          SKX_GIDNIDMAP, true);
4349
4350         if (ret)
4351                 return ret;
4352
4353         uncore_pci_uncores = snr_pci_uncores;
4354         uncore_pci_driver = &snr_uncore_pci_driver;
4355         return 0;
4356 }
4357
4358 static struct pci_dev *snr_uncore_get_mc_dev(int id)
4359 {
4360         struct pci_dev *mc_dev = NULL;
4361         int phys_id, pkg;
4362
4363         while (1) {
4364                 mc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3451, mc_dev);
4365                 if (!mc_dev)
4366                         break;
4367                 phys_id = uncore_pcibus_to_physid(mc_dev->bus);
4368                 if (phys_id < 0)
4369                         continue;
4370                 pkg = topology_phys_to_logical_pkg(phys_id);
4371                 if (pkg < 0)
4372                         continue;
4373                 else if (pkg == id)
4374                         break;
4375         }
4376         return mc_dev;
4377 }
4378
4379 static void snr_uncore_mmio_init_box(struct intel_uncore_box *box)
4380 {
4381         struct pci_dev *pdev = snr_uncore_get_mc_dev(box->dieid);
4382         unsigned int box_ctl = uncore_mmio_box_ctl(box);
4383         resource_size_t addr;
4384         u32 pci_dword;
4385
4386         if (!pdev)
4387                 return;
4388
4389         pci_read_config_dword(pdev, SNR_IMC_MMIO_BASE_OFFSET, &pci_dword);
4390         addr = ((resource_size_t)pci_dword & SNR_IMC_MMIO_BASE_MASK) << 23;
4391
4392         pci_read_config_dword(pdev, SNR_IMC_MMIO_MEM0_OFFSET, &pci_dword);
4393         addr |= (pci_dword & SNR_IMC_MMIO_MEM0_MASK) << 12;
4394
4395         addr += box_ctl;
4396
4397         box->io_addr = ioremap(addr, SNR_IMC_MMIO_SIZE);
4398         if (!box->io_addr)
4399                 return;
4400
4401         writel(IVBEP_PMON_BOX_CTL_INT, box->io_addr);
4402 }
4403
4404 static void snr_uncore_mmio_disable_box(struct intel_uncore_box *box)
4405 {
4406         u32 config;
4407
4408         if (!box->io_addr)
4409                 return;
4410
4411         config = readl(box->io_addr);
4412         config |= SNBEP_PMON_BOX_CTL_FRZ;
4413         writel(config, box->io_addr);
4414 }
4415
4416 static void snr_uncore_mmio_enable_box(struct intel_uncore_box *box)
4417 {
4418         u32 config;
4419
4420         if (!box->io_addr)
4421                 return;
4422
4423         config = readl(box->io_addr);
4424         config &= ~SNBEP_PMON_BOX_CTL_FRZ;
4425         writel(config, box->io_addr);
4426 }
4427
4428 static void snr_uncore_mmio_enable_event(struct intel_uncore_box *box,
4429                                            struct perf_event *event)
4430 {
4431         struct hw_perf_event *hwc = &event->hw;
4432
4433         if (!box->io_addr)
4434                 return;
4435
4436         writel(hwc->config | SNBEP_PMON_CTL_EN,
4437                box->io_addr + hwc->config_base);
4438 }
4439
4440 static void snr_uncore_mmio_disable_event(struct intel_uncore_box *box,
4441                                             struct perf_event *event)
4442 {
4443         struct hw_perf_event *hwc = &event->hw;
4444
4445         if (!box->io_addr)
4446                 return;
4447
4448         writel(hwc->config, box->io_addr + hwc->config_base);
4449 }
4450
/* Operations for the MMIO-accessed SNR IMC PMON boxes. */
static struct intel_uncore_ops snr_uncore_mmio_ops = {
	.init_box	= snr_uncore_mmio_init_box,
	.exit_box	= uncore_mmio_exit_box,		/* generic iounmap */
	.disable_box	= snr_uncore_mmio_disable_box,
	.enable_box	= snr_uncore_mmio_enable_box,
	.disable_event	= snr_uncore_mmio_disable_event,
	.enable_event	= snr_uncore_mmio_enable_event,
	.read_counter	= uncore_mmio_read_counter,	/* generic MMIO read */
};
4460
/*
 * Pre-defined IMC events.  The CAS-count scales are 64 / 2^20
 * (6.103515625e-5): each CAS moves one 64-byte cache line, reported
 * in MiB.
 */
static struct uncore_event_desc snr_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};
4471
/*
 * SNR IMC uncore unit: two MMIO-accessed PMON boxes, each with four
 * 48-bit general counters plus one 48-bit fixed counter.
 */
static struct intel_uncore_type snr_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNR_IMC_MMIO_PMON_FIXED_CTR,
	.fixed_ctl	= SNR_IMC_MMIO_PMON_FIXED_CTL,
	.event_descs	= snr_uncore_imc_events,
	.perf_ctr	= SNR_IMC_MMIO_PMON_CTR0,
	.event_ctl	= SNR_IMC_MMIO_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNR_IMC_MMIO_PMON_BOX_CTL,
	.mmio_offset	= SNR_IMC_MMIO_OFFSET,		/* stride between boxes */
	.ops		= &snr_uncore_mmio_ops,
	.format_group	= &skx_uncore_format_group,	/* reuses SKX format attrs */
};
4489
/* Free-running counter groups of the SNR IMC; indices into snr_imc_freerunning[]. */
enum perf_uncore_snr_imc_freerunning_type_id {
	SNR_IMC_DCLK,	/* DRAM clock ticks */
	SNR_IMC_DDR,	/* DDR read/write traffic */

	SNR_IMC_FREERUNNING_TYPE_MAX,
};
4496
/*
 * Free-running counter layout per type.
 * NOTE(review): field order follows struct freerunning_counters declared
 * in uncore.h — presumably {base offset, counter stride, box stride,
 * number of counters, counter width in bits}; confirm against the header.
 */
static struct freerunning_counters snr_imc_freerunning[] = {
	[SNR_IMC_DCLK]	= { 0x22b0, 0x0, 0, 1, 48 },
	[SNR_IMC_DDR]	= { 0x2290, 0x8, 0, 2, 48 },
};
4501
/*
 * Event aliases for the free-running IMC counters.  The read/write
 * scales are 4 / 2^20 (3.814697266e-6): counter units of 4 bytes,
 * reported in MiB.
 */
static struct uncore_event_desc snr_uncore_imc_freerunning_events[] = {
	INTEL_UNCORE_EVENT_DESC(dclk,		"event=0xff,umask=0x10"),

	INTEL_UNCORE_EVENT_DESC(read,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(read.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(read.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(write,		"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(write.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(write.unit,	"MiB"),
	{ /* end: all zeroes */ },
};
4513
/*
 * Ops for the free-running counters: no enable/disable callbacks, since
 * free-running counters cannot be started or stopped — they are only
 * mapped, read, and validated at event setup.
 */
static struct intel_uncore_ops snr_uncore_imc_freerunning_ops = {
	.init_box	= snr_uncore_mmio_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};
4520
/* Pseudo uncore type exposing the free-running IMC counters as a PMU. */
static struct intel_uncore_type snr_uncore_imc_free_running = {
	.name			= "imc_free_running",
	.num_counters		= 3,	/* 1 DCLK + 2 DDR counters */
	.num_boxes		= 1,
	.num_freerunning_types	= SNR_IMC_FREERUNNING_TYPE_MAX,
	.freerunning		= snr_imc_freerunning,
	.ops			= &snr_uncore_imc_freerunning_ops,
	.event_descs		= snr_uncore_imc_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
4531
/* All MMIO-accessed uncore units on SNR; NULL-terminated. */
static struct intel_uncore_type *snr_mmio_uncores[] = {
	&snr_uncore_imc,
	&snr_uncore_imc_free_running,
	NULL,
};
4537
/* Register the SNR MMIO uncore units with the common uncore framework. */
void snr_uncore_mmio_init(void)
{
	uncore_mmio_uncores = snr_mmio_uncores;
}
4542
4543 /* end of SNR uncore support */