GNU Linux-libre 4.9.318-gnu1
[releases.git] / arch / x86 / events / intel / uncore_snbep.c
1 /* SandyBridge-EP/IvyTown uncore support */
2 #include "uncore.h"
3
/* SNB-EP pci bus to socket mapping */
#define SNBEP_CPUNODEID                 0x40
#define SNBEP_GIDNIDMAP                 0x54

/* SNB-EP Box level control */
#define SNBEP_PMON_BOX_CTL_RST_CTRL     (1 << 0)
#define SNBEP_PMON_BOX_CTL_RST_CTRS     (1 << 1)
#define SNBEP_PMON_BOX_CTL_FRZ          (1 << 8)
#define SNBEP_PMON_BOX_CTL_FRZ_EN       (1 << 16)
/* Box init value: reset control + counters and allow later freezing */
#define SNBEP_PMON_BOX_CTL_INT          (SNBEP_PMON_BOX_CTL_RST_CTRL | \
                                         SNBEP_PMON_BOX_CTL_RST_CTRS | \
                                         SNBEP_PMON_BOX_CTL_FRZ_EN)
/* SNB-EP event control */
/* NOTE: "TRESH" (sic) is the spelling used throughout this file for the
 * event-threshold field; kept for consistency with the rest of the driver.
 */
#define SNBEP_PMON_CTL_EV_SEL_MASK      0x000000ff
#define SNBEP_PMON_CTL_UMASK_MASK       0x0000ff00
#define SNBEP_PMON_CTL_RST              (1 << 17)
#define SNBEP_PMON_CTL_EDGE_DET         (1 << 18)
#define SNBEP_PMON_CTL_EV_SEL_EXT       (1 << 21)
#define SNBEP_PMON_CTL_EN               (1 << 22)
#define SNBEP_PMON_CTL_INVERT           (1 << 23)
#define SNBEP_PMON_CTL_TRESH_MASK       0xff000000
#define SNBEP_PMON_RAW_EVENT_MASK       (SNBEP_PMON_CTL_EV_SEL_MASK | \
                                         SNBEP_PMON_CTL_UMASK_MASK | \
                                         SNBEP_PMON_CTL_EDGE_DET | \
                                         SNBEP_PMON_CTL_INVERT | \
                                         SNBEP_PMON_CTL_TRESH_MASK)

/* SNB-EP Ubox event control */
/* Ubox threshold field is only 5 bits wide, unlike the generic 8 bits */
#define SNBEP_U_MSR_PMON_CTL_TRESH_MASK         0x1f000000
#define SNBEP_U_MSR_PMON_RAW_EVENT_MASK         \
                                (SNBEP_PMON_CTL_EV_SEL_MASK | \
                                 SNBEP_PMON_CTL_UMASK_MASK | \
                                 SNBEP_PMON_CTL_EDGE_DET | \
                                 SNBEP_PMON_CTL_INVERT | \
                                 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)

#define SNBEP_CBO_PMON_CTL_TID_EN               (1 << 19)
#define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK       (SNBEP_PMON_RAW_EVENT_MASK | \
                                                 SNBEP_CBO_PMON_CTL_TID_EN)

/* SNB-EP PCU event control */
#define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK     0x0000c000
#define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK       0x1f000000
#define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT       (1 << 30)
#define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET     (1 << 31)
#define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK       \
                                (SNBEP_PMON_CTL_EV_SEL_MASK | \
                                 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
                                 SNBEP_PMON_CTL_EDGE_DET | \
                                 SNBEP_PMON_CTL_INVERT | \
                                 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
                                 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
                                 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)

#define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK       \
                                (SNBEP_PMON_RAW_EVENT_MASK | \
                                 SNBEP_PMON_CTL_EV_SEL_EXT)

/* SNB-EP pci control register */
#define SNBEP_PCI_PMON_BOX_CTL                  0xf4
#define SNBEP_PCI_PMON_CTL0                     0xd8
/* SNB-EP pci counter register */
#define SNBEP_PCI_PMON_CTR0                     0xa0

/* SNB-EP home agent register */
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0        0x40
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1        0x44
#define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH       0x48
/* SNB-EP memory controller register */
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL         0xf0
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR         0xd0
/* SNB-EP QPI register */
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0          0x228
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1          0x22c
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK0           0x238
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK1           0x23c

/* SNB-EP Ubox register */
#define SNBEP_U_MSR_PMON_CTR0                   0xc16
#define SNBEP_U_MSR_PMON_CTL0                   0xc10

#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL         0xc08
#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR         0xc09

/* SNB-EP Cbo register */
#define SNBEP_C0_MSR_PMON_CTR0                  0xd16
#define SNBEP_C0_MSR_PMON_CTL0                  0xd10
#define SNBEP_C0_MSR_PMON_BOX_CTL               0xd04
#define SNBEP_C0_MSR_PMON_BOX_FILTER            0xd14
/* Per-Cbox MSR stride: Cbox N registers live at C0 addresses + N * 0x20 */
#define SNBEP_CBO_MSR_OFFSET                    0x20

#define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID       0x1f
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID       0x3fc00
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE     0x7c0000
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC       0xff800000

/* Build an extra_reg entry targeting the Cbox filter MSR */
#define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) {    \
        .event = (e),                           \
        .msr = SNBEP_C0_MSR_PMON_BOX_FILTER,    \
        .config_mask = (m),                     \
        .idx = (i)                              \
}

/* SNB-EP PCU register */
#define SNBEP_PCU_MSR_PMON_CTR0                 0xc36
#define SNBEP_PCU_MSR_PMON_CTL0                 0xc30
#define SNBEP_PCU_MSR_PMON_BOX_CTL              0xc24
#define SNBEP_PCU_MSR_PMON_BOX_FILTER           0xc34
#define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK      0xffffffff
#define SNBEP_PCU_MSR_CORE_C3_CTR               0x3fc
#define SNBEP_PCU_MSR_CORE_C6_CTR               0x3fd

/* IVBEP event control */
/* Unlike SNB-EP, IVB-EP init deliberately omits FRZ_EN (freeze is driven
 * through the Ubox global control instead).
 */
#define IVBEP_PMON_BOX_CTL_INT          (SNBEP_PMON_BOX_CTL_RST_CTRL | \
                                         SNBEP_PMON_BOX_CTL_RST_CTRS)
#define IVBEP_PMON_RAW_EVENT_MASK               (SNBEP_PMON_CTL_EV_SEL_MASK | \
                                         SNBEP_PMON_CTL_UMASK_MASK | \
                                         SNBEP_PMON_CTL_EDGE_DET | \
                                         SNBEP_PMON_CTL_TRESH_MASK)
/* IVBEP Ubox */
#define IVBEP_U_MSR_PMON_GLOBAL_CTL             0xc00
#define IVBEP_U_PMON_GLOBAL_FRZ_ALL             (1 << 31)
#define IVBEP_U_PMON_GLOBAL_UNFRZ_ALL           (1 << 29)

#define IVBEP_U_MSR_PMON_RAW_EVENT_MASK \
                                (SNBEP_PMON_CTL_EV_SEL_MASK | \
                                 SNBEP_PMON_CTL_UMASK_MASK | \
                                 SNBEP_PMON_CTL_EDGE_DET | \
                                 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
/* IVBEP Cbo */
#define IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK               (IVBEP_PMON_RAW_EVENT_MASK | \
                                                 SNBEP_CBO_PMON_CTL_TID_EN)

/* IVB-EP Cbox filter fields (64-bit filter register layout) */
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_TID               (0x1fULL << 0)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK      (0xfULL << 5)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE     (0x3fULL << 17)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NID               (0xffffULL << 32)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC               (0x1ffULL << 52)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_C6                (0x1ULL << 61)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NC                (0x1ULL << 62)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC      (0x1ULL << 63)

/* IVBEP home agent */
#define IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST         (1 << 16)
#define IVBEP_HA_PCI_PMON_RAW_EVENT_MASK                \
                                (IVBEP_PMON_RAW_EVENT_MASK | \
                                 IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST)
/* IVBEP PCU */
#define IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK       \
                                (SNBEP_PMON_CTL_EV_SEL_MASK | \
                                 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
                                 SNBEP_PMON_CTL_EDGE_DET | \
                                 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
                                 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
                                 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
/* IVBEP QPI */
#define IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK       \
                                (IVBEP_PMON_RAW_EVENT_MASK | \
                                 SNBEP_PMON_CTL_EV_SEL_EXT)
163
/* Extract field i (n bits wide) from x, keeping x's type */
#define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
                                ((1ULL << (n)) - 1)))

/* Haswell-EP Ubox */
#define HSWEP_U_MSR_PMON_CTR0                   0x709
#define HSWEP_U_MSR_PMON_CTL0                   0x705
#define HSWEP_U_MSR_PMON_FILTER                 0x707

#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTL         0x703
#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTR         0x704

#define HSWEP_U_MSR_PMON_BOX_FILTER_TID         (0x1 << 0)
#define HSWEP_U_MSR_PMON_BOX_FILTER_CID         (0x1fULL << 1)
#define HSWEP_U_MSR_PMON_BOX_FILTER_MASK \
                                        (HSWEP_U_MSR_PMON_BOX_FILTER_TID | \
                                         HSWEP_U_MSR_PMON_BOX_FILTER_CID)

/* Haswell-EP CBo */
#define HSWEP_C0_MSR_PMON_CTR0                  0xe08
#define HSWEP_C0_MSR_PMON_CTL0                  0xe01
#define HSWEP_C0_MSR_PMON_BOX_CTL                       0xe00
#define HSWEP_C0_MSR_PMON_BOX_FILTER0           0xe05
/* HSW-EP packs Cbox register banks at a 0x10 stride (SNB-EP used 0x20) */
#define HSWEP_CBO_MSR_OFFSET                    0x10


#define HSWEP_CB0_MSR_PMON_BOX_FILTER_TID               (0x3fULL << 0)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK      (0xfULL << 6)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE     (0x7fULL << 17)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NID               (0xffffULL << 32)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC               (0x1ffULL << 52)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_C6                (0x1ULL << 61)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NC                (0x1ULL << 62)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC      (0x1ULL << 63)


/* Haswell-EP Sbox */
#define HSWEP_S0_MSR_PMON_CTR0                  0x726
#define HSWEP_S0_MSR_PMON_CTL0                  0x721
#define HSWEP_S0_MSR_PMON_BOX_CTL                       0x720
#define HSWEP_SBOX_MSR_OFFSET                   0xa
#define HSWEP_S_MSR_PMON_RAW_EVENT_MASK         (SNBEP_PMON_RAW_EVENT_MASK | \
                                                 SNBEP_CBO_PMON_CTL_TID_EN)

/* Haswell-EP PCU */
#define HSWEP_PCU_MSR_PMON_CTR0                 0x717
#define HSWEP_PCU_MSR_PMON_CTL0                 0x711
#define HSWEP_PCU_MSR_PMON_BOX_CTL              0x710
#define HSWEP_PCU_MSR_PMON_BOX_FILTER           0x715

/* KNL Ubox */
#define KNL_U_MSR_PMON_RAW_EVENT_MASK \
                                        (SNBEP_U_MSR_PMON_RAW_EVENT_MASK | \
                                                SNBEP_CBO_PMON_CTL_TID_EN)
/* KNL CHA */
#define KNL_CHA_MSR_OFFSET                      0xc
#define KNL_CHA_MSR_PMON_CTL_QOR                (1 << 16)
#define KNL_CHA_MSR_PMON_RAW_EVENT_MASK \
                                        (SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK | \
                                         KNL_CHA_MSR_PMON_CTL_QOR)
#define KNL_CHA_MSR_PMON_BOX_FILTER_TID         0x1ff
#define KNL_CHA_MSR_PMON_BOX_FILTER_STATE       (7 << 18)
#define KNL_CHA_MSR_PMON_BOX_FILTER_OP          (0xfffffe2aULL << 32)
#define KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE (0x1ULL << 32)
#define KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE  (0x1ULL << 33)
#define KNL_CHA_MSR_PMON_BOX_FILTER_NNC         (0x1ULL << 37)

/* KNL EDC/MC UCLK */
#define KNL_UCLK_MSR_PMON_CTR0_LOW              0x400
#define KNL_UCLK_MSR_PMON_CTL0                  0x420
#define KNL_UCLK_MSR_PMON_BOX_CTL               0x430
#define KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW        0x44c
#define KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL        0x454
#define KNL_PMON_FIXED_CTL_EN                   0x1

/* KNL EDC */
#define KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW         0xa00
#define KNL_EDC0_ECLK_MSR_PMON_CTL0             0xa20
#define KNL_EDC0_ECLK_MSR_PMON_BOX_CTL          0xa30
#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW   0xa3c
#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL   0xa44

/* KNL MC */
#define KNL_MC0_CH0_MSR_PMON_CTR0_LOW           0xb00
#define KNL_MC0_CH0_MSR_PMON_CTL0               0xb20
#define KNL_MC0_CH0_MSR_PMON_BOX_CTL            0xb30
#define KNL_MC0_CH0_MSR_PMON_FIXED_LOW          0xb3c
#define KNL_MC0_CH0_MSR_PMON_FIXED_CTL          0xb44

/* KNL IRP */
#define KNL_IRP_PCI_PMON_BOX_CTL                0xf0
#define KNL_IRP_PCI_PMON_RAW_EVENT_MASK         (SNBEP_PMON_RAW_EVENT_MASK | \
                                                 KNL_CHA_MSR_PMON_CTL_QOR)
/* KNL PCU */
#define KNL_PCU_PMON_CTL_EV_SEL_MASK            0x0000007f
#define KNL_PCU_PMON_CTL_USE_OCC_CTR            (1 << 7)
#define KNL_PCU_MSR_PMON_CTL_TRESH_MASK         0x3f000000
#define KNL_PCU_MSR_PMON_RAW_EVENT_MASK \
                                (KNL_PCU_PMON_CTL_EV_SEL_MASK | \
                                 KNL_PCU_PMON_CTL_USE_OCC_CTR | \
                                 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
                                 SNBEP_PMON_CTL_EDGE_DET | \
                                 SNBEP_CBO_PMON_CTL_TID_EN | \
                                 SNBEP_PMON_CTL_INVERT | \
                                 KNL_PCU_MSR_PMON_CTL_TRESH_MASK | \
                                 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
                                 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)

/* SKX pci bus to socket mapping */
#define SKX_CPUNODEID                   0xc0
#define SKX_GIDNIDMAP                   0xd4

/* SKX CHA */
#define SKX_CHA_MSR_PMON_BOX_FILTER_TID         (0x1ffULL << 0)
#define SKX_CHA_MSR_PMON_BOX_FILTER_LINK        (0xfULL << 9)
#define SKX_CHA_MSR_PMON_BOX_FILTER_STATE       (0x3ffULL << 17)
#define SKX_CHA_MSR_PMON_BOX_FILTER_REM         (0x1ULL << 32)
#define SKX_CHA_MSR_PMON_BOX_FILTER_LOC         (0x1ULL << 33)
#define SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC     (0x1ULL << 35)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NM          (0x1ULL << 36)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM      (0x1ULL << 37)
#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC0        (0x3ffULL << 41)
#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC1        (0x3ffULL << 51)
#define SKX_CHA_MSR_PMON_BOX_FILTER_C6          (0x1ULL << 61)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NC          (0x1ULL << 62)
#define SKX_CHA_MSR_PMON_BOX_FILTER_ISOC        (0x1ULL << 63)

/* SKX IIO */
#define SKX_IIO0_MSR_PMON_CTL0          0xa48
#define SKX_IIO0_MSR_PMON_CTR0          0xa41
#define SKX_IIO0_MSR_PMON_BOX_CTL       0xa40
#define SKX_IIO_MSR_OFFSET              0x20

/* SKX widens the threshold field; the low 4 extra bits live in config's
 * extended word (see SKX_IIO_PMON_RAW_EVENT_MASK_EXT below).
 */
#define SKX_PMON_CTL_TRESH_MASK         (0xff << 24)
#define SKX_PMON_CTL_TRESH_MASK_EXT     (0xf)
#define SKX_PMON_CTL_CH_MASK            (0xff << 4)
#define SKX_PMON_CTL_FC_MASK            (0x7 << 12)
#define SKX_IIO_PMON_RAW_EVENT_MASK     (SNBEP_PMON_CTL_EV_SEL_MASK | \
                                         SNBEP_PMON_CTL_UMASK_MASK | \
                                         SNBEP_PMON_CTL_EDGE_DET | \
                                         SNBEP_PMON_CTL_INVERT | \
                                         SKX_PMON_CTL_TRESH_MASK)
#define SKX_IIO_PMON_RAW_EVENT_MASK_EXT (SKX_PMON_CTL_TRESH_MASK_EXT | \
                                         SKX_PMON_CTL_CH_MASK | \
                                         SKX_PMON_CTL_FC_MASK)

/* SKX IRP */
#define SKX_IRP0_MSR_PMON_CTL0          0xa5b
#define SKX_IRP0_MSR_PMON_CTR0          0xa59
#define SKX_IRP0_MSR_PMON_BOX_CTL       0xa58
#define SKX_IRP_MSR_OFFSET              0x20

/* SKX UPI */
#define SKX_UPI_PCI_PMON_CTL0           0x350
#define SKX_UPI_PCI_PMON_CTR0           0x318
#define SKX_UPI_PCI_PMON_BOX_CTL        0x378
#define SKX_PMON_CTL_UMASK_EXT          0xff

/* SKX M2M */
#define SKX_M2M_PCI_PMON_CTL0           0x228
#define SKX_M2M_PCI_PMON_CTR0           0x200
#define SKX_M2M_PCI_PMON_BOX_CTL        0x258
325
/*
 * sysfs "format" attributes: each entry maps a user-visible field name to
 * the config/config1/config2 bit range it occupies.  Numbered variants
 * (thresh5/6/8/9, filter_tid2..4, ...) are per-uarch layouts of the same
 * logical field.
 */
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6");
DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
DEFINE_UNCORE_FORMAT_ATTR(use_occ_ctr, use_occ_ctr, "config:7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-39");
DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
/* NOTE(review): "thresh9" covers 12 bits (24-35) despite the name — verify
 * against the SKX uncore documentation before relying on the suffix.
 */
DEFINE_UNCORE_FORMAT_ATTR(thresh9, thresh, "config:24-35");
DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(thresh6, thresh, "config:24-29");
DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
/* NOTE(review): occ_edge spans 38 bits (14-51), far wider than the other
 * occupancy fields — looks suspicious but matches the existing table; confirm
 * against hardware docs before changing.
 */
DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge_det, occ_edge_det, "config:31");
DEFINE_UNCORE_FORMAT_ATTR(ch_mask, ch_mask, "config:36-43");
DEFINE_UNCORE_FORMAT_ATTR(fc_mask, fc_mask, "config:44-46");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid4, filter_tid, "config1:0-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5");
DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link3, filter_link, "config1:12");
DEFINE_UNCORE_FORMAT_ATTR(filter_link4, filter_link, "config1:9-12");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state3, filter_state, "config1:17-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_state4, filter_state, "config1:18-20");
DEFINE_UNCORE_FORMAT_ATTR(filter_state5, filter_state, "config1:17-26");
DEFINE_UNCORE_FORMAT_ATTR(filter_rem, filter_rem, "config1:32");
DEFINE_UNCORE_FORMAT_ATTR(filter_loc, filter_loc, "config1:33");
DEFINE_UNCORE_FORMAT_ATTR(filter_nm, filter_nm, "config1:36");
DEFINE_UNCORE_FORMAT_ATTR(filter_not_nm, filter_not_nm, "config1:37");
DEFINE_UNCORE_FORMAT_ATTR(filter_local, filter_local, "config1:33");
DEFINE_UNCORE_FORMAT_ATTR(filter_all_op, filter_all_op, "config1:35");
DEFINE_UNCORE_FORMAT_ATTR(filter_nnm, filter_nnm, "config1:37");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc3, filter_opc, "config1:41-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc_0, filter_opc0, "config1:41-50");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc_1, filter_opc1, "config1:51-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_nc, filter_nc, "config1:62");
DEFINE_UNCORE_FORMAT_ATTR(filter_c6, filter_c6, "config1:61");
DEFINE_UNCORE_FORMAT_ATTR(filter_isoc, filter_isoc, "config1:63");
DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");
399
400 static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
401 {
402         struct pci_dev *pdev = box->pci_dev;
403         int box_ctl = uncore_pci_box_ctl(box);
404         u32 config = 0;
405
406         if (!pci_read_config_dword(pdev, box_ctl, &config)) {
407                 config |= SNBEP_PMON_BOX_CTL_FRZ;
408                 pci_write_config_dword(pdev, box_ctl, config);
409         }
410 }
411
412 static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
413 {
414         struct pci_dev *pdev = box->pci_dev;
415         int box_ctl = uncore_pci_box_ctl(box);
416         u32 config = 0;
417
418         if (!pci_read_config_dword(pdev, box_ctl, &config)) {
419                 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
420                 pci_write_config_dword(pdev, box_ctl, config);
421         }
422 }
423
424 static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
425 {
426         struct pci_dev *pdev = box->pci_dev;
427         struct hw_perf_event *hwc = &event->hw;
428
429         pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
430 }
431
/* Stop one event: rewrite its control register without SNBEP_PMON_CTL_EN. */
static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
        struct pci_dev *pdev = box->pci_dev;
        struct hw_perf_event *hwc = &event->hw;

        /* hwc->config never has the EN bit set, so this disables counting */
        pci_write_config_dword(pdev, hwc->config_base, hwc->config);
}
439
440 static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
441 {
442         struct pci_dev *pdev = box->pci_dev;
443         struct hw_perf_event *hwc = &event->hw;
444         u64 count = 0;
445
446         pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
447         pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);
448
449         return count;
450 }
451
/* Bring a PCI box to a known state: reset control + counters, enable the
 * freeze mechanism (SNBEP_PMON_BOX_CTL_INT).
 */
static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
{
        struct pci_dev *pdev = box->pci_dev;
        int box_ctl = uncore_pci_box_ctl(box);

        pci_write_config_dword(pdev, box_ctl, SNBEP_PMON_BOX_CTL_INT);
}
459
460 static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
461 {
462         u64 config;
463         unsigned msr;
464
465         msr = uncore_msr_box_ctl(box);
466         if (msr) {
467                 rdmsrl(msr, config);
468                 config |= SNBEP_PMON_BOX_CTL_FRZ;
469                 wrmsrl(msr, config);
470         }
471 }
472
473 static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
474 {
475         u64 config;
476         unsigned msr;
477
478         msr = uncore_msr_box_ctl(box);
479         if (msr) {
480                 rdmsrl(msr, config);
481                 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
482                 wrmsrl(msr, config);
483         }
484 }
485
/* Start one MSR-based event.  If the event carries an extra (filter)
 * register, program that filter MSR before enabling the counter so the
 * event never counts with a stale filter.
 */
static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

        /* NOTE(review): shared-reg slot index is hard-coded to 0 here —
         * presumably SNB-EP boxes only use one filter slot; confirm against
         * uncore_shared_reg_config() before reusing this helper elsewhere.
         */
        if (reg1->idx != EXTRA_REG_NONE)
                wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));

        wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
496
/* Stop one MSR-based event: rewrite its control MSR without the EN bit. */
static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
                                        struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        wrmsrl(hwc->config_base, hwc->config);
}
504
505 static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
506 {
507         unsigned msr = uncore_msr_box_ctl(box);
508
509         if (msr)
510                 wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
511 }
512
/* Generic SNB-EP format: 8-bit event/umask/threshold plus edge and invert */
static struct attribute *snbep_uncore_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_thresh8.attr,
        NULL,
};
521
/* Ubox format: like the generic set but with the narrower 5-bit threshold */
static struct attribute *snbep_uncore_ubox_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_thresh5.attr,
        NULL,
};
530
/* Cbox format: adds tid_en plus the tid/nid/state/opc filter fields that
 * land in the Cbox filter MSR (config1).
 */
static struct attribute *snbep_uncore_cbox_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_tid_en.attr,
        &format_attr_inv.attr,
        &format_attr_thresh8.attr,
        &format_attr_filter_tid.attr,
        &format_attr_filter_nid.attr,
        &format_attr_filter_state.attr,
        &format_attr_filter_opc.attr,
        NULL,
};
544
/* PCU format: occupancy select/edge/invert plus the four frequency-band
 * filters carried in config1.
 */
static struct attribute *snbep_uncore_pcu_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_occ_sel.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_thresh5.attr,
        &format_attr_occ_invert.attr,
        &format_attr_occ_edge.attr,
        &format_attr_filter_band0.attr,
        &format_attr_filter_band1.attr,
        &format_attr_filter_band2.attr,
        &format_attr_filter_band3.attr,
        NULL,
};
559
/* QPI format: extended event select plus packet match (config1) and
 * packet mask (config2) fields.
 */
static struct attribute *snbep_uncore_qpi_formats_attr[] = {
        &format_attr_event_ext.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_thresh8.attr,
        &format_attr_match_rds.attr,
        &format_attr_match_rnid30.attr,
        &format_attr_match_rnid4.attr,
        &format_attr_match_dnid.attr,
        &format_attr_match_mc.attr,
        &format_attr_match_opc.attr,
        &format_attr_match_vnw.attr,
        &format_attr_match0.attr,
        &format_attr_match1.attr,
        &format_attr_mask_rds.attr,
        &format_attr_mask_rnid30.attr,
        &format_attr_mask_rnid4.attr,
        &format_attr_mask_dnid.attr,
        &format_attr_mask_mc.attr,
        &format_attr_mask_opc.attr,
        &format_attr_mask_vnw.attr,
        &format_attr_mask0.attr,
        &format_attr_mask1.attr,
        NULL,
};
586
/* IMC events.  CAS counts are scaled by 6.103515625e-5 = 64 / 2^20:
 * each CAS moves one 64-byte cache line, so the scaled value is MiB.
 */
static struct uncore_event_desc snbep_uncore_imc_events[] = {
        INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0xff,umask=0x00"),
        INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
        INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
        INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
        INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
        INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
        INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
        { /* end: all zeroes */ },
};
597
/* Predefined QPI link events (event 0x102/0x103 use the extended select bit) */
static struct uncore_event_desc snbep_uncore_qpi_events[] = {
        INTEL_UNCORE_EVENT_DESC(clockticks,       "event=0x14"),
        INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
        INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x102,umask=0x08"),
        INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x103,umask=0x04"),
        { /* end: all zeroes */ },
};
605
/* sysfs "format" groups wrapping the attribute arrays above, one per box type */
static struct attribute_group snbep_uncore_format_group = {
        .name = "format",
        .attrs = snbep_uncore_formats_attr,
};

static struct attribute_group snbep_uncore_ubox_format_group = {
        .name = "format",
        .attrs = snbep_uncore_ubox_formats_attr,
};

static struct attribute_group snbep_uncore_cbox_format_group = {
        .name = "format",
        .attrs = snbep_uncore_cbox_formats_attr,
};

static struct attribute_group snbep_uncore_pcu_format_group = {
        .name = "format",
        .attrs = snbep_uncore_pcu_formats_attr,
};

static struct attribute_group snbep_uncore_qpi_format_group = {
        .name = "format",
        .attrs = snbep_uncore_qpi_formats_attr,
};
630
/* Common MSR ops; the __ variant omits init_box so later uarchs (e.g. IVB-EP)
 * can supply their own init while sharing the rest.
 */
#define __SNBEP_UNCORE_MSR_OPS_COMMON_INIT()                    \
        .disable_box    = snbep_uncore_msr_disable_box,         \
        .enable_box     = snbep_uncore_msr_enable_box,          \
        .disable_event  = snbep_uncore_msr_disable_event,       \
        .enable_event   = snbep_uncore_msr_enable_event,        \
        .read_counter   = uncore_msr_read_counter

#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()                      \
        __SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),                   \
        .init_box       = snbep_uncore_msr_init_box             \

static struct intel_uncore_ops snbep_uncore_msr_ops = {
        SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};
645
646 #define SNBEP_UNCORE_PCI_OPS_COMMON_INIT()                      \
647         .init_box       = snbep_uncore_pci_init_box,            \
648         .disable_box    = snbep_uncore_pci_disable_box,         \
649         .enable_box     = snbep_uncore_pci_enable_box,          \
650         .disable_event  = snbep_uncore_pci_disable_event,       \
651         .read_counter   = snbep_uncore_pci_read_counter
652
653 static struct intel_uncore_ops snbep_uncore_pci_ops = {
654         SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
655         .enable_event   = snbep_uncore_pci_enable_event,        \
656 };
657
/* Cbox counter constraints: each entry maps an event code to the bitmask of
 * counters it may use.  0x1f needs EVENT_CONSTRAINT_OVERLAP because its
 * counter mask (0xe) overlaps other masks without being a subset/superset.
 */
static struct event_constraint snbep_uncore_cbox_constraints[] = {
        UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
        UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
        UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
        UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
        EVENT_CONSTRAINT_OVERLAP(0x1f, 0xe, 0xff),
        UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
        EVENT_CONSTRAINT_END
};
687
/* Counter constraints for the SNB-EP R2PCIe box (event -> counter mask). */
static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
        UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
        EVENT_CONSTRAINT_END
};
701
/* Counter constraints for the SNB-EP R3QPI box (event -> counter mask). */
static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
        UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
        EVENT_CONSTRAINT_END
};
733
/*
 * SNB-EP Ubox PMON: 2 general-purpose counters (44 bits) plus a 48-bit
 * fixed counter pair, all MSR based.
 */
static struct intel_uncore_type snbep_uncore_ubox = {
        .name           = "ubox",
        .num_counters   = 2,
        .num_boxes      = 1,
        .perf_ctr_bits  = 44,
        .fixed_ctr_bits = 48,
        .perf_ctr       = SNBEP_U_MSR_PMON_CTR0,
        .event_ctl      = SNBEP_U_MSR_PMON_CTL0,
        .event_mask     = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
        .fixed_ctr      = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
        .fixed_ctl      = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
        .ops            = &snbep_uncore_msr_ops,
        .format_group   = &snbep_uncore_ubox_format_group,
};
748
/*
 * Filter-register usage per Cbox event/umask combination.  The third
 * argument is a bitmask of filter fields the event consumes; the
 * per-field meanings are decoded by snbep_cbox_filter_mask().
 */
static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
        SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
                                  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
        EVENT_EXTRA_END
};
777
778 static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
779 {
780         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
781         struct intel_uncore_extra_reg *er = &box->shared_regs[0];
782         int i;
783
784         if (uncore_box_is_fake(box))
785                 return;
786
787         for (i = 0; i < 5; i++) {
788                 if (reg1->alloc & (0x1 << i))
789                         atomic_sub(1 << (i * 6), &er->ref);
790         }
791         reg1->alloc = 0;
792 }
793
/*
 * Try to claim every filter field this event needs in the box's single
 * shared filter register.  A field can be taken when nobody uses it yet
 * (its 6-bit refcount inside er->ref is zero) or when the existing
 * contents already match the requested value.  On conflict, roll back
 * the references taken so far and report the empty constraint so the
 * event cannot be scheduled.
 */
static struct event_constraint *
__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
                            u64 (*cbox_filter_mask)(int fields))
{
        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
        struct intel_uncore_extra_reg *er = &box->shared_regs[0];
        int i, alloc = 0;
        unsigned long flags;
        u64 mask;

        /* Event uses no filter fields: nothing to arbitrate. */
        if (reg1->idx == EXTRA_REG_NONE)
                return NULL;

        raw_spin_lock_irqsave(&er->lock, flags);
        for (i = 0; i < 5; i++) {
                if (!(reg1->idx & (0x1 << i)))
                        continue;
                /* Real boxes keep fields across calls; skip ones already held. */
                if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
                        continue;

                mask = cbox_filter_mask(0x1 << i);
                if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
                    !((reg1->config ^ er->config) & mask)) {
                        atomic_add(1 << (i * 6), &er->ref);
                        er->config &= ~mask;
                        er->config |= reg1->config & mask;
                        alloc |= (0x1 << i);
                } else {
                        break;
                }
        }
        raw_spin_unlock_irqrestore(&er->lock, flags);
        /* Loop broke early: some field conflicted. */
        if (i < 5)
                goto fail;

        /* Record what we hold so put_constraint() can release it later. */
        if (!uncore_box_is_fake(box))
                reg1->alloc |= alloc;

        return NULL;
fail:
        for (; i >= 0; i--) {
                if (alloc & (0x1 << i))
                        atomic_sub(1 << (i * 6), &er->ref);
        }
        return &uncore_constraint_empty;
}
840
841 static u64 snbep_cbox_filter_mask(int fields)
842 {
843         u64 mask = 0;
844
845         if (fields & 0x1)
846                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
847         if (fields & 0x2)
848                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
849         if (fields & 0x4)
850                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
851         if (fields & 0x8)
852                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
853
854         return mask;
855 }
856
/* SNB-EP wrapper: arbitrate filter fields using the SNB-EP field layout. */
static struct event_constraint *
snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
        return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
}
862
863 static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
864 {
865         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
866         struct extra_reg *er;
867         int idx = 0;
868
869         for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
870                 if (er->event != (event->hw.config & er->config_mask))
871                         continue;
872                 idx |= er->idx;
873         }
874
875         if (idx) {
876                 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
877                         SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
878                 reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
879                 reg1->idx = idx;
880         }
881         return 0;
882 }
883
/* Cbox ops: common MSR ops plus filter-register configuration/arbitration. */
static struct intel_uncore_ops snbep_uncore_cbox_ops = {
        SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
        .hw_config              = snbep_cbox_hw_config,
        .get_constraint         = snbep_cbox_get_constraint,
        .put_constraint         = snbep_cbox_put_constraint,
};
890
/*
 * SNB-EP Cbox PMON: up to 8 boxes (clamped to core count in
 * snbep_uncore_cpu_init()), 4 counters each, with one shared filter
 * register per box.
 */
static struct intel_uncore_type snbep_uncore_cbox = {
        .name                   = "cbox",
        .num_counters           = 4,
        .num_boxes              = 8,
        .perf_ctr_bits          = 44,
        .event_ctl              = SNBEP_C0_MSR_PMON_CTL0,
        .perf_ctr               = SNBEP_C0_MSR_PMON_CTR0,
        .event_mask             = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
        .box_ctl                = SNBEP_C0_MSR_PMON_BOX_CTL,
        .msr_offset             = SNBEP_CBO_MSR_OFFSET,
        .num_shared_regs        = 1,
        .constraints            = snbep_uncore_cbox_constraints,
        .ops                    = &snbep_uncore_cbox_ops,
        .format_group           = &snbep_uncore_cbox_format_group,
};
906
907 static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
908 {
909         struct hw_perf_event *hwc = &event->hw;
910         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
911         u64 config = reg1->config;
912
913         if (new_idx > reg1->idx)
914                 config <<= 8 * (new_idx - reg1->idx);
915         else
916                 config >>= 8 * (reg1->idx - new_idx);
917
918         if (modify) {
919                 hwc->config += new_idx - reg1->idx;
920                 reg1->config = config;
921                 reg1->idx = new_idx;
922         }
923         return config;
924 }
925
/*
 * The PCU filter register holds four independent 8-bit bands.  Try to
 * claim the band at reg1->idx; if it is already taken with a different
 * value, rotate through the remaining bands (shifting the event's
 * filter value to match via snbep_pcu_alter_er()) before giving up and
 * returning the empty constraint.
 */
static struct event_constraint *
snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
        struct intel_uncore_extra_reg *er = &box->shared_regs[0];
        unsigned long flags;
        int idx = reg1->idx;
        u64 mask, config1 = reg1->config;
        bool ok = false;

        /* No filter in use, or a real box that already holds its band. */
        if (reg1->idx == EXTRA_REG_NONE ||
            (!uncore_box_is_fake(box) && reg1->alloc))
                return NULL;
again:
        mask = 0xffULL << (idx * 8);
        raw_spin_lock_irqsave(&er->lock, flags);
        /* Band is free (8-bit refcount zero) or already holds our value. */
        if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
            !((config1 ^ er->config) & mask)) {
                atomic_add(1 << (idx * 8), &er->ref);
                er->config &= ~mask;
                er->config |= config1 & mask;
                ok = true;
        }
        raw_spin_unlock_irqrestore(&er->lock, flags);

        if (!ok) {
                idx = (idx + 1) % 4;
                if (idx != reg1->idx) {
                        config1 = snbep_pcu_alter_er(event, idx, false);
                        goto again;
                }
                return &uncore_constraint_empty;
        }

        if (!uncore_box_is_fake(box)) {
                /* Commit the band switch and remember we hold a reference. */
                if (idx != reg1->idx)
                        snbep_pcu_alter_er(event, idx, true);
                reg1->alloc = 1;
        }
        return NULL;
}
967
968 static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
969 {
970         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
971         struct intel_uncore_extra_reg *er = &box->shared_regs[0];
972
973         if (uncore_box_is_fake(box) || !reg1->alloc)
974                 return;
975
976         atomic_sub(1 << (reg1->idx * 8), &er->ref);
977         reg1->alloc = 0;
978 }
979
980 static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
981 {
982         struct hw_perf_event *hwc = &event->hw;
983         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
984         int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
985
986         if (ev_sel >= 0xb && ev_sel <= 0xe) {
987                 reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
988                 reg1->idx = ev_sel - 0xb;
989                 reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
990         }
991         return 0;
992 }
993
/* PCU ops: common MSR ops plus filter-band configuration/arbitration. */
static struct intel_uncore_ops snbep_uncore_pcu_ops = {
        SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
        .hw_config              = snbep_pcu_hw_config,
        .get_constraint         = snbep_pcu_get_constraint,
        .put_constraint         = snbep_pcu_put_constraint,
};
1000
/* SNB-EP PCU PMON: one box, 4 counters, one shared filter register. */
static struct intel_uncore_type snbep_uncore_pcu = {
        .name                   = "pcu",
        .num_counters           = 4,
        .num_boxes              = 1,
        .perf_ctr_bits          = 48,
        .perf_ctr               = SNBEP_PCU_MSR_PMON_CTR0,
        .event_ctl              = SNBEP_PCU_MSR_PMON_CTL0,
        .event_mask             = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
        .box_ctl                = SNBEP_PCU_MSR_PMON_BOX_CTL,
        .num_shared_regs        = 1,
        .ops                    = &snbep_uncore_pcu_ops,
        .format_group           = &snbep_uncore_pcu_format_group,
};
1014
/* NULL-terminated list of MSR-based SNB-EP uncore types. */
static struct intel_uncore_type *snbep_msr_uncores[] = {
        &snbep_uncore_ubox,
        &snbep_uncore_cbox,
        &snbep_uncore_pcu,
        NULL,
};
1021
1022 void snbep_uncore_cpu_init(void)
1023 {
1024         if (snbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
1025                 snbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
1026         uncore_msr_uncores = snbep_msr_uncores;
1027 }
1028
/*
 * Indices into uncore_extra_pci_dev[pkg].dev[] for auxiliary PCI devices
 * that are not PMON boxes themselves (QPI port filter devices; HSW-EP
 * PCU unit 3).
 */
enum {
        SNBEP_PCI_QPI_PORT0_FILTER,
        SNBEP_PCI_QPI_PORT1_FILTER,
        HSWEP_PCI_PCU_3,
};
1034
1035 static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1036 {
1037         struct hw_perf_event *hwc = &event->hw;
1038         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1039         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1040
1041         if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
1042                 reg1->idx = 0;
1043                 reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
1044                 reg1->config = event->attr.config1;
1045                 reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
1046                 reg2->config = event->attr.config2;
1047         }
1048         return 0;
1049 }
1050
/*
 * Program the packet match/mask filter registers -- which live on a
 * separate companion "filter" PCI device, one per QPI port -- and then
 * enable the counter on the PMON device itself.  Each 64-bit filter
 * value is written as two 32-bit config-space dwords.
 */
static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
        struct pci_dev *pdev = box->pci_dev;
        struct hw_perf_event *hwc = &event->hw;
        struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
        struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

        if (reg1->idx != EXTRA_REG_NONE) {
                int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
                int pkg = topology_phys_to_logical_pkg(box->pci_phys_id);
                struct pci_dev *filter_pdev = uncore_extra_pci_dev[pkg].dev[idx];

                /* The filter device may be absent; skip programming then. */
                if (filter_pdev) {
                        pci_write_config_dword(filter_pdev, reg1->reg,
                                                (u32)reg1->config);
                        pci_write_config_dword(filter_pdev, reg1->reg + 4,
                                                (u32)(reg1->config >> 32));
                        pci_write_config_dword(filter_pdev, reg2->reg,
                                                (u32)reg2->config);
                        pci_write_config_dword(filter_pdev, reg2->reg + 4,
                                                (u32)(reg2->config >> 32));
                }
        }

        pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
1077
/* QPI ops: common PCI ops with a filter-aware enable_event and hw_config. */
static struct intel_uncore_ops snbep_uncore_qpi_ops = {
        SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
        .enable_event           = snbep_qpi_enable_event,
        .hw_config              = snbep_qpi_hw_config,
        .get_constraint         = uncore_get_constraint,
        .put_constraint         = uncore_put_constraint,
};
1085
/* Field defaults shared by all plain SNB-EP PCI PMON box types. */
#define SNBEP_UNCORE_PCI_COMMON_INIT()                          \
        .perf_ctr       = SNBEP_PCI_PMON_CTR0,                  \
        .event_ctl      = SNBEP_PCI_PMON_CTL0,                  \
        .event_mask     = SNBEP_PMON_RAW_EVENT_MASK,            \
        .box_ctl        = SNBEP_PCI_PMON_BOX_CTL,               \
        .ops            = &snbep_uncore_pci_ops,                \
        .format_group   = &snbep_uncore_format_group
1093
/* SNB-EP Home Agent PMON: one box, 4 counters. */
static struct intel_uncore_type snbep_uncore_ha = {
        .name           = "ha",
        .num_counters   = 4,
        .num_boxes      = 1,
        .perf_ctr_bits  = 48,
        SNBEP_UNCORE_PCI_COMMON_INIT(),
};
1101
/* SNB-EP IMC PMON: 4 channels, 4 counters each, plus a fixed counter. */
static struct intel_uncore_type snbep_uncore_imc = {
        .name           = "imc",
        .num_counters   = 4,
        .num_boxes      = 4,
        .perf_ctr_bits  = 48,
        .fixed_ctr_bits = 48,
        .fixed_ctr      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
        .fixed_ctl      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
        .event_descs    = snbep_uncore_imc_events,
        SNBEP_UNCORE_PCI_COMMON_INIT(),
};
1113
/*
 * SNB-EP QPI PMON: two ports; uses its own ops/format group for the
 * packet match/mask filter support and one shared register slot.
 */
static struct intel_uncore_type snbep_uncore_qpi = {
        .name                   = "qpi",
        .num_counters           = 4,
        .num_boxes              = 2,
        .perf_ctr_bits          = 48,
        .perf_ctr               = SNBEP_PCI_PMON_CTR0,
        .event_ctl              = SNBEP_PCI_PMON_CTL0,
        .event_mask             = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
        .box_ctl                = SNBEP_PCI_PMON_BOX_CTL,
        .num_shared_regs        = 1,
        .ops                    = &snbep_uncore_qpi_ops,
        .event_descs            = snbep_uncore_qpi_events,
        .format_group           = &snbep_uncore_qpi_format_group,
};
1128
1129
/* SNB-EP R2PCIe PMON: one box, 4 counters, with counter constraints. */
static struct intel_uncore_type snbep_uncore_r2pcie = {
        .name           = "r2pcie",
        .num_counters   = 4,
        .num_boxes      = 1,
        .perf_ctr_bits  = 44,
        .constraints    = snbep_uncore_r2pcie_constraints,
        SNBEP_UNCORE_PCI_COMMON_INIT(),
};
1138
/* SNB-EP R3QPI PMON: two links, 3 counters each, with counter constraints. */
static struct intel_uncore_type snbep_uncore_r3qpi = {
        .name           = "r3qpi",
        .num_counters   = 3,
        .num_boxes      = 2,
        .perf_ctr_bits  = 44,
        .constraints    = snbep_uncore_r3qpi_constraints,
        SNBEP_UNCORE_PCI_COMMON_INIT(),
};
1147
/* Indices into snbep_pci_uncores[], referenced by the PCI ID table below. */
enum {
        SNBEP_PCI_UNCORE_HA,
        SNBEP_PCI_UNCORE_IMC,
        SNBEP_PCI_UNCORE_QPI,
        SNBEP_PCI_UNCORE_R2PCIE,
        SNBEP_PCI_UNCORE_R3QPI,
};
1155
/* NULL-terminated list of PCI-based SNB-EP uncore types. */
static struct intel_uncore_type *snbep_pci_uncores[] = {
        [SNBEP_PCI_UNCORE_HA]           = &snbep_uncore_ha,
        [SNBEP_PCI_UNCORE_IMC]          = &snbep_uncore_imc,
        [SNBEP_PCI_UNCORE_QPI]          = &snbep_uncore_qpi,
        [SNBEP_PCI_UNCORE_R2PCIE]       = &snbep_uncore_r2pcie,
        [SNBEP_PCI_UNCORE_R3QPI]        = &snbep_uncore_r3qpi,
        NULL,
};
1164
/*
 * PCI IDs of the SNB-EP uncore PMON devices, plus the QPI port filter
 * devices which are registered as extra (non-PMON) devices.
 */
static const struct pci_device_id snbep_uncore_pci_ids[] = {
        { /* Home Agent */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
                .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
        },
        { /* MC Channel 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
                .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
        },
        { /* MC Channel 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
                .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
        },
        { /* MC Channel 2 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
                .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
        },
        { /* MC Channel 3 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
                .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
        },
        { /* QPI Port 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
                .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
        },
        { /* QPI Port 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
                .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
        },
        { /* R2PCIe */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
                .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
        },
        { /* R3QPI Link 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
                .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
        },
        { /* R3QPI Link 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
                .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
        },
        { /* QPI Port 0 filter  */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
                .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
                                                   SNBEP_PCI_QPI_PORT0_FILTER),
        },
        { /* QPI Port 1 filter  */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
                .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
                                                   SNBEP_PCI_QPI_PORT1_FILTER),
        },
        { /* end: all zeroes */ }
};
1218
/* PCI driver matching the SNB-EP uncore PMON devices listed above. */
static struct pci_driver snbep_uncore_pci_driver = {
        .name           = "snbep_uncore",
        .id_table       = snbep_uncore_pci_ids,
};
1223
/* Low 3 bits of the CPUNODEID register hold the local node id. */
#define NODE_ID_MASK    0x7

/*
 * build pci bus to socket mapping
 *
 * Walk every UBOX device with PCI id @devid, read its local node id
 * (at config offset @nodeid_loc) and the node-id mapping register (at
 * @idmap_loc), and record which physical socket each PCI bus belongs
 * to.  Buses without a UBOX device inherit the mapping of a neighbour
 * bus, scanning downward when @reverse is set and upward otherwise.
 * Returns 0 on success or a negative errno.
 */
static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool reverse)
{
        struct pci_dev *ubox_dev = NULL;
        int i, bus, nodeid, segment;
        struct pci2phy_map *map;
        int err = 0;
        u32 config = 0;

        while (1) {
                /* find the UBOX device */
                ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
                if (!ubox_dev)
                        break;
                bus = ubox_dev->bus->number;
                /* get the Node ID of the local register */
                err = pci_read_config_dword(ubox_dev, nodeid_loc, &config);
                if (err)
                        break;
                nodeid = config & NODE_ID_MASK;
                /* get the Node ID mapping */
                err = pci_read_config_dword(ubox_dev, idmap_loc, &config);
                if (err)
                        break;

                segment = pci_domain_nr(ubox_dev->bus);
                raw_spin_lock(&pci2phy_map_lock);
                map = __find_pci2phy_map(segment);
                if (!map) {
                        raw_spin_unlock(&pci2phy_map_lock);
                        err = -ENOMEM;
                        break;
                }

                /*
                 * every three bits in the Node ID mapping register maps
                 * to a particular node.
                 */
                for (i = 0; i < 8; i++) {
                        if (nodeid == ((config >> (3 * i)) & 0x7)) {
                                map->pbus_to_physid[bus] = i;
                                break;
                        }
                }
                raw_spin_unlock(&pci2phy_map_lock);
        }

        if (!err) {
                /*
                 * For PCI bus with no UBOX device, find the next bus
                 * that has UBOX device and use its mapping.
                 */
                raw_spin_lock(&pci2phy_map_lock);
                list_for_each_entry(map, &pci2phy_map_head, list) {
                        i = -1;
                        if (reverse) {
                                for (bus = 255; bus >= 0; bus--) {
                                        if (map->pbus_to_physid[bus] >= 0)
                                                i = map->pbus_to_physid[bus];
                                        else
                                                map->pbus_to_physid[bus] = i;
                                }
                        } else {
                                for (bus = 0; bus <= 255; bus++) {
                                        if (map->pbus_to_physid[bus] >= 0)
                                                i = map->pbus_to_physid[bus];
                                        else
                                                map->pbus_to_physid[bus] = i;
                                }
                        }
                }
                raw_spin_unlock(&pci2phy_map_lock);
        }

        /* Drop the reference held from the last successful pci_get_device(). */
        pci_dev_put(ubox_dev);

        return err ? pcibios_err_to_errno(err) : 0;
}
1306
1307 int snbep_uncore_pci_init(void)
1308 {
1309         int ret = snbep_pci2phy_map_init(0x3ce0, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
1310         if (ret)
1311                 return ret;
1312         uncore_pci_uncores = snbep_pci_uncores;
1313         uncore_pci_driver = &snbep_uncore_pci_driver;
1314         return 0;
1315 }
1316 /* end of Sandy Bridge-EP uncore support */
1317
1318 /* IvyTown uncore support */
1319 static void ivbep_uncore_msr_init_box(struct intel_uncore_box *box)
1320 {
1321         unsigned msr = uncore_msr_box_ctl(box);
1322         if (msr)
1323                 wrmsrl(msr, IVBEP_PMON_BOX_CTL_INT);
1324 }
1325
1326 static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box)
1327 {
1328         struct pci_dev *pdev = box->pci_dev;
1329
1330         pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
1331 }
1332
/*
 * IvyTown MSR PMON ops: the SNB-EP handlers with an IVT-specific
 * init_box (different box-control init value).
 */
#define IVBEP_UNCORE_MSR_OPS_COMMON_INIT()                      \
        .init_box       = ivbep_uncore_msr_init_box,            \
        .disable_box    = snbep_uncore_msr_disable_box,         \
        .enable_box     = snbep_uncore_msr_enable_box,          \
        .disable_event  = snbep_uncore_msr_disable_event,       \
        .enable_event   = snbep_uncore_msr_enable_event,        \
        .read_counter   = uncore_msr_read_counter
1340
/* Default ops for IvyTown MSR-based PMON boxes. */
static struct intel_uncore_ops ivbep_uncore_msr_ops = {
        IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};
1344
/* Default ops for IvyTown PCI PMON boxes (IVT init, SNB-EP handlers). */
static struct intel_uncore_ops ivbep_uncore_pci_ops = {
        .init_box       = ivbep_uncore_pci_init_box,
        .disable_box    = snbep_uncore_pci_disable_box,
        .enable_box     = snbep_uncore_pci_enable_box,
        .disable_event  = snbep_uncore_pci_disable_event,
        .enable_event   = snbep_uncore_pci_enable_event,
        .read_counter   = snbep_uncore_pci_read_counter,
};
1353
/* Field defaults shared by all plain IvyTown PCI PMON box types. */
#define IVBEP_UNCORE_PCI_COMMON_INIT()                          \
        .perf_ctr       = SNBEP_PCI_PMON_CTR0,                  \
        .event_ctl      = SNBEP_PCI_PMON_CTL0,                  \
        .event_mask     = IVBEP_PMON_RAW_EVENT_MASK,            \
        .box_ctl        = SNBEP_PCI_PMON_BOX_CTL,               \
        .ops            = &ivbep_uncore_pci_ops,                        \
        .format_group   = &ivbep_uncore_format_group
1361
/* sysfs format attributes for the generic IVT PMON boxes. */
static struct attribute *ivbep_uncore_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_thresh8.attr,
        NULL,
};

/* Ubox variant: 5-bit threshold field. */
static struct attribute *ivbep_uncore_ubox_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_thresh5.attr,
        NULL,
};

/* Cbox variant: tid enable plus the full set of filter fields. */
static struct attribute *ivbep_uncore_cbox_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_tid_en.attr,
        &format_attr_thresh8.attr,
        &format_attr_filter_tid.attr,
        &format_attr_filter_link.attr,
        &format_attr_filter_state2.attr,
        &format_attr_filter_nid2.attr,
        &format_attr_filter_opc2.attr,
        &format_attr_filter_nc.attr,
        &format_attr_filter_c6.attr,
        &format_attr_filter_isoc.attr,
        NULL,
};

/* PCU variant: occupancy selection/edge/invert and the four filter bands. */
static struct attribute *ivbep_uncore_pcu_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_occ_sel.attr,
        &format_attr_edge.attr,
        &format_attr_thresh5.attr,
        &format_attr_occ_invert.attr,
        &format_attr_occ_edge.attr,
        &format_attr_filter_band0.attr,
        &format_attr_filter_band1.attr,
        &format_attr_filter_band2.attr,
        &format_attr_filter_band3.attr,
        NULL,
};

/* QPI variant: extended event field plus packet match/mask fields. */
static struct attribute *ivbep_uncore_qpi_formats_attr[] = {
        &format_attr_event_ext.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_thresh8.attr,
        &format_attr_match_rds.attr,
        &format_attr_match_rnid30.attr,
        &format_attr_match_rnid4.attr,
        &format_attr_match_dnid.attr,
        &format_attr_match_mc.attr,
        &format_attr_match_opc.attr,
        &format_attr_match_vnw.attr,
        &format_attr_match0.attr,
        &format_attr_match1.attr,
        &format_attr_mask_rds.attr,
        &format_attr_mask_rnid30.attr,
        &format_attr_mask_rnid4.attr,
        &format_attr_mask_dnid.attr,
        &format_attr_mask_mc.attr,
        &format_attr_mask_opc.attr,
        &format_attr_mask_vnw.attr,
        &format_attr_mask0.attr,
        &format_attr_mask1.attr,
        NULL,
};
1436
/* Attribute groups exposing the per-box-type "format" directories in sysfs. */
static struct attribute_group ivbep_uncore_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_formats_attr,
};

static struct attribute_group ivbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_ubox_formats_attr,
};

static struct attribute_group ivbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_cbox_formats_attr,
};

static struct attribute_group ivbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_pcu_formats_attr,
};

static struct attribute_group ivbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_qpi_formats_attr,
};
1461
/* IVB-EP U-box (system config controller) PMU: 2 generic 44-bit counters
 * plus a 48-bit fixed UCLK counter, all MSR based. */
static struct intel_uncore_type ivbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= IVBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_ubox_format_group,
};
1476
/*
 * Mapping of C-box event codes (event select + umask, matched under the
 * given config mask) to the filter-field selector bits (last column) that
 * ivbep_cbox_hw_config() ORs together; the selector is then translated to
 * filter-register bits by ivbep_cbox_filter_mask().  The raw hex values
 * are hardware event encodings — do not alter.
 */
static struct extra_reg ivbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	EVENT_EXTRA_END
};
1517
1518 static u64 ivbep_cbox_filter_mask(int fields)
1519 {
1520         u64 mask = 0;
1521
1522         if (fields & 0x1)
1523                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_TID;
1524         if (fields & 0x2)
1525                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK;
1526         if (fields & 0x4)
1527                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
1528         if (fields & 0x8)
1529                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NID;
1530         if (fields & 0x10) {
1531                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
1532                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NC;
1533                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_C6;
1534                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
1535         }
1536
1537         return mask;
1538 }
1539
/*
 * Constraint lookup for C-box events: defer to the common SNB-EP helper,
 * parameterized with the IVB-EP filter-mask translation above.
 */
static struct event_constraint *
ivbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, ivbep_cbox_filter_mask);
}
1545
1546 static int ivbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1547 {
1548         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1549         struct extra_reg *er;
1550         int idx = 0;
1551
1552         for (er = ivbep_uncore_cbox_extra_regs; er->msr; er++) {
1553                 if (er->event != (event->hw.config & er->config_mask))
1554                         continue;
1555                 idx |= er->idx;
1556         }
1557
1558         if (idx) {
1559                 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
1560                         SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1561                 reg1->config = event->attr.config1 & ivbep_cbox_filter_mask(idx);
1562                 reg1->idx = idx;
1563         }
1564         return 0;
1565 }
1566
/*
 * Enable a C-box event: program the box filter register (if the event
 * uses one) and then set the enable bit in the event's control MSR.
 */
static void ivbep_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		u64 filter = uncore_shared_reg_config(box, 0);
		/* low half of the 64-bit filter value */
		wrmsrl(reg1->reg, filter & 0xffffffff);
		/*
		 * High half goes to a second filter MSR located 6 above the
		 * first.  NOTE(review): the +6 stride is deliberate here and
		 * reflects the IVB-EP C-box MSR layout — confirm against the
		 * uncore PMU reference before changing.
		 */
		wrmsrl(reg1->reg + 6, filter >> 32);
	}

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
1580
/* C-box ops: mostly shared SNB-EP MSR handlers, with the IVB-EP specific
 * init, filter-aware enable, hw_config and constraint callbacks. */
static struct intel_uncore_ops ivbep_uncore_cbox_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= ivbep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= ivbep_cbox_hw_config,
	.get_constraint		= ivbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

/* IVB-EP C-box (LLC coherence engine) PMU: up to 15 boxes (one per core,
 * clamped in ivbep_uncore_cpu_init()), each with a shared filter register. */
static struct intel_uncore_type ivbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 15,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &ivbep_uncore_cbox_ops,
	.format_group		= &ivbep_uncore_cbox_format_group,
};
1608
/* PCU ops: common IVB-EP MSR handlers plus the SNB-EP PCU occupancy
 * filter hw_config/constraint callbacks (reused unchanged). */
static struct intel_uncore_ops ivbep_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

/* IVB-EP PCU (power control unit) PMU, with one shared filter register
 * for the frequency-band filters. */
static struct intel_uncore_type ivbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_pcu_ops,
	.format_group		= &ivbep_uncore_pcu_format_group,
};
1629
/* NULL-terminated list of the MSR-accessed IVB-EP uncore PMU types. */
static struct intel_uncore_type *ivbep_msr_uncores[] = {
	&ivbep_uncore_ubox,
	&ivbep_uncore_cbox,
	&ivbep_uncore_pcu,
	NULL,
};
1636
1637 void ivbep_uncore_cpu_init(void)
1638 {
1639         if (ivbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
1640                 ivbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
1641         uncore_msr_uncores = ivbep_msr_uncores;
1642 }
1643
/* IVB-EP Home Agent PMU: two boxes, PCI config-space accessed. */
static struct intel_uncore_type ivbep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

/* IVB-EP memory controller channel PMU: 8 channel boxes, each with a
 * fixed DCLK counter in addition to the generic counters. */
static struct intel_uncore_type ivbep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};
1663
/*
 * Registers in IRP boxes are not properly aligned, so the per-counter
 * control and counter offsets are looked up in these tables (indexed by
 * hwc->idx) instead of being computed from a base + stride.
 */
static unsigned ivbep_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
static unsigned ivbep_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};
1667
1668 static void ivbep_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1669 {
1670         struct pci_dev *pdev = box->pci_dev;
1671         struct hw_perf_event *hwc = &event->hw;
1672
1673         pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx],
1674                                hwc->config | SNBEP_PMON_CTL_EN);
1675 }
1676
1677 static void ivbep_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1678 {
1679         struct pci_dev *pdev = box->pci_dev;
1680         struct hw_perf_event *hwc = &event->hw;
1681
1682         pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx], hwc->config);
1683 }
1684
1685 static u64 ivbep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
1686 {
1687         struct pci_dev *pdev = box->pci_dev;
1688         struct hw_perf_event *hwc = &event->hw;
1689         u64 count = 0;
1690
1691         pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
1692         pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
1693
1694         return count;
1695 }
1696
/* IRP ops: shared PCI box handlers with the table-driven per-event
 * enable/disable/read callbacks defined above. */
static struct intel_uncore_ops ivbep_uncore_irp_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= ivbep_uncore_irp_disable_event,
	.enable_event	= ivbep_uncore_irp_enable_event,
	.read_counter	= ivbep_uncore_irp_read_counter,
};

/* IVB-EP IRP (IIO ring port) PMU.  No .perf_ctr/.event_ctl: register
 * offsets come from the lookup tables via the custom ops. */
static struct intel_uncore_type ivbep_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= IVBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &ivbep_uncore_irp_ops,
	.format_group		= &ivbep_uncore_format_group,
};
1716
/* QPI ops: common PCI handlers plus the SNB-EP QPI match/mask register
 * programming (via the extra companion filter devices). */
static struct intel_uncore_ops ivbep_uncore_qpi_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_qpi_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
	.hw_config	= snbep_qpi_hw_config,
	.get_constraint	= uncore_get_constraint,
	.put_constraint	= uncore_put_constraint,
};

/* IVB-EP QPI link-layer PMU: three ports, with one shared register pair
 * for the packet match/mask filters. */
static struct intel_uncore_type ivbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_qpi_ops,
	.format_group		= &ivbep_uncore_qpi_format_group,
};
1742
/* IVB-EP R2PCIe (ring-to-PCIe) PMU, with SNB-EP counter constraints. */
static struct intel_uncore_type ivbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

/* IVB-EP R3QPI (ring-to-QPI) PMU: two boxes, 3 counters each. */
static struct intel_uncore_type ivbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};
1760
/* Indices into ivbep_pci_uncores[], encoded into the PCI driver_data. */
enum {
	IVBEP_PCI_UNCORE_HA,
	IVBEP_PCI_UNCORE_IMC,
	IVBEP_PCI_UNCORE_IRP,
	IVBEP_PCI_UNCORE_QPI,
	IVBEP_PCI_UNCORE_R2PCIE,
	IVBEP_PCI_UNCORE_R3QPI,
};

/* NULL-terminated list of the PCI-accessed IVB-EP uncore PMU types,
 * indexed by the enum above. */
static struct intel_uncore_type *ivbep_pci_uncores[] = {
	[IVBEP_PCI_UNCORE_HA]	= &ivbep_uncore_ha,
	[IVBEP_PCI_UNCORE_IMC]	= &ivbep_uncore_imc,
	[IVBEP_PCI_UNCORE_IRP]	= &ivbep_uncore_irp,
	[IVBEP_PCI_UNCORE_QPI]	= &ivbep_uncore_qpi,
	[IVBEP_PCI_UNCORE_R2PCIE]	= &ivbep_uncore_r2pcie,
	[IVBEP_PCI_UNCORE_R3QPI]	= &ivbep_uncore_r3qpi,
	NULL,
};
1779
/*
 * PCI device-ID table for the IVB-EP uncore PMU devices.  driver_data
 * packs the uncore type index and box instance; the last two entries are
 * the companion QPI port filter devices (UNCORE_EXTRA_PCI_DEV), not PMU
 * boxes themselves.
 */
static const struct pci_device_id ivbep_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 4 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 4 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};
1865
/* PCI driver shell for the uncore core code; only the id_table is used
 * here (no probe — the uncore core binds the devices itself). */
static struct pci_driver ivbep_uncore_pci_driver = {
	.name		= "ivbep_uncore",
	.id_table	= ivbep_uncore_pci_ids,
};
1870
1871 int ivbep_uncore_pci_init(void)
1872 {
1873         int ret = snbep_pci2phy_map_init(0x0e1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
1874         if (ret)
1875                 return ret;
1876         uncore_pci_uncores = ivbep_pci_uncores;
1877         uncore_pci_driver = &ivbep_uncore_pci_driver;
1878         return 0;
1879 }
1880 /* end of IvyTown uncore support */
1881
1882 /* KNL uncore support */
/* sysfs "format" attributes for the KNL U-box PMU (5-bit threshold). */
static struct attribute *knl_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};
1892
static struct attribute_group knl_uncore_ubox_format_group = {
	.name = "format",
	.attrs = knl_uncore_ubox_formats_attr,
};

/* KNL U-box PMU: reuses the HSW-EP U-box MSR layout and the generic
 * SNB-EP MSR ops, with a KNL-specific raw event mask. */
static struct intel_uncore_type knl_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= KNL_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops			= &snbep_uncore_msr_ops,
	.format_group		= &knl_uncore_ubox_format_group,
};
1912
/* sysfs "format" attributes for the KNL CHA PMU, including its
 * tid/state/opcode/locality filter fields. */
static struct attribute *knl_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_qor.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid4.attr,
	&format_attr_filter_link3.attr,
	&format_attr_filter_state4.attr,
	&format_attr_filter_local.attr,
	&format_attr_filter_all_op.attr,
	&format_attr_filter_nnm.attr,
	&format_attr_filter_opc3.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static struct attribute_group knl_uncore_cha_format_group = {
	.name = "format",
	.attrs = knl_uncore_cha_formats_attr,
};
1937
/* CHA events restricted to counter 0. */
static struct event_constraint knl_uncore_cha_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	EVENT_CONSTRAINT_END
};

/* CHA event codes that require filter fields (selector bits in the last
 * column, translated by knl_cha_filter_mask()). */
static struct extra_reg knl_uncore_cha_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x3d, 0xff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x4),
	EVENT_EXTRA_END
};
1953
1954 static u64 knl_cha_filter_mask(int fields)
1955 {
1956         u64 mask = 0;
1957
1958         if (fields & 0x1)
1959                 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_TID;
1960         if (fields & 0x2)
1961                 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_STATE;
1962         if (fields & 0x4)
1963                 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_OP;
1964         return mask;
1965 }
1966
/*
 * Constraint lookup for CHA events: defer to the common SNB-EP helper,
 * parameterized with the KNL filter-mask translation above.
 */
static struct event_constraint *
knl_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, knl_cha_filter_mask);
}
1972
/*
 * Per-event hardware setup for the KNL CHA.  Besides the config1-derived
 * filter bits, the remote-node, local-node and NNC filter bits are
 * always ORed in for filtered events (so node-scope filtering does not
 * silently drop counts by default).  Always returns 0.
 */
static int knl_cha_hw_config(struct intel_uncore_box *box,
			     struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	/* collect the filter-field selector bits requested for this event */
	for (er = knl_uncore_cha_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		/* this box's filter MSR: FILTER0 plus the per-CHA stride */
		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
			    KNL_CHA_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & knl_cha_filter_mask(idx);

		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE;
		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE;
		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_NNC;
		reg1->idx = idx;
	}
	return 0;
}
1998
/* Forward declaration: the HSW-EP C-box enable path is reused for KNL
 * CHA events and is defined later in this file. */
static void hswep_cbox_enable_event(struct intel_uncore_box *box,
				    struct perf_event *event);

static struct intel_uncore_ops knl_uncore_cha_ops = {
	.init_box		= snbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= knl_cha_hw_config,
	.get_constraint		= knl_cha_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

/* KNL CHA (caching/home agent) PMU: 38 tiles, HSW-EP C-box register
 * layout with a KNL-specific MSR stride and filter semantics. */
static struct intel_uncore_type knl_uncore_cha = {
	.name			= "cha",
	.num_counters		= 4,
	.num_boxes		= 38,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= KNL_CHA_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= KNL_CHA_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= knl_uncore_cha_constraints,
	.ops			= &knl_uncore_cha_ops,
	.format_group		= &knl_uncore_cha_format_group,
};
2029
/* sysfs "format" attributes for the KNL PCU PMU (2-part event select,
 * occupancy-counter controls, 6-bit threshold). */
static struct attribute *knl_uncore_pcu_formats_attr[] = {
	&format_attr_event2.attr,
	&format_attr_use_occ_ctr.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh6.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge_det.attr,
	NULL,
};

static struct attribute_group knl_uncore_pcu_format_group = {
	.name = "format",
	.attrs = knl_uncore_pcu_formats_attr,
};

/* KNL PCU PMU: HSW-EP register layout, generic SNB-EP MSR ops. */
static struct intel_uncore_type knl_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= KNL_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.ops			= &snbep_uncore_msr_ops,
	.format_group		= &knl_uncore_pcu_format_group,
};
2060
/* NULL-terminated list of the MSR-accessed KNL uncore PMU types. */
static struct intel_uncore_type *knl_msr_uncores[] = {
	&knl_uncore_ubox,
	&knl_uncore_cha,
	&knl_uncore_pcu,
	NULL,
};

/* Register the MSR-based KNL uncore types with the uncore core. */
void knl_uncore_cpu_init(void)
{
	uncore_msr_uncores = knl_msr_uncores;
}
2072
/*
 * Enable a KNL IMC/EDC box by writing 0 to its box control register.
 * NOTE(review): unlike the SNB-EP path this writes plain 0 rather than
 * clearing only a freeze bit — presumably the KNL box-control register
 * enables counting when fully cleared; confirm against the KNL uncore
 * performance monitoring documentation.
 */
static void knl_uncore_imc_enable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	pci_write_config_dword(pdev, box_ctl, 0);
}
2080
2081 static void knl_uncore_imc_enable_event(struct intel_uncore_box *box,
2082                                         struct perf_event *event)
2083 {
2084         struct pci_dev *pdev = box->pci_dev;
2085         struct hw_perf_event *hwc = &event->hw;
2086
2087         if ((event->attr.config & SNBEP_PMON_CTL_EV_SEL_MASK)
2088                                                         == UNCORE_FIXED_EVENT)
2089                 pci_write_config_dword(pdev, hwc->config_base,
2090                                        hwc->config | KNL_PMON_FIXED_CTL_EN);
2091         else
2092                 pci_write_config_dword(pdev, hwc->config_base,
2093                                        hwc->config | SNBEP_PMON_CTL_EN);
2094 }
2095
/* Ops shared by all KNL IMC/EDC clock-domain boxes: SNB-EP PCI handlers
 * with the KNL-specific box/event enable paths above. */
static struct intel_uncore_ops knl_uncore_imc_ops = {
	.init_box	= snbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= knl_uncore_imc_enable_box,
	.read_counter	= snbep_uncore_pci_read_counter,
	.enable_event	= knl_uncore_imc_enable_event,
	.disable_event	= snbep_uncore_pci_disable_event,
};
2104
/* KNL IMC uncore-clock domain PMU (2 boxes). */
static struct intel_uncore_type knl_uncore_imc_uclk = {
	.name			= "imc_uclk",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};

/* KNL IMC DRAM-clock channel PMU (6 channel boxes). */
static struct intel_uncore_type knl_uncore_imc_dclk = {
	.name			= "imc",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_MC0_CH0_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_MC0_CH0_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_MC0_CH0_MSR_PMON_FIXED_LOW,
	.fixed_ctl		= KNL_MC0_CH0_MSR_PMON_FIXED_CTL,
	.box_ctl		= KNL_MC0_CH0_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};

/* KNL EDC (MCDRAM) uncore-clock domain PMU (8 boxes). */
static struct intel_uncore_type knl_uncore_edc_uclk = {
	.name			= "edc_uclk",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};

/* KNL EDC (MCDRAM) eclk domain PMU (8 boxes). */
static struct intel_uncore_type knl_uncore_edc_eclk = {
	.name			= "edc_eclk",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_EDC0_ECLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW,
	.fixed_ctl		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL,
	.box_ctl		= KNL_EDC0_ECLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};
2168
/* M2PCIe counter constraints: event 0x23 is limited to counters in mask 0x3. */
static struct event_constraint knl_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	EVENT_CONSTRAINT_END
};
2173
/* KNL M2PCIe PMON: single box, standard SNB-EP PCI PMON register layout. */
static struct intel_uncore_type knl_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= knl_uncore_m2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2182
/* sysfs "format" attributes exposed for the KNL IRP PMU (includes the qor bit). */
static struct attribute *knl_uncore_irp_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_qor.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};
2192
/* Groups the IRP format attributes under the "format" sysfs directory. */
static struct attribute_group knl_uncore_irp_format_group = {
	.name = "format",
	.attrs = knl_uncore_irp_formats_attr,
};
2197
/* KNL IRP PMON: 2 counters, PCI based, with a KNL-specific raw event mask. */
static struct intel_uncore_type knl_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= KNL_IRP_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= KNL_IRP_PCI_PMON_BOX_CTL,
	.ops			= &snbep_uncore_pci_ops,
	.format_group		= &knl_uncore_irp_format_group,
};
2210
/* Indices into knl_pci_uncores[]; also encoded into pci_device_id driver_data below. */
enum {
	KNL_PCI_UNCORE_MC_UCLK,
	KNL_PCI_UNCORE_MC_DCLK,
	KNL_PCI_UNCORE_EDC_UCLK,
	KNL_PCI_UNCORE_EDC_ECLK,
	KNL_PCI_UNCORE_M2PCIE,
	KNL_PCI_UNCORE_IRP,
};
2219
/* PCI-discovered KNL uncore PMU types, ordered to match the enum above. */
static struct intel_uncore_type *knl_pci_uncores[] = {
	[KNL_PCI_UNCORE_MC_UCLK]	= &knl_uncore_imc_uclk,
	[KNL_PCI_UNCORE_MC_DCLK]	= &knl_uncore_imc_dclk,
	[KNL_PCI_UNCORE_EDC_UCLK]	= &knl_uncore_edc_uclk,
	[KNL_PCI_UNCORE_EDC_ECLK]	= &knl_uncore_edc_eclk,
	[KNL_PCI_UNCORE_M2PCIE]		= &knl_uncore_m2pcie,
	[KNL_PCI_UNCORE_IRP]		= &knl_uncore_irp,
	NULL,
};
2229
2230 /*
2231  * KNL uses a common PCI device ID for multiple instances of an Uncore PMU
 * device type. Prior to KNL, each instance of a PMU device type had a unique
2233  * device ID.
2234  *
2235  *      PCI Device ID   Uncore PMU Devices
2236  *      ----------------------------------
2237  *      0x7841          MC0 UClk, MC1 UClk
2238  *      0x7843          MC0 DClk CH 0, MC0 DClk CH 1, MC0 DClk CH 2,
2239  *                      MC1 DClk CH 0, MC1 DClk CH 1, MC1 DClk CH 2
2240  *      0x7833          EDC0 UClk, EDC1 UClk, EDC2 UClk, EDC3 UClk,
2241  *                      EDC4 UClk, EDC5 UClk, EDC6 UClk, EDC7 UClk
2242  *      0x7835          EDC0 EClk, EDC1 EClk, EDC2 EClk, EDC3 EClk,
2243  *                      EDC4 EClk, EDC5 EClk, EDC6 EClk, EDC7 EClk
2244  *      0x7817          M2PCIe
2245  *      0x7814          IRP
2246 */
2247
/*
 * driver_data encodes (PCI device, PCI function, PMU type index, box index)
 * via UNCORE_PCI_DEV_FULL_DATA(); M2PCIe and IRP sit on a different bus and
 * use the plain UNCORE_PCI_DEV_DATA() form (type index, box index only).
 */
static const struct pci_device_id knl_uncore_pci_ids[] = {
	{ /* MC0 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 0, KNL_PCI_UNCORE_MC_UCLK, 0),
	},
	{ /* MC1 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 0, KNL_PCI_UNCORE_MC_UCLK, 1),
	},
	{ /* MC0 DClk CH 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 2, KNL_PCI_UNCORE_MC_DCLK, 0),
	},
	{ /* MC0 DClk CH 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 3, KNL_PCI_UNCORE_MC_DCLK, 1),
	},
	{ /* MC0 DClk CH 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 4, KNL_PCI_UNCORE_MC_DCLK, 2),
	},
	{ /* MC1 DClk CH 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 2, KNL_PCI_UNCORE_MC_DCLK, 3),
	},
	{ /* MC1 DClk CH 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 3, KNL_PCI_UNCORE_MC_DCLK, 4),
	},
	{ /* MC1 DClk CH 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 4, KNL_PCI_UNCORE_MC_DCLK, 5),
	},
	{ /* EDC0 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, KNL_PCI_UNCORE_EDC_UCLK, 0),
	},
	{ /* EDC1 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, KNL_PCI_UNCORE_EDC_UCLK, 1),
	},
	{ /* EDC2 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(17, 0, KNL_PCI_UNCORE_EDC_UCLK, 2),
	},
	{ /* EDC3 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, KNL_PCI_UNCORE_EDC_UCLK, 3),
	},
	{ /* EDC4 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(19, 0, KNL_PCI_UNCORE_EDC_UCLK, 4),
	},
	{ /* EDC5 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(20, 0, KNL_PCI_UNCORE_EDC_UCLK, 5),
	},
	{ /* EDC6 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 0, KNL_PCI_UNCORE_EDC_UCLK, 6),
	},
	{ /* EDC7 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 0, KNL_PCI_UNCORE_EDC_UCLK, 7),
	},
	{ /* EDC0 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(24, 2, KNL_PCI_UNCORE_EDC_ECLK, 0),
	},
	{ /* EDC1 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(25, 2, KNL_PCI_UNCORE_EDC_ECLK, 1),
	},
	{ /* EDC2 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(26, 2, KNL_PCI_UNCORE_EDC_ECLK, 2),
	},
	{ /* EDC3 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(27, 2, KNL_PCI_UNCORE_EDC_ECLK, 3),
	},
	{ /* EDC4 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(28, 2, KNL_PCI_UNCORE_EDC_ECLK, 4),
	},
	{ /* EDC5 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(29, 2, KNL_PCI_UNCORE_EDC_ECLK, 5),
	},
	{ /* EDC6 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(30, 2, KNL_PCI_UNCORE_EDC_ECLK, 6),
	},
	{ /* EDC7 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(31, 2, KNL_PCI_UNCORE_EDC_ECLK, 7),
	},
	{ /* M2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7817),
		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_M2PCIE, 0),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7814),
		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_IRP, 0),
	},
	{ /* end: all zeroes */ }
};
2355
/*
 * Only the id_table is filled in here; the probe/remove callbacks are
 * presumably supplied by the common uncore PCI code — see uncore.c.
 */
static struct pci_driver knl_uncore_pci_driver = {
	.name		= "knl_uncore",
	.id_table	= knl_uncore_pci_ids,
};
2360
2361 int knl_uncore_pci_init(void)
2362 {
2363         int ret;
2364
2365         /* All KNL PCI based PMON units are on the same PCI bus except IRP */
2366         ret = snb_pci2phy_map_init(0x7814); /* IRP */
2367         if (ret)
2368                 return ret;
2369         ret = snb_pci2phy_map_init(0x7817); /* M2PCIe */
2370         if (ret)
2371                 return ret;
2372         uncore_pci_uncores = knl_pci_uncores;
2373         uncore_pci_driver = &knl_uncore_pci_driver;
2374         return 0;
2375 }
2376
2377 /* end of KNL uncore support */
2378
2379 /* Haswell-EP uncore support */
/* sysfs "format" attributes for the HSW-EP ubox (5-bit threshold, tid/cid filters). */
static struct attribute *hswep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_filter_tid2.attr,
	&format_attr_filter_cid.attr,
	NULL,
};
2390
/* Groups the ubox format attributes under the "format" sysfs directory. */
static struct attribute_group hswep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_ubox_formats_attr,
};
2395
2396 static int hswep_ubox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2397 {
2398         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2399         reg1->reg = HSWEP_U_MSR_PMON_FILTER;
2400         reg1->config = event->attr.config1 & HSWEP_U_MSR_PMON_BOX_FILTER_MASK;
2401         reg1->idx = 0;
2402         return 0;
2403 }
2404
/* Ubox ops: common MSR ops plus filter setup and shared-reg constraint hooks. */
static struct intel_uncore_ops hswep_uncore_ubox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_ubox_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};
2411
/* HSW-EP ubox PMU: 2 x 44-bit generic counters plus a 48-bit fixed UCLK counter. */
static struct intel_uncore_type hswep_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 44,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.num_shared_regs	= 1,	/* the single ubox filter register */
	.ops			= &hswep_uncore_ubox_ops,
	.format_group		= &hswep_uncore_ubox_format_group,
};
2427
/* sysfs "format" attributes for the HSW-EP cbox, including all filter fields. */
static struct attribute *hswep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid3.attr,
	&format_attr_filter_link2.attr,
	&format_attr_filter_state3.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};
2444
/* Groups the cbox format attributes under the "format" sysfs directory. */
static struct attribute_group hswep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_cbox_formats_attr,
};
2449
/* Cbox counter constraints: (event code, bitmask of counters it may run on). */
static struct event_constraint hswep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
	EVENT_CONSTRAINT_END
};
2460
/*
 * Cbox extra-register table: (event+umask value, config mask, filter-field
 * selector bits). hswep_cbox_hw_config() ORs together the selector bits of
 * every matching entry and hswep_cbox_filter_mask() translates them into
 * the valid filter-register bits for the event.
 */
static struct extra_reg hswep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4028, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4032, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4029, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4033, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x402A, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x12),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	EVENT_EXTRA_END
};
2502
2503 static u64 hswep_cbox_filter_mask(int fields)
2504 {
2505         u64 mask = 0;
2506         if (fields & 0x1)
2507                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_TID;
2508         if (fields & 0x2)
2509                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK;
2510         if (fields & 0x4)
2511                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE;
2512         if (fields & 0x8)
2513                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NID;
2514         if (fields & 0x10) {
2515                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC;
2516                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NC;
2517                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_C6;
2518                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
2519         }
2520         return mask;
2521 }
2522
/* Cbox constraint lookup, parameterized with the HSW-EP filter-mask helper. */
static struct event_constraint *
hswep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, hswep_cbox_filter_mask);
}
2528
2529 static int hswep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2530 {
2531         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2532         struct extra_reg *er;
2533         int idx = 0;
2534
2535         for (er = hswep_uncore_cbox_extra_regs; er->msr; er++) {
2536                 if (er->event != (event->hw.config & er->config_mask))
2537                         continue;
2538                 idx |= er->idx;
2539         }
2540
2541         if (idx) {
2542                 reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
2543                             HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
2544                 reg1->config = event->attr.config1 & hswep_cbox_filter_mask(idx);
2545                 reg1->idx = idx;
2546         }
2547         return 0;
2548 }
2549
/*
 * Enable a cbox event.  If the event uses a filter, the shared 64-bit
 * filter value is written first, split across two consecutive 32-bit
 * MSRs (low half, then high half), before the event itself is enabled.
 */
static void hswep_cbox_enable_event(struct intel_uncore_box *box,
				  struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		u64 filter = uncore_shared_reg_config(box, 0);
		wrmsrl(reg1->reg, filter & 0xffffffff);
		wrmsrl(reg1->reg + 1, filter >> 32);
	}

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
2564
/* Cbox ops: SNB-EP style box control with HSW-EP filter-aware enable/config. */
static struct intel_uncore_ops hswep_uncore_cbox_ops = {
	.init_box		= snbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= hswep_cbox_hw_config,
	.get_constraint		= hswep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
2576
/*
 * HSW-EP cbox PMU.  num_boxes is the maximum (18); hswep_uncore_cpu_init()
 * trims it to the actual core count at boot.
 */
static struct intel_uncore_type hswep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 18,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,	/* the per-box filter register */
	.constraints		= hswep_uncore_cbox_constraints,
	.ops			= &hswep_uncore_cbox_ops,
	.format_group		= &hswep_uncore_cbox_format_group,
};
2592
/*
 * Write SBOX Initialization register bit by bit to avoid spurious #GPs
 *
 * The init value is accumulated one set bit at a time, rewriting the box
 * control MSR after each bit is added; do not collapse this into a single
 * wrmsrl — the incremental sequence is the whole point of the workaround.
 */
static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);

	if (msr) {
		u64 init = SNBEP_PMON_BOX_CTL_INT;
		u64 flags = 0;
		int i;

		for_each_set_bit(i, (unsigned long *)&init, 64) {
			flags |= (1ULL << i);
			wrmsrl(msr, flags);
		}
	}
}
2611
/* SBOX ops: common MSR ops with the bit-by-bit init_box workaround above. */
static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = {
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.init_box		= hswep_uncore_sbox_msr_init_box
};
2616
/* sysfs "format" attributes for the HSW-EP sbox. */
static struct attribute *hswep_uncore_sbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};
2626
/* Groups the sbox format attributes under the "format" sysfs directory. */
static struct attribute_group hswep_uncore_sbox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_sbox_formats_attr,
};
2631
/*
 * HSW-EP sbox PMU.  num_boxes defaults to 4; hswep_uncore_cpu_init() drops
 * it to 2 on parts whose CAPID4 register reports only two SBOXes.
 */
static struct intel_uncore_type hswep_uncore_sbox = {
	.name			= "sbox",
	.num_counters		= 4,
	.num_boxes		= 4,
	.perf_ctr_bits		= 44,
	.event_ctl		= HSWEP_S0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_S0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_S0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_SBOX_MSR_OFFSET,
	.ops			= &hswep_uncore_sbox_msr_ops,
	.format_group		= &hswep_uncore_sbox_format_group,
};
2645
2646 static int hswep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2647 {
2648         struct hw_perf_event *hwc = &event->hw;
2649         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2650         int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
2651
2652         if (ev_sel >= 0xb && ev_sel <= 0xe) {
2653                 reg1->reg = HSWEP_PCU_MSR_PMON_BOX_FILTER;
2654                 reg1->idx = ev_sel - 0xb;
2655                 reg1->config = event->attr.config1 & (0xff << reg1->idx);
2656         }
2657         return 0;
2658 }
2659
/* PCU ops: common MSR ops plus band-filter config and SNB-EP PCU constraints. */
static struct intel_uncore_ops hswep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};
2666
/* HSW-EP PCU (power control unit) PMU: one box, 4 x 48-bit counters. */
static struct intel_uncore_type hswep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,	/* the band filter register */
	.ops			= &hswep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};
2680
/* All MSR-accessed HSW-EP uncore PMU types; NULL-terminated. */
static struct intel_uncore_type *hswep_msr_uncores[] = {
	&hswep_uncore_ubox,
	&hswep_uncore_cbox,
	&hswep_uncore_sbox,
	&hswep_uncore_pcu,
	NULL,
};
2688
/*
 * Register the HSW-EP MSR uncore PMUs, adjusting the static tables to the
 * actual hardware: cap the cbox count at the core count, and detect parts
 * with only two SBOXes via bits 7:6 of the PCU's CAPID4 config register.
 */
void hswep_uncore_cpu_init(void)
{
	int pkg = boot_cpu_data.logical_proc_id;

	if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;

	/* Detect 6-8 core systems with only two SBOXes */
	if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) {
		u32 capid4;

		pci_read_config_dword(uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3],
				      0x94, &capid4);
		if (((capid4 >> 6) & 0x3) == 0)
			hswep_uncore_sbox.num_boxes = 2;
	}

	uncore_msr_uncores = hswep_msr_uncores;
}
2708
/* HSW-EP Home Agent PMU: 2 boxes, standard SNB-EP PCI PMON layout. */
static struct intel_uncore_type hswep_uncore_ha = {
	.name		= "ha",
	.num_counters	= 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2716
/*
 * Named IMC events.  The CAS count scale 6.103515625e-5 equals 64 / 2^20:
 * each CAS transfers a 64-byte line and the result is reported in MiB.
 */
static struct uncore_event_desc hswep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};
2727
/* HSW-EP IMC PMU: 8 channel boxes, generic + fixed (DRAM clock) counters. */
static struct intel_uncore_type hswep_uncore_imc = {
	.name		= "imc",
	.num_counters	= 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2739
/* PCI config-space offsets of the four IRP counters (each read as two 32-bit halves). */
static unsigned hswep_uncore_irp_ctrs[] = {0xa0, 0xa8, 0xb0, 0xb8};
2741
/*
 * Read one 64-bit IRP counter from PCI config space as two 32-bit reads.
 * The second read targets (u32 *)&count + 1, i.e. the upper 32 bits on
 * little-endian x86 — this cast is intentionally endian-dependent.
 */
static u64 hswep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
	pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);

	return count;
}
2753
/* IRP ops: SNB-EP box control, IVB-EP event enable/disable, custom read. */
static struct intel_uncore_ops hswep_uncore_irp_ops = {
	.init_box	= snbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= ivbep_uncore_irp_disable_event,
	.enable_event	= ivbep_uncore_irp_enable_event,
	.read_counter	= hswep_uncore_irp_read_counter,
};
2762
/*
 * HSW-EP IRP PMU.  No .perf_ctr/.event_ctl: the counters live at scattered
 * PCI config offsets and are accessed through the custom ops above.
 */
static struct intel_uncore_type hswep_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &hswep_uncore_irp_ops,
	.format_group		= &snbep_uncore_format_group,
};
2773
/* HSW-EP QPI PMU: 3 port boxes, reusing the SNB-EP QPI ops and format group. */
static struct intel_uncore_type hswep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,	/* QPI match/mask registers */
	.ops			= &snbep_uncore_qpi_ops,
	.format_group		= &snbep_uncore_qpi_format_group,
};
2787
/* R2PCIe counter constraints: (event code, bitmask of counters it may run on). */
static struct event_constraint hswep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x27, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	EVENT_CONSTRAINT_END
};
2809
/* HSW-EP R2PCIe PMU: single box, standard SNB-EP PCI PMON layout. */
static struct intel_uncore_type hswep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= hswep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2818
/* R3QPI counter constraints: (event code, bitmask of counters it may run on). */
static struct event_constraint hswep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};
2855
/* HSW-EP R3QPI PMU: 3 boxes with 3 x 44-bit counters each. */
static struct intel_uncore_type hswep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters	= 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 44,
	.constraints	= hswep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2864
/* Indices into hswep_pci_uncores[]; encoded into pci_device_id driver_data below. */
enum {
	HSWEP_PCI_UNCORE_HA,
	HSWEP_PCI_UNCORE_IMC,
	HSWEP_PCI_UNCORE_IRP,
	HSWEP_PCI_UNCORE_QPI,
	HSWEP_PCI_UNCORE_R2PCIE,
	HSWEP_PCI_UNCORE_R3QPI,
};
2873
/* PCI-discovered HSW-EP uncore PMU types, ordered to match the enum above. */
static struct intel_uncore_type *hswep_pci_uncores[] = {
	[HSWEP_PCI_UNCORE_HA]	= &hswep_uncore_ha,
	[HSWEP_PCI_UNCORE_IMC]	= &hswep_uncore_imc,
	[HSWEP_PCI_UNCORE_IRP]	= &hswep_uncore_irp,
	[HSWEP_PCI_UNCORE_QPI]	= &hswep_uncore_qpi,
	[HSWEP_PCI_UNCORE_R2PCIE]	= &hswep_uncore_r2pcie,
	[HSWEP_PCI_UNCORE_R3QPI]	= &hswep_uncore_r3qpi,
	NULL,
};
2883
/*
 * PCI IDs of the Haswell-EP uncore PMON units.  driver_data packs the
 * PMU-type index (enum above) with the box instance number.  Entries
 * tagged UNCORE_EXTRA_PCI_DEV are not PMUs themselves but auxiliary
 * devices (QPI filters, PCU.3 capability registers) used by other boxes.
 */
static const struct pci_device_id hswep_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f30),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f38),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb0),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb1),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb4),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb5),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd0),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd1),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd4),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd5),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f39),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f32),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f33),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3a),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f34),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f36),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f37),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3e),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* PCU.3 (for Capability registers) */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fc0),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   HSWEP_PCI_PCU_3),
	},
	{ /* end: all zeroes */ }
};

/* PCI id-table handle used by the uncore core to bind the devices above. */
static struct pci_driver hswep_uncore_pci_driver = {
	.name		= "hswep_uncore",
	.id_table	= hswep_uncore_pci_ids,
};
2979
2980 int hswep_uncore_pci_init(void)
2981 {
2982         int ret = snbep_pci2phy_map_init(0x2f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
2983         if (ret)
2984                 return ret;
2985         uncore_pci_uncores = hswep_pci_uncores;
2986         uncore_pci_driver = &hswep_uncore_pci_driver;
2987         return 0;
2988 }
2989 /* end of Haswell-EP uncore support */
2990
2991 /* BDX uncore support */
2992
2993 static struct intel_uncore_type bdx_uncore_ubox = {
2994         .name                   = "ubox",
2995         .num_counters           = 2,
2996         .num_boxes              = 1,
2997         .perf_ctr_bits          = 48,
2998         .fixed_ctr_bits         = 48,
2999         .perf_ctr               = HSWEP_U_MSR_PMON_CTR0,
3000         .event_ctl              = HSWEP_U_MSR_PMON_CTL0,
3001         .event_mask             = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
3002         .fixed_ctr              = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
3003         .fixed_ctl              = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
3004         .num_shared_regs        = 1,
3005         .ops                    = &ivbep_uncore_msr_ops,
3006         .format_group           = &ivbep_uncore_ubox_format_group,
3007 };
3008
/* BDX Cbo events restricted to a subset of counters: (event, counter mask). */
static struct event_constraint bdx_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
	EVENT_CONSTRAINT_END
};

/*
 * BDX Cbo PMON (MSR-based).  num_boxes is an upper bound; it is trimmed
 * to the actual core count in bdx_uncore_cpu_init().
 */
static struct intel_uncore_type bdx_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 24,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= bdx_uncore_cbox_constraints,
	.ops			= &hswep_uncore_cbox_ops,
	.format_group		= &hswep_uncore_cbox_format_group,
};
3032
/* MSR-based BDX uncore PMU types; NULL-terminated. */
static struct intel_uncore_type *bdx_msr_uncores[] = {
	&bdx_uncore_ubox,
	&bdx_uncore_cbox,
	&hswep_uncore_pcu,
	NULL,
};
3039
3040 void bdx_uncore_cpu_init(void)
3041 {
3042         if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
3043                 bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
3044         uncore_msr_uncores = bdx_msr_uncores;
3045 }
3046
/* BDX Home Agent PMON (PCI-based). */
static struct intel_uncore_type bdx_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/*
 * BDX IMC channel PMON (PCI-based); has an extra fixed counter and
 * reuses the Haswell-EP IMC event descriptions.
 */
static struct intel_uncore_type bdx_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3066
/* BDX IRP PMON (PCI-based); uses the Haswell-EP IRP ops. */
static struct intel_uncore_type bdx_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &hswep_uncore_irp_ops,
	.format_group		= &snbep_uncore_format_group,
};

/*
 * BDX QPI PMON (PCI-based).  The shared reg carries the match/mask
 * filter state handled by snbep_uncore_qpi_ops.
 */
static struct intel_uncore_type bdx_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.format_group		= &snbep_uncore_qpi_format_group,
};
3091
/* BDX R2PCIe events restricted to specific counters: (event, counter mask). */
static struct event_constraint bdx_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	EVENT_CONSTRAINT_END
};

/* BDX R2PCIe PMON (PCI-based). */
static struct intel_uncore_type bdx_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= bdx_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3113
/* BDX R3QPI events restricted to specific counters: (event, counter mask). */
static struct event_constraint bdx_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};

/* BDX R3QPI PMON (PCI-based). */
static struct intel_uncore_type bdx_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.constraints	= bdx_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3156
/* Index of each BDX PCI-based PMU type in bdx_pci_uncores[]. */
enum {
	BDX_PCI_UNCORE_HA,
	BDX_PCI_UNCORE_IMC,
	BDX_PCI_UNCORE_IRP,
	BDX_PCI_UNCORE_QPI,
	BDX_PCI_UNCORE_R2PCIE,
	BDX_PCI_UNCORE_R3QPI,
};

/* All BDX PCI uncore PMU types; NULL-terminated. */
static struct intel_uncore_type *bdx_pci_uncores[] = {
	[BDX_PCI_UNCORE_HA]	= &bdx_uncore_ha,
	[BDX_PCI_UNCORE_IMC]	= &bdx_uncore_imc,
	[BDX_PCI_UNCORE_IRP]	= &bdx_uncore_irp,
	[BDX_PCI_UNCORE_QPI]	= &bdx_uncore_qpi,
	[BDX_PCI_UNCORE_R2PCIE]	= &bdx_uncore_r2pcie,
	[BDX_PCI_UNCORE_R3QPI]	= &bdx_uncore_r3qpi,
	NULL,
};
3175
3176 static const struct pci_device_id bdx_uncore_pci_ids[] = {
3177         { /* Home Agent 0 */
3178                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f30),
3179                 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 0),
3180         },
3181         { /* Home Agent 1 */
3182                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f38),
3183                 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 1),
3184         },
3185         { /* MC0 Channel 0 */
3186                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb0),
3187                 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 0),
3188         },
3189         { /* MC0 Channel 1 */
3190                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb1),
3191                 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 1),
3192         },
3193         { /* MC0 Channel 2 */
3194                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb4),
3195                 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 2),
3196         },
3197         { /* MC0 Channel 3 */
3198                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb5),
3199                 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 3),
3200         },
3201         { /* MC1 Channel 0 */
3202                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd0),
3203                 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 4),
3204         },
3205         { /* MC1 Channel 1 */
3206                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd1),
3207                 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 5),
3208         },
3209         { /* MC1 Channel 2 */
3210                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd4),
3211                 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 6),
3212         },
3213         { /* MC1 Channel 3 */
3214                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd5),
3215                 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 7),
3216         },
3217         { /* IRP */
3218                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f39),
3219                 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IRP, 0),
3220         },
3221         { /* QPI0 Port 0 */
3222                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f32),
3223                 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 0),
3224         },
3225         { /* QPI0 Port 1 */
3226                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f33),
3227                 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 1),
3228         },
3229         { /* QPI1 Port 2 */
3230                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3a),
3231                 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 2),
3232         },
3233         { /* R2PCIe */
3234                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f34),
3235                 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R2PCIE, 0),
3236         },
3237         { /* R3QPI0 Link 0 */
3238                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f36),
3239                 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 0),
3240         },
3241         { /* R3QPI0 Link 1 */
3242                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f37),
3243                 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 1),
3244         },
3245         { /* R3QPI1 Link 2 */
3246                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3e),
3247                 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 2),
3248         },
3249         { /* QPI Port 0 filter  */
3250                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f86),
3251                 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 0),
3252         },
3253         { /* QPI Port 1 filter  */
3254                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f96),
3255                 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 1),
3256         },
3257         { /* QPI Port 2 filter  */
3258                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46),
3259                 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 2),
3260         },
3261         { /* end: all zeroes */ }
3262 };
3263
/* PCI id-table handle used by the uncore core to bind the devices above. */
static struct pci_driver bdx_uncore_pci_driver = {
	.name		= "bdx_uncore",
	.id_table	= bdx_uncore_pci_ids,
};
3268
3269 int bdx_uncore_pci_init(void)
3270 {
3271         int ret = snbep_pci2phy_map_init(0x6f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
3272
3273         if (ret)
3274                 return ret;
3275         uncore_pci_uncores = bdx_pci_uncores;
3276         uncore_pci_driver = &bdx_uncore_pci_driver;
3277         return 0;
3278 }
3279
3280 /* end of BDX uncore support */
3281
3282 /* SKX uncore support */
3283
/*
 * SKX UBox PMON: MSR-based, 2 generic 48-bit counters plus a fixed
 * counter; reuses the IvyBridge-EP MSR ops and ubox format group.
 */
static struct intel_uncore_type skx_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_ubox_format_group,
};
3298
/* sysfs "format" attributes exposed for SKX CHA events and filters. */
static struct attribute *skx_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid4.attr,
	&format_attr_filter_link4.attr,
	&format_attr_filter_state5.attr,
	&format_attr_filter_rem.attr,
	&format_attr_filter_loc.attr,
	&format_attr_filter_nm.attr,
	&format_attr_filter_all_op.attr,
	&format_attr_filter_not_nm.attr,
	&format_attr_filter_opc_0.attr,
	&format_attr_filter_opc_1.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static struct attribute_group skx_uncore_chabox_format_group = {
	.name = "format",
	.attrs = skx_uncore_cha_formats_attr,
};

/* SKX CHA events restricted to counter 0: (event, counter mask). */
static struct event_constraint skx_uncore_chabox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	EVENT_CONSTRAINT_END
};
3332
3333 static struct extra_reg skx_uncore_cha_extra_regs[] = {
3334         SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
3335         SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
3336         SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
3337         SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
3338         SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4),
3339         SNBEP_CBO_EVENT_EXTRA_REG(0x8134, 0xffff, 0x4),
3340 };
3341
3342 static u64 skx_cha_filter_mask(int fields)
3343 {
3344         u64 mask = 0;
3345
3346         if (fields & 0x1)
3347                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_TID;
3348         if (fields & 0x2)
3349                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LINK;
3350         if (fields & 0x4)
3351                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_STATE;
3352         return mask;
3353 }
3354
/*
 * Resolve the shared filter-register constraint for a CHA event via the
 * common Cbo helper, using the SKX filter-field mask translation.
 */
static struct event_constraint *
skx_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, skx_cha_filter_mask);
}
3360
/*
 * Set up the extra filter register for a SKX CHA event: collect the
 * filter-field indices the event needs, then record the per-box filter
 * MSR, the user-supplied filter value (config1) masked to those fields,
 * and the field index bitmap in the event's extra_reg.
 */
static int skx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;
	/* Any of the CHA events may be filtered by Thread/Core-ID. */
	if (event->hw.config & SNBEP_CBO_PMON_CTL_TID_EN)
		idx = SKX_CHA_MSR_PMON_BOX_FILTER_TID;

	/*
	 * NOTE(review): this loop stops at an entry with msr == 0; verify
	 * skx_uncore_cha_extra_regs is terminated with EVENT_EXTRA_END.
	 */
	for (er = skx_uncore_cha_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		/* Filter MSRs are laid out per box at a fixed stride. */
		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
			    HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & skx_cha_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}
3384
static struct intel_uncore_ops skx_uncore_chabox_ops = {
	/* There is no frz_en for chabox ctl */
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= skx_cha_hw_config,
	.get_constraint		= skx_cha_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

/*
 * SKX CHA PMON (MSR-based).  num_boxes is intentionally left unset
 * here; skx_uncore_cpu_init() fills it in from the CAPID6 register.
 */
static struct intel_uncore_type skx_uncore_chabox = {
	.name			= "cha",
	.num_counters		= 4,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= skx_uncore_chabox_constraints,
	.ops			= &skx_uncore_chabox_ops,
	.format_group		= &skx_uncore_chabox_format_group,
};
3412
/* sysfs "format" attributes exposed for SKX IIO events. */
static struct attribute *skx_uncore_iio_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh9.attr,
	&format_attr_ch_mask.attr,
	&format_attr_fc_mask.attr,
	NULL,
};

static struct attribute_group skx_uncore_iio_format_group = {
	.name = "format",
	.attrs = skx_uncore_iio_formats_attr,
};

/* SKX IIO events restricted to specific counters: (event, counter mask). */
static struct event_constraint skx_uncore_iio_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x88, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x95, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xd4, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xd5, 0xc),
	EVENT_CONSTRAINT_END
};
3439
3440 static void skx_iio_enable_event(struct intel_uncore_box *box,
3441                                  struct perf_event *event)
3442 {
3443         struct hw_perf_event *hwc = &event->hw;
3444
3445         wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
3446 }
3447
static struct intel_uncore_ops skx_uncore_iio_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= skx_iio_enable_event,
	.read_counter		= uncore_msr_read_counter,
};

/*
 * SKX IIO PMON (MSR-based); uses an extended event mask for the
 * channel/function-class filter bits above bit 31.
 */
static struct intel_uncore_type skx_uncore_iio = {
	.name			= "iio",
	.num_counters		= 4,
	.num_boxes		= 5,
	.perf_ctr_bits		= 48,
	.event_ctl		= SKX_IIO0_MSR_PMON_CTL0,
	.perf_ctr		= SKX_IIO0_MSR_PMON_CTR0,
	.event_mask		= SKX_IIO_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
	.box_ctl		= SKX_IIO0_MSR_PMON_BOX_CTL,
	.msr_offset		= SKX_IIO_MSR_OFFSET,
	.constraints		= skx_uncore_iio_constraints,
	.ops			= &skx_uncore_iio_ops,
	.format_group		= &skx_uncore_iio_format_group,
};
3472
/* Generic sysfs "format" attributes shared by several SKX box types. */
static struct attribute *skx_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static struct attribute_group skx_uncore_format_group = {
	.name = "format",
	.attrs = skx_uncore_formats_attr,
};

/* SKX IRP PMON (MSR-based); shares the IIO MSR ops. */
static struct intel_uncore_type skx_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 5,
	.perf_ctr_bits		= 48,
	.event_ctl		= SKX_IRP0_MSR_PMON_CTL0,
	.perf_ctr		= SKX_IRP0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SKX_IRP0_MSR_PMON_BOX_CTL,
	.msr_offset		= SKX_IRP_MSR_OFFSET,
	.ops			= &skx_uncore_iio_ops,
	.format_group		= &skx_uncore_format_group,
};
3500
/* SKX PCU ops: common IVB-EP MSR ops plus the Haswell-EP PCU filter hooks. */
static struct intel_uncore_ops skx_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

/* SKX PCU PMON (MSR-based). */
static struct intel_uncore_type skx_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &skx_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};

/* MSR-based SKX uncore PMU types; NULL-terminated. */
static struct intel_uncore_type *skx_msr_uncores[] = {
	&skx_uncore_ubox,
	&skx_uncore_chabox,
	&skx_uncore_iio,
	&skx_uncore_irp,
	&skx_uncore_pcu,
	NULL,
};
3530
/*
 * To determine the number of CHAs, read bits 27:0 of the CAPID6
 * register, which is located at Device 30, Function 3, Offset 0x9C
 * (PCI ID 0x2083).
 */
3535 #define SKX_CAPID6              0x9c
3536 #define SKX_CHA_BIT_MASK        GENMASK(27, 0)
3537
3538 static int skx_count_chabox(void)
3539 {
3540         struct pci_dev *dev = NULL;
3541         u32 val = 0;
3542
3543         dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2083, dev);
3544         if (!dev)
3545                 goto out;
3546
3547         pci_read_config_dword(dev, SKX_CAPID6, &val);
3548         val &= SKX_CHA_BIT_MASK;
3549 out:
3550         pci_dev_put(dev);
3551         return hweight32(val);
3552 }
3553
/*
 * Register the MSR-based SKX uncore PMUs; the number of CHA boxes is
 * probed at runtime from the CAPID6 capability register.
 */
void skx_uncore_cpu_init(void)
{
	skx_uncore_chabox.num_boxes = skx_count_chabox();
	uncore_msr_uncores = skx_msr_uncores;
}
3559
/*
 * SKX IMC channel PMON (PCI-based); has an extra fixed counter and
 * reuses the Haswell-EP IMC event descriptions.
 */
static struct intel_uncore_type skx_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 6,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
3576
/* sysfs "format" attributes for SKX UPI events (extended umask). */
static struct attribute *skx_upi_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static struct attribute_group skx_upi_uncore_format_group = {
	.name = "format",
	.attrs = skx_upi_uncore_formats_attr,
};
3590
3591 static void skx_upi_uncore_pci_init_box(struct intel_uncore_box *box)
3592 {
3593         struct pci_dev *pdev = box->pci_dev;
3594
3595         __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
3596         pci_write_config_dword(pdev, SKX_UPI_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
3597 }
3598
static struct intel_uncore_ops skx_upi_uncore_pci_ops = {
	.init_box	= skx_upi_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};

/* SKX UPI PMON (PCI-based); umask is extended via event_mask_ext. */
static struct intel_uncore_type skx_uncore_upi = {
	.name		= "upi",
	.num_counters   = 4,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SKX_UPI_PCI_PMON_CTR0,
	.event_ctl	= SKX_UPI_PCI_PMON_CTL0,
	.event_mask	= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.event_mask_ext = SKX_PMON_CTL_UMASK_EXT,
	.box_ctl	= SKX_UPI_PCI_PMON_BOX_CTL,
	.ops		= &skx_upi_uncore_pci_ops,
	.format_group	= &skx_upi_uncore_format_group,
};
3621
3622 static void skx_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
3623 {
3624         struct pci_dev *pdev = box->pci_dev;
3625
3626         __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
3627         pci_write_config_dword(pdev, SKX_M2M_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
3628 }
3629
static struct intel_uncore_ops skx_m2m_uncore_pci_ops = {
	.init_box	= skx_m2m_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};

/* SKX M2M PMON (PCI-based). */
static struct intel_uncore_type skx_uncore_m2m = {
	.name		= "m2m",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SKX_M2M_PCI_PMON_CTR0,
	.event_ctl	= SKX_M2M_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SKX_M2M_PCI_PMON_BOX_CTL,
	.ops		= &skx_m2m_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
3651
/* M2PCIe counter constraints: event 0x23 is restricted to the
 * counters in mask 0x3 (counters 0-1). */
static struct event_constraint skx_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	EVENT_CONSTRAINT_END
};
3656
/*
 * M2PCIe uncore PMU: 4 boxes, 4 counters each, using the generic
 * SNB-EP PCI PMON register layout and the IvyBridge-EP PCI ops.
 */
static struct intel_uncore_type skx_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters   = 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.constraints	= skx_uncore_m2pcie_constraints,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
3670
/* M3UPI counter constraints: events 0x1d/0x1e only on the counter in
 * mask 0x1 (counter 0); the others may use any of mask 0x7. */
static struct event_constraint skx_uncore_m3upi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x51, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x52, 0x7),
	EVENT_CONSTRAINT_END
};
3682
/*
 * M3UPI uncore PMU: 3 boxes, 3 counters each, generic SNB-EP PCI PMON
 * register layout with the IvyBridge-EP PCI ops.
 */
static struct intel_uncore_type skx_uncore_m3upi = {
	.name		= "m3upi",
	.num_counters   = 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.constraints	= skx_uncore_m3upi_constraints,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
3696
/* Indices into skx_pci_uncores[]; also encoded into pci_device_id
 * driver_data (via UNCORE_PCI_DEV_FULL_DATA) to tag each PCI match
 * with its uncore type. */
enum {
	SKX_PCI_UNCORE_IMC,
	SKX_PCI_UNCORE_M2M,
	SKX_PCI_UNCORE_UPI,
	SKX_PCI_UNCORE_M2PCIE,
	SKX_PCI_UNCORE_M3UPI,
};
3704
/* PCI uncore types for SKX, indexed by the SKX_PCI_UNCORE_* enum;
 * NULL-terminated so the core uncore code can iterate it. */
static struct intel_uncore_type *skx_pci_uncores[] = {
	[SKX_PCI_UNCORE_IMC]	= &skx_uncore_imc,
	[SKX_PCI_UNCORE_M2M]	= &skx_uncore_m2m,
	[SKX_PCI_UNCORE_UPI]	= &skx_uncore_upi,
	[SKX_PCI_UNCORE_M2PCIE]	= &skx_uncore_m2pcie,
	[SKX_PCI_UNCORE_M3UPI]	= &skx_uncore_m3upi,
	NULL,
};
3713
3714 static const struct pci_device_id skx_uncore_pci_ids[] = {
3715         { /* MC0 Channel 0 */
3716                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
3717                 .driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 2, SKX_PCI_UNCORE_IMC, 0),
3718         },
3719         { /* MC0 Channel 1 */
3720                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
3721                 .driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 6, SKX_PCI_UNCORE_IMC, 1),
3722         },
3723         { /* MC0 Channel 2 */
3724                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
3725                 .driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 2, SKX_PCI_UNCORE_IMC, 2),
3726         },
3727         { /* MC1 Channel 0 */
3728                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
3729                 .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 2, SKX_PCI_UNCORE_IMC, 3),
3730         },
3731         { /* MC1 Channel 1 */
3732                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
3733                 .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 6, SKX_PCI_UNCORE_IMC, 4),
3734         },
3735         { /* MC1 Channel 2 */
3736                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
3737                 .driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 2, SKX_PCI_UNCORE_IMC, 5),
3738         },
3739         { /* M2M0 */
3740                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
3741                 .driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 0, SKX_PCI_UNCORE_M2M, 0),
3742         },
3743         { /* M2M1 */
3744                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
3745                 .driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 0, SKX_PCI_UNCORE_M2M, 1),
3746         },
3747         { /* UPI0 Link 0 */
3748                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
3749                 .driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, SKX_PCI_UNCORE_UPI, 0),
3750         },
3751         { /* UPI0 Link 1 */
3752                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
3753                 .driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, SKX_PCI_UNCORE_UPI, 1),
3754         },
3755         { /* UPI1 Link 2 */
3756                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
3757                 .driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, SKX_PCI_UNCORE_UPI, 2),
3758         },
3759         { /* M2PCIe 0 */
3760                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
3761                 .driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 1, SKX_PCI_UNCORE_M2PCIE, 0),
3762         },
3763         { /* M2PCIe 1 */
3764                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
3765                 .driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 1, SKX_PCI_UNCORE_M2PCIE, 1),
3766         },
3767         { /* M2PCIe 2 */
3768                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
3769                 .driver_data = UNCORE_PCI_DEV_FULL_DATA(23, 1, SKX_PCI_UNCORE_M2PCIE, 2),
3770         },
3771         { /* M2PCIe 3 */
3772                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
3773                 .driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 5, SKX_PCI_UNCORE_M2PCIE, 3),
3774         },
3775         { /* M3UPI0 Link 0 */
3776                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
3777                 .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 0),
3778         },
3779         { /* M3UPI0 Link 1 */
3780                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204E),
3781                 .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 2, SKX_PCI_UNCORE_M3UPI, 1),
3782         },
3783         { /* M3UPI1 Link 2 */
3784                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
3785                 .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 5, SKX_PCI_UNCORE_M3UPI, 2),
3786         },
3787         { /* end: all zeroes */ }
3788 };
3789
3790
/* Only id_table is filled in; probe/remove are left unset, so device
 * discovery is presumably driven by the uncore core code rather than
 * the PCI bus (NOTE(review): confirm against uncore_pci_init()). */
static struct pci_driver skx_uncore_pci_driver = {
	.name		= "skx_uncore",
	.id_table	= skx_uncore_pci_ids,
};
3795
3796 int skx_uncore_pci_init(void)
3797 {
3798         /* need to double check pci address */
3799         int ret = snbep_pci2phy_map_init(0x2014, SKX_CPUNODEID, SKX_GIDNIDMAP, false);
3800
3801         if (ret)
3802                 return ret;
3803
3804         uncore_pci_uncores = skx_pci_uncores;
3805         uncore_pci_driver = &skx_uncore_pci_driver;
3806         return 0;
3807 }
3808
3809 /* end of SKX uncore support */