// SPDX-License-Identifier: GPL-2.0
/* SandyBridge-EP/IvyTown uncore support */
3 #include <asm/cpu_device_id.h>
4 #include <asm/msr.h>
5 #include "uncore.h"
6 #include "uncore_discovery.h"
7
/* SNB-EP pci bus to socket mapping */
#define SNBEP_CPUNODEID				0x40
#define SNBEP_GIDNIDMAP				0x54

/* SNB-EP Box level control */
#define SNBEP_PMON_BOX_CTL_RST_CTRL		(1 << 0)
#define SNBEP_PMON_BOX_CTL_RST_CTRS		(1 << 1)
#define SNBEP_PMON_BOX_CTL_FRZ			(1 << 8)
#define SNBEP_PMON_BOX_CTL_FRZ_EN		(1 << 16)
#define SNBEP_PMON_BOX_CTL_INT			(SNBEP_PMON_BOX_CTL_RST_CTRL | \
						 SNBEP_PMON_BOX_CTL_RST_CTRS | \
						 SNBEP_PMON_BOX_CTL_FRZ_EN)
/* SNB-EP event control */
#define SNBEP_PMON_CTL_EV_SEL_MASK		0x000000ff
#define SNBEP_PMON_CTL_UMASK_MASK		0x0000ff00
#define SNBEP_PMON_CTL_RST			(1 << 17)
#define SNBEP_PMON_CTL_EDGE_DET			(1 << 18)
#define SNBEP_PMON_CTL_EV_SEL_EXT		(1 << 21)
#define SNBEP_PMON_CTL_EN			(1 << 22)
#define SNBEP_PMON_CTL_INVERT			(1 << 23)
#define SNBEP_PMON_CTL_TRESH_MASK		0xff000000
#define SNBEP_PMON_RAW_EVENT_MASK		(SNBEP_PMON_CTL_EV_SEL_MASK | \
						 SNBEP_PMON_CTL_UMASK_MASK | \
						 SNBEP_PMON_CTL_EDGE_DET | \
						 SNBEP_PMON_CTL_INVERT | \
						 SNBEP_PMON_CTL_TRESH_MASK)

/* SNB-EP Ubox event control */
#define SNBEP_U_MSR_PMON_CTL_TRESH_MASK		0x1f000000
#define SNBEP_U_MSR_PMON_RAW_EVENT_MASK		\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PMON_CTL_UMASK_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PMON_CTL_INVERT | \
				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)

#define SNBEP_CBO_PMON_CTL_TID_EN		(1 << 19)
#define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK	(SNBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

/* SNB-EP PCU event control */
#define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK	0x0000c000
#define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK	0x1f000000
#define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT	(1 << 30)
#define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET	(1 << 31)
#define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PMON_CTL_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)

#define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_RAW_EVENT_MASK | \
				 SNBEP_PMON_CTL_EV_SEL_EXT)

/* SNB-EP pci control register */
#define SNBEP_PCI_PMON_BOX_CTL			0xf4
#define SNBEP_PCI_PMON_CTL0			0xd8
/* SNB-EP pci counter register */
#define SNBEP_PCI_PMON_CTR0			0xa0

/* SNB-EP home agent register */
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0	0x40
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1	0x44
#define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH	0x48
/* SNB-EP memory controller register */
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL		0xf0
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR		0xd0
/* SNB-EP QPI register */
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0		0x228
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1		0x22c
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK0		0x238
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK1		0x23c

/* SNB-EP Ubox register */
#define SNBEP_U_MSR_PMON_CTR0			0xc16
#define SNBEP_U_MSR_PMON_CTL0			0xc10

#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL		0xc08
#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR		0xc09

/* SNB-EP Cbo register */
#define SNBEP_C0_MSR_PMON_CTR0			0xd16
#define SNBEP_C0_MSR_PMON_CTL0			0xd10
#define SNBEP_C0_MSR_PMON_BOX_CTL		0xd04
#define SNBEP_C0_MSR_PMON_BOX_FILTER		0xd14
#define SNBEP_CBO_MSR_OFFSET			0x20

#define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID	0x1f
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID	0x3fc00
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE	0x7c0000
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC	0xff800000

#define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) {	\
	.event = (e),				\
	.msr = SNBEP_C0_MSR_PMON_BOX_FILTER,	\
	.config_mask = (m),			\
	.idx = (i)				\
}

/* SNB-EP PCU register */
#define SNBEP_PCU_MSR_PMON_CTR0			0xc36
#define SNBEP_PCU_MSR_PMON_CTL0			0xc30
#define SNBEP_PCU_MSR_PMON_BOX_CTL		0xc24
#define SNBEP_PCU_MSR_PMON_BOX_FILTER		0xc34
#define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK	0xffffffff
#define SNBEP_PCU_MSR_CORE_C3_CTR		0x3fc
#define SNBEP_PCU_MSR_CORE_C6_CTR		0x3fd
119
/* IVBEP event control */
#define IVBEP_PMON_BOX_CTL_INT		(SNBEP_PMON_BOX_CTL_RST_CTRL | \
					 SNBEP_PMON_BOX_CTL_RST_CTRS)
#define IVBEP_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_TRESH_MASK)
/* IVBEP Ubox */
#define IVBEP_U_MSR_PMON_GLOBAL_CTL		0xc00
#define IVBEP_U_PMON_GLOBAL_FRZ_ALL		(1 << 31)
#define IVBEP_U_PMON_GLOBAL_UNFRZ_ALL		(1 << 29)

#define IVBEP_U_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PMON_CTL_UMASK_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
/* IVBEP Cbo */
#define IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK		(IVBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

#define IVBEP_CB0_MSR_PMON_BOX_FILTER_TID		(0x1fULL << 0)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 5)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE	(0x3fULL << 17)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NID		(0xffffULL << 32)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC		(0x1ffULL << 52)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_C6		(0x1ULL << 61)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NC		(0x1ULL << 62)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)

/* IVBEP home agent */
#define IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST		(1 << 16)
#define IVBEP_HA_PCI_PMON_RAW_EVENT_MASK		\
				(IVBEP_PMON_RAW_EVENT_MASK | \
				 IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST)
/* IVBEP PCU */
#define IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
/* IVBEP QPI */
#define IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK	\
				(IVBEP_PMON_RAW_EVENT_MASK | \
				 SNBEP_PMON_CTL_EV_SEL_EXT)

/* Extract field i of width n bits from x, keeping x's type. */
#define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
				((1ULL << (n)) - 1)))
170
/* Haswell-EP Ubox */
#define HSWEP_U_MSR_PMON_CTR0			0x709
#define HSWEP_U_MSR_PMON_CTL0			0x705
#define HSWEP_U_MSR_PMON_FILTER			0x707

#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTL		0x703
#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTR		0x704

#define HSWEP_U_MSR_PMON_BOX_FILTER_TID		(0x1 << 0)
#define HSWEP_U_MSR_PMON_BOX_FILTER_CID		(0x1fULL << 1)
#define HSWEP_U_MSR_PMON_BOX_FILTER_MASK \
					(HSWEP_U_MSR_PMON_BOX_FILTER_TID | \
					 HSWEP_U_MSR_PMON_BOX_FILTER_CID)

/* Haswell-EP CBo */
#define HSWEP_C0_MSR_PMON_CTR0			0xe08
#define HSWEP_C0_MSR_PMON_CTL0			0xe01
#define HSWEP_C0_MSR_PMON_BOX_CTL		0xe00
#define HSWEP_C0_MSR_PMON_BOX_FILTER0		0xe05
#define HSWEP_CBO_MSR_OFFSET			0x10


#define HSWEP_CB0_MSR_PMON_BOX_FILTER_TID		(0x3fULL << 0)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 6)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE	(0x7fULL << 17)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NID		(0xffffULL << 32)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC		(0x1ffULL << 52)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_C6		(0x1ULL << 61)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NC		(0x1ULL << 62)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)


/* Haswell-EP Sbox */
#define HSWEP_S0_MSR_PMON_CTR0			0x726
#define HSWEP_S0_MSR_PMON_CTL0			0x721
#define HSWEP_S0_MSR_PMON_BOX_CTL		0x720
#define HSWEP_SBOX_MSR_OFFSET			0xa
#define HSWEP_S_MSR_PMON_RAW_EVENT_MASK		(SNBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

/* Haswell-EP PCU */
#define HSWEP_PCU_MSR_PMON_CTR0			0x717
#define HSWEP_PCU_MSR_PMON_CTL0			0x711
#define HSWEP_PCU_MSR_PMON_BOX_CTL		0x710
#define HSWEP_PCU_MSR_PMON_BOX_FILTER		0x715
216
/* KNL Ubox */
#define KNL_U_MSR_PMON_RAW_EVENT_MASK \
					(SNBEP_U_MSR_PMON_RAW_EVENT_MASK | \
						SNBEP_CBO_PMON_CTL_TID_EN)
/* KNL CHA */
#define KNL_CHA_MSR_OFFSET			0xc
#define KNL_CHA_MSR_PMON_CTL_QOR		(1 << 16)
#define KNL_CHA_MSR_PMON_RAW_EVENT_MASK \
					(SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK | \
					 KNL_CHA_MSR_PMON_CTL_QOR)
#define KNL_CHA_MSR_PMON_BOX_FILTER_TID		0x1ff
#define KNL_CHA_MSR_PMON_BOX_FILTER_STATE	(7 << 18)
#define KNL_CHA_MSR_PMON_BOX_FILTER_OP		(0xfffffe2aULL << 32)
#define KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE	(0x1ULL << 32)
#define KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE	(0x1ULL << 33)
#define KNL_CHA_MSR_PMON_BOX_FILTER_NNC		(0x1ULL << 37)

/* KNL EDC/MC UCLK */
#define KNL_UCLK_MSR_PMON_CTR0_LOW		0x400
#define KNL_UCLK_MSR_PMON_CTL0			0x420
#define KNL_UCLK_MSR_PMON_BOX_CTL		0x430
#define KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW	0x44c
#define KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL	0x454
#define KNL_PMON_FIXED_CTL_EN			0x1

/* KNL EDC */
#define KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW		0xa00
#define KNL_EDC0_ECLK_MSR_PMON_CTL0		0xa20
#define KNL_EDC0_ECLK_MSR_PMON_BOX_CTL		0xa30
#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW	0xa3c
#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL	0xa44

/* KNL MC */
#define KNL_MC0_CH0_MSR_PMON_CTR0_LOW		0xb00
#define KNL_MC0_CH0_MSR_PMON_CTL0		0xb20
#define KNL_MC0_CH0_MSR_PMON_BOX_CTL		0xb30
#define KNL_MC0_CH0_MSR_PMON_FIXED_LOW		0xb3c
#define KNL_MC0_CH0_MSR_PMON_FIXED_CTL		0xb44

/* KNL IRP */
#define KNL_IRP_PCI_PMON_BOX_CTL		0xf0
#define KNL_IRP_PCI_PMON_RAW_EVENT_MASK		(SNBEP_PMON_RAW_EVENT_MASK | \
						 KNL_CHA_MSR_PMON_CTL_QOR)
/* KNL PCU */
#define KNL_PCU_PMON_CTL_EV_SEL_MASK		0x0000007f
#define KNL_PCU_PMON_CTL_USE_OCC_CTR		(1 << 7)
#define KNL_PCU_MSR_PMON_CTL_TRESH_MASK		0x3f000000
#define KNL_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(KNL_PCU_PMON_CTL_EV_SEL_MASK | \
				 KNL_PCU_PMON_CTL_USE_OCC_CTR | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_CBO_PMON_CTL_TID_EN | \
				 SNBEP_PMON_CTL_INVERT | \
				 KNL_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
274
/* SKX pci bus to socket mapping */
#define SKX_CPUNODEID			0xc0
#define SKX_GIDNIDMAP			0xd4

/*
 * The CPU_BUS_NUMBER MSR returns the values of the respective CPUBUSNO CSR
 * that BIOS programmed. MSR has package scope.
 * |  Bit  |  Default  |  Description
 * | [63]  |    00h    | VALID - When set, indicates the CPU bus
 *                       numbers have been initialized. (RO)
 * |[62:48]|    ---    | Reserved
 * |[47:40]|    00h    | BUS_NUM_5 - Return the bus number BIOS assigned
 *                       CPUBUSNO(5). (RO)
 * |[39:32]|    00h    | BUS_NUM_4 - Return the bus number BIOS assigned
 *                       CPUBUSNO(4). (RO)
 * |[31:24]|    00h    | BUS_NUM_3 - Return the bus number BIOS assigned
 *                       CPUBUSNO(3). (RO)
 * |[23:16]|    00h    | BUS_NUM_2 - Return the bus number BIOS assigned
 *                       CPUBUSNO(2). (RO)
 * |[15:8] |    00h    | BUS_NUM_1 - Return the bus number BIOS assigned
 *                       CPUBUSNO(1). (RO)
 * | [7:0] |    00h    | BUS_NUM_0 - Return the bus number BIOS assigned
 *                       CPUBUSNO(0). (RO)
 */
#define SKX_MSR_CPU_BUS_NUMBER		0x300
#define SKX_MSR_CPU_BUS_VALID_BIT	(1ULL << 63)
#define BUS_NUM_STRIDE			8

/* SKX CHA */
#define SKX_CHA_MSR_PMON_BOX_FILTER_TID		(0x1ffULL << 0)
#define SKX_CHA_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 9)
#define SKX_CHA_MSR_PMON_BOX_FILTER_STATE	(0x3ffULL << 17)
#define SKX_CHA_MSR_PMON_BOX_FILTER_REM		(0x1ULL << 32)
#define SKX_CHA_MSR_PMON_BOX_FILTER_LOC		(0x1ULL << 33)
#define SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC	(0x1ULL << 35)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NM		(0x1ULL << 36)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM	(0x1ULL << 37)
#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC0	(0x3ffULL << 41)
#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC1	(0x3ffULL << 51)
#define SKX_CHA_MSR_PMON_BOX_FILTER_C6		(0x1ULL << 61)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NC		(0x1ULL << 62)
#define SKX_CHA_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)

/* SKX IIO */
#define SKX_IIO0_MSR_PMON_CTL0		0xa48
#define SKX_IIO0_MSR_PMON_CTR0		0xa41
#define SKX_IIO0_MSR_PMON_BOX_CTL	0xa40
#define SKX_IIO_MSR_OFFSET		0x20

#define SKX_PMON_CTL_TRESH_MASK		(0xff << 24)
#define SKX_PMON_CTL_TRESH_MASK_EXT	(0xf)
#define SKX_PMON_CTL_CH_MASK		(0xff << 4)
#define SKX_PMON_CTL_FC_MASK		(0x7 << 12)
#define SKX_IIO_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_INVERT | \
					 SKX_PMON_CTL_TRESH_MASK)
#define SKX_IIO_PMON_RAW_EVENT_MASK_EXT	(SKX_PMON_CTL_TRESH_MASK_EXT | \
					 SKX_PMON_CTL_CH_MASK | \
					 SKX_PMON_CTL_FC_MASK)

/* SKX IRP */
#define SKX_IRP0_MSR_PMON_CTL0		0xa5b
#define SKX_IRP0_MSR_PMON_CTR0		0xa59
#define SKX_IRP0_MSR_PMON_BOX_CTL	0xa58
#define SKX_IRP_MSR_OFFSET		0x20

/* SKX UPI */
#define SKX_UPI_PCI_PMON_CTL0		0x350
#define SKX_UPI_PCI_PMON_CTR0		0x318
#define SKX_UPI_PCI_PMON_BOX_CTL	0x378
#define SKX_UPI_CTL_UMASK_EXT		0xffefff

/* SKX M2M */
#define SKX_M2M_PCI_PMON_CTL0		0x228
#define SKX_M2M_PCI_PMON_CTR0		0x200
#define SKX_M2M_PCI_PMON_BOX_CTL	0x258

/* Memory Map registers device ID */
#define SNR_ICX_MESH2IIO_MMAP_DID	0x9a2
#define SNR_ICX_SAD_CONTROL_CFG		0x3f4

/* Getting I/O stack id in SAD_COTROL_CFG notation */
#define SAD_CONTROL_STACK_ID(data)	(((data) >> 4) & 0x7)
360
/* SNR Ubox */
#define SNR_U_MSR_PMON_CTR0		0x1f98
#define SNR_U_MSR_PMON_CTL0		0x1f91
#define SNR_U_MSR_PMON_UCLK_FIXED_CTL	0x1f93
#define SNR_U_MSR_PMON_UCLK_FIXED_CTR	0x1f94

/* SNR CHA */
#define SNR_CHA_RAW_EVENT_MASK_EXT	0x3ffffff
#define SNR_CHA_MSR_PMON_CTL0		0x1c01
#define SNR_CHA_MSR_PMON_CTR0		0x1c08
#define SNR_CHA_MSR_PMON_BOX_CTL	0x1c00
#define SNR_C0_MSR_PMON_BOX_FILTER0	0x1c05


/* SNR IIO */
#define SNR_IIO_MSR_PMON_CTL0		0x1e08
#define SNR_IIO_MSR_PMON_CTR0		0x1e01
#define SNR_IIO_MSR_PMON_BOX_CTL	0x1e00
#define SNR_IIO_MSR_OFFSET		0x10
#define SNR_IIO_PMON_RAW_EVENT_MASK_EXT	0x7ffff

/* SNR IRP */
#define SNR_IRP0_MSR_PMON_CTL0		0x1ea8
#define SNR_IRP0_MSR_PMON_CTR0		0x1ea1
#define SNR_IRP0_MSR_PMON_BOX_CTL	0x1ea0
#define SNR_IRP_MSR_OFFSET		0x10

/* SNR M2PCIE */
#define SNR_M2PCIE_MSR_PMON_CTL0	0x1e58
#define SNR_M2PCIE_MSR_PMON_CTR0	0x1e51
#define SNR_M2PCIE_MSR_PMON_BOX_CTL	0x1e50
#define SNR_M2PCIE_MSR_OFFSET		0x10

/* SNR PCU */
#define SNR_PCU_MSR_PMON_CTL0		0x1ef1
#define SNR_PCU_MSR_PMON_CTR0		0x1ef8
#define SNR_PCU_MSR_PMON_BOX_CTL	0x1ef0
#define SNR_PCU_MSR_PMON_BOX_FILTER	0x1efc

/* SNR M2M */
#define SNR_M2M_PCI_PMON_CTL0		0x468
#define SNR_M2M_PCI_PMON_CTR0		0x440
#define SNR_M2M_PCI_PMON_BOX_CTL	0x438
#define SNR_M2M_PCI_PMON_UMASK_EXT	0xff

/* SNR PCIE3 */
#define SNR_PCIE3_PCI_PMON_CTL0		0x508
#define SNR_PCIE3_PCI_PMON_CTR0		0x4e8
#define SNR_PCIE3_PCI_PMON_BOX_CTL	0x4e0

/* SNR IMC */
#define SNR_IMC_MMIO_PMON_FIXED_CTL	0x54
#define SNR_IMC_MMIO_PMON_FIXED_CTR	0x38
#define SNR_IMC_MMIO_PMON_CTL0		0x40
#define SNR_IMC_MMIO_PMON_CTR0		0x8
#define SNR_IMC_MMIO_PMON_BOX_CTL	0x22800
#define SNR_IMC_MMIO_OFFSET		0x4000
#define SNR_IMC_MMIO_SIZE		0x4000
#define SNR_IMC_MMIO_BASE_OFFSET	0xd0
#define SNR_IMC_MMIO_BASE_MASK		0x1FFFFFFF
#define SNR_IMC_MMIO_MEM0_OFFSET	0xd8
#define SNR_IMC_MMIO_MEM0_MASK		0x7FF
423
/* ICX CHA */
#define ICX_C34_MSR_PMON_CTR0			0xb68
#define ICX_C34_MSR_PMON_CTL0			0xb61
#define ICX_C34_MSR_PMON_BOX_CTL		0xb60
#define ICX_C34_MSR_PMON_BOX_FILTER0		0xb65

/* ICX IIO */
#define ICX_IIO_MSR_PMON_CTL0			0xa58
#define ICX_IIO_MSR_PMON_CTR0			0xa51
#define ICX_IIO_MSR_PMON_BOX_CTL		0xa50

/* ICX IRP */
#define ICX_IRP0_MSR_PMON_CTL0			0xa4d
#define ICX_IRP0_MSR_PMON_CTR0			0xa4b
#define ICX_IRP0_MSR_PMON_BOX_CTL		0xa4a

/* ICX M2PCIE */
#define ICX_M2PCIE_MSR_PMON_CTL0		0xa46
#define ICX_M2PCIE_MSR_PMON_CTR0		0xa41
#define ICX_M2PCIE_MSR_PMON_BOX_CTL		0xa40

/* ICX UPI */
#define ICX_UPI_PCI_PMON_CTL0			0x350
#define ICX_UPI_PCI_PMON_CTR0			0x320
#define ICX_UPI_PCI_PMON_BOX_CTL		0x318
#define ICX_UPI_CTL_UMASK_EXT			0xffffff
#define ICX_UBOX_DID				0x3450

/* ICX M3UPI*/
#define ICX_M3UPI_PCI_PMON_CTL0			0xd8
#define ICX_M3UPI_PCI_PMON_CTR0			0xa8
#define ICX_M3UPI_PCI_PMON_BOX_CTL		0xa0

/* ICX IMC */
#define ICX_NUMBER_IMC_CHN			3
#define ICX_IMC_MEM_STRIDE			0x4

/* SPR */
#define SPR_RAW_EVENT_MASK_EXT			0xffffff
#define SPR_UBOX_DID				0x3250

/* SPR CHA */
#define SPR_CHA_EVENT_MASK_EXT			0xffffffff
#define SPR_CHA_PMON_CTL_TID_EN			(1 << 16)
#define SPR_CHA_PMON_EVENT_MASK			(SNBEP_PMON_RAW_EVENT_MASK | \
						 SPR_CHA_PMON_CTL_TID_EN)
#define SPR_CHA_PMON_BOX_FILTER_TID		0x3ff

#define SPR_C0_MSR_PMON_BOX_FILTER0		0x200e

/* DMR */
#define DMR_IMH1_HIOP_MMIO_BASE			0x1ffff6ae7000
#define DMR_HIOP_MMIO_SIZE			0x8000
#define DMR_CXLCM_EVENT_MASK_EXT		0xf
#define DMR_HAMVF_EVENT_MASK_EXT		0xffffffff
#define DMR_PCIE4_EVENT_MASK_EXT		0xffffff

#define UNCORE_DMR_ITC				0x30

#define DMR_IMC_PMON_FIXED_CTR			0x18
#define DMR_IMC_PMON_FIXED_CTL			0x10
485
486 DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
487 DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6");
488 DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
489 DEFINE_UNCORE_FORMAT_ATTR(use_occ_ctr, use_occ_ctr, "config:7");
490 DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
491 DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-43,45-55");
492 DEFINE_UNCORE_FORMAT_ATTR(umask_ext2, umask, "config:8-15,32-57");
493 DEFINE_UNCORE_FORMAT_ATTR(umask_ext3, umask, "config:8-15,32-39");
494 DEFINE_UNCORE_FORMAT_ATTR(umask_ext4, umask, "config:8-15,32-55");
495 DEFINE_UNCORE_FORMAT_ATTR(umask_ext5, umask, "config:8-15,32-63");
496 DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
497 DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
498 DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
499 DEFINE_UNCORE_FORMAT_ATTR(tid_en2, tid_en, "config:16");
500 DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
501 DEFINE_UNCORE_FORMAT_ATTR(inv2, inv, "config:21");
502 DEFINE_UNCORE_FORMAT_ATTR(thresh_ext, thresh_ext, "config:32-35");
503 DEFINE_UNCORE_FORMAT_ATTR(thresh10, thresh, "config:23-32");
504 DEFINE_UNCORE_FORMAT_ATTR(thresh9_2, thresh, "config:23-31");
505 DEFINE_UNCORE_FORMAT_ATTR(thresh9, thresh, "config:24-35");
506 DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
507 DEFINE_UNCORE_FORMAT_ATTR(thresh6, thresh, "config:24-29");
508 DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
509 DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
510 DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
511 DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
512 DEFINE_UNCORE_FORMAT_ATTR(occ_edge_det, occ_edge_det, "config:31");
513 DEFINE_UNCORE_FORMAT_ATTR(port_en, port_en, "config:32-35");
514 DEFINE_UNCORE_FORMAT_ATTR(rs3_sel, rs3_sel, "config:36");
515 DEFINE_UNCORE_FORMAT_ATTR(rx_sel, rx_sel, "config:37");
516 DEFINE_UNCORE_FORMAT_ATTR(tx_sel, tx_sel, "config:38");
517 DEFINE_UNCORE_FORMAT_ATTR(iep_sel, iep_sel, "config:39");
518 DEFINE_UNCORE_FORMAT_ATTR(vc_sel, vc_sel, "config:40-47");
519 DEFINE_UNCORE_FORMAT_ATTR(port_sel, port_sel, "config:48-55");
520 DEFINE_UNCORE_FORMAT_ATTR(ch_mask, ch_mask, "config:36-43");
521 DEFINE_UNCORE_FORMAT_ATTR(ch_mask2, ch_mask, "config:36-47");
522 DEFINE_UNCORE_FORMAT_ATTR(fc_mask, fc_mask, "config:44-46");
523 DEFINE_UNCORE_FORMAT_ATTR(fc_mask2, fc_mask, "config:48-50");
524 DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
525 DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0");
526 DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5");
527 DEFINE_UNCORE_FORMAT_ATTR(filter_tid4, filter_tid, "config1:0-8");
528 DEFINE_UNCORE_FORMAT_ATTR(filter_tid5, filter_tid, "config1:0-9");
529 DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5");
530 DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
531 DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8");
532 DEFINE_UNCORE_FORMAT_ATTR(filter_link3, filter_link, "config1:12");
533 DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
534 DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
535 DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
536 DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
537 DEFINE_UNCORE_FORMAT_ATTR(filter_state3, filter_state, "config1:17-23");
538 DEFINE_UNCORE_FORMAT_ATTR(filter_state4, filter_state, "config1:18-20");
539 DEFINE_UNCORE_FORMAT_ATTR(filter_state5, filter_state, "config1:17-26");
540 DEFINE_UNCORE_FORMAT_ATTR(filter_rem, filter_rem, "config1:32");
541 DEFINE_UNCORE_FORMAT_ATTR(filter_loc, filter_loc, "config1:33");
542 DEFINE_UNCORE_FORMAT_ATTR(filter_nm, filter_nm, "config1:36");
543 DEFINE_UNCORE_FORMAT_ATTR(filter_not_nm, filter_not_nm, "config1:37");
544 DEFINE_UNCORE_FORMAT_ATTR(filter_local, filter_local, "config1:33");
545 DEFINE_UNCORE_FORMAT_ATTR(filter_all_op, filter_all_op, "config1:35");
546 DEFINE_UNCORE_FORMAT_ATTR(filter_nnm, filter_nnm, "config1:37");
547 DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
548 DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
549 DEFINE_UNCORE_FORMAT_ATTR(filter_opc3, filter_opc, "config1:41-60");
550 DEFINE_UNCORE_FORMAT_ATTR(filter_opc_0, filter_opc0, "config1:41-50");
551 DEFINE_UNCORE_FORMAT_ATTR(filter_opc_1, filter_opc1, "config1:51-60");
552 DEFINE_UNCORE_FORMAT_ATTR(filter_nc, filter_nc, "config1:62");
553 DEFINE_UNCORE_FORMAT_ATTR(filter_c6, filter_c6, "config1:61");
554 DEFINE_UNCORE_FORMAT_ATTR(filter_isoc, filter_isoc, "config1:63");
555 DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
556 DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
557 DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
558 DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
559 DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
560 DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
561 DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
562 DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
563 DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
564 DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
565 DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
566 DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
567 DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
568 DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
569 DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
570 DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
571 DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
572 DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
573 DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
574 DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
575 DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
576 DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");
577
snbep_uncore_pci_disable_box(struct intel_uncore_box * box)578 static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
579 {
580 struct pci_dev *pdev = box->pci_dev;
581 int box_ctl = uncore_pci_box_ctl(box);
582 u32 config = 0;
583
584 if (!pci_read_config_dword(pdev, box_ctl, &config)) {
585 config |= SNBEP_PMON_BOX_CTL_FRZ;
586 pci_write_config_dword(pdev, box_ctl, config);
587 }
588 }
589
snbep_uncore_pci_enable_box(struct intel_uncore_box * box)590 static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
591 {
592 struct pci_dev *pdev = box->pci_dev;
593 int box_ctl = uncore_pci_box_ctl(box);
594 u32 config = 0;
595
596 if (!pci_read_config_dword(pdev, box_ctl, &config)) {
597 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
598 pci_write_config_dword(pdev, box_ctl, config);
599 }
600 }
601
snbep_uncore_pci_enable_event(struct intel_uncore_box * box,struct perf_event * event)602 static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
603 {
604 struct pci_dev *pdev = box->pci_dev;
605 struct hw_perf_event *hwc = &event->hw;
606
607 pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
608 }
609
snbep_uncore_pci_disable_event(struct intel_uncore_box * box,struct perf_event * event)610 static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
611 {
612 struct pci_dev *pdev = box->pci_dev;
613 struct hw_perf_event *hwc = &event->hw;
614
615 pci_write_config_dword(pdev, hwc->config_base, hwc->config);
616 }
617
snbep_uncore_pci_read_counter(struct intel_uncore_box * box,struct perf_event * event)618 static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
619 {
620 struct pci_dev *pdev = box->pci_dev;
621 struct hw_perf_event *hwc = &event->hw;
622 u64 count = 0;
623
624 pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
625 pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);
626
627 return count;
628 }
629
snbep_uncore_pci_init_box(struct intel_uncore_box * box)630 static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
631 {
632 struct pci_dev *pdev = box->pci_dev;
633 int box_ctl = uncore_pci_box_ctl(box);
634
635 pci_write_config_dword(pdev, box_ctl, SNBEP_PMON_BOX_CTL_INT);
636 }
637
snbep_uncore_msr_disable_box(struct intel_uncore_box * box)638 static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
639 {
640 u64 config;
641 unsigned msr;
642
643 msr = uncore_msr_box_ctl(box);
644 if (msr) {
645 rdmsrq(msr, config);
646 config |= SNBEP_PMON_BOX_CTL_FRZ;
647 wrmsrq(msr, config);
648 }
649 }
650
snbep_uncore_msr_enable_box(struct intel_uncore_box * box)651 static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
652 {
653 u64 config;
654 unsigned msr;
655
656 msr = uncore_msr_box_ctl(box);
657 if (msr) {
658 rdmsrq(msr, config);
659 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
660 wrmsrq(msr, config);
661 }
662 }
663
snbep_uncore_msr_enable_event(struct intel_uncore_box * box,struct perf_event * event)664 static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
665 {
666 struct hw_perf_event *hwc = &event->hw;
667 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
668
669 if (reg1->idx != EXTRA_REG_NONE)
670 wrmsrq(reg1->reg, uncore_shared_reg_config(box, 0));
671
672 wrmsrq(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
673 }
674
snbep_uncore_msr_disable_event(struct intel_uncore_box * box,struct perf_event * event)675 static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
676 struct perf_event *event)
677 {
678 struct hw_perf_event *hwc = &event->hw;
679
680 wrmsrq(hwc->config_base, hwc->config);
681 }
682
snbep_uncore_msr_init_box(struct intel_uncore_box * box)683 static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
684 {
685 unsigned msr = uncore_msr_box_ctl(box);
686
687 if (msr)
688 wrmsrq(msr, SNBEP_PMON_BOX_CTL_INT);
689 }
690
691 static struct attribute *snbep_uncore_formats_attr[] = {
692 &format_attr_event.attr,
693 &format_attr_umask.attr,
694 &format_attr_edge.attr,
695 &format_attr_inv.attr,
696 &format_attr_thresh8.attr,
697 NULL,
698 };
699
700 static struct attribute *snbep_uncore_ubox_formats_attr[] = {
701 &format_attr_event.attr,
702 &format_attr_umask.attr,
703 &format_attr_edge.attr,
704 &format_attr_inv.attr,
705 &format_attr_thresh5.attr,
706 NULL,
707 };
708
709 static struct attribute *snbep_uncore_cbox_formats_attr[] = {
710 &format_attr_event.attr,
711 &format_attr_umask.attr,
712 &format_attr_edge.attr,
713 &format_attr_tid_en.attr,
714 &format_attr_inv.attr,
715 &format_attr_thresh8.attr,
716 &format_attr_filter_tid.attr,
717 &format_attr_filter_nid.attr,
718 &format_attr_filter_state.attr,
719 &format_attr_filter_opc.attr,
720 NULL,
721 };
722
723 static struct attribute *snbep_uncore_pcu_formats_attr[] = {
724 &format_attr_event.attr,
725 &format_attr_occ_sel.attr,
726 &format_attr_edge.attr,
727 &format_attr_inv.attr,
728 &format_attr_thresh5.attr,
729 &format_attr_occ_invert.attr,
730 &format_attr_occ_edge.attr,
731 &format_attr_filter_band0.attr,
732 &format_attr_filter_band1.attr,
733 &format_attr_filter_band2.attr,
734 &format_attr_filter_band3.attr,
735 NULL,
736 };
737
738 static struct attribute *snbep_uncore_qpi_formats_attr[] = {
739 &format_attr_event_ext.attr,
740 &format_attr_umask.attr,
741 &format_attr_edge.attr,
742 &format_attr_inv.attr,
743 &format_attr_thresh8.attr,
744 &format_attr_match_rds.attr,
745 &format_attr_match_rnid30.attr,
746 &format_attr_match_rnid4.attr,
747 &format_attr_match_dnid.attr,
748 &format_attr_match_mc.attr,
749 &format_attr_match_opc.attr,
750 &format_attr_match_vnw.attr,
751 &format_attr_match0.attr,
752 &format_attr_match1.attr,
753 &format_attr_mask_rds.attr,
754 &format_attr_mask_rnid30.attr,
755 &format_attr_mask_rnid4.attr,
756 &format_attr_mask_dnid.attr,
757 &format_attr_mask_mc.attr,
758 &format_attr_mask_opc.attr,
759 &format_attr_mask_vnw.attr,
760 &format_attr_mask0.attr,
761 &format_attr_mask1.attr,
762 NULL,
763 };
764
765 static struct uncore_event_desc snbep_uncore_imc_events[] = {
766 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
767 INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x03"),
768 INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
769 INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
770 INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
771 INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
772 INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
773 { /* end: all zeroes */ },
774 };
775
776 static struct uncore_event_desc snbep_uncore_qpi_events[] = {
777 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x14"),
778 INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
779 INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x102,umask=0x08"),
780 INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x103,umask=0x04"),
781 { /* end: all zeroes */ },
782 };
783
784 static const struct attribute_group snbep_uncore_format_group = {
785 .name = "format",
786 .attrs = snbep_uncore_formats_attr,
787 };
788
789 static const struct attribute_group snbep_uncore_ubox_format_group = {
790 .name = "format",
791 .attrs = snbep_uncore_ubox_formats_attr,
792 };
793
794 static const struct attribute_group snbep_uncore_cbox_format_group = {
795 .name = "format",
796 .attrs = snbep_uncore_cbox_formats_attr,
797 };
798
799 static const struct attribute_group snbep_uncore_pcu_format_group = {
800 .name = "format",
801 .attrs = snbep_uncore_pcu_formats_attr,
802 };
803
804 static const struct attribute_group snbep_uncore_qpi_format_group = {
805 .name = "format",
806 .attrs = snbep_uncore_qpi_formats_attr,
807 };
808
/*
 * Common ops for MSR-based boxes.  The double-underscore flavour leaves
 * init_box unset so other users can plug in their own init routine.
 */
#define __SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),			\
	.init_box	= snbep_uncore_msr_init_box		\

static struct intel_uncore_ops snbep_uncore_msr_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};
823
/*
 * Common ops for PCI-based boxes.  enable_event is deliberately left
 * out: each user supplies its own (e.g. the QPI variant programs the
 * match/mask filter first).
 */
#define SNBEP_UNCORE_PCI_OPS_COMMON_INIT()			\
	.init_box	= snbep_uncore_pci_init_box,		\
	.disable_box	= snbep_uncore_pci_disable_box,		\
	.enable_box	= snbep_uncore_pci_enable_box,		\
	.disable_event	= snbep_uncore_pci_disable_event,	\
	.read_counter	= snbep_uncore_pci_read_counter
830
831 static struct intel_uncore_ops snbep_uncore_pci_ops = {
832 SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
833 .enable_event = snbep_uncore_pci_enable_event, \
834 };
835
/*
 * Cbox scheduling constraints: for each event code, a bitmask of the
 * generic counters the event may occupy.
 */
static struct event_constraint snbep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x04, 0x5, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x12, 0x13, 0x3),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x1b, 0x1e, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0xe),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x31, 0x35, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x37, 0x39, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	EVENT_CONSTRAINT_END
};
854
/* R2PCIe counter constraints (counter bitmask per event code) */
static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT_RANGE(0x10, 0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x24, 0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x32, 0x34, 0x3),
	EVENT_CONSTRAINT_END
};

/* R3QPI counter constraints (counter bitmask per event code) */
static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT_RANGE(0x10, 0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x20, 0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x28, 0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x36, 0x39, 0x3),
	EVENT_CONSTRAINT_END
};
872
/*
 * Ubox: single box with two generic counters plus a fixed UCLK counter.
 */
static struct intel_uncore_type snbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &snbep_uncore_msr_ops,
	.format_group	= &snbep_uncore_ubox_format_group,
};
887
/*
 * Which Cbox filter fields each (event|umask) needs.  idx is a bitmask
 * of fields as decoded by snbep_cbox_filter_mask(): 0x1 TID, 0x2 NID,
 * 0x4 STATE, 0x8 OPC.  The first entry reserves the TID field for any
 * event with the tid_en control bit set.
 */
static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
	EVENT_EXTRA_END
};
916
snbep_cbox_put_constraint(struct intel_uncore_box * box,struct perf_event * event)917 static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
918 {
919 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
920 struct intel_uncore_extra_reg *er = &box->shared_regs[0];
921 int i;
922
923 if (uncore_box_is_fake(box))
924 return;
925
926 for (i = 0; i < 5; i++) {
927 if (reg1->alloc & (0x1 << i))
928 atomic_sub(1 << (i * 6), &er->ref);
929 }
930 reg1->alloc = 0;
931 }
932
/*
 * Reserve every Cbox filter field the event requested (bits of
 * reg1->idx) in the shared filter register.  A field can be shared when
 * all current users programmed the same value; er->ref packs a 6-bit
 * refcount per field.  Returns NULL on success, or the empty constraint
 * when a field is held with a conflicting value.
 */
static struct event_constraint *
__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
			    u64 (*cbox_filter_mask)(int fields))
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i, alloc = 0;
	unsigned long flags;
	u64 mask;

	/* no filter fields requested: nothing to arbitrate */
	if (reg1->idx == EXTRA_REG_NONE)
		return NULL;

	raw_spin_lock_irqsave(&er->lock, flags);
	for (i = 0; i < 5; i++) {
		if (!(reg1->idx & (0x1 << i)))
			continue;
		/* on a real box, skip fields this event already holds */
		if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
			continue;

		mask = cbox_filter_mask(0x1 << i);
		/* field is unused, or already holds exactly our value */
		if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
		    !((reg1->config ^ er->config) & mask)) {
			atomic_add(1 << (i * 6), &er->ref);
			er->config &= ~mask;
			er->config |= reg1->config & mask;
			alloc |= (0x1 << i);
		} else {
			/* conflict: leave the loop with i < 5 */
			break;
		}
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);
	if (i < 5)
		goto fail;

	/* remember what we hold so put_constraint can release it */
	if (!uncore_box_is_fake(box))
		reg1->alloc |= alloc;

	return NULL;
fail:
	/* roll back only the references taken during this call */
	for (; i >= 0; i--) {
		if (alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	return &uncore_constraint_empty;
}
979
snbep_cbox_filter_mask(int fields)980 static u64 snbep_cbox_filter_mask(int fields)
981 {
982 u64 mask = 0;
983
984 if (fields & 0x1)
985 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
986 if (fields & 0x2)
987 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
988 if (fields & 0x4)
989 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
990 if (fields & 0x8)
991 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
992
993 return mask;
994 }
995
/* SNB-EP Cbox: arbitrate the shared filter with the SNB-EP field layout */
static struct event_constraint *
snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
}
1001
/*
 * Look up which filter fields this event's code/umask requires (from
 * the extra_regs table), and record the per-box filter MSR address plus
 * the masked config1 value in the event's extra_reg.
 */
static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	/* an event may match several table entries; OR their field sets */
	for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		/* each Cbox has its own filter register, msr_offset apart */
		reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}
1022
/* Cbox ops: common MSR ops plus filter-register arbitration hooks */
static struct intel_uncore_ops snbep_uncore_cbox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_cbox_hw_config,
	.get_constraint		= snbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
1029
/*
 * Cbox (LLC coherence engine): one box per core slice, sharing a single
 * filter register per box (num_shared_regs = 1).
 */
static struct intel_uncore_type snbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &snbep_uncore_cbox_ops,
	.format_group		= &snbep_uncore_cbox_format_group,
};
1045
snbep_pcu_alter_er(struct perf_event * event,int new_idx,bool modify)1046 static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
1047 {
1048 struct hw_perf_event *hwc = &event->hw;
1049 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1050 u64 config = reg1->config;
1051
1052 if (new_idx > reg1->idx)
1053 config <<= 8 * (new_idx - reg1->idx);
1054 else
1055 config >>= 8 * (reg1->idx - new_idx);
1056
1057 if (modify) {
1058 hwc->config += new_idx - reg1->idx;
1059 reg1->config = config;
1060 reg1->idx = new_idx;
1061 }
1062 return config;
1063 }
1064
/*
 * PCU filter arbitration: the shared filter register has four 8-bit
 * lanes (exposed as filter_band0..3 in sysfs); er->ref packs an 8-bit
 * refcount per lane.  Try to claim the event's lane; if it is held with
 * a different value, rotate the event through the remaining lanes
 * before giving up with the empty constraint.
 */
static struct event_constraint *
snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	unsigned long flags;
	int idx = reg1->idx;
	u64 mask, config1 = reg1->config;
	bool ok = false;

	/* no filter needed, or this (real) event already holds its lane */
	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;
again:
	mask = 0xffULL << (idx * 8);
	raw_spin_lock_irqsave(&er->lock, flags);
	/* lane is free (refcount zero) or already holds exactly our value */
	if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
	    !((config1 ^ er->config) & mask)) {
		atomic_add(1 << (idx * 8), &er->ref);
		er->config &= ~mask;
		er->config |= config1 & mask;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (!ok) {
		/* try the next lane; stop after cycling through all four */
		idx = (idx + 1) % 4;
		if (idx != reg1->idx) {
			config1 = snbep_pcu_alter_er(event, idx, false);
			goto again;
		}
		return &uncore_constraint_empty;
	}

	if (!uncore_box_is_fake(box)) {
		/* commit the rotation and remember the reference we hold */
		if (idx != reg1->idx)
			snbep_pcu_alter_er(event, idx, true);
		reg1->alloc = 1;
	}
	return NULL;
}
1106
snbep_pcu_put_constraint(struct intel_uncore_box * box,struct perf_event * event)1107 static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
1108 {
1109 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1110 struct intel_uncore_extra_reg *er = &box->shared_regs[0];
1111
1112 if (uncore_box_is_fake(box) || !reg1->alloc)
1113 return;
1114
1115 atomic_sub(1 << (reg1->idx * 8), &er->ref);
1116 reg1->alloc = 0;
1117 }
1118
snbep_pcu_hw_config(struct intel_uncore_box * box,struct perf_event * event)1119 static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1120 {
1121 struct hw_perf_event *hwc = &event->hw;
1122 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1123 int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
1124
1125 if (ev_sel >= 0xb && ev_sel <= 0xe) {
1126 reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
1127 reg1->idx = ev_sel - 0xb;
1128 reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
1129 }
1130 return 0;
1131 }
1132
/* PCU ops: common MSR ops plus band-filter arbitration hooks */
static struct intel_uncore_ops snbep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

/* PCU (power control unit): single box with one shared filter register */
static struct intel_uncore_type snbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};
1153
/* MSR-based uncore PMUs on SNB-EP; NULL-terminated */
static struct intel_uncore_type *snbep_msr_uncores[] = {
	&snbep_uncore_ubox,
	&snbep_uncore_cbox,
	&snbep_uncore_pcu,
	NULL,
};
1160
snbep_uncore_cpu_init(void)1161 void snbep_uncore_cpu_init(void)
1162 {
1163 if (snbep_uncore_cbox.num_boxes > topology_num_cores_per_package())
1164 snbep_uncore_cbox.num_boxes = topology_num_cores_per_package();
1165 uncore_msr_uncores = snbep_msr_uncores;
1166 }
1167
/* indices into uncore_extra_pci_dev[die].dev[] for the QPI filter devices */
enum {
	SNBEP_PCI_QPI_PORT0_FILTER,
	SNBEP_PCI_QPI_PORT1_FILTER,
	BDX_PCI_QPI_PORT2_FILTER,
};
1173
snbep_qpi_hw_config(struct intel_uncore_box * box,struct perf_event * event)1174 static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1175 {
1176 struct hw_perf_event *hwc = &event->hw;
1177 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1178 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1179
1180 if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
1181 reg1->idx = 0;
1182 reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
1183 reg1->config = event->attr.config1;
1184 reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
1185 reg2->config = event->attr.config2;
1186 }
1187 return 0;
1188 }
1189
/*
 * Program the packet match/mask values (set up by snbep_qpi_hw_config)
 * through the companion "filter" PCI device for this QPI port, then
 * enable the counter on the main device.
 */
static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		/* port N uses the PORTn_FILTER slot of this die */
		int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
		int die = box->dieid;
		struct pci_dev *filter_pdev = uncore_extra_pci_dev[die].dev[idx];

		/* the filter device may be absent; count unfiltered then */
		if (filter_pdev) {
			/* each 64-bit value takes two 32-bit config writes */
			pci_write_config_dword(filter_pdev, reg1->reg,
						(u32)reg1->config);
			pci_write_config_dword(filter_pdev, reg1->reg + 4,
						(u32)(reg1->config >> 32));
			pci_write_config_dword(filter_pdev, reg2->reg,
						(u32)reg2->config);
			pci_write_config_dword(filter_pdev, reg2->reg + 4,
						(u32)(reg2->config >> 32));
		}
	}

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
1216
/* QPI ops: common PCI ops with filter-aware enable and match/mask setup */
static struct intel_uncore_ops snbep_uncore_qpi_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event		= snbep_qpi_enable_event,
	.hw_config		= snbep_qpi_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};

/* shared register layout of the PCI-based SNB-EP boxes */
#define SNBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &snbep_uncore_pci_ops,		\
	.format_group	= &snbep_uncore_format_group
1232
/* Home Agent */
static struct intel_uncore_type snbep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* integrated Memory Controller: one box per channel, fixed DCLK counter */
static struct intel_uncore_type snbep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* QPI link layer: custom ops for the packet match/mask filter */
static struct intel_uncore_type snbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.event_descs		= snbep_uncore_qpi_events,
	.format_group		= &snbep_uncore_qpi_format_group,
};


/* ring-to-PCIe interface */
static struct intel_uncore_type snbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* ring-to-QPI interface: one box per link */
static struct intel_uncore_type snbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
1286
/* indices for snbep_pci_uncores[], matched by the id table driver_data */
enum {
	SNBEP_PCI_UNCORE_HA,
	SNBEP_PCI_UNCORE_IMC,
	SNBEP_PCI_UNCORE_QPI,
	SNBEP_PCI_UNCORE_R2PCIE,
	SNBEP_PCI_UNCORE_R3QPI,
};

static struct intel_uncore_type *snbep_pci_uncores[] = {
	[SNBEP_PCI_UNCORE_HA]		= &snbep_uncore_ha,
	[SNBEP_PCI_UNCORE_IMC]		= &snbep_uncore_imc,
	[SNBEP_PCI_UNCORE_QPI]		= &snbep_uncore_qpi,
	[SNBEP_PCI_UNCORE_R2PCIE]	= &snbep_uncore_r2pcie,
	[SNBEP_PCI_UNCORE_R3QPI]	= &snbep_uncore_r3qpi,
	NULL,
};
1303
/*
 * PCI ids of the SNB-EP uncore devices.  driver_data encodes the
 * (uncore type, box index) pair; UNCORE_EXTRA_PCI_DEV entries are
 * companion devices (QPI filters), not PMU boxes themselves.
 */
static const struct pci_device_id snbep_uncore_pci_ids[] = {
	{ /* Home Agent */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
	},
	{ /* MC Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* QPI Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* QPI Port 0 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};
1357
/* id table only: the uncore core binds these devices itself, no probe */
static struct pci_driver snbep_uncore_pci_driver = {
	.name		= "snbep_uncore",
	.id_table	= snbep_uncore_pci_ids,
};

/* low three bits of the CPUNODEID register hold the local Node ID */
#define NODE_ID_MASK	0x7

/* Each 3-bit field in bits 0-23 of GIDNIDMAP maps group id -> Node ID. */
#define GIDNIDMAP(config, id)	(((config) >> (3 * (id))) & 0x7)
1367
upi_nodeid_groupid(struct pci_dev * ubox_dev,int nodeid_loc,int idmap_loc,int * nodeid,int * groupid)1368 static int upi_nodeid_groupid(struct pci_dev *ubox_dev, int nodeid_loc, int idmap_loc,
1369 int *nodeid, int *groupid)
1370 {
1371 int ret;
1372
1373 /* get the Node ID of the local register */
1374 ret = pci_read_config_dword(ubox_dev, nodeid_loc, nodeid);
1375 if (ret)
1376 goto err;
1377
1378 *nodeid = *nodeid & NODE_ID_MASK;
1379 /* get the Node ID mapping */
1380 ret = pci_read_config_dword(ubox_dev, idmap_loc, groupid);
1381 if (ret)
1382 goto err;
1383 err:
1384 return ret;
1385 }
1386
/*
 * Map a (nodeid, GIDNIDMAP value) pair to a die id: find which 3-bit
 * group in the map holds nodeid, then use the group index directly on
 * multi-die parts or translate it to a logical package otherwise.
 *
 * Returns -ENODEV if the physical->logical lookup fails.  NOTE(review):
 * when nodeid is not present in the map at all this returns -1, which
 * callers store in pbus_to_dieid as "unassigned" — confirm intended.
 */
static int topology_gidnid_map(int nodeid, u32 gidnid)
{
	int i, die_id = -1;

	/*
	 * every three bits in the Node ID mapping register maps
	 * to a particular node.
	 */
	for (i = 0; i < 8; i++) {
		if (nodeid == GIDNIDMAP(gidnid, i)) {
			if (topology_max_dies_per_package() > 1)
				die_id = i;
			else
				die_id = topology_phys_to_logical_pkg(i);
			if (die_id < 0)
				die_id = -ENODEV;
			break;
		}
	}

	return die_id;
}
1409
/*
 * Build the PCI bus -> die id mapping by walking every UBOX device with
 * the given PCI device id.  For each one, derive the die either from
 * the UBOX nodeid/idmap registers (<= 8 nodes) or from NUMA information
 * (> 8 nodes), then backfill buses that have no UBOX of their own with
 * the nearest mapped neighbour's die id.
 */
static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool reverse)
{
	struct pci_dev *ubox_dev = NULL;
	int i, bus, nodeid, segment, die_id;
	struct pci2phy_map *map;
	int err = 0;
	u32 config = 0;

	while (1) {
		/* find the UBOX device */
		ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
		if (!ubox_dev)
			break;
		bus = ubox_dev->bus->number;
		/*
		 * The nodeid and idmap registers only contain enough
		 * information to handle 8 nodes. On systems with more
		 * than 8 nodes, we need to rely on NUMA information,
		 * filled in from BIOS supplied information, to determine
		 * the topology.
		 */
		if (nr_node_ids <= 8) {
			err = upi_nodeid_groupid(ubox_dev, nodeid_loc, idmap_loc,
						 &nodeid, &config);
			if (err)
				break;

			segment = pci_domain_nr(ubox_dev->bus);
			raw_spin_lock(&pci2phy_map_lock);
			map = __find_pci2phy_map(segment);
			if (!map) {
				raw_spin_unlock(&pci2phy_map_lock);
				err = -ENOMEM;
				break;
			}

			map->pbus_to_dieid[bus] = topology_gidnid_map(nodeid, config);
			raw_spin_unlock(&pci2phy_map_lock);
		} else {
			segment = pci_domain_nr(ubox_dev->bus);
			raw_spin_lock(&pci2phy_map_lock);
			map = __find_pci2phy_map(segment);
			if (!map) {
				raw_spin_unlock(&pci2phy_map_lock);
				err = -ENOMEM;
				break;
			}

			map->pbus_to_dieid[bus] = die_id = uncore_device_to_die(ubox_dev);
			raw_spin_unlock(&pci2phy_map_lock);
		}
	}

	if (!err) {
		/*
		 * For PCI bus with no UBOX device, find the next bus
		 * that has UBOX device and use its mapping.
		 */
		raw_spin_lock(&pci2phy_map_lock);
		list_for_each_entry(map, &pci2phy_map_head, list) {
			/* i carries the die id of the nearest mapped bus */
			i = -1;
			if (reverse) {
				for (bus = 255; bus >= 0; bus--) {
					if (map->pbus_to_dieid[bus] != -1)
						i = map->pbus_to_dieid[bus];
					else
						map->pbus_to_dieid[bus] = i;
				}
			} else {
				for (bus = 0; bus <= 255; bus++) {
					if (map->pbus_to_dieid[bus] != -1)
						i = map->pbus_to_dieid[bus];
					else
						map->pbus_to_dieid[bus] = i;
				}
			}
		}
		raw_spin_unlock(&pci2phy_map_lock);
	}

	/* drop the reference still held after an early break (NULL-safe) */
	pci_dev_put(ubox_dev);

	return pcibios_err_to_errno(err);
}
1497
snbep_uncore_pci_init(void)1498 int snbep_uncore_pci_init(void)
1499 {
1500 int ret = snbep_pci2phy_map_init(0x3ce0, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
1501 if (ret)
1502 return ret;
1503 uncore_pci_uncores = snbep_pci_uncores;
1504 uncore_pci_driver = &snbep_uncore_pci_driver;
1505 return 0;
1506 }
1507 /* end of Sandy Bridge-EP uncore support */
1508
1509 /* IvyTown uncore support */
ivbep_uncore_msr_init_box(struct intel_uncore_box * box)1510 static void ivbep_uncore_msr_init_box(struct intel_uncore_box *box)
1511 {
1512 unsigned msr = uncore_msr_box_ctl(box);
1513 if (msr)
1514 wrmsrq(msr, IVBEP_PMON_BOX_CTL_INT);
1515 }
1516
ivbep_uncore_pci_init_box(struct intel_uncore_box * box)1517 static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box)
1518 {
1519 struct pci_dev *pdev = box->pci_dev;
1520
1521 pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
1522 }
1523
/* IvyTown MSR ops: SNB-EP ops with the IVB-EP init_box replacing SNB's */
#define IVBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.init_box	= ivbep_uncore_msr_init_box,		\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

static struct intel_uncore_ops ivbep_uncore_msr_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};

/* IvyTown PCI ops: SNB-EP ops with the IVB-EP init_box */
static struct intel_uncore_ops ivbep_uncore_pci_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};

/* shared register layout of the PCI-based IvyTown boxes */
#define IVBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= IVBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &ivbep_uncore_pci_ops,		\
	.format_group	= &ivbep_uncore_format_group
1552
/* default IvyTown format attributes (generic boxes) */
static struct attribute *ivbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

/* Ubox: threshold is only 5 bits wide */
static struct attribute *ivbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

/* Cbox: adds tid_en and the filter fields (tid/link/state/nid/opc/...) */
static struct attribute *ivbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_link.attr,
	&format_attr_filter_state2.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

/* PCU: occupancy select/edge/invert plus the four band filters */
static struct attribute *ivbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

/* QPI: extended event select plus the packet match/mask fields */
static struct attribute *ivbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};
1627
/* sysfs "format" directories, one per IvyTown PMU flavour */
static const struct attribute_group ivbep_uncore_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_formats_attr,
};

static const struct attribute_group ivbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_ubox_formats_attr,
};

static const struct attribute_group ivbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_cbox_formats_attr,
};

static const struct attribute_group ivbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_pcu_formats_attr,
};

static const struct attribute_group ivbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_qpi_formats_attr,
};
1652
/* IvyTown Ubox: same layout as SNB-EP, IVB-EP event mask and ops */
static struct intel_uncore_type ivbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= IVBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_ubox_format_group,
};
1667
/*
 * Which Cbox filter fields each (event|umask) needs on IvyTown.  idx is
 * a bitmask as decoded by ivbep_cbox_filter_mask(): 0x1 TID, 0x2 LINK,
 * 0x4 STATE, 0x8 NID, 0x10 OPC (plus NC/C6/ISOC).  The first entry
 * reserves the TID field for any event with tid_en set.
 */
static struct extra_reg ivbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	EVENT_EXTRA_END
};
1708
ivbep_cbox_filter_mask(int fields)1709 static u64 ivbep_cbox_filter_mask(int fields)
1710 {
1711 u64 mask = 0;
1712
1713 if (fields & 0x1)
1714 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_TID;
1715 if (fields & 0x2)
1716 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK;
1717 if (fields & 0x4)
1718 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
1719 if (fields & 0x8)
1720 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NID;
1721 if (fields & 0x10) {
1722 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
1723 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NC;
1724 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_C6;
1725 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
1726 }
1727
1728 return mask;
1729 }
1730
/* IvyTown Cbox: arbitrate the shared filter with the IVB-EP field layout */
static struct event_constraint *
ivbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, ivbep_cbox_filter_mask);
}
1736
/*
 * IvyTown counterpart of snbep_cbox_hw_config: resolve the filter
 * fields from the IVB-EP extra_regs table and record the per-box filter
 * MSR address plus the masked config1 value.
 */
static int ivbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	/* an event may match several table entries; OR their field sets */
	for (er = ivbep_uncore_cbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		/* each Cbox has its own filter register, msr_offset apart */
		reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & ivbep_cbox_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}
1757
/* Program the C-box filter (if used) and then enable the event's counter. */
static void ivbep_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		u64 filter = uncore_shared_reg_config(box, 0);
		/*
		 * The 64-bit filter value is split across two MSRs: the low
		 * 32 bits go into the filter register itself, the high 32
		 * bits into the MSR 6 further on (presumably the second
		 * filter MSR on IVB-EP).
		 */
		wrmsrq(reg1->reg, filter & 0xffffffff);
		wrmsrq(reg1->reg + 6, filter >> 32);
	}

	wrmsrq(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
1771
/* MSR-based PMU callbacks for the IVB-EP C-box. */
static struct intel_uncore_ops ivbep_uncore_cbox_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= ivbep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= ivbep_cbox_hw_config,
	.get_constraint		= ivbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

/* IVB-EP C-box: up to 15 boxes (clamped to core count at init time). */
static struct intel_uncore_type ivbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 15,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &ivbep_uncore_cbox_ops,
	.format_group		= &ivbep_uncore_cbox_format_group,
};
1799
/* IVB-EP PCU callbacks: common MSR ops plus the SNB-EP PCU-specific hooks. */
static struct intel_uncore_ops ivbep_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

/* IVB-EP Power Control Unit: one box, four 48-bit counters, one shared reg. */
static struct intel_uncore_type ivbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_pcu_ops,
	.format_group		= &ivbep_uncore_pcu_format_group,
};

/* All MSR-based uncore PMUs on IVB-EP; NULL terminated. */
static struct intel_uncore_type *ivbep_msr_uncores[] = {
	&ivbep_uncore_ubox,
	&ivbep_uncore_cbox,
	&ivbep_uncore_pcu,
	NULL,
};
1827
ivbep_uncore_cpu_init(void)1828 void ivbep_uncore_cpu_init(void)
1829 {
1830 if (ivbep_uncore_cbox.num_boxes > topology_num_cores_per_package())
1831 ivbep_uncore_cbox.num_boxes = topology_num_cores_per_package();
1832 uncore_msr_uncores = ivbep_msr_uncores;
1833 }
1834
/* IVB-EP Home Agent: two PCI PMON boxes with four 48-bit counters each. */
static struct intel_uncore_type ivbep_uncore_ha = {
	.name		= "ha",
	.num_counters	= 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

/* IVB-EP memory controller channels: eight boxes, plus a fixed counter. */
static struct intel_uncore_type ivbep_uncore_imc = {
	.name		= "imc",
	.num_counters	= 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

/* registers in IRP boxes are not properly aligned */
static unsigned ivbep_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
static unsigned ivbep_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};
1858
ivbep_uncore_irp_enable_event(struct intel_uncore_box * box,struct perf_event * event)1859 static void ivbep_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1860 {
1861 struct pci_dev *pdev = box->pci_dev;
1862 struct hw_perf_event *hwc = &event->hw;
1863
1864 pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx],
1865 hwc->config | SNBEP_PMON_CTL_EN);
1866 }
1867
ivbep_uncore_irp_disable_event(struct intel_uncore_box * box,struct perf_event * event)1868 static void ivbep_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1869 {
1870 struct pci_dev *pdev = box->pci_dev;
1871 struct hw_perf_event *hwc = &event->hw;
1872
1873 pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx], hwc->config);
1874 }
1875
/* Read a 64-bit IRP counter via two 32-bit PCI config-space accesses. */
static u64 ivbep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	/*
	 * Low dword first, then the high dword 4 bytes further on.  Writing
	 * through (u32 *)&count relies on little-endian layout, which is
	 * safe here since this is x86-only code.
	 */
	pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
	pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);

	return count;
}
1887
/* IRP callbacks: custom enable/disable/read due to the unaligned registers. */
static struct intel_uncore_ops ivbep_uncore_irp_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= ivbep_uncore_irp_disable_event,
	.enable_event	= ivbep_uncore_irp_enable_event,
	.read_counter	= ivbep_uncore_irp_read_counter,
};

/* IVB-EP IRP: one box; ctl/ctr offsets live in the tables above. */
static struct intel_uncore_type ivbep_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= IVBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &ivbep_uncore_irp_ops,
	.format_group		= &ivbep_uncore_format_group,
};
1907
/* QPI callbacks: SNB-EP enable/hw_config handle the match/mask extra regs. */
static struct intel_uncore_ops ivbep_uncore_qpi_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_qpi_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
	.hw_config	= snbep_qpi_hw_config,
	.get_constraint	= uncore_get_constraint,
	.put_constraint	= uncore_put_constraint,
};

/* IVB-EP QPI: three link boxes, one shared (match/mask) register pair. */
static struct intel_uncore_type ivbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_qpi_ops,
	.format_group		= &ivbep_uncore_qpi_format_group,
};
1933
/* IVB-EP R2PCIe ring stop: one box, constrained counter usage. */
static struct intel_uncore_type ivbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

/* IVB-EP R3QPI ring stop: two boxes, three counters each. */
static struct intel_uncore_type ivbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters	= 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

/* Indices into ivbep_pci_uncores[], referenced by the PCI ID table below. */
enum {
	IVBEP_PCI_UNCORE_HA,
	IVBEP_PCI_UNCORE_IMC,
	IVBEP_PCI_UNCORE_IRP,
	IVBEP_PCI_UNCORE_QPI,
	IVBEP_PCI_UNCORE_R2PCIE,
	IVBEP_PCI_UNCORE_R3QPI,
};

/* All PCI-based uncore PMUs on IVB-EP; NULL terminated. */
static struct intel_uncore_type *ivbep_pci_uncores[] = {
	[IVBEP_PCI_UNCORE_HA]	= &ivbep_uncore_ha,
	[IVBEP_PCI_UNCORE_IMC]	= &ivbep_uncore_imc,
	[IVBEP_PCI_UNCORE_IRP]	= &ivbep_uncore_irp,
	[IVBEP_PCI_UNCORE_QPI]	= &ivbep_uncore_qpi,
	[IVBEP_PCI_UNCORE_R2PCIE]	= &ivbep_uncore_r2pcie,
	[IVBEP_PCI_UNCORE_R3QPI]	= &ivbep_uncore_r3qpi,
	NULL,
};
1970
/* PCI IDs of the IVB-EP uncore PMON units; driver_data encodes type + box idx. */
static const struct pci_device_id ivbep_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 4 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 4 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};
2056
/* Skeleton PCI driver; probe/remove are filled in by the uncore core. */
static struct pci_driver ivbep_uncore_pci_driver = {
	.name		= "ivbep_uncore",
	.id_table	= ivbep_uncore_pci_ids,
};
2061
ivbep_uncore_pci_init(void)2062 int ivbep_uncore_pci_init(void)
2063 {
2064 int ret = snbep_pci2phy_map_init(0x0e1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
2065 if (ret)
2066 return ret;
2067 uncore_pci_uncores = ivbep_pci_uncores;
2068 uncore_pci_driver = &ivbep_uncore_pci_driver;
2069 return 0;
2070 }
2071 /* end of IvyTown uncore support */
2072
2073 /* KNL uncore support */
/* sysfs format attributes exposed for KNL U-box events. */
static struct attribute *knl_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

static const struct attribute_group knl_uncore_ubox_format_group = {
	.name = "format",
	.attrs = knl_uncore_ubox_formats_attr,
};

/* KNL U-box: one box with two counters plus a fixed UCLK counter. */
static struct intel_uncore_type knl_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= KNL_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops			= &snbep_uncore_msr_ops,
	.format_group		= &knl_uncore_ubox_format_group,
};
2103
/* sysfs format attributes for KNL CHA events, including the filter fields. */
static struct attribute *knl_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_qor.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid4.attr,
	&format_attr_filter_link3.attr,
	&format_attr_filter_state4.attr,
	&format_attr_filter_local.attr,
	&format_attr_filter_all_op.attr,
	&format_attr_filter_nnm.attr,
	&format_attr_filter_opc3.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static const struct attribute_group knl_uncore_cha_format_group = {
	.name = "format",
	.attrs = knl_uncore_cha_formats_attr,
};

/* Events restricted to specific CHA counters. */
static struct event_constraint knl_uncore_cha_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	EVENT_CONSTRAINT_END
};

/* Events that need CHA filter fields; ->idx encodes which fields. */
static struct extra_reg knl_uncore_cha_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x3d, 0xff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x4),
	EVENT_EXTRA_END
};
2144
knl_cha_filter_mask(int fields)2145 static u64 knl_cha_filter_mask(int fields)
2146 {
2147 u64 mask = 0;
2148
2149 if (fields & 0x1)
2150 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_TID;
2151 if (fields & 0x2)
2152 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_STATE;
2153 if (fields & 0x4)
2154 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_OP;
2155 return mask;
2156 }
2157
static struct event_constraint *
knl_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	/* Shared SNB-EP constraint logic with the KNL CHA filter mapping. */
	return __snbep_cbox_get_constraint(box, event, knl_cha_filter_mask);
}
2163
knl_cha_hw_config(struct intel_uncore_box * box,struct perf_event * event)2164 static int knl_cha_hw_config(struct intel_uncore_box *box,
2165 struct perf_event *event)
2166 {
2167 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2168 struct extra_reg *er;
2169 int idx = 0;
2170
2171 for (er = knl_uncore_cha_extra_regs; er->msr; er++) {
2172 if (er->event != (event->hw.config & er->config_mask))
2173 continue;
2174 idx |= er->idx;
2175 }
2176
2177 if (idx) {
2178 reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
2179 KNL_CHA_MSR_OFFSET * box->pmu->pmu_idx;
2180 reg1->config = event->attr.config1 & knl_cha_filter_mask(idx);
2181
2182 reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE;
2183 reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE;
2184 reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_NNC;
2185 reg1->idx = idx;
2186 }
2187 return 0;
2188 }
2189
/* Defined later in the Haswell-EP section; KNL reuses it for the CHA. */
static void hswep_cbox_enable_event(struct intel_uncore_box *box,
				    struct perf_event *event);

/* KNL CHA callbacks: SNB-EP MSR ops with KNL-specific config/constraints. */
static struct intel_uncore_ops knl_uncore_cha_ops = {
	.init_box		= snbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= knl_cha_hw_config,
	.get_constraint		= knl_cha_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

/* KNL caching/home agent: 38 boxes, four 48-bit counters each. */
static struct intel_uncore_type knl_uncore_cha = {
	.name			= "cha",
	.num_counters		= 4,
	.num_boxes		= 38,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= KNL_CHA_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= KNL_CHA_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= knl_uncore_cha_constraints,
	.ops			= &knl_uncore_cha_ops,
	.format_group		= &knl_uncore_cha_format_group,
};
2220
/* sysfs format attributes for KNL PCU events (occupancy sub-events included). */
static struct attribute *knl_uncore_pcu_formats_attr[] = {
	&format_attr_event2.attr,
	&format_attr_use_occ_ctr.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh6.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge_det.attr,
	NULL,
};

static const struct attribute_group knl_uncore_pcu_format_group = {
	.name = "format",
	.attrs = knl_uncore_pcu_formats_attr,
};

/* KNL Power Control Unit: one box, four 48-bit counters. */
static struct intel_uncore_type knl_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= KNL_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.ops			= &snbep_uncore_msr_ops,
	.format_group		= &knl_uncore_pcu_format_group,
};

/* All MSR-based uncore PMUs on KNL; NULL terminated. */
static struct intel_uncore_type *knl_msr_uncores[] = {
	&knl_uncore_ubox,
	&knl_uncore_cha,
	&knl_uncore_pcu,
	NULL,
};
2258
/* Register the MSR-based KNL uncore PMUs with the uncore core. */
void knl_uncore_cpu_init(void)
{
	uncore_msr_uncores = knl_msr_uncores;
}
2263
static void knl_uncore_imc_enable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	/*
	 * Unlike the generic PCI enable path, the KNL IMC/EDC boxes are
	 * enabled by clearing the whole box control register (which also
	 * clears the freeze bit).
	 */
	pci_write_config_dword(pdev, box_ctl, 0);
}
2271
knl_uncore_imc_enable_event(struct intel_uncore_box * box,struct perf_event * event)2272 static void knl_uncore_imc_enable_event(struct intel_uncore_box *box,
2273 struct perf_event *event)
2274 {
2275 struct pci_dev *pdev = box->pci_dev;
2276 struct hw_perf_event *hwc = &event->hw;
2277
2278 if ((event->attr.config & SNBEP_PMON_CTL_EV_SEL_MASK)
2279 == UNCORE_FIXED_EVENT)
2280 pci_write_config_dword(pdev, hwc->config_base,
2281 hwc->config | KNL_PMON_FIXED_CTL_EN);
2282 else
2283 pci_write_config_dword(pdev, hwc->config_base,
2284 hwc->config | SNBEP_PMON_CTL_EN);
2285 }
2286
/* Shared callbacks for all KNL IMC/EDC boxes (custom enable_box/event). */
static struct intel_uncore_ops knl_uncore_imc_ops = {
	.init_box	= snbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= knl_uncore_imc_enable_box,
	.read_counter	= snbep_uncore_pci_read_counter,
	.enable_event	= knl_uncore_imc_enable_event,
	.disable_event	= snbep_uncore_pci_disable_event,
};

/* Memory controller UCLK domain: two boxes, fixed counter included. */
static struct intel_uncore_type knl_uncore_imc_uclk = {
	.name			= "imc_uclk",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};

/* Memory controller DCLK channels: six boxes. */
static struct intel_uncore_type knl_uncore_imc_dclk = {
	.name			= "imc",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_MC0_CH0_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_MC0_CH0_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_MC0_CH0_MSR_PMON_FIXED_LOW,
	.fixed_ctl		= KNL_MC0_CH0_MSR_PMON_FIXED_CTL,
	.box_ctl		= KNL_MC0_CH0_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};

/* EDC (MCDRAM) UCLK domain: eight boxes. */
static struct intel_uncore_type knl_uncore_edc_uclk = {
	.name			= "edc_uclk",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};

/* EDC (MCDRAM) ECLK domain: eight boxes. */
static struct intel_uncore_type knl_uncore_edc_eclk = {
	.name			= "edc_eclk",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_EDC0_ECLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW,
	.fixed_ctl		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL,
	.box_ctl		= KNL_EDC0_ECLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};
2359
/* Event 0x23 is limited to the first two M2PCIe counters. */
static struct event_constraint knl_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	EVENT_CONSTRAINT_END
};

/* KNL M2PCIe ring stop: one PCI box using the common SNB-EP layout. */
static struct intel_uncore_type knl_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= knl_uncore_m2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* sysfs format attributes for KNL IRP events. */
static struct attribute *knl_uncore_irp_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_qor.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group knl_uncore_irp_format_group = {
	.name = "format",
	.attrs = knl_uncore_irp_formats_attr,
};

/* KNL IRP: one box with two counters. */
static struct intel_uncore_type knl_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= KNL_IRP_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= KNL_IRP_PCI_PMON_BOX_CTL,
	.ops			= &snbep_uncore_pci_ops,
	.format_group		= &knl_uncore_irp_format_group,
};
2401
/* Indices into knl_pci_uncores[], referenced by the PCI ID table below. */
enum {
	KNL_PCI_UNCORE_MC_UCLK,
	KNL_PCI_UNCORE_MC_DCLK,
	KNL_PCI_UNCORE_EDC_UCLK,
	KNL_PCI_UNCORE_EDC_ECLK,
	KNL_PCI_UNCORE_M2PCIE,
	KNL_PCI_UNCORE_IRP,
};

/* All PCI-based uncore PMUs on KNL; NULL terminated. */
static struct intel_uncore_type *knl_pci_uncores[] = {
	[KNL_PCI_UNCORE_MC_UCLK]	= &knl_uncore_imc_uclk,
	[KNL_PCI_UNCORE_MC_DCLK]	= &knl_uncore_imc_dclk,
	[KNL_PCI_UNCORE_EDC_UCLK]	= &knl_uncore_edc_uclk,
	[KNL_PCI_UNCORE_EDC_ECLK]	= &knl_uncore_edc_eclk,
	[KNL_PCI_UNCORE_M2PCIE]		= &knl_uncore_m2pcie,
	[KNL_PCI_UNCORE_IRP]		= &knl_uncore_irp,
	NULL,
};
2420
2421 /*
2422 * KNL uses a common PCI device ID for multiple instances of an Uncore PMU
 * device type. Prior to KNL, each instance of a PMU device type had a unique
2424 * device ID.
2425 *
2426 * PCI Device ID Uncore PMU Devices
2427 * ----------------------------------
2428 * 0x7841 MC0 UClk, MC1 UClk
2429 * 0x7843 MC0 DClk CH 0, MC0 DClk CH 1, MC0 DClk CH 2,
2430 * MC1 DClk CH 0, MC1 DClk CH 1, MC1 DClk CH 2
2431 * 0x7833 EDC0 UClk, EDC1 UClk, EDC2 UClk, EDC3 UClk,
2432 * EDC4 UClk, EDC5 UClk, EDC6 UClk, EDC7 UClk
2433 * 0x7835 EDC0 EClk, EDC1 EClk, EDC2 EClk, EDC3 EClk,
2434 * EDC4 EClk, EDC5 EClk, EDC6 EClk, EDC7 EClk
2435 * 0x7817 M2PCIe
2436 * 0x7814 IRP
2437 */
2438
/*
 * KNL uncore PMON PCI IDs.  Because one device ID covers several PMU
 * instances (see the table above), entries use UNCORE_PCI_DEV_FULL_DATA
 * to also encode the PCI device/function that identifies each instance.
 */
static const struct pci_device_id knl_uncore_pci_ids[] = {
	{ /* MC0 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 0, KNL_PCI_UNCORE_MC_UCLK, 0),
	},
	{ /* MC1 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 0, KNL_PCI_UNCORE_MC_UCLK, 1),
	},
	{ /* MC0 DClk CH 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 2, KNL_PCI_UNCORE_MC_DCLK, 0),
	},
	{ /* MC0 DClk CH 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 3, KNL_PCI_UNCORE_MC_DCLK, 1),
	},
	{ /* MC0 DClk CH 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 4, KNL_PCI_UNCORE_MC_DCLK, 2),
	},
	{ /* MC1 DClk CH 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 2, KNL_PCI_UNCORE_MC_DCLK, 3),
	},
	{ /* MC1 DClk CH 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 3, KNL_PCI_UNCORE_MC_DCLK, 4),
	},
	{ /* MC1 DClk CH 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 4, KNL_PCI_UNCORE_MC_DCLK, 5),
	},
	{ /* EDC0 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, KNL_PCI_UNCORE_EDC_UCLK, 0),
	},
	{ /* EDC1 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, KNL_PCI_UNCORE_EDC_UCLK, 1),
	},
	{ /* EDC2 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(17, 0, KNL_PCI_UNCORE_EDC_UCLK, 2),
	},
	{ /* EDC3 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, KNL_PCI_UNCORE_EDC_UCLK, 3),
	},
	{ /* EDC4 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(19, 0, KNL_PCI_UNCORE_EDC_UCLK, 4),
	},
	{ /* EDC5 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(20, 0, KNL_PCI_UNCORE_EDC_UCLK, 5),
	},
	{ /* EDC6 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 0, KNL_PCI_UNCORE_EDC_UCLK, 6),
	},
	{ /* EDC7 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 0, KNL_PCI_UNCORE_EDC_UCLK, 7),
	},
	{ /* EDC0 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(24, 2, KNL_PCI_UNCORE_EDC_ECLK, 0),
	},
	{ /* EDC1 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(25, 2, KNL_PCI_UNCORE_EDC_ECLK, 1),
	},
	{ /* EDC2 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(26, 2, KNL_PCI_UNCORE_EDC_ECLK, 2),
	},
	{ /* EDC3 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(27, 2, KNL_PCI_UNCORE_EDC_ECLK, 3),
	},
	{ /* EDC4 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(28, 2, KNL_PCI_UNCORE_EDC_ECLK, 4),
	},
	{ /* EDC5 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(29, 2, KNL_PCI_UNCORE_EDC_ECLK, 5),
	},
	{ /* EDC6 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(30, 2, KNL_PCI_UNCORE_EDC_ECLK, 6),
	},
	{ /* EDC7 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(31, 2, KNL_PCI_UNCORE_EDC_ECLK, 7),
	},
	{ /* M2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7817),
		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_M2PCIE, 0),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7814),
		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_IRP, 0),
	},
	{ /* end: all zeroes */ }
};
2546
/* Skeleton PCI driver; probe/remove are filled in by the uncore core. */
static struct pci_driver knl_uncore_pci_driver = {
	.name		= "knl_uncore",
	.id_table	= knl_uncore_pci_ids,
};
2551
knl_uncore_pci_init(void)2552 int knl_uncore_pci_init(void)
2553 {
2554 int ret;
2555
2556 /* All KNL PCI based PMON units are on the same PCI bus except IRP */
2557 ret = snb_pci2phy_map_init(0x7814); /* IRP */
2558 if (ret)
2559 return ret;
2560 ret = snb_pci2phy_map_init(0x7817); /* M2PCIe */
2561 if (ret)
2562 return ret;
2563 uncore_pci_uncores = knl_pci_uncores;
2564 uncore_pci_driver = &knl_uncore_pci_driver;
2565 return 0;
2566 }
2567
2568 /* end of KNL uncore support */
2569
2570 /* Haswell-EP uncore support */
/* sysfs format attributes for HSW-EP U-box events, including its filter. */
static struct attribute *hswep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_filter_tid2.attr,
	&format_attr_filter_cid.attr,
	NULL,
};

static const struct attribute_group hswep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_ubox_formats_attr,
};
2586
hswep_ubox_hw_config(struct intel_uncore_box * box,struct perf_event * event)2587 static int hswep_ubox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2588 {
2589 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2590 reg1->reg = HSWEP_U_MSR_PMON_FILTER;
2591 reg1->config = event->attr.config1 & HSWEP_U_MSR_PMON_BOX_FILTER_MASK;
2592 reg1->idx = 0;
2593 return 0;
2594 }
2595
/* HSW-EP U-box callbacks: common MSR ops plus the filter-reg hw_config. */
static struct intel_uncore_ops hswep_uncore_ubox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_ubox_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};

/* HSW-EP U-box: one box, two 44-bit counters plus a fixed UCLK counter. */
static struct intel_uncore_type hswep_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 44,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.num_shared_regs	= 1,
	.ops			= &hswep_uncore_ubox_ops,
	.format_group		= &hswep_uncore_ubox_format_group,
};
2618
/* sysfs format attributes for HSW-EP C-box events and filter fields. */
static struct attribute *hswep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid3.attr,
	&format_attr_filter_link2.attr,
	&format_attr_filter_state3.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static const struct attribute_group hswep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_cbox_formats_attr,
};

/* Events restricted to specific HSW-EP C-box counters. */
static struct event_constraint hswep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
	EVENT_CONSTRAINT_END
};
2651
/*
 * HSW-EP C-box events that require filter fields; ->idx encodes which
 * abstract fields (see hswep_cbox_filter_mask) each event/umask needs.
 */
static struct extra_reg hswep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4028, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4032, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4029, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4033, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x402A, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x12),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	EVENT_EXTRA_END
};
2693
hswep_cbox_filter_mask(int fields)2694 static u64 hswep_cbox_filter_mask(int fields)
2695 {
2696 u64 mask = 0;
2697 if (fields & 0x1)
2698 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_TID;
2699 if (fields & 0x2)
2700 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK;
2701 if (fields & 0x4)
2702 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE;
2703 if (fields & 0x8)
2704 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NID;
2705 if (fields & 0x10) {
2706 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC;
2707 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NC;
2708 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_C6;
2709 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
2710 }
2711 return mask;
2712 }
2713
static struct event_constraint *
hswep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	/* Shared constraint logic, specialized with the HSW-EP filter mask decoder */
	return __snbep_cbox_get_constraint(box, event, hswep_cbox_filter_mask);
}

/*
 * Resolve which C-Box filter fields an event needs (from the extra_regs
 * table) and record the filter MSR address, value and field bitmap in
 * the event's extra_reg.
 */
static int hswep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	/* Accumulate the filter-field bits of every table entry the event matches */
	for (er = hswep_uncore_cbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		/* Filter MSRs are laid out per C-Box at a fixed stride */
		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
			    HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & hswep_cbox_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}
2740
/*
 * Program the C-Box filter (if the event uses one) and then enable the
 * event's control register.  The 64-bit filter value is written as two
 * consecutive 32-bit MSRs (FILTER0/FILTER1).
 */
static void hswep_cbox_enable_event(struct intel_uncore_box *box,
				    struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		u64 filter = uncore_shared_reg_config(box, 0);
		wrmsrq(reg1->reg, filter & 0xffffffff);
		wrmsrq(reg1->reg + 1, filter >> 32);
	}

	/* Write the filter before setting the enable bit in the event control */
	wrmsrq(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
2755
/* C-Box PMU callbacks: SNBEP common box handling plus HSW-EP filter support */
static struct intel_uncore_ops hswep_uncore_cbox_ops = {
	.init_box		= snbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= hswep_cbox_hw_config,
	.get_constraint		= hswep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

/*
 * Haswell-EP C-Box uncore PMU type.  num_boxes is the maximum (18);
 * hswep_uncore_cpu_init() trims it to the actual core count.
 */
static struct intel_uncore_type hswep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 18,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= hswep_uncore_cbox_constraints,
	.ops			= &hswep_uncore_cbox_ops,
	.format_group		= &hswep_uncore_cbox_format_group,
};
2783
2784 /*
2785 * Write SBOX Initialization register bit by bit to avoid spurious #GPs
2786 */
hswep_uncore_sbox_msr_init_box(struct intel_uncore_box * box)2787 static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box)
2788 {
2789 unsigned msr = uncore_msr_box_ctl(box);
2790
2791 if (msr) {
2792 u64 init = SNBEP_PMON_BOX_CTL_INT;
2793 u64 flags = 0;
2794 int i;
2795
2796 for_each_set_bit(i, (unsigned long *)&init, 64) {
2797 flags |= (1ULL << i);
2798 wrmsrq(msr, flags);
2799 }
2800 }
2801 }
2802
2803 static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = {
2804 __SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
2805 .init_box = hswep_uncore_sbox_msr_init_box
2806 };
2807
/* sysfs "format" attributes for the Haswell-EP SBOX PMU */
static struct attribute *hswep_uncore_sbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group hswep_uncore_sbox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_sbox_formats_attr,
};

/*
 * Haswell-EP SBOX uncore PMU type.  num_boxes may be reduced to 2 by
 * hswep_uncore_cpu_init() on parts with fewer SBOXes.
 */
static struct intel_uncore_type hswep_uncore_sbox = {
	.name			= "sbox",
	.num_counters		= 4,
	.num_boxes		= 4,
	.perf_ctr_bits		= 44,
	.event_ctl		= HSWEP_S0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_S0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_S0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_SBOX_MSR_OFFSET,
	.ops			= &hswep_uncore_sbox_msr_ops,
	.format_group		= &hswep_uncore_sbox_format_group,
};
2836
hswep_pcu_hw_config(struct intel_uncore_box * box,struct perf_event * event)2837 static int hswep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2838 {
2839 struct hw_perf_event *hwc = &event->hw;
2840 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2841 int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
2842
2843 if (ev_sel >= 0xb && ev_sel <= 0xe) {
2844 reg1->reg = HSWEP_PCU_MSR_PMON_BOX_FILTER;
2845 reg1->idx = ev_sel - 0xb;
2846 reg1->config = event->attr.config1 & (0xff << reg1->idx);
2847 }
2848 return 0;
2849 }
2850
/* PCU ops: common SNBEP MSR handlers plus band-filter config/constraints */
static struct intel_uncore_ops hswep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

/* Haswell-EP PCU uncore PMU type (also reused by BDX via bdx_msr_uncores) */
static struct intel_uncore_type hswep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &hswep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};

/* MSR-based uncore PMUs registered for Haswell-EP */
static struct intel_uncore_type *hswep_msr_uncores[] = {
	&hswep_uncore_ubox,
	&hswep_uncore_cbox,
	&hswep_uncore_sbox,
	&hswep_uncore_pcu,
	NULL,
};
2879
2880 #define HSWEP_PCU_DID 0x2fc0
2881 #define HSWEP_PCU_CAPID4_OFFET 0x94
2882 #define hswep_get_chop(_cap) (((_cap) >> 6) & 0x3)
2883
hswep_has_limit_sbox(unsigned int device)2884 static bool hswep_has_limit_sbox(unsigned int device)
2885 {
2886 struct pci_dev *dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
2887 u32 capid4;
2888
2889 if (!dev)
2890 return false;
2891
2892 pci_read_config_dword(dev, HSWEP_PCU_CAPID4_OFFET, &capid4);
2893 pci_dev_put(dev);
2894 if (!hswep_get_chop(capid4))
2895 return true;
2896
2897 return false;
2898 }
2899
/*
 * Register the Haswell-EP MSR uncore PMUs, trimming the C-Box count to
 * the real per-package core count and the SBOX count on "chopped" parts.
 */
void hswep_uncore_cpu_init(void)
{
	/* One C-Box per core; don't expose more boxes than cores */
	if (hswep_uncore_cbox.num_boxes > topology_num_cores_per_package())
		hswep_uncore_cbox.num_boxes = topology_num_cores_per_package();

	/* Detect 6-8 core systems with only two SBOXes */
	if (hswep_has_limit_sbox(HSWEP_PCU_DID))
		hswep_uncore_sbox.num_boxes = 2;

	uncore_msr_uncores = hswep_msr_uncores;
}
2911
/* Haswell-EP Home Agent uncore PMU (PCI based) */
static struct intel_uncore_type hswep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/*
 * IMC event aliases.  CAS counts are scaled by 6.103515625e-5
 * (= 64 / 2^20) to report MiB from 64-byte cache-line transfers.
 */
static struct uncore_event_desc hswep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};

/* Haswell-EP IMC uncore PMU: 8 channels, with a fixed (DCLK) counter */
static struct intel_uncore_type hswep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2942
2943 static unsigned hswep_uncore_irp_ctrs[] = {0xa0, 0xa8, 0xb0, 0xb8};
2944
hswep_uncore_irp_read_counter(struct intel_uncore_box * box,struct perf_event * event)2945 static u64 hswep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
2946 {
2947 struct pci_dev *pdev = box->pci_dev;
2948 struct hw_perf_event *hwc = &event->hw;
2949 u64 count = 0;
2950
2951 pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
2952 pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
2953
2954 return count;
2955 }
2956
/* IRP ops: SNBEP PCI box handling, IVB-EP event enable/disable, custom reads */
static struct intel_uncore_ops hswep_uncore_irp_ops = {
	.init_box	= snbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= ivbep_uncore_irp_disable_event,
	.enable_event	= ivbep_uncore_irp_enable_event,
	.read_counter	= hswep_uncore_irp_read_counter,
};

/* Haswell-EP IRP uncore PMU (counters live at non-standard config offsets) */
static struct intel_uncore_type hswep_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &hswep_uncore_irp_ops,
	.format_group		= &snbep_uncore_format_group,
};

/* Haswell-EP QPI uncore PMU, reusing the SNBEP QPI ops and match/mask regs */
static struct intel_uncore_type hswep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.format_group		= &snbep_uncore_qpi_format_group,
};
2990
/* Counter constraints for the HSW-EP R2PCIe ring-to-PCIe agent */
static struct event_constraint hswep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT_RANGE(0x10, 0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x23, 0x25, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x27, 0x1),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x28, 0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x1),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x2b, 0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x32, 0x35, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type hswep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= hswep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* Counter constraints for the HSW-EP R3QPI ring-to-QPI agent */
static struct event_constraint hswep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x3),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x7, 0x0a, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x10, 0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x14, 0x15, 0x3),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x1f, 0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x25, 0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x28, 0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x2c, 0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x31, 0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x36, 0x39, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type hswep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 44,
	.constraints	= hswep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3037
/* Indices into hswep_pci_uncores[], referenced by the PCI ID table below */
enum {
	HSWEP_PCI_UNCORE_HA,
	HSWEP_PCI_UNCORE_IMC,
	HSWEP_PCI_UNCORE_IRP,
	HSWEP_PCI_UNCORE_QPI,
	HSWEP_PCI_UNCORE_R2PCIE,
	HSWEP_PCI_UNCORE_R3QPI,
};

static struct intel_uncore_type *hswep_pci_uncores[] = {
	[HSWEP_PCI_UNCORE_HA]	  = &hswep_uncore_ha,
	[HSWEP_PCI_UNCORE_IMC]	  = &hswep_uncore_imc,
	[HSWEP_PCI_UNCORE_IRP]	  = &hswep_uncore_irp,
	[HSWEP_PCI_UNCORE_QPI]	  = &hswep_uncore_qpi,
	[HSWEP_PCI_UNCORE_R2PCIE] = &hswep_uncore_r2pcie,
	[HSWEP_PCI_UNCORE_R3QPI]  = &hswep_uncore_r3qpi,
	NULL,
};
3056
/*
 * Haswell-EP uncore PCI devices.  driver_data packs the uncore type
 * index (enum above, or UNCORE_EXTRA_PCI_DEV) with the box/extra index.
 */
static const struct pci_device_id hswep_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f30),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f38),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb0),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb1),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb4),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb5),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd0),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd1),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd4),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd5),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f39),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f32),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f33),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3a),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f34),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f36),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f37),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3e),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver hswep_uncore_pci_driver = {
	.name		= "hswep_uncore",
	.id_table	= hswep_uncore_pci_ids,
};
3147
/*
 * Register the Haswell-EP PCI uncore PMUs.  Builds the PCI-bus to
 * socket mapping from the Ubox device (0x2f1e) first; returns a
 * negative errno if that fails.
 */
int hswep_uncore_pci_init(void)
{
	int ret = snbep_pci2phy_map_init(0x2f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
	if (ret)
		return ret;
	uncore_pci_uncores = hswep_pci_uncores;
	uncore_pci_driver = &hswep_uncore_pci_driver;
	return 0;
}
3157 /* end of Haswell-EP uncore support */
3158
3159 /* BDX uncore support */
3160
/* Broadwell-EP Ubox PMU; reuses the HSW-EP MSR layout and IVB-EP ops */
static struct intel_uncore_type bdx_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_ubox_format_group,
};

/* Per-event counter constraints for the BDX C-Box */
static struct event_constraint bdx_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
	EVENT_CONSTRAINT_END
};

/* BDX C-Box PMU: HSW-EP register layout/ops with BDX constraints and 24 boxes max */
static struct intel_uncore_type bdx_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 24,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= bdx_uncore_cbox_constraints,
	.ops			= &hswep_uncore_cbox_ops,
	.format_group		= &hswep_uncore_cbox_format_group,
};

/* BDX SBOX PMU; may be removed entirely by bdx_uncore_cpu_init() */
static struct intel_uncore_type bdx_uncore_sbox = {
	.name			= "sbox",
	.num_counters		= 4,
	.num_boxes		= 4,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_S0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_S0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_S0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_SBOX_MSR_OFFSET,
	.ops			= &hswep_uncore_sbox_msr_ops,
	.format_group		= &hswep_uncore_sbox_format_group,
};

/* Index of the SBOX entry in bdx_msr_uncores[] (cleared when no SBOXes exist) */
#define BDX_MSR_UNCORE_SBOX	3

static struct intel_uncore_type *bdx_msr_uncores[] = {
	&bdx_uncore_ubox,
	&bdx_uncore_cbox,
	&hswep_uncore_pcu,
	&bdx_uncore_sbox,
	NULL,
};
3224
/* Bit 7 'Use Occupancy' is not available for counter 0 on BDX */
static struct event_constraint bdx_uncore_pcu_constraints[] = {
	EVENT_CONSTRAINT(0x80, 0xe, 0x80),
	EVENT_CONSTRAINT_END
};

#define BDX_PCU_DID			0x6fc0

/*
 * Register the Broadwell-EP MSR uncore PMUs: trim the C-Box count to
 * the core count, drop the SBOX on parts without one, and install the
 * BDX-specific PCU constraint.
 */
void bdx_uncore_cpu_init(void)
{
	if (bdx_uncore_cbox.num_boxes > topology_num_cores_per_package())
		bdx_uncore_cbox.num_boxes = topology_num_cores_per_package();
	uncore_msr_uncores = bdx_msr_uncores;

	/* Detect systems with no SBOXes */
	if (boot_cpu_data.x86_vfm == INTEL_BROADWELL_D || hswep_has_limit_sbox(BDX_PCU_DID))
		uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;

	hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints;
}
3245
/* BDX Home Agent uncore PMU (PCI based) */
static struct intel_uncore_type bdx_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* BDX IMC uncore PMU; shares the HSW-EP IMC event aliases */
static struct intel_uncore_type bdx_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* BDX IRP uncore PMU; reuses the HSW-EP IRP ops (custom counter reads) */
static struct intel_uncore_type bdx_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &hswep_uncore_irp_ops,
	.format_group		= &snbep_uncore_format_group,
};

/* BDX QPI uncore PMU; reuses the SNBEP QPI ops and match/mask registers */
static struct intel_uncore_type bdx_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.format_group		= &snbep_uncore_qpi_format_group,
};
3290
/* Counter constraints for the BDX R2PCIe agent */
static struct event_constraint bdx_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x2c, 0x2d, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type bdx_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= bdx_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* Counter constraints for the BDX R3QPI agent */
static struct event_constraint bdx_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x7),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x07, 0x0a, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x10, 0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x14, 0x15, 0x3),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x1f, 0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x28, 0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x2c, 0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x33, 0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x36, 0x39, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type bdx_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.constraints	= bdx_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3337
/* Indices into bdx_pci_uncores[], referenced by the PCI ID table below */
enum {
	BDX_PCI_UNCORE_HA,
	BDX_PCI_UNCORE_IMC,
	BDX_PCI_UNCORE_IRP,
	BDX_PCI_UNCORE_QPI,
	BDX_PCI_UNCORE_R2PCIE,
	BDX_PCI_UNCORE_R3QPI,
};

static struct intel_uncore_type *bdx_pci_uncores[] = {
	[BDX_PCI_UNCORE_HA]	= &bdx_uncore_ha,
	[BDX_PCI_UNCORE_IMC]	= &bdx_uncore_imc,
	[BDX_PCI_UNCORE_IRP]	= &bdx_uncore_irp,
	[BDX_PCI_UNCORE_QPI]	= &bdx_uncore_qpi,
	[BDX_PCI_UNCORE_R2PCIE]	= &bdx_uncore_r2pcie,
	[BDX_PCI_UNCORE_R3QPI]	= &bdx_uncore_r3qpi,
	NULL,
};
3356
/*
 * Broadwell-EP uncore PCI devices.  driver_data packs the uncore type
 * index (enum above, or UNCORE_EXTRA_PCI_DEV) with the box/extra index.
 */
static const struct pci_device_id bdx_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f30),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f38),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb0),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb1),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb4),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb5),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd0),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd1),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd4),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd5),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f39),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f32),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f33),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3a),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f34),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f36),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f37),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3e),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* QPI Port 2 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   BDX_PCI_QPI_PORT2_FILTER),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver bdx_uncore_pci_driver = {
	.name		= "bdx_uncore",
	.id_table	= bdx_uncore_pci_ids,
};
3452
/*
 * Register the Broadwell-EP PCI uncore PMUs.  Builds the PCI-bus to
 * socket mapping from the Ubox device (0x6f1e) first; returns a
 * negative errno if that fails.
 */
int bdx_uncore_pci_init(void)
{
	int ret = snbep_pci2phy_map_init(0x6f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);

	if (ret)
		return ret;
	uncore_pci_uncores = bdx_pci_uncores;
	uncore_pci_driver = &bdx_uncore_pci_driver;
	return 0;
}
3463
3464 /* end of BDX uncore support */
3465
3466 /* SKX uncore support */
3467
/* Skylake-X Ubox PMU; reuses the HSW-EP register layout and IVB-EP ops */
static struct intel_uncore_type skx_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_ubox_format_group,
};
3482
/* sysfs "format" attributes for the SKX CHA PMU (event encoding + filter fields) */
static struct attribute *skx_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid4.attr,
	&format_attr_filter_state5.attr,
	&format_attr_filter_rem.attr,
	&format_attr_filter_loc.attr,
	&format_attr_filter_nm.attr,
	&format_attr_filter_all_op.attr,
	&format_attr_filter_not_nm.attr,
	&format_attr_filter_opc_0.attr,
	&format_attr_filter_opc_1.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static const struct attribute_group skx_uncore_chabox_format_group = {
	.name = "format",
	.attrs = skx_uncore_cha_formats_attr,
};

/* Per-event counter constraints for the SKX CHA */
static struct event_constraint skx_uncore_chabox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	EVENT_CONSTRAINT_END
};

/*
 * Events that require CHA filter register fields; the last column is
 * the abstract filter bitmap decoded by skx_cha_filter_mask().
 */
static struct extra_reg skx_uncore_cha_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x3134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x9134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x38, 0xff, 0x3),
	EVENT_EXTRA_END
};
3527
skx_cha_filter_mask(int fields)3528 static u64 skx_cha_filter_mask(int fields)
3529 {
3530 u64 mask = 0;
3531
3532 if (fields & 0x1)
3533 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_TID;
3534 if (fields & 0x2)
3535 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LINK;
3536 if (fields & 0x4)
3537 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_STATE;
3538 if (fields & 0x8) {
3539 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_REM;
3540 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LOC;
3541 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC;
3542 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NM;
3543 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM;
3544 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC0;
3545 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC1;
3546 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NC;
3547 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ISOC;
3548 }
3549 return mask;
3550 }
3551
static struct event_constraint *
skx_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	/* Shared constraint logic, specialized with the SKX CHA filter mask decoder */
	return __snbep_cbox_get_constraint(box, event, skx_cha_filter_mask);
}

/*
 * Resolve which CHA filter fields an event needs and record the filter
 * MSR address, value and field bitmap in the event's extra_reg.
 */
static int skx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;
	/* Any of the CHA events may be filtered by Thread/Core-ID.*/
	if (event->hw.config & SNBEP_CBO_PMON_CTL_TID_EN)
		idx = SKX_CHA_MSR_PMON_BOX_FILTER_TID;

	/* Accumulate the filter-field bits of every table entry the event matches */
	for (er = skx_uncore_cha_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		/* Filter MSRs reuse the HSW-EP C-Box layout and per-box stride */
		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
			    HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & skx_cha_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}
3581
/* MSR-based box operations for the SKX CHA (cache/home agent) PMON. */
static struct intel_uncore_ops skx_uncore_chabox_ops = {
	/* There is no frz_en for chabox ctl */
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= skx_cha_hw_config,
	.get_constraint		= skx_cha_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
3594
/*
 * SKX CHA uncore PMU type.  num_boxes is filled in at init time by
 * skx_uncore_cpu_init() from the CAPID6 register (see skx_count_chabox()).
 */
static struct intel_uncore_type skx_uncore_chabox = {
	.name			= "cha",
	.num_counters		= 4,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= skx_uncore_chabox_constraints,
	.ops			= &skx_uncore_chabox_ops,
	.format_group		= &skx_uncore_chabox_format_group,
};
3609
/* sysfs format attributes exposed for the SKX IIO PMU. */
static struct attribute *skx_uncore_iio_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh9.attr,
	&format_attr_ch_mask.attr,
	&format_attr_fc_mask.attr,
	NULL,
};
3620
/* "format" directory for the SKX IIO PMU in sysfs. */
static const struct attribute_group skx_uncore_iio_format_group = {
	.name = "format",
	.attrs = skx_uncore_iio_formats_attr,
};
3625
/* Counter constraints: which of the 4 IIO counters each event may use. */
static struct event_constraint skx_uncore_iio_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x88, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x95, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
	UNCORE_EVENT_CONSTRAINT_RANGE(0xd4, 0xd5, 0xc),
	EVENT_CONSTRAINT_END
};
3635
skx_iio_enable_event(struct intel_uncore_box * box,struct perf_event * event)3636 static void skx_iio_enable_event(struct intel_uncore_box *box,
3637 struct perf_event *event)
3638 {
3639 struct hw_perf_event *hwc = &event->hw;
3640
3641 wrmsrq(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
3642 }
3643
/* MSR-based box operations for the SKX IIO PMON. */
static struct intel_uncore_ops skx_uncore_iio_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= skx_iio_enable_event,
	.read_counter		= uncore_msr_read_counter,
};
3652
pmu_topology(struct intel_uncore_pmu * pmu,int die)3653 static struct intel_uncore_topology *pmu_topology(struct intel_uncore_pmu *pmu, int die)
3654 {
3655 int idx;
3656
3657 for (idx = 0; idx < pmu->type->num_boxes; idx++) {
3658 if (pmu->type->topology[die][idx].pmu_idx == pmu->pmu_idx)
3659 return &pmu->type->topology[die][idx];
3660 }
3661
3662 return NULL;
3663 }
3664
3665 static umode_t
pmu_iio_mapping_visible(struct kobject * kobj,struct attribute * attr,int die,int zero_bus_pmu)3666 pmu_iio_mapping_visible(struct kobject *kobj, struct attribute *attr,
3667 int die, int zero_bus_pmu)
3668 {
3669 struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(kobj_to_dev(kobj));
3670 struct intel_uncore_topology *pmut = pmu_topology(pmu, die);
3671
3672 return (pmut && !pmut->iio->pci_bus_no && pmu->pmu_idx != zero_bus_pmu) ? 0 : attr->mode;
3673 }
3674
/* SKX visibility callback: on SKX only PMU 0 may legitimately sit on bus 0. */
static umode_t
skx_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
{
	/* Root bus 0x00 is valid only for pmu_idx = 0. */
	return pmu_iio_mapping_visible(kobj, attr, die, 0);
}
3681
skx_iio_mapping_show(struct device * dev,struct device_attribute * attr,char * buf)3682 static ssize_t skx_iio_mapping_show(struct device *dev,
3683 struct device_attribute *attr, char *buf)
3684 {
3685 struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(dev);
3686 struct dev_ext_attribute *ea = to_dev_ext_attribute(attr);
3687 long die = (long)ea->var;
3688 struct intel_uncore_topology *pmut = pmu_topology(pmu, die);
3689
3690 return sprintf(buf, "%04x:%02x\n", pmut ? pmut->iio->segment : 0,
3691 pmut ? pmut->iio->pci_bus_no : 0);
3692 }
3693
/*
 * Read the CPU-bus-number MSR on @cpu into @topology.
 * Returns -ENXIO when the read fails or the valid bit is clear.
 */
static int skx_msr_cpu_bus_read(int cpu, u64 *topology)
{
	u64 val;

	if (rdmsrq_on_cpu(cpu, SKX_MSR_CPU_BUS_NUMBER, &val))
		return -ENXIO;
	if (!(val & SKX_MSR_CPU_BUS_VALID_BIT))
		return -ENXIO;

	*topology = val;

	return 0;
}
3706
die_to_cpu(int die)3707 static int die_to_cpu(int die)
3708 {
3709 int res = 0, cpu, current_die;
3710 /*
3711 * Using cpus_read_lock() to ensure cpu is not going down between
3712 * looking at cpu_online_mask.
3713 */
3714 cpus_read_lock();
3715 for_each_online_cpu(cpu) {
3716 current_die = topology_logical_die_id(cpu);
3717 if (current_die == die) {
3718 res = cpu;
3719 break;
3720 }
3721 }
3722 cpus_read_unlock();
3723 return res;
3724 }
3725
/* Kinds of per-box topology payloads stored in intel_uncore_topology. */
enum {
	IIO_TOPOLOGY_TYPE,
	UPI_TOPOLOGY_TYPE,
	TOPOLOGY_MAX
};
3731
/* Payload size for each topology type, derived from the union member types. */
static const size_t topology_size[TOPOLOGY_MAX] = {
	sizeof(*((struct intel_uncore_topology *)NULL)->iio),
	sizeof(*((struct intel_uncore_topology *)NULL)->upi)
};
3736
pmu_alloc_topology(struct intel_uncore_type * type,int topology_type)3737 static int pmu_alloc_topology(struct intel_uncore_type *type, int topology_type)
3738 {
3739 int die, idx;
3740 struct intel_uncore_topology **topology;
3741
3742 if (!type->num_boxes)
3743 return -EPERM;
3744
3745 topology = kzalloc_objs(*topology, uncore_max_dies());
3746 if (!topology)
3747 goto err;
3748
3749 for (die = 0; die < uncore_max_dies(); die++) {
3750 topology[die] = kzalloc_objs(**topology, type->num_boxes);
3751 if (!topology[die])
3752 goto clear;
3753 for (idx = 0; idx < type->num_boxes; idx++) {
3754 topology[die][idx].untyped = kcalloc(type->num_boxes,
3755 topology_size[topology_type],
3756 GFP_KERNEL);
3757 if (!topology[die][idx].untyped)
3758 goto clear;
3759 }
3760 }
3761
3762 type->topology = topology;
3763
3764 return 0;
3765 clear:
3766 for (; die >= 0; die--) {
3767 for (idx = 0; idx < type->num_boxes; idx++)
3768 kfree(topology[die][idx].untyped);
3769 kfree(topology[die]);
3770 }
3771 kfree(topology);
3772 err:
3773 return -ENOMEM;
3774 }
3775
pmu_free_topology(struct intel_uncore_type * type)3776 static void pmu_free_topology(struct intel_uncore_type *type)
3777 {
3778 int die, idx;
3779
3780 if (type->topology) {
3781 for (die = 0; die < uncore_max_dies(); die++) {
3782 for (idx = 0; idx < type->num_boxes; idx++)
3783 kfree(type->topology[die][idx].untyped);
3784 kfree(type->topology[die]);
3785 }
3786 kfree(type->topology);
3787 type->topology = NULL;
3788 }
3789 }
3790
/*
 * Walk every die: read its CPU-bus-number MSR and pass the PCI segment,
 * die id and raw MSR value to @topology_cb.
 *
 * NOTE: "ret" is reused - between the two checks below it temporarily
 * holds the (non-negative) PCI segment from uncore_die_to_segment().
 * Returns 0 on success, or the first failing step's negative error code
 * (-EPERM if there are no dies at all).
 */
static int skx_pmu_get_topology(struct intel_uncore_type *type,
				int (*topology_cb)(struct intel_uncore_type*, int, int, u64))
{
	int die, ret = -EPERM;
	u64 cpu_bus_msr;

	for (die = 0; die < uncore_max_dies(); die++) {
		ret = skx_msr_cpu_bus_read(die_to_cpu(die), &cpu_bus_msr);
		if (ret)
			break;

		/* ret now carries this die's PCI segment. */
		ret = uncore_die_to_segment(die);
		if (ret < 0)
			break;

		ret = topology_cb(type, ret, die, cpu_bus_msr);
		if (ret)
			break;
	}

	return ret;
}
3813
/*
 * Record each IIO box's PCI segment and root bus on @die.  The MSR packs
 * one bus number per box, one byte (BUS_NUM_STRIDE bits) apart.
 * Always succeeds.
 */
static int skx_iio_topology_cb(struct intel_uncore_type *type, int segment,
				int die, u64 cpu_bus_msr)
{
	struct intel_uncore_topology *ent;
	int box;

	for (box = 0; box < type->num_boxes; box++) {
		ent = &type->topology[die][box];
		ent->pmu_idx = box;
		ent->iio->segment = segment;
		ent->iio->pci_bus_no =
			(cpu_bus_msr >> (box * BUS_NUM_STRIDE)) & 0xff;
	}

	return 0;
}
3829
/* Populate the IIO topology for every die via the generic walker. */
static int skx_iio_get_topology(struct intel_uncore_type *type)
{
	return skx_pmu_get_topology(type, skx_iio_topology_cb);
}
3834
/* Die-mapping attribute group; .attrs is filled in by pmu_set_mapping(). */
static struct attribute_group skx_iio_mapping_group = {
	.is_visible	= skx_iio_mapping_visible,
};
3838
/* attr_update list handed to the perf core for the SKX IIO PMU. */
static const struct attribute_group *skx_iio_attr_update[] = {
	&skx_iio_mapping_group,
	NULL,
};
3843
pmu_clear_mapping_attr(const struct attribute_group ** groups,struct attribute_group * ag)3844 static void pmu_clear_mapping_attr(const struct attribute_group **groups,
3845 struct attribute_group *ag)
3846 {
3847 int i;
3848
3849 for (i = 0; groups[i]; i++) {
3850 if (groups[i] == ag) {
3851 for (i++; groups[i]; i++)
3852 groups[i - 1] = groups[i];
3853 groups[i - 1] = NULL;
3854 break;
3855 }
3856 }
3857 }
3858
/*
 * Build the per-die "dieN" sysfs mapping attributes for @type and attach
 * them to @ag.  On any failure the function unwinds its own allocations
 * and removes @ag from the type's attr_update list, so the mapping files
 * are simply absent rather than broken - hence no return value.
 */
static void
pmu_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag,
		ssize_t (*show)(struct device*, struct device_attribute*, char*),
		int topology_type)
{
	char buf[64];
	int ret;
	long die = -1;
	struct attribute **attrs = NULL;
	struct dev_ext_attribute *eas = NULL;

	ret = pmu_alloc_topology(type, topology_type);
	if (ret < 0)
		goto clear_attr_update;

	ret = type->get_topology(type);
	if (ret < 0)
		goto clear_topology;

	/* One more for NULL. */
	attrs = kzalloc_objs(*attrs, (uncore_max_dies() + 1));
	if (!attrs)
		goto clear_topology;

	eas = kzalloc_objs(*eas, uncore_max_dies());
	if (!eas)
		goto clear_attrs;

	for (die = 0; die < uncore_max_dies(); die++) {
		/* One read-only "dieN" attribute per die. */
		snprintf(buf, sizeof(buf), "die%ld", die);
		sysfs_attr_init(&eas[die].attr.attr);
		eas[die].attr.attr.name = kstrdup(buf, GFP_KERNEL);
		if (!eas[die].attr.attr.name)
			goto err;
		eas[die].attr.attr.mode = 0444;
		eas[die].attr.show = show;
		eas[die].attr.store = NULL;
		/* Stash the die number for the show() callback. */
		eas[die].var = (void *)die;
		attrs[die] = &eas[die].attr.attr;
	}
	ag->attrs = attrs;

	return;
err:
	/* kfree(NULL) is fine for the die whose kstrdup() just failed. */
	for (; die >= 0; die--)
		kfree(eas[die].attr.attr.name);
	kfree(eas);
clear_attrs:
	kfree(attrs);
clear_topology:
	pmu_free_topology(type);
clear_attr_update:
	pmu_clear_mapping_attr(type->attr_update, ag);
}
3913
/*
 * Undo pmu_set_mapping(): free the attribute names, the ext-attribute
 * array, the attribute pointer array, and the topology data.
 */
static void
pmu_cleanup_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
{
	struct attribute **attr = ag->attrs;

	if (!attr)
		return;

	for (; *attr; attr++)
		kfree((*attr)->name);
	/*
	 * All dev_ext_attributes live in one array; freeing the container
	 * of the first attribute releases the whole allocation.
	 */
	kfree(attr_to_ext_attr(*ag->attrs));
	kfree(ag->attrs);
	ag->attrs = NULL;
	pmu_free_topology(type);
}
3929
/* Wire up IIO die-mapping attributes using the IIO show() and payload type. */
static void
pmu_iio_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
{
	pmu_set_mapping(type, ag, skx_iio_mapping_show, IIO_TOPOLOGY_TYPE);
}
3935
/* SKX IIO: create the die-to-bus mapping attributes. */
static void skx_iio_set_mapping(struct intel_uncore_type *type)
{
	pmu_iio_set_mapping(type, &skx_iio_mapping_group);
}
3940
/* SKX IIO: tear down the die-to-bus mapping attributes. */
static void skx_iio_cleanup_mapping(struct intel_uncore_type *type)
{
	pmu_cleanup_mapping(type, &skx_iio_mapping_group);
}
3945
/* SKX IIO (integrated I/O stack) uncore PMU type. */
static struct intel_uncore_type skx_uncore_iio = {
	.name			= "iio",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= SKX_IIO0_MSR_PMON_CTL0,
	.perf_ctr		= SKX_IIO0_MSR_PMON_CTR0,
	.event_mask		= SKX_IIO_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
	.box_ctl		= SKX_IIO0_MSR_PMON_BOX_CTL,
	.msr_offset		= SKX_IIO_MSR_OFFSET,
	.constraints		= skx_uncore_iio_constraints,
	.ops			= &skx_uncore_iio_ops,
	.format_group		= &skx_uncore_iio_format_group,
	.attr_update		= skx_iio_attr_update,
	.get_topology		= skx_iio_get_topology,
	.set_mapping		= skx_iio_set_mapping,
	.cleanup_mapping	= skx_iio_cleanup_mapping,
};
3965
/* Free-running IIO counter groups: I/O clocks, bandwidth, utilization. */
enum perf_uncore_iio_freerunning_type_id {
	SKX_IIO_MSR_IOCLK			= 0,
	SKX_IIO_MSR_BW				= 1,
	SKX_IIO_MSR_UTIL			= 2,

	SKX_IIO_FREERUNNING_TYPE_MAX,
};
3973
3974
/*
 * Free-running counter descriptions; fields appear to be
 * { counter base MSR, counter offset, box offset, num counters, bits } -
 * confirm against struct freerunning_counters in uncore.h.
 */
static struct freerunning_counters skx_iio_freerunning[] = {
	[SKX_IIO_MSR_IOCLK]	= { 0xa45, 0x1, 0x20, 1, 36 },
	[SKX_IIO_MSR_BW]	= { 0xb00, 0x1, 0x10, 8, 36 },
	[SKX_IIO_MSR_UTIL]	= { 0xb08, 0x1, 0x10, 8, 36 },
};
3980
/*
 * Describe one free-running bandwidth event: the event encoding plus the
 * ".scale" and ".unit" companion attributes (reported in MiB).
 */
#define INTEL_UNCORE_FR_EVENT_DESC(name, umask, scl)		\
	INTEL_UNCORE_EVENT_DESC(name,				\
				"event=0xff,umask=" __stringify(umask)),\
	INTEL_UNCORE_EVENT_DESC(name.scale, __stringify(scl)),	\
	INTEL_UNCORE_EVENT_DESC(name.unit, "MiB")
3986
/* Named events exposed for the free-running IIO counters. */
static struct uncore_event_desc skx_uncore_iio_freerunning_events[] = {
	/* Free-Running IO CLOCKS Counter */
	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
	/* Free-Running IIO BANDWIDTH Counters */
	INTEL_UNCORE_FR_EVENT_DESC(bw_in_port0,		0x20, 3.814697266e-6),
	INTEL_UNCORE_FR_EVENT_DESC(bw_in_port1,		0x21, 3.814697266e-6),
	INTEL_UNCORE_FR_EVENT_DESC(bw_in_port2,		0x22, 3.814697266e-6),
	INTEL_UNCORE_FR_EVENT_DESC(bw_in_port3,		0x23, 3.814697266e-6),
	INTEL_UNCORE_FR_EVENT_DESC(bw_out_port0,	0x24, 3.814697266e-6),
	INTEL_UNCORE_FR_EVENT_DESC(bw_out_port1,	0x25, 3.814697266e-6),
	INTEL_UNCORE_FR_EVENT_DESC(bw_out_port2,	0x26, 3.814697266e-6),
	INTEL_UNCORE_FR_EVENT_DESC(bw_out_port3,	0x27, 3.814697266e-6),
	/* Free-running IIO UTILIZATION Counters */
	INTEL_UNCORE_EVENT_DESC(util_in_port0,		"event=0xff,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(util_out_port0,		"event=0xff,umask=0x31"),
	INTEL_UNCORE_EVENT_DESC(util_in_port1,		"event=0xff,umask=0x32"),
	INTEL_UNCORE_EVENT_DESC(util_out_port1,		"event=0xff,umask=0x33"),
	INTEL_UNCORE_EVENT_DESC(util_in_port2,		"event=0xff,umask=0x34"),
	INTEL_UNCORE_EVENT_DESC(util_out_port2,		"event=0xff,umask=0x35"),
	INTEL_UNCORE_EVENT_DESC(util_in_port3,		"event=0xff,umask=0x36"),
	INTEL_UNCORE_EVENT_DESC(util_out_port3,		"event=0xff,umask=0x37"),
	{ /* end: all zeroes */ },
};
4010
/* Free-running counters need no enable/disable - only read and config. */
static struct intel_uncore_ops skx_uncore_iio_freerunning_ops = {
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= uncore_freerunning_hw_config,
};
4015
/* Free-running IIO events only take event and umask fields. */
static struct attribute *skx_uncore_iio_freerunning_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	NULL,
};
4021
/* "format" directory for the free-running IIO PMU. */
static const struct attribute_group skx_uncore_iio_freerunning_format_group = {
	.name = "format",
	.attrs = skx_uncore_iio_freerunning_formats_attr,
};
4026
/* PMU type wrapping the free-running IIO counters. */
static struct intel_uncore_type skx_uncore_iio_free_running = {
	.name			= "iio_free_running",
	.num_counters		= 17,
	.num_boxes		= 6,
	.num_freerunning_types	= SKX_IIO_FREERUNNING_TYPE_MAX,
	.freerunning		= skx_iio_freerunning,
	.ops			= &skx_uncore_iio_freerunning_ops,
	.event_descs		= skx_uncore_iio_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
4037
/* Generic SKX format attributes (8-bit threshold variant). */
static struct attribute *skx_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};
4046
/* Shared "format" directory for several SKX PMU types below. */
static const struct attribute_group skx_uncore_format_group = {
	.name = "format",
	.attrs = skx_uncore_formats_attr,
};
4051
/* SKX IRP (IIO ring port) uncore PMU type; reuses the IIO MSR ops. */
static struct intel_uncore_type skx_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= SKX_IRP0_MSR_PMON_CTL0,
	.perf_ctr		= SKX_IRP0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SKX_IRP0_MSR_PMON_BOX_CTL,
	.msr_offset		= SKX_IRP_MSR_OFFSET,
	.ops			= &skx_uncore_iio_ops,
	.format_group		= &skx_uncore_format_group,
};
4065
/* PCU format attributes, including occupancy and frequency-band filters. */
static struct attribute *skx_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge_det.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};
4080
/* "format" directory for the SKX PCU PMU. */
static struct attribute_group skx_uncore_pcu_format_group = {
	.name = "format",
	.attrs = skx_uncore_pcu_formats_attr,
};
4085
/* PCU box operations: common IVB-EP MSR ops plus HSW-EP filter handling. */
static struct intel_uncore_ops skx_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};
4092
/* SKX PCU (power control unit) uncore PMU type. */
static struct intel_uncore_type skx_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &skx_uncore_pcu_ops,
	.format_group		= &skx_uncore_pcu_format_group,
};
4106
/* All MSR-based SKX uncore PMU types, registered by skx_uncore_cpu_init(). */
static struct intel_uncore_type *skx_msr_uncores[] = {
	&skx_uncore_ubox,
	&skx_uncore_chabox,
	&skx_uncore_iio,
	&skx_uncore_iio_free_running,
	&skx_uncore_irp,
	&skx_uncore_pcu,
	NULL,
};
4116
/*
 * To determine the number of CHAs, read bits 27:0 of the CAPID6 register,
 * which is located at Device 30, Function 3, Offset 0x9C. PCI ID 0x2083.
 */
4121 #define SKX_CAPID6 0x9c
4122 #define SKX_CHA_BIT_MASK GENMASK(27, 0)
4123
skx_count_chabox(void)4124 static int skx_count_chabox(void)
4125 {
4126 struct pci_dev *dev = NULL;
4127 u32 val = 0;
4128
4129 dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2083, dev);
4130 if (!dev)
4131 goto out;
4132
4133 pci_read_config_dword(dev, SKX_CAPID6, &val);
4134 val &= SKX_CHA_BIT_MASK;
4135 out:
4136 pci_dev_put(dev);
4137 return hweight32(val);
4138 }
4139
/* Register the SKX MSR-based uncore PMUs; CHA count is probed from CAPID6. */
void skx_uncore_cpu_init(void)
{
	skx_uncore_chabox.num_boxes = skx_count_chabox();
	uncore_msr_uncores = skx_msr_uncores;
}
4145
/* SKX IMC (memory controller channel) uncore PMU type, PCI-based. */
static struct intel_uncore_type skx_uncore_imc = {
	.name		= "imc",
	.num_counters	= 4,
	.num_boxes	= 6,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
4162
/* UPI format attributes; umask is extended beyond 8 bits on UPI. */
static struct attribute *skx_upi_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};
4171
/* "format" directory for the SKX UPI PMU. */
static const struct attribute_group skx_upi_uncore_format_group = {
	.name = "format",
	.attrs = skx_upi_uncore_formats_attr,
};
4176
skx_upi_uncore_pci_init_box(struct intel_uncore_box * box)4177 static void skx_upi_uncore_pci_init_box(struct intel_uncore_box *box)
4178 {
4179 struct pci_dev *pdev = box->pci_dev;
4180
4181 __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
4182 pci_write_config_dword(pdev, SKX_UPI_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
4183 }
4184
/* PCI config-space box operations for the SKX UPI PMON. */
static struct intel_uncore_ops skx_upi_uncore_pci_ops = {
	.init_box	= skx_upi_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};
4193
/* Show the UPI mapping attribute only when this link trained successfully. */
static umode_t
skx_upi_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
{
	struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(kobj_to_dev(kobj));

	return pmu->type->topology[die][pmu->pmu_idx].upi->enabled ? attr->mode : 0;
}
4201
/* sysfs show(): print the peer link and die this UPI link connects to. */
static ssize_t skx_upi_mapping_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(dev);
	struct dev_ext_attribute *ea = to_dev_ext_attribute(attr);
	/* The die number was stashed in ea->var by pmu_set_mapping(). */
	long die = (long)ea->var;
	struct uncore_upi_topology *upi = pmu->type->topology[die][pmu->pmu_idx].upi;

	return sysfs_emit(buf, "upi_%d,die_%d\n", upi->pmu_idx_to, upi->die_to);
}
4212
4213 #define SKX_UPI_REG_DID 0x2058
4214 #define SKX_UPI_REGS_ADDR_DEVICE_LINK0 0x0e
4215 #define SKX_UPI_REGS_ADDR_FUNCTION 0x00
4216
4217 /*
4218 * UPI Link Parameter 0
4219 * | Bit | Default | Description
4220 * | 19:16 | 0h | base_nodeid - The NodeID of the sending socket.
4221 * | 12:8 | 00h | sending_port - The processor die port number of the sending port.
4222 */
4223 #define SKX_KTILP0_OFFSET 0x94
4224
4225 /*
4226 * UPI Pcode Status. This register is used by PCode to store the link training status.
4227 * | Bit | Default | Description
4228 * | 4 | 0h | ll_status_valid — Bit indicates the valid training status
4229 * logged from PCode to the BIOS.
4230 */
4231 #define SKX_KTIPCSTS_OFFSET 0x120
4232
upi_fill_topology(struct pci_dev * dev,struct intel_uncore_topology * tp,int pmu_idx)4233 static int upi_fill_topology(struct pci_dev *dev, struct intel_uncore_topology *tp,
4234 int pmu_idx)
4235 {
4236 int ret;
4237 u32 upi_conf;
4238 struct uncore_upi_topology *upi = tp->upi;
4239
4240 tp->pmu_idx = pmu_idx;
4241 ret = pci_read_config_dword(dev, SKX_KTIPCSTS_OFFSET, &upi_conf);
4242 if (ret) {
4243 ret = pcibios_err_to_errno(ret);
4244 goto err;
4245 }
4246 upi->enabled = (upi_conf >> 4) & 1;
4247 if (upi->enabled) {
4248 ret = pci_read_config_dword(dev, SKX_KTILP0_OFFSET,
4249 &upi_conf);
4250 if (ret) {
4251 ret = pcibios_err_to_errno(ret);
4252 goto err;
4253 }
4254 upi->die_to = (upi_conf >> 16) & 0xf;
4255 upi->pmu_idx_to = (upi_conf >> 8) & 0x1f;
4256 }
4257 err:
4258 return ret;
4259 }
4260
/*
 * Probe every UPI link device on @die and record its topology.
 * Returns 0 on success (absent link devices are skipped), or the first
 * error from upi_fill_topology().
 *
 * Fixes: (1) "ret" was used uninitialized when no link device was found
 * on any iteration; (2) only the last pci_dev reference was dropped -
 * every earlier iteration leaked a device reference.
 */
static int skx_upi_topology_cb(struct intel_uncore_type *type, int segment,
				int die, u64 cpu_bus_msr)
{
	int idx, ret = 0;
	struct intel_uncore_topology *upi;
	unsigned int devfn;
	struct pci_dev *dev;
	u8 bus = cpu_bus_msr >> (3 * BUS_NUM_STRIDE);

	for (idx = 0; idx < type->num_boxes; idx++) {
		upi = &type->topology[die][idx];
		devfn = PCI_DEVFN(SKX_UPI_REGS_ADDR_DEVICE_LINK0 + idx,
				  SKX_UPI_REGS_ADDR_FUNCTION);
		dev = pci_get_domain_bus_and_slot(segment, bus, devfn);
		if (!dev)
			continue;

		ret = upi_fill_topology(dev, upi, idx);
		/* Drop the reference taken by pci_get_domain_bus_and_slot(). */
		pci_dev_put(dev);
		if (ret)
			break;
	}

	return ret;
}
4285
/* Populate UPI topology for every die; not supported on CPX (stepping 11). */
static int skx_upi_get_topology(struct intel_uncore_type *type)
{
	/* CPX case is not supported */
	if (boot_cpu_data.x86_stepping == 11)
		return -EPERM;

	return skx_pmu_get_topology(type, skx_upi_topology_cb);
}
4294
/* UPI die-mapping attribute group; .attrs filled in by pmu_set_mapping(). */
static struct attribute_group skx_upi_mapping_group = {
	.is_visible	= skx_upi_mapping_visible,
};
4298
/* attr_update list handed to the perf core for the SKX UPI PMU. */
static const struct attribute_group *skx_upi_attr_update[] = {
	&skx_upi_mapping_group,
	NULL
};
4303
/* Wire up UPI die-mapping attributes using the UPI show() and payload type. */
static void
pmu_upi_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
{
	pmu_set_mapping(type, ag, skx_upi_mapping_show, UPI_TOPOLOGY_TYPE);
}
4309
/* SKX UPI: create the link-mapping attributes. */
static void skx_upi_set_mapping(struct intel_uncore_type *type)
{
	pmu_upi_set_mapping(type, &skx_upi_mapping_group);
}
4314
/* SKX UPI: tear down the link-mapping attributes. */
static void skx_upi_cleanup_mapping(struct intel_uncore_type *type)
{
	pmu_cleanup_mapping(type, &skx_upi_mapping_group);
}
4319
/* SKX UPI (socket interconnect link) uncore PMU type, PCI-based. */
static struct intel_uncore_type skx_uncore_upi = {
	.name		= "upi",
	.num_counters	= 4,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SKX_UPI_PCI_PMON_CTR0,
	.event_ctl	= SKX_UPI_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext = SKX_UPI_CTL_UMASK_EXT,
	.box_ctl	= SKX_UPI_PCI_PMON_BOX_CTL,
	.ops		= &skx_upi_uncore_pci_ops,
	.format_group	= &skx_upi_uncore_format_group,
	.attr_update	= skx_upi_attr_update,
	.get_topology	= skx_upi_get_topology,
	.set_mapping	= skx_upi_set_mapping,
	.cleanup_mapping = skx_upi_cleanup_mapping,
};
4337
skx_m2m_uncore_pci_init_box(struct intel_uncore_box * box)4338 static void skx_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
4339 {
4340 struct pci_dev *pdev = box->pci_dev;
4341
4342 __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
4343 pci_write_config_dword(pdev, SKX_M2M_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
4344 }
4345
/* PCI config-space box operations for the SKX M2M PMON. */
static struct intel_uncore_ops skx_m2m_uncore_pci_ops = {
	.init_box	= skx_m2m_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};
4354
/* SKX M2M (mesh-to-memory) uncore PMU type, PCI-based. */
static struct intel_uncore_type skx_uncore_m2m = {
	.name		= "m2m",
	.num_counters	= 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SKX_M2M_PCI_PMON_CTR0,
	.event_ctl	= SKX_M2M_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SKX_M2M_PCI_PMON_BOX_CTL,
	.ops		= &skx_m2m_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
4367
/* M2PCIe counter constraints. */
static struct event_constraint skx_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	EVENT_CONSTRAINT_END
};
4372
/* SKX M2PCIe (mesh-to-PCIe) uncore PMU type, PCI-based. */
static struct intel_uncore_type skx_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters	= 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.constraints	= skx_uncore_m2pcie_constraints,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
4386
/* M3UPI counter constraints. */
static struct event_constraint skx_uncore_m3upi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT_RANGE(0x1d, 0x1e, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x4e, 0x52, 0x7),
	EVENT_CONSTRAINT_END
};
4393
/* SKX M3UPI (mesh-to-UPI) uncore PMU type, PCI-based. */
static struct intel_uncore_type skx_uncore_m3upi = {
	.name		= "m3upi",
	.num_counters	= 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.constraints	= skx_uncore_m3upi_constraints,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
4407
/* Indices into skx_pci_uncores[], referenced by the PCI ID table below. */
enum {
	SKX_PCI_UNCORE_IMC,
	SKX_PCI_UNCORE_M2M,
	SKX_PCI_UNCORE_UPI,
	SKX_PCI_UNCORE_M2PCIE,
	SKX_PCI_UNCORE_M3UPI,
};
4415
/* All PCI-based SKX uncore PMU types, registered by skx_uncore_pci_init(). */
static struct intel_uncore_type *skx_pci_uncores[] = {
	[SKX_PCI_UNCORE_IMC]	= &skx_uncore_imc,
	[SKX_PCI_UNCORE_M2M]	= &skx_uncore_m2m,
	[SKX_PCI_UNCORE_UPI]	= &skx_uncore_upi,
	[SKX_PCI_UNCORE_M2PCIE]	= &skx_uncore_m2pcie,
	[SKX_PCI_UNCORE_M3UPI]	= &skx_uncore_m3upi,
	NULL,
};
4424
/*
 * SKX uncore PCI device table.  Each entry's driver_data packs the PCI
 * device/function location, PMU type index and box index.
 */
static const struct pci_device_id skx_uncore_pci_ids[] = {
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 2, SKX_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 6, SKX_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 2, SKX_PCI_UNCORE_IMC, 2),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 2, SKX_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 6, SKX_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 2, SKX_PCI_UNCORE_IMC, 5),
	},
	{ /* M2M0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 0, SKX_PCI_UNCORE_M2M, 0),
	},
	{ /* M2M1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 0, SKX_PCI_UNCORE_M2M, 1),
	},
	{ /* UPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, SKX_PCI_UNCORE_UPI, 0),
	},
	{ /* UPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, SKX_PCI_UNCORE_UPI, 1),
	},
	{ /* UPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, SKX_PCI_UNCORE_UPI, 2),
	},
	{ /* M2PCIe 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 1, SKX_PCI_UNCORE_M2PCIE, 0),
	},
	{ /* M2PCIe 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 1, SKX_PCI_UNCORE_M2PCIE, 1),
	},
	{ /* M2PCIe 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(23, 1, SKX_PCI_UNCORE_M2PCIE, 2),
	},
	{ /* M2PCIe 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 5, SKX_PCI_UNCORE_M2PCIE, 3),
	},
	{ /* M3UPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 0),
	},
	{ /* M3UPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204E),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 2, SKX_PCI_UNCORE_M3UPI, 1),
	},
	{ /* M3UPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 5, SKX_PCI_UNCORE_M3UPI, 2),
	},
	{ /* end: all zeroes */ }
};
4500
4501
/* Skeleton PCI driver used by the uncore core to claim the PMON devices. */
static struct pci_driver skx_uncore_pci_driver = {
	.name		= "skx_uncore",
	.id_table	= skx_uncore_pci_ids,
};
4506
skx_uncore_pci_init(void)4507 int skx_uncore_pci_init(void)
4508 {
4509 /* need to double check pci address */
4510 int ret = snbep_pci2phy_map_init(0x2014, SKX_CPUNODEID, SKX_GIDNIDMAP, false);
4511
4512 if (ret)
4513 return ret;
4514
4515 uncore_pci_uncores = skx_pci_uncores;
4516 uncore_pci_driver = &skx_uncore_pci_driver;
4517 return 0;
4518 }
4519
4520 /* end of SKX uncore support */
4521
4522 /* SNR uncore support */
4523
/* SNR Ubox uncore PMU type, with a fixed uncore-clock counter. */
static struct intel_uncore_type snr_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= SNR_U_MSR_PMON_CTR0,
	.event_ctl		= SNR_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= SNR_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= SNR_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_format_group,
};
4538
/* SNR CHA format attributes, including the 5-bit TID filter. */
static struct attribute *snr_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext2.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid5.attr,
	NULL,
};
/* Groups the SNR CHA format attributes under events/<pmu>/format. */
static const struct attribute_group snr_uncore_chabox_format_group = {
	.name = "format",
	.attrs = snr_uncore_cha_formats_attr,
};
4553
snr_cha_hw_config(struct intel_uncore_box * box,struct perf_event * event)4554 static int snr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
4555 {
4556 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
4557
4558 reg1->reg = SNR_C0_MSR_PMON_BOX_FILTER0 +
4559 box->pmu->type->msr_offset * box->pmu->pmu_idx;
4560 reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID;
4561 reg1->idx = 0;
4562
4563 return 0;
4564 }
4565
snr_cha_enable_event(struct intel_uncore_box * box,struct perf_event * event)4566 static void snr_cha_enable_event(struct intel_uncore_box *box,
4567 struct perf_event *event)
4568 {
4569 struct hw_perf_event *hwc = &event->hw;
4570 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
4571
4572 if (reg1->idx != EXTRA_REG_NONE)
4573 wrmsrq(reg1->reg, reg1->config);
4574
4575 wrmsrq(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
4576 }
4577
/* SNR CHA box operations: IVB-EP style box control plus CHA-specific hooks. */
static struct intel_uncore_ops snr_uncore_chabox_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= snr_cha_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= snr_cha_hw_config,
};
4587
/* SNR CHA PMON: 6 boxes, 4 counters each, with extended umask bits. */
static struct intel_uncore_type snr_uncore_chabox = {
	.name			= "cha",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= SNR_CHA_MSR_PMON_CTL0,
	.perf_ctr		= SNR_CHA_MSR_PMON_CTR0,
	.box_ctl		= SNR_CHA_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SNR_CHA_RAW_EVENT_MASK_EXT,
	.ops			= &snr_uncore_chabox_ops,
	.format_group		= &snr_uncore_chabox_format_group,
};
4602
/* sysfs "format" attributes for SNR IIO events (channel and FC masks). */
static struct attribute *snr_uncore_iio_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh9.attr,
	&format_attr_ch_mask2.attr,
	&format_attr_fc_mask2.attr,
	NULL,
};
4613
/* Groups the SNR IIO format attributes under events/<pmu>/format. */
static const struct attribute_group snr_uncore_iio_format_group = {
	.name = "format",
	.attrs = snr_uncore_iio_formats_attr,
};
4618
/*
 * sysfs visibility callback for SNR IIO die mapping attributes.
 * Delegates to the common helper; on SNR, root bus 0x00 belongs to
 * IIO PMU index 1, so that index is passed as the valid pmu_idx.
 */
static umode_t
snr_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
{
	/* Root bus 0x00 is valid only for pmu_idx = 1. */
	return pmu_iio_mapping_visible(kobj, attr, die, 1);
}
4625
/* Attribute group for IIO die mappings; .attrs is filled at set_mapping time. */
static struct attribute_group snr_iio_mapping_group = {
	.is_visible	= snr_iio_mapping_visible,
};
4629
/* attr_update list hooked into the SNR IIO uncore type. */
static const struct attribute_group *snr_iio_attr_update[] = {
	&snr_iio_mapping_group,
	NULL,
};
4634
sad_cfg_iio_topology(struct intel_uncore_type * type,u8 * sad_pmon_mapping)4635 static int sad_cfg_iio_topology(struct intel_uncore_type *type, u8 *sad_pmon_mapping)
4636 {
4637 u32 sad_cfg;
4638 int die, stack_id, ret = -EPERM;
4639 struct pci_dev *dev = NULL;
4640
4641 while ((dev = pci_get_device(PCI_VENDOR_ID_INTEL, SNR_ICX_MESH2IIO_MMAP_DID, dev))) {
4642 ret = pci_read_config_dword(dev, SNR_ICX_SAD_CONTROL_CFG, &sad_cfg);
4643 if (ret) {
4644 ret = pcibios_err_to_errno(ret);
4645 break;
4646 }
4647
4648 die = uncore_pcibus_to_dieid(dev->bus);
4649 stack_id = SAD_CONTROL_STACK_ID(sad_cfg);
4650 if (die < 0 || stack_id >= type->num_boxes) {
4651 ret = -EPERM;
4652 break;
4653 }
4654
4655 /* Convert stack id from SAD_CONTROL to PMON notation. */
4656 stack_id = sad_pmon_mapping[stack_id];
4657
4658 type->topology[die][stack_id].iio->segment = pci_domain_nr(dev->bus);
4659 type->topology[die][stack_id].pmu_idx = stack_id;
4660 type->topology[die][stack_id].iio->pci_bus_no = dev->bus->number;
4661 }
4662
4663 pci_dev_put(dev);
4664
4665 return ret;
4666 }
4667
4668 /*
4669 * SNR has a static mapping of stack IDs from SAD_CONTROL_CFG notation to PMON
4670 */
/* SNR IIO stack ids in PMON numbering. */
enum {
	SNR_QAT_PMON_ID,
	SNR_CBDMA_DMI_PMON_ID,
	SNR_NIS_PMON_ID,
	SNR_DLB_PMON_ID,
	SNR_PCIE_GEN3_PMON_ID
};
4678
/* Indexed by SAD_CONTROL_CFG stack id; value is the PMON stack id. */
static u8 snr_sad_pmon_mapping[] = {
	SNR_CBDMA_DMI_PMON_ID,
	SNR_PCIE_GEN3_PMON_ID,
	SNR_DLB_PMON_ID,
	SNR_NIS_PMON_ID,
	SNR_QAT_PMON_ID
};
4686
snr_iio_get_topology(struct intel_uncore_type * type)4687 static int snr_iio_get_topology(struct intel_uncore_type *type)
4688 {
4689 return sad_cfg_iio_topology(type, snr_sad_pmon_mapping);
4690 }
4691
snr_iio_set_mapping(struct intel_uncore_type * type)4692 static void snr_iio_set_mapping(struct intel_uncore_type *type)
4693 {
4694 pmu_iio_set_mapping(type, &snr_iio_mapping_group);
4695 }
4696
snr_iio_cleanup_mapping(struct intel_uncore_type * type)4697 static void snr_iio_cleanup_mapping(struct intel_uncore_type *type)
4698 {
4699 pmu_cleanup_mapping(type, &snr_iio_mapping_group);
4700 }
4701
/* Counter constraints: some SNR IIO events only run on a subset of counters. */
static struct event_constraint snr_uncore_iio_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xd5, 0xc),
	EVENT_CONSTRAINT_END
};
4708
/* SNR IIO PMON: 5 stacks with bus-mapping discovery hooks attached. */
static struct intel_uncore_type snr_uncore_iio = {
	.name			= "iio",
	.num_counters		= 4,
	.num_boxes		= 5,
	.perf_ctr_bits		= 48,
	.event_ctl		= SNR_IIO_MSR_PMON_CTL0,
	.perf_ctr		= SNR_IIO_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
	.box_ctl		= SNR_IIO_MSR_PMON_BOX_CTL,
	.msr_offset		= SNR_IIO_MSR_OFFSET,
	.constraints		= snr_uncore_iio_constraints,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &snr_uncore_iio_format_group,
	.attr_update		= snr_iio_attr_update,
	.get_topology		= snr_iio_get_topology,
	.set_mapping		= snr_iio_set_mapping,
	.cleanup_mapping	= snr_iio_cleanup_mapping,
};
4728
/* SNR IRP PMON: one IRP per IIO stack (5 boxes), 2 counters each. */
static struct intel_uncore_type snr_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 5,
	.perf_ctr_bits		= 48,
	.event_ctl		= SNR_IRP0_MSR_PMON_CTL0,
	.perf_ctr		= SNR_IRP0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNR_IRP0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNR_IRP_MSR_OFFSET,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_format_group,
};
4742
/* SNR M2PCIe PMON: mesh-to-PCIe traffic counters, one box per IIO stack. */
static struct intel_uncore_type snr_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters	= 4,
	.num_boxes	= 5,
	.perf_ctr_bits	= 48,
	.event_ctl	= SNR_M2PCIE_MSR_PMON_CTL0,
	.perf_ctr	= SNR_M2PCIE_MSR_PMON_CTR0,
	.box_ctl	= SNR_M2PCIE_MSR_PMON_BOX_CTL,
	.msr_offset	= SNR_M2PCIE_MSR_OFFSET,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_format_group,
};
4756
snr_pcu_hw_config(struct intel_uncore_box * box,struct perf_event * event)4757 static int snr_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
4758 {
4759 struct hw_perf_event *hwc = &event->hw;
4760 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
4761 int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
4762
4763 if (ev_sel >= 0xb && ev_sel <= 0xe) {
4764 reg1->reg = SNR_PCU_MSR_PMON_BOX_FILTER;
4765 reg1->idx = ev_sel - 0xb;
4766 reg1->config = event->attr.config1 & (0xff << reg1->idx);
4767 }
4768 return 0;
4769 }
4770
/* SNR PCU ops: common IVB-EP MSR ops plus filter config and shared-reg constraints. */
static struct intel_uncore_ops snr_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snr_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};
4777
/* SNR PCU PMON: single box; one shared register backs the occupancy filter. */
static struct intel_uncore_type snr_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNR_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNR_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNR_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snr_uncore_pcu_ops,
	.format_group		= &skx_uncore_pcu_format_group,
};
4791
/* Free-running counter groups provided by the SNR IIO stacks. */
enum perf_uncore_snr_iio_freerunning_type_id {
	SNR_IIO_MSR_IOCLK,
	SNR_IIO_MSR_BW_IN,

	SNR_IIO_FREERUNNING_TYPE_MAX,
};
4798
/* {counter base, box offset, counter offset, num counters, width in bits} */
static struct freerunning_counters snr_iio_freerunning[] = {
	[SNR_IIO_MSR_IOCLK]	= { 0x1eac, 0x1, 0x10, 1, 48 },
	[SNR_IIO_MSR_BW_IN]	= { 0x1f00, 0x1, 0x10, 8, 48 },
};
4803
/* Named events for the SNR IIO free-running counters (scale converts to MiB). */
static struct uncore_event_desc snr_uncore_iio_freerunning_events[] = {
	/* Free-Running IIO CLOCKS Counter */
	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
	/* Free-Running IIO BANDWIDTH IN Counters */
	INTEL_UNCORE_FR_EVENT_DESC(bw_in_port0,		0x20, 3.0517578125e-5),
	INTEL_UNCORE_FR_EVENT_DESC(bw_in_port1,		0x21, 3.0517578125e-5),
	INTEL_UNCORE_FR_EVENT_DESC(bw_in_port2,		0x22, 3.0517578125e-5),
	INTEL_UNCORE_FR_EVENT_DESC(bw_in_port3,		0x23, 3.0517578125e-5),
	INTEL_UNCORE_FR_EVENT_DESC(bw_in_port4,		0x24, 3.0517578125e-5),
	INTEL_UNCORE_FR_EVENT_DESC(bw_in_port5,		0x25, 3.0517578125e-5),
	INTEL_UNCORE_FR_EVENT_DESC(bw_in_port6,		0x26, 3.0517578125e-5),
	INTEL_UNCORE_FR_EVENT_DESC(bw_in_port7,		0x27, 3.0517578125e-5),
	{ /* end: all zeroes */ },
};
4818
/* Pseudo-PMU wrapping the SNR IIO free-running counters (1 ioclk + 8 bw_in). */
static struct intel_uncore_type snr_uncore_iio_free_running = {
	.name			= "iio_free_running",
	.num_counters		= 9,
	.num_boxes		= 5,
	.num_freerunning_types	= SNR_IIO_FREERUNNING_TYPE_MAX,
	.freerunning		= snr_iio_freerunning,
	.ops			= &skx_uncore_iio_freerunning_ops,
	.event_descs		= snr_uncore_iio_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
4829
/* All MSR-accessed SNR uncore PMU types, NULL-terminated. */
static struct intel_uncore_type *snr_msr_uncores[] = {
	&snr_uncore_ubox,
	&snr_uncore_chabox,
	&snr_uncore_iio,
	&snr_uncore_irp,
	&snr_uncore_m2pcie,
	&snr_uncore_pcu,
	&snr_uncore_iio_free_running,
	NULL,
};
4840
snr_uncore_cpu_init(void)4841 void snr_uncore_cpu_init(void)
4842 {
4843 uncore_msr_uncores = snr_msr_uncores;
4844 }
4845
snr_m2m_uncore_pci_init_box(struct intel_uncore_box * box)4846 static void snr_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
4847 {
4848 struct pci_dev *pdev = box->pci_dev;
4849 int box_ctl = uncore_pci_box_ctl(box);
4850
4851 __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
4852 pci_write_config_dword(pdev, box_ctl, IVBEP_PMON_BOX_CTL_INT);
4853 }
4854
/* SNR M2M PCI box operations: SNB-EP PCI helpers with M2M-specific init. */
static struct intel_uncore_ops snr_m2m_uncore_pci_ops = {
	.init_box	= snr_m2m_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};
4863
/* sysfs "format" attributes for SNR M2M events (wider umask extension). */
static struct attribute *snr_m2m_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext3.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};
4872
/* Groups the SNR M2M format attributes under events/<pmu>/format. */
static const struct attribute_group snr_m2m_uncore_format_group = {
	.name = "format",
	.attrs = snr_m2m_uncore_formats_attr,
};
4877
/* SNR M2M (mesh-to-memory) PMON, accessed through PCI config space. */
static struct intel_uncore_type snr_uncore_m2m = {
	.name		= "m2m",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SNR_M2M_PCI_PMON_CTR0,
	.event_ctl	= SNR_M2M_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext	= SNR_M2M_PCI_PMON_UMASK_EXT,
	.box_ctl	= SNR_M2M_PCI_PMON_BOX_CTL,
	.ops		= &snr_m2m_uncore_pci_ops,
	.format_group	= &snr_m2m_uncore_format_group,
};
4891
snr_uncore_pci_enable_event(struct intel_uncore_box * box,struct perf_event * event)4892 static void snr_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
4893 {
4894 struct pci_dev *pdev = box->pci_dev;
4895 struct hw_perf_event *hwc = &event->hw;
4896
4897 pci_write_config_dword(pdev, hwc->config_base, (u32)(hwc->config | SNBEP_PMON_CTL_EN));
4898 pci_write_config_dword(pdev, hwc->config_base + 4, (u32)(hwc->config >> 32));
4899 }
4900
/* SNR PCIe3 ops: like M2M but enable_event writes the full 64-bit control. */
static struct intel_uncore_ops snr_pcie3_uncore_pci_ops = {
	.init_box	= snr_m2m_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snr_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};
4909
/* SNR PCIe3 root-port PMON; reuses the SKX IIO event layout and formats. */
static struct intel_uncore_type snr_uncore_pcie3 = {
	.name		= "pcie3",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SNR_PCIE3_PCI_PMON_CTR0,
	.event_ctl	= SNR_PCIE3_PCI_PMON_CTL0,
	.event_mask	= SKX_IIO_PMON_RAW_EVENT_MASK,
	.event_mask_ext	= SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
	.box_ctl	= SNR_PCIE3_PCI_PMON_BOX_CTL,
	.ops		= &snr_pcie3_uncore_pci_ops,
	.format_group	= &skx_uncore_iio_format_group,
};
4923
/* Indices into snr_pci_uncores[]. */
enum {
	SNR_PCI_UNCORE_M2M,
	SNR_PCI_UNCORE_PCIE3,
};
4928
/* All PCI-accessed SNR uncore PMU types, NULL-terminated. */
static struct intel_uncore_type *snr_pci_uncores[] = {
	[SNR_PCI_UNCORE_M2M]		= &snr_uncore_m2m,
	[SNR_PCI_UNCORE_PCIE3]		= &snr_uncore_pcie3,
	NULL,
};
4934
/* Device/function locations of the SNR PCI PMON devices. */
static const struct pci_device_id snr_uncore_pci_ids[] = {
	{ /* M2M */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, SNR_PCI_UNCORE_M2M, 0),
	},
	{ /* end: all zeroes */ }
};
4942
/* Stub PCI driver: supplies the ID table used to locate SNR uncore devices. */
static struct pci_driver snr_uncore_pci_driver = {
	.name		= "snr_uncore",
	.id_table	= snr_uncore_pci_ids,
};
4947
/* PMON devices that sit behind a PCIe switch and need the sub-driver path. */
static const struct pci_device_id snr_uncore_pci_sub_ids[] = {
	{ /* PCIe3 RP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x334a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 0, SNR_PCI_UNCORE_PCIE3, 0),
	},
	{ /* end: all zeroes */ }
};
4955
/* Stub sub-driver matching the behind-the-switch SNR PMON devices. */
static struct pci_driver snr_uncore_pci_sub_driver = {
	.name		= "snr_uncore_sub",
	.id_table	= snr_uncore_pci_sub_ids,
};
4960
snr_uncore_pci_init(void)4961 int snr_uncore_pci_init(void)
4962 {
4963 /* SNR UBOX DID */
4964 int ret = snbep_pci2phy_map_init(0x3460, SKX_CPUNODEID,
4965 SKX_GIDNIDMAP, true);
4966
4967 if (ret)
4968 return ret;
4969
4970 uncore_pci_uncores = snr_pci_uncores;
4971 uncore_pci_driver = &snr_uncore_pci_driver;
4972 uncore_pci_sub_driver = &snr_uncore_pci_sub_driver;
4973 return 0;
4974 }
4975
4976 #define SNR_MC_DEVICE_ID 0x3451
4977
snr_uncore_get_mc_dev(unsigned int device,int id)4978 static struct pci_dev *snr_uncore_get_mc_dev(unsigned int device, int id)
4979 {
4980 struct pci_dev *mc_dev = NULL;
4981 int pkg;
4982
4983 while (1) {
4984 mc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, mc_dev);
4985 if (!mc_dev)
4986 break;
4987 pkg = uncore_pcibus_to_dieid(mc_dev->bus);
4988 if (pkg == id)
4989 break;
4990 }
4991 return mc_dev;
4992 }
4993
snr_uncore_mmio_map(struct intel_uncore_box * box,unsigned int box_ctl,int mem_offset,unsigned int device)4994 static int snr_uncore_mmio_map(struct intel_uncore_box *box,
4995 unsigned int box_ctl, int mem_offset,
4996 unsigned int device)
4997 {
4998 struct pci_dev *pdev = snr_uncore_get_mc_dev(device, box->dieid);
4999 struct intel_uncore_type *type = box->pmu->type;
5000 resource_size_t addr;
5001 u32 pci_dword;
5002
5003 if (!pdev)
5004 return -ENODEV;
5005
5006 pci_read_config_dword(pdev, SNR_IMC_MMIO_BASE_OFFSET, &pci_dword);
5007 addr = ((resource_size_t)pci_dword & SNR_IMC_MMIO_BASE_MASK) << 23;
5008
5009 pci_read_config_dword(pdev, mem_offset, &pci_dword);
5010 addr |= (pci_dword & SNR_IMC_MMIO_MEM0_MASK) << 12;
5011
5012 addr += box_ctl;
5013
5014 pci_dev_put(pdev);
5015
5016 box->io_addr = ioremap(addr, type->mmio_map_size);
5017 if (!box->io_addr) {
5018 pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);
5019 return -EINVAL;
5020 }
5021
5022 return 0;
5023 }
5024
__snr_uncore_mmio_init_box(struct intel_uncore_box * box,unsigned int box_ctl,int mem_offset,unsigned int device)5025 static void __snr_uncore_mmio_init_box(struct intel_uncore_box *box,
5026 unsigned int box_ctl, int mem_offset,
5027 unsigned int device)
5028 {
5029 if (!snr_uncore_mmio_map(box, box_ctl, mem_offset, device))
5030 writel(IVBEP_PMON_BOX_CTL_INT, box->io_addr);
5031 }
5032
snr_uncore_mmio_init_box(struct intel_uncore_box * box)5033 static void snr_uncore_mmio_init_box(struct intel_uncore_box *box)
5034 {
5035 __snr_uncore_mmio_init_box(box, uncore_mmio_box_ctl(box),
5036 SNR_IMC_MMIO_MEM0_OFFSET,
5037 SNR_MC_DEVICE_ID);
5038 }
5039
snr_uncore_mmio_disable_box(struct intel_uncore_box * box)5040 static void snr_uncore_mmio_disable_box(struct intel_uncore_box *box)
5041 {
5042 u32 config;
5043
5044 if (!box->io_addr)
5045 return;
5046
5047 config = readl(box->io_addr);
5048 config |= SNBEP_PMON_BOX_CTL_FRZ;
5049 writel(config, box->io_addr);
5050 }
5051
snr_uncore_mmio_enable_box(struct intel_uncore_box * box)5052 static void snr_uncore_mmio_enable_box(struct intel_uncore_box *box)
5053 {
5054 u32 config;
5055
5056 if (!box->io_addr)
5057 return;
5058
5059 config = readl(box->io_addr);
5060 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
5061 writel(config, box->io_addr);
5062 }
5063
snr_uncore_mmio_enable_event(struct intel_uncore_box * box,struct perf_event * event)5064 static void snr_uncore_mmio_enable_event(struct intel_uncore_box *box,
5065 struct perf_event *event)
5066 {
5067 struct hw_perf_event *hwc = &event->hw;
5068
5069 if (!box->io_addr)
5070 return;
5071
5072 if (!uncore_mmio_is_valid_offset(box, hwc->config_base))
5073 return;
5074
5075 writel(hwc->config | SNBEP_PMON_CTL_EN,
5076 box->io_addr + hwc->config_base);
5077 }
5078
snr_uncore_mmio_disable_event(struct intel_uncore_box * box,struct perf_event * event)5079 static void snr_uncore_mmio_disable_event(struct intel_uncore_box *box,
5080 struct perf_event *event)
5081 {
5082 struct hw_perf_event *hwc = &event->hw;
5083
5084 if (!box->io_addr)
5085 return;
5086
5087 if (!uncore_mmio_is_valid_offset(box, hwc->config_base))
5088 return;
5089
5090 writel(hwc->config, box->io_addr + hwc->config_base);
5091 }
5092
/* Operations for the MMIO-mapped SNR IMC PMON boxes. */
static struct intel_uncore_ops snr_uncore_mmio_ops = {
	.init_box	= snr_uncore_mmio_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.disable_box	= snr_uncore_mmio_disable_box,
	.enable_box	= snr_uncore_mmio_enable_box,
	.disable_event	= snr_uncore_mmio_disable_event,
	.enable_event	= snr_uncore_mmio_enable_event,
	.read_counter	= uncore_mmio_read_counter,
};
5102
/* Common IMC events; CAS count scale (64/2^20) converts accesses to MiB. */
static struct uncore_event_desc snr_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};
5113
/* SNR IMC PMON: two memory-controller boxes, accessed through MMIO. */
static struct intel_uncore_type snr_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNR_IMC_MMIO_PMON_FIXED_CTR,
	.fixed_ctl	= SNR_IMC_MMIO_PMON_FIXED_CTL,
	.event_descs	= snr_uncore_imc_events,
	.perf_ctr	= SNR_IMC_MMIO_PMON_CTR0,
	.event_ctl	= SNR_IMC_MMIO_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNR_IMC_MMIO_PMON_BOX_CTL,
	.mmio_offset	= SNR_IMC_MMIO_OFFSET,
	.mmio_map_size	= SNR_IMC_MMIO_SIZE,
	.ops		= &snr_uncore_mmio_ops,
	.format_group	= &skx_uncore_format_group,
};
5132
/* Free-running counter groups provided by the SNR IMC. */
enum perf_uncore_snr_imc_freerunning_type_id {
	SNR_IMC_DCLK,
	SNR_IMC_DDR,

	SNR_IMC_FREERUNNING_TYPE_MAX,
};
5139
/* {counter base, box offset, counter offset, num counters, width in bits} */
static struct freerunning_counters snr_imc_freerunning[] = {
	[SNR_IMC_DCLK]	= { 0x22b0, 0x0, 0, 1, 48 },
	[SNR_IMC_DDR]	= { 0x2290, 0x8, 0, 2, 48 },
};
5144
/* Named events for the SNR IMC free-running counters (scale gives MiB). */
static struct uncore_event_desc snr_uncore_imc_freerunning_events[] = {
	INTEL_UNCORE_EVENT_DESC(dclk,		"event=0xff,umask=0x10"),

	INTEL_UNCORE_FR_EVENT_DESC(read,	0x20, 6.103515625e-5),
	INTEL_UNCORE_FR_EVENT_DESC(write,	0x21, 6.103515625e-5),
	{ /* end: all zeroes */ },
};
5152
/* Free-running counters need no enable/disable, only map, read and config. */
static struct intel_uncore_ops snr_uncore_imc_freerunning_ops = {
	.init_box	= snr_uncore_mmio_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};
5159
/* Pseudo-PMU wrapping the SNR IMC free-running counters (dclk + read/write). */
static struct intel_uncore_type snr_uncore_imc_free_running = {
	.name			= "imc_free_running",
	.num_counters		= 3,
	.num_boxes		= 1,
	.num_freerunning_types	= SNR_IMC_FREERUNNING_TYPE_MAX,
	.mmio_map_size		= SNR_IMC_MMIO_SIZE,
	.freerunning		= snr_imc_freerunning,
	.ops			= &snr_uncore_imc_freerunning_ops,
	.event_descs		= snr_uncore_imc_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
5171
/* All MMIO-accessed SNR uncore PMU types, NULL-terminated. */
static struct intel_uncore_type *snr_mmio_uncores[] = {
	&snr_uncore_imc,
	&snr_uncore_imc_free_running,
	NULL,
};
5177
snr_uncore_mmio_init(void)5178 void snr_uncore_mmio_init(void)
5179 {
5180 uncore_mmio_uncores = snr_mmio_uncores;
5181 }
5182
5183 /* end of SNR uncore support */
5184
5185 /* ICX uncore support */
5186
/* Per-CHA MSR offset from the base registers, indexed by PMON idx (non-linear). */
static u64 icx_cha_msr_offsets[] = {
	0x2a0, 0x2ae, 0x2bc, 0x2ca, 0x2d8, 0x2e6, 0x2f4, 0x302, 0x310,
	0x31e, 0x32c, 0x33a, 0x348, 0x356, 0x364, 0x372, 0x380, 0x38e,
	0x3aa, 0x3b8, 0x3c6, 0x3d4, 0x3e2, 0x3f0, 0x3fe, 0x40c, 0x41a,
	0x428, 0x436, 0x444, 0x452, 0x460, 0x46e, 0x47c, 0x0, 0xe,
	0x1c, 0x2a, 0x38, 0x46,
};
5194
icx_cha_hw_config(struct intel_uncore_box * box,struct perf_event * event)5195 static int icx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
5196 {
5197 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
5198 bool tie_en = !!(event->hw.config & SNBEP_CBO_PMON_CTL_TID_EN);
5199
5200 if (tie_en) {
5201 reg1->reg = ICX_C34_MSR_PMON_BOX_FILTER0 +
5202 icx_cha_msr_offsets[box->pmu->pmu_idx];
5203 reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID;
5204 reg1->idx = 0;
5205 }
5206
5207 return 0;
5208 }
5209
/* ICX CHA ops: shares the SNR enable path, with ICX-specific hw_config. */
static struct intel_uncore_ops icx_uncore_chabox_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= snr_cha_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= icx_cha_hw_config,
};
5219
/* ICX CHA PMON; num_boxes is filled in at runtime by icx_uncore_cpu_init(). */
static struct intel_uncore_type icx_uncore_chabox = {
	.name			= "cha",
	.num_counters		= 4,
	.perf_ctr_bits		= 48,
	.event_ctl		= ICX_C34_MSR_PMON_CTL0,
	.perf_ctr		= ICX_C34_MSR_PMON_CTR0,
	.box_ctl		= ICX_C34_MSR_PMON_BOX_CTL,
	.msr_offsets		= icx_cha_msr_offsets,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SNR_CHA_RAW_EVENT_MASK_EXT,
	.constraints		= skx_uncore_chabox_constraints,
	.ops			= &icx_uncore_chabox_ops,
	.format_group		= &snr_uncore_chabox_format_group,
};
5234
/* Per-box MSR offsets shared by the ICX IIO, IRP and M2PCIe units. */
static u64 icx_msr_offsets[] = {
	0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0,
};
5238
/* Counter constraints: some ICX IIO events only run on a subset of counters. */
static struct event_constraint icx_uncore_iio_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x03, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x88, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xd5, 0xc),
	EVENT_CONSTRAINT_END
};
5249
/*
 * sysfs visibility callback for ICX IIO die mapping attributes.
 * On ICX, root bus 0x00 belongs to IIO PMU index 5 (CBDMA/DMI stack).
 */
static umode_t
icx_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
{
	/* Root bus 0x00 is valid only for pmu_idx = 5. */
	return pmu_iio_mapping_visible(kobj, attr, die, 5);
}
5256
/* Attribute group for IIO die mappings; .attrs is filled at set_mapping time. */
static struct attribute_group icx_iio_mapping_group = {
	.is_visible	= icx_iio_mapping_visible,
};
5260
/* attr_update list hooked into the ICX IIO uncore type. */
static const struct attribute_group *icx_iio_attr_update[] = {
	&icx_iio_mapping_group,
	NULL,
};
5265
5266 /*
5267 * ICX has a static mapping of stack IDs from SAD_CONTROL_CFG notation to PMON
5268 */
/* ICX IIO stack ids in PMON numbering. */
enum {
	ICX_PCIE1_PMON_ID,
	ICX_PCIE2_PMON_ID,
	ICX_PCIE3_PMON_ID,
	ICX_PCIE4_PMON_ID,
	ICX_PCIE5_PMON_ID,
	ICX_CBDMA_DMI_PMON_ID
};
5277
/* Indexed by SAD_CONTROL_CFG stack id; value is the PMON stack id. */
static u8 icx_sad_pmon_mapping[] = {
	ICX_CBDMA_DMI_PMON_ID,
	ICX_PCIE1_PMON_ID,
	ICX_PCIE2_PMON_ID,
	ICX_PCIE3_PMON_ID,
	ICX_PCIE4_PMON_ID,
	ICX_PCIE5_PMON_ID,
};
5286
icx_iio_get_topology(struct intel_uncore_type * type)5287 static int icx_iio_get_topology(struct intel_uncore_type *type)
5288 {
5289 return sad_cfg_iio_topology(type, icx_sad_pmon_mapping);
5290 }
5291
icx_iio_set_mapping(struct intel_uncore_type * type)5292 static void icx_iio_set_mapping(struct intel_uncore_type *type)
5293 {
5294 /* Detect ICX-D system. This case is not supported */
5295 if (boot_cpu_data.x86_vfm == INTEL_ICELAKE_D) {
5296 pmu_clear_mapping_attr(type->attr_update, &icx_iio_mapping_group);
5297 return;
5298 }
5299 pmu_iio_set_mapping(type, &icx_iio_mapping_group);
5300 }
5301
icx_iio_cleanup_mapping(struct intel_uncore_type * type)5302 static void icx_iio_cleanup_mapping(struct intel_uncore_type *type)
5303 {
5304 pmu_cleanup_mapping(type, &icx_iio_mapping_group);
5305 }
5306
/* ICX IIO PMON: 6 stacks with per-box offset table and mapping hooks. */
static struct intel_uncore_type icx_uncore_iio = {
	.name			= "iio",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= ICX_IIO_MSR_PMON_CTL0,
	.perf_ctr		= ICX_IIO_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
	.box_ctl		= ICX_IIO_MSR_PMON_BOX_CTL,
	.msr_offsets		= icx_msr_offsets,
	.constraints		= icx_uncore_iio_constraints,
	.ops			= &skx_uncore_iio_ops,
	.format_group		= &snr_uncore_iio_format_group,
	.attr_update		= icx_iio_attr_update,
	.get_topology		= icx_iio_get_topology,
	.set_mapping		= icx_iio_set_mapping,
	.cleanup_mapping	= icx_iio_cleanup_mapping,
};
5326
/* ICX IRP PMON: one IRP per IIO stack (6 boxes), 2 counters each. */
static struct intel_uncore_type icx_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= ICX_IRP0_MSR_PMON_CTL0,
	.perf_ctr		= ICX_IRP0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= ICX_IRP0_MSR_PMON_BOX_CTL,
	.msr_offsets		= icx_msr_offsets,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_format_group,
};
5340
/* Counter constraints for ICX M2PCIe events. */
static struct event_constraint icx_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	EVENT_CONSTRAINT_END
};
5347
/* ICX M2PCIe PMON: mesh-to-PCIe traffic counters, one box per IIO stack. */
static struct intel_uncore_type icx_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters	= 4,
	.num_boxes	= 6,
	.perf_ctr_bits	= 48,
	.event_ctl	= ICX_M2PCIE_MSR_PMON_CTL0,
	.perf_ctr	= ICX_M2PCIE_MSR_PMON_CTR0,
	.box_ctl	= ICX_M2PCIE_MSR_PMON_BOX_CTL,
	.msr_offsets	= icx_msr_offsets,
	.constraints	= icx_uncore_m2pcie_constraints,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_format_group,
};
5362
/* Free-running counter groups provided by the ICX IIO stacks. */
enum perf_uncore_icx_iio_freerunning_type_id {
	ICX_IIO_MSR_IOCLK,
	ICX_IIO_MSR_BW_IN,

	ICX_IIO_FREERUNNING_TYPE_MAX,
};
5369
/* Per-box MSR offsets for the ICX IIO clock free-running counters. */
static unsigned icx_iio_clk_freerunning_box_offsets[] = {
	0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0,
};
5373
/* Per-box MSR offsets for the ICX IIO bandwidth free-running counters. */
static unsigned icx_iio_bw_freerunning_box_offsets[] = {
	0x0, 0x10, 0x20, 0x90, 0xa0, 0xb0,
};
5377
/* {base, box offset, ctr offset, num ctrs, bits, per-box offset table} */
static struct freerunning_counters icx_iio_freerunning[] = {
	[ICX_IIO_MSR_IOCLK]	= { 0xa55, 0x1, 0x20, 1, 48, icx_iio_clk_freerunning_box_offsets },
	[ICX_IIO_MSR_BW_IN]	= { 0xaa0, 0x1, 0x10, 8, 48, icx_iio_bw_freerunning_box_offsets },
};
5382
/* Pseudo-PMU wrapping the ICX IIO free-running counters; reuses SNR events. */
static struct intel_uncore_type icx_uncore_iio_free_running = {
	.name			= "iio_free_running",
	.num_counters		= 9,
	.num_boxes		= 6,
	.num_freerunning_types	= ICX_IIO_FREERUNNING_TYPE_MAX,
	.freerunning		= icx_iio_freerunning,
	.ops			= &skx_uncore_iio_freerunning_ops,
	.event_descs		= snr_uncore_iio_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
5393
/* All MSR-accessed ICX uncore PMU types; UBOX and PCU are shared with SKX. */
static struct intel_uncore_type *icx_msr_uncores[] = {
	&skx_uncore_ubox,
	&icx_uncore_chabox,
	&icx_uncore_iio,
	&icx_uncore_irp,
	&icx_uncore_m2pcie,
	&skx_uncore_pcu,
	&icx_uncore_iio_free_running,
	NULL,
};
5404
5405 /*
5406 * To determine the number of CHAs, it should read CAPID6(Low) and CAPID7 (High)
5407 * registers which located at Device 30, Function 3
5408 */
5409 #define ICX_CAPID6 0x9c
5410 #define ICX_CAPID7 0xa0
5411
icx_count_chabox(void)5412 static u64 icx_count_chabox(void)
5413 {
5414 struct pci_dev *dev = NULL;
5415 u64 caps = 0;
5416
5417 dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x345b, dev);
5418 if (!dev)
5419 goto out;
5420
5421 pci_read_config_dword(dev, ICX_CAPID6, (u32 *)&caps);
5422 pci_read_config_dword(dev, ICX_CAPID7, (u32 *)&caps + 1);
5423 out:
5424 pci_dev_put(dev);
5425 return hweight64(caps);
5426 }
5427
icx_uncore_cpu_init(void)5428 void icx_uncore_cpu_init(void)
5429 {
5430 u64 num_boxes = icx_count_chabox();
5431
5432 if (WARN_ON(num_boxes > ARRAY_SIZE(icx_cha_msr_offsets)))
5433 return;
5434 icx_uncore_chabox.num_boxes = num_boxes;
5435 uncore_msr_uncores = icx_msr_uncores;
5436 }
5437
/* ICX M2M PMON: 4 boxes, reusing the SNR register layout and PCI ops. */
static struct intel_uncore_type icx_uncore_m2m = {
	.name		= "m2m",
	.num_counters   = 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SNR_M2M_PCI_PMON_CTR0,
	.event_ctl	= SNR_M2M_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext	= SNR_M2M_PCI_PMON_UMASK_EXT,
	.box_ctl	= SNR_M2M_PCI_PMON_BOX_CTL,
	.ops		= &snr_m2m_uncore_pci_ops,
	.format_group	= &snr_m2m_uncore_format_group,
};
5451
/* sysfs "format" attributes for ICX UPI events (widest umask extension). */
static struct attribute *icx_upi_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext4.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};
5460
/* Groups the ICX UPI format attributes under events/<pmu>/format. */
static const struct attribute_group icx_upi_uncore_format_group = {
	.name = "format",
	.attrs = icx_upi_uncore_formats_attr,
};
5465
5466 #define ICX_UPI_REGS_ADDR_DEVICE_LINK0 0x02
5467 #define ICX_UPI_REGS_ADDR_FUNCTION 0x01
5468
discover_upi_topology(struct intel_uncore_type * type,int ubox_did,int dev_link0)5469 static int discover_upi_topology(struct intel_uncore_type *type, int ubox_did, int dev_link0)
5470 {
5471 struct pci_dev *ubox = NULL;
5472 struct pci_dev *dev = NULL;
5473 u32 nid, gid;
5474 int idx, lgc_pkg, ret = -EPERM;
5475 struct intel_uncore_topology *upi;
5476 unsigned int devfn;
5477
5478 /* GIDNIDMAP method supports machines which have less than 8 sockets. */
5479 if (uncore_max_dies() > 8)
5480 goto err;
5481
5482 while ((ubox = pci_get_device(PCI_VENDOR_ID_INTEL, ubox_did, ubox))) {
5483 ret = upi_nodeid_groupid(ubox, SKX_CPUNODEID, SKX_GIDNIDMAP, &nid, &gid);
5484 if (ret) {
5485 ret = pcibios_err_to_errno(ret);
5486 break;
5487 }
5488
5489 lgc_pkg = topology_gidnid_map(nid, gid);
5490 if (lgc_pkg < 0) {
5491 ret = -EPERM;
5492 goto err;
5493 }
5494 for (idx = 0; idx < type->num_boxes; idx++) {
5495 upi = &type->topology[lgc_pkg][idx];
5496 devfn = PCI_DEVFN(dev_link0 + idx, ICX_UPI_REGS_ADDR_FUNCTION);
5497 dev = pci_get_domain_bus_and_slot(pci_domain_nr(ubox->bus),
5498 ubox->bus->number,
5499 devfn);
5500 if (dev) {
5501 ret = upi_fill_topology(dev, upi, idx);
5502 if (ret)
5503 goto err;
5504 }
5505 }
5506 }
5507 err:
5508 pci_dev_put(ubox);
5509 pci_dev_put(dev);
5510 return ret;
5511 }
5512
icx_upi_get_topology(struct intel_uncore_type * type)5513 static int icx_upi_get_topology(struct intel_uncore_type *type)
5514 {
5515 return discover_upi_topology(type, ICX_UBOX_DID, ICX_UPI_REGS_ADDR_DEVICE_LINK0);
5516 }
5517
5518 static struct attribute_group icx_upi_mapping_group = {
5519 .is_visible = skx_upi_mapping_visible,
5520 };
5521
5522 static const struct attribute_group *icx_upi_attr_update[] = {
5523 &icx_upi_mapping_group,
5524 NULL
5525 };
5526
icx_upi_set_mapping(struct intel_uncore_type * type)5527 static void icx_upi_set_mapping(struct intel_uncore_type *type)
5528 {
5529 pmu_upi_set_mapping(type, &icx_upi_mapping_group);
5530 }
5531
icx_upi_cleanup_mapping(struct intel_uncore_type * type)5532 static void icx_upi_cleanup_mapping(struct intel_uncore_type *type)
5533 {
5534 pmu_cleanup_mapping(type, &icx_upi_mapping_group);
5535 }
5536
/* ICX UPI PMON: 4 counters per link, 3 links, with topology mapping hooks. */
static struct intel_uncore_type icx_uncore_upi = {
	.name			= "upi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= ICX_UPI_PCI_PMON_CTR0,
	.event_ctl		= ICX_UPI_PCI_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= ICX_UPI_CTL_UMASK_EXT,
	.box_ctl		= ICX_UPI_PCI_PMON_BOX_CTL,
	.ops			= &skx_upi_uncore_pci_ops,
	.format_group		= &icx_upi_uncore_format_group,
	.attr_update		= icx_upi_attr_update,
	.get_topology		= icx_upi_get_topology,
	.set_mapping		= icx_upi_set_mapping,
	.cleanup_mapping	= icx_upi_cleanup_mapping,
};
5554
/* Events restricted to specific counters on the ICX M3UPI PMON. */
static struct event_constraint icx_uncore_m3upi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT_RANGE(0x1c, 0x1f, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x4e, 0x50, 0x7),
	EVENT_CONSTRAINT_END
};

/* ICX M3UPI (mesh-to-UPI) PMON: one box per UPI link. */
static struct intel_uncore_type icx_uncore_m3upi = {
	.name		= "m3upi",
	.num_counters	= 4,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.perf_ctr	= ICX_M3UPI_PCI_PMON_CTR0,
	.event_ctl	= ICX_M3UPI_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= ICX_M3UPI_PCI_PMON_BOX_CTL,
	.constraints	= icx_uncore_m3upi_constraints,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
5575
/* Indices into icx_pci_uncores[], referenced by the PCI id driver_data. */
enum {
	ICX_PCI_UNCORE_M2M,
	ICX_PCI_UNCORE_UPI,
	ICX_PCI_UNCORE_M3UPI,
};

static struct intel_uncore_type *icx_pci_uncores[] = {
	[ICX_PCI_UNCORE_M2M]	= &icx_uncore_m2m,
	[ICX_PCI_UNCORE_UPI]	= &icx_uncore_upi,
	[ICX_PCI_UNCORE_M3UPI]	= &icx_uncore_m3upi,
	NULL,
};
5588
/*
 * PCI device table for ICX uncore PMON units.  driver_data encodes the
 * fixed (device, function) location, the type index above, and the box id.
 */
static const struct pci_device_id icx_uncore_pci_ids[] = {
	{ /* M2M 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, ICX_PCI_UNCORE_M2M, 0),
	},
	{ /* M2M 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 0, ICX_PCI_UNCORE_M2M, 1),
	},
	{ /* M2M 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, ICX_PCI_UNCORE_M2M, 2),
	},
	{ /* M2M 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, ICX_PCI_UNCORE_M2M, 3),
	},
	{ /* UPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(2, 1, ICX_PCI_UNCORE_UPI, 0),
	},
	{ /* UPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(3, 1, ICX_PCI_UNCORE_UPI, 1),
	},
	{ /* UPI Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 1, ICX_PCI_UNCORE_UPI, 2),
	},
	{ /* M3UPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(5, 1, ICX_PCI_UNCORE_M3UPI, 0),
	},
	{ /* M3UPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(6, 1, ICX_PCI_UNCORE_M3UPI, 1),
	},
	{ /* M3UPI Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(7, 1, ICX_PCI_UNCORE_M3UPI, 2),
	},
	{ /* end: all zeroes */ }
};
5632
static struct pci_driver icx_uncore_pci_driver = {
	.name		= "icx_uncore",
	.id_table	= icx_uncore_pci_ids,
};

/*
 * Register the ICX PCI uncore support.  Builds the pci-bus-to-socket
 * mapping from the UBOX device first; returns a negative errno if the
 * mapping cannot be established.
 */
int icx_uncore_pci_init(void)
{
	/* ICX UBOX DID */
	int ret = snbep_pci2phy_map_init(0x3450, SKX_CPUNODEID,
					 SKX_GIDNIDMAP, true);

	if (ret)
		return ret;

	uncore_pci_uncores = icx_pci_uncores;
	uncore_pci_driver = &icx_uncore_pci_driver;
	return 0;
}
5651
icx_uncore_imc_init_box(struct intel_uncore_box * box)5652 static void icx_uncore_imc_init_box(struct intel_uncore_box *box)
5653 {
5654 unsigned int box_ctl = box->pmu->type->box_ctl +
5655 box->pmu->type->mmio_offset * (box->pmu->pmu_idx % ICX_NUMBER_IMC_CHN);
5656 int mem_offset = (box->pmu->pmu_idx / ICX_NUMBER_IMC_CHN) * ICX_IMC_MEM_STRIDE +
5657 SNR_IMC_MMIO_MEM0_OFFSET;
5658
5659 __snr_uncore_mmio_init_box(box, box_ctl, mem_offset,
5660 SNR_MC_DEVICE_ID);
5661 }
5662
5663 static struct intel_uncore_ops icx_uncore_mmio_ops = {
5664 .init_box = icx_uncore_imc_init_box,
5665 .exit_box = uncore_mmio_exit_box,
5666 .disable_box = snr_uncore_mmio_disable_box,
5667 .enable_box = snr_uncore_mmio_enable_box,
5668 .disable_event = snr_uncore_mmio_disable_event,
5669 .enable_event = snr_uncore_mmio_enable_event,
5670 .read_counter = uncore_mmio_read_counter,
5671 };
5672
/* ICX IMC PMON: 12 channel boxes (4 controllers x 3 channels), MMIO access. */
static struct intel_uncore_type icx_uncore_imc = {
	.name		= "imc",
	.num_counters	= 4,
	.num_boxes	= 12,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNR_IMC_MMIO_PMON_FIXED_CTR,
	.fixed_ctl	= SNR_IMC_MMIO_PMON_FIXED_CTL,
	.event_descs	= snr_uncore_imc_events,
	.perf_ctr	= SNR_IMC_MMIO_PMON_CTR0,
	.event_ctl	= SNR_IMC_MMIO_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNR_IMC_MMIO_PMON_BOX_CTL,
	.mmio_offset	= SNR_IMC_MMIO_OFFSET,
	.mmio_map_size	= SNR_IMC_MMIO_SIZE,
	.ops		= &icx_uncore_mmio_ops,
	.format_group	= &skx_uncore_format_group,
};
5691
/* Free-running counter groups exposed by the ICX IMC. */
enum perf_uncore_icx_imc_freerunning_type_id {
	ICX_IMC_DCLK,
	ICX_IMC_DDR,
	ICX_IMC_DDRT,

	ICX_IMC_FREERUNNING_TYPE_MAX,
};

/* { counter base, box offset, counter offset, num counters, bits } */
static struct freerunning_counters icx_imc_freerunning[] = {
	[ICX_IMC_DCLK]	= { 0x22b0, 0x0, 0, 1, 48 },
	[ICX_IMC_DDR]	= { 0x2290, 0x8, 0, 2, 48 },
	[ICX_IMC_DDRT]	= { 0x22a0, 0x8, 0, 2, 48 },
};

/* Scale 6.103515625e-5 converts 64-byte-line counts to MiB. */
static struct uncore_event_desc icx_uncore_imc_freerunning_events[] = {
	INTEL_UNCORE_EVENT_DESC(dclk,			"event=0xff,umask=0x10"),

	INTEL_UNCORE_FR_EVENT_DESC(read,		0x20, 6.103515625e-5),
	INTEL_UNCORE_FR_EVENT_DESC(write,		0x21, 6.103515625e-5),
	INTEL_UNCORE_FR_EVENT_DESC(ddrt_read,		0x30, 6.103515625e-5),
	INTEL_UNCORE_FR_EVENT_DESC(ddrt_write,		0x31, 6.103515625e-5),
	{ /* end: all zeroes */ },
};
5715
icx_uncore_imc_freerunning_init_box(struct intel_uncore_box * box)5716 static void icx_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
5717 {
5718 int mem_offset = box->pmu->pmu_idx * ICX_IMC_MEM_STRIDE +
5719 SNR_IMC_MMIO_MEM0_OFFSET;
5720
5721 snr_uncore_mmio_map(box, uncore_mmio_box_ctl(box),
5722 mem_offset, SNR_MC_DEVICE_ID);
5723 }
5724
5725 static struct intel_uncore_ops icx_uncore_imc_freerunning_ops = {
5726 .init_box = icx_uncore_imc_freerunning_init_box,
5727 .exit_box = uncore_mmio_exit_box,
5728 .read_counter = uncore_mmio_read_counter,
5729 .hw_config = uncore_freerunning_hw_config,
5730 };
5731
5732 static struct intel_uncore_type icx_uncore_imc_free_running = {
5733 .name = "imc_free_running",
5734 .num_counters = 5,
5735 .num_boxes = 4,
5736 .num_freerunning_types = ICX_IMC_FREERUNNING_TYPE_MAX,
5737 .mmio_map_size = SNR_IMC_MMIO_SIZE,
5738 .freerunning = icx_imc_freerunning,
5739 .ops = &icx_uncore_imc_freerunning_ops,
5740 .event_descs = icx_uncore_imc_freerunning_events,
5741 .format_group = &skx_uncore_iio_freerunning_format_group,
5742 };
5743
static struct intel_uncore_type *icx_mmio_uncores[] = {
	&icx_uncore_imc,
	&icx_uncore_imc_free_running,
	NULL,
};

/* Register the ICX MMIO-based uncore types (IMC + free-running IMC). */
void icx_uncore_mmio_init(void)
{
	uncore_mmio_uncores = icx_mmio_uncores;
}
5754
5755 /* end of ICX uncore support */
5756
5757 /* SPR uncore support */
5758
spr_uncore_msr_enable_event(struct intel_uncore_box * box,struct perf_event * event)5759 static void spr_uncore_msr_enable_event(struct intel_uncore_box *box,
5760 struct perf_event *event)
5761 {
5762 struct hw_perf_event *hwc = &event->hw;
5763 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
5764
5765 if (reg1->idx != EXTRA_REG_NONE)
5766 wrmsrq(reg1->reg, reg1->config);
5767
5768 wrmsrq(hwc->config_base, hwc->config);
5769 }
5770
spr_uncore_msr_disable_event(struct intel_uncore_box * box,struct perf_event * event)5771 static void spr_uncore_msr_disable_event(struct intel_uncore_box *box,
5772 struct perf_event *event)
5773 {
5774 struct hw_perf_event *hwc = &event->hw;
5775 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
5776
5777 if (reg1->idx != EXTRA_REG_NONE)
5778 wrmsrq(reg1->reg, 0);
5779
5780 wrmsrq(hwc->config_base, 0);
5781 }
5782
spr_cha_hw_config(struct intel_uncore_box * box,struct perf_event * event)5783 static int spr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
5784 {
5785 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
5786 bool tie_en = !!(event->hw.config & SPR_CHA_PMON_CTL_TID_EN);
5787 struct intel_uncore_type *type = box->pmu->type;
5788 int id = intel_uncore_find_discovery_unit_id(type->boxes, -1, box->pmu->pmu_idx);
5789
5790 if (tie_en) {
5791 reg1->reg = SPR_C0_MSR_PMON_BOX_FILTER0 +
5792 HSWEP_CBO_MSR_OFFSET * id;
5793 reg1->config = event->attr.config1 & SPR_CHA_PMON_BOX_FILTER_TID;
5794 reg1->idx = 0;
5795 }
5796
5797 return 0;
5798 }
5799
/* SPR CHA ops: generic discovery-based MSR access plus TID filter handling. */
static struct intel_uncore_ops spr_uncore_chabox_ops = {
	.init_box		= intel_generic_uncore_msr_init_box,
	.disable_box		= intel_generic_uncore_msr_disable_box,
	.enable_box		= intel_generic_uncore_msr_enable_box,
	.disable_event		= spr_uncore_msr_disable_event,
	.enable_event		= spr_uncore_msr_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= spr_cha_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};

/* sysfs "format" attributes for the SPR CHA PMU. */
static struct attribute *spr_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext5.attr,
	&format_attr_tid_en2.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid5.attr,
	NULL,
};
static const struct attribute_group spr_uncore_chabox_format_group = {
	.name = "format",
	.attrs = spr_uncore_cha_formats_attr,
};
5826
alias_show(struct device * dev,struct device_attribute * attr,char * buf)5827 static ssize_t alias_show(struct device *dev,
5828 struct device_attribute *attr,
5829 char *buf)
5830 {
5831 struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(dev);
5832 char pmu_name[UNCORE_PMU_NAME_LEN];
5833
5834 uncore_get_alias_name(pmu_name, pmu);
5835 return sysfs_emit(buf, "%s\n", pmu_name);
5836 }
5837
5838 static DEVICE_ATTR_RO(alias);
5839
5840 static struct attribute *uncore_alias_attrs[] = {
5841 &dev_attr_alias.attr,
5842 NULL
5843 };
5844
5845 ATTRIBUTE_GROUPS(uncore_alias);
5846
/*
 * SPR CHA: counter layout comes from the discovery table; only the
 * customized fields below override the generated type.
 */
static struct intel_uncore_type spr_uncore_chabox = {
	.name			= "cha",
	.event_mask		= SPR_CHA_PMON_EVENT_MASK,
	.event_mask_ext		= SPR_CHA_EVENT_MASK_EXT,
	.num_shared_regs	= 1,
	.constraints		= skx_uncore_chabox_constraints,
	.ops			= &spr_uncore_chabox_ops,
	.format_group		= &spr_uncore_chabox_format_group,
	.attr_update		= uncore_alias_groups,
};

/* SPR IIO: discovery-table based, with ICX-style constraints. */
static struct intel_uncore_type spr_uncore_iio = {
	.name			= "iio",
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
	.format_group		= &snr_uncore_iio_format_group,
	.attr_update		= uncore_alias_groups,
	.constraints		= icx_uncore_iio_constraints,
};
5866
/* Raw (default) event format shared by most SPR uncore types. */
static struct attribute *spr_uncore_raw_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext4.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group spr_uncore_raw_format_group = {
	.name = "format",
	.attrs = spr_uncore_raw_formats_attr,
};

/* Common event mask/format/alias fields shared by the SPR raw-format types. */
#define SPR_UNCORE_COMMON_FORMAT()				\
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,	\
	.event_mask_ext		= SPR_RAW_EVENT_MASK_EXT,	\
	.format_group		= &spr_uncore_raw_format_group,	\
	.attr_update		= uncore_alias_groups

static struct intel_uncore_type spr_uncore_irp = {
	SPR_UNCORE_COMMON_FORMAT(),
	.name			= "irp",

};
5892
/* Events limited to the first two counters on the SPR M2PCIe PMON. */
static struct event_constraint spr_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type spr_uncore_m2pcie = {
	SPR_UNCORE_COMMON_FORMAT(),
	.name			= "m2pcie",
	.constraints		= spr_uncore_m2pcie_constraints,
};

/* SPR PCU: everything but the name/alias comes from the discovery table. */
static struct intel_uncore_type spr_uncore_pcu = {
	.name			= "pcu",
	.attr_update		= uncore_alias_groups,
};
5909
spr_uncore_mmio_enable_event(struct intel_uncore_box * box,struct perf_event * event)5910 static void spr_uncore_mmio_enable_event(struct intel_uncore_box *box,
5911 struct perf_event *event)
5912 {
5913 struct hw_perf_event *hwc = &event->hw;
5914
5915 if (!box->io_addr)
5916 return;
5917
5918 if (uncore_pmc_fixed(hwc->idx))
5919 writel(SNBEP_PMON_CTL_EN, box->io_addr + hwc->config_base);
5920 else
5921 writel(hwc->config, box->io_addr + hwc->config_base);
5922 }
5923
5924 static struct intel_uncore_ops spr_uncore_mmio_ops = {
5925 .init_box = intel_generic_uncore_mmio_init_box,
5926 .exit_box = uncore_mmio_exit_box,
5927 .disable_box = intel_generic_uncore_mmio_disable_box,
5928 .enable_box = intel_generic_uncore_mmio_enable_box,
5929 .disable_event = intel_generic_uncore_mmio_disable_event,
5930 .enable_event = spr_uncore_mmio_enable_event,
5931 .read_counter = uncore_mmio_read_counter,
5932 };
5933
/* SPR IMC events; CAS counts are scaled from 64-byte lines to MiB. */
static struct uncore_event_desc spr_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,		"event=0x01,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,		"event=0x05,umask=0xcf"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale,	"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write,	"event=0x05,umask=0xf0"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale,	"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit,	"MiB"),
	{ /* end: all zeroes */ },
};

/* Common format plus the SPR MMIO access ops. */
#define SPR_UNCORE_MMIO_COMMON_FORMAT()				\
	SPR_UNCORE_COMMON_FORMAT(),				\
	.ops			= &spr_uncore_mmio_ops

static struct intel_uncore_type spr_uncore_imc = {
	SPR_UNCORE_MMIO_COMMON_FORMAT(),
	.name			= "imc",
	.fixed_ctr_bits		= 48,
	.fixed_ctr		= SNR_IMC_MMIO_PMON_FIXED_CTR,
	.fixed_ctl		= SNR_IMC_MMIO_PMON_FIXED_CTL,
	.event_descs		= spr_uncore_imc_events,
};
5957
spr_uncore_pci_enable_event(struct intel_uncore_box * box,struct perf_event * event)5958 static void spr_uncore_pci_enable_event(struct intel_uncore_box *box,
5959 struct perf_event *event)
5960 {
5961 struct pci_dev *pdev = box->pci_dev;
5962 struct hw_perf_event *hwc = &event->hw;
5963
5964 pci_write_config_dword(pdev, hwc->config_base + 4, (u32)(hwc->config >> 32));
5965 pci_write_config_dword(pdev, hwc->config_base, (u32)hwc->config);
5966 }
5967
5968 static struct intel_uncore_ops spr_uncore_pci_ops = {
5969 .init_box = intel_generic_uncore_pci_init_box,
5970 .disable_box = intel_generic_uncore_pci_disable_box,
5971 .enable_box = intel_generic_uncore_pci_enable_box,
5972 .disable_event = intel_generic_uncore_pci_disable_event,
5973 .enable_event = spr_uncore_pci_enable_event,
5974 .read_counter = intel_generic_uncore_pci_read_counter,
5975 };
5976
5977 #define SPR_UNCORE_PCI_COMMON_FORMAT() \
5978 SPR_UNCORE_COMMON_FORMAT(), \
5979 .ops = &spr_uncore_pci_ops
5980
5981 static struct intel_uncore_type spr_uncore_m2m = {
5982 SPR_UNCORE_PCI_COMMON_FORMAT(),
5983 .name = "m2m",
5984 };
5985
/* UPI link-mapping attributes; visibility decided at set_mapping time. */
static struct attribute_group spr_upi_mapping_group = {
	.is_visible	= skx_upi_mapping_visible,
};

static const struct attribute_group *spr_upi_attr_update[] = {
	&uncore_alias_group,
	&spr_upi_mapping_group,
	NULL
};

/* On SPR the first UPI link lives at PCI device 1 (vs. 2 on ICX). */
#define SPR_UPI_REGS_ADDR_DEVICE_LINK0	0x01

static void spr_upi_set_mapping(struct intel_uncore_type *type)
{
	pmu_upi_set_mapping(type, &spr_upi_mapping_group);
}

static void spr_upi_cleanup_mapping(struct intel_uncore_type *type)
{
	pmu_cleanup_mapping(type, &spr_upi_mapping_group);
}

/* Discover SPR UPI link topology via the per-socket UBOX device. */
static int spr_upi_get_topology(struct intel_uncore_type *type)
{
	return discover_upi_topology(type, SPR_UBOX_DID, SPR_UPI_REGS_ADDR_DEVICE_LINK0);
}

static struct intel_uncore_type spr_uncore_mdf = {
	SPR_UNCORE_COMMON_FORMAT(),
	.name			= "mdf",
};
6017
spr_uncore_mmio_offs8_init_box(struct intel_uncore_box * box)6018 static void spr_uncore_mmio_offs8_init_box(struct intel_uncore_box *box)
6019 {
6020 __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
6021 intel_generic_uncore_mmio_init_box(box);
6022 }
6023
6024 static struct intel_uncore_ops spr_uncore_mmio_offs8_ops = {
6025 .init_box = spr_uncore_mmio_offs8_init_box,
6026 .exit_box = uncore_mmio_exit_box,
6027 .disable_box = intel_generic_uncore_mmio_disable_box,
6028 .enable_box = intel_generic_uncore_mmio_enable_box,
6029 .disable_event = intel_generic_uncore_mmio_disable_event,
6030 .enable_event = spr_uncore_mmio_enable_event,
6031 .read_counter = uncore_mmio_read_counter,
6032 };
6033
6034 #define SPR_UNCORE_MMIO_OFFS8_COMMON_FORMAT() \
6035 SPR_UNCORE_COMMON_FORMAT(), \
6036 .ops = &spr_uncore_mmio_offs8_ops
6037
/* CXLCM events split between the lower (0x0f) and upper (0xf0) counters. */
static struct event_constraint spr_uncore_cxlcm_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x02, 0x0f),
	UNCORE_EVENT_CONSTRAINT(0x05, 0x0f),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x40, 0x43, 0xf0),
	UNCORE_EVENT_CONSTRAINT(0x4b, 0xf0),
	UNCORE_EVENT_CONSTRAINT(0x52, 0xf0),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type spr_uncore_cxlcm = {
	SPR_UNCORE_MMIO_OFFS8_COMMON_FORMAT(),
	.name			= "cxlcm",
	.constraints		= spr_uncore_cxlcm_constraints,
};

static struct intel_uncore_type spr_uncore_cxldp = {
	SPR_UNCORE_MMIO_OFFS8_COMMON_FORMAT(),
	.name			= "cxldp",
};

static struct intel_uncore_type spr_uncore_hbm = {
	SPR_UNCORE_COMMON_FORMAT(),
	.name			= "hbm",
};
6062
/* Discovery-table type ids that have customized definitions below. */
#define UNCORE_SPR_NUM_UNCORE_TYPES		15
#define UNCORE_SPR_CHA				0
#define UNCORE_SPR_IIO				1
#define UNCORE_SPR_IMC				6
#define UNCORE_SPR_UPI				8
#define UNCORE_SPR_M3UPI			9

/*
 * The uncore units, which are supported by the discovery table,
 * are defined here.  Index == discovery-table type id; NULL entries
 * keep the generic definition unmodified.
 */
static struct intel_uncore_type *spr_uncores[UNCORE_SPR_NUM_UNCORE_TYPES] = {
	&spr_uncore_chabox,
	&spr_uncore_iio,
	&spr_uncore_irp,
	&spr_uncore_m2pcie,
	&spr_uncore_pcu,
	NULL,
	&spr_uncore_imc,
	&spr_uncore_m2m,
	NULL,
	NULL,
	NULL,
	&spr_uncore_mdf,
	&spr_uncore_cxlcm,
	&spr_uncore_cxldp,
	&spr_uncore_hbm,
};
6091
/*
 * The uncore units, which are not supported by the discovery table,
 * are implemented from here.
 */
#define SPR_UNCORE_UPI_NUM_BOXES	4

/* Per-link register-block offsets for the manually defined UPI/M3UPI. */
static u64 spr_upi_pci_offsets[SPR_UNCORE_UPI_NUM_BOXES] = {
	0, 0x8000, 0x10000, 0x18000
};

/*
 * Free the manually built discovery-unit rb-tree attached to @type
 * (created by spr_update_device_location()).
 */
static void spr_extra_boxes_cleanup(struct intel_uncore_type *type)
{
	struct intel_uncore_discovery_unit *pos;
	struct rb_node *node;

	if (!type->boxes)
		return;

	while (!RB_EMPTY_ROOT(type->boxes)) {
		node = rb_first(type->boxes);
		pos = rb_entry(node, struct intel_uncore_discovery_unit, node);
		rb_erase(node, type->boxes);
		kfree(pos);
	}
	kfree(type->boxes);
	type->boxes = NULL;
}
6119
/*
 * SPR UPI PMON, defined manually because the discovery table entry is
 * broken on some variants.  Register offsets are relative to box_ctl;
 * per-link blocks are located via spr_upi_pci_offsets.
 */
static struct intel_uncore_type spr_uncore_upi = {
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SPR_RAW_EVENT_MASK_EXT,
	.format_group		= &spr_uncore_raw_format_group,
	.ops			= &spr_uncore_pci_ops,
	.name			= "upi",
	.attr_update		= spr_upi_attr_update,
	.get_topology		= spr_upi_get_topology,
	.set_mapping		= spr_upi_set_mapping,
	.cleanup_mapping	= spr_upi_cleanup_mapping,
	.type_id		= UNCORE_SPR_UPI,
	.num_counters		= 4,
	.num_boxes		= SPR_UNCORE_UPI_NUM_BOXES,
	.perf_ctr_bits		= 48,
	.perf_ctr		= ICX_UPI_PCI_PMON_CTR0 - ICX_UPI_PCI_PMON_BOX_CTL,
	.event_ctl		= ICX_UPI_PCI_PMON_CTL0 - ICX_UPI_PCI_PMON_BOX_CTL,
	.box_ctl		= ICX_UPI_PCI_PMON_BOX_CTL,
	.pci_offsets		= spr_upi_pci_offsets,
	.cleanup_extra_boxes	= spr_extra_boxes_cleanup,
};

/* SPR M3UPI PMON, likewise manually defined (one box per UPI link). */
static struct intel_uncore_type spr_uncore_m3upi = {
	SPR_UNCORE_PCI_COMMON_FORMAT(),
	.name			= "m3upi",
	.type_id		= UNCORE_SPR_M3UPI,
	.num_counters		= 4,
	.num_boxes		= SPR_UNCORE_UPI_NUM_BOXES,
	.perf_ctr_bits		= 48,
	.perf_ctr		= ICX_M3UPI_PCI_PMON_CTR0 - ICX_M3UPI_PCI_PMON_BOX_CTL,
	.event_ctl		= ICX_M3UPI_PCI_PMON_CTL0 - ICX_M3UPI_PCI_PMON_BOX_CTL,
	.box_ctl		= ICX_M3UPI_PCI_PMON_BOX_CTL,
	.pci_offsets		= spr_upi_pci_offsets,
	.constraints		= icx_uncore_m3upi_constraints,
	.cleanup_extra_boxes	= spr_extra_boxes_cleanup,
};
6155
/* Free-running counter groups exposed by the SPR IIO stack (MSR based). */
enum perf_uncore_spr_iio_freerunning_type_id {
	SPR_IIO_MSR_IOCLK,
	SPR_IIO_MSR_BW_IN,
	SPR_IIO_MSR_BW_OUT,

	SPR_IIO_FREERUNNING_TYPE_MAX,
};

/* { counter base, counter offset, box offset, num counters, bits } */
static struct freerunning_counters spr_iio_freerunning[] = {
	[SPR_IIO_MSR_IOCLK]	= { 0x340e, 0x1, 0x10, 1, 48 },
	[SPR_IIO_MSR_BW_IN]	= { 0x3800, 0x1, 0x10, 8, 48 },
	[SPR_IIO_MSR_BW_OUT]	= { 0x3808, 0x1, 0x10, 8, 48 },
};

/* num_boxes is filled in at init from the discovered IIO unit count. */
static struct intel_uncore_type spr_uncore_iio_free_running = {
	.name			= "iio_free_running",
	.num_counters		= 17,
	.num_freerunning_types	= SPR_IIO_FREERUNNING_TYPE_MAX,
	.freerunning		= spr_iio_freerunning,
	.ops			= &skx_uncore_iio_freerunning_ops,
	.event_descs		= snr_uncore_iio_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
6179
/* Free-running counter groups exposed by the SPR IMC (MMIO based). */
enum perf_uncore_spr_imc_freerunning_type_id {
	SPR_IMC_DCLK,
	SPR_IMC_PQ_CYCLES,

	SPR_IMC_FREERUNNING_TYPE_MAX,
};

/* { counter base, counter offset, box offset, num counters, bits } */
static struct freerunning_counters spr_imc_freerunning[] = {
	[SPR_IMC_DCLK]		= { 0x22b0, 0x0, 0, 1, 48 },
	[SPR_IMC_PQ_CYCLES]	= { 0x2318, 0x8, 0, 2, 48 },
};

static struct uncore_event_desc spr_uncore_imc_freerunning_events[] = {
	INTEL_UNCORE_EVENT_DESC(dclk,			"event=0xff,umask=0x10"),

	INTEL_UNCORE_EVENT_DESC(rpq_cycles,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(wpq_cycles,		"event=0xff,umask=0x21"),
	{ /* end: all zeroes */ },
};
6199
/* PCI device id of the SPR memory controller used to locate the MMIO BAR. */
#define SPR_MC_DEVICE_ID	0x3251

/* Map the free-running counter MMIO; one box per memory controller. */
static void spr_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
{
	int mem_offset = box->pmu->pmu_idx * ICX_IMC_MEM_STRIDE + SNR_IMC_MMIO_MEM0_OFFSET;

	snr_uncore_mmio_map(box, uncore_mmio_box_ctl(box),
			    mem_offset, SPR_MC_DEVICE_ID);
}

/* Free-running counters need no enable/disable; read-only access. */
static struct intel_uncore_ops spr_uncore_imc_freerunning_ops = {
	.init_box	= spr_uncore_imc_freerunning_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};

/* num_boxes is filled in at init from the discovered IMC channel count. */
static struct intel_uncore_type spr_uncore_imc_free_running = {
	.name			= "imc_free_running",
	.num_counters		= 3,
	.mmio_map_size		= SNR_IMC_MMIO_SIZE,
	.num_freerunning_types	= SPR_IMC_FREERUNNING_TYPE_MAX,
	.freerunning		= spr_imc_freerunning,
	.ops			= &spr_uncore_imc_freerunning_ops,
	.event_descs		= spr_uncore_imc_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
6227
/* Counts of extra (non-discovery) types appended per access method. */
#define UNCORE_SPR_MSR_EXTRA_UNCORES		1
#define UNCORE_SPR_MMIO_EXTRA_UNCORES		1
#define UNCORE_SPR_PCI_EXTRA_UNCORES		2

static struct intel_uncore_type *spr_msr_uncores[UNCORE_SPR_MSR_EXTRA_UNCORES] = {
	&spr_uncore_iio_free_running,
};

static struct intel_uncore_type *spr_mmio_uncores[UNCORE_SPR_MMIO_EXTRA_UNCORES] = {
	&spr_uncore_imc_free_running,
};

static struct intel_uncore_type *spr_pci_uncores[UNCORE_SPR_PCI_EXTRA_UNCORES] = {
	&spr_uncore_upi,
	&spr_uncore_m3upi
};

/* Discovery-table entries to skip; UPI/M3UPI are defined manually above. */
int spr_uncore_units_ignore[] = {
	UNCORE_SPR_UPI,
	UNCORE_SPR_M3UPI,
	UNCORE_IGNORE_END
};
6250
uncore_type_customized_copy(struct intel_uncore_type * to_type,struct intel_uncore_type * from_type)6251 static void uncore_type_customized_copy(struct intel_uncore_type *to_type,
6252 struct intel_uncore_type *from_type)
6253 {
6254 if (!to_type || !from_type)
6255 return;
6256
6257 if (from_type->name)
6258 to_type->name = from_type->name;
6259 if (from_type->fixed_ctr_bits)
6260 to_type->fixed_ctr_bits = from_type->fixed_ctr_bits;
6261 if (from_type->event_mask)
6262 to_type->event_mask = from_type->event_mask;
6263 if (from_type->event_mask_ext)
6264 to_type->event_mask_ext = from_type->event_mask_ext;
6265 if (from_type->fixed_ctr)
6266 to_type->fixed_ctr = from_type->fixed_ctr;
6267 if (from_type->fixed_ctl)
6268 to_type->fixed_ctl = from_type->fixed_ctl;
6269 if (from_type->fixed_ctr_bits)
6270 to_type->fixed_ctr_bits = from_type->fixed_ctr_bits;
6271 if (from_type->num_shared_regs)
6272 to_type->num_shared_regs = from_type->num_shared_regs;
6273 if (from_type->constraints)
6274 to_type->constraints = from_type->constraints;
6275 if (from_type->ops)
6276 to_type->ops = from_type->ops;
6277 if (from_type->event_descs)
6278 to_type->event_descs = from_type->event_descs;
6279 if (from_type->format_group)
6280 to_type->format_group = from_type->format_group;
6281 if (from_type->attr_update)
6282 to_type->attr_update = from_type->attr_update;
6283 if (from_type->set_mapping)
6284 to_type->set_mapping = from_type->set_mapping;
6285 if (from_type->get_topology)
6286 to_type->get_topology = from_type->get_topology;
6287 if (from_type->cleanup_mapping)
6288 to_type->cleanup_mapping = from_type->cleanup_mapping;
6289 if (from_type->mmio_map_size)
6290 to_type->mmio_map_size = from_type->mmio_map_size;
6291 }
6292
/*
 * Build the uncore type list for one access method: generate generic types
 * from the discovery table, overlay the customized fields from @uncores
 * (indexed by type_id, bounded by @max_num_types), then append @num_extra
 * manually defined types from @extra.
 *
 * Returns the NULL-terminated type array (intel_uncore_generic_init_uncores
 * reserves room for the extras).
 */
struct intel_uncore_type **
uncore_get_uncores(enum uncore_access_type type_id, int num_extra,
		   struct intel_uncore_type **extra, int max_num_types,
		   struct intel_uncore_type **uncores)
{
	struct intel_uncore_type **types, **start_types;
	int i;

	start_types = types = intel_uncore_generic_init_uncores(type_id, num_extra);

	/* Only copy the customized features */
	for (; *types; types++) {
		if ((*types)->type_id >= max_num_types)
			continue;
		uncore_type_customized_copy(*types, uncores[(*types)->type_id]);
	}

	for (i = 0; i < num_extra; i++, types++)
		*types = extra[i];

	return start_types;
}
6315
6316 static struct intel_uncore_type *
uncore_find_type_by_id(struct intel_uncore_type ** types,int type_id)6317 uncore_find_type_by_id(struct intel_uncore_type **types, int type_id)
6318 {
6319 for (; *types; types++) {
6320 if (type_id == (*types)->type_id)
6321 return *types;
6322 }
6323
6324 return NULL;
6325 }
6326
uncore_type_max_boxes(struct intel_uncore_type ** types,int type_id)6327 static int uncore_type_max_boxes(struct intel_uncore_type **types,
6328 int type_id)
6329 {
6330 struct intel_uncore_discovery_unit *unit;
6331 struct intel_uncore_type *type;
6332 struct rb_node *node;
6333 int max = 0;
6334
6335 type = uncore_find_type_by_id(types, type_id);
6336 if (!type)
6337 return 0;
6338
6339 for (node = rb_first(type->boxes); node; node = rb_next(node)) {
6340 unit = rb_entry(node, struct intel_uncore_discovery_unit, node);
6341
6342 /*
6343 * on DMR IMH2, the unit id starts from 0x8000,
6344 * and we don't need to count it.
6345 */
6346 if ((unit->id > max) && (unit->id < 0x8000))
6347 max = unit->id;
6348 }
6349 return max + 1;
6350 }
6351
/* MSR reporting the true CHA count (works around a firmware bug). */
#define SPR_MSR_UNC_CBO_CONFIG		0x2FFE

/* Register the SPR MSR-based uncore types and fix up box counts. */
void spr_uncore_cpu_init(void)
{
	struct intel_uncore_type *type;
	u64 num_cbo;

	uncore_msr_uncores = uncore_get_uncores(UNCORE_ACCESS_MSR,
						UNCORE_SPR_MSR_EXTRA_UNCORES,
						spr_msr_uncores,
						UNCORE_SPR_NUM_UNCORE_TYPES,
						spr_uncores);

	type = uncore_find_type_by_id(uncore_msr_uncores, UNCORE_SPR_CHA);
	if (type) {
		/*
		 * The value from the discovery table (stored in the type->num_boxes
		 * of UNCORE_SPR_CHA) is incorrect on some SPR variants because of a
		 * firmware bug. Using the value from SPR_MSR_UNC_CBO_CONFIG to replace it.
		 */
		rdmsrq(SPR_MSR_UNC_CBO_CONFIG, num_cbo);
		/*
		 * The MSR doesn't work on the EMR XCC, but the firmware bug doesn't impact
		 * the EMR XCC. Don't let the value from the MSR replace the existing value.
		 */
		if (num_cbo)
			type->num_boxes = num_cbo;
	}
	spr_uncore_iio_free_running.num_boxes = uncore_type_max_boxes(uncore_msr_uncores, UNCORE_SPR_IIO);
}
6382
#define SPR_UNCORE_UPI_PCIID		0x3241
#define SPR_UNCORE_UPI0_DEVFN		0x9
#define SPR_UNCORE_M3UPI_PCIID		0x3246
#define SPR_UNCORE_M3UPI0_DEVFN		0x29

/*
 * Build the discovery-unit rb-tree for the manually defined UPI or M3UPI
 * type by scanning the PCI bus, since their discovery-table entries are
 * broken.  The unit id is derived from the device's slot relative to the
 * link-0 devfn; the encoded address combines domain/bus/devfn with the
 * type's box_ctl offset.  On allocation failure the type is disabled
 * (num_boxes = 0).
 */
static void spr_update_device_location(int type_id)
{
	struct intel_uncore_discovery_unit *unit;
	struct intel_uncore_type *type;
	struct pci_dev *dev = NULL;
	struct rb_root *root;
	u32 device, devfn;
	int die;

	if (type_id == UNCORE_SPR_UPI) {
		type = &spr_uncore_upi;
		device = SPR_UNCORE_UPI_PCIID;
		devfn = SPR_UNCORE_UPI0_DEVFN;
	} else if (type_id == UNCORE_SPR_M3UPI) {
		type = &spr_uncore_m3upi;
		device = SPR_UNCORE_M3UPI_PCIID;
		devfn = SPR_UNCORE_M3UPI0_DEVFN;
	} else
		return;

	root = kzalloc_obj(struct rb_root);
	if (!root) {
		type->num_boxes = 0;
		return;
	}
	*root = RB_ROOT;

	while ((dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, dev)) != NULL) {

		die = uncore_pcibus_to_dieid(dev->bus);
		if (die < 0)
			continue;

		unit = kzalloc_obj(*unit);
		if (!unit)
			continue;
		unit->die = die;
		unit->id = PCI_SLOT(dev->devfn) - PCI_SLOT(devfn);
		unit->addr = pci_domain_nr(dev->bus) << UNCORE_DISCOVERY_PCI_DOMAIN_OFFSET |
			     dev->bus->number << UNCORE_DISCOVERY_PCI_BUS_OFFSET |
			     devfn << UNCORE_DISCOVERY_PCI_DEVFN_OFFSET |
			     type->box_ctl;

		unit->pmu_idx = unit->id;

		uncore_find_add_unit(unit, root, NULL);
	}

	type->boxes = root;
}
6438
spr_uncore_pci_init(void)6439 int spr_uncore_pci_init(void)
6440 {
6441 int ret = snbep_pci2phy_map_init(0x3250, SKX_CPUNODEID, SKX_GIDNIDMAP, true);
6442
6443 if (ret)
6444 return ret;
6445
6446 /*
6447 * The discovery table of UPI on some SPR variant is broken,
6448 * which impacts the detection of both UPI and M3UPI uncore PMON.
6449 * Use the pre-defined UPI and M3UPI table to replace.
6450 *
6451 * The accurate location, e.g., domain and BUS number,
6452 * can only be retrieved at load time.
6453 * Update the location of UPI and M3UPI.
6454 */
6455 spr_update_device_location(UNCORE_SPR_UPI);
6456 spr_update_device_location(UNCORE_SPR_M3UPI);
6457 uncore_pci_uncores = uncore_get_uncores(UNCORE_ACCESS_PCI,
6458 UNCORE_SPR_PCI_EXTRA_UNCORES,
6459 spr_pci_uncores,
6460 UNCORE_SPR_NUM_UNCORE_TYPES,
6461 spr_uncores);
6462 return 0;
6463 }
6464
spr_uncore_mmio_init(void)6465 void spr_uncore_mmio_init(void)
6466 {
6467 int ret = snbep_pci2phy_map_init(0x3250, SKX_CPUNODEID, SKX_GIDNIDMAP, true);
6468
6469 if (ret) {
6470 uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO, 0, NULL,
6471 UNCORE_SPR_NUM_UNCORE_TYPES,
6472 spr_uncores);
6473 } else {
6474 uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO,
6475 UNCORE_SPR_MMIO_EXTRA_UNCORES,
6476 spr_mmio_uncores,
6477 UNCORE_SPR_NUM_UNCORE_TYPES,
6478 spr_uncores);
6479
6480 spr_uncore_imc_free_running.num_boxes = uncore_type_max_boxes(uncore_mmio_uncores, UNCORE_SPR_IMC) / 2;
6481 }
6482 }
6483
6484 /* end of SPR uncore support */
6485
6486 /* GNR uncore support */
6487
6488 #define UNCORE_GNR_NUM_UNCORE_TYPES 23
6489
/* Discovery-table unit types to skip on GNR: none (terminator only). */
int gnr_uncore_units_ignore[] = {
	UNCORE_IGNORE_END
};
6493
/* GNR Ubox: only name and alias attributes; the rest comes from discovery. */
static struct intel_uncore_type gnr_uncore_ubox = {
	.name = "ubox",
	.attr_update = uncore_alias_groups,
};
6498
/*
 * GNR IMC events.  CAS counts are per sub-channel (sch0/sch1); the scale
 * 6.103515625e-5 equals 64/2^20, i.e. each count is one 64-byte line,
 * reported in MiB.
 */
static struct uncore_event_desc gnr_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,			"event=0x01,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read_sch0,		"event=0x05,umask=0xcf"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read_sch0.scale,	"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read_sch0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read_sch1,		"event=0x06,umask=0xcf"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read_sch1.scale,	"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read_sch1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write_sch0,		"event=0x05,umask=0xf0"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write_sch0.scale,	"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write_sch0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write_sch1,		"event=0x06,umask=0xf0"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write_sch1.scale,	"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write_sch1.unit,	"MiB"),
	{ /* end: all zeroes */ },
};
6515
/* GNR IMC: SPR MMIO format plus a 48-bit fixed counter (SNR register layout). */
static struct intel_uncore_type gnr_uncore_imc = {
	SPR_UNCORE_MMIO_COMMON_FORMAT(),
	.name = "imc",
	.fixed_ctr_bits = 48,
	.fixed_ctr = SNR_IMC_MMIO_PMON_FIXED_CTR,
	.fixed_ctl = SNR_IMC_MMIO_PMON_FIXED_CTL,
	.event_descs = gnr_uncore_imc_events,
};
6524
/* x8 PCIe port PMON; generic SPR PCI event format. */
static struct intel_uncore_type gnr_uncore_pciex8 = {
	SPR_UNCORE_PCI_COMMON_FORMAT(),
	.name = "pciex8",
};

/* x16 PCIe port PMON. */
static struct intel_uncore_type gnr_uncore_pciex16 = {
	SPR_UNCORE_PCI_COMMON_FORMAT(),
	.name = "pciex16",
};

/* UPI link layer PMON. */
static struct intel_uncore_type gnr_uncore_upi = {
	SPR_UNCORE_PCI_COMMON_FORMAT(),
	.name = "upi",
};

/* B2UPI (UPI mesh-to-link bridge) PMON. */
static struct intel_uncore_type gnr_uncore_b2upi = {
	SPR_UNCORE_PCI_COMMON_FORMAT(),
	.name = "b2upi",
};

/* B2HOT PMON: name/aliases only, the rest comes from discovery. */
static struct intel_uncore_type gnr_uncore_b2hot = {
	.name = "b2hot",
	.attr_update = uncore_alias_groups,
};

/* B2CMI PMON. */
static struct intel_uncore_type gnr_uncore_b2cmi = {
	SPR_UNCORE_PCI_COMMON_FORMAT(),
	.name = "b2cmi",
};

/* B2CXL PMON: MMIO access with 8-byte counter stride. */
static struct intel_uncore_type gnr_uncore_b2cxl = {
	SPR_UNCORE_MMIO_OFFS8_COMMON_FORMAT(),
	.name = "b2cxl",
};

/* MDF/SBO PMON: name/aliases only. */
static struct intel_uncore_type gnr_uncore_mdf_sbo = {
	.name = "mdf_sbo",
	.attr_update = uncore_alias_groups,
};
6564
/*
 * GNR uncore types, indexed by discovery-table type id.
 * NOTE(review): NULL slots appear to be ids with no pre-defined type
 * (unsupported or fully discovery-described) — confirm against the
 * discovery code.
 */
static struct intel_uncore_type *gnr_uncores[UNCORE_GNR_NUM_UNCORE_TYPES] = {
	&spr_uncore_chabox,
	&spr_uncore_iio,
	&spr_uncore_irp,
	NULL,
	&spr_uncore_pcu,
	&gnr_uncore_ubox,
	&gnr_uncore_imc,
	NULL,
	&gnr_uncore_upi,
	NULL,
	NULL,
	NULL,
	&spr_uncore_cxlcm,
	&spr_uncore_cxldp,
	NULL,
	&gnr_uncore_b2hot,
	&gnr_uncore_b2cmi,
	&gnr_uncore_b2cxl,
	&gnr_uncore_b2upi,
	NULL,
	&gnr_uncore_mdf_sbo,
	&gnr_uncore_pciex16,
	&gnr_uncore_pciex8,
};
6590
/*
 * GNR free-running IIO counters.
 * NOTE(review): field order assumed to be {counter_base, counter_offset,
 * box_offset, num_counters, bits} per struct freerunning_counters —
 * confirm against uncore.h.
 */
static struct freerunning_counters gnr_iio_freerunning[] = {
	[SPR_IIO_MSR_IOCLK]	= { 0x290e, 0x01, 0x10, 1, 48 },
	[SPR_IIO_MSR_BW_IN]	= { 0x360e, 0x10, 0x80, 8, 48 },
	[SPR_IIO_MSR_BW_OUT]	= { 0x2e0e, 0x10, 0x80, 8, 48 },
};
6596
gnr_uncore_cpu_init(void)6597 void gnr_uncore_cpu_init(void)
6598 {
6599 uncore_msr_uncores = uncore_get_uncores(UNCORE_ACCESS_MSR,
6600 UNCORE_SPR_MSR_EXTRA_UNCORES,
6601 spr_msr_uncores,
6602 UNCORE_GNR_NUM_UNCORE_TYPES,
6603 gnr_uncores);
6604 spr_uncore_iio_free_running.num_boxes = uncore_type_max_boxes(uncore_msr_uncores, UNCORE_SPR_IIO);
6605 spr_uncore_iio_free_running.freerunning = gnr_iio_freerunning;
6606 }
6607
/* Register the GNR PCI uncore PMUs; no extra pre-defined PCI types. */
int gnr_uncore_pci_init(void)
{
	uncore_pci_uncores = uncore_get_uncores(UNCORE_ACCESS_PCI, 0, NULL,
						UNCORE_GNR_NUM_UNCORE_TYPES,
						gnr_uncores);
	return 0;
}
6615
/* Register the GNR MMIO uncore PMUs; no extra pre-defined MMIO types. */
void gnr_uncore_mmio_init(void)
{
	uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO, 0, NULL,
						 UNCORE_GNR_NUM_UNCORE_TYPES,
						 gnr_uncores);
}
6622
6623 /* end of GNR uncore support */
6624
6625 /* DMR uncore support */
6626 #define UNCORE_DMR_NUM_UNCORE_TYPES 52
6627
/* DMR IMC PMON event format: 10-bit threshold field. */
static struct attribute *dmr_imc_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh10.attr,
	NULL,
};

static const struct attribute_group dmr_imc_uncore_format_group = {
	.name = "format",
	.attrs = dmr_imc_uncore_formats_attr,
};

/* DMR IMC: SPR MMIO ops plus a 48-bit fixed counter. */
static struct intel_uncore_type dmr_uncore_imc = {
	.name = "imc",
	.fixed_ctr_bits = 48,
	.fixed_ctr = DMR_IMC_PMON_FIXED_CTR,
	.fixed_ctl = DMR_IMC_PMON_FIXED_CTL,
	.ops = &spr_uncore_mmio_ops,
	.format_group = &dmr_imc_uncore_format_group,
	.attr_update = uncore_alias_groups,
};
6651
/*
 * DMR SCA-style event format: extended umask (umask_ext5) plus 8-bit
 * threshold.  Shared by most DMR types below.
 */
static struct attribute *dmr_sca_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext5.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group dmr_sca_uncore_format_group = {
	.name = "format",
	.attrs = dmr_sca_uncore_formats_attr,
};

/* DMR SCA PMON. */
static struct intel_uncore_type dmr_uncore_sca = {
	.name = "sca",
	.event_mask_ext = DMR_HAMVF_EVENT_MASK_EXT,
	.format_group = &dmr_sca_uncore_format_group,
	.attr_update = uncore_alias_groups,
};
6672
/* DMR CXLCM event format: CXLCM-specific inv/threshold encodings plus port enable. */
static struct attribute *dmr_cxlcm_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv2.attr,
	&format_attr_thresh9_2.attr,
	&format_attr_port_en.attr,
	NULL,
};

static const struct attribute_group dmr_cxlcm_uncore_format_group = {
	.name = "format",
	.attrs = dmr_cxlcm_uncore_formats_attr,
};

/*
 * CXLCM counter constraints: events 0x01-0x24 may only use counters 0-3
 * (mask 0x0f); events 0x41, 0x50-0x5e and 0x60-0x61 only counters 4-7
 * (mask 0xf0).
 */
static struct event_constraint dmr_uncore_cxlcm_constraints[] = {
	UNCORE_EVENT_CONSTRAINT_RANGE(0x1, 0x24, 0x0f),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x41, 0x41, 0xf0),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x50, 0x5e, 0xf0),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x60, 0x61, 0xf0),
	EVENT_CONSTRAINT_END
};

/* DMR CXL cache/mem PMON. */
static struct intel_uncore_type dmr_uncore_cxlcm = {
	.name = "cxlcm",
	.event_mask = GENERIC_PMON_RAW_EVENT_MASK,
	.event_mask_ext = DMR_CXLCM_EVENT_MASK_EXT,
	.constraints = dmr_uncore_cxlcm_constraints,
	.format_group = &dmr_cxlcm_uncore_format_group,
	.attr_update = uncore_alias_groups,
};

/* DMR HAMVF PMON; SCA-style format. */
static struct intel_uncore_type dmr_uncore_hamvf = {
	.name = "hamvf",
	.event_mask_ext = DMR_HAMVF_EVENT_MASK_EXT,
	.format_group = &dmr_sca_uncore_format_group,
	.attr_update = uncore_alias_groups,
};
6711
/* CBo constraints: the listed events are restricted to counter 0 (mask 0x1). */
static struct event_constraint dmr_uncore_cbo_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x19, 0x1a, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	EVENT_CONSTRAINT_END
};

/* DMR CBo PMON; SCA-style format with counter-0 constraints above. */
static struct intel_uncore_type dmr_uncore_cbo = {
	.name = "cbo",
	.event_mask_ext = DMR_HAMVF_EVENT_MASK_EXT,
	.constraints = dmr_uncore_cbo_constraints,
	.format_group = &dmr_sca_uncore_format_group,
	.attr_update = uncore_alias_groups,
};

/* DMR SANTA PMON: name/aliases only, the rest comes from discovery. */
static struct intel_uncore_type dmr_uncore_santa = {
	.name = "santa",
	.attr_update = uncore_alias_groups,
};

/* DMR CNCU PMON. */
static struct intel_uncore_type dmr_uncore_cncu = {
	.name = "cncu",
	.attr_update = uncore_alias_groups,
};

/* DMR SNCU PMON. */
static struct intel_uncore_type dmr_uncore_sncu = {
	.name = "sncu",
	.attr_update = uncore_alias_groups,
};

/* DMR ULA PMON; SCA-style format. */
static struct intel_uncore_type dmr_uncore_ula = {
	.name = "ula",
	.event_mask_ext = DMR_HAMVF_EVENT_MASK_EXT,
	.format_group = &dmr_sca_uncore_format_group,
	.attr_update = uncore_alias_groups,
};

/* DMR DDA PMON; SCA-style format. */
static struct intel_uncore_type dmr_uncore_dda = {
	.name = "dda",
	.event_mask_ext = DMR_HAMVF_EVENT_MASK_EXT,
	.format_group = &dmr_sca_uncore_format_group,
	.attr_update = uncore_alias_groups,
};

/* SBO constraints: events 0x1f and 0x25 only on counter 0. */
static struct event_constraint dmr_uncore_sbo_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x01),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x01),
	EVENT_CONSTRAINT_END
};

/* DMR SBO PMON; SCA-style format with the constraints above. */
static struct intel_uncore_type dmr_uncore_sbo = {
	.name = "sbo",
	.event_mask_ext = DMR_HAMVF_EVENT_MASK_EXT,
	.constraints = dmr_uncore_sbo_constraints,
	.format_group = &dmr_sca_uncore_format_group,
	.attr_update = uncore_alias_groups,
};

/* DMR UBR PMON; SCA-style format. */
static struct intel_uncore_type dmr_uncore_ubr = {
	.name = "ubr",
	.event_mask_ext = DMR_HAMVF_EVENT_MASK_EXT,
	.format_group = &dmr_sca_uncore_format_group,
	.attr_update = uncore_alias_groups,
};
6779
/*
 * DMR PCIe gen4 event format: standard fields plus an extended threshold
 * and a set of PCIe-specific selector fields (RS3, RX/TX, IEP, VC, port).
 */
static struct attribute *dmr_pcie4_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_thresh_ext.attr,
	&format_attr_rs3_sel.attr,
	&format_attr_rx_sel.attr,
	&format_attr_tx_sel.attr,
	&format_attr_iep_sel.attr,
	&format_attr_vc_sel.attr,
	&format_attr_port_sel.attr,
	NULL,
};

static const struct attribute_group dmr_pcie4_uncore_format_group = {
	.name = "format",
	.attrs = dmr_pcie4_uncore_formats_attr,
};

/* DMR PCIe gen4 PMON. */
static struct intel_uncore_type dmr_uncore_pcie4 = {
	.name = "pcie4",
	.event_mask_ext = DMR_PCIE4_EVENT_MASK_EXT,
	.format_group = &dmr_pcie4_uncore_format_group,
	.attr_update = uncore_alias_groups,
};

/* DMR CRS PMON: name/aliases only. */
static struct intel_uncore_type dmr_uncore_crs = {
	.name = "crs",
	.attr_update = uncore_alias_groups,
};

/* DMR CPC PMON; SCA-style format. */
static struct intel_uncore_type dmr_uncore_cpc = {
	.name = "cpc",
	.event_mask_ext = DMR_HAMVF_EVENT_MASK_EXT,
	.format_group = &dmr_sca_uncore_format_group,
	.attr_update = uncore_alias_groups,
};

/* DMR ITC (inbound traffic controller) PMON; SCA-style format. */
static struct intel_uncore_type dmr_uncore_itc = {
	.name = "itc",
	.event_mask_ext = DMR_HAMVF_EVENT_MASK_EXT,
	.format_group = &dmr_sca_uncore_format_group,
	.attr_update = uncore_alias_groups,
};

/* DMR OTC (outbound traffic controller) PMON; SCA-style format. */
static struct intel_uncore_type dmr_uncore_otc = {
	.name = "otc",
	.event_mask_ext = DMR_HAMVF_EVENT_MASK_EXT,
	.format_group = &dmr_sca_uncore_format_group,
	.attr_update = uncore_alias_groups,
};

/* DMR CMS PMON: name/aliases only. */
static struct intel_uncore_type dmr_uncore_cms = {
	.name = "cms",
	.attr_update = uncore_alias_groups,
};

/* DMR PCIe gen6 PMON; shares the gen4 format. */
static struct intel_uncore_type dmr_uncore_pcie6 = {
	.name = "pcie6",
	.event_mask_ext = DMR_PCIE4_EVENT_MASK_EXT,
	.format_group = &dmr_pcie4_uncore_format_group,
	.attr_update = uncore_alias_groups,
};
6845
/*
 * DMR uncore types, indexed by discovery-table type id.
 * NOTE(review): NULL slots appear to be ids with no pre-defined type —
 * confirm against the discovery code.
 */
static struct intel_uncore_type *dmr_uncores[UNCORE_DMR_NUM_UNCORE_TYPES] = {
	NULL, NULL, NULL, NULL,
	&spr_uncore_pcu,
	&gnr_uncore_ubox,
	&dmr_uncore_imc,
	NULL,
	NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL,
	NULL, NULL, NULL,
	&dmr_uncore_sca,
	&dmr_uncore_cxlcm,
	NULL, NULL, NULL,
	NULL, NULL,
	&dmr_uncore_hamvf,
	&dmr_uncore_cbo,
	&dmr_uncore_santa,
	&dmr_uncore_cncu,
	&dmr_uncore_sncu,
	&dmr_uncore_ula,
	&dmr_uncore_dda,
	NULL,
	&dmr_uncore_sbo,
	NULL,
	NULL, NULL, NULL,
	&dmr_uncore_ubr,
	NULL,
	&dmr_uncore_pcie4,
	&dmr_uncore_crs,
	&dmr_uncore_cpc,
	&dmr_uncore_itc,
	&dmr_uncore_otc,
	&dmr_uncore_cms,
	&dmr_uncore_pcie6,
};
6881
/* IMH discovery units to skip on DMR. */
int dmr_uncore_imh_units_ignore[] = {
	0x13, /* MSE */
	UNCORE_IGNORE_END
};

/* CBB discovery units to skip on DMR. */
int dmr_uncore_cbb_units_ignore[] = {
	0x25, /* SB2UCIE */
	UNCORE_IGNORE_END
};

/* Per-PMU MMIO offset from DMR_IMH1_HIOP_MMIO_BASE, indexed by pmu_idx. */
static unsigned int dmr_iio_freerunning_box_offsets[] = {
	0x0, 0x8000, 0x18000, 0x20000
};
6895
dmr_uncore_freerunning_init_box(struct intel_uncore_box * box)6896 static void dmr_uncore_freerunning_init_box(struct intel_uncore_box *box)
6897 {
6898 struct intel_uncore_type *type = box->pmu->type;
6899 u64 mmio_base;
6900
6901 if (box->pmu->pmu_idx >= type->num_boxes)
6902 return;
6903
6904 mmio_base = DMR_IMH1_HIOP_MMIO_BASE;
6905 mmio_base += dmr_iio_freerunning_box_offsets[box->pmu->pmu_idx];
6906
6907 box->io_addr = ioremap(mmio_base, type->mmio_map_size);
6908 if (!box->io_addr)
6909 pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);
6910 }
6911
/*
 * MMIO ops for DMR free-running counters: map/unmap and read only —
 * free-running counters have no enable/disable callbacks.
 */
static struct intel_uncore_ops dmr_uncore_freerunning_ops = {
	.init_box = dmr_uncore_freerunning_init_box,
	.exit_box = uncore_mmio_exit_box,
	.read_counter = uncore_mmio_read_counter,
	.hw_config = uncore_freerunning_hw_config,
};
6918
/* Free-running counter groups of the DMR IIO (indexes into dmr_iio_freerunning[]). */
enum perf_uncore_dmr_iio_freerunning_type_id {
	DMR_ITC_INB_DATA_BW,
	DMR_ITC_BW_IN,
	DMR_OTC_BW_OUT,
	DMR_OTC_CLOCK_TICKS,

	DMR_IIO_FREERUNNING_TYPE_MAX,
};
6927
/*
 * DMR free-running IIO counter layout (MMIO offsets within a box).
 * NOTE(review): field order assumed to be {counter_base, counter_offset,
 * box_offset, num_counters, bits} per struct freerunning_counters —
 * confirm against uncore.h.
 */
static struct freerunning_counters dmr_iio_freerunning[] = {
	[DMR_ITC_INB_DATA_BW]	= { 0x4d40, 0x8, 0, 8, 48},
	[DMR_ITC_BW_IN]		= { 0x6b00, 0x8, 0, 8, 48},
	[DMR_OTC_BW_OUT]	= { 0x6b60, 0x8, 0, 8, 48},
	[DMR_OTC_CLOCK_TICKS]	= { 0x6bb0, 0x8, 0, 1, 48},
};
6934
/*
 * DMR free-running IIO events, 8 ports per group.  The scale
 * 3.814697266e-6 equals 4/2^20, i.e. each count is 4 bytes, reported
 * in MiB.
 */
static struct uncore_event_desc dmr_uncore_iio_freerunning_events[] = {
	/* ITC Free Running Data BW counter for inbound traffic */
	INTEL_UNCORE_FR_EVENT_DESC(inb_data_port0, 0x10, 3.814697266e-6),
	INTEL_UNCORE_FR_EVENT_DESC(inb_data_port1, 0x11, 3.814697266e-6),
	INTEL_UNCORE_FR_EVENT_DESC(inb_data_port2, 0x12, 3.814697266e-6),
	INTEL_UNCORE_FR_EVENT_DESC(inb_data_port3, 0x13, 3.814697266e-6),
	INTEL_UNCORE_FR_EVENT_DESC(inb_data_port4, 0x14, 3.814697266e-6),
	INTEL_UNCORE_FR_EVENT_DESC(inb_data_port5, 0x15, 3.814697266e-6),
	INTEL_UNCORE_FR_EVENT_DESC(inb_data_port6, 0x16, 3.814697266e-6),
	INTEL_UNCORE_FR_EVENT_DESC(inb_data_port7, 0x17, 3.814697266e-6),

	/* ITC Free Running BW IN counters */
	INTEL_UNCORE_FR_EVENT_DESC(bw_in_port0, 0x20, 3.814697266e-6),
	INTEL_UNCORE_FR_EVENT_DESC(bw_in_port1, 0x21, 3.814697266e-6),
	INTEL_UNCORE_FR_EVENT_DESC(bw_in_port2, 0x22, 3.814697266e-6),
	INTEL_UNCORE_FR_EVENT_DESC(bw_in_port3, 0x23, 3.814697266e-6),
	INTEL_UNCORE_FR_EVENT_DESC(bw_in_port4, 0x24, 3.814697266e-6),
	INTEL_UNCORE_FR_EVENT_DESC(bw_in_port5, 0x25, 3.814697266e-6),
	INTEL_UNCORE_FR_EVENT_DESC(bw_in_port6, 0x26, 3.814697266e-6),
	INTEL_UNCORE_FR_EVENT_DESC(bw_in_port7, 0x27, 3.814697266e-6),

	/* ITC Free Running BW OUT counters */
	INTEL_UNCORE_FR_EVENT_DESC(bw_out_port0, 0x30, 3.814697266e-6),
	INTEL_UNCORE_FR_EVENT_DESC(bw_out_port1, 0x31, 3.814697266e-6),
	INTEL_UNCORE_FR_EVENT_DESC(bw_out_port2, 0x32, 3.814697266e-6),
	INTEL_UNCORE_FR_EVENT_DESC(bw_out_port3, 0x33, 3.814697266e-6),
	INTEL_UNCORE_FR_EVENT_DESC(bw_out_port4, 0x34, 3.814697266e-6),
	INTEL_UNCORE_FR_EVENT_DESC(bw_out_port5, 0x35, 3.814697266e-6),
	INTEL_UNCORE_FR_EVENT_DESC(bw_out_port6, 0x36, 3.814697266e-6),
	INTEL_UNCORE_FR_EVENT_DESC(bw_out_port7, 0x37, 3.814697266e-6),

	/* Free Running Clock Counter */
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x40"),
	{ /* end: all zeroes */ },
};
6970
/* DMR free-running IIO PMU: 25 counters total (8+8+8 BW + 1 clock). */
static struct intel_uncore_type dmr_uncore_iio_free_running = {
	.name = "iio_free_running",
	.num_counters = 25,
	.mmio_map_size = DMR_HIOP_MMIO_SIZE,
	.num_freerunning_types = DMR_IIO_FREERUNNING_TYPE_MAX,
	.freerunning = dmr_iio_freerunning,
	.ops = &dmr_uncore_freerunning_ops,
	.event_descs = dmr_uncore_iio_freerunning_events,
	.format_group = &skx_uncore_iio_freerunning_format_group,
};

/* Extra (non-discovery) MMIO types appended to the DMR list. */
#define UNCORE_DMR_MMIO_EXTRA_UNCORES 1
static struct intel_uncore_type *dmr_mmio_uncores[UNCORE_DMR_MMIO_EXTRA_UNCORES] = {
	&dmr_uncore_iio_free_running,
};
6986
/* Register the DMR PCI uncore PMUs; no extra pre-defined PCI types. */
int dmr_uncore_pci_init(void)
{
	uncore_pci_uncores = uncore_get_uncores(UNCORE_ACCESS_PCI, 0, NULL,
						UNCORE_DMR_NUM_UNCORE_TYPES,
						dmr_uncores);
	return 0;
}
6994
/*
 * Register the DMR MMIO uncore PMUs, including the extra free-running
 * IIO type, then size the free-running PMU to the number of ITC boxes.
 */
void dmr_uncore_mmio_init(void)
{
	uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO,
						 UNCORE_DMR_MMIO_EXTRA_UNCORES,
						 dmr_mmio_uncores,
						 UNCORE_DMR_NUM_UNCORE_TYPES,
						 dmr_uncores);

	dmr_uncore_iio_free_running.num_boxes =
		uncore_type_max_boxes(uncore_mmio_uncores, UNCORE_DMR_ITC);
}
7006 /* end of DMR uncore support */
7007