1 /*
2  * Per core/cpu state
3  *
4  * Used to coordinate shared registers between HT threads or
5  * among events on a single PMU.
6  */
7 
8 #include <linux/stddef.h>
9 #include <linux/types.h>
10 #include <linux/init.h>
11 #include <linux/slab.h>
12 #include <linux/export.h>
13 
14 #include <asm/hardirq.h>
15 #include <asm/apic.h>
16 
17 #include "perf_event.h"
18 
19 /*
20  * Intel PerfMon, used on Core and later.
21  */
22 static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
23 {
24   [PERF_COUNT_HW_CPU_CYCLES]		= 0x003c,
25   [PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
26   [PERF_COUNT_HW_CACHE_REFERENCES]	= 0x4f2e,
27   [PERF_COUNT_HW_CACHE_MISSES]		= 0x412e,
28   [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
29   [PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
30   [PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
31   [PERF_COUNT_HW_REF_CPU_CYCLES]	= 0x0300, /* pseudo-encoding */
32 };
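/*
 * For illustration: each entry above uses the standard PERFEVTSEL encoding,
 * with the event select in bits 7:0 and the unit mask in bits 15:8. E.g.
 * 0x412e is event 0x2e with umask 0x41 (LONGEST_LAT_CACHE.MISS, the
 * architectural "LLC Misses" event), while 0x4f2e is the same event with
 * umask 0x4f ("LLC References").
 */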
33 
34 static struct event_constraint intel_core_event_constraints[] __read_mostly =
35 {
36 	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
37 	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
38 	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
39 	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
40 	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
41 	INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
42 	EVENT_CONSTRAINT_END
43 };
44 
45 static struct event_constraint intel_core2_event_constraints[] __read_mostly =
46 {
47 	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
48 	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
49 	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
50 	INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
51 	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
52 	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
53 	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
54 	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
55 	INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
56 	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
57 	INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
58 	INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
59 	INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
60 	EVENT_CONSTRAINT_END
61 };
62 
63 static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
64 {
65 	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
66 	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
67 	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
68 	INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
69 	INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
70 	INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
71 	INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
72 	INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
73 	INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
74 	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
75 	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
76 	EVENT_CONSTRAINT_END
77 };
78 
79 static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
80 {
81 	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
82 	EVENT_EXTRA_END
83 };
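/*
 * A note on the entry above: event 0xb7 with umask 0x01 (OFFCORE_RESPONSE_0)
 * needs an additional request/response selection written to
 * MSR_OFFCORE_RSP_0; the 0xffff mask is the portion of that MSR treated as
 * valid here. The extra_reg machinery further down arbitrates access to
 * this shared MSR.
 */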
84 
85 static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
86 {
87 	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
88 	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
89 	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
90 	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
91 	INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
92 	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
93 	INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
94 	EVENT_CONSTRAINT_END
95 };
96 
97 static struct event_constraint intel_snb_event_constraints[] __read_mostly =
98 {
99 	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
100 	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
101 	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
102 	INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
103 	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
104 	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
105 	EVENT_CONSTRAINT_END
106 };
107 
108 static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
109 {
110 	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
111 	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
112 	EVENT_EXTRA_END
113 };
114 
115 static struct event_constraint intel_v1_event_constraints[] __read_mostly =
116 {
117 	EVENT_CONSTRAINT_END
118 };
119 
120 static struct event_constraint intel_gen_event_constraints[] __read_mostly =
121 {
122 	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
123 	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
124 	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
125 	EVENT_CONSTRAINT_END
126 };
127 
128 static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
129 	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
130 	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
131 	EVENT_EXTRA_END
132 };
133 
134 static u64 intel_pmu_event_map(int hw_event)
135 {
136 	return intel_perfmon_event_map[hw_event];
137 }
138 
139 static __initconst const u64 snb_hw_cache_event_ids
140 				[PERF_COUNT_HW_CACHE_MAX]
141 				[PERF_COUNT_HW_CACHE_OP_MAX]
142 				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
143 {
144  [ C(L1D) ] = {
145 	[ C(OP_READ) ] = {
146 		[ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS        */
147 		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPLACEMENT              */
148 	},
149 	[ C(OP_WRITE) ] = {
150 		[ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES       */
151 		[ C(RESULT_MISS)   ] = 0x0851, /* L1D.ALL_M_REPLACEMENT        */
152 	},
153 	[ C(OP_PREFETCH) ] = {
154 		[ C(RESULT_ACCESS) ] = 0x0,
155 		[ C(RESULT_MISS)   ] = 0x024e, /* HW_PRE_REQ.DL1_MISS          */
156 	},
157  },
158  [ C(L1I ) ] = {
159 	[ C(OP_READ) ] = {
160 		[ C(RESULT_ACCESS) ] = 0x0,
161 		[ C(RESULT_MISS)   ] = 0x0280, /* ICACHE.MISSES */
162 	},
163 	[ C(OP_WRITE) ] = {
164 		[ C(RESULT_ACCESS) ] = -1,
165 		[ C(RESULT_MISS)   ] = -1,
166 	},
167 	[ C(OP_PREFETCH) ] = {
168 		[ C(RESULT_ACCESS) ] = 0x0,
169 		[ C(RESULT_MISS)   ] = 0x0,
170 	},
171  },
172  [ C(LL  ) ] = {
173 	[ C(OP_READ) ] = {
174 		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
175 		[ C(RESULT_ACCESS) ] = 0x01b7,
176 		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
177 		[ C(RESULT_MISS)   ] = 0x01b7,
178 	},
179 	[ C(OP_WRITE) ] = {
180 		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
181 		[ C(RESULT_ACCESS) ] = 0x01b7,
182 		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
183 		[ C(RESULT_MISS)   ] = 0x01b7,
184 	},
185 	[ C(OP_PREFETCH) ] = {
186 		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
187 		[ C(RESULT_ACCESS) ] = 0x01b7,
188 		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
189 		[ C(RESULT_MISS)   ] = 0x01b7,
190 	},
191  },
192  [ C(DTLB) ] = {
193 	[ C(OP_READ) ] = {
194 		[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
195 		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
196 	},
197 	[ C(OP_WRITE) ] = {
198 		[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
199 		[ C(RESULT_MISS)   ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
200 	},
201 	[ C(OP_PREFETCH) ] = {
202 		[ C(RESULT_ACCESS) ] = 0x0,
203 		[ C(RESULT_MISS)   ] = 0x0,
204 	},
205  },
206  [ C(ITLB) ] = {
207 	[ C(OP_READ) ] = {
208 		[ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT         */
209 		[ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK    */
210 	},
211 	[ C(OP_WRITE) ] = {
212 		[ C(RESULT_ACCESS) ] = -1,
213 		[ C(RESULT_MISS)   ] = -1,
214 	},
215 	[ C(OP_PREFETCH) ] = {
216 		[ C(RESULT_ACCESS) ] = -1,
217 		[ C(RESULT_MISS)   ] = -1,
218 	},
219  },
220  [ C(BPU ) ] = {
221 	[ C(OP_READ) ] = {
222 		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
223 		[ C(RESULT_MISS)   ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
224 	},
225 	[ C(OP_WRITE) ] = {
226 		[ C(RESULT_ACCESS) ] = -1,
227 		[ C(RESULT_MISS)   ] = -1,
228 	},
229 	[ C(OP_PREFETCH) ] = {
230 		[ C(RESULT_ACCESS) ] = -1,
231 		[ C(RESULT_MISS)   ] = -1,
232 	},
233  },
234  [ C(NODE) ] = {
235 	[ C(OP_READ) ] = {
236 		[ C(RESULT_ACCESS) ] = -1,
237 		[ C(RESULT_MISS)   ] = -1,
238 	},
239 	[ C(OP_WRITE) ] = {
240 		[ C(RESULT_ACCESS) ] = -1,
241 		[ C(RESULT_MISS)   ] = -1,
242 	},
243 	[ C(OP_PREFETCH) ] = {
244 		[ C(RESULT_ACCESS) ] = -1,
245 		[ C(RESULT_MISS)   ] = -1,
246 	},
247  },
248 
249 };
250 
251 static __initconst const u64 westmere_hw_cache_event_ids
252 				[PERF_COUNT_HW_CACHE_MAX]
253 				[PERF_COUNT_HW_CACHE_OP_MAX]
254 				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
255 {
256  [ C(L1D) ] = {
257 	[ C(OP_READ) ] = {
258 		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS       */
259 		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL                     */
260 	},
261 	[ C(OP_WRITE) ] = {
262 		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES      */
263 		[ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL                   */
264 	},
265 	[ C(OP_PREFETCH) ] = {
266 		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS        */
267 		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS            */
268 	},
269  },
270  [ C(L1I ) ] = {
271 	[ C(OP_READ) ] = {
272 		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                    */
273 		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                   */
274 	},
275 	[ C(OP_WRITE) ] = {
276 		[ C(RESULT_ACCESS) ] = -1,
277 		[ C(RESULT_MISS)   ] = -1,
278 	},
279 	[ C(OP_PREFETCH) ] = {
280 		[ C(RESULT_ACCESS) ] = 0x0,
281 		[ C(RESULT_MISS)   ] = 0x0,
282 	},
283  },
284  [ C(LL  ) ] = {
285 	[ C(OP_READ) ] = {
286 		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
287 		[ C(RESULT_ACCESS) ] = 0x01b7,
288 		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
289 		[ C(RESULT_MISS)   ] = 0x01b7,
290 	},
291 	/*
292 	 * Use RFO, not WRITEBACK, because a write miss would typically occur
293 	 * on RFO.
294 	 */
295 	[ C(OP_WRITE) ] = {
296 		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
297 		[ C(RESULT_ACCESS) ] = 0x01b7,
298 		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
299 		[ C(RESULT_MISS)   ] = 0x01b7,
300 	},
301 	[ C(OP_PREFETCH) ] = {
302 		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
303 		[ C(RESULT_ACCESS) ] = 0x01b7,
304 		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
305 		[ C(RESULT_MISS)   ] = 0x01b7,
306 	},
307  },
308  [ C(DTLB) ] = {
309 	[ C(OP_READ) ] = {
310 		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS       */
311 		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY         */
312 	},
313 	[ C(OP_WRITE) ] = {
314 		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES      */
315 		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS  */
316 	},
317 	[ C(OP_PREFETCH) ] = {
318 		[ C(RESULT_ACCESS) ] = 0x0,
319 		[ C(RESULT_MISS)   ] = 0x0,
320 	},
321  },
322  [ C(ITLB) ] = {
323 	[ C(OP_READ) ] = {
324 		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P           */
325 		[ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.ANY              */
326 	},
327 	[ C(OP_WRITE) ] = {
328 		[ C(RESULT_ACCESS) ] = -1,
329 		[ C(RESULT_MISS)   ] = -1,
330 	},
331 	[ C(OP_PREFETCH) ] = {
332 		[ C(RESULT_ACCESS) ] = -1,
333 		[ C(RESULT_MISS)   ] = -1,
334 	},
335  },
336  [ C(BPU ) ] = {
337 	[ C(OP_READ) ] = {
338 		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
339 		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY               */
340 	},
341 	[ C(OP_WRITE) ] = {
342 		[ C(RESULT_ACCESS) ] = -1,
343 		[ C(RESULT_MISS)   ] = -1,
344 	},
345 	[ C(OP_PREFETCH) ] = {
346 		[ C(RESULT_ACCESS) ] = -1,
347 		[ C(RESULT_MISS)   ] = -1,
348 	},
349  },
350  [ C(NODE) ] = {
351 	[ C(OP_READ) ] = {
352 		[ C(RESULT_ACCESS) ] = 0x01b7,
353 		[ C(RESULT_MISS)   ] = 0x01b7,
354 	},
355 	[ C(OP_WRITE) ] = {
356 		[ C(RESULT_ACCESS) ] = 0x01b7,
357 		[ C(RESULT_MISS)   ] = 0x01b7,
358 	},
359 	[ C(OP_PREFETCH) ] = {
360 		[ C(RESULT_ACCESS) ] = 0x01b7,
361 		[ C(RESULT_MISS)   ] = 0x01b7,
362 	},
363  },
364 };
365 
366 /*
367  * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
368  * See IA32 SDM Vol 3B 30.6.1.3
369  */
370 
371 #define NHM_DMND_DATA_RD	(1 << 0)
372 #define NHM_DMND_RFO		(1 << 1)
373 #define NHM_DMND_IFETCH		(1 << 2)
374 #define NHM_DMND_WB		(1 << 3)
375 #define NHM_PF_DATA_RD		(1 << 4)
376 #define NHM_PF_DATA_RFO		(1 << 5)
377 #define NHM_PF_IFETCH		(1 << 6)
378 #define NHM_OFFCORE_OTHER	(1 << 7)
379 #define NHM_UNCORE_HIT		(1 << 8)
380 #define NHM_OTHER_CORE_HIT_SNP	(1 << 9)
381 #define NHM_OTHER_CORE_HITM	(1 << 10)
382         			/* reserved */
383 #define NHM_REMOTE_CACHE_FWD	(1 << 12)
384 #define NHM_REMOTE_DRAM		(1 << 13)
385 #define NHM_LOCAL_DRAM		(1 << 14)
386 #define NHM_NON_DRAM		(1 << 15)
387 
388 #define NHM_LOCAL		(NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
389 #define NHM_REMOTE		(NHM_REMOTE_DRAM)
390 
391 #define NHM_DMND_READ		(NHM_DMND_DATA_RD)
392 #define NHM_DMND_WRITE		(NHM_DMND_RFO|NHM_DMND_WB)
393 #define NHM_DMND_PREFETCH	(NHM_PF_DATA_RD|NHM_PF_DATA_RFO)
394 
395 #define NHM_L3_HIT	(NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
396 #define NHM_L3_MISS	(NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
397 #define NHM_L3_ACCESS	(NHM_L3_HIT|NHM_L3_MISS)
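/*
 * Worked example of how these compose: NHM_DMND_READ|NHM_L3_MISS is
 * bit 0 plus bits 12-15, i.e. 0xf001, and NHM_DMND_READ|NHM_L3_ACCESS
 * adds the hit bits 8-10 for 0xf701. These values are what end up in
 * MSR_OFFCORE_RSP_0 via the extra_reg tables above.
 */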
398 
399 static __initconst const u64 nehalem_hw_cache_extra_regs
400 				[PERF_COUNT_HW_CACHE_MAX]
401 				[PERF_COUNT_HW_CACHE_OP_MAX]
402 				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
403 {
404  [ C(LL  ) ] = {
405 	[ C(OP_READ) ] = {
406 		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
407 		[ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_L3_MISS,
408 	},
409 	[ C(OP_WRITE) ] = {
410 		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
411 		[ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_L3_MISS,
412 	},
413 	[ C(OP_PREFETCH) ] = {
414 		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
415 		[ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
416 	},
417  },
418  [ C(NODE) ] = {
419 	[ C(OP_READ) ] = {
420 		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE,
421 		[ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_REMOTE,
422 	},
423 	[ C(OP_WRITE) ] = {
424 		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE,
425 		[ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_REMOTE,
426 	},
427 	[ C(OP_PREFETCH) ] = {
428 		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE,
429 		[ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_REMOTE,
430 	},
431  },
432 };
433 
434 static __initconst const u64 nehalem_hw_cache_event_ids
435 				[PERF_COUNT_HW_CACHE_MAX]
436 				[PERF_COUNT_HW_CACHE_OP_MAX]
437 				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
438 {
439  [ C(L1D) ] = {
440 	[ C(OP_READ) ] = {
441 		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS       */
442 		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL                     */
443 	},
444 	[ C(OP_WRITE) ] = {
445 		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES      */
446 		[ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL                   */
447 	},
448 	[ C(OP_PREFETCH) ] = {
449 		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS        */
450 		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS            */
451 	},
452  },
453  [ C(L1I ) ] = {
454 	[ C(OP_READ) ] = {
455 		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                    */
456 		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                   */
457 	},
458 	[ C(OP_WRITE) ] = {
459 		[ C(RESULT_ACCESS) ] = -1,
460 		[ C(RESULT_MISS)   ] = -1,
461 	},
462 	[ C(OP_PREFETCH) ] = {
463 		[ C(RESULT_ACCESS) ] = 0x0,
464 		[ C(RESULT_MISS)   ] = 0x0,
465 	},
466  },
467  [ C(LL  ) ] = {
468 	[ C(OP_READ) ] = {
469 		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
470 		[ C(RESULT_ACCESS) ] = 0x01b7,
471 		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
472 		[ C(RESULT_MISS)   ] = 0x01b7,
473 	},
474 	/*
475 	 * Use RFO, not WRITEBACK, because a write miss would typically occur
476 	 * on RFO.
477 	 */
478 	[ C(OP_WRITE) ] = {
479 		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
480 		[ C(RESULT_ACCESS) ] = 0x01b7,
481 		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
482 		[ C(RESULT_MISS)   ] = 0x01b7,
483 	},
484 	[ C(OP_PREFETCH) ] = {
485 		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
486 		[ C(RESULT_ACCESS) ] = 0x01b7,
487 		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
488 		[ C(RESULT_MISS)   ] = 0x01b7,
489 	},
490  },
491  [ C(DTLB) ] = {
492 	[ C(OP_READ) ] = {
493 		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI   (alias)  */
494 		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY         */
495 	},
496 	[ C(OP_WRITE) ] = {
497 		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI   (alias)  */
498 		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS  */
499 	},
500 	[ C(OP_PREFETCH) ] = {
501 		[ C(RESULT_ACCESS) ] = 0x0,
502 		[ C(RESULT_MISS)   ] = 0x0,
503 	},
504  },
505  [ C(ITLB) ] = {
506 	[ C(OP_READ) ] = {
507 		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P           */
508 		[ C(RESULT_MISS)   ] = 0x20c8, /* ITLB_MISS_RETIRED            */
509 	},
510 	[ C(OP_WRITE) ] = {
511 		[ C(RESULT_ACCESS) ] = -1,
512 		[ C(RESULT_MISS)   ] = -1,
513 	},
514 	[ C(OP_PREFETCH) ] = {
515 		[ C(RESULT_ACCESS) ] = -1,
516 		[ C(RESULT_MISS)   ] = -1,
517 	},
518  },
519  [ C(BPU ) ] = {
520 	[ C(OP_READ) ] = {
521 		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
522 		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY               */
523 	},
524 	[ C(OP_WRITE) ] = {
525 		[ C(RESULT_ACCESS) ] = -1,
526 		[ C(RESULT_MISS)   ] = -1,
527 	},
528 	[ C(OP_PREFETCH) ] = {
529 		[ C(RESULT_ACCESS) ] = -1,
530 		[ C(RESULT_MISS)   ] = -1,
531 	},
532  },
533  [ C(NODE) ] = {
534 	[ C(OP_READ) ] = {
535 		[ C(RESULT_ACCESS) ] = 0x01b7,
536 		[ C(RESULT_MISS)   ] = 0x01b7,
537 	},
538 	[ C(OP_WRITE) ] = {
539 		[ C(RESULT_ACCESS) ] = 0x01b7,
540 		[ C(RESULT_MISS)   ] = 0x01b7,
541 	},
542 	[ C(OP_PREFETCH) ] = {
543 		[ C(RESULT_ACCESS) ] = 0x01b7,
544 		[ C(RESULT_MISS)   ] = 0x01b7,
545 	},
546  },
547 };
548 
549 static __initconst const u64 core2_hw_cache_event_ids
550 				[PERF_COUNT_HW_CACHE_MAX]
551 				[PERF_COUNT_HW_CACHE_OP_MAX]
552 				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
553 {
554  [ C(L1D) ] = {
555 	[ C(OP_READ) ] = {
556 		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI          */
557 		[ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE       */
558 	},
559 	[ C(OP_WRITE) ] = {
560 		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI          */
561 		[ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE       */
562 	},
563 	[ C(OP_PREFETCH) ] = {
564 		[ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS      */
565 		[ C(RESULT_MISS)   ] = 0,
566 	},
567  },
568  [ C(L1I ) ] = {
569 	[ C(OP_READ) ] = {
570 		[ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS                  */
571 		[ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES                 */
572 	},
573 	[ C(OP_WRITE) ] = {
574 		[ C(RESULT_ACCESS) ] = -1,
575 		[ C(RESULT_MISS)   ] = -1,
576 	},
577 	[ C(OP_PREFETCH) ] = {
578 		[ C(RESULT_ACCESS) ] = 0,
579 		[ C(RESULT_MISS)   ] = 0,
580 	},
581  },
582  [ C(LL  ) ] = {
583 	[ C(OP_READ) ] = {
584 		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                 */
585 		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE               */
586 	},
587 	[ C(OP_WRITE) ] = {
588 		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI                 */
589 		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE               */
590 	},
591 	[ C(OP_PREFETCH) ] = {
592 		[ C(RESULT_ACCESS) ] = 0,
593 		[ C(RESULT_MISS)   ] = 0,
594 	},
595  },
596  [ C(DTLB) ] = {
597 	[ C(OP_READ) ] = {
598 		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI  (alias) */
599 		[ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD        */
600 	},
601 	[ C(OP_WRITE) ] = {
602 		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI  (alias) */
603 		[ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST        */
604 	},
605 	[ C(OP_PREFETCH) ] = {
606 		[ C(RESULT_ACCESS) ] = 0,
607 		[ C(RESULT_MISS)   ] = 0,
608 	},
609  },
610  [ C(ITLB) ] = {
611 	[ C(OP_READ) ] = {
612 		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P         */
613 		[ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES                 */
614 	},
615 	[ C(OP_WRITE) ] = {
616 		[ C(RESULT_ACCESS) ] = -1,
617 		[ C(RESULT_MISS)   ] = -1,
618 	},
619 	[ C(OP_PREFETCH) ] = {
620 		[ C(RESULT_ACCESS) ] = -1,
621 		[ C(RESULT_MISS)   ] = -1,
622 	},
623  },
624  [ C(BPU ) ] = {
625 	[ C(OP_READ) ] = {
626 		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY        */
627 		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED    */
628 	},
629 	[ C(OP_WRITE) ] = {
630 		[ C(RESULT_ACCESS) ] = -1,
631 		[ C(RESULT_MISS)   ] = -1,
632 	},
633 	[ C(OP_PREFETCH) ] = {
634 		[ C(RESULT_ACCESS) ] = -1,
635 		[ C(RESULT_MISS)   ] = -1,
636 	},
637  },
638 };
639 
640 static __initconst const u64 atom_hw_cache_event_ids
641 				[PERF_COUNT_HW_CACHE_MAX]
642 				[PERF_COUNT_HW_CACHE_OP_MAX]
643 				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
644 {
645  [ C(L1D) ] = {
646 	[ C(OP_READ) ] = {
647 		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD               */
648 		[ C(RESULT_MISS)   ] = 0,
649 	},
650 	[ C(OP_WRITE) ] = {
651 		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST               */
652 		[ C(RESULT_MISS)   ] = 0,
653 	},
654 	[ C(OP_PREFETCH) ] = {
655 		[ C(RESULT_ACCESS) ] = 0x0,
656 		[ C(RESULT_MISS)   ] = 0,
657 	},
658  },
659  [ C(L1I ) ] = {
660 	[ C(OP_READ) ] = {
661 		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                  */
662 		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                 */
663 	},
664 	[ C(OP_WRITE) ] = {
665 		[ C(RESULT_ACCESS) ] = -1,
666 		[ C(RESULT_MISS)   ] = -1,
667 	},
668 	[ C(OP_PREFETCH) ] = {
669 		[ C(RESULT_ACCESS) ] = 0,
670 		[ C(RESULT_MISS)   ] = 0,
671 	},
672  },
673  [ C(LL  ) ] = {
674 	[ C(OP_READ) ] = {
675 		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                 */
676 		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE               */
677 	},
678 	[ C(OP_WRITE) ] = {
679 		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI                 */
680 		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE               */
681 	},
682 	[ C(OP_PREFETCH) ] = {
683 		[ C(RESULT_ACCESS) ] = 0,
684 		[ C(RESULT_MISS)   ] = 0,
685 	},
686  },
687  [ C(DTLB) ] = {
688 	[ C(OP_READ) ] = {
689 		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI  (alias) */
690 		[ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD        */
691 	},
692 	[ C(OP_WRITE) ] = {
693 		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI  (alias) */
694 		[ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST        */
695 	},
696 	[ C(OP_PREFETCH) ] = {
697 		[ C(RESULT_ACCESS) ] = 0,
698 		[ C(RESULT_MISS)   ] = 0,
699 	},
700  },
701  [ C(ITLB) ] = {
702 	[ C(OP_READ) ] = {
703 		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P         */
704 		[ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES                */
705 	},
706 	[ C(OP_WRITE) ] = {
707 		[ C(RESULT_ACCESS) ] = -1,
708 		[ C(RESULT_MISS)   ] = -1,
709 	},
710 	[ C(OP_PREFETCH) ] = {
711 		[ C(RESULT_ACCESS) ] = -1,
712 		[ C(RESULT_MISS)   ] = -1,
713 	},
714  },
715  [ C(BPU ) ] = {
716 	[ C(OP_READ) ] = {
717 		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY        */
718 		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED    */
719 	},
720 	[ C(OP_WRITE) ] = {
721 		[ C(RESULT_ACCESS) ] = -1,
722 		[ C(RESULT_MISS)   ] = -1,
723 	},
724 	[ C(OP_PREFETCH) ] = {
725 		[ C(RESULT_ACCESS) ] = -1,
726 		[ C(RESULT_MISS)   ] = -1,
727 	},
728  },
729 };
730 
731 static void intel_pmu_disable_all(void)
732 {
733 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
734 
735 	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
736 
737 	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
738 		intel_pmu_disable_bts();
739 
740 	intel_pmu_pebs_disable_all();
741 	intel_pmu_lbr_disable_all();
742 }
743 
744 static void intel_pmu_enable_all(int added)
745 {
746 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
747 
748 	intel_pmu_pebs_enable_all();
749 	intel_pmu_lbr_enable_all();
750 	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
751 			x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);
752 
753 	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
754 		struct perf_event *event =
755 			cpuc->events[X86_PMC_IDX_FIXED_BTS];
756 
757 		if (WARN_ON_ONCE(!event))
758 			return;
759 
760 		intel_pmu_enable_bts(event->hw.config);
761 	}
762 }
763 
764 /*
765  * Workaround for:
766  *   Intel Errata AAK100 (model 26)
767  *   Intel Errata AAP53  (model 30)
768  *   Intel Errata BD53   (model 44)
769  *
770  * The official story:
771  *   These chips need to be 'reset' when adding counters by programming the
772  *   magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
773  *   in sequence on the same PMC or on different PMCs.
774  *
775  * In practice it appears some of these events do in fact count, and
776  * we need to program all 4 events.
777  */
778 static void intel_pmu_nhm_workaround(void)
779 {
780 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
781 	static const unsigned long nhm_magic[4] = {
782 		0x4300B5,
783 		0x4300D2,
784 		0x4300B1,
785 		0x4300B1
786 	};
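	/*
	 * Decoding one of the magic values as an example: 0x4300B5 is
	 * event 0xB5, umask 0x00, with the USR (bit 16), OS (bit 17) and
	 * EN (bit 22) bits set, i.e. an enabled but otherwise ordinary
	 * PERFEVTSEL programming.
	 */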
787 	struct perf_event *event;
788 	int i;
789 
790 	/*
791 	 * The erratum requires the following steps:
792 	 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
793 	 * 2) Configure 4 PERFEVTSELx with the magic events and clear
794 	 *    the corresponding PMCx;
795 	 * 3) Set bits 0-3 of MSR_CORE_PERF_GLOBAL_CTRL;
796 	 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
797 	 * 5) Clear 4 pairs of PERFEVTSELx and PMCx;
798 	 */
799 
800 	/*
801 	 * The real steps we take differ slightly from the above:
802 	 * A) To reduce MSR operations, we don't run step 1) as those
803 	 *    registers are already cleared before this function is called;
804 	 * B) Call x86_perf_event_update to save PMCx before configuring
805 	 *    PERFEVTSELx with the magic numbers;
806 	 * C) In step 5), we only clear a PERFEVTSELx if it is not
807 	 *    currently in use;
808 	 * D) Call x86_perf_event_set_period to restore PMCx.
809 	 */
810 
811 	/* We always operate on 4 pairs of PERF counters */
812 	for (i = 0; i < 4; i++) {
813 		event = cpuc->events[i];
814 		if (event)
815 			x86_perf_event_update(event);
816 	}
817 
818 	for (i = 0; i < 4; i++) {
819 		wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
820 		wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
821 	}
822 
823 	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
824 	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);
825 
826 	for (i = 0; i < 4; i++) {
827 		event = cpuc->events[i];
828 
829 		if (event) {
830 			x86_perf_event_set_period(event);
831 			__x86_pmu_enable_event(&event->hw,
832 					ARCH_PERFMON_EVENTSEL_ENABLE);
833 		} else
834 			wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
835 	}
836 }
837 
838 static void intel_pmu_nhm_enable_all(int added)
839 {
840 	if (added)
841 		intel_pmu_nhm_workaround();
842 	intel_pmu_enable_all(added);
843 }
844 
845 static inline u64 intel_pmu_get_status(void)
846 {
847 	u64 status;
848 
849 	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
850 
851 	return status;
852 }
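/*
 * GLOBAL_STATUS layout (per the SDM): bits 0..n-1 flag overflow of the
 * general-purpose counters, bits 32+ the fixed counters, bit 62 the
 * PEBS/DS buffer, and bit 63 is CondChgd. The IRQ handler below
 * special-cases bit 62 and treats the rest as per-counter overflow bits.
 */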
853 
854 static inline void intel_pmu_ack_status(u64 ack)
855 {
856 	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
857 }
858 
859 static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
860 {
861 	int idx = hwc->idx - X86_PMC_IDX_FIXED;
862 	u64 ctrl_val, mask;
863 
864 	mask = 0xfULL << (idx * 4);
865 
866 	rdmsrl(hwc->config_base, ctrl_val);
867 	ctrl_val &= ~mask;
868 	wrmsrl(hwc->config_base, ctrl_val);
869 }
870 
871 static void intel_pmu_disable_event(struct perf_event *event)
872 {
873 	struct hw_perf_event *hwc = &event->hw;
874 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
875 
876 	if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
877 		intel_pmu_disable_bts();
878 		intel_pmu_drain_bts_buffer();
879 		return;
880 	}
881 
882 	cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx);
883 	cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
884 
885 	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
886 		intel_pmu_disable_fixed(hwc);
887 		return;
888 	}
889 
890 	x86_pmu_disable_event(event);
891 
892 	if (unlikely(event->attr.precise_ip))
893 		intel_pmu_pebs_disable(event);
894 }
895 
896 static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
897 {
898 	int idx = hwc->idx - X86_PMC_IDX_FIXED;
899 	u64 ctrl_val, bits, mask;
900 
901 	/*
902 	 * Enable IRQ generation (0x8),
903 	 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
904 	 * if requested:
905 	 */
906 	bits = 0x8ULL;
907 	if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
908 		bits |= 0x2;
909 	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
910 		bits |= 0x1;
911 
912 	/*
913 	 * ANY bit is supported in v3 and up
914 	 */
915 	if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
916 		bits |= 0x4;
917 
918 	bits <<= (idx * 4);
919 	mask = 0xfULL << (idx * 4);
920 
921 	rdmsrl(hwc->config_base, ctrl_val);
922 	ctrl_val &= ~mask;
923 	ctrl_val |= bits;
924 	wrmsrl(hwc->config_base, ctrl_val);
925 }
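/*
 * Worked example: for fixed counter 1 (CPU_CLK_UNHALTED.CORE) with both
 * ring levels counted, bits = 0x8|0x2|0x1 = 0xb, so the function clears
 * field 7:4 of MSR_ARCH_PERFMON_FIXED_CTR_CTRL and writes 0xb0 into it.
 */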
926 
927 static void intel_pmu_enable_event(struct perf_event *event)
928 {
929 	struct hw_perf_event *hwc = &event->hw;
930 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
931 
932 	if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
933 		if (!__this_cpu_read(cpu_hw_events.enabled))
934 			return;
935 
936 		intel_pmu_enable_bts(hwc->config);
937 		return;
938 	}
939 
940 	if (event->attr.exclude_host)
941 		cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx);
942 	if (event->attr.exclude_guest)
943 		cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx);
944 
945 	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
946 		intel_pmu_enable_fixed(hwc);
947 		return;
948 	}
949 
950 	if (unlikely(event->attr.precise_ip))
951 		intel_pmu_pebs_enable(event);
952 
953 	__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
954 }
955 
956 /*
957  * Save and restart an expired event. Called from NMI context,
958  * so it has to be careful about preempting normal event ops:
959  */
960 int intel_pmu_save_and_restart(struct perf_event *event)
961 {
962 	x86_perf_event_update(event);
963 	return x86_perf_event_set_period(event);
964 }
965 
966 static void intel_pmu_reset(void)
967 {
968 	struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
969 	unsigned long flags;
970 	int idx;
971 
972 	if (!x86_pmu.num_counters)
973 		return;
974 
975 	local_irq_save(flags);
976 
977 	printk("clearing PMU state on CPU#%d\n", smp_processor_id());
978 
979 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
980 		checking_wrmsrl(x86_pmu_config_addr(idx), 0ull);
981 		checking_wrmsrl(x86_pmu_event_addr(idx),  0ull);
982 	}
983 	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
984 		checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
985 
986 	if (ds)
987 		ds->bts_index = ds->bts_buffer_base;
988 
989 	local_irq_restore(flags);
990 }
991 
992 /*
993  * This handler is triggered by the local APIC, so the APIC IRQ handling
994  * rules apply:
995  */
996 static int intel_pmu_handle_irq(struct pt_regs *regs)
997 {
998 	struct perf_sample_data data;
999 	struct cpu_hw_events *cpuc;
1000 	int bit, loops;
1001 	u64 status;
1002 	int handled;
1003 
1004 	perf_sample_data_init(&data, 0);
1005 
1006 	cpuc = &__get_cpu_var(cpu_hw_events);
1007 
1008 	/*
1009 	 * Some chipsets need to unmask the LVTPC in a particular spot
1010 	 * inside the NMI handler.  As a result, the unmasking was pushed
1011 	 * into all the NMI handlers.
1012 	 *
1013 	 * This handler doesn't seem to have any issues with the unmasking
1014 	 * so it was left at the top.
1015 	 */
1016 	apic_write(APIC_LVTPC, APIC_DM_NMI);
1017 
1018 	intel_pmu_disable_all();
1019 	handled = intel_pmu_drain_bts_buffer();
1020 	status = intel_pmu_get_status();
1021 	if (!status) {
1022 		intel_pmu_enable_all(0);
1023 		return handled;
1024 	}
1025 
1026 	loops = 0;
1027 again:
1028 	intel_pmu_ack_status(status);
1029 	if (++loops > 100) {
1030 		WARN_ONCE(1, "perfevents: irq loop stuck!\n");
1031 		perf_event_print_debug();
1032 		intel_pmu_reset();
1033 		goto done;
1034 	}
1035 
1036 	inc_irq_stat(apic_perf_irqs);
1037 
1038 	intel_pmu_lbr_read();
1039 
1040 	/*
1041 	 * PEBS overflow sets bit 62 in the global status register
1042 	 */
1043 	if (__test_and_clear_bit(62, (unsigned long *)&status)) {
1044 		handled++;
1045 		x86_pmu.drain_pebs(regs);
1046 	}
1047 
1048 	for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
1049 		struct perf_event *event = cpuc->events[bit];
1050 
1051 		handled++;
1052 
1053 		if (!test_bit(bit, cpuc->active_mask))
1054 			continue;
1055 
1056 		if (!intel_pmu_save_and_restart(event))
1057 			continue;
1058 
1059 		data.period = event->hw.last_period;
1060 
1061 		if (perf_event_overflow(event, &data, regs))
1062 			x86_pmu_stop(event, 0);
1063 	}
1064 
1065 	/*
1066 	 * Repeat if there is more work to be done:
1067 	 */
1068 	status = intel_pmu_get_status();
1069 	if (status)
1070 		goto again;
1071 
1072 done:
1073 	intel_pmu_enable_all(0);
1074 	return handled;
1075 }
1076 
1077 static struct event_constraint *
1078 intel_bts_constraints(struct perf_event *event)
1079 {
1080 	struct hw_perf_event *hwc = &event->hw;
1081 	unsigned int hw_event, bts_event;
1082 
1083 	if (event->attr.freq)
1084 		return NULL;
1085 
1086 	hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
1087 	bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
1088 
1089 	if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
1090 		return &bts_constraint;
1091 
1092 	return NULL;
1093 }
1094 
1095 static bool intel_try_alt_er(struct perf_event *event, int orig_idx)
1096 {
1097 	if (!(x86_pmu.er_flags & ERF_HAS_RSP_1))
1098 		return false;
1099 
1100 	if (event->hw.extra_reg.idx == EXTRA_REG_RSP_0) {
1101 		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
1102 		event->hw.config |= 0x01bb;
1103 		event->hw.extra_reg.idx = EXTRA_REG_RSP_1;
1104 		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
1105 	} else if (event->hw.extra_reg.idx == EXTRA_REG_RSP_1) {
1106 		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
1107 		event->hw.config |= 0x01b7;
1108 		event->hw.extra_reg.idx = EXTRA_REG_RSP_0;
1109 		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
1110 	}
1111 
1112 	if (event->hw.extra_reg.idx == orig_idx)
1113 		return false;
1114 
1115 	return true;
1116 }
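/*
 * The raw values used above are the two offcore-response event encodings:
 * 0x01b7 is event 0xb7/umask 0x01 (paired with MSR_OFFCORE_RSP_0) and
 * 0x01bb is event 0xbb/umask 0x01 (paired with MSR_OFFCORE_RSP_1), so
 * switching the encoding also switches which shared MSR the event needs.
 */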
1117 
1118 /*
1119  * Manage allocation of the shared extra MSRs for certain events.
1120  *
1121  * Sharing can be:
1122  * per-cpu: shared between the various events on a single PMU
1123  * per-core: per-cpu + shared by HT threads
1124  */
1125 static struct event_constraint *
1126 __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
1127 				   struct perf_event *event)
1128 {
1129 	struct event_constraint *c = &emptyconstraint;
1130 	struct hw_perf_event_extra *reg = &event->hw.extra_reg;
1131 	struct er_account *era;
1132 	unsigned long flags;
1133 	int orig_idx = reg->idx;
1134 
1135 	/* already allocated shared msr */
1136 	if (reg->alloc)
1137 		return &unconstrained;
1138 
1139 again:
1140 	era = &cpuc->shared_regs->regs[reg->idx];
1141 	/*
1142 	 * we use spin_lock_irqsave() to avoid lockdep issues when
1143 	 * passing a fake cpuc
1144 	 */
1145 	raw_spin_lock_irqsave(&era->lock, flags);
1146 
1147 	if (!atomic_read(&era->ref) || era->config == reg->config) {
1148 
1149 		/* lock in msr value */
1150 		era->config = reg->config;
1151 		era->reg = reg->reg;
1152 
1153 		/* one more user */
1154 		atomic_inc(&era->ref);
1155 
1156 		/* no need to reallocate during incremental event scheduling */
1157 		reg->alloc = 1;
1158 
1159 		/*
1160 		 * All events using extra_reg are unconstrained.
1161 		 * Avoids calling x86_get_event_constraints()
1162 		 *
1163 		 * Must revisit if extra_reg controlling events
1164 		 * ever have constraints. Worst case we go through
1165 		 * the regular event constraint table.
1166 		 */
1167 		c = &unconstrained;
1168 	} else if (intel_try_alt_er(event, orig_idx)) {
1169 		raw_spin_unlock_irqrestore(&era->lock, flags);
1170 		goto again;
1171 	}
1172 	raw_spin_unlock_irqrestore(&era->lock, flags);
1173 
1174 	return c;
1175 }
1176 
1177 static void
1178 __intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
1179 				   struct hw_perf_event_extra *reg)
1180 {
1181 	struct er_account *era;
1182 
1183 	/*
1184 	 * only put the constraint if the extra reg was actually
1185 	 * allocated. Also takes care of events which do
1186 	 * not use an extra shared reg.
1187 	 */
1188 	if (!reg->alloc)
1189 		return;
1190 
1191 	era = &cpuc->shared_regs->regs[reg->idx];
1192 
1193 	/* one fewer user */
1194 	atomic_dec(&era->ref);
1195 
1196 	/* allocate again next time */
1197 	reg->alloc = 0;
1198 }
1199 
1200 static struct event_constraint *
1201 intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
1202 			      struct perf_event *event)
1203 {
1204 	struct event_constraint *c = NULL;
1205 
1206 	if (event->hw.extra_reg.idx != EXTRA_REG_NONE)
1207 		c = __intel_shared_reg_get_constraints(cpuc, event);
1208 
1209 	return c;
1210 }
1211 
1212 struct event_constraint *
1213 x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
1214 {
1215 	struct event_constraint *c;
1216 
1217 	if (x86_pmu.event_constraints) {
1218 		for_each_event_constraint(c, x86_pmu.event_constraints) {
1219 			if ((event->hw.config & c->cmask) == c->code)
1220 				return c;
1221 		}
1222 	}
1223 
1224 	return &unconstrained;
1225 }
1226 
1227 static struct event_constraint *
1228 intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
1229 {
1230 	struct event_constraint *c;
1231 
1232 	c = intel_bts_constraints(event);
1233 	if (c)
1234 		return c;
1235 
1236 	c = intel_pebs_constraints(event);
1237 	if (c)
1238 		return c;
1239 
1240 	c = intel_shared_regs_constraints(cpuc, event);
1241 	if (c)
1242 		return c;
1243 
1244 	return x86_get_event_constraints(cpuc, event);
1245 }
1246 
1247 static void
1248 intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
1249 					struct perf_event *event)
1250 {
1251 	struct hw_perf_event_extra *reg;
1252 
1253 	reg = &event->hw.extra_reg;
1254 	if (reg->idx != EXTRA_REG_NONE)
1255 		__intel_shared_reg_put_constraints(cpuc, reg);
1256 }
1257 
1258 static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
1259 					struct perf_event *event)
1260 {
1261 	intel_put_shared_regs_event_constraints(cpuc, event);
1262 }
1263 
1264 static int intel_pmu_hw_config(struct perf_event *event)
1265 {
1266 	int ret = x86_pmu_hw_config(event);
1267 
1268 	if (ret)
1269 		return ret;
1270 
1271 	if (event->attr.precise_ip &&
1272 	    (event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
1273 		/*
1274 		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
1275 		 * (0x003c) so that we can use it with PEBS.
1276 		 *
1277 		 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
1278 		 * PEBS capable. However we can use INST_RETIRED.ANY_P
1279 		 * (0x00c0), which is a PEBS capable event, to get the same
1280 		 * count.
1281 		 *
1282 		 * With a CNTMASK, INST_RETIRED.ANY_P counts the number of cycles
1283 		 * that retire at least CNTMASK instructions. By setting CNTMASK
1284 		 * to a value (16) larger than the maximum number of instructions
1285 		 * that can be retired per cycle (4) and then inverting the
1286 		 * condition, we count all cycles that retire fewer than 16
1287 		 * instructions, which is every cycle.
1288 		 *
1289 		 * Thereby we gain a PEBS capable cycle counter.
1290 		 */
1291 		u64 alt_config = 0x108000c0; /* INST_RETIRED.TOTAL_CYCLES */
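		/*
		 * Breaking that constant down: event 0xc0, umask 0x00,
		 * cmask 16 (bits 31:24 = 0x10) and the invert bit (bit 23),
		 * which is exactly the CNTMASK trick described above.
		 */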
1292 
1293 		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
1294 		event->hw.config = alt_config;
1295 	}
1296 
1297 	if (event->attr.type != PERF_TYPE_RAW)
1298 		return 0;
1299 
1300 	if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
1301 		return 0;
1302 
1303 	if (x86_pmu.version < 3)
1304 		return -EINVAL;
1305 
1306 	if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
1307 		return -EACCES;
1308 
1309 	event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;
1310 
1311 	return 0;
1312 }
1313 
1314 struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
1315 {
1316 	if (x86_pmu.guest_get_msrs)
1317 		return x86_pmu.guest_get_msrs(nr);
1318 	*nr = 0;
1319 	return NULL;
1320 }
1321 EXPORT_SYMBOL_GPL(perf_guest_get_msrs);
1322 
1323 static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
1324 {
1325 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1326 	struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
1327 
1328 	arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
1329 	arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
1330 	arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask;
1331 
1332 	*nr = 1;
1333 	return arr;
1334 }
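/*
 * Note on the masks used above: intel_ctrl_guest_mask accumulates counters
 * whose events set exclude_host (so they are masked out of the host value),
 * and intel_ctrl_host_mask those with exclude_guest (masked out of the
 * guest value); see intel_pmu_enable_event().
 */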
1335 
1336 static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr)
1337 {
1338 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1339 	struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
1340 	int idx;
1341 
1342 	for (idx = 0; idx < x86_pmu.num_counters; idx++)  {
1343 		struct perf_event *event = cpuc->events[idx];
1344 
1345 		arr[idx].msr = x86_pmu_config_addr(idx);
1346 		arr[idx].host = arr[idx].guest = 0;
1347 
1348 		if (!test_bit(idx, cpuc->active_mask))
1349 			continue;
1350 
1351 		arr[idx].host = arr[idx].guest =
1352 			event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE;
1353 
1354 		if (event->attr.exclude_host)
1355 			arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
1356 		else if (event->attr.exclude_guest)
1357 			arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
1358 	}
1359 
1360 	*nr = x86_pmu.num_counters;
1361 	return arr;
1362 }
1363 
1364 static void core_pmu_enable_event(struct perf_event *event)
1365 {
1366 	if (!event->attr.exclude_host)
1367 		x86_pmu_enable_event(event);
1368 }
1369 
1370 static void core_pmu_enable_all(int added)
1371 {
1372 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1373 	int idx;
1374 
1375 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
1376 		struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
1377 
1378 		if (!test_bit(idx, cpuc->active_mask) ||
1379 				cpuc->events[idx]->attr.exclude_host)
1380 			continue;
1381 
1382 		__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
1383 	}
1384 }
1385 
1386 static __initconst const struct x86_pmu core_pmu = {
1387 	.name			= "core",
1388 	.handle_irq		= x86_pmu_handle_irq,
1389 	.disable_all		= x86_pmu_disable_all,
1390 	.enable_all		= core_pmu_enable_all,
1391 	.enable			= core_pmu_enable_event,
1392 	.disable		= x86_pmu_disable_event,
1393 	.hw_config		= x86_pmu_hw_config,
1394 	.schedule_events	= x86_schedule_events,
1395 	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
1396 	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
1397 	.event_map		= intel_pmu_event_map,
1398 	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
1399 	.apic			= 1,
1400 	/*
1401 	 * Intel PMCs cannot be accessed sanely above 32 bit width,
1402 	 * so we install an artificial 1<<31 period regardless of
1403 	 * the generic event period:
1404 	 */
1405 	.max_period		= (1ULL << 31) - 1,
1406 	.get_event_constraints	= intel_get_event_constraints,
1407 	.put_event_constraints	= intel_put_event_constraints,
1408 	.event_constraints	= intel_core_event_constraints,
1409 	.guest_get_msrs		= core_guest_get_msrs,
1410 };
1411 
1412 struct intel_shared_regs *allocate_shared_regs(int cpu)
1413 {
1414 	struct intel_shared_regs *regs;
1415 	int i;
1416 
1417 	regs = kzalloc_node(sizeof(struct intel_shared_regs),
1418 			    GFP_KERNEL, cpu_to_node(cpu));
1419 	if (regs) {
1420 		/*
1421 		 * initialize the locks to keep lockdep happy
1422 		 */
1423 		for (i = 0; i < EXTRA_REG_MAX; i++)
1424 			raw_spin_lock_init(&regs->regs[i].lock);
1425 
1426 		regs->core_id = -1;
1427 	}
1428 	return regs;
1429 }
1430 
1431 static int intel_pmu_cpu_prepare(int cpu)
1432 {
1433 	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
1434 
1435 	if (!x86_pmu.extra_regs)
1436 		return NOTIFY_OK;
1437 
1438 	cpuc->shared_regs = allocate_shared_regs(cpu);
1439 	if (!cpuc->shared_regs)
1440 		return NOTIFY_BAD;
1441 
1442 	return NOTIFY_OK;
1443 }
1444 
1445 static void intel_pmu_cpu_starting(int cpu)
1446 {
1447 	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
1448 	int core_id = topology_core_id(cpu);
1449 	int i;
1450 
1451 	init_debug_store_on_cpu(cpu);
1452 	/*
1453 	 * Deal with CPUs that don't clear their LBRs on power-up.
1454 	 */
1455 	intel_pmu_lbr_reset();
1456 
1457 	if (!cpuc->shared_regs || (x86_pmu.er_flags & ERF_NO_HT_SHARING))
1458 		return;
1459 
1460 	for_each_cpu(i, topology_thread_cpumask(cpu)) {
1461 		struct intel_shared_regs *pc;
1462 
1463 		pc = per_cpu(cpu_hw_events, i).shared_regs;
1464 		if (pc && pc->core_id == core_id) {
1465 			cpuc->kfree_on_online = cpuc->shared_regs;
1466 			cpuc->shared_regs = pc;
1467 			break;
1468 		}
1469 	}
1470 
1471 	cpuc->shared_regs->core_id = core_id;
1472 	cpuc->shared_regs->refcnt++;
1473 }
1474 
1475 static void intel_pmu_cpu_dying(int cpu)
1476 {
1477 	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
1478 	struct intel_shared_regs *pc;
1479 
1480 	pc = cpuc->shared_regs;
1481 	if (pc) {
1482 		if (pc->core_id == -1 || --pc->refcnt == 0)
1483 			kfree(pc);
1484 		cpuc->shared_regs = NULL;
1485 	}
1486 
1487 	fini_debug_store_on_cpu(cpu);
1488 }
1489 
1490 static __initconst const struct x86_pmu intel_pmu = {
1491 	.name			= "Intel",
1492 	.handle_irq		= intel_pmu_handle_irq,
1493 	.disable_all		= intel_pmu_disable_all,
1494 	.enable_all		= intel_pmu_enable_all,
1495 	.enable			= intel_pmu_enable_event,
1496 	.disable		= intel_pmu_disable_event,
1497 	.hw_config		= intel_pmu_hw_config,
1498 	.schedule_events	= x86_schedule_events,
1499 	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
1500 	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
1501 	.event_map		= intel_pmu_event_map,
1502 	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
1503 	.apic			= 1,
1504 	/*
1505 	 * Intel PMCs cannot be accessed sanely above 32 bit width,
1506 	 * so we install an artificial 1<<31 period regardless of
1507 	 * the generic event period:
1508 	 */
1509 	.max_period		= (1ULL << 31) - 1,
1510 	.get_event_constraints	= intel_get_event_constraints,
1511 	.put_event_constraints	= intel_put_event_constraints,
1512 
1513 	.cpu_prepare		= intel_pmu_cpu_prepare,
1514 	.cpu_starting		= intel_pmu_cpu_starting,
1515 	.cpu_dying		= intel_pmu_cpu_dying,
1516 	.guest_get_msrs		= intel_guest_get_msrs,
1517 };
1518 
1519 static __init void intel_clovertown_quirk(void)
1520 {
1521 	/*
1522 	 * PEBS is unreliable due to:
1523 	 *
1524 	 *   AJ67  - PEBS may experience CPL leaks
1525 	 *   AJ68  - PEBS PMI may be delayed by one event
1526 	 *   AJ69  - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
1527 	 *   AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
1528 	 *
1529 	 * AJ67 could be worked around by restricting the OS/USR flags.
1530 	 * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
1531 	 *
1532 	 * AJ106 could possibly be worked around by not allowing LBR
1533 	 *       usage from PEBS, including the fixup.
1534 	 * AJ68  could possibly be worked around by always programming
1535 	 *	 a pebs_event_reset[0] value and coping with the lost events.
1536 	 *
1537 	 * But taken together it might just make sense to not enable PEBS on
1538 	 * these chips.
1539 	 */
1540 	printk(KERN_WARNING "PEBS disabled due to CPU errata.\n");
1541 	x86_pmu.pebs = 0;
1542 	x86_pmu.pebs_constraints = NULL;
1543 }
1544 
1545 static __init void intel_sandybridge_quirk(void)
1546 {
1547 	printk(KERN_WARNING "PEBS disabled due to CPU errata.\n");
1548 	x86_pmu.pebs = 0;
1549 	x86_pmu.pebs_constraints = NULL;
1550 }
1551 
1552 static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
1553 	{ PERF_COUNT_HW_CPU_CYCLES, "cpu cycles" },
1554 	{ PERF_COUNT_HW_INSTRUCTIONS, "instructions" },
1555 	{ PERF_COUNT_HW_BUS_CYCLES, "bus cycles" },
1556 	{ PERF_COUNT_HW_CACHE_REFERENCES, "cache references" },
1557 	{ PERF_COUNT_HW_CACHE_MISSES, "cache misses" },
1558 	{ PERF_COUNT_HW_BRANCH_INSTRUCTIONS, "branch instructions" },
1559 	{ PERF_COUNT_HW_BRANCH_MISSES, "branch misses" },
1560 };
1561 
1562 static __init void intel_arch_events_quirk(void)
1563 {
1564 	int bit;
1565 
1566 	/* disable events that CPUID reports as not present */
1567 	for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
1568 		intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
1569 		printk(KERN_WARNING "CPUID marked event: \'%s\' unavailable\n",
1570 				intel_arch_events_map[bit].name);
1571 	}
1572 }
1573 
1574 static __init void intel_nehalem_quirk(void)
1575 {
1576 	union cpuid10_ebx ebx;
1577 
1578 	ebx.full = x86_pmu.events_maskl;
1579 	if (ebx.split.no_branch_misses_retired) {
1580 		/*
1581 		 * Erratum AAJ80 detected, we work it around by using
1582 		 * the BR_MISP_EXEC.ANY event. This will over-count
1583 		 * branch-misses, but it's still much better than the
1584 		 * architectural event which is often completely bogus:
1585 		 */
1586 		intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
1587 		ebx.split.no_branch_misses_retired = 0;
1588 		x86_pmu.events_maskl = ebx.full;
1589 		printk(KERN_INFO "CPU erratum AAJ80 worked around\n");
1590 	}
1591 }
1592 
1593 __init int intel_pmu_init(void)
1594 {
1595 	union cpuid10_edx edx;
1596 	union cpuid10_eax eax;
1597 	union cpuid10_ebx ebx;
1598 	unsigned int unused;
1599 	int version;
1600 
1601 	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
1602 		switch (boot_cpu_data.x86) {
1603 		case 0x6:
1604 			return p6_pmu_init();
1605 		case 0xf:
1606 			return p4_pmu_init();
1607 		}
1608 		return -ENODEV;
1609 	}
1610 
1611 	/*
1612 	 * Check whether the Architectural PerfMon supports
1613 	 * Branch Misses Retired hw_event or not.
1614 	 */
1615 	cpuid(10, &eax.full, &ebx.full, &unused, &edx.full);
1616 	if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
1617 		return -ENODEV;
1618 
1619 	version = eax.split.version_id;
1620 	if (version < 2)
1621 		x86_pmu = core_pmu;
1622 	else
1623 		x86_pmu = intel_pmu;
1624 
1625 	x86_pmu.version			= version;
1626 	x86_pmu.num_counters		= eax.split.num_counters;
1627 	x86_pmu.cntval_bits		= eax.split.bit_width;
1628 	x86_pmu.cntval_mask		= (1ULL << eax.split.bit_width) - 1;
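	/*
	 * E.g. with the common 48-bit counters, cntval_bits = 48 and
	 * cntval_mask = 0x0000ffffffffffff.
	 */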
1629 
1630 	x86_pmu.events_maskl		= ebx.full;
1631 	x86_pmu.events_mask_len		= eax.split.mask_length;
1632 
1633 	/*
1634 	 * Quirk: v2 perfmon does not report fixed-purpose events, so
1635 	 * assume at least 3 events:
1636 	 */
1637 	if (version > 1)
1638 		x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
1639 
1640 	/*
1641 	 * v2 and above have a perf capabilities MSR
1642 	 */
1643 	if (version > 1) {
1644 		u64 capabilities;
1645 
1646 		rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
1647 		x86_pmu.intel_cap.capabilities = capabilities;
1648 	}
1649 
1650 	intel_ds_init();
1651 
1652 	x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */
1653 
1654 	/*
1655 	 * Install the hw-cache-events table:
1656 	 */
1657 	switch (boot_cpu_data.x86_model) {
1658 	case 14: /* 65 nm core solo/duo, "Yonah" */
1659 		pr_cont("Core events, ");
1660 		break;
1661 
1662 	case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
1663 		x86_add_quirk(intel_clovertown_quirk);
1664 	case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
1665 	case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
1666 	case 29: /* six-core 45 nm xeon "Dunnington" */
1667 		memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
1668 		       sizeof(hw_cache_event_ids));
1669 
1670 		intel_pmu_lbr_init_core();
1671 
1672 		x86_pmu.event_constraints = intel_core2_event_constraints;
1673 		x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
1674 		pr_cont("Core2 events, ");
1675 		break;
1676 
1677 	case 26: /* 45 nm nehalem, "Bloomfield" */
1678 	case 30: /* 45 nm nehalem, "Lynnfield" */
1679 	case 46: /* 45 nm nehalem-ex, "Beckton" */
1680 		memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
1681 		       sizeof(hw_cache_event_ids));
1682 		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
1683 		       sizeof(hw_cache_extra_regs));
1684 
1685 		intel_pmu_lbr_init_nhm();
1686 
1687 		x86_pmu.event_constraints = intel_nehalem_event_constraints;
1688 		x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
1689 		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
1690 		x86_pmu.extra_regs = intel_nehalem_extra_regs;
1691 
1692 		/* UOPS_ISSUED.STALLED_CYCLES */
1693 		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e;
1694 		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
1695 		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x1803fb1;
1696 
1697 		x86_add_quirk(intel_nehalem_quirk);
1698 
1699 		pr_cont("Nehalem events, ");
1700 		break;
1701 
1702 	case 28: /* Atom */
1703 		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
1704 		       sizeof(hw_cache_event_ids));
1705 
1706 		intel_pmu_lbr_init_atom();
1707 
1708 		x86_pmu.event_constraints = intel_gen_event_constraints;
1709 		x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
1710 		pr_cont("Atom events, ");
1711 		break;
1712 
1713 	case 37: /* 32 nm nehalem, "Clarkdale" */
1714 	case 44: /* 32 nm nehalem, "Gulftown" */
1715 	case 47: /* 32 nm Xeon E7 */
1716 		memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
1717 		       sizeof(hw_cache_event_ids));
1718 		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
1719 		       sizeof(hw_cache_extra_regs));
1720 
1721 		intel_pmu_lbr_init_nhm();
1722 
1723 		x86_pmu.event_constraints = intel_westmere_event_constraints;
1724 		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
1725 		x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
1726 		x86_pmu.extra_regs = intel_westmere_extra_regs;
1727 		x86_pmu.er_flags |= ERF_HAS_RSP_1;
1728 
1729 		/* UOPS_ISSUED.STALLED_CYCLES */
1730 		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e;
1731 		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
1732 		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x1803fb1;
1733 
1734 		pr_cont("Westmere events, ");
1735 		break;
1736 
1737 	case 42: /* SandyBridge */
1738 		x86_add_quirk(intel_sandybridge_quirk);
1739 	case 45: /* SandyBridge, "Romley-EP" */
1740 		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
1741 		       sizeof(hw_cache_event_ids));
1742 
1743 		intel_pmu_lbr_init_nhm();
1744 
1745 		x86_pmu.event_constraints = intel_snb_event_constraints;
1746 		x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
1747 		x86_pmu.extra_regs = intel_snb_extra_regs;
1748 		/* all extra regs are per-cpu when HT is on */
1749 		x86_pmu.er_flags |= ERF_HAS_RSP_1;
1750 		x86_pmu.er_flags |= ERF_NO_HT_SHARING;
1751 
1752 		/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
1753 		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e;
1754 		/* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles*/
1755 		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x18001b1;
1756 
1757 		pr_cont("SandyBridge events, ");
1758 		break;
1759 
1760 	default:
1761 		switch (x86_pmu.version) {
1762 		case 1:
1763 			x86_pmu.event_constraints = intel_v1_event_constraints;
1764 			pr_cont("generic architected perfmon v1, ");
1765 			break;
1766 		default:
1767 			/*
1768 			 * default constraints for v2 and up
1769 			 */
1770 			x86_pmu.event_constraints = intel_gen_event_constraints;
1771 			pr_cont("generic architected perfmon, ");
1772 			break;
1773 		}
1774 	}
1775 
1776 	return 0;
1777 }
1778