1 // SPDX-License-Identifier: GPL-2.0 2 #include <linux/bitops.h> 3 #include <linux/types.h> 4 #include <linux/slab.h> 5 #include <linux/sched/clock.h> 6 7 #include <asm/cpu_entry_area.h> 8 #include <asm/debugreg.h> 9 #include <asm/perf_event.h> 10 #include <asm/tlbflush.h> 11 #include <asm/insn.h> 12 #include <asm/io.h> 13 #include <asm/msr.h> 14 #include <asm/timer.h> 15 16 #include "../perf_event.h" 17 18 /* Waste a full page so it can be mapped into the cpu_entry_area */ 19 DEFINE_PER_CPU_PAGE_ALIGNED(struct debug_store, cpu_debug_store); 20 21 /* The size of a BTS record in bytes: */ 22 #define BTS_RECORD_SIZE 24 23 24 #define PEBS_FIXUP_SIZE PAGE_SIZE 25 26 /* 27 * pebs_record_32 for p4 and core not supported 28 29 struct pebs_record_32 { 30 u32 flags, ip; 31 u32 ax, bc, cx, dx; 32 u32 si, di, bp, sp; 33 }; 34 35 */ 36 37 union intel_x86_pebs_dse { 38 u64 val; 39 struct { 40 unsigned int ld_dse:4; 41 unsigned int ld_stlb_miss:1; 42 unsigned int ld_locked:1; 43 unsigned int ld_data_blk:1; 44 unsigned int ld_addr_blk:1; 45 unsigned int ld_reserved:24; 46 }; 47 struct { 48 unsigned int st_l1d_hit:1; 49 unsigned int st_reserved1:3; 50 unsigned int st_stlb_miss:1; 51 unsigned int st_locked:1; 52 unsigned int st_reserved2:26; 53 }; 54 struct { 55 unsigned int st_lat_dse:4; 56 unsigned int st_lat_stlb_miss:1; 57 unsigned int st_lat_locked:1; 58 unsigned int ld_reserved3:26; 59 }; 60 struct { 61 unsigned int mtl_dse:5; 62 unsigned int mtl_locked:1; 63 unsigned int mtl_stlb_miss:1; 64 unsigned int mtl_fwd_blk:1; 65 unsigned int ld_reserved4:24; 66 }; 67 struct { 68 unsigned int lnc_dse:8; 69 unsigned int ld_reserved5:2; 70 unsigned int lnc_stlb_miss:1; 71 unsigned int lnc_locked:1; 72 unsigned int lnc_data_blk:1; 73 unsigned int lnc_addr_blk:1; 74 unsigned int ld_reserved6:18; 75 }; 76 }; 77 78 79 /* 80 * Map PEBS Load Latency Data Source encodings to generic 81 * memory data source information 82 */ 83 #define P(a, b) PERF_MEM_S(a, b) 84 #define OP_LH (P(OP, LOAD) | P(LVL, HIT)) 85 #define LEVEL(x) P(LVLNUM, x) 86 #define REM P(REMOTE, REMOTE) 87 #define SNOOP_NONE_MISS (P(SNOOP, NONE) | P(SNOOP, MISS)) 88 89 /* Version for Sandy Bridge and later */ 90 static u64 pebs_data_source[PERF_PEBS_DATA_SOURCE_MAX] = { 91 P(OP, LOAD) | P(LVL, MISS) | LEVEL(L3) | P(SNOOP, NA),/* 0x00:ukn L3 */ 92 OP_LH | P(LVL, L1) | LEVEL(L1) | P(SNOOP, NONE), /* 0x01: L1 local */ 93 OP_LH | P(LVL, LFB) | LEVEL(LFB) | P(SNOOP, NONE), /* 0x02: LFB hit */ 94 OP_LH | P(LVL, L2) | LEVEL(L2) | P(SNOOP, NONE), /* 0x03: L2 hit */ 95 OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, NONE), /* 0x04: L3 hit */ 96 OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, MISS), /* 0x05: L3 hit, snoop miss */ 97 OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HIT), /* 0x06: L3 hit, snoop hit */ 98 OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM), /* 0x07: L3 hit, snoop hitm */ 99 OP_LH | P(LVL, REM_CCE1) | REM | LEVEL(L3) | P(SNOOP, HIT), /* 0x08: L3 miss snoop hit */ 100 OP_LH | P(LVL, REM_CCE1) | REM | LEVEL(L3) | P(SNOOP, HITM), /* 0x09: L3 miss snoop hitm*/ 101 OP_LH | P(LVL, LOC_RAM) | LEVEL(RAM) | P(SNOOP, HIT), /* 0x0a: L3 miss, shared */ 102 OP_LH | P(LVL, REM_RAM1) | REM | LEVEL(L3) | P(SNOOP, HIT), /* 0x0b: L3 miss, shared */ 103 OP_LH | P(LVL, LOC_RAM) | LEVEL(RAM) | SNOOP_NONE_MISS, /* 0x0c: L3 miss, excl */ 104 OP_LH | P(LVL, REM_RAM1) | LEVEL(RAM) | REM | SNOOP_NONE_MISS, /* 0x0d: L3 miss, excl */ 105 OP_LH | P(LVL, IO) | LEVEL(NA) | P(SNOOP, NONE), /* 0x0e: I/O */ 106 OP_LH | P(LVL, UNC) | LEVEL(NA) | P(SNOOP, NONE), /* 0x0f: uncached */ 107 }; 
108 109 /* Patch up minor differences in the bits */ 110 void __init intel_pmu_pebs_data_source_nhm(void) 111 { 112 pebs_data_source[0x05] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HIT); 113 pebs_data_source[0x06] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM); 114 pebs_data_source[0x07] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM); 115 } 116 117 static void __init __intel_pmu_pebs_data_source_skl(bool pmem, u64 *data_source) 118 { 119 u64 pmem_or_l4 = pmem ? LEVEL(PMEM) : LEVEL(L4); 120 121 data_source[0x08] = OP_LH | pmem_or_l4 | P(SNOOP, HIT); 122 data_source[0x09] = OP_LH | pmem_or_l4 | REM | P(SNOOP, HIT); 123 data_source[0x0b] = OP_LH | LEVEL(RAM) | REM | P(SNOOP, NONE); 124 data_source[0x0c] = OP_LH | LEVEL(ANY_CACHE) | REM | P(SNOOPX, FWD); 125 data_source[0x0d] = OP_LH | LEVEL(ANY_CACHE) | REM | P(SNOOP, HITM); 126 } 127 128 void __init intel_pmu_pebs_data_source_skl(bool pmem) 129 { 130 __intel_pmu_pebs_data_source_skl(pmem, pebs_data_source); 131 } 132 133 static void __init __intel_pmu_pebs_data_source_grt(u64 *data_source) 134 { 135 data_source[0x05] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HIT); 136 data_source[0x06] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM); 137 data_source[0x08] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOPX, FWD); 138 } 139 140 void __init intel_pmu_pebs_data_source_grt(void) 141 { 142 __intel_pmu_pebs_data_source_grt(pebs_data_source); 143 } 144 145 void __init intel_pmu_pebs_data_source_adl(void) 146 { 147 u64 *data_source; 148 149 data_source = x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX].pebs_data_source; 150 memcpy(data_source, pebs_data_source, sizeof(pebs_data_source)); 151 __intel_pmu_pebs_data_source_skl(false, data_source); 152 153 data_source = x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX].pebs_data_source; 154 memcpy(data_source, pebs_data_source, sizeof(pebs_data_source)); 155 __intel_pmu_pebs_data_source_grt(data_source); 156 } 157 158 static void __init __intel_pmu_pebs_data_source_cmt(u64 *data_source) 159 { 160 data_source[0x07] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOPX, FWD); 161 data_source[0x08] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM); 162 data_source[0x0a] = OP_LH | P(LVL, LOC_RAM) | LEVEL(RAM) | P(SNOOP, NONE); 163 data_source[0x0b] = OP_LH | LEVEL(RAM) | REM | P(SNOOP, NONE); 164 data_source[0x0c] = OP_LH | LEVEL(RAM) | REM | P(SNOOPX, FWD); 165 data_source[0x0d] = OP_LH | LEVEL(RAM) | REM | P(SNOOP, HITM); 166 } 167 168 void __init intel_pmu_pebs_data_source_mtl(void) 169 { 170 u64 *data_source; 171 172 data_source = x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX].pebs_data_source; 173 memcpy(data_source, pebs_data_source, sizeof(pebs_data_source)); 174 __intel_pmu_pebs_data_source_skl(false, data_source); 175 176 data_source = x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX].pebs_data_source; 177 memcpy(data_source, pebs_data_source, sizeof(pebs_data_source)); 178 __intel_pmu_pebs_data_source_cmt(data_source); 179 } 180 181 void __init intel_pmu_pebs_data_source_arl_h(void) 182 { 183 u64 *data_source; 184 185 intel_pmu_pebs_data_source_lnl(); 186 187 data_source = x86_pmu.hybrid_pmu[X86_HYBRID_PMU_TINY_IDX].pebs_data_source; 188 memcpy(data_source, pebs_data_source, sizeof(pebs_data_source)); 189 __intel_pmu_pebs_data_source_cmt(data_source); 190 } 191 192 void __init intel_pmu_pebs_data_source_cmt(void) 193 { 194 __intel_pmu_pebs_data_source_cmt(pebs_data_source); 195 } 196 197 /* Version for Lion Cove and later */ 198 static u64 lnc_pebs_data_source[PERF_PEBS_DATA_SOURCE_MAX] = { 199 P(OP, LOAD) | P(LVL, MISS) | 
LEVEL(L3) | P(SNOOP, NA), /* 0x00: ukn L3 */ 200 OP_LH | P(LVL, L1) | LEVEL(L1) | P(SNOOP, NONE), /* 0x01: L1 hit */ 201 OP_LH | P(LVL, L1) | LEVEL(L1) | P(SNOOP, NONE), /* 0x02: L1 hit */ 202 OP_LH | P(LVL, LFB) | LEVEL(LFB) | P(SNOOP, NONE), /* 0x03: LFB/L1 Miss Handling Buffer hit */ 203 0, /* 0x04: Reserved */ 204 OP_LH | P(LVL, L2) | LEVEL(L2) | P(SNOOP, NONE), /* 0x05: L2 Hit */ 205 OP_LH | LEVEL(L2_MHB) | P(SNOOP, NONE), /* 0x06: L2 Miss Handling Buffer Hit */ 206 0, /* 0x07: Reserved */ 207 OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, NONE), /* 0x08: L3 Hit */ 208 0, /* 0x09: Reserved */ 209 0, /* 0x0a: Reserved */ 210 0, /* 0x0b: Reserved */ 211 OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOPX, FWD), /* 0x0c: L3 Hit Snoop Fwd */ 212 OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM), /* 0x0d: L3 Hit Snoop HitM */ 213 0, /* 0x0e: Reserved */ 214 P(OP, LOAD) | P(LVL, MISS) | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM), /* 0x0f: L3 Miss Snoop HitM */ 215 OP_LH | LEVEL(MSC) | P(SNOOP, NONE), /* 0x10: Memory-side Cache Hit */ 216 OP_LH | P(LVL, LOC_RAM) | LEVEL(RAM) | P(SNOOP, NONE), /* 0x11: Local Memory Hit */ 217 }; 218 219 void __init intel_pmu_pebs_data_source_lnl(void) 220 { 221 u64 *data_source; 222 223 data_source = x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX].pebs_data_source; 224 memcpy(data_source, lnc_pebs_data_source, sizeof(lnc_pebs_data_source)); 225 226 data_source = x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX].pebs_data_source; 227 memcpy(data_source, pebs_data_source, sizeof(pebs_data_source)); 228 __intel_pmu_pebs_data_source_cmt(data_source); 229 } 230 231 static u64 precise_store_data(u64 status) 232 { 233 union intel_x86_pebs_dse dse; 234 u64 val = P(OP, STORE) | P(SNOOP, NA) | P(LVL, L1) | P(TLB, L2); 235 236 dse.val = status; 237 238 /* 239 * bit 4: TLB access 240 * 1 = stored missed 2nd level TLB 241 * 242 * so it either hit the walker or the OS 243 * otherwise hit 2nd level TLB 244 */ 245 if (dse.st_stlb_miss) 246 val |= P(TLB, MISS); 247 else 248 val |= P(TLB, HIT); 249 250 /* 251 * bit 0: hit L1 data cache 252 * if not set, then all we know is that 253 * it missed L1D 254 */ 255 if (dse.st_l1d_hit) 256 val |= P(LVL, HIT); 257 else 258 val |= P(LVL, MISS); 259 260 /* 261 * bit 5: Locked prefix 262 */ 263 if (dse.st_locked) 264 val |= P(LOCK, LOCKED); 265 266 return val; 267 } 268 269 static u64 precise_datala_hsw(struct perf_event *event, u64 status) 270 { 271 union perf_mem_data_src dse; 272 273 dse.val = PERF_MEM_NA; 274 275 if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW) 276 dse.mem_op = PERF_MEM_OP_STORE; 277 else if (event->hw.flags & PERF_X86_EVENT_PEBS_LD_HSW) 278 dse.mem_op = PERF_MEM_OP_LOAD; 279 280 /* 281 * L1 info only valid for following events: 282 * 283 * MEM_UOPS_RETIRED.STLB_MISS_STORES 284 * MEM_UOPS_RETIRED.LOCK_STORES 285 * MEM_UOPS_RETIRED.SPLIT_STORES 286 * MEM_UOPS_RETIRED.ALL_STORES 287 */ 288 if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW) { 289 if (status & 1) 290 dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_HIT; 291 else 292 dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_MISS; 293 } 294 return dse.val; 295 } 296 297 static inline void pebs_set_tlb_lock(u64 *val, bool tlb, bool lock) 298 { 299 /* 300 * TLB access 301 * 0 = did not miss 2nd level TLB 302 * 1 = missed 2nd level TLB 303 */ 304 if (tlb) 305 *val |= P(TLB, MISS) | P(TLB, L2); 306 else 307 *val |= P(TLB, HIT) | P(TLB, L1) | P(TLB, L2); 308 309 /* locked prefix */ 310 if (lock) 311 *val |= P(LOCK, LOCKED); 312 } 313 314 /* Retrieve the latency data for e-core of ADL */ 315 static u64 
__grt_latency_data(struct perf_event *event, u64 status, 316 u8 dse, bool tlb, bool lock, bool blk) 317 { 318 u64 val; 319 320 WARN_ON_ONCE(hybrid_pmu(event->pmu)->pmu_type == hybrid_big); 321 322 dse &= PERF_PEBS_DATA_SOURCE_GRT_MASK; 323 val = hybrid_var(event->pmu, pebs_data_source)[dse]; 324 325 pebs_set_tlb_lock(&val, tlb, lock); 326 327 if (blk) 328 val |= P(BLK, DATA); 329 else 330 val |= P(BLK, NA); 331 332 return val; 333 } 334 335 u64 grt_latency_data(struct perf_event *event, u64 status) 336 { 337 union intel_x86_pebs_dse dse; 338 339 dse.val = status; 340 341 return __grt_latency_data(event, status, dse.ld_dse, 342 dse.ld_locked, dse.ld_stlb_miss, 343 dse.ld_data_blk); 344 } 345 346 /* Retrieve the latency data for e-core of MTL */ 347 u64 cmt_latency_data(struct perf_event *event, u64 status) 348 { 349 union intel_x86_pebs_dse dse; 350 351 dse.val = status; 352 353 return __grt_latency_data(event, status, dse.mtl_dse, 354 dse.mtl_stlb_miss, dse.mtl_locked, 355 dse.mtl_fwd_blk); 356 } 357 358 static u64 lnc_latency_data(struct perf_event *event, u64 status) 359 { 360 union intel_x86_pebs_dse dse; 361 union perf_mem_data_src src; 362 u64 val; 363 364 dse.val = status; 365 366 /* LNC core latency data */ 367 val = hybrid_var(event->pmu, pebs_data_source)[status & PERF_PEBS_DATA_SOURCE_MASK]; 368 if (!val) 369 val = P(OP, LOAD) | LEVEL(NA) | P(SNOOP, NA); 370 371 if (dse.lnc_stlb_miss) 372 val |= P(TLB, MISS) | P(TLB, L2); 373 else 374 val |= P(TLB, HIT) | P(TLB, L1) | P(TLB, L2); 375 376 if (dse.lnc_locked) 377 val |= P(LOCK, LOCKED); 378 379 if (dse.lnc_data_blk) 380 val |= P(BLK, DATA); 381 if (dse.lnc_addr_blk) 382 val |= P(BLK, ADDR); 383 if (!dse.lnc_data_blk && !dse.lnc_addr_blk) 384 val |= P(BLK, NA); 385 386 src.val = val; 387 if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW) 388 src.mem_op = P(OP, STORE); 389 390 return src.val; 391 } 392 393 u64 lnl_latency_data(struct perf_event *event, u64 status) 394 { 395 struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu); 396 397 if (pmu->pmu_type == hybrid_small) 398 return cmt_latency_data(event, status); 399 400 return lnc_latency_data(event, status); 401 } 402 403 u64 arl_h_latency_data(struct perf_event *event, u64 status) 404 { 405 struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu); 406 407 if (pmu->pmu_type == hybrid_tiny) 408 return cmt_latency_data(event, status); 409 410 return lnl_latency_data(event, status); 411 } 412 413 static u64 load_latency_data(struct perf_event *event, u64 status) 414 { 415 union intel_x86_pebs_dse dse; 416 u64 val; 417 418 dse.val = status; 419 420 /* 421 * use the mapping table for bit 0-3 422 */ 423 val = hybrid_var(event->pmu, pebs_data_source)[dse.ld_dse]; 424 425 /* 426 * Nehalem models do not support TLB, Lock infos 427 */ 428 if (x86_pmu.pebs_no_tlb) { 429 val |= P(TLB, NA) | P(LOCK, NA); 430 return val; 431 } 432 433 pebs_set_tlb_lock(&val, dse.ld_stlb_miss, dse.ld_locked); 434 435 /* 436 * Ice Lake and earlier models do not support block infos. 
437 */ 438 if (!x86_pmu.pebs_block) { 439 val |= P(BLK, NA); 440 return val; 441 } 442 /* 443 * bit 6: load was blocked since its data could not be forwarded 444 * from a preceding store 445 */ 446 if (dse.ld_data_blk) 447 val |= P(BLK, DATA); 448 449 /* 450 * bit 7: load was blocked due to potential address conflict with 451 * a preceding store 452 */ 453 if (dse.ld_addr_blk) 454 val |= P(BLK, ADDR); 455 456 if (!dse.ld_data_blk && !dse.ld_addr_blk) 457 val |= P(BLK, NA); 458 459 return val; 460 } 461 462 static u64 store_latency_data(struct perf_event *event, u64 status) 463 { 464 union intel_x86_pebs_dse dse; 465 union perf_mem_data_src src; 466 u64 val; 467 468 dse.val = status; 469 470 /* 471 * use the mapping table for bit 0-3 472 */ 473 val = hybrid_var(event->pmu, pebs_data_source)[dse.st_lat_dse]; 474 475 pebs_set_tlb_lock(&val, dse.st_lat_stlb_miss, dse.st_lat_locked); 476 477 val |= P(BLK, NA); 478 479 /* 480 * the pebs_data_source table is only for loads 481 * so override the mem_op to say STORE instead 482 */ 483 src.val = val; 484 src.mem_op = P(OP,STORE); 485 486 return src.val; 487 } 488 489 struct pebs_record_core { 490 u64 flags, ip; 491 u64 ax, bx, cx, dx; 492 u64 si, di, bp, sp; 493 u64 r8, r9, r10, r11; 494 u64 r12, r13, r14, r15; 495 }; 496 497 struct pebs_record_nhm { 498 u64 flags, ip; 499 u64 ax, bx, cx, dx; 500 u64 si, di, bp, sp; 501 u64 r8, r9, r10, r11; 502 u64 r12, r13, r14, r15; 503 u64 status, dla, dse, lat; 504 }; 505 506 /* 507 * Same as pebs_record_nhm, with two additional fields. 508 */ 509 struct pebs_record_hsw { 510 u64 flags, ip; 511 u64 ax, bx, cx, dx; 512 u64 si, di, bp, sp; 513 u64 r8, r9, r10, r11; 514 u64 r12, r13, r14, r15; 515 u64 status, dla, dse, lat; 516 u64 real_ip, tsx_tuning; 517 }; 518 519 union hsw_tsx_tuning { 520 struct { 521 u32 cycles_last_block : 32, 522 hle_abort : 1, 523 rtm_abort : 1, 524 instruction_abort : 1, 525 non_instruction_abort : 1, 526 retry : 1, 527 data_conflict : 1, 528 capacity_writes : 1, 529 capacity_reads : 1; 530 }; 531 u64 value; 532 }; 533 534 #define PEBS_HSW_TSX_FLAGS 0xff00000000ULL 535 536 /* Same as HSW, plus TSC */ 537 538 struct pebs_record_skl { 539 u64 flags, ip; 540 u64 ax, bx, cx, dx; 541 u64 si, di, bp, sp; 542 u64 r8, r9, r10, r11; 543 u64 r12, r13, r14, r15; 544 u64 status, dla, dse, lat; 545 u64 real_ip, tsx_tuning; 546 u64 tsc; 547 }; 548 549 void init_debug_store_on_cpu(int cpu) 550 { 551 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; 552 553 if (!ds) 554 return; 555 556 wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 557 (u32)((u64)(unsigned long)ds), 558 (u32)((u64)(unsigned long)ds >> 32)); 559 } 560 561 void fini_debug_store_on_cpu(int cpu) 562 { 563 if (!per_cpu(cpu_hw_events, cpu).ds) 564 return; 565 566 wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0); 567 } 568 569 static DEFINE_PER_CPU(void *, insn_buffer); 570 571 static void ds_update_cea(void *cea, void *addr, size_t size, pgprot_t prot) 572 { 573 unsigned long start = (unsigned long)cea; 574 phys_addr_t pa; 575 size_t msz = 0; 576 577 pa = virt_to_phys(addr); 578 579 preempt_disable(); 580 for (; msz < size; msz += PAGE_SIZE, pa += PAGE_SIZE, cea += PAGE_SIZE) 581 cea_set_pte(cea, pa, prot); 582 583 /* 584 * This is a cross-CPU update of the cpu_entry_area, we must shoot down 585 * all TLB entries for it. 
586 */ 587 flush_tlb_kernel_range(start, start + size); 588 preempt_enable(); 589 } 590 591 static void ds_clear_cea(void *cea, size_t size) 592 { 593 unsigned long start = (unsigned long)cea; 594 size_t msz = 0; 595 596 preempt_disable(); 597 for (; msz < size; msz += PAGE_SIZE, cea += PAGE_SIZE) 598 cea_set_pte(cea, 0, PAGE_NONE); 599 600 flush_tlb_kernel_range(start, start + size); 601 preempt_enable(); 602 } 603 604 static void *dsalloc_pages(size_t size, gfp_t flags, int cpu) 605 { 606 unsigned int order = get_order(size); 607 int node = cpu_to_node(cpu); 608 struct page *page; 609 610 page = __alloc_pages_node(node, flags | __GFP_ZERO, order); 611 return page ? page_address(page) : NULL; 612 } 613 614 static void dsfree_pages(const void *buffer, size_t size) 615 { 616 if (buffer) 617 free_pages((unsigned long)buffer, get_order(size)); 618 } 619 620 static int alloc_pebs_buffer(int cpu) 621 { 622 struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu); 623 struct debug_store *ds = hwev->ds; 624 size_t bsiz = x86_pmu.pebs_buffer_size; 625 int max, node = cpu_to_node(cpu); 626 void *buffer, *insn_buff, *cea; 627 628 if (!x86_pmu.ds_pebs) 629 return 0; 630 631 buffer = dsalloc_pages(bsiz, GFP_KERNEL, cpu); 632 if (unlikely(!buffer)) 633 return -ENOMEM; 634 635 /* 636 * HSW+ already provides us the eventing ip; no need to allocate this 637 * buffer then. 638 */ 639 if (x86_pmu.intel_cap.pebs_format < 2) { 640 insn_buff = kzalloc_node(PEBS_FIXUP_SIZE, GFP_KERNEL, node); 641 if (!insn_buff) { 642 dsfree_pages(buffer, bsiz); 643 return -ENOMEM; 644 } 645 per_cpu(insn_buffer, cpu) = insn_buff; 646 } 647 hwev->ds_pebs_vaddr = buffer; 648 /* Update the cpu entry area mapping */ 649 cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.pebs_buffer; 650 ds->pebs_buffer_base = (unsigned long) cea; 651 ds_update_cea(cea, buffer, bsiz, PAGE_KERNEL); 652 ds->pebs_index = ds->pebs_buffer_base; 653 max = x86_pmu.pebs_record_size * (bsiz / x86_pmu.pebs_record_size); 654 ds->pebs_absolute_maximum = ds->pebs_buffer_base + max; 655 return 0; 656 } 657 658 static void release_pebs_buffer(int cpu) 659 { 660 struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu); 661 void *cea; 662 663 if (!x86_pmu.ds_pebs) 664 return; 665 666 kfree(per_cpu(insn_buffer, cpu)); 667 per_cpu(insn_buffer, cpu) = NULL; 668 669 /* Clear the fixmap */ 670 cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.pebs_buffer; 671 ds_clear_cea(cea, x86_pmu.pebs_buffer_size); 672 dsfree_pages(hwev->ds_pebs_vaddr, x86_pmu.pebs_buffer_size); 673 hwev->ds_pebs_vaddr = NULL; 674 } 675 676 static int alloc_bts_buffer(int cpu) 677 { 678 struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu); 679 struct debug_store *ds = hwev->ds; 680 void *buffer, *cea; 681 int max; 682 683 if (!x86_pmu.bts) 684 return 0; 685 686 buffer = dsalloc_pages(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_NOWARN, cpu); 687 if (unlikely(!buffer)) { 688 WARN_ONCE(1, "%s: BTS buffer allocation failure\n", __func__); 689 return -ENOMEM; 690 } 691 hwev->ds_bts_vaddr = buffer; 692 /* Update the fixmap */ 693 cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.bts_buffer; 694 ds->bts_buffer_base = (unsigned long) cea; 695 ds_update_cea(cea, buffer, BTS_BUFFER_SIZE, PAGE_KERNEL); 696 ds->bts_index = ds->bts_buffer_base; 697 max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE; 698 ds->bts_absolute_maximum = ds->bts_buffer_base + 699 max * BTS_RECORD_SIZE; 700 ds->bts_interrupt_threshold = ds->bts_absolute_maximum - 701 (max / 16) * BTS_RECORD_SIZE; 702 return 0; 703 } 704 705 static void 
release_bts_buffer(int cpu) 706 { 707 struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu); 708 void *cea; 709 710 if (!x86_pmu.bts) 711 return; 712 713 /* Clear the fixmap */ 714 cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.bts_buffer; 715 ds_clear_cea(cea, BTS_BUFFER_SIZE); 716 dsfree_pages(hwev->ds_bts_vaddr, BTS_BUFFER_SIZE); 717 hwev->ds_bts_vaddr = NULL; 718 } 719 720 static int alloc_ds_buffer(int cpu) 721 { 722 struct debug_store *ds = &get_cpu_entry_area(cpu)->cpu_debug_store; 723 724 memset(ds, 0, sizeof(*ds)); 725 per_cpu(cpu_hw_events, cpu).ds = ds; 726 return 0; 727 } 728 729 static void release_ds_buffer(int cpu) 730 { 731 per_cpu(cpu_hw_events, cpu).ds = NULL; 732 } 733 734 void release_ds_buffers(void) 735 { 736 int cpu; 737 738 if (!x86_pmu.bts && !x86_pmu.ds_pebs) 739 return; 740 741 for_each_possible_cpu(cpu) 742 release_ds_buffer(cpu); 743 744 for_each_possible_cpu(cpu) { 745 /* 746 * Again, ignore errors from offline CPUs, they will no longer 747 * observe cpu_hw_events.ds and not program the DS_AREA when 748 * they come up. 749 */ 750 fini_debug_store_on_cpu(cpu); 751 } 752 753 for_each_possible_cpu(cpu) { 754 if (x86_pmu.ds_pebs) 755 release_pebs_buffer(cpu); 756 release_bts_buffer(cpu); 757 } 758 } 759 760 void reserve_ds_buffers(void) 761 { 762 int bts_err = 0, pebs_err = 0; 763 int cpu; 764 765 x86_pmu.bts_active = 0; 766 767 if (x86_pmu.ds_pebs) 768 x86_pmu.pebs_active = 0; 769 770 if (!x86_pmu.bts && !x86_pmu.ds_pebs) 771 return; 772 773 if (!x86_pmu.bts) 774 bts_err = 1; 775 776 if (!x86_pmu.ds_pebs) 777 pebs_err = 1; 778 779 for_each_possible_cpu(cpu) { 780 if (alloc_ds_buffer(cpu)) { 781 bts_err = 1; 782 pebs_err = 1; 783 } 784 785 if (!bts_err && alloc_bts_buffer(cpu)) 786 bts_err = 1; 787 788 if (x86_pmu.ds_pebs && !pebs_err && 789 alloc_pebs_buffer(cpu)) 790 pebs_err = 1; 791 792 if (bts_err && pebs_err) 793 break; 794 } 795 796 if (bts_err) { 797 for_each_possible_cpu(cpu) 798 release_bts_buffer(cpu); 799 } 800 801 if (x86_pmu.ds_pebs && pebs_err) { 802 for_each_possible_cpu(cpu) 803 release_pebs_buffer(cpu); 804 } 805 806 if (bts_err && pebs_err) { 807 for_each_possible_cpu(cpu) 808 release_ds_buffer(cpu); 809 } else { 810 if (x86_pmu.bts && !bts_err) 811 x86_pmu.bts_active = 1; 812 813 if (x86_pmu.ds_pebs && !pebs_err) 814 x86_pmu.pebs_active = 1; 815 816 for_each_possible_cpu(cpu) { 817 /* 818 * Ignores wrmsr_on_cpu() errors for offline CPUs they 819 * will get this call through intel_pmu_cpu_starting(). 
820 */ 821 init_debug_store_on_cpu(cpu); 822 } 823 } 824 } 825 826 /* 827 * BTS 828 */ 829 830 struct event_constraint bts_constraint = 831 EVENT_CONSTRAINT(0, 1ULL << INTEL_PMC_IDX_FIXED_BTS, 0); 832 833 void intel_pmu_enable_bts(u64 config) 834 { 835 unsigned long debugctlmsr; 836 837 debugctlmsr = get_debugctlmsr(); 838 839 debugctlmsr |= DEBUGCTLMSR_TR; 840 debugctlmsr |= DEBUGCTLMSR_BTS; 841 if (config & ARCH_PERFMON_EVENTSEL_INT) 842 debugctlmsr |= DEBUGCTLMSR_BTINT; 843 844 if (!(config & ARCH_PERFMON_EVENTSEL_OS)) 845 debugctlmsr |= DEBUGCTLMSR_BTS_OFF_OS; 846 847 if (!(config & ARCH_PERFMON_EVENTSEL_USR)) 848 debugctlmsr |= DEBUGCTLMSR_BTS_OFF_USR; 849 850 update_debugctlmsr(debugctlmsr); 851 } 852 853 void intel_pmu_disable_bts(void) 854 { 855 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 856 unsigned long debugctlmsr; 857 858 if (!cpuc->ds) 859 return; 860 861 debugctlmsr = get_debugctlmsr(); 862 863 debugctlmsr &= 864 ~(DEBUGCTLMSR_TR | DEBUGCTLMSR_BTS | DEBUGCTLMSR_BTINT | 865 DEBUGCTLMSR_BTS_OFF_OS | DEBUGCTLMSR_BTS_OFF_USR); 866 867 update_debugctlmsr(debugctlmsr); 868 } 869 870 int intel_pmu_drain_bts_buffer(void) 871 { 872 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 873 struct debug_store *ds = cpuc->ds; 874 struct bts_record { 875 u64 from; 876 u64 to; 877 u64 flags; 878 }; 879 struct perf_event *event = cpuc->events[INTEL_PMC_IDX_FIXED_BTS]; 880 struct bts_record *at, *base, *top; 881 struct perf_output_handle handle; 882 struct perf_event_header header; 883 struct perf_sample_data data; 884 unsigned long skip = 0; 885 struct pt_regs regs; 886 887 if (!event) 888 return 0; 889 890 if (!x86_pmu.bts_active) 891 return 0; 892 893 base = (struct bts_record *)(unsigned long)ds->bts_buffer_base; 894 top = (struct bts_record *)(unsigned long)ds->bts_index; 895 896 if (top <= base) 897 return 0; 898 899 memset(®s, 0, sizeof(regs)); 900 901 ds->bts_index = ds->bts_buffer_base; 902 903 perf_sample_data_init(&data, 0, event->hw.last_period); 904 905 /* 906 * BTS leaks kernel addresses in branches across the cpl boundary, 907 * such as traps or system calls, so unless the user is asking for 908 * kernel tracing (and right now it's not possible), we'd need to 909 * filter them out. But first we need to count how many of those we 910 * have in the current batch. This is an extra O(n) pass, however, 911 * it's much faster than the other one especially considering that 912 * n <= 2560 (BTS_BUFFER_SIZE / BTS_RECORD_SIZE * 15/16; see the 913 * alloc_bts_buffer()). 914 */ 915 for (at = base; at < top; at++) { 916 /* 917 * Note that right now *this* BTS code only works if 918 * attr::exclude_kernel is set, but let's keep this extra 919 * check here in case that changes. 920 */ 921 if (event->attr.exclude_kernel && 922 (kernel_ip(at->from) || kernel_ip(at->to))) 923 skip++; 924 } 925 926 /* 927 * Prepare a generic sample, i.e. fill in the invariant fields. 928 * We will overwrite the from and to address before we output 929 * the sample. 930 */ 931 rcu_read_lock(); 932 perf_prepare_sample(&data, event, ®s); 933 perf_prepare_header(&header, &data, event, ®s); 934 935 if (perf_output_begin(&handle, &data, event, 936 header.size * (top - base - skip))) 937 goto unlock; 938 939 for (at = base; at < top; at++) { 940 /* Filter out any records that contain kernel addresses. 
*/ 941 if (event->attr.exclude_kernel && 942 (kernel_ip(at->from) || kernel_ip(at->to))) 943 continue; 944 945 data.ip = at->from; 946 data.addr = at->to; 947 948 perf_output_sample(&handle, &header, &data, event); 949 } 950 951 perf_output_end(&handle); 952 953 /* There's new data available. */ 954 event->hw.interrupts++; 955 event->pending_kill = POLL_IN; 956 unlock: 957 rcu_read_unlock(); 958 return 1; 959 } 960 961 void intel_pmu_drain_pebs_buffer(void) 962 { 963 struct perf_sample_data data; 964 965 static_call(x86_pmu_drain_pebs)(NULL, &data); 966 } 967 968 /* 969 * PEBS 970 */ 971 struct event_constraint intel_core2_pebs_event_constraints[] = { 972 INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */ 973 INTEL_FLAGS_UEVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */ 974 INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */ 975 INTEL_FLAGS_UEVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETURED.ANY */ 976 INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */ 977 /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */ 978 INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x01), 979 EVENT_CONSTRAINT_END 980 }; 981 982 struct event_constraint intel_atom_pebs_event_constraints[] = { 983 INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */ 984 INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */ 985 INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */ 986 /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */ 987 INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x01), 988 /* Allow all events as PEBS with no flags */ 989 INTEL_ALL_EVENT_CONSTRAINT(0, 0x1), 990 EVENT_CONSTRAINT_END 991 }; 992 993 struct event_constraint intel_slm_pebs_event_constraints[] = { 994 /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */ 995 INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x1), 996 /* Allow all events as PEBS with no flags */ 997 INTEL_ALL_EVENT_CONSTRAINT(0, 0x1), 998 EVENT_CONSTRAINT_END 999 }; 1000 1001 struct event_constraint intel_glm_pebs_event_constraints[] = { 1002 /* Allow all events as PEBS with no flags */ 1003 INTEL_ALL_EVENT_CONSTRAINT(0, 0x1), 1004 EVENT_CONSTRAINT_END 1005 }; 1006 1007 struct event_constraint intel_grt_pebs_event_constraints[] = { 1008 /* Allow all events as PEBS with no flags */ 1009 INTEL_HYBRID_LAT_CONSTRAINT(0x5d0, 0x3), 1010 INTEL_HYBRID_LAT_CONSTRAINT(0x6d0, 0xf), 1011 EVENT_CONSTRAINT_END 1012 }; 1013 1014 struct event_constraint intel_nehalem_pebs_event_constraints[] = { 1015 INTEL_PLD_CONSTRAINT(0x100b, 0xf), /* MEM_INST_RETIRED.* */ 1016 INTEL_FLAGS_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */ 1017 INTEL_FLAGS_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */ 1018 INTEL_FLAGS_EVENT_CONSTRAINT(0xc0, 0xf), /* INST_RETIRED.ANY */ 1019 INTEL_EVENT_CONSTRAINT(0xc2, 0xf), /* UOPS_RETIRED.* */ 1020 INTEL_FLAGS_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */ 1021 INTEL_FLAGS_UEVENT_CONSTRAINT(0x02c5, 0xf), /* BR_MISP_RETIRED.NEAR_CALL */ 1022 INTEL_FLAGS_EVENT_CONSTRAINT(0xc7, 0xf), /* SSEX_UOPS_RETIRED.* */ 1023 INTEL_FLAGS_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */ 1024 INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */ 1025 INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */ 1026 /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). 
*/ 1027 INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f), 1028 EVENT_CONSTRAINT_END 1029 }; 1030 1031 struct event_constraint intel_westmere_pebs_event_constraints[] = { 1032 INTEL_PLD_CONSTRAINT(0x100b, 0xf), /* MEM_INST_RETIRED.* */ 1033 INTEL_FLAGS_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */ 1034 INTEL_FLAGS_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */ 1035 INTEL_FLAGS_EVENT_CONSTRAINT(0xc0, 0xf), /* INSTR_RETIRED.* */ 1036 INTEL_EVENT_CONSTRAINT(0xc2, 0xf), /* UOPS_RETIRED.* */ 1037 INTEL_FLAGS_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */ 1038 INTEL_FLAGS_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */ 1039 INTEL_FLAGS_EVENT_CONSTRAINT(0xc7, 0xf), /* SSEX_UOPS_RETIRED.* */ 1040 INTEL_FLAGS_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */ 1041 INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */ 1042 INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */ 1043 /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */ 1044 INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f), 1045 EVENT_CONSTRAINT_END 1046 }; 1047 1048 struct event_constraint intel_snb_pebs_event_constraints[] = { 1049 INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */ 1050 INTEL_PLD_CONSTRAINT(0x01cd, 0x8), /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */ 1051 INTEL_PST_CONSTRAINT(0x02cd, 0x8), /* MEM_TRANS_RETIRED.PRECISE_STORES */ 1052 /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */ 1053 INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf), 1054 INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */ 1055 INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */ 1056 INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */ 1057 INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */ 1058 /* Allow all events as PEBS with no flags */ 1059 INTEL_ALL_EVENT_CONSTRAINT(0, 0xf), 1060 EVENT_CONSTRAINT_END 1061 }; 1062 1063 struct event_constraint intel_ivb_pebs_event_constraints[] = { 1064 INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */ 1065 INTEL_PLD_CONSTRAINT(0x01cd, 0x8), /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */ 1066 INTEL_PST_CONSTRAINT(0x02cd, 0x8), /* MEM_TRANS_RETIRED.PRECISE_STORES */ 1067 /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */ 1068 INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf), 1069 /* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */ 1070 INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2), 1071 INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */ 1072 INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */ 1073 INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */ 1074 INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */ 1075 /* Allow all events as PEBS with no flags */ 1076 INTEL_ALL_EVENT_CONSTRAINT(0, 0xf), 1077 EVENT_CONSTRAINT_END 1078 }; 1079 1080 struct event_constraint intel_hsw_pebs_event_constraints[] = { 1081 INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */ 1082 INTEL_PLD_CONSTRAINT(0x01cd, 0xf), /* MEM_TRANS_RETIRED.* */ 1083 /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */ 1084 INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf), 1085 /* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). 
*/ 1086 INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2), 1087 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */ 1088 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */ 1089 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */ 1090 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x41d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_LOADS */ 1091 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x81d0, 0xf), /* MEM_UOPS_RETIRED.ALL_LOADS */ 1092 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(0x12d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_STORES */ 1093 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(0x42d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_STORES */ 1094 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(0x82d0, 0xf), /* MEM_UOPS_RETIRED.ALL_STORES */ 1095 INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */ 1096 INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(0xd2, 0xf), /* MEM_LOAD_UOPS_L3_HIT_RETIRED.* */ 1097 INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(0xd3, 0xf), /* MEM_LOAD_UOPS_L3_MISS_RETIRED.* */ 1098 /* Allow all events as PEBS with no flags */ 1099 INTEL_ALL_EVENT_CONSTRAINT(0, 0xf), 1100 EVENT_CONSTRAINT_END 1101 }; 1102 1103 struct event_constraint intel_bdw_pebs_event_constraints[] = { 1104 INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */ 1105 INTEL_PLD_CONSTRAINT(0x01cd, 0xf), /* MEM_TRANS_RETIRED.* */ 1106 /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */ 1107 INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf), 1108 /* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */ 1109 INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2), 1110 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */ 1111 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */ 1112 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */ 1113 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_LOADS */ 1114 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_UOPS_RETIRED.ALL_LOADS */ 1115 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_STORES */ 1116 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_STORES */ 1117 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_UOPS_RETIRED.ALL_STORES */ 1118 INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */ 1119 INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd2, 0xf), /* MEM_LOAD_UOPS_L3_HIT_RETIRED.* */ 1120 INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd3, 0xf), /* MEM_LOAD_UOPS_L3_MISS_RETIRED.* */ 1121 /* Allow all events as PEBS with no flags */ 1122 INTEL_ALL_EVENT_CONSTRAINT(0, 0xf), 1123 EVENT_CONSTRAINT_END 1124 }; 1125 1126 1127 struct event_constraint intel_skl_pebs_event_constraints[] = { 1128 INTEL_FLAGS_UEVENT_CONSTRAINT(0x1c0, 0x2), /* INST_RETIRED.PREC_DIST */ 1129 /* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */ 1130 INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2), 1131 /* INST_RETIRED.TOTAL_CYCLES_PS (inv=1, cmask=16) (cycles:p). 
*/ 1132 INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f), 1133 INTEL_PLD_CONSTRAINT(0x1cd, 0xf), /* MEM_TRANS_RETIRED.* */ 1134 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */ 1135 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */ 1136 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_INST_RETIRED.LOCK_LOADS */ 1137 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x22d0, 0xf), /* MEM_INST_RETIRED.LOCK_STORES */ 1138 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_INST_RETIRED.SPLIT_LOADS */ 1139 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_INST_RETIRED.SPLIT_STORES */ 1140 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_INST_RETIRED.ALL_LOADS */ 1141 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_INST_RETIRED.ALL_STORES */ 1142 INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd1, 0xf), /* MEM_LOAD_RETIRED.* */ 1143 INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd2, 0xf), /* MEM_LOAD_L3_HIT_RETIRED.* */ 1144 INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd3, 0xf), /* MEM_LOAD_L3_MISS_RETIRED.* */ 1145 /* Allow all events as PEBS with no flags */ 1146 INTEL_ALL_EVENT_CONSTRAINT(0, 0xf), 1147 EVENT_CONSTRAINT_END 1148 }; 1149 1150 struct event_constraint intel_icl_pebs_event_constraints[] = { 1151 INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x100000000ULL), /* old INST_RETIRED.PREC_DIST */ 1152 INTEL_FLAGS_UEVENT_CONSTRAINT(0x0100, 0x100000000ULL), /* INST_RETIRED.PREC_DIST */ 1153 INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x800000000ULL), /* SLOTS */ 1154 1155 INTEL_PLD_CONSTRAINT(0x1cd, 0xff), /* MEM_TRANS_RETIRED.LOAD_LATENCY */ 1156 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */ 1157 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */ 1158 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_INST_RETIRED.LOCK_LOADS */ 1159 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_INST_RETIRED.SPLIT_LOADS */ 1160 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_INST_RETIRED.SPLIT_STORES */ 1161 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_INST_RETIRED.ALL_LOADS */ 1162 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_INST_RETIRED.ALL_STORES */ 1163 1164 INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(0xd1, 0xd4, 0xf), /* MEM_LOAD_*_RETIRED.* */ 1165 1166 INTEL_FLAGS_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */ 1167 1168 /* 1169 * Everything else is handled by PMU_FL_PEBS_ALL, because we 1170 * need the full constraints from the main table. 
1171 */ 1172 1173 EVENT_CONSTRAINT_END 1174 }; 1175 1176 struct event_constraint intel_glc_pebs_event_constraints[] = { 1177 INTEL_FLAGS_UEVENT_CONSTRAINT(0x100, 0x100000000ULL), /* INST_RETIRED.PREC_DIST */ 1178 INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x800000000ULL), 1179 1180 INTEL_FLAGS_EVENT_CONSTRAINT(0xc0, 0xfe), 1181 INTEL_PLD_CONSTRAINT(0x1cd, 0xfe), 1182 INTEL_PSD_CONSTRAINT(0x2cd, 0x1), 1183 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */ 1184 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */ 1185 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_INST_RETIRED.LOCK_LOADS */ 1186 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_INST_RETIRED.SPLIT_LOADS */ 1187 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_INST_RETIRED.SPLIT_STORES */ 1188 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_INST_RETIRED.ALL_LOADS */ 1189 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_INST_RETIRED.ALL_STORES */ 1190 1191 INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(0xd1, 0xd4, 0xf), 1192 1193 INTEL_FLAGS_EVENT_CONSTRAINT(0xd0, 0xf), 1194 1195 /* 1196 * Everything else is handled by PMU_FL_PEBS_ALL, because we 1197 * need the full constraints from the main table. 1198 */ 1199 1200 EVENT_CONSTRAINT_END 1201 }; 1202 1203 struct event_constraint intel_lnc_pebs_event_constraints[] = { 1204 INTEL_FLAGS_UEVENT_CONSTRAINT(0x100, 0x100000000ULL), /* INST_RETIRED.PREC_DIST */ 1205 INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x800000000ULL), 1206 1207 INTEL_HYBRID_LDLAT_CONSTRAINT(0x1cd, 0x3fc), 1208 INTEL_HYBRID_STLAT_CONSTRAINT(0x2cd, 0x3), 1209 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */ 1210 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */ 1211 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_INST_RETIRED.LOCK_LOADS */ 1212 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_INST_RETIRED.SPLIT_LOADS */ 1213 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_INST_RETIRED.SPLIT_STORES */ 1214 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_INST_RETIRED.ALL_LOADS */ 1215 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_INST_RETIRED.ALL_STORES */ 1216 1217 INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(0xd1, 0xd4, 0xf), 1218 1219 INTEL_FLAGS_EVENT_CONSTRAINT(0xd0, 0xf), 1220 1221 /* 1222 * Everything else is handled by PMU_FL_PEBS_ALL, because we 1223 * need the full constraints from the main table. 1224 */ 1225 1226 EVENT_CONSTRAINT_END 1227 }; 1228 1229 struct event_constraint *intel_pebs_constraints(struct perf_event *event) 1230 { 1231 struct event_constraint *pebs_constraints = hybrid(event->pmu, pebs_constraints); 1232 struct event_constraint *c; 1233 1234 if (!event->attr.precise_ip) 1235 return NULL; 1236 1237 if (pebs_constraints) { 1238 for_each_event_constraint(c, pebs_constraints) { 1239 if (constraint_match(c, event->hw.config)) { 1240 event->hw.flags |= c->flags; 1241 return c; 1242 } 1243 } 1244 } 1245 1246 /* 1247 * Extended PEBS support 1248 * Makes the PEBS code search the normal constraints. 1249 */ 1250 if (x86_pmu.flags & PMU_FL_PEBS_ALL) 1251 return NULL; 1252 1253 return &emptyconstraint; 1254 } 1255 1256 /* 1257 * We need the sched_task callback even for per-cpu events when we use 1258 * the large interrupt threshold, such that we can provide PID and TID 1259 * to PEBS samples. 
1260 */ 1261 static inline bool pebs_needs_sched_cb(struct cpu_hw_events *cpuc) 1262 { 1263 if (cpuc->n_pebs == cpuc->n_pebs_via_pt) 1264 return false; 1265 1266 return cpuc->n_pebs && (cpuc->n_pebs == cpuc->n_large_pebs); 1267 } 1268 1269 void intel_pmu_pebs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in) 1270 { 1271 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 1272 1273 if (!sched_in && pebs_needs_sched_cb(cpuc)) 1274 intel_pmu_drain_pebs_buffer(); 1275 } 1276 1277 static inline void pebs_update_threshold(struct cpu_hw_events *cpuc) 1278 { 1279 struct debug_store *ds = cpuc->ds; 1280 int max_pebs_events = intel_pmu_max_num_pebs(cpuc->pmu); 1281 u64 threshold; 1282 int reserved; 1283 1284 if (cpuc->n_pebs_via_pt) 1285 return; 1286 1287 if (x86_pmu.flags & PMU_FL_PEBS_ALL) 1288 reserved = max_pebs_events + x86_pmu_max_num_counters_fixed(cpuc->pmu); 1289 else 1290 reserved = max_pebs_events; 1291 1292 if (cpuc->n_pebs == cpuc->n_large_pebs) { 1293 threshold = ds->pebs_absolute_maximum - 1294 reserved * cpuc->pebs_record_size; 1295 } else { 1296 threshold = ds->pebs_buffer_base + cpuc->pebs_record_size; 1297 } 1298 1299 ds->pebs_interrupt_threshold = threshold; 1300 } 1301 1302 #define PEBS_DATACFG_CNTRS(x) \ 1303 ((x >> PEBS_DATACFG_CNTR_SHIFT) & PEBS_DATACFG_CNTR_MASK) 1304 1305 #define PEBS_DATACFG_CNTR_BIT(x) \ 1306 (((1ULL << x) & PEBS_DATACFG_CNTR_MASK) << PEBS_DATACFG_CNTR_SHIFT) 1307 1308 #define PEBS_DATACFG_FIX(x) \ 1309 ((x >> PEBS_DATACFG_FIX_SHIFT) & PEBS_DATACFG_FIX_MASK) 1310 1311 #define PEBS_DATACFG_FIX_BIT(x) \ 1312 (((1ULL << (x)) & PEBS_DATACFG_FIX_MASK) \ 1313 << PEBS_DATACFG_FIX_SHIFT) 1314 1315 static void adaptive_pebs_record_size_update(void) 1316 { 1317 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 1318 u64 pebs_data_cfg = cpuc->pebs_data_cfg; 1319 int sz = sizeof(struct pebs_basic); 1320 1321 if (pebs_data_cfg & PEBS_DATACFG_MEMINFO) 1322 sz += sizeof(struct pebs_meminfo); 1323 if (pebs_data_cfg & PEBS_DATACFG_GP) 1324 sz += sizeof(struct pebs_gprs); 1325 if (pebs_data_cfg & PEBS_DATACFG_XMMS) 1326 sz += sizeof(struct pebs_xmm); 1327 if (pebs_data_cfg & PEBS_DATACFG_LBRS) 1328 sz += x86_pmu.lbr_nr * sizeof(struct lbr_entry); 1329 if (pebs_data_cfg & (PEBS_DATACFG_METRICS | PEBS_DATACFG_CNTR)) { 1330 sz += sizeof(struct pebs_cntr_header); 1331 1332 /* Metrics base and Metrics Data */ 1333 if (pebs_data_cfg & PEBS_DATACFG_METRICS) 1334 sz += 2 * sizeof(u64); 1335 1336 if (pebs_data_cfg & PEBS_DATACFG_CNTR) { 1337 sz += (hweight64(PEBS_DATACFG_CNTRS(pebs_data_cfg)) + 1338 hweight64(PEBS_DATACFG_FIX(pebs_data_cfg))) * 1339 sizeof(u64); 1340 } 1341 } 1342 1343 cpuc->pebs_record_size = sz; 1344 } 1345 1346 static void __intel_pmu_pebs_update_cfg(struct perf_event *event, 1347 int idx, u64 *pebs_data_cfg) 1348 { 1349 if (is_metric_event(event)) { 1350 *pebs_data_cfg |= PEBS_DATACFG_METRICS; 1351 return; 1352 } 1353 1354 *pebs_data_cfg |= PEBS_DATACFG_CNTR; 1355 1356 if (idx >= INTEL_PMC_IDX_FIXED) 1357 *pebs_data_cfg |= PEBS_DATACFG_FIX_BIT(idx - INTEL_PMC_IDX_FIXED); 1358 else 1359 *pebs_data_cfg |= PEBS_DATACFG_CNTR_BIT(idx); 1360 } 1361 1362 1363 void intel_pmu_pebs_late_setup(struct cpu_hw_events *cpuc) 1364 { 1365 struct perf_event *event; 1366 u64 pebs_data_cfg = 0; 1367 int i; 1368 1369 for (i = 0; i < cpuc->n_events; i++) { 1370 event = cpuc->event_list[i]; 1371 if (!is_pebs_counter_event_group(event)) 1372 continue; 1373 __intel_pmu_pebs_update_cfg(event, cpuc->assign[i], &pebs_data_cfg); 1374 } 1375 1376 if 
(pebs_data_cfg & ~cpuc->pebs_data_cfg) 1377 cpuc->pebs_data_cfg |= pebs_data_cfg | PEBS_UPDATE_DS_SW; 1378 } 1379 1380 #define PERF_PEBS_MEMINFO_TYPE (PERF_SAMPLE_ADDR | PERF_SAMPLE_DATA_SRC | \ 1381 PERF_SAMPLE_PHYS_ADDR | \ 1382 PERF_SAMPLE_WEIGHT_TYPE | \ 1383 PERF_SAMPLE_TRANSACTION | \ 1384 PERF_SAMPLE_DATA_PAGE_SIZE) 1385 1386 static u64 pebs_update_adaptive_cfg(struct perf_event *event) 1387 { 1388 struct perf_event_attr *attr = &event->attr; 1389 u64 sample_type = attr->sample_type; 1390 u64 pebs_data_cfg = 0; 1391 bool gprs, tsx_weight; 1392 1393 if (!(sample_type & ~(PERF_SAMPLE_IP|PERF_SAMPLE_TIME)) && 1394 attr->precise_ip > 1) 1395 return pebs_data_cfg; 1396 1397 if (sample_type & PERF_PEBS_MEMINFO_TYPE) 1398 pebs_data_cfg |= PEBS_DATACFG_MEMINFO; 1399 1400 /* 1401 * We need GPRs when: 1402 * + user requested them 1403 * + precise_ip < 2 for the non event IP 1404 * + For RTM TSX weight we need GPRs for the abort code. 1405 */ 1406 gprs = ((sample_type & PERF_SAMPLE_REGS_INTR) && 1407 (attr->sample_regs_intr & PEBS_GP_REGS)) || 1408 ((sample_type & PERF_SAMPLE_REGS_USER) && 1409 (attr->sample_regs_user & PEBS_GP_REGS)); 1410 1411 tsx_weight = (sample_type & PERF_SAMPLE_WEIGHT_TYPE) && 1412 ((attr->config & INTEL_ARCH_EVENT_MASK) == 1413 x86_pmu.rtm_abort_event); 1414 1415 if (gprs || (attr->precise_ip < 2) || tsx_weight) 1416 pebs_data_cfg |= PEBS_DATACFG_GP; 1417 1418 if ((sample_type & PERF_SAMPLE_REGS_INTR) && 1419 (attr->sample_regs_intr & PERF_REG_EXTENDED_MASK)) 1420 pebs_data_cfg |= PEBS_DATACFG_XMMS; 1421 1422 if (sample_type & PERF_SAMPLE_BRANCH_STACK) { 1423 /* 1424 * For now always log all LBRs. Could configure this 1425 * later. 1426 */ 1427 pebs_data_cfg |= PEBS_DATACFG_LBRS | 1428 ((x86_pmu.lbr_nr-1) << PEBS_DATACFG_LBR_SHIFT); 1429 } 1430 1431 return pebs_data_cfg; 1432 } 1433 1434 static void 1435 pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc, 1436 struct perf_event *event, bool add) 1437 { 1438 struct pmu *pmu = event->pmu; 1439 1440 /* 1441 * Make sure we get updated with the first PEBS event. 1442 * During removal, ->pebs_data_cfg is still valid for 1443 * the last PEBS event. Don't clear it. 1444 */ 1445 if ((cpuc->n_pebs == 1) && add) 1446 cpuc->pebs_data_cfg = PEBS_UPDATE_DS_SW; 1447 1448 if (needed_cb != pebs_needs_sched_cb(cpuc)) { 1449 if (!needed_cb) 1450 perf_sched_cb_inc(pmu); 1451 else 1452 perf_sched_cb_dec(pmu); 1453 1454 cpuc->pebs_data_cfg |= PEBS_UPDATE_DS_SW; 1455 } 1456 1457 /* 1458 * The PEBS record doesn't shrink on pmu::del(). Doing so would require 1459 * iterating all remaining PEBS events to reconstruct the config. 1460 */ 1461 if (x86_pmu.intel_cap.pebs_baseline && add) { 1462 u64 pebs_data_cfg; 1463 1464 pebs_data_cfg = pebs_update_adaptive_cfg(event); 1465 /* 1466 * Be sure to update the thresholds when we change the record. 
1467 */ 1468 if (pebs_data_cfg & ~cpuc->pebs_data_cfg) 1469 cpuc->pebs_data_cfg |= pebs_data_cfg | PEBS_UPDATE_DS_SW; 1470 } 1471 } 1472 1473 void intel_pmu_pebs_add(struct perf_event *event) 1474 { 1475 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 1476 struct hw_perf_event *hwc = &event->hw; 1477 bool needed_cb = pebs_needs_sched_cb(cpuc); 1478 1479 cpuc->n_pebs++; 1480 if (hwc->flags & PERF_X86_EVENT_LARGE_PEBS) 1481 cpuc->n_large_pebs++; 1482 if (hwc->flags & PERF_X86_EVENT_PEBS_VIA_PT) 1483 cpuc->n_pebs_via_pt++; 1484 1485 pebs_update_state(needed_cb, cpuc, event, true); 1486 } 1487 1488 static void intel_pmu_pebs_via_pt_disable(struct perf_event *event) 1489 { 1490 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 1491 1492 if (!is_pebs_pt(event)) 1493 return; 1494 1495 if (!(cpuc->pebs_enabled & ~PEBS_VIA_PT_MASK)) 1496 cpuc->pebs_enabled &= ~PEBS_VIA_PT_MASK; 1497 } 1498 1499 static void intel_pmu_pebs_via_pt_enable(struct perf_event *event) 1500 { 1501 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 1502 struct hw_perf_event *hwc = &event->hw; 1503 struct debug_store *ds = cpuc->ds; 1504 u64 value = ds->pebs_event_reset[hwc->idx]; 1505 u32 base = MSR_RELOAD_PMC0; 1506 unsigned int idx = hwc->idx; 1507 1508 if (!is_pebs_pt(event)) 1509 return; 1510 1511 if (!(event->hw.flags & PERF_X86_EVENT_LARGE_PEBS)) 1512 cpuc->pebs_enabled |= PEBS_PMI_AFTER_EACH_RECORD; 1513 1514 cpuc->pebs_enabled |= PEBS_OUTPUT_PT; 1515 1516 if (hwc->idx >= INTEL_PMC_IDX_FIXED) { 1517 base = MSR_RELOAD_FIXED_CTR0; 1518 idx = hwc->idx - INTEL_PMC_IDX_FIXED; 1519 if (x86_pmu.intel_cap.pebs_format < 5) 1520 value = ds->pebs_event_reset[MAX_PEBS_EVENTS_FMT4 + idx]; 1521 else 1522 value = ds->pebs_event_reset[MAX_PEBS_EVENTS + idx]; 1523 } 1524 wrmsrq(base + idx, value); 1525 } 1526 1527 static inline void intel_pmu_drain_large_pebs(struct cpu_hw_events *cpuc) 1528 { 1529 if (cpuc->n_pebs == cpuc->n_large_pebs && 1530 cpuc->n_pebs != cpuc->n_pebs_via_pt) 1531 intel_pmu_drain_pebs_buffer(); 1532 } 1533 1534 void intel_pmu_pebs_enable(struct perf_event *event) 1535 { 1536 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 1537 u64 pebs_data_cfg = cpuc->pebs_data_cfg & ~PEBS_UPDATE_DS_SW; 1538 struct hw_perf_event *hwc = &event->hw; 1539 struct debug_store *ds = cpuc->ds; 1540 unsigned int idx = hwc->idx; 1541 1542 hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT; 1543 1544 cpuc->pebs_enabled |= 1ULL << hwc->idx; 1545 1546 if ((event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT) && (x86_pmu.version < 5)) 1547 cpuc->pebs_enabled |= 1ULL << (hwc->idx + 32); 1548 else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST) 1549 cpuc->pebs_enabled |= 1ULL << 63; 1550 1551 if (x86_pmu.intel_cap.pebs_baseline) { 1552 hwc->config |= ICL_EVENTSEL_ADAPTIVE; 1553 if (pebs_data_cfg != cpuc->active_pebs_data_cfg) { 1554 /* 1555 * drain_pebs() assumes uniform record size; 1556 * hence we need to drain when changing said 1557 * size. 
1558 */ 1559 intel_pmu_drain_pebs_buffer(); 1560 adaptive_pebs_record_size_update(); 1561 wrmsrq(MSR_PEBS_DATA_CFG, pebs_data_cfg); 1562 cpuc->active_pebs_data_cfg = pebs_data_cfg; 1563 } 1564 } 1565 if (cpuc->pebs_data_cfg & PEBS_UPDATE_DS_SW) { 1566 cpuc->pebs_data_cfg = pebs_data_cfg; 1567 pebs_update_threshold(cpuc); 1568 } 1569 1570 if (idx >= INTEL_PMC_IDX_FIXED) { 1571 if (x86_pmu.intel_cap.pebs_format < 5) 1572 idx = MAX_PEBS_EVENTS_FMT4 + (idx - INTEL_PMC_IDX_FIXED); 1573 else 1574 idx = MAX_PEBS_EVENTS + (idx - INTEL_PMC_IDX_FIXED); 1575 } 1576 1577 /* 1578 * Use auto-reload if possible to save a MSR write in the PMI. 1579 * This must be done in pmu::start(), because PERF_EVENT_IOC_PERIOD. 1580 */ 1581 if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) { 1582 ds->pebs_event_reset[idx] = 1583 (u64)(-hwc->sample_period) & x86_pmu.cntval_mask; 1584 } else { 1585 ds->pebs_event_reset[idx] = 0; 1586 } 1587 1588 intel_pmu_pebs_via_pt_enable(event); 1589 } 1590 1591 void intel_pmu_pebs_del(struct perf_event *event) 1592 { 1593 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 1594 struct hw_perf_event *hwc = &event->hw; 1595 bool needed_cb = pebs_needs_sched_cb(cpuc); 1596 1597 cpuc->n_pebs--; 1598 if (hwc->flags & PERF_X86_EVENT_LARGE_PEBS) 1599 cpuc->n_large_pebs--; 1600 if (hwc->flags & PERF_X86_EVENT_PEBS_VIA_PT) 1601 cpuc->n_pebs_via_pt--; 1602 1603 pebs_update_state(needed_cb, cpuc, event, false); 1604 } 1605 1606 void intel_pmu_pebs_disable(struct perf_event *event) 1607 { 1608 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 1609 struct hw_perf_event *hwc = &event->hw; 1610 1611 intel_pmu_drain_large_pebs(cpuc); 1612 1613 cpuc->pebs_enabled &= ~(1ULL << hwc->idx); 1614 1615 if ((event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT) && 1616 (x86_pmu.version < 5)) 1617 cpuc->pebs_enabled &= ~(1ULL << (hwc->idx + 32)); 1618 else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST) 1619 cpuc->pebs_enabled &= ~(1ULL << 63); 1620 1621 intel_pmu_pebs_via_pt_disable(event); 1622 1623 if (cpuc->enabled) 1624 wrmsrq(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled); 1625 1626 hwc->config |= ARCH_PERFMON_EVENTSEL_INT; 1627 } 1628 1629 void intel_pmu_pebs_enable_all(void) 1630 { 1631 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 1632 1633 if (cpuc->pebs_enabled) 1634 wrmsrq(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled); 1635 } 1636 1637 void intel_pmu_pebs_disable_all(void) 1638 { 1639 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 1640 1641 if (cpuc->pebs_enabled) 1642 __intel_pmu_pebs_disable_all(); 1643 } 1644 1645 static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs) 1646 { 1647 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 1648 unsigned long from = cpuc->lbr_entries[0].from; 1649 unsigned long old_to, to = cpuc->lbr_entries[0].to; 1650 unsigned long ip = regs->ip; 1651 int is_64bit = 0; 1652 void *kaddr; 1653 int size; 1654 1655 /* 1656 * We don't need to fixup if the PEBS assist is fault like 1657 */ 1658 if (!x86_pmu.intel_cap.pebs_trap) 1659 return 1; 1660 1661 /* 1662 * No LBR entry, no basic block, no rewinding 1663 */ 1664 if (!cpuc->lbr_stack.nr || !from || !to) 1665 return 0; 1666 1667 /* 1668 * Basic blocks should never cross user/kernel boundaries 1669 */ 1670 if (kernel_ip(ip) != kernel_ip(to)) 1671 return 0; 1672 1673 /* 1674 * unsigned math, either ip is before the start (impossible) or 1675 * the basic block is larger than 1 page (sanity) 1676 */ 1677 if ((ip - to) > PEBS_FIXUP_SIZE) 1678 return 0; 1679 1680 /* 1681 * We sampled a branch insn, 
rewind using the LBR stack 1682 */ 1683 if (ip == to) { 1684 set_linear_ip(regs, from); 1685 return 1; 1686 } 1687 1688 size = ip - to; 1689 if (!kernel_ip(ip)) { 1690 int bytes; 1691 u8 *buf = this_cpu_read(insn_buffer); 1692 1693 /* 'size' must fit our buffer, see above */ 1694 bytes = copy_from_user_nmi(buf, (void __user *)to, size); 1695 if (bytes != 0) 1696 return 0; 1697 1698 kaddr = buf; 1699 } else { 1700 kaddr = (void *)to; 1701 } 1702 1703 do { 1704 struct insn insn; 1705 1706 old_to = to; 1707 1708 #ifdef CONFIG_X86_64 1709 is_64bit = kernel_ip(to) || any_64bit_mode(regs); 1710 #endif 1711 insn_init(&insn, kaddr, size, is_64bit); 1712 1713 /* 1714 * Make sure there was not a problem decoding the instruction. 1715 * This is doubly important because we have an infinite loop if 1716 * insn.length=0. 1717 */ 1718 if (insn_get_length(&insn)) 1719 break; 1720 1721 to += insn.length; 1722 kaddr += insn.length; 1723 size -= insn.length; 1724 } while (to < ip); 1725 1726 if (to == ip) { 1727 set_linear_ip(regs, old_to); 1728 return 1; 1729 } 1730 1731 /* 1732 * Even though we decoded the basic block, the instruction stream 1733 * never matched the given IP, either the TO or the IP got corrupted. 1734 */ 1735 return 0; 1736 } 1737 1738 static inline u64 intel_get_tsx_weight(u64 tsx_tuning) 1739 { 1740 if (tsx_tuning) { 1741 union hsw_tsx_tuning tsx = { .value = tsx_tuning }; 1742 return tsx.cycles_last_block; 1743 } 1744 return 0; 1745 } 1746 1747 static inline u64 intel_get_tsx_transaction(u64 tsx_tuning, u64 ax) 1748 { 1749 u64 txn = (tsx_tuning & PEBS_HSW_TSX_FLAGS) >> 32; 1750 1751 /* For RTM XABORTs also log the abort code from AX */ 1752 if ((txn & PERF_TXN_TRANSACTION) && (ax & 1)) 1753 txn |= ((ax >> 24) & 0xff) << PERF_TXN_ABORT_SHIFT; 1754 return txn; 1755 } 1756 1757 static inline u64 get_pebs_status(void *n) 1758 { 1759 if (x86_pmu.intel_cap.pebs_format < 4) 1760 return ((struct pebs_record_nhm *)n)->status; 1761 return ((struct pebs_basic *)n)->applicable_counters; 1762 } 1763 1764 #define PERF_X86_EVENT_PEBS_HSW_PREC \ 1765 (PERF_X86_EVENT_PEBS_ST_HSW | \ 1766 PERF_X86_EVENT_PEBS_LD_HSW | \ 1767 PERF_X86_EVENT_PEBS_NA_HSW) 1768 1769 static u64 get_data_src(struct perf_event *event, u64 aux) 1770 { 1771 u64 val = PERF_MEM_NA; 1772 int fl = event->hw.flags; 1773 bool fst = fl & (PERF_X86_EVENT_PEBS_ST | PERF_X86_EVENT_PEBS_HSW_PREC); 1774 1775 if (fl & PERF_X86_EVENT_PEBS_LDLAT) 1776 val = load_latency_data(event, aux); 1777 else if (fl & PERF_X86_EVENT_PEBS_STLAT) 1778 val = store_latency_data(event, aux); 1779 else if (fl & PERF_X86_EVENT_PEBS_LAT_HYBRID) 1780 val = x86_pmu.pebs_latency_data(event, aux); 1781 else if (fst && (fl & PERF_X86_EVENT_PEBS_HSW_PREC)) 1782 val = precise_datala_hsw(event, aux); 1783 else if (fst) 1784 val = precise_store_data(aux); 1785 return val; 1786 } 1787 1788 static void setup_pebs_time(struct perf_event *event, 1789 struct perf_sample_data *data, 1790 u64 tsc) 1791 { 1792 /* Converting to a user-defined clock is not supported yet. */ 1793 if (event->attr.use_clockid != 0) 1794 return; 1795 1796 /* 1797 * Doesn't support the conversion when the TSC is unstable. 1798 * The TSC unstable case is a corner case and very unlikely to 1799 * happen. If it happens, the TSC in a PEBS record will be 1800 * dropped and fall back to perf_event_clock(). 
1801 */ 1802 if (!using_native_sched_clock() || !sched_clock_stable()) 1803 return; 1804 1805 data->time = native_sched_clock_from_tsc(tsc) + __sched_clock_offset; 1806 data->sample_flags |= PERF_SAMPLE_TIME; 1807 } 1808 1809 #define PERF_SAMPLE_ADDR_TYPE (PERF_SAMPLE_ADDR | \ 1810 PERF_SAMPLE_PHYS_ADDR | \ 1811 PERF_SAMPLE_DATA_PAGE_SIZE) 1812 1813 static void setup_pebs_fixed_sample_data(struct perf_event *event, 1814 struct pt_regs *iregs, void *__pebs, 1815 struct perf_sample_data *data, 1816 struct pt_regs *regs) 1817 { 1818 /* 1819 * We cast to the biggest pebs_record but are careful not to 1820 * unconditionally access the 'extra' entries. 1821 */ 1822 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 1823 struct pebs_record_skl *pebs = __pebs; 1824 u64 sample_type; 1825 int fll; 1826 1827 if (pebs == NULL) 1828 return; 1829 1830 sample_type = event->attr.sample_type; 1831 fll = event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT; 1832 1833 perf_sample_data_init(data, 0, event->hw.last_period); 1834 1835 /* 1836 * Use latency for weight (only avail with PEBS-LL) 1837 */ 1838 if (fll && (sample_type & PERF_SAMPLE_WEIGHT_TYPE)) { 1839 data->weight.full = pebs->lat; 1840 data->sample_flags |= PERF_SAMPLE_WEIGHT_TYPE; 1841 } 1842 1843 /* 1844 * data.data_src encodes the data source 1845 */ 1846 if (sample_type & PERF_SAMPLE_DATA_SRC) { 1847 data->data_src.val = get_data_src(event, pebs->dse); 1848 data->sample_flags |= PERF_SAMPLE_DATA_SRC; 1849 } 1850 1851 /* 1852 * We must however always use iregs for the unwinder to stay sane; the 1853 * record BP,SP,IP can point into thin air when the record is from a 1854 * previous PMI context or an (I)RET happened between the record and 1855 * PMI. 1856 */ 1857 perf_sample_save_callchain(data, event, iregs); 1858 1859 /* 1860 * We use the interrupt regs as a base because the PEBS record does not 1861 * contain a full regs set, specifically it seems to lack segment 1862 * descriptors, which get used by things like user_mode(). 1863 * 1864 * In the simple case fix up only the IP for PERF_SAMPLE_IP. 1865 */ 1866 *regs = *iregs; 1867 1868 /* 1869 * Initialize regs->flags from PEBS, 1870 * Clear exact bit (which uses x86 EFLAGS Reserved bit 3), 1871 * i.e., do not rely on it being zero: 1872 */ 1873 regs->flags = pebs->flags & ~PERF_EFLAGS_EXACT; 1874 1875 if (sample_type & PERF_SAMPLE_REGS_INTR) { 1876 regs->ax = pebs->ax; 1877 regs->bx = pebs->bx; 1878 regs->cx = pebs->cx; 1879 regs->dx = pebs->dx; 1880 regs->si = pebs->si; 1881 regs->di = pebs->di; 1882 1883 regs->bp = pebs->bp; 1884 regs->sp = pebs->sp; 1885 1886 #ifndef CONFIG_X86_32 1887 regs->r8 = pebs->r8; 1888 regs->r9 = pebs->r9; 1889 regs->r10 = pebs->r10; 1890 regs->r11 = pebs->r11; 1891 regs->r12 = pebs->r12; 1892 regs->r13 = pebs->r13; 1893 regs->r14 = pebs->r14; 1894 regs->r15 = pebs->r15; 1895 #endif 1896 } 1897 1898 if (event->attr.precise_ip > 1) { 1899 /* 1900 * Haswell and later processors have an 'eventing IP' 1901 * (real IP) which fixes the off-by-1 skid in hardware. 1902 * Use it when precise_ip >= 2: 1903 */ 1904 if (x86_pmu.intel_cap.pebs_format >= 2) { 1905 set_linear_ip(regs, pebs->real_ip); 1906 regs->flags |= PERF_EFLAGS_EXACT; 1907 } else { 1908 /* Otherwise, use PEBS off-by-1 IP: */ 1909 set_linear_ip(regs, pebs->ip); 1910 1911 /* 1912 * With precise_ip >= 2, try to fix up the off-by-1 IP 1913 * using the LBR.
If successful, the fixup function 1914 * corrects regs->ip and calls set_linear_ip() on regs: 1915 */ 1916 if (intel_pmu_pebs_fixup_ip(regs)) 1917 regs->flags |= PERF_EFLAGS_EXACT; 1918 } 1919 } else { 1920 /* 1921 * When precise_ip == 1, return the PEBS off-by-1 IP, 1922 * no fixup attempted: 1923 */ 1924 set_linear_ip(regs, pebs->ip); 1925 } 1926 1927 1928 if ((sample_type & PERF_SAMPLE_ADDR_TYPE) && 1929 x86_pmu.intel_cap.pebs_format >= 1) { 1930 data->addr = pebs->dla; 1931 data->sample_flags |= PERF_SAMPLE_ADDR; 1932 } 1933 1934 if (x86_pmu.intel_cap.pebs_format >= 2) { 1935 /* Only set the TSX weight when no memory weight. */ 1936 if ((sample_type & PERF_SAMPLE_WEIGHT_TYPE) && !fll) { 1937 data->weight.full = intel_get_tsx_weight(pebs->tsx_tuning); 1938 data->sample_flags |= PERF_SAMPLE_WEIGHT_TYPE; 1939 } 1940 if (sample_type & PERF_SAMPLE_TRANSACTION) { 1941 data->txn = intel_get_tsx_transaction(pebs->tsx_tuning, 1942 pebs->ax); 1943 data->sample_flags |= PERF_SAMPLE_TRANSACTION; 1944 } 1945 } 1946 1947 /* 1948 * v3 supplies an accurate time stamp, so we use that 1949 * for the time stamp. 1950 * 1951 * We can only do this for the default trace clock. 1952 */ 1953 if (x86_pmu.intel_cap.pebs_format >= 3) 1954 setup_pebs_time(event, data, pebs->tsc); 1955 1956 perf_sample_save_brstack(data, event, &cpuc->lbr_stack, NULL); 1957 } 1958 1959 static void adaptive_pebs_save_regs(struct pt_regs *regs, 1960 struct pebs_gprs *gprs) 1961 { 1962 regs->ax = gprs->ax; 1963 regs->bx = gprs->bx; 1964 regs->cx = gprs->cx; 1965 regs->dx = gprs->dx; 1966 regs->si = gprs->si; 1967 regs->di = gprs->di; 1968 regs->bp = gprs->bp; 1969 regs->sp = gprs->sp; 1970 #ifndef CONFIG_X86_32 1971 regs->r8 = gprs->r8; 1972 regs->r9 = gprs->r9; 1973 regs->r10 = gprs->r10; 1974 regs->r11 = gprs->r11; 1975 regs->r12 = gprs->r12; 1976 regs->r13 = gprs->r13; 1977 regs->r14 = gprs->r14; 1978 regs->r15 = gprs->r15; 1979 #endif 1980 } 1981 1982 static void intel_perf_event_update_pmc(struct perf_event *event, u64 pmc) 1983 { 1984 int shift = 64 - x86_pmu.cntval_bits; 1985 struct hw_perf_event *hwc; 1986 u64 delta, prev_pmc; 1987 1988 /* 1989 * A recorded counter may not have an assigned event in the 1990 * following cases. The value should be dropped. 1991 * - An event is deleted. There is still an active PEBS event. 1992 * The PEBS record doesn't shrink on pmu::del(). 1993 * If the counter of the deleted event once occurred in a PEBS 1994 * record, PEBS still records the counter until the counter is 1995 * reassigned. 1996 * - An event is stopped for some reason, e.g., throttled. 1997 * During this period, another event is added and takes the 1998 * counter of the stopped event. The stopped event is assigned 1999 * to another new and uninitialized counter, since the 2000 * x86_pmu_start(RELOAD) is not invoked for a stopped event. 2001 * The PEBS_DATA_CFG is updated regardless of the event state. 2002 * The uninitialized counter can be recorded in a PEBS record. 2003 * But the cpuc->events[uninitialized_counter] is always NULL, 2004 * because the event is stopped. The uninitialized value is 2005 * safely dropped.
2006 */ 2007 if (!event) 2008 return; 2009 2010 hwc = &event->hw; 2011 prev_pmc = local64_read(&hwc->prev_count); 2012 2013 /* Only update the count when the PMU is disabled */ 2014 WARN_ON(this_cpu_read(cpu_hw_events.enabled)); 2015 local64_set(&hwc->prev_count, pmc); 2016 2017 delta = (pmc << shift) - (prev_pmc << shift); 2018 delta >>= shift; 2019 2020 local64_add(delta, &event->count); 2021 local64_sub(delta, &hwc->period_left); 2022 } 2023 2024 static inline void __setup_pebs_counter_group(struct cpu_hw_events *cpuc, 2025 struct perf_event *event, 2026 struct pebs_cntr_header *cntr, 2027 void *next_record) 2028 { 2029 int bit; 2030 2031 for_each_set_bit(bit, (unsigned long *)&cntr->cntr, INTEL_PMC_MAX_GENERIC) { 2032 intel_perf_event_update_pmc(cpuc->events[bit], *(u64 *)next_record); 2033 next_record += sizeof(u64); 2034 } 2035 2036 for_each_set_bit(bit, (unsigned long *)&cntr->fixed, INTEL_PMC_MAX_FIXED) { 2037 /* The slots event will be handled with perf_metric later */ 2038 if ((cntr->metrics == INTEL_CNTR_METRICS) && 2039 (bit + INTEL_PMC_IDX_FIXED == INTEL_PMC_IDX_FIXED_SLOTS)) { 2040 next_record += sizeof(u64); 2041 continue; 2042 } 2043 intel_perf_event_update_pmc(cpuc->events[bit + INTEL_PMC_IDX_FIXED], 2044 *(u64 *)next_record); 2045 next_record += sizeof(u64); 2046 } 2047 2048 /* HW will reload the value right after the overflow. */ 2049 if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD) 2050 local64_set(&event->hw.prev_count, (u64)-event->hw.sample_period); 2051 2052 if (cntr->metrics == INTEL_CNTR_METRICS) { 2053 static_call(intel_pmu_update_topdown_event) 2054 (cpuc->events[INTEL_PMC_IDX_FIXED_SLOTS], 2055 (u64 *)next_record); 2056 next_record += 2 * sizeof(u64); 2057 } 2058 } 2059 2060 #define PEBS_LATENCY_MASK 0xffff 2061 2062 /* 2063 * With adaptive PEBS the layout depends on what fields are configured. 2064 */ 2065 static void setup_pebs_adaptive_sample_data(struct perf_event *event, 2066 struct pt_regs *iregs, void *__pebs, 2067 struct perf_sample_data *data, 2068 struct pt_regs *regs) 2069 { 2070 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2071 struct pebs_basic *basic = __pebs; 2072 void *next_record = basic + 1; 2073 u64 sample_type, format_group; 2074 struct pebs_meminfo *meminfo = NULL; 2075 struct pebs_gprs *gprs = NULL; 2076 struct x86_perf_regs *perf_regs; 2077 2078 if (basic == NULL) 2079 return; 2080 2081 perf_regs = container_of(regs, struct x86_perf_regs, regs); 2082 perf_regs->xmm_regs = NULL; 2083 2084 sample_type = event->attr.sample_type; 2085 format_group = basic->format_group; 2086 perf_sample_data_init(data, 0, event->hw.last_period); 2087 2088 setup_pebs_time(event, data, basic->tsc); 2089 2090 /* 2091 * We must however always use iregs for the unwinder to stay sane; the 2092 * record BP,SP,IP can point into thin air when the record is from a 2093 * previous PMI context or an (I)RET happened between the record and 2094 * PMI. 2095 */ 2096 perf_sample_save_callchain(data, event, iregs); 2097 2098 *regs = *iregs; 2099 /* The ip in basic is EventingIP */ 2100 set_linear_ip(regs, basic->ip); 2101 regs->flags = PERF_EFLAGS_EXACT; 2102 2103 if (sample_type & PERF_SAMPLE_WEIGHT_STRUCT) { 2104 if (x86_pmu.flags & PMU_FL_RETIRE_LATENCY) 2105 data->weight.var3_w = basic->retire_latency; 2106 else 2107 data->weight.var3_w = 0; 2108 } 2109 2110 /* 2111 * The record for MEMINFO is in front of GP 2112 * But PERF_SAMPLE_TRANSACTION needs gprs->ax. 2113 * Save the pointer here but process later. 
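 *
 * For reference, the optional groups are laid out in the record in the
 * order they are parsed below, whenever the matching PEBS_DATACFG_* bit
 * is set: pebs_basic, then MEMINFO, the GP registers, the XMM registers,
 * the LBR entries and finally the counter/metrics snapshot.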
2114 */ 2115 if (format_group & PEBS_DATACFG_MEMINFO) { 2116 meminfo = next_record; 2117 next_record = meminfo + 1; 2118 } 2119 2120 if (format_group & PEBS_DATACFG_GP) { 2121 gprs = next_record; 2122 next_record = gprs + 1; 2123 2124 if (event->attr.precise_ip < 2) { 2125 set_linear_ip(regs, gprs->ip); 2126 regs->flags &= ~PERF_EFLAGS_EXACT; 2127 } 2128 2129 if (sample_type & (PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER)) 2130 adaptive_pebs_save_regs(regs, gprs); 2131 } 2132 2133 if (format_group & PEBS_DATACFG_MEMINFO) { 2134 if (sample_type & PERF_SAMPLE_WEIGHT_TYPE) { 2135 u64 latency = x86_pmu.flags & PMU_FL_INSTR_LATENCY ? 2136 meminfo->cache_latency : meminfo->mem_latency; 2137 2138 if (x86_pmu.flags & PMU_FL_INSTR_LATENCY) 2139 data->weight.var2_w = meminfo->instr_latency; 2140 2141 /* 2142 * Although meminfo::latency is defined as a u64, 2143 * only the lower 32 bits include the valid data 2144 * in practice on Ice Lake and earlier platforms. 2145 */ 2146 if (sample_type & PERF_SAMPLE_WEIGHT) { 2147 data->weight.full = latency ?: 2148 intel_get_tsx_weight(meminfo->tsx_tuning); 2149 } else { 2150 data->weight.var1_dw = (u32)latency ?: 2151 intel_get_tsx_weight(meminfo->tsx_tuning); 2152 } 2153 2154 data->sample_flags |= PERF_SAMPLE_WEIGHT_TYPE; 2155 } 2156 2157 if (sample_type & PERF_SAMPLE_DATA_SRC) { 2158 data->data_src.val = get_data_src(event, meminfo->aux); 2159 data->sample_flags |= PERF_SAMPLE_DATA_SRC; 2160 } 2161 2162 if (sample_type & PERF_SAMPLE_ADDR_TYPE) { 2163 data->addr = meminfo->address; 2164 data->sample_flags |= PERF_SAMPLE_ADDR; 2165 } 2166 2167 if (sample_type & PERF_SAMPLE_TRANSACTION) { 2168 data->txn = intel_get_tsx_transaction(meminfo->tsx_tuning, 2169 gprs ? gprs->ax : 0); 2170 data->sample_flags |= PERF_SAMPLE_TRANSACTION; 2171 } 2172 } 2173 2174 if (format_group & PEBS_DATACFG_XMMS) { 2175 struct pebs_xmm *xmm = next_record; 2176 2177 next_record = xmm + 1; 2178 perf_regs->xmm_regs = xmm->xmm; 2179 } 2180 2181 if (format_group & PEBS_DATACFG_LBRS) { 2182 struct lbr_entry *lbr = next_record; 2183 int num_lbr = ((format_group >> PEBS_DATACFG_LBR_SHIFT) 2184 & 0xff) + 1; 2185 next_record = next_record + num_lbr * sizeof(struct lbr_entry); 2186 2187 if (has_branch_stack(event)) { 2188 intel_pmu_store_pebs_lbrs(lbr); 2189 intel_pmu_lbr_save_brstack(data, cpuc, event); 2190 } 2191 } 2192 2193 if (format_group & (PEBS_DATACFG_CNTR | PEBS_DATACFG_METRICS)) { 2194 struct pebs_cntr_header *cntr = next_record; 2195 unsigned int nr; 2196 2197 next_record += sizeof(struct pebs_cntr_header); 2198 /* 2199 * The PEBS_DATA_CFG is a global register, which is the 2200 * superset configuration for all PEBS events. 2201 * For the PEBS record of non-sample-read group, ignore 2202 * the counter snapshot fields. 
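 *
 * For example, if only one sample-read group on this CPU requested the
 * counter snapshot, every PEBS record still carries the counter values;
 * an event outside such a group merely skips over the nr * sizeof(u64)
 * payload computed below.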
2203 */ 2204 if (is_pebs_counter_event_group(event)) { 2205 __setup_pebs_counter_group(cpuc, event, cntr, next_record); 2206 data->sample_flags |= PERF_SAMPLE_READ; 2207 } 2208 2209 nr = hweight32(cntr->cntr) + hweight32(cntr->fixed); 2210 if (cntr->metrics == INTEL_CNTR_METRICS) 2211 nr += 2; 2212 next_record += nr * sizeof(u64); 2213 } 2214 2215 WARN_ONCE(next_record != __pebs + basic->format_size, 2216 "PEBS record size %u, expected %llu, config %llx\n", 2217 basic->format_size, 2218 (u64)(next_record - __pebs), 2219 format_group); 2220 } 2221 2222 static inline void * 2223 get_next_pebs_record_by_bit(void *base, void *top, int bit) 2224 { 2225 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2226 void *at; 2227 u64 pebs_status; 2228 2229 /* 2230 * fmt0 does not have a status bitfield (does not use 2231 * perf_record_nhm format) 2232 */ 2233 if (x86_pmu.intel_cap.pebs_format < 1) 2234 return base; 2235 2236 if (base == NULL) 2237 return NULL; 2238 2239 for (at = base; at < top; at += cpuc->pebs_record_size) { 2240 unsigned long status = get_pebs_status(at); 2241 2242 if (test_bit(bit, (unsigned long *)&status)) { 2243 /* PEBS v3 has accurate status bits */ 2244 if (x86_pmu.intel_cap.pebs_format >= 3) 2245 return at; 2246 2247 if (status == (1 << bit)) 2248 return at; 2249 2250 /* clear non-PEBS bit and re-check */ 2251 pebs_status = status & cpuc->pebs_enabled; 2252 pebs_status &= PEBS_COUNTER_MASK; 2253 if (pebs_status == (1 << bit)) 2254 return at; 2255 } 2256 } 2257 return NULL; 2258 } 2259 2260 /* 2261 * Special variant of intel_pmu_save_and_restart() for auto-reload. 2262 */ 2263 static int 2264 intel_pmu_save_and_restart_reload(struct perf_event *event, int count) 2265 { 2266 struct hw_perf_event *hwc = &event->hw; 2267 int shift = 64 - x86_pmu.cntval_bits; 2268 u64 period = hwc->sample_period; 2269 u64 prev_raw_count, new_raw_count; 2270 s64 new, old; 2271 2272 WARN_ON(!period); 2273 2274 /* 2275 * drain_pebs() only happens when the PMU is disabled. 2276 */ 2277 WARN_ON(this_cpu_read(cpu_hw_events.enabled)); 2278 2279 prev_raw_count = local64_read(&hwc->prev_count); 2280 new_raw_count = rdpmc(hwc->event_base_rdpmc); 2281 local64_set(&hwc->prev_count, new_raw_count); 2282 2283 /* 2284 * Since the counter increments a negative counter value and 2285 * overflows on the sign switch, giving the interval: 2286 * 2287 * [-period, 0] 2288 * 2289 * the difference between two consecutive reads is: 2290 * 2291 * A) value2 - value1; 2292 * when no overflows have happened in between, 2293 * 2294 * B) (0 - value1) + (value2 - (-period)); 2295 * when one overflow happened in between, 2296 * 2297 * C) (0 - value1) + (n - 1) * (period) + (value2 - (-period)); 2298 * when @n overflows happened in between. 2299 * 2300 * Here A) is the obvious difference, B) is the extension to the 2301 * discrete interval, where the first term is to the top of the 2302 * interval and the second term is from the bottom of the next 2303 * interval and C) the extension to multiple intervals, where the 2304 * middle term is the whole intervals covered. 
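 *
 * A worked example with illustrative (not measured) numbers for case B):
 * with period = 100 the counter runs over [-100, 0]; if value1 == -40,
 * one overflow happens and value2 == -90, then
 *
 *   (0 - (-40)) + ((-90) - (-100)) = 40 + 10 = 50
 *
 * events elapsed between the two reads.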
2305 * 2306 * An equivalent of C, by reduction, is: 2307 * 2308 * value2 - value1 + n * period 2309 */ 2310 new = ((s64)(new_raw_count << shift) >> shift); 2311 old = ((s64)(prev_raw_count << shift) >> shift); 2312 local64_add(new - old + count * period, &event->count); 2313 2314 local64_set(&hwc->period_left, -new); 2315 2316 perf_event_update_userpage(event); 2317 2318 return 0; 2319 } 2320 2321 typedef void (*setup_fn)(struct perf_event *, struct pt_regs *, void *, 2322 struct perf_sample_data *, struct pt_regs *); 2323 2324 static struct pt_regs dummy_iregs; 2325 2326 static __always_inline void 2327 __intel_pmu_pebs_event(struct perf_event *event, 2328 struct pt_regs *iregs, 2329 struct pt_regs *regs, 2330 struct perf_sample_data *data, 2331 void *at, 2332 setup_fn setup_sample) 2333 { 2334 setup_sample(event, iregs, at, data, regs); 2335 perf_event_output(event, data, regs); 2336 } 2337 2338 static __always_inline void 2339 __intel_pmu_pebs_last_event(struct perf_event *event, 2340 struct pt_regs *iregs, 2341 struct pt_regs *regs, 2342 struct perf_sample_data *data, 2343 void *at, 2344 int count, 2345 setup_fn setup_sample) 2346 { 2347 struct hw_perf_event *hwc = &event->hw; 2348 2349 setup_sample(event, iregs, at, data, regs); 2350 if (iregs == &dummy_iregs) { 2351 /* 2352 * The PEBS records may be drained in the non-overflow context, 2353 * e.g., large PEBS + context switch. Perf should treat the 2354 * last record the same as other PEBS records and not 2355 * invoke the generic overflow handler. 2356 */ 2357 perf_event_output(event, data, regs); 2358 } else { 2359 /* 2360 * All but the last records are processed. 2361 * The last one is left to be able to call the overflow handler. 2362 */ 2363 perf_event_overflow(event, data, regs); 2364 } 2365 2366 if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) { 2367 if ((is_pebs_counter_event_group(event))) { 2368 /* 2369 * The value of each sample has already been updated while 2370 * setting up the corresponding sample data. 2371 */ 2372 perf_event_update_userpage(event); 2373 } else { 2374 /* 2375 * Now, auto-reload is only enabled in fixed period mode. 2376 * The reload value is always hwc->sample_period. 2377 * May need to change it, if auto-reload is enabled in 2378 * freq mode later. 2379 */ 2380 intel_pmu_save_and_restart_reload(event, count); 2381 } 2382 } else { 2383 /* 2384 * For a non-precise event, the counters-snapshotting record 2385 * may capture a positive value for the 2386 * overflowed event. The HW auto-reload mechanism then 2387 * resets the counter to 0 immediately, because 2388 * pebs_event_reset is cleared when PERF_X86_EVENT_AUTO_RELOAD 2389 * is not set. The counter may appear to go backwards in a 2390 * PMI handler. 2391 * 2392 * Since the event value has already been updated when processing the 2393 * counters-snapshotting record, only a new 2394 * period needs to be set for the counter.
2395 */ 2396 if (is_pebs_counter_event_group(event)) 2397 static_call(x86_pmu_set_period)(event); 2398 else 2399 intel_pmu_save_and_restart(event); 2400 } 2401 } 2402 2403 static __always_inline void 2404 __intel_pmu_pebs_events(struct perf_event *event, 2405 struct pt_regs *iregs, 2406 struct perf_sample_data *data, 2407 void *base, void *top, 2408 int bit, int count, 2409 setup_fn setup_sample) 2410 { 2411 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2412 struct x86_perf_regs perf_regs; 2413 struct pt_regs *regs = &perf_regs.regs; 2414 void *at = get_next_pebs_record_by_bit(base, top, bit); 2415 int cnt = count; 2416 2417 if (!iregs) 2418 iregs = &dummy_iregs; 2419 2420 while (cnt > 1) { 2421 __intel_pmu_pebs_event(event, iregs, regs, data, at, setup_sample); 2422 at += cpuc->pebs_record_size; 2423 at = get_next_pebs_record_by_bit(at, top, bit); 2424 cnt--; 2425 } 2426 2427 __intel_pmu_pebs_last_event(event, iregs, regs, data, at, count, setup_sample); 2428 } 2429 2430 static void intel_pmu_drain_pebs_core(struct pt_regs *iregs, struct perf_sample_data *data) 2431 { 2432 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2433 struct debug_store *ds = cpuc->ds; 2434 struct perf_event *event = cpuc->events[0]; /* PMC0 only */ 2435 struct pebs_record_core *at, *top; 2436 int n; 2437 2438 if (!x86_pmu.pebs_active) 2439 return; 2440 2441 at = (struct pebs_record_core *)(unsigned long)ds->pebs_buffer_base; 2442 top = (struct pebs_record_core *)(unsigned long)ds->pebs_index; 2443 2444 /* 2445 * Whatever else happens, drain the thing 2446 */ 2447 ds->pebs_index = ds->pebs_buffer_base; 2448 2449 if (!test_bit(0, cpuc->active_mask)) 2450 return; 2451 2452 WARN_ON_ONCE(!event); 2453 2454 if (!event->attr.precise_ip) 2455 return; 2456 2457 n = top - at; 2458 if (n <= 0) { 2459 if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD) 2460 intel_pmu_save_and_restart_reload(event, 0); 2461 return; 2462 } 2463 2464 __intel_pmu_pebs_events(event, iregs, data, at, top, 0, n, 2465 setup_pebs_fixed_sample_data); 2466 } 2467 2468 static void intel_pmu_pebs_event_update_no_drain(struct cpu_hw_events *cpuc, u64 mask) 2469 { 2470 u64 pebs_enabled = cpuc->pebs_enabled & mask; 2471 struct perf_event *event; 2472 int bit; 2473 2474 /* 2475 * The drain_pebs() could be called twice in a short period 2476 * for an auto-reload event in pmu::read(). No 2477 * overflows have happened in between. 2478 * It needs to call intel_pmu_save_and_restart_reload() to 2479 * update the event->count for this case.
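 *
 * For example, a pmu::read() drains the buffer and a second drain
 * (another read or a context-switch flush) follows before any new record
 * is written: the second drain finds an empty buffer and only folds the
 * current counter value into event->count via
 * intel_pmu_save_and_restart_reload(event, 0) below.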
2480 */ 2481 for_each_set_bit(bit, (unsigned long *)&pebs_enabled, X86_PMC_IDX_MAX) { 2482 event = cpuc->events[bit]; 2483 if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD) 2484 intel_pmu_save_and_restart_reload(event, 0); 2485 } 2486 } 2487 2488 static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, struct perf_sample_data *data) 2489 { 2490 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2491 struct debug_store *ds = cpuc->ds; 2492 struct perf_event *event; 2493 void *base, *at, *top; 2494 short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {}; 2495 short error[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {}; 2496 int max_pebs_events = intel_pmu_max_num_pebs(NULL); 2497 int bit, i, size; 2498 u64 mask; 2499 2500 if (!x86_pmu.pebs_active) 2501 return; 2502 2503 base = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base; 2504 top = (struct pebs_record_nhm *)(unsigned long)ds->pebs_index; 2505 2506 ds->pebs_index = ds->pebs_buffer_base; 2507 2508 mask = x86_pmu.pebs_events_mask; 2509 size = max_pebs_events; 2510 if (x86_pmu.flags & PMU_FL_PEBS_ALL) { 2511 mask |= x86_pmu.fixed_cntr_mask64 << INTEL_PMC_IDX_FIXED; 2512 size = INTEL_PMC_IDX_FIXED + x86_pmu_max_num_counters_fixed(NULL); 2513 } 2514 2515 if (unlikely(base >= top)) { 2516 intel_pmu_pebs_event_update_no_drain(cpuc, mask); 2517 return; 2518 } 2519 2520 for (at = base; at < top; at += x86_pmu.pebs_record_size) { 2521 struct pebs_record_nhm *p = at; 2522 u64 pebs_status; 2523 2524 pebs_status = p->status & cpuc->pebs_enabled; 2525 pebs_status &= mask; 2526 2527 /* PEBS v3 has more accurate status bits */ 2528 if (x86_pmu.intel_cap.pebs_format >= 3) { 2529 for_each_set_bit(bit, (unsigned long *)&pebs_status, size) 2530 counts[bit]++; 2531 2532 continue; 2533 } 2534 2535 /* 2536 * On some CPUs the PEBS status can be zero when PEBS is 2537 * racing with clearing of GLOBAL_STATUS. 2538 * 2539 * Normally we would drop that record, but in the 2540 * case when there is only a single active PEBS event 2541 * we can assume it's for that event. 2542 */ 2543 if (!pebs_status && cpuc->pebs_enabled && 2544 !(cpuc->pebs_enabled & (cpuc->pebs_enabled-1))) 2545 pebs_status = p->status = cpuc->pebs_enabled; 2546 2547 bit = find_first_bit((unsigned long *)&pebs_status, 2548 max_pebs_events); 2549 2550 if (!(x86_pmu.pebs_events_mask & (1 << bit))) 2551 continue; 2552 2553 /* 2554 * The PEBS hardware does not deal well with the situation 2555 * when events happen near to each other and multiple bits 2556 * are set. But it should happen rarely. 2557 * 2558 * If these events include one PEBS and multiple non-PEBS 2559 * events, it doesn't impact PEBS record. The record will 2560 * be handled normally. (slow path) 2561 * 2562 * If these events include two or more PEBS events, the 2563 * records for the events can be collapsed into a single 2564 * one, and it's not possible to reconstruct all events 2565 * that caused the PEBS record. It's called collision. 2566 * If collision happened, the record will be dropped. 
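 *
 * For example, a record whose status has bits 0 and 1 set while both
 * PMC0 and PMC1 run active PEBS events cannot be attributed to either
 * event; it is accounted in error[] below and later reported via
 * perf_log_lost_samples().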
2567 */ 2568 if (pebs_status != (1ULL << bit)) { 2569 for_each_set_bit(i, (unsigned long *)&pebs_status, size) 2570 error[i]++; 2571 continue; 2572 } 2573 2574 counts[bit]++; 2575 } 2576 2577 for_each_set_bit(bit, (unsigned long *)&mask, size) { 2578 if ((counts[bit] == 0) && (error[bit] == 0)) 2579 continue; 2580 2581 event = cpuc->events[bit]; 2582 if (WARN_ON_ONCE(!event)) 2583 continue; 2584 2585 if (WARN_ON_ONCE(!event->attr.precise_ip)) 2586 continue; 2587 2588 /* log dropped samples number */ 2589 if (error[bit]) { 2590 perf_log_lost_samples(event, error[bit]); 2591 2592 if (iregs) 2593 perf_event_account_interrupt(event); 2594 } 2595 2596 if (counts[bit]) { 2597 __intel_pmu_pebs_events(event, iregs, data, base, 2598 top, bit, counts[bit], 2599 setup_pebs_fixed_sample_data); 2600 } 2601 } 2602 } 2603 2604 static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sample_data *data) 2605 { 2606 short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {}; 2607 void *last[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS]; 2608 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2609 struct debug_store *ds = cpuc->ds; 2610 struct x86_perf_regs perf_regs; 2611 struct pt_regs *regs = &perf_regs.regs; 2612 struct pebs_basic *basic; 2613 struct perf_event *event; 2614 void *base, *at, *top; 2615 int bit; 2616 u64 mask; 2617 2618 if (!x86_pmu.pebs_active) 2619 return; 2620 2621 base = (struct pebs_basic *)(unsigned long)ds->pebs_buffer_base; 2622 top = (struct pebs_basic *)(unsigned long)ds->pebs_index; 2623 2624 ds->pebs_index = ds->pebs_buffer_base; 2625 2626 mask = hybrid(cpuc->pmu, pebs_events_mask) | 2627 (hybrid(cpuc->pmu, fixed_cntr_mask64) << INTEL_PMC_IDX_FIXED); 2628 2629 if (unlikely(base >= top)) { 2630 intel_pmu_pebs_event_update_no_drain(cpuc, mask); 2631 return; 2632 } 2633 2634 if (!iregs) 2635 iregs = &dummy_iregs; 2636 2637 /* Process all but the last event for each counter. */ 2638 for (at = base; at < top; at += basic->format_size) { 2639 u64 pebs_status; 2640 2641 basic = at; 2642 if (basic->format_size != cpuc->pebs_record_size) 2643 continue; 2644 2645 pebs_status = basic->applicable_counters & cpuc->pebs_enabled & mask; 2646 for_each_set_bit(bit, (unsigned long *)&pebs_status, X86_PMC_IDX_MAX) { 2647 event = cpuc->events[bit]; 2648 2649 if (WARN_ON_ONCE(!event) || 2650 WARN_ON_ONCE(!event->attr.precise_ip)) 2651 continue; 2652 2653 if (counts[bit]++) { 2654 __intel_pmu_pebs_event(event, iregs, regs, data, last[bit], 2655 setup_pebs_adaptive_sample_data); 2656 } 2657 last[bit] = at; 2658 } 2659 } 2660 2661 for_each_set_bit(bit, (unsigned long *)&mask, X86_PMC_IDX_MAX) { 2662 if (!counts[bit]) 2663 continue; 2664 2665 event = cpuc->events[bit]; 2666 2667 __intel_pmu_pebs_last_event(event, iregs, regs, data, last[bit], 2668 counts[bit], setup_pebs_adaptive_sample_data); 2669 } 2670 } 2671 2672 /* 2673 * PEBS probe and setup 2674 */ 2675 2676 void __init intel_pebs_init(void) 2677 { 2678 /* 2679 * No support for 32bit formats 2680 */ 2681 if (!boot_cpu_has(X86_FEATURE_DTES64)) 2682 return; 2683 2684 x86_pmu.ds_pebs = boot_cpu_has(X86_FEATURE_PEBS); 2685 x86_pmu.pebs_buffer_size = PEBS_BUFFER_SIZE; 2686 if (x86_pmu.version <= 4) 2687 x86_pmu.pebs_no_isolation = 1; 2688 2689 if (x86_pmu.ds_pebs) { 2690 char pebs_type = x86_pmu.intel_cap.pebs_trap ? 
'+' : '-'; 2691 char *pebs_qual = ""; 2692 int format = x86_pmu.intel_cap.pebs_format; 2693 2694 if (format < 4) 2695 x86_pmu.intel_cap.pebs_baseline = 0; 2696 2697 x86_pmu.pebs_enable = intel_pmu_pebs_enable; 2698 x86_pmu.pebs_disable = intel_pmu_pebs_disable; 2699 x86_pmu.pebs_enable_all = intel_pmu_pebs_enable_all; 2700 x86_pmu.pebs_disable_all = intel_pmu_pebs_disable_all; 2701 2702 switch (format) { 2703 case 0: 2704 pr_cont("PEBS fmt0%c, ", pebs_type); 2705 x86_pmu.pebs_record_size = sizeof(struct pebs_record_core); 2706 /* 2707 * Using >PAGE_SIZE buffers makes the WRMSR to 2708 * PERF_GLOBAL_CTRL in intel_pmu_enable_all() 2709 * mysteriously hang on Core2. 2710 * 2711 * As a workaround, we don't do this. 2712 */ 2713 x86_pmu.pebs_buffer_size = PAGE_SIZE; 2714 x86_pmu.drain_pebs = intel_pmu_drain_pebs_core; 2715 break; 2716 2717 case 1: 2718 pr_cont("PEBS fmt1%c, ", pebs_type); 2719 x86_pmu.pebs_record_size = sizeof(struct pebs_record_nhm); 2720 x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm; 2721 break; 2722 2723 case 2: 2724 pr_cont("PEBS fmt2%c, ", pebs_type); 2725 x86_pmu.pebs_record_size = sizeof(struct pebs_record_hsw); 2726 x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm; 2727 break; 2728 2729 case 3: 2730 pr_cont("PEBS fmt3%c, ", pebs_type); 2731 x86_pmu.pebs_record_size = 2732 sizeof(struct pebs_record_skl); 2733 x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm; 2734 x86_pmu.large_pebs_flags |= PERF_SAMPLE_TIME; 2735 break; 2736 2737 case 6: 2738 if (x86_pmu.intel_cap.pebs_baseline) { 2739 x86_pmu.large_pebs_flags |= PERF_SAMPLE_READ; 2740 x86_pmu.late_setup = intel_pmu_late_setup; 2741 } 2742 fallthrough; 2743 case 5: 2744 x86_pmu.pebs_ept = 1; 2745 fallthrough; 2746 case 4: 2747 x86_pmu.drain_pebs = intel_pmu_drain_pebs_icl; 2748 x86_pmu.pebs_record_size = sizeof(struct pebs_basic); 2749 if (x86_pmu.intel_cap.pebs_baseline) { 2750 x86_pmu.large_pebs_flags |= 2751 PERF_SAMPLE_BRANCH_STACK | 2752 PERF_SAMPLE_TIME; 2753 x86_pmu.flags |= PMU_FL_PEBS_ALL; 2754 x86_pmu.pebs_capable = ~0ULL; 2755 pebs_qual = "-baseline"; 2756 x86_get_pmu(smp_processor_id())->capabilities |= PERF_PMU_CAP_EXTENDED_REGS; 2757 } else { 2758 /* Only basic record supported */ 2759 x86_pmu.large_pebs_flags &= 2760 ~(PERF_SAMPLE_ADDR | 2761 PERF_SAMPLE_TIME | 2762 PERF_SAMPLE_DATA_SRC | 2763 PERF_SAMPLE_TRANSACTION | 2764 PERF_SAMPLE_REGS_USER | 2765 PERF_SAMPLE_REGS_INTR); 2766 } 2767 pr_cont("PEBS fmt%d%c%s, ", format, pebs_type, pebs_qual); 2768 2769 /* 2770 * The PEBS-via-PT is not supported on hybrid platforms, 2771 * because not all CPUs of a hybrid machine support it. 2772 * The global x86_pmu.intel_cap, which only contains the 2773 * common capabilities, is used to check the availability 2774 * of the feature. The per-PMU pebs_output_pt_available 2775 * in a hybrid machine should be ignored. 2776 */ 2777 if (x86_pmu.intel_cap.pebs_output_pt_available) { 2778 pr_cont("PEBS-via-PT, "); 2779 x86_get_pmu(smp_processor_id())->capabilities |= PERF_PMU_CAP_AUX_OUTPUT; 2780 } 2781 2782 break; 2783 2784 default: 2785 pr_cont("no PEBS fmt%d%c, ", format, pebs_type); 2786 x86_pmu.ds_pebs = 0; 2787 } 2788 } 2789 } 2790 2791 void perf_restore_debug_store(void) 2792 { 2793 struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds); 2794 2795 if (!x86_pmu.bts && !x86_pmu.ds_pebs) 2796 return; 2797 2798 wrmsrq(MSR_IA32_DS_AREA, (unsigned long)ds); 2799 } 2800
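
/*
 * Illustrative only (not part of the kernel build): a minimal userspace
 * sketch of how the PEBS machinery above is typically exercised, assuming
 * a PMU whose pebs_format supports precise_ip = 2 (eventing IP). Error
 * handling is omitted.
 *
 *	#include <linux/perf_event.h>
 *	#include <sys/syscall.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	static int open_precise_cycles(pid_t pid)
 *	{
 *		struct perf_event_attr attr;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.size = sizeof(attr);
 *		attr.type = PERF_TYPE_HARDWARE;
 *		attr.config = PERF_COUNT_HW_CPU_CYCLES;
 *		attr.sample_period = 100003;
 *		attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TIME |
 *				   PERF_SAMPLE_ADDR | PERF_SAMPLE_DATA_SRC;
 *		attr.precise_ip = 2;
 *		attr.exclude_kernel = 1;
 *
 *		return syscall(__NR_perf_event_open, &attr, pid, -1, -1, 0);
 *	}
 */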