// SPDX-License-Identifier: GPL-2.0-only
/*
 * Support Intel/AMD RAPL energy consumption counters
 * Copyright (C) 2013 Google, Inc., Stephane Eranian
 *
 * Intel RAPL interface is specified in the IA-32 Manual Vol3b
 * section 14.7.1 (September 2013)
 *
 * AMD RAPL interface for Fam17h is described in the public PPR:
 * https://bugzilla.kernel.org/show_bug.cgi?id=206537
 *
 * RAPL provides more controls than just reporting energy consumption;
 * however, here we only expose the free running energy consumption
 * counters listed below.
 *
 * Each of those counters increments in a power unit defined by the
 * RAPL_POWER_UNIT MSR. On SandyBridge, this unit is 1/(2^16) Joules
 * but it can vary.
 *
 * Counter to rapl events mappings:
 *
 *  pp0 counter: consumption of all physical cores (power plane 0)
 *        event: rapl_energy_cores
 *    perf code: 0x1
 *
 *  pkg counter: consumption of the whole processor package
 *        event: rapl_energy_pkg
 *    perf code: 0x2
 *
 * dram counter: consumption of the dram domain (servers only)
 *        event: rapl_energy_dram
 *    perf code: 0x3
 *
 *  gpu counter: consumption of the builtin-gpu domain (client only)
 *        event: rapl_energy_gpu
 *    perf code: 0x4
 *
 * psys counter: consumption of the builtin-psys domain (client only)
 *        event: rapl_energy_psys
 *    perf code: 0x5
 *
 * core counter: consumption of a single physical core
 *        event: rapl_energy_core (power_core PMU)
 *    perf code: 0x1
 *
 * We manage those counters as free running (read-only). They may be
 * used simultaneously by other tools, such as turbostat.
 *
 * The events only support system-wide mode counting. There is no
 * sampling support because it does not make sense and is not
 * supported by the RAPL hardware.
 *
 * Because we want to avoid floating-point operations in the kernel,
 * the events are all reported in fixed point arithmetic (32.32).
 * Tools must adjust the counts to convert them to Watts using
 * the duration of the measurement. Tools may use a function such as
 * ldexp(raw_count, -32);
 */
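/*
 * Illustration only (userspace, not part of this driver): assuming a tool
 * reads one of these counters through the perf interface, the raw 32.32
 * fixed-point count can be converted as the header comment describes:
 *
 *	double joules = ldexp((double)raw_count, -32);
 *	double watts  = joules / elapsed_seconds;
 *
 * This matches the "scale" attribute of 2.3283064365386962890625e-10
 * (i.e. 2^-32) exported below in sysfs.
 */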
#define pr_fmt(fmt) "RAPL PMU: " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <linux/nospec.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include "perf_event.h"
#include "probe.h"

MODULE_DESCRIPTION("Support Intel/AMD RAPL energy consumption counters");
MODULE_LICENSE("GPL");

/*
 * RAPL energy status counters
 */
enum perf_rapl_pkg_events {
        PERF_RAPL_PP0 = 0,              /* all cores */
        PERF_RAPL_PKG,                  /* entire package */
        PERF_RAPL_RAM,                  /* DRAM */
        PERF_RAPL_PP1,                  /* gpu */
        PERF_RAPL_PSYS,                 /* psys */

        PERF_RAPL_PKG_EVENTS_MAX,
        NR_RAPL_PKG_DOMAINS = PERF_RAPL_PKG_EVENTS_MAX,
};

#define PERF_RAPL_CORE                  0       /* single core */
#define PERF_RAPL_CORE_EVENTS_MAX       1
#define NR_RAPL_CORE_DOMAINS            PERF_RAPL_CORE_EVENTS_MAX

static const char *const rapl_pkg_domain_names[NR_RAPL_PKG_DOMAINS] __initconst = {
        "pp0-core",
        "package",
        "dram",
        "pp1-gpu",
        "psys",
};

static const char *const rapl_core_domain_name __initconst = "core";

/*
 * event code: LSB 8 bits, passed in attr->config
 * any other bit is reserved
 */
#define RAPL_EVENT_MASK         0xFFULL
#define RAPL_CNTR_WIDTH         32

#define RAPL_EVENT_ATTR_STR(_name, v, str)                                      \
static struct perf_pmu_events_attr event_attr_##v = {                          \
        .attr           = __ATTR(_name, 0444, perf_event_sysfs_show, NULL),    \
        .id             = 0,                                                   \
        .event_str      = str,                                                 \
};

/*
 * RAPL Package energy counter scope:
 * 1. AMD/HYGON platforms have a per-PKG package energy counter
 * 2. For Intel platforms
 *	2.1. CLX-AP is multi-die and its RAPL MSRs are die-scope
 *	2.2. Other Intel platforms are single die systems so the scope can be
 *	     considered as either pkg-scope or die-scope, and we are considering
 *	     them as die-scope.
 */
#define rapl_pkg_pmu_is_pkg_scope()                             \
        (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||          \
         boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)

struct rapl_pmu {
        raw_spinlock_t          lock;
        int                     n_active;
        int                     cpu;
        struct list_head        active_list;
        struct pmu              *pmu;
        ktime_t                 timer_interval;
        struct hrtimer          hrtimer;
};

struct rapl_pmus {
        struct pmu              pmu;
        unsigned int            nr_rapl_pmu;
        unsigned int            cntr_mask;
        struct rapl_pmu         *rapl_pmu[] __counted_by(nr_rapl_pmu);
};

enum rapl_unit_quirk {
        RAPL_UNIT_QUIRK_NONE,
        RAPL_UNIT_QUIRK_INTEL_HSW,
        RAPL_UNIT_QUIRK_INTEL_SPR,
};

struct rapl_model {
        struct perf_msr         *rapl_pkg_msrs;
        struct perf_msr         *rapl_core_msrs;
        unsigned long           pkg_events;
        unsigned long           core_events;
        unsigned int            msr_power_unit;
        enum rapl_unit_quirk    unit_quirk;
};

/* 1/2^hw_unit Joule */
static int rapl_pkg_hw_unit[NR_RAPL_PKG_DOMAINS] __read_mostly;
static int rapl_core_hw_unit __read_mostly;
static struct rapl_pmus *rapl_pmus_pkg;
static struct rapl_pmus *rapl_pmus_core;
static u64 rapl_timer_ms;
static struct rapl_model *rapl_model;
/*
 * Helper function to get the correct topology id according to the
 * RAPL PMU scope.
 */
static inline unsigned int get_rapl_pmu_idx(int cpu, int scope)
{
        /*
         * Returns unsigned int, which converts the '-1' return value
         * (for non-existent mappings in topology map) to UINT_MAX, so
         * the error check in the caller is simplified.
         */
        switch (scope) {
        case PERF_PMU_SCOPE_PKG:
                return topology_logical_package_id(cpu);
        case PERF_PMU_SCOPE_DIE:
                return topology_logical_die_id(cpu);
        case PERF_PMU_SCOPE_CORE:
                return topology_logical_core_id(cpu);
        default:
                return -EINVAL;
        }
}

static inline u64 rapl_read_counter(struct perf_event *event)
{
        u64 raw;

        rdmsrl(event->hw.event_base, raw);
        return raw;
}

static inline u64 rapl_scale(u64 v, struct perf_event *event)
{
        int hw_unit = rapl_pkg_hw_unit[event->hw.config - 1];

        if (event->pmu->scope == PERF_PMU_SCOPE_CORE)
                hw_unit = rapl_core_hw_unit;

        /*
         * scale delta to smallest unit (1/2^32)
         * users must then scale back: count * 1/(2^32) to get Joules
         * or use ldexp(count, -32).
         * Watts = Joules/Time delta
         */
        return v << (32 - hw_unit);
}

static u64 rapl_event_update(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        u64 prev_raw_count, new_raw_count;
        s64 delta, sdelta;
        int shift = RAPL_CNTR_WIDTH;

        prev_raw_count = local64_read(&hwc->prev_count);
        do {
                rdmsrl(event->hw.event_base, new_raw_count);
        } while (!local64_try_cmpxchg(&hwc->prev_count,
                                      &prev_raw_count, new_raw_count));

        /*
         * Now we have the new raw value and have updated the prev
         * timestamp already. We can now calculate the elapsed delta
         * (event-)time and add that to the generic event.
         *
         * Careful, not all hw sign-extends above the physical width
         * of the count.
         */
        delta = (new_raw_count << shift) - (prev_raw_count << shift);
        delta >>= shift;

        sdelta = rapl_scale(delta, event);

        local64_add(sdelta, &event->count);

        return new_raw_count;
}
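/*
 * Worked example (illustration only), assuming the common case of a
 * 2^-16 Joule hardware unit (hw_unit == 16): one raw counter tick is
 * scaled by rapl_scale() above to 1 << (32 - 16) = 65536 units of
 * 2^-32 J, which is again 2^-16 J, so the reported event count is
 * always in 2^-32 J units regardless of the per-domain hardware unit.
 */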
static void rapl_start_hrtimer(struct rapl_pmu *pmu)
{
        hrtimer_start(&pmu->hrtimer, pmu->timer_interval,
                      HRTIMER_MODE_REL_PINNED);
}

static enum hrtimer_restart rapl_hrtimer_handle(struct hrtimer *hrtimer)
{
        struct rapl_pmu *rapl_pmu = container_of(hrtimer, struct rapl_pmu, hrtimer);
        struct perf_event *event;
        unsigned long flags;

        if (!rapl_pmu->n_active)
                return HRTIMER_NORESTART;

        raw_spin_lock_irqsave(&rapl_pmu->lock, flags);

        list_for_each_entry(event, &rapl_pmu->active_list, active_entry)
                rapl_event_update(event);

        raw_spin_unlock_irqrestore(&rapl_pmu->lock, flags);

        hrtimer_forward_now(hrtimer, rapl_pmu->timer_interval);

        return HRTIMER_RESTART;
}

static void rapl_hrtimer_init(struct rapl_pmu *rapl_pmu)
{
        struct hrtimer *hr = &rapl_pmu->hrtimer;

        hrtimer_setup(hr, rapl_hrtimer_handle, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}

static void __rapl_pmu_event_start(struct rapl_pmu *rapl_pmu,
                                   struct perf_event *event)
{
        if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
                return;

        event->hw.state = 0;

        list_add_tail(&event->active_entry, &rapl_pmu->active_list);

        local64_set(&event->hw.prev_count, rapl_read_counter(event));

        rapl_pmu->n_active++;
        if (rapl_pmu->n_active == 1)
                rapl_start_hrtimer(rapl_pmu);
}

static void rapl_pmu_event_start(struct perf_event *event, int mode)
{
        struct rapl_pmu *rapl_pmu = event->pmu_private;
        unsigned long flags;

        raw_spin_lock_irqsave(&rapl_pmu->lock, flags);
        __rapl_pmu_event_start(rapl_pmu, event);
        raw_spin_unlock_irqrestore(&rapl_pmu->lock, flags);
}

static void rapl_pmu_event_stop(struct perf_event *event, int mode)
{
        struct rapl_pmu *rapl_pmu = event->pmu_private;
        struct hw_perf_event *hwc = &event->hw;
        unsigned long flags;

        raw_spin_lock_irqsave(&rapl_pmu->lock, flags);

        /* mark event as deactivated and stopped */
        if (!(hwc->state & PERF_HES_STOPPED)) {
                WARN_ON_ONCE(rapl_pmu->n_active <= 0);
                rapl_pmu->n_active--;
                if (rapl_pmu->n_active == 0)
                        hrtimer_cancel(&rapl_pmu->hrtimer);

                list_del(&event->active_entry);

                WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
                hwc->state |= PERF_HES_STOPPED;
        }

        /* check if update of sw counter is necessary */
        if ((mode & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
                /*
                 * Drain the remaining delta count out of an event
                 * that we are disabling:
                 */
                rapl_event_update(event);
                hwc->state |= PERF_HES_UPTODATE;
        }

        raw_spin_unlock_irqrestore(&rapl_pmu->lock, flags);
}

static int rapl_pmu_event_add(struct perf_event *event, int mode)
{
        struct rapl_pmu *rapl_pmu = event->pmu_private;
        struct hw_perf_event *hwc = &event->hw;
        unsigned long flags;

        raw_spin_lock_irqsave(&rapl_pmu->lock, flags);

        hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

        if (mode & PERF_EF_START)
                __rapl_pmu_event_start(rapl_pmu, event);

        raw_spin_unlock_irqrestore(&rapl_pmu->lock, flags);

        return 0;
}

static void rapl_pmu_event_del(struct perf_event *event, int flags)
{
        rapl_pmu_event_stop(event, PERF_EF_UPDATE);
}

static int rapl_pmu_event_init(struct perf_event *event)
{
        u64 cfg = event->attr.config & RAPL_EVENT_MASK;
        int bit, rapl_pmus_scope, ret = 0;
        struct rapl_pmu *rapl_pmu;
        unsigned int rapl_pmu_idx;
        struct rapl_pmus *rapl_pmus;

        /* only look at RAPL events */
        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        /* unsupported modes and filters */
        if (event->attr.sample_period) /* no sampling */
                return -EINVAL;

        /* check only supported bits are set */
        if (event->attr.config & ~RAPL_EVENT_MASK)
                return -EINVAL;

        if (event->cpu < 0)
                return -EINVAL;

        rapl_pmus = container_of(event->pmu, struct rapl_pmus, pmu);
        if (!rapl_pmus)
                return -EINVAL;
        rapl_pmus_scope = rapl_pmus->pmu.scope;

        if (rapl_pmus_scope == PERF_PMU_SCOPE_PKG || rapl_pmus_scope == PERF_PMU_SCOPE_DIE) {
                cfg = array_index_nospec((long)cfg, NR_RAPL_PKG_DOMAINS + 1);
                if (!cfg || cfg >= NR_RAPL_PKG_DOMAINS + 1)
                        return -EINVAL;

                bit = cfg - 1;
                event->hw.event_base = rapl_model->rapl_pkg_msrs[bit].msr;
        } else if (rapl_pmus_scope == PERF_PMU_SCOPE_CORE) {
                cfg = array_index_nospec((long)cfg, NR_RAPL_CORE_DOMAINS + 1);
                if (!cfg || cfg >= NR_RAPL_CORE_DOMAINS + 1)
                        return -EINVAL;

                bit = cfg - 1;
                event->hw.event_base = rapl_model->rapl_core_msrs[bit].msr;
        } else
                return -EINVAL;

        /* check event supported */
        if (!(rapl_pmus->cntr_mask & (1 << bit)))
                return -EINVAL;

        rapl_pmu_idx = get_rapl_pmu_idx(event->cpu, rapl_pmus_scope);
        if (rapl_pmu_idx >= rapl_pmus->nr_rapl_pmu)
                return -EINVAL;
        /* must be done before validate_group */
        rapl_pmu = rapl_pmus->rapl_pmu[rapl_pmu_idx];
        if (!rapl_pmu)
                return -EINVAL;

        event->pmu_private = rapl_pmu;
        event->hw.config = cfg;
        event->hw.idx = bit;

        return ret;
}
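/*
 * Illustration only: the attr->config values accepted above are the
 * "event=0x.." codes exported in the events/ sysfs directory, so e.g.
 *
 *	perf stat -a -e power/energy-pkg/ -- sleep 1
 *
 * opens a system-wide counting event with config == 0x2, which maps to
 * bit 1 and MSR_PKG_ENERGY_STATUS in the pkg-scope MSR table below.
 */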
static void rapl_pmu_event_read(struct perf_event *event)
{
        rapl_event_update(event);
}

RAPL_EVENT_ATTR_STR(energy-cores, rapl_cores, "event=0x01");
RAPL_EVENT_ATTR_STR(energy-pkg  ,   rapl_pkg, "event=0x02");
RAPL_EVENT_ATTR_STR(energy-ram  ,   rapl_ram, "event=0x03");
RAPL_EVENT_ATTR_STR(energy-gpu  ,   rapl_gpu, "event=0x04");
RAPL_EVENT_ATTR_STR(energy-psys,   rapl_psys, "event=0x05");
RAPL_EVENT_ATTR_STR(energy-core,   rapl_core, "event=0x01");

RAPL_EVENT_ATTR_STR(energy-cores.unit, rapl_cores_unit, "Joules");
RAPL_EVENT_ATTR_STR(energy-pkg.unit  ,   rapl_pkg_unit, "Joules");
RAPL_EVENT_ATTR_STR(energy-ram.unit  ,   rapl_ram_unit, "Joules");
RAPL_EVENT_ATTR_STR(energy-gpu.unit  ,   rapl_gpu_unit, "Joules");
RAPL_EVENT_ATTR_STR(energy-psys.unit,   rapl_psys_unit, "Joules");
RAPL_EVENT_ATTR_STR(energy-core.unit,   rapl_core_unit, "Joules");

/*
 * we compute in 0.23 nJ increments regardless of MSR
 */
RAPL_EVENT_ATTR_STR(energy-cores.scale, rapl_cores_scale, "2.3283064365386962890625e-10");
RAPL_EVENT_ATTR_STR(energy-pkg.scale,     rapl_pkg_scale, "2.3283064365386962890625e-10");
RAPL_EVENT_ATTR_STR(energy-ram.scale,     rapl_ram_scale, "2.3283064365386962890625e-10");
RAPL_EVENT_ATTR_STR(energy-gpu.scale,     rapl_gpu_scale, "2.3283064365386962890625e-10");
RAPL_EVENT_ATTR_STR(energy-psys.scale,   rapl_psys_scale, "2.3283064365386962890625e-10");
RAPL_EVENT_ATTR_STR(energy-core.scale,   rapl_core_scale, "2.3283064365386962890625e-10");

/*
 * There are no default events, but we need to create
 * "events" group (with empty attrs) before updating
 * it with detected events.
 */
static struct attribute *attrs_empty[] = {
        NULL,
};

static struct attribute_group rapl_pmu_events_group = {
        .name  = "events",
        .attrs = attrs_empty,
};

PMU_FORMAT_ATTR(event, "config:0-7");
static struct attribute *rapl_formats_attr[] = {
        &format_attr_event.attr,
        NULL,
};

static struct attribute_group rapl_pmu_format_group = {
        .name  = "format",
        .attrs = rapl_formats_attr,
};

static const struct attribute_group *rapl_attr_groups[] = {
        &rapl_pmu_format_group,
        &rapl_pmu_events_group,
        NULL,
};

static const struct attribute_group *rapl_core_attr_groups[] = {
        &rapl_pmu_format_group,
        &rapl_pmu_events_group,
        NULL,
};

static struct attribute *rapl_events_cores[] = {
        EVENT_PTR(rapl_cores),
        EVENT_PTR(rapl_cores_unit),
        EVENT_PTR(rapl_cores_scale),
        NULL,
};

static struct attribute_group rapl_events_cores_group = {
        .name  = "events",
        .attrs = rapl_events_cores,
};

static struct attribute *rapl_events_pkg[] = {
        EVENT_PTR(rapl_pkg),
        EVENT_PTR(rapl_pkg_unit),
        EVENT_PTR(rapl_pkg_scale),
        NULL,
};

static struct attribute_group rapl_events_pkg_group = {
        .name  = "events",
        .attrs = rapl_events_pkg,
};

static struct attribute *rapl_events_ram[] = {
        EVENT_PTR(rapl_ram),
        EVENT_PTR(rapl_ram_unit),
        EVENT_PTR(rapl_ram_scale),
        NULL,
};

static struct attribute_group rapl_events_ram_group = {
        .name  = "events",
        .attrs = rapl_events_ram,
};

static struct attribute *rapl_events_gpu[] = {
        EVENT_PTR(rapl_gpu),
        EVENT_PTR(rapl_gpu_unit),
        EVENT_PTR(rapl_gpu_scale),
        NULL,
};

static struct attribute_group rapl_events_gpu_group = {
        .name  = "events",
        .attrs = rapl_events_gpu,
};

static struct attribute *rapl_events_psys[] = {
        EVENT_PTR(rapl_psys),
        EVENT_PTR(rapl_psys_unit),
        EVENT_PTR(rapl_psys_scale),
        NULL,
};

static struct attribute_group rapl_events_psys_group = {
        .name  = "events",
        .attrs = rapl_events_psys,
};

static struct attribute *rapl_events_core[] = {
        EVENT_PTR(rapl_core),
        EVENT_PTR(rapl_core_unit),
        EVENT_PTR(rapl_core_scale),
        NULL,
};

static struct attribute_group rapl_events_core_group = {
        .name  = "events",
        .attrs = rapl_events_core,
};
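/*
 * Illustration only: with the groups above, a registered "power" PMU is
 * expected to show up in sysfs roughly as
 *
 *	/sys/bus/event_source/devices/power/format/event          -> "config:0-7"
 *	/sys/bus/event_source/devices/power/events/energy-pkg     -> "event=0x02"
 *	/sys/bus/event_source/devices/power/events/energy-pkg.unit
 *	/sys/bus/event_source/devices/power/events/energy-pkg.scale
 *
 * which is what tools such as perf use to resolve the event strings.
 */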
static bool test_msr(int idx, void *data)
{
        return test_bit(idx, (unsigned long *) data);
}

/* Only lower 32bits of the MSR represents the energy counter */
#define RAPL_MSR_MASK 0xFFFFFFFF

static struct perf_msr intel_rapl_msrs[] = {
        [PERF_RAPL_PP0]  = { MSR_PP0_ENERGY_STATUS,      &rapl_events_cores_group, test_msr, false, RAPL_MSR_MASK },
        [PERF_RAPL_PKG]  = { MSR_PKG_ENERGY_STATUS,      &rapl_events_pkg_group,   test_msr, false, RAPL_MSR_MASK },
        [PERF_RAPL_RAM]  = { MSR_DRAM_ENERGY_STATUS,     &rapl_events_ram_group,   test_msr, false, RAPL_MSR_MASK },
        [PERF_RAPL_PP1]  = { MSR_PP1_ENERGY_STATUS,      &rapl_events_gpu_group,   test_msr, false, RAPL_MSR_MASK },
        [PERF_RAPL_PSYS] = { MSR_PLATFORM_ENERGY_STATUS, &rapl_events_psys_group,  test_msr, false, RAPL_MSR_MASK },
};

static struct perf_msr intel_rapl_spr_msrs[] = {
        [PERF_RAPL_PP0]  = { MSR_PP0_ENERGY_STATUS,      &rapl_events_cores_group, test_msr, false, RAPL_MSR_MASK },
        [PERF_RAPL_PKG]  = { MSR_PKG_ENERGY_STATUS,      &rapl_events_pkg_group,   test_msr, false, RAPL_MSR_MASK },
        [PERF_RAPL_RAM]  = { MSR_DRAM_ENERGY_STATUS,     &rapl_events_ram_group,   test_msr, false, RAPL_MSR_MASK },
        [PERF_RAPL_PP1]  = { MSR_PP1_ENERGY_STATUS,      &rapl_events_gpu_group,   test_msr, false, RAPL_MSR_MASK },
        [PERF_RAPL_PSYS] = { MSR_PLATFORM_ENERGY_STATUS, &rapl_events_psys_group,  test_msr, true,  RAPL_MSR_MASK },
};

/*
 * Force to PERF_RAPL_PKG_EVENTS_MAX size due to:
 * - perf_msr_probe(PERF_RAPL_PKG_EVENTS_MAX)
 * - want to use same event codes across both architectures
 */
static struct perf_msr amd_rapl_pkg_msrs[] = {
        [PERF_RAPL_PP0]  = { 0, &rapl_events_cores_group, NULL, false, 0 },
        [PERF_RAPL_PKG]  = { MSR_AMD_PKG_ENERGY_STATUS, &rapl_events_pkg_group, test_msr, false, RAPL_MSR_MASK },
        [PERF_RAPL_RAM]  = { 0, &rapl_events_ram_group,  NULL, false, 0 },
        [PERF_RAPL_PP1]  = { 0, &rapl_events_gpu_group,  NULL, false, 0 },
        [PERF_RAPL_PSYS] = { 0, &rapl_events_psys_group, NULL, false, 0 },
};

static struct perf_msr amd_rapl_core_msrs[] = {
        [PERF_RAPL_CORE] = { MSR_AMD_CORE_ENERGY_STATUS, &rapl_events_core_group,
                             test_msr, false, RAPL_MSR_MASK },
};

static int rapl_check_hw_unit(void)
{
        u64 msr_rapl_power_unit_bits;
        int i;

        /* protect rdmsrl() to handle virtualization */
        if (rdmsrl_safe(rapl_model->msr_power_unit, &msr_rapl_power_unit_bits))
                return -1;
        for (i = 0; i < NR_RAPL_PKG_DOMAINS; i++)
                rapl_pkg_hw_unit[i] = (msr_rapl_power_unit_bits >> 8) & 0x1FULL;

        rapl_core_hw_unit = (msr_rapl_power_unit_bits >> 8) & 0x1FULL;

        switch (rapl_model->unit_quirk) {
        /*
         * DRAM domain on HSW server and KNL has a fixed energy unit which can
         * be different from the unit derived from the power unit MSR. See
         * "Intel Xeon Processor E5-1600 and E5-2600 v3 Product Families, V2
         * of 2. Datasheet, September 2014, Reference Number: 330784-001"
         */
        case RAPL_UNIT_QUIRK_INTEL_HSW:
                rapl_pkg_hw_unit[PERF_RAPL_RAM] = 16;
                break;
        /* SPR uses a fixed energy unit for Psys domain. */
        case RAPL_UNIT_QUIRK_INTEL_SPR:
                rapl_pkg_hw_unit[PERF_RAPL_PSYS] = 0;
                break;
        default:
                break;
        }

        /*
         * Calculate the timer rate:
         * Use reference of 200W for scaling the timeout to avoid counter
         * overflows. 200W = 200 Joules/sec
         * Divide interval by 2 to avoid lockstep (2 * 100)
         * if hw unit is 32, then we use 2 ms 1/200/2
         */
        rapl_timer_ms = 2;
        if (rapl_pkg_hw_unit[0] < 32) {
                rapl_timer_ms = (1000 / (2 * 100));
                rapl_timer_ms *= (1ULL << (32 - rapl_pkg_hw_unit[0] - 1));
        }
        return 0;
}
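/*
 * Worked example (illustration only): the energy status unit lives in
 * bits 12:8 of the power unit MSR, hence the ">> 8) & 0x1F" above. A
 * typical value of 16 means one counter tick is 2^-16 J (~15.3 uJ), so
 * the 32-bit counter wraps after 2^32 * 2^-16 J = 65536 J, i.e. ~328 s
 * at the 200 W reference. The computed polling interval is then
 * 5 ms * 2^(32 - 16 - 1) = 163840 ms (~164 s), about half of that.
 */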
static void __init rapl_advertise(void)
{
        int i;
        int num_counters = hweight32(rapl_pmus_pkg->cntr_mask);

        if (rapl_pmus_core)
                num_counters += hweight32(rapl_pmus_core->cntr_mask);

        pr_info("API unit is 2^-32 Joules, %d fixed counters, %llu ms ovfl timer\n",
                num_counters, rapl_timer_ms);

        for (i = 0; i < NR_RAPL_PKG_DOMAINS; i++) {
                if (rapl_pmus_pkg->cntr_mask & (1 << i)) {
                        pr_info("hw unit of domain %s 2^-%d Joules\n",
                                rapl_pkg_domain_names[i], rapl_pkg_hw_unit[i]);
                }
        }

        if (rapl_pmus_core && (rapl_pmus_core->cntr_mask & (1 << PERF_RAPL_CORE)))
                pr_info("hw unit of domain %s 2^-%d Joules\n",
                        rapl_core_domain_name, rapl_core_hw_unit);
}

static void cleanup_rapl_pmus(struct rapl_pmus *rapl_pmus)
{
        int i;

        for (i = 0; i < rapl_pmus->nr_rapl_pmu; i++)
                kfree(rapl_pmus->rapl_pmu[i]);
        kfree(rapl_pmus);
}

static const struct attribute_group *rapl_attr_update[] = {
        &rapl_events_cores_group,
        &rapl_events_pkg_group,
        &rapl_events_ram_group,
        &rapl_events_gpu_group,
        &rapl_events_psys_group,
        NULL,
};

static const struct attribute_group *rapl_core_attr_update[] = {
        &rapl_events_core_group,
        NULL,
};

static int __init init_rapl_pmu(struct rapl_pmus *rapl_pmus)
{
        struct rapl_pmu *rapl_pmu;
        int idx;

        for (idx = 0; idx < rapl_pmus->nr_rapl_pmu; idx++) {
                rapl_pmu = kzalloc(sizeof(*rapl_pmu), GFP_KERNEL);
                if (!rapl_pmu)
                        goto free;

                raw_spin_lock_init(&rapl_pmu->lock);
                INIT_LIST_HEAD(&rapl_pmu->active_list);
                rapl_pmu->pmu = &rapl_pmus->pmu;
                rapl_pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
                rapl_hrtimer_init(rapl_pmu);

                rapl_pmus->rapl_pmu[idx] = rapl_pmu;
        }

        return 0;
free:
        for (; idx > 0; idx--)
                kfree(rapl_pmus->rapl_pmu[idx - 1]);
        return -ENOMEM;
}

static int __init init_rapl_pmus(struct rapl_pmus **rapl_pmus_ptr, int rapl_pmu_scope,
                                 const struct attribute_group **rapl_attr_groups,
                                 const struct attribute_group **rapl_attr_update)
{
        int nr_rapl_pmu = topology_max_packages();
        struct rapl_pmus *rapl_pmus;
        int ret;

        /*
         * rapl_pmu_scope must be either PKG, DIE or CORE
         */
        if (rapl_pmu_scope == PERF_PMU_SCOPE_DIE)
                nr_rapl_pmu *= topology_max_dies_per_package();
        else if (rapl_pmu_scope == PERF_PMU_SCOPE_CORE)
                nr_rapl_pmu *= topology_num_cores_per_package();
        else if (rapl_pmu_scope != PERF_PMU_SCOPE_PKG)
                return -EINVAL;

        rapl_pmus = kzalloc(struct_size(rapl_pmus, rapl_pmu, nr_rapl_pmu), GFP_KERNEL);
        if (!rapl_pmus)
                return -ENOMEM;

        *rapl_pmus_ptr = rapl_pmus;
        rapl_pmus->nr_rapl_pmu          = nr_rapl_pmu;
        rapl_pmus->pmu.attr_groups      = rapl_attr_groups;
        rapl_pmus->pmu.attr_update      = rapl_attr_update;
        rapl_pmus->pmu.task_ctx_nr      = perf_invalid_context;
        rapl_pmus->pmu.event_init       = rapl_pmu_event_init;
        rapl_pmus->pmu.add              = rapl_pmu_event_add;
        rapl_pmus->pmu.del              = rapl_pmu_event_del;
        rapl_pmus->pmu.start            = rapl_pmu_event_start;
        rapl_pmus->pmu.stop             = rapl_pmu_event_stop;
        rapl_pmus->pmu.read             = rapl_pmu_event_read;
        rapl_pmus->pmu.scope            = rapl_pmu_scope;
        rapl_pmus->pmu.module           = THIS_MODULE;
        rapl_pmus->pmu.capabilities     = PERF_PMU_CAP_NO_EXCLUDE;

        ret = init_rapl_pmu(rapl_pmus);
        if (ret)
                kfree(rapl_pmus);

        return ret;
}

static struct rapl_model model_snb = {
        .pkg_events     = BIT(PERF_RAPL_PP0) |
                          BIT(PERF_RAPL_PKG) |
                          BIT(PERF_RAPL_PP1),
        .msr_power_unit = MSR_RAPL_POWER_UNIT,
        .rapl_pkg_msrs  = intel_rapl_msrs,
};

static struct rapl_model model_snbep = {
        .pkg_events     = BIT(PERF_RAPL_PP0) |
                          BIT(PERF_RAPL_PKG) |
                          BIT(PERF_RAPL_RAM),
        .msr_power_unit = MSR_RAPL_POWER_UNIT,
        .rapl_pkg_msrs  = intel_rapl_msrs,
};

static struct rapl_model model_hsw = {
        .pkg_events     = BIT(PERF_RAPL_PP0) |
                          BIT(PERF_RAPL_PKG) |
                          BIT(PERF_RAPL_RAM) |
                          BIT(PERF_RAPL_PP1),
        .msr_power_unit = MSR_RAPL_POWER_UNIT,
        .rapl_pkg_msrs  = intel_rapl_msrs,
};

static struct rapl_model model_hsx = {
        .pkg_events     = BIT(PERF_RAPL_PP0) |
                          BIT(PERF_RAPL_PKG) |
                          BIT(PERF_RAPL_RAM),
        .unit_quirk     = RAPL_UNIT_QUIRK_INTEL_HSW,
        .msr_power_unit = MSR_RAPL_POWER_UNIT,
        .rapl_pkg_msrs  = intel_rapl_msrs,
};

static struct rapl_model model_knl = {
        .pkg_events     = BIT(PERF_RAPL_PKG) |
                          BIT(PERF_RAPL_RAM),
        .unit_quirk     = RAPL_UNIT_QUIRK_INTEL_HSW,
        .msr_power_unit = MSR_RAPL_POWER_UNIT,
        .rapl_pkg_msrs  = intel_rapl_msrs,
};

static struct rapl_model model_skl = {
        .pkg_events     = BIT(PERF_RAPL_PP0) |
                          BIT(PERF_RAPL_PKG) |
                          BIT(PERF_RAPL_RAM) |
                          BIT(PERF_RAPL_PP1) |
                          BIT(PERF_RAPL_PSYS),
        .msr_power_unit = MSR_RAPL_POWER_UNIT,
        .rapl_pkg_msrs  = intel_rapl_msrs,
};

static struct rapl_model model_spr = {
        .pkg_events     = BIT(PERF_RAPL_PP0) |
                          BIT(PERF_RAPL_PKG) |
                          BIT(PERF_RAPL_RAM) |
                          BIT(PERF_RAPL_PSYS),
        .unit_quirk     = RAPL_UNIT_QUIRK_INTEL_SPR,
        .msr_power_unit = MSR_RAPL_POWER_UNIT,
        .rapl_pkg_msrs  = intel_rapl_spr_msrs,
};

static struct rapl_model model_amd_hygon = {
        .pkg_events     = BIT(PERF_RAPL_PKG),
        .core_events    = BIT(PERF_RAPL_CORE),
        .msr_power_unit = MSR_AMD_RAPL_POWER_UNIT,
        .rapl_pkg_msrs  = amd_rapl_pkg_msrs,
        .rapl_core_msrs = amd_rapl_core_msrs,
};

static const struct x86_cpu_id rapl_model_match[] __initconst = {
        X86_MATCH_FEATURE(X86_FEATURE_RAPL, &model_amd_hygon),
        X86_MATCH_VFM(INTEL_SANDYBRIDGE, &model_snb),
        X86_MATCH_VFM(INTEL_SANDYBRIDGE_X, &model_snbep),
        X86_MATCH_VFM(INTEL_IVYBRIDGE, &model_snb),
        X86_MATCH_VFM(INTEL_IVYBRIDGE_X, &model_snbep),
        X86_MATCH_VFM(INTEL_HASWELL, &model_hsw),
        X86_MATCH_VFM(INTEL_HASWELL_X, &model_hsx),
        X86_MATCH_VFM(INTEL_HASWELL_L, &model_hsw),
        X86_MATCH_VFM(INTEL_HASWELL_G, &model_hsw),
        X86_MATCH_VFM(INTEL_BROADWELL, &model_hsw),
        X86_MATCH_VFM(INTEL_BROADWELL_G, &model_hsw),
        X86_MATCH_VFM(INTEL_BROADWELL_X, &model_hsx),
        X86_MATCH_VFM(INTEL_BROADWELL_D, &model_hsx),
        X86_MATCH_VFM(INTEL_XEON_PHI_KNL, &model_knl),
        X86_MATCH_VFM(INTEL_XEON_PHI_KNM, &model_knl),
        X86_MATCH_VFM(INTEL_SKYLAKE_L, &model_skl),
        X86_MATCH_VFM(INTEL_SKYLAKE, &model_skl),
        X86_MATCH_VFM(INTEL_SKYLAKE_X, &model_hsx),
        X86_MATCH_VFM(INTEL_KABYLAKE_L, &model_skl),
        X86_MATCH_VFM(INTEL_KABYLAKE, &model_skl),
        X86_MATCH_VFM(INTEL_CANNONLAKE_L, &model_skl),
        X86_MATCH_VFM(INTEL_ATOM_GOLDMONT, &model_hsw),
        X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_D, &model_hsw),
        X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_PLUS, &model_hsw),
        X86_MATCH_VFM(INTEL_ICELAKE_L, &model_skl),
        X86_MATCH_VFM(INTEL_ICELAKE, &model_skl),
        X86_MATCH_VFM(INTEL_ICELAKE_D, &model_hsx),
        X86_MATCH_VFM(INTEL_ICELAKE_X, &model_hsx),
        X86_MATCH_VFM(INTEL_COMETLAKE_L, &model_skl),
        X86_MATCH_VFM(INTEL_COMETLAKE, &model_skl),
        X86_MATCH_VFM(INTEL_TIGERLAKE_L, &model_skl),
        X86_MATCH_VFM(INTEL_TIGERLAKE, &model_skl),
        X86_MATCH_VFM(INTEL_ALDERLAKE, &model_skl),
        X86_MATCH_VFM(INTEL_ALDERLAKE_L, &model_skl),
        X86_MATCH_VFM(INTEL_ATOM_GRACEMONT, &model_skl),
        X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, &model_spr),
        X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, &model_spr),
        X86_MATCH_VFM(INTEL_RAPTORLAKE, &model_skl),
        X86_MATCH_VFM(INTEL_RAPTORLAKE_P, &model_skl),
        X86_MATCH_VFM(INTEL_RAPTORLAKE_S, &model_skl),
        X86_MATCH_VFM(INTEL_METEORLAKE, &model_skl),
        X86_MATCH_VFM(INTEL_METEORLAKE_L, &model_skl),
        X86_MATCH_VFM(INTEL_ARROWLAKE_H, &model_skl),
        X86_MATCH_VFM(INTEL_ARROWLAKE, &model_skl),
        X86_MATCH_VFM(INTEL_ARROWLAKE_U, &model_skl),
        X86_MATCH_VFM(INTEL_LUNARLAKE_M, &model_skl),
        {},
};
MODULE_DEVICE_TABLE(x86cpu, rapl_model_match);

static int __init rapl_pmu_init(void)
{
        const struct x86_cpu_id *id;
        int rapl_pkg_pmu_scope = PERF_PMU_SCOPE_DIE;
        int ret;

        if (rapl_pkg_pmu_is_pkg_scope())
                rapl_pkg_pmu_scope = PERF_PMU_SCOPE_PKG;

        id = x86_match_cpu(rapl_model_match);
        if (!id)
                return -ENODEV;

        rapl_model = (struct rapl_model *) id->driver_data;

        ret = rapl_check_hw_unit();
        if (ret)
                return ret;

        ret = init_rapl_pmus(&rapl_pmus_pkg, rapl_pkg_pmu_scope, rapl_attr_groups,
                             rapl_attr_update);
        if (ret)
                return ret;

        rapl_pmus_pkg->cntr_mask = perf_msr_probe(rapl_model->rapl_pkg_msrs,
                                                  PERF_RAPL_PKG_EVENTS_MAX, false,
                                                  (void *) &rapl_model->pkg_events);

        ret = perf_pmu_register(&rapl_pmus_pkg->pmu, "power", -1);
        if (ret)
                goto out;

        if (rapl_model->core_events) {
                ret = init_rapl_pmus(&rapl_pmus_core, PERF_PMU_SCOPE_CORE,
                                     rapl_core_attr_groups,
                                     rapl_core_attr_update);
                if (ret) {
                        pr_warn("power-core PMU initialization failed (%d)\n", ret);
                        goto core_init_failed;
                }

                rapl_pmus_core->cntr_mask = perf_msr_probe(rapl_model->rapl_core_msrs,
                                                           PERF_RAPL_CORE_EVENTS_MAX, false,
                                                           (void *) &rapl_model->core_events);

                ret = perf_pmu_register(&rapl_pmus_core->pmu, "power_core", -1);
                if (ret) {
                        pr_warn("power-core PMU registration failed (%d)\n", ret);
                        cleanup_rapl_pmus(rapl_pmus_core);
                }
        }

core_init_failed:
        rapl_advertise();
        return 0;

out:
        pr_warn("Initialization failed (%d), disabled\n", ret);
        cleanup_rapl_pmus(rapl_pmus_pkg);
        return ret;
}
module_init(rapl_pmu_init);

static void __exit intel_rapl_exit(void)
{
        if (rapl_pmus_core) {
                perf_pmu_unregister(&rapl_pmus_core->pmu);
                cleanup_rapl_pmus(rapl_pmus_core);
        }
        perf_pmu_unregister(&rapl_pmus_pkg->pmu);
        cleanup_rapl_pmus(rapl_pmus_pkg);
}
module_exit(intel_rapl_exit);