// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <shannon.zhao@linaro.org>
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/perf_event.h>
#include <linux/perf/arm_pmu.h>
#include <linux/uaccess.h>
#include <asm/kvm_emulate.h>
#include <kvm/arm_pmu.h>
#include <kvm/arm_vgic.h>

#define PERF_ATTR_CFG1_COUNTER_64BIT	BIT(0)

static LIST_HEAD(arm_pmus);
static DEFINE_MUTEX(arm_pmus_lock);

static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc);
static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc);
static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc);

bool kvm_supports_guest_pmuv3(void)
{
	guard(mutex)(&arm_pmus_lock);
	return !list_empty(&arm_pmus);
}

static struct kvm_vcpu *kvm_pmc_to_vcpu(const struct kvm_pmc *pmc)
{
	return container_of(pmc, struct kvm_vcpu, arch.pmu.pmc[pmc->idx]);
}

static struct kvm_pmc *kvm_vcpu_idx_to_pmc(struct kvm_vcpu *vcpu, int cnt_idx)
{
	return &vcpu->arch.pmu.pmc[cnt_idx];
}

static u32 __kvm_pmu_event_mask(unsigned int pmuver)
{
	switch (pmuver) {
	case ID_AA64DFR0_EL1_PMUVer_IMP:
		return GENMASK(9, 0);
	case ID_AA64DFR0_EL1_PMUVer_V3P1:
	case ID_AA64DFR0_EL1_PMUVer_V3P4:
	case ID_AA64DFR0_EL1_PMUVer_V3P5:
	case ID_AA64DFR0_EL1_PMUVer_V3P7:
		return GENMASK(15, 0);
	default:	/* Shouldn't be here, just for sanity */
		WARN_ONCE(1, "Unknown PMU version %d\n", pmuver);
		return 0;
	}
}

static u32 kvm_pmu_event_mask(struct kvm *kvm)
{
	u64 dfr0 = kvm_read_vm_id_reg(kvm, SYS_ID_AA64DFR0_EL1);
	u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, dfr0);

	return __kvm_pmu_event_mask(pmuver);
}

u64 kvm_pmu_evtyper_mask(struct kvm *kvm)
{
	u64 mask = ARMV8_PMU_EXCLUDE_EL1 | ARMV8_PMU_EXCLUDE_EL0 |
		   kvm_pmu_event_mask(kvm);

	if (kvm_has_feat(kvm, ID_AA64PFR0_EL1, EL2, IMP))
		mask |= ARMV8_PMU_INCLUDE_EL2;

	if (kvm_has_feat(kvm, ID_AA64PFR0_EL1, EL3, IMP))
		mask |= ARMV8_PMU_EXCLUDE_NS_EL0 |
			ARMV8_PMU_EXCLUDE_NS_EL1 |
			ARMV8_PMU_EXCLUDE_EL3;

	return mask;
}

/**
 * kvm_pmc_is_64bit - determine if counter is 64bit
 * @pmc: counter context
 */
static bool kvm_pmc_is_64bit(struct kvm_pmc *pmc)
{
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);

	return (pmc->idx == ARMV8_PMU_CYCLE_IDX ||
		kvm_has_feat(vcpu->kvm, ID_AA64DFR0_EL1, PMUVer, V3P5));
}

static bool kvm_pmc_has_64bit_overflow(struct kvm_pmc *pmc)
{
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	u64 val = kvm_vcpu_read_pmcr(vcpu);

	if (kvm_pmu_counter_is_hyp(vcpu, pmc->idx))
		return __vcpu_sys_reg(vcpu, MDCR_EL2) & MDCR_EL2_HLP;

	return (pmc->idx < ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LP)) ||
	       (pmc->idx == ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LC));
}

static bool kvm_pmu_counter_can_chain(struct kvm_pmc *pmc)
{
	return (!(pmc->idx & 1) && (pmc->idx + 1) < ARMV8_PMU_CYCLE_IDX &&
		!kvm_pmc_has_64bit_overflow(pmc));
}

static u32 counter_index_to_reg(u64 idx)
{
	return (idx == ARMV8_PMU_CYCLE_IDX) ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + idx;
}

static u32 counter_index_to_evtreg(u64 idx)
{
	return (idx == ARMV8_PMU_CYCLE_IDX) ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + idx;
}

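/*
 * Each counter is backed by a pair of vCPU sysregs: the cycle counter by
 * PMCCNTR_EL0/PMCCFILTR_EL0, and event counter <n> by PMEVCNTR<n>_EL0/
 * PMEVTYPER<n>_EL0. The two helpers above translate a counter index into
 * those sysreg encodings.
 */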
static u64 kvm_pmc_read_evtreg(const struct kvm_pmc *pmc)
{
	return __vcpu_sys_reg(kvm_pmc_to_vcpu(pmc), counter_index_to_evtreg(pmc->idx));
}

static u64 kvm_pmu_get_pmc_value(struct kvm_pmc *pmc)
{
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	u64 counter, reg, enabled, running;

	reg = counter_index_to_reg(pmc->idx);
	counter = __vcpu_sys_reg(vcpu, reg);

	/*
	 * The real counter value is equal to the value of the counter
	 * register plus the value counted by the perf event.
	 */
	if (pmc->perf_event)
		counter += perf_event_read_value(pmc->perf_event, &enabled,
						 &running);

	if (!kvm_pmc_is_64bit(pmc))
		counter = lower_32_bits(counter);

	return counter;
}

/**
 * kvm_pmu_get_counter_value - get PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
{
	return kvm_pmu_get_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, select_idx));
}

static void kvm_pmu_set_pmc_value(struct kvm_pmc *pmc, u64 val, bool force)
{
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	u64 reg;

	kvm_pmu_release_perf_event(pmc);

	reg = counter_index_to_reg(pmc->idx);

	if (vcpu_mode_is_32bit(vcpu) && pmc->idx != ARMV8_PMU_CYCLE_IDX &&
	    !force) {
		/*
		 * Even with PMUv3p5, AArch32 cannot write to the top
		 * 32bit of the counters. The only possible course of
		 * action is to use PMCR.P, which will reset them to
		 * 0 (the only use of the 'force' parameter).
		 */
		val = lower_32_bits(val);
		val |= __vcpu_sys_reg(vcpu, reg) & GENMASK(63, 32);
	}

	__vcpu_assign_sys_reg(vcpu, reg, val);

	/* Recreate the perf event to reflect the updated sample_period */
	kvm_pmu_create_perf_event(pmc);
}

/**
 * kvm_pmu_set_counter_value - set PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 * @val: The counter value
 */
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
{
	kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, select_idx), val, false);
}

/**
 * kvm_pmu_set_counter_value_user - set PMU counter value from user
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 * @val: The counter value
 */
void kvm_pmu_set_counter_value_user(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
{
	kvm_pmu_release_perf_event(kvm_vcpu_idx_to_pmc(vcpu, select_idx));
	__vcpu_assign_sys_reg(vcpu, counter_index_to_reg(select_idx), val);
	kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
}

/**
 * kvm_pmu_release_perf_event - remove the perf event
 * @pmc: The PMU counter pointer
 */
static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
{
	if (pmc->perf_event) {
		perf_event_disable(pmc->perf_event);
		perf_event_release_kernel(pmc->perf_event);
		pmc->perf_event = NULL;
	}
}

/**
 * kvm_pmu_stop_counter - stop PMU counter
 * @pmc: The PMU counter pointer
 *
 * If this counter has been configured to monitor some event, release it here.
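 * The current counter value (including any delta accumulated by the perf
 * event) is written back to the vCPU sysreg first, so no counts are lost.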
 */
static void kvm_pmu_stop_counter(struct kvm_pmc *pmc)
{
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	u64 reg, val;

	if (!pmc->perf_event)
		return;

	val = kvm_pmu_get_pmc_value(pmc);

	reg = counter_index_to_reg(pmc->idx);

	__vcpu_assign_sys_reg(vcpu, reg, val);

	kvm_pmu_release_perf_event(pmc);
}

/**
 * kvm_pmu_vcpu_init - assign pmu counter idx for cpu
 * @vcpu: The vcpu pointer
 *
 */
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;

	for (i = 0; i < KVM_ARMV8_PMU_MAX_COUNTERS; i++)
		pmu->pmc[i].idx = i;
}

/**
 * kvm_pmu_vcpu_destroy - free perf event of PMU for cpu
 * @vcpu: The vcpu pointer
 *
 */
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	int i;

	for (i = 0; i < KVM_ARMV8_PMU_MAX_COUNTERS; i++)
		kvm_pmu_release_perf_event(kvm_vcpu_idx_to_pmc(vcpu, i));
	irq_work_sync(&vcpu->arch.pmu.overflow_work);
}

static u64 kvm_pmu_hyp_counter_mask(struct kvm_vcpu *vcpu)
{
	unsigned int hpmn, n;

	if (!vcpu_has_nv(vcpu))
		return 0;

	hpmn = SYS_FIELD_GET(MDCR_EL2, HPMN, __vcpu_sys_reg(vcpu, MDCR_EL2));
	n = vcpu->kvm->arch.nr_pmu_counters;

	/*
	 * Programming HPMN to a value greater than PMCR_EL0.N is
	 * CONSTRAINED UNPREDICTABLE. Make the implementation choice that an
	 * UNKNOWN number of counters (in our case, zero) are reserved for EL2.
	 */
	if (hpmn >= n)
		return 0;

	/*
	 * Programming HPMN=0 is CONSTRAINED UNPREDICTABLE if FEAT_HPMN0 isn't
	 * implemented. Since KVM's ability to emulate HPMN=0 does not directly
	 * depend on hardware (all PMU registers are trapped), make the
	 * implementation choice that all counters are included in the second
	 * range reserved for EL2/EL3.
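	 *
	 * For example, with PMCR_EL0.N == 8 and MDCR_EL2.HPMN == 6, counters
	 * 6 and 7 are reserved for EL2 and the returned mask is GENMASK(7, 6).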
	 */
	return GENMASK(n - 1, hpmn);
}

bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx)
{
	return kvm_pmu_hyp_counter_mask(vcpu) & BIT(idx);
}

u64 kvm_pmu_accessible_counter_mask(struct kvm_vcpu *vcpu)
{
	u64 mask = kvm_pmu_implemented_counter_mask(vcpu);

	if (!vcpu_has_nv(vcpu) || vcpu_is_el2(vcpu))
		return mask;

	return mask & ~kvm_pmu_hyp_counter_mask(vcpu);
}

u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu)
{
	u64 val = FIELD_GET(ARMV8_PMU_PMCR_N, kvm_vcpu_read_pmcr(vcpu));

	if (val == 0)
		return BIT(ARMV8_PMU_CYCLE_IDX);
	else
		return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
}

static void kvm_pmc_enable_perf_event(struct kvm_pmc *pmc)
{
	if (!pmc->perf_event) {
		kvm_pmu_create_perf_event(pmc);
		return;
	}

	perf_event_enable(pmc->perf_event);
	if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
		kvm_debug("fail to enable perf event\n");
}

static void kvm_pmc_disable_perf_event(struct kvm_pmc *pmc)
{
	if (pmc->perf_event)
		perf_event_disable(pmc->perf_event);
}

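/**
 * kvm_pmu_reprogram_counter_mask - reprogram the events backing a set of counters
 * @vcpu: The vcpu pointer
 * @val: Bitmask of counters to reprogram
 *
 * Counters that are enabled both individually and by the relevant global
 * enable get a running perf event; all other counters in @val have their
 * backing event disabled.
 */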
void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val)
{
	int i;

	if (!val)
		return;

	for (i = 0; i < KVM_ARMV8_PMU_MAX_COUNTERS; i++) {
		struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, i);

		if (!(val & BIT(i)))
			continue;

		if (kvm_pmu_counter_is_enabled(pmc))
			kvm_pmc_enable_perf_event(pmc);
		else
			kvm_pmc_disable_perf_event(pmc);
	}

	kvm_vcpu_pmu_restore_guest(vcpu);
}

/*
 * Returns the PMU overflow state, which is true if there exists an event
 * counter where the values of the global enable control, PMOVSSET_EL0[n], and
 * PMINTENSET_EL1[n] are all 1.
 */
static bool kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
{
	u64 reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);

	reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);

	/*
	 * PMCR_EL0.E is the global enable control for event counters available
	 * to EL0 and EL1.
	 */
	if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E))
		reg &= kvm_pmu_hyp_counter_mask(vcpu);

	/*
	 * Otherwise, MDCR_EL2.HPME is the global enable control for event
	 * counters reserved for EL2.
	 */
	if (!(vcpu_read_sys_reg(vcpu, MDCR_EL2) & MDCR_EL2_HPME))
		reg &= ~kvm_pmu_hyp_counter_mask(vcpu);

	return reg;
}

static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	bool overflow;

	overflow = kvm_pmu_overflow_status(vcpu);
	if (pmu->irq_level == overflow)
		return;

	pmu->irq_level = overflow;

	if (likely(irqchip_in_kernel(vcpu->kvm))) {
		int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu,
					      pmu->irq_num, overflow, pmu);
		WARN_ON(ret);
	}
}

bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
	bool run_level = sregs->device_irq_level & KVM_ARM_DEV_PMU;

	if (likely(irqchip_in_kernel(vcpu->kvm)))
		return false;

	return pmu->irq_level != run_level;
}

/*
 * Reflect the PMU overflow interrupt output level into the kvm_run structure
 */
void kvm_pmu_update_run(struct kvm_vcpu *vcpu)
{
	struct kvm_sync_regs *regs = &vcpu->run->s.regs;

	/* Populate the PMU bit of the device irq bitmap for user space */
	regs->device_irq_level &= ~KVM_ARM_DEV_PMU;
	if (vcpu->arch.pmu.irq_level)
		regs->device_irq_level |= KVM_ARM_DEV_PMU;
}

/**
 * kvm_pmu_flush_hwstate - flush pmu state to cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the host, and inject
 * an interrupt if that was the case.
 */
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu)
{
	kvm_pmu_update_state(vcpu);
}

/**
 * kvm_pmu_sync_hwstate - sync pmu state from cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the guest, and
 * inject an interrupt if that was the case.
 */
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu)
{
	kvm_pmu_update_state(vcpu);
}

/*
 * When the perf interrupt is an NMI, we cannot safely notify the vcpu
 * corresponding to the event.
 * This is why we need a callback to do it once outside of the NMI context.
 */
static void kvm_pmu_perf_overflow_notify_vcpu(struct irq_work *work)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(work, struct kvm_vcpu, arch.pmu.overflow_work);
	kvm_vcpu_kick(vcpu);
}

/*
 * Perform an increment on any of the counters described in @mask,
 * generating the overflow if required, and propagate it as a chained
 * event if possible.
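 *
 * A counter overflows when its value, truncated to the counter's effective
 * width, wraps to zero: the matching PMOVSSET_EL0 bit is set and, if the
 * next counter is programmed with the CHAIN event, the overflow is
 * propagated to it as an increment.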
 */
static void kvm_pmu_counter_increment(struct kvm_vcpu *vcpu,
				      unsigned long mask, u32 event)
{
	int i;

	if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E))
		return;

	/* Weed out disabled counters */
	mask &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);

	for_each_set_bit(i, &mask, ARMV8_PMU_CYCLE_IDX) {
		struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
		u64 type, reg;

		/* Filter on event type */
		type = __vcpu_sys_reg(vcpu, counter_index_to_evtreg(i));
		type &= kvm_pmu_event_mask(vcpu->kvm);
		if (type != event)
			continue;

		/* Increment this counter */
		reg = __vcpu_sys_reg(vcpu, counter_index_to_reg(i)) + 1;
		if (!kvm_pmc_is_64bit(pmc))
			reg = lower_32_bits(reg);
		__vcpu_assign_sys_reg(vcpu, counter_index_to_reg(i), reg);

		/* No overflow? move on */
		if (kvm_pmc_has_64bit_overflow(pmc) ? reg : lower_32_bits(reg))
			continue;

		/* Mark overflow */
		__vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, |=, BIT(i));

		if (kvm_pmu_counter_can_chain(pmc))
			kvm_pmu_counter_increment(vcpu, BIT(i + 1),
						  ARMV8_PMUV3_PERFCTR_CHAIN);
	}
}

/* Compute the sample period for a given counter value */
static u64 compute_period(struct kvm_pmc *pmc, u64 counter)
{
	u64 val;

	if (kvm_pmc_is_64bit(pmc) && kvm_pmc_has_64bit_overflow(pmc))
		val = (-counter) & GENMASK(63, 0);
	else
		val = (-counter) & GENMASK(31, 0);

	return val;
}

/*
 * When the perf event overflows, set the overflow status and inform the vcpu.
 */
static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
				  struct perf_sample_data *data,
				  struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
	struct arm_pmu *cpu_pmu = to_arm_pmu(perf_event->pmu);
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	int idx = pmc->idx;
	u64 period;

	cpu_pmu->pmu.stop(perf_event, PERF_EF_UPDATE);

	/*
	 * Reset the sample period to the architectural limit,
	 * i.e. the point where the counter overflows.
	 */
	period = compute_period(pmc, local64_read(&perf_event->count));

	local64_set(&perf_event->hw.period_left, 0);
	perf_event->attr.sample_period = period;
	perf_event->hw.sample_period = period;

	__vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, |=, BIT(idx));

	if (kvm_pmu_counter_can_chain(pmc))
		kvm_pmu_counter_increment(vcpu, BIT(idx + 1),
					  ARMV8_PMUV3_PERFCTR_CHAIN);

	if (kvm_pmu_overflow_status(vcpu)) {
		kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);

		if (!in_nmi())
			kvm_vcpu_kick(vcpu);
		else
			irq_work_queue(&vcpu->arch.pmu.overflow_work);
	}

	cpu_pmu->pmu.start(perf_event, PERF_EF_RELOAD);
}

/**
 * kvm_pmu_software_increment - do software increment
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMSWINC register
 */
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
{
	kvm_pmu_counter_increment(vcpu, val, ARMV8_PMUV3_PERFCTR_SW_INCR);
}

/**
 * kvm_pmu_handle_pmcr - handle PMCR register
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCR register
 */
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
{
	int i;

	/* Fixup PMCR_EL0 to reconcile the PMU version and the LP bit */
	if (!kvm_has_feat(vcpu->kvm, ID_AA64DFR0_EL1, PMUVer, V3P5))
		val &= ~ARMV8_PMU_PMCR_LP;

	/* Request a reload of the PMU to enable/disable affected counters */
	if ((__vcpu_sys_reg(vcpu, PMCR_EL0) ^ val) & ARMV8_PMU_PMCR_E)
		kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);

	/* The reset bits don't indicate any state, and shouldn't be saved. */
	__vcpu_assign_sys_reg(vcpu, PMCR_EL0, (val & ~(ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_P)));

	if (val & ARMV8_PMU_PMCR_C)
		kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);

	if (val & ARMV8_PMU_PMCR_P) {
		unsigned long mask = kvm_pmu_implemented_counter_mask(vcpu) &
				     ~BIT(ARMV8_PMU_CYCLE_IDX);

		if (!vcpu_is_el2(vcpu))
			mask &= ~kvm_pmu_hyp_counter_mask(vcpu);

		for_each_set_bit(i, &mask, 32)
			kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, i), 0, true);
	}
}

static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc)
{
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	unsigned int mdcr = __vcpu_sys_reg(vcpu, MDCR_EL2);

	if (!(__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(pmc->idx)))
		return false;

	if (kvm_pmu_counter_is_hyp(vcpu, pmc->idx))
		return mdcr & MDCR_EL2_HPME;

	return kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E;
}

static bool kvm_pmc_counts_at_el0(struct kvm_pmc *pmc)
{
	u64 evtreg = kvm_pmc_read_evtreg(pmc);
	bool nsu = evtreg & ARMV8_PMU_EXCLUDE_NS_EL0;
	bool u = evtreg & ARMV8_PMU_EXCLUDE_EL0;

	return u == nsu;
}

static bool kvm_pmc_counts_at_el1(struct kvm_pmc *pmc)
{
	u64 evtreg = kvm_pmc_read_evtreg(pmc);
	bool nsk = evtreg & ARMV8_PMU_EXCLUDE_NS_EL1;
	bool p = evtreg & ARMV8_PMU_EXCLUDE_EL1;

	return p == nsk;
}

static bool kvm_pmc_counts_at_el2(struct kvm_pmc *pmc)
{
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	u64 mdcr = __vcpu_sys_reg(vcpu, MDCR_EL2);

	if (!kvm_pmu_counter_is_hyp(vcpu, pmc->idx) && (mdcr & MDCR_EL2_HPMD))
		return false;

	return kvm_pmc_read_evtreg(pmc) & ARMV8_PMU_INCLUDE_EL2;
}

static int kvm_map_pmu_event(struct kvm *kvm, unsigned int eventsel)
{
	struct arm_pmu *pmu = kvm->arch.arm_pmu;

	/*
	 * The CPU PMU likely isn't PMUv3; let the driver provide a mapping
	 * for the guest's PMUv3 event ID.
	 */
	if (unlikely(pmu->map_pmuv3_event))
		return pmu->map_pmuv3_event(eventsel);

	return eventsel;
}

/**
 * kvm_pmu_create_perf_event - create a perf event for a counter
 * @pmc: Counter context
 */
static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc)
{
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	struct arm_pmu *arm_pmu = vcpu->kvm->arch.arm_pmu;
	struct perf_event *event;
	struct perf_event_attr attr;
	int eventsel;
	u64 evtreg;

	evtreg = kvm_pmc_read_evtreg(pmc);

	kvm_pmu_stop_counter(pmc);
	if (pmc->idx == ARMV8_PMU_CYCLE_IDX)
		eventsel = ARMV8_PMUV3_PERFCTR_CPU_CYCLES;
	else
		eventsel = evtreg & kvm_pmu_event_mask(vcpu->kvm);

	/*
	 * Neither SW increment nor chained events need to be backed
	 * by a perf event.
	 */
	if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR ||
	    eventsel == ARMV8_PMUV3_PERFCTR_CHAIN)
		return;

	/*
	 * If we have a filter in place and the event isn't allowed, do
	 * not install a perf event either.
	 */
	if (vcpu->kvm->arch.pmu_filter &&
	    !test_bit(eventsel, vcpu->kvm->arch.pmu_filter))
		return;

	/*
	 * Don't create an event if we're running on hardware that requires
	 * PMUv3 event translation and we couldn't find a valid mapping.
	 */
	eventsel = kvm_map_pmu_event(vcpu->kvm, eventsel);
	if (eventsel < 0)
		return;

	memset(&attr, 0, sizeof(struct perf_event_attr));
	attr.type = arm_pmu->pmu.type;
	attr.size = sizeof(attr);
	attr.pinned = 1;
	attr.disabled = !kvm_pmu_counter_is_enabled(pmc);
	attr.exclude_user = !kvm_pmc_counts_at_el0(pmc);
	attr.exclude_hv = 1; /* Don't count EL2 events */
	attr.exclude_host = 1; /* Don't count host events */
	attr.config = eventsel;

	/*
	 * Filter events at EL1 (i.e. vEL2) when in a hyp context based on the
	 * guest's EL2 filter.
	 */
	if (unlikely(is_hyp_ctxt(vcpu)))
		attr.exclude_kernel = !kvm_pmc_counts_at_el2(pmc);
	else
		attr.exclude_kernel = !kvm_pmc_counts_at_el1(pmc);

	/*
	 * If counting with a 64bit counter, advertise it to the perf
	 * code, carefully dealing with the initial sample period
	 * which also depends on the overflow.
	 */
	if (kvm_pmc_is_64bit(pmc))
		attr.config1 |= PERF_ATTR_CFG1_COUNTER_64BIT;

	attr.sample_period = compute_period(pmc, kvm_pmu_get_pmc_value(pmc));

	event = perf_event_create_kernel_counter(&attr, -1, current,
						 kvm_pmu_perf_overflow, pmc);

	if (IS_ERR(event)) {
		pr_err_once("kvm: pmu event creation failed %ld\n",
			    PTR_ERR(event));
		return;
	}

	pmc->perf_event = event;
}

/**
 * kvm_pmu_set_counter_event_type - set selected counter to monitor some event
 * @vcpu: The vcpu pointer
 * @data: The data guest writes to PMXEVTYPER_EL0
 * @select_idx: The number of selected counter
 *
 * When the guest OS accesses PMXEVTYPER_EL0, it wants to set a PMC to count an
 * event with the given hardware event number. Here we call the perf_event API
 * to emulate this action and create a kernel perf event for it.
 */
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
				    u64 select_idx)
{
	struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, select_idx);
	u64 reg;

	reg = counter_index_to_evtreg(pmc->idx);
	__vcpu_assign_sys_reg(vcpu, reg, (data & kvm_pmu_evtyper_mask(vcpu->kvm)));

	kvm_pmu_create_perf_event(pmc);
}

void kvm_host_pmu_init(struct arm_pmu *pmu)
{
	struct arm_pmu_entry *entry;

	/*
	 * Check the sanitised PMU version for the system, as KVM does not
	 * support implementations where PMUv3 exists on a subset of CPUs.
	 */
	if (!pmuv3_implemented(kvm_arm_pmu_get_pmuver_limit()))
		return;

	guard(mutex)(&arm_pmus_lock);

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return;

	entry->arm_pmu = pmu;
	list_add_tail(&entry->entry, &arm_pmus);
}

static struct arm_pmu *kvm_pmu_probe_armpmu(void)
{
	struct arm_pmu_entry *entry;
	struct arm_pmu *pmu;
	int cpu;

	guard(mutex)(&arm_pmus_lock);

	/*
	 * It is safe to use a stale cpu to iterate the list of PMUs so long as
	 * the same value is used for the entirety of the loop. Given this, and
	 * the fact that no percpu data is used for the lookup there is no need
	 * to disable preemption.
	 *
	 * It is still necessary to get a valid cpu, though, to probe for the
	 * default PMU instance as userspace is not required to specify a PMU
	 * type. In order to uphold the preexisting behavior KVM selects the
	 * PMU instance for the core during vcpu init. A dependent use
	 * case would be a user with disdain of all things big.LITTLE that
	 * affines the VMM to a particular cluster of cores.
	 *
	 * In any case, userspace should just do the sane thing and use the
	 * UAPI to select a PMU type directly. But, be wary of the baggage
	 * being carried here.
	 */
	cpu = raw_smp_processor_id();
	list_for_each_entry(entry, &arm_pmus, entry) {
		pmu = entry->arm_pmu;

		if (cpumask_test_cpu(cpu, &pmu->supported_cpus))
			return pmu;
	}

	return NULL;
}

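/*
 * PMCEID0_EL0 advertises common events 0-31 in its lower half and extended
 * events 0x4000-0x401f in its upper half; PMCEID1_EL0 does the same for
 * events 32-63 and 0x4020-0x403f. The arm_pmu driver tracks these as two
 * bitmaps, which are repacked into the 64bit register views below.
 */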
static u64 __compute_pmceid(struct arm_pmu *pmu, bool pmceid1)
{
	u32 hi[2], lo[2];

	bitmap_to_arr32(lo, pmu->pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);
	bitmap_to_arr32(hi, pmu->pmceid_ext_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);

	return ((u64)hi[pmceid1] << 32) | lo[pmceid1];
}

static u64 compute_pmceid0(struct arm_pmu *pmu)
{
	u64 val = __compute_pmceid(pmu, 0);

	/* always support SW_INCR */
	val |= BIT(ARMV8_PMUV3_PERFCTR_SW_INCR);
	/* always support CHAIN */
	val |= BIT(ARMV8_PMUV3_PERFCTR_CHAIN);
	return val;
}

static u64 compute_pmceid1(struct arm_pmu *pmu)
{
	u64 val = __compute_pmceid(pmu, 1);

	/*
	 * Don't advertise STALL_SLOT*, as PMMIR_EL0 is handled
	 * as RAZ
	 */
	val &= ~(BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32) |
		 BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT_FRONTEND - 32) |
		 BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT_BACKEND - 32));
	return val;
}

u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
{
	struct arm_pmu *cpu_pmu = vcpu->kvm->arch.arm_pmu;
	unsigned long *bmap = vcpu->kvm->arch.pmu_filter;
	u64 val, mask = 0;
	int base, i, nr_events;

	if (!pmceid1) {
		val = compute_pmceid0(cpu_pmu);
		base = 0;
	} else {
		val = compute_pmceid1(cpu_pmu);
		base = 32;
	}

	if (!bmap)
		return val;

	nr_events = kvm_pmu_event_mask(vcpu->kvm) + 1;

	for (i = 0; i < 32; i += 8) {
		u64 byte;

		byte = bitmap_get_value8(bmap, base + i);
		mask |= byte << i;
		if (nr_events >= (0x4000 + base + 32)) {
			byte = bitmap_get_value8(bmap, 0x4000 + base + i);
			mask |= byte << (32 + i);
		}
	}

	return val & mask;
}

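/*
 * Resync the PMU state with the current counter configuration: bits for
 * counters that are not implemented are cleared from the overflow, interrupt
 * and enable registers, and the perf events backing the remaining counters
 * are reprogrammed.
 */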
void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu)
{
	u64 mask = kvm_pmu_implemented_counter_mask(vcpu);

	__vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, &=, mask);
	__vcpu_rmw_sys_reg(vcpu, PMINTENSET_EL1, &=, mask);
	__vcpu_rmw_sys_reg(vcpu, PMCNTENSET_EL0, &=, mask);

	kvm_pmu_reprogram_counter_mask(vcpu, mask);
}

int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
	if (!vcpu->arch.pmu.created)
		return -EINVAL;

	/*
	 * A valid interrupt configuration for the PMU is either to have a
	 * properly configured interrupt number and using an in-kernel
	 * irqchip, or to not have an in-kernel GIC and not set an IRQ.
	 */
	if (irqchip_in_kernel(vcpu->kvm)) {
		int irq = vcpu->arch.pmu.irq_num;
		/*
		 * If we are using an in-kernel vgic, at this point we know
		 * the vgic will be initialized, so we can check the PMU irq
		 * number against the dimensions of the vgic and make sure
		 * it's valid.
		 */
		if (!irq_is_ppi(irq) && !vgic_valid_spi(vcpu->kvm, irq))
			return -EINVAL;
	} else if (kvm_arm_pmu_irq_initialized(vcpu)) {
		return -EINVAL;
	}

	return 0;
}

static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
{
	if (irqchip_in_kernel(vcpu->kvm)) {
		int ret;

		/*
		 * If using the PMU with an in-kernel virtual GIC
		 * implementation, we require the GIC to be already
		 * initialized when initializing the PMU.
		 */
		if (!vgic_initialized(vcpu->kvm))
			return -ENODEV;

		if (!kvm_arm_pmu_irq_initialized(vcpu))
			return -ENXIO;

		ret = kvm_vgic_set_owner(vcpu, vcpu->arch.pmu.irq_num,
					 &vcpu->arch.pmu);
		if (ret)
			return ret;
	}

	init_irq_work(&vcpu->arch.pmu.overflow_work,
		      kvm_pmu_perf_overflow_notify_vcpu);

	vcpu->arch.pmu.created = true;
	return 0;
}

/*
 * For one VM the interrupt type must be the same for each vcpu.
 * As a PPI, the interrupt number is the same for all vcpus,
 * while as an SPI it must be a separate number per vcpu.
 */
static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!kvm_arm_pmu_irq_initialized(vcpu))
			continue;

		if (irq_is_ppi(irq)) {
			if (vcpu->arch.pmu.irq_num != irq)
				return false;
		} else {
			if (vcpu->arch.pmu.irq_num == irq)
				return false;
		}
	}

	return true;
}

/**
 * kvm_arm_pmu_get_max_counters - Return the max number of PMU counters.
 * @kvm: The kvm pointer
 */
u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm)
{
	struct arm_pmu *arm_pmu = kvm->arch.arm_pmu;

	/*
	 * PMUv3 requires that all event counters are capable of counting any
	 * event, though the same may not be true of non-PMUv3 hardware.
	 */
	if (cpus_have_final_cap(ARM64_WORKAROUND_PMUV3_IMPDEF_TRAPS))
		return 1;

	/*
	 * The arm_pmu->cntr_mask considers the fixed counter(s) as well.
	 * Ignore those and return only the general-purpose counters.
	 */
	return bitmap_weight(arm_pmu->cntr_mask, ARMV8_PMU_MAX_GENERAL_COUNTERS);
}

static void kvm_arm_set_nr_counters(struct kvm *kvm, unsigned int nr)
{
	kvm->arch.nr_pmu_counters = nr;

	/* Reset MDCR_EL2.HPMN behind the vcpus' back... */
	if (test_bit(KVM_ARM_VCPU_HAS_EL2, kvm->arch.vcpu_features)) {
		struct kvm_vcpu *vcpu;
		unsigned long i;

		kvm_for_each_vcpu(i, vcpu, kvm) {
			u64 val = __vcpu_sys_reg(vcpu, MDCR_EL2);
			val &= ~MDCR_EL2_HPMN;
			val |= FIELD_PREP(MDCR_EL2_HPMN, kvm->arch.nr_pmu_counters);
			__vcpu_assign_sys_reg(vcpu, MDCR_EL2, val);
		}
	}
}

static void kvm_arm_set_pmu(struct kvm *kvm, struct arm_pmu *arm_pmu)
{
	lockdep_assert_held(&kvm->arch.config_lock);

	kvm->arch.arm_pmu = arm_pmu;
	kvm_arm_set_nr_counters(kvm, kvm_arm_pmu_get_max_counters(kvm));
}

/**
 * kvm_arm_set_default_pmu - No PMU set, get the default one.
 * @kvm: The kvm pointer
 *
 * The observant among you will notice that the supported_cpus
 * mask does not get updated for the default PMU even though it
 * is quite possible the selected instance supports only a
 * subset of cores in the system. This is intentional, and
 * upholds the preexisting behavior on heterogeneous systems
 * where vCPUs can be scheduled on any core but the guest
 * counters could stop working.
 */
int kvm_arm_set_default_pmu(struct kvm *kvm)
{
	struct arm_pmu *arm_pmu = kvm_pmu_probe_armpmu();

	if (!arm_pmu)
		return -ENODEV;

	kvm_arm_set_pmu(kvm, arm_pmu);
	return 0;
}

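/*
 * Bind the VM to the PMU instance selected by userspace with
 * KVM_ARM_VCPU_PMU_V3_SET_PMU. This is refused once the VM has run, or if an
 * event filter has already been installed for a different PMU.
 */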
static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id)
{
	struct kvm *kvm = vcpu->kvm;
	struct arm_pmu_entry *entry;
	struct arm_pmu *arm_pmu;
	int ret = -ENXIO;

	lockdep_assert_held(&kvm->arch.config_lock);
	mutex_lock(&arm_pmus_lock);

	list_for_each_entry(entry, &arm_pmus, entry) {
		arm_pmu = entry->arm_pmu;
		if (arm_pmu->pmu.type == pmu_id) {
			if (kvm_vm_has_ran_once(kvm) ||
			    (kvm->arch.pmu_filter && kvm->arch.arm_pmu != arm_pmu)) {
				ret = -EBUSY;
				break;
			}

			kvm_arm_set_pmu(kvm, arm_pmu);
			cpumask_copy(kvm->arch.supported_cpus, &arm_pmu->supported_cpus);
			ret = 0;
			break;
		}
	}

	mutex_unlock(&arm_pmus_lock);
	return ret;
}

static int kvm_arm_pmu_v3_set_nr_counters(struct kvm_vcpu *vcpu, unsigned int n)
{
	struct kvm *kvm = vcpu->kvm;

	if (!kvm->arch.arm_pmu)
		return -EINVAL;

	if (n > kvm_arm_pmu_get_max_counters(kvm))
		return -EINVAL;

	kvm_arm_set_nr_counters(kvm, n);
	return 0;
}

int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	struct kvm *kvm = vcpu->kvm;

	lockdep_assert_held(&kvm->arch.config_lock);

	if (!kvm_vcpu_has_pmu(vcpu))
		return -ENODEV;

	if (vcpu->arch.pmu.created)
		return -EBUSY;

	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ: {
		int __user *uaddr = (int __user *)(long)attr->addr;
		int irq;

		if (!irqchip_in_kernel(kvm))
			return -EINVAL;

		if (get_user(irq, uaddr))
			return -EFAULT;

		/* The PMU overflow interrupt can be a PPI or a valid SPI. */
		if (!(irq_is_ppi(irq) || irq_is_spi(irq)))
			return -EINVAL;

		if (!pmu_irq_is_valid(kvm, irq))
			return -EINVAL;

		if (kvm_arm_pmu_irq_initialized(vcpu))
			return -EBUSY;

		kvm_debug("Set kvm ARM PMU irq: %d\n", irq);
		vcpu->arch.pmu.irq_num = irq;
		return 0;
	}
	case KVM_ARM_VCPU_PMU_V3_FILTER: {
		u8 pmuver = kvm_arm_pmu_get_pmuver_limit();
		struct kvm_pmu_event_filter __user *uaddr;
		struct kvm_pmu_event_filter filter;
		int nr_events;

		/*
		 * Allow userspace to specify an event filter for the entire
		 * event range supported by the hardware's PMUVer, rather
		 * than the guest's PMUVer, for KVM backward compatibility.
		 */
		nr_events = __kvm_pmu_event_mask(pmuver) + 1;

		uaddr = (struct kvm_pmu_event_filter __user *)(long)attr->addr;

		if (copy_from_user(&filter, uaddr, sizeof(filter)))
			return -EFAULT;

		if (((u32)filter.base_event + filter.nevents) > nr_events ||
		    (filter.action != KVM_PMU_EVENT_ALLOW &&
		     filter.action != KVM_PMU_EVENT_DENY))
			return -EINVAL;

		if (kvm_vm_has_ran_once(kvm))
			return -EBUSY;

		if (!kvm->arch.pmu_filter) {
			kvm->arch.pmu_filter = bitmap_alloc(nr_events, GFP_KERNEL_ACCOUNT);
			if (!kvm->arch.pmu_filter)
				return -ENOMEM;

			/*
			 * The default depends on the first applied filter.
			 * If it allows events, the default is to deny.
			 * Conversely, if the first filter denies a set of
			 * events, the default is to allow.
			 */
			if (filter.action == KVM_PMU_EVENT_ALLOW)
				bitmap_zero(kvm->arch.pmu_filter, nr_events);
			else
				bitmap_fill(kvm->arch.pmu_filter, nr_events);
		}

		if (filter.action == KVM_PMU_EVENT_ALLOW)
			bitmap_set(kvm->arch.pmu_filter, filter.base_event, filter.nevents);
		else
			bitmap_clear(kvm->arch.pmu_filter, filter.base_event, filter.nevents);

		return 0;
	}
	case KVM_ARM_VCPU_PMU_V3_SET_PMU: {
		int __user *uaddr = (int __user *)(long)attr->addr;
		int pmu_id;

		if (get_user(pmu_id, uaddr))
			return -EFAULT;

		return kvm_arm_pmu_v3_set_pmu(vcpu, pmu_id);
	}
	case KVM_ARM_VCPU_PMU_V3_SET_NR_COUNTERS: {
		unsigned int __user *uaddr = (unsigned int __user *)(long)attr->addr;
		unsigned int n;

		if (get_user(n, uaddr))
			return -EFAULT;

		return kvm_arm_pmu_v3_set_nr_counters(vcpu, n);
	}
	case KVM_ARM_VCPU_PMU_V3_INIT:
		return kvm_arm_pmu_v3_init(vcpu);
	}

	return -ENXIO;
}

int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ: {
		int __user *uaddr = (int __user *)(long)attr->addr;
		int irq;

		if (!irqchip_in_kernel(vcpu->kvm))
			return -EINVAL;

		if (!kvm_vcpu_has_pmu(vcpu))
			return -ENODEV;

		if (!kvm_arm_pmu_irq_initialized(vcpu))
			return -ENXIO;

		irq = vcpu->arch.pmu.irq_num;
		return put_user(irq, uaddr);
	}
	}

	return -ENXIO;
}

int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ:
	case KVM_ARM_VCPU_PMU_V3_INIT:
	case KVM_ARM_VCPU_PMU_V3_FILTER:
	case KVM_ARM_VCPU_PMU_V3_SET_PMU:
	case KVM_ARM_VCPU_PMU_V3_SET_NR_COUNTERS:
		if (kvm_vcpu_has_pmu(vcpu))
			return 0;
	}

	return -ENXIO;
}

u8 kvm_arm_pmu_get_pmuver_limit(void)
{
	unsigned int pmuver;

	pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer,
			       read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1));

	/*
	 * Spoof a barebones PMUv3 implementation if the system supports IMPDEF
	 * traps of the PMUv3 sysregs
	 */
	if (cpus_have_final_cap(ARM64_WORKAROUND_PMUV3_IMPDEF_TRAPS))
		return ID_AA64DFR0_EL1_PMUVer_IMP;

	/*
	 * Otherwise, treat IMPLEMENTATION DEFINED functionality as
	 * unimplemented
	 */
	if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
		return 0;

	return min(pmuver, ID_AA64DFR0_EL1_PMUVer_V3P5);
}

/**
 * kvm_vcpu_read_pmcr - Read PMCR_EL0 register for the vCPU
 * @vcpu: The vcpu pointer
 */
u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu)
{
	u64 pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0);
	u64 n = vcpu->kvm->arch.nr_pmu_counters;

	if (vcpu_has_nv(vcpu) && !vcpu_is_el2(vcpu))
		n = FIELD_GET(MDCR_EL2_HPMN, __vcpu_sys_reg(vcpu, MDCR_EL2));

	return u64_replace_bits(pmcr, n, ARMV8_PMU_PMCR_N);
}

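/*
 * On a transition between vEL1 and vEL2, counters whose EL1 and EL2 filters
 * disagree must have their perf events recreated, as the event's
 * exclude_kernel attribute is derived from whichever context the vCPU is
 * currently in.
 */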
void kvm_pmu_nested_transition(struct kvm_vcpu *vcpu)
{
	bool reprogrammed = false;
	unsigned long mask;
	int i;

	mask = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
	for_each_set_bit(i, &mask, 32) {
		struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, i);

		/*
		 * We only need to reconfigure events where the filter is
		 * different at EL1 vs. EL2, as we're multiplexing the true EL1
		 * event filter bit for nested.
		 */
		if (kvm_pmc_counts_at_el1(pmc) == kvm_pmc_counts_at_el2(pmc))
			continue;

		kvm_pmu_create_perf_event(pmc);
		reprogrammed = true;
	}

	if (reprogrammed)
		kvm_vcpu_pmu_restore_guest(vcpu);
}