Lines matching "armv8-based"
1 // SPDX-License-Identifier: GPL-2.0-only
25 switch (kvm->arch.pmuver) { in kvm_pmu_event_mask()
26 case 1: /* ARMv8.0 */ in kvm_pmu_event_mask()
28 case 4: /* ARMv8.1 */ in kvm_pmu_event_mask()
29 case 5: /* ARMv8.4 */ in kvm_pmu_event_mask()
30 case 6: /* ARMv8.5 */ in kvm_pmu_event_mask()
33 WARN_ONCE(1, "Unknown PMU version %d\n", kvm->arch.pmuver); in kvm_pmu_event_mask()
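The case labels above are all that survives of the event-mask helper in this listing of KVM's arm64 PMU emulation code; the return values are elided. Below is a minimal sketch of how such a helper plausibly completes, assuming kernel context for GENMASK()/WARN_ONCE() and using the architected event-number widths (10 bits for an ARMv8.0 PMU, 16 bits from ARMv8.1 onwards). The function and parameter names here are illustrative, not the original ones.

/* Illustrative sketch only: width of the guest-visible PMU event space,
 * keyed on the PMU version advertised for the VM.  The 10-bit vs 16-bit
 * split follows the ARMv8.0 vs ARMv8.1+ architected event widths. */
static u64 example_pmu_event_mask(unsigned int pmuver)
{
	switch (pmuver) {
	case 1:			/* ARMv8.0: 10-bit event numbers */
		return GENMASK(9, 0);
	case 4:			/* ARMv8.1 */
	case 5:			/* ARMv8.4 */
	case 6:			/* ARMv8.5: 16-bit event numbers */
		return GENMASK(15, 0);
	default:
		WARN_ONCE(1, "Unknown PMU version %d\n", pmuver);
		return 0;
	}
}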
39 * kvm_pmu_idx_is_64bit - determine if select_idx is a 64bit counter
54 pmc -= pmc->idx; in kvm_pmc_to_vcpu()
61 * kvm_pmu_pmc_is_chained - determine if the pmc is chained
68 return test_bit(pmc->idx >> 1, vcpu->arch.pmu.chained); in kvm_pmu_pmc_is_chained()
72 * kvm_pmu_idx_is_high_counter - determine if select_idx is a high/low counter
81 * kvm_pmu_get_canonical_pmc - obtain the canonical pmc
90 kvm_pmu_idx_is_high_counter(pmc->idx)) in kvm_pmu_get_canonical_pmc()
91 return pmc - 1; in kvm_pmu_get_canonical_pmc()
97 if (kvm_pmu_idx_is_high_counter(pmc->idx)) in kvm_pmu_get_alternate_pmc()
98 return pmc - 1; in kvm_pmu_get_alternate_pmc()
104 * kvm_pmu_idx_has_chain_evtype - determine if the event type is chain
118 eventsel = __vcpu_sys_reg(vcpu, reg) & kvm_pmu_event_mask(vcpu->kvm); in kvm_pmu_idx_has_chain_evtype()
124 * kvm_pmu_get_pair_counter_value - get PMU counter value
135 reg = PMEVCNTR0_EL0 + pmc->idx; in kvm_pmu_get_pair_counter_value()
142 reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX) in kvm_pmu_get_pair_counter_value()
143 ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx; in kvm_pmu_get_pair_counter_value()
151 if (pmc->perf_event) in kvm_pmu_get_pair_counter_value()
152 counter += perf_event_read_value(pmc->perf_event, &enabled, in kvm_pmu_get_pair_counter_value()
159 * kvm_pmu_get_counter_value - get PMU counter value
166 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_get_counter_value()
167 struct kvm_pmc *pmc = &pmu->pmc[select_idx]; in kvm_pmu_get_counter_value()
181 * kvm_pmu_set_counter_value - set PMU counter value
192 __vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx); in kvm_pmu_set_counter_value()
199 * kvm_pmu_release_perf_event - remove the perf event
205 if (pmc->perf_event) { in kvm_pmu_release_perf_event()
206 perf_event_disable(pmc->perf_event); in kvm_pmu_release_perf_event()
207 perf_event_release_kernel(pmc->perf_event); in kvm_pmu_release_perf_event()
208 pmc->perf_event = NULL; in kvm_pmu_release_perf_event()
213 * kvm_pmu_stop_counter - stop PMU counter
223 if (!pmc->perf_event) in kvm_pmu_stop_counter()
228 if (pmc->idx == ARMV8_PMU_CYCLE_IDX) { in kvm_pmu_stop_counter()
232 reg = PMEVCNTR0_EL0 + pmc->idx; in kvm_pmu_stop_counter()
245 * kvm_pmu_vcpu_init - assign pmu counter idx for cpu
252 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_vcpu_init()
255 pmu->pmc[i].idx = i; in kvm_pmu_vcpu_init()
259 * kvm_pmu_vcpu_reset - reset pmu state for cpu
266 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_vcpu_reset()
270 kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]); in kvm_pmu_vcpu_reset()
272 bitmap_zero(vcpu->arch.pmu.chained, ARMV8_PMU_MAX_COUNTER_PAIRS); in kvm_pmu_vcpu_reset()
276 * kvm_pmu_vcpu_destroy - free perf event of PMU for cpu
283 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_vcpu_destroy()
286 kvm_pmu_release_perf_event(&pmu->pmc[i]); in kvm_pmu_vcpu_destroy()
287 irq_work_sync(&vcpu->arch.pmu.overflow_work); in kvm_pmu_vcpu_destroy()
298 return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX); in kvm_pmu_valid_counter_mask()
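The mask built on the line above covers the implemented event counters plus the always-present cycle counter. Here is a small, standalone illustration of where that count comes from, assuming the architected PMCR_EL0 layout (N, the number of event counters, in bits [15:11]) and index 31 for the cycle counter, as the listing uses; the names and the sample PMCR value are hypothetical.

#include <inttypes.h>
#include <stdio.h>

/* Valid counter indices: N event counters (PMCR_EL0 bits [15:11]) plus
 * the cycle counter at index 31. */
static uint64_t example_valid_counter_mask(uint64_t pmcr)
{
	uint64_t n = (pmcr >> 11) & 0x1f;
	uint64_t cycle = 1ULL << 31;

	return n ? (((1ULL << n) - 1) | cycle) : cycle;
}

int main(void)
{
	/* e.g. a PMCR value advertising 6 event counters */
	printf("%#" PRIx64 "\n", example_valid_counter_mask(6ULL << 11)); /* 0x8000003f */
	return 0;
}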
302 * kvm_pmu_enable_counter_mask - enable selected PMU counters
311 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_enable_counter_mask()
321 pmc = &pmu->pmc[i]; in kvm_pmu_enable_counter_mask()
328 if (pmc->perf_event) { in kvm_pmu_enable_counter_mask()
329 perf_event_enable(pmc->perf_event); in kvm_pmu_enable_counter_mask()
330 if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE) in kvm_pmu_enable_counter_mask()
337 * kvm_pmu_disable_counter_mask - disable selected PMU counters
346 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_disable_counter_mask()
356 pmc = &pmu->pmc[i]; in kvm_pmu_disable_counter_mask()
363 if (pmc->perf_event) in kvm_pmu_disable_counter_mask()
364 perf_event_disable(pmc->perf_event); in kvm_pmu_disable_counter_mask()
384 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_update_state()
391 if (pmu->irq_level == overflow) in kvm_pmu_update_state()
394 pmu->irq_level = overflow; in kvm_pmu_update_state()
396 if (likely(irqchip_in_kernel(vcpu->kvm))) { in kvm_pmu_update_state()
397 int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id, in kvm_pmu_update_state()
398 pmu->irq_num, overflow, pmu); in kvm_pmu_update_state()
405 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_should_notify_user()
406 struct kvm_sync_regs *sregs = &vcpu->run->s.regs; in kvm_pmu_should_notify_user()
407 bool run_level = sregs->device_irq_level & KVM_ARM_DEV_PMU; in kvm_pmu_should_notify_user()
409 if (likely(irqchip_in_kernel(vcpu->kvm))) in kvm_pmu_should_notify_user()
412 return pmu->irq_level != run_level; in kvm_pmu_should_notify_user()
420 struct kvm_sync_regs *regs = &vcpu->run->s.regs; in kvm_pmu_update_run()
423 regs->device_irq_level &= ~KVM_ARM_DEV_PMU; in kvm_pmu_update_run()
424 if (vcpu->arch.pmu.irq_level) in kvm_pmu_update_run()
425 regs->device_irq_level |= KVM_ARM_DEV_PMU; in kvm_pmu_update_run()
429 * kvm_pmu_flush_hwstate - flush pmu state to cpu
441 * kvm_pmu_sync_hwstate - sync pmu state from cpu
463 vcpu = kvm_pmc_to_vcpu(pmu->pmc); in kvm_pmu_perf_overflow_notify_vcpu()
475 struct kvm_pmc *pmc = perf_event->overflow_handler_context; in kvm_pmu_perf_overflow()
476 struct arm_pmu *cpu_pmu = to_arm_pmu(perf_event->pmu); in kvm_pmu_perf_overflow()
478 int idx = pmc->idx; in kvm_pmu_perf_overflow()
481 cpu_pmu->pmu.stop(perf_event, PERF_EF_UPDATE); in kvm_pmu_perf_overflow()
487 period = -(local64_read(&perf_event->count)); in kvm_pmu_perf_overflow()
489 if (!kvm_pmu_idx_is_64bit(vcpu, pmc->idx)) in kvm_pmu_perf_overflow()
492 local64_set(&perf_event->hw.period_left, 0); in kvm_pmu_perf_overflow()
493 perf_event->attr.sample_period = period; in kvm_pmu_perf_overflow()
494 perf_event->hw.sample_period = period; in kvm_pmu_perf_overflow()
504 irq_work_queue(&vcpu->arch.pmu.overflow_work); in kvm_pmu_perf_overflow()
507 cpu_pmu->pmu.start(perf_event, PERF_EF_RELOAD); in kvm_pmu_perf_overflow()
511 * kvm_pmu_software_increment - do software increment
517 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_software_increment()
534 type &= kvm_pmu_event_mask(vcpu->kvm); in kvm_pmu_software_increment()
546 if (kvm_pmu_pmc_is_chained(&pmu->pmc[i])) { in kvm_pmu_software_increment()
561 * kvm_pmu_handle_pmcr - handle PMCR register
593 * kvm_pmu_create_perf_event - create a perf event for a counter
599 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_create_perf_event()
610 pmc = kvm_pmu_get_canonical_pmc(&pmu->pmc[select_idx]); in kvm_pmu_create_perf_event()
612 reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX) in kvm_pmu_create_perf_event()
613 ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + pmc->idx; in kvm_pmu_create_perf_event()
617 if (pmc->idx == ARMV8_PMU_CYCLE_IDX) in kvm_pmu_create_perf_event()
620 eventsel = data & kvm_pmu_event_mask(vcpu->kvm); in kvm_pmu_create_perf_event()
630 if (vcpu->kvm->arch.pmu_filter && in kvm_pmu_create_perf_event()
631 !test_bit(eventsel, vcpu->kvm->arch.pmu_filter)) in kvm_pmu_create_perf_event()
638 attr.disabled = !kvm_pmu_counter_is_enabled(vcpu, pmc->idx); in kvm_pmu_create_perf_event()
653 attr.sample_period = (-counter) & GENMASK(63, 0); in kvm_pmu_create_perf_event()
656 event = perf_event_create_kernel_counter(&attr, -1, current, in kvm_pmu_create_perf_event()
661 if (kvm_pmu_idx_is_64bit(vcpu, pmc->idx)) in kvm_pmu_create_perf_event()
662 attr.sample_period = (-counter) & GENMASK(63, 0); in kvm_pmu_create_perf_event()
664 attr.sample_period = (-counter) & GENMASK(31, 0); in kvm_pmu_create_perf_event()
666 event = perf_event_create_kernel_counter(&attr, -1, current, in kvm_pmu_create_perf_event()
676 pmc->perf_event = event; in kvm_pmu_create_perf_event()
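The sample period programmed above is the negated guest counter value truncated to the counter's width, so the backing perf event overflows exactly when the emulated counter would wrap. A standalone arithmetic illustration of that truncation for a 32-bit counter follows; the counter value is made up.

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical guest counter value, 16 ticks short of wrapping. */
	uint64_t counter = 0xFFFFFFF0ULL;

	/* Negate and keep the low 32 bits, mirroring
	 * "(-counter) & GENMASK(31, 0)" in the listing above. */
	uint64_t period = (0 - counter) & 0xFFFFFFFFULL;

	printf("sample period = %" PRIu64 "\n", period); /* prints 16 */
	return 0;
}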
680 * kvm_pmu_update_pmc_chained - update chained bitmap
684  * Update the chained bitmap based on the event type written in the typer register and the enable state of the odd register.
689 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_update_pmc_chained()
690 struct kvm_pmc *pmc = &pmu->pmc[select_idx], *canonical_pmc; in kvm_pmu_update_pmc_chained()
694 new_state = kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx) && in kvm_pmu_update_pmc_chained()
695 kvm_pmu_counter_is_enabled(vcpu, pmc->idx | 0x1); in kvm_pmu_update_pmc_chained()
708 set_bit(pmc->idx >> 1, vcpu->arch.pmu.chained); in kvm_pmu_update_pmc_chained()
711 clear_bit(pmc->idx >> 1, vcpu->arch.pmu.chained); in kvm_pmu_update_pmc_chained()
715 * kvm_pmu_set_counter_event_type - set selected counter to monitor some event
731 mask |= kvm_pmu_event_mask(vcpu->kvm); in kvm_pmu_set_counter_event_type()
766 event = perf_event_create_kernel_counter(&attr, -1, current, in kvm_pmu_probe_pmuver()
775 if (event->pmu) { in kvm_pmu_probe_pmuver()
776 pmu = to_arm_pmu(event->pmu); in kvm_pmu_probe_pmuver()
777 if (pmu->pmuver) in kvm_pmu_probe_pmuver()
778 pmuver = pmu->pmuver; in kvm_pmu_probe_pmuver()
789 unsigned long *bmap = vcpu->kvm->arch.pmu_filter; in kvm_pmu_get_pmceid()
828 if (!vcpu->arch.pmu.created) in kvm_arm_pmu_v3_enable()
833 * properly configured interrupt number and using an in-kernel in kvm_arm_pmu_v3_enable()
834 * irqchip, or to not have an in-kernel GIC and not set an IRQ. in kvm_arm_pmu_v3_enable()
836 if (irqchip_in_kernel(vcpu->kvm)) { in kvm_arm_pmu_v3_enable()
837 int irq = vcpu->arch.pmu.irq_num; in kvm_arm_pmu_v3_enable()
839 return -EINVAL; in kvm_arm_pmu_v3_enable()
842 * If we are using an in-kernel vgic, at this point we know in kvm_arm_pmu_v3_enable()
847 if (!irq_is_ppi(irq) && !vgic_valid_spi(vcpu->kvm, irq)) in kvm_arm_pmu_v3_enable()
848 return -EINVAL; in kvm_arm_pmu_v3_enable()
850 return -EINVAL; in kvm_arm_pmu_v3_enable()
854 vcpu->arch.pmu.ready = true; in kvm_arm_pmu_v3_enable()
861 if (irqchip_in_kernel(vcpu->kvm)) { in kvm_arm_pmu_v3_init()
865 * If using the PMU with an in-kernel virtual GIC in kvm_arm_pmu_v3_init()
869 if (!vgic_initialized(vcpu->kvm)) in kvm_arm_pmu_v3_init()
870 return -ENODEV; in kvm_arm_pmu_v3_init()
873 return -ENXIO; in kvm_arm_pmu_v3_init()
875 ret = kvm_vgic_set_owner(vcpu, vcpu->arch.pmu.irq_num, in kvm_arm_pmu_v3_init()
876 &vcpu->arch.pmu); in kvm_arm_pmu_v3_init()
881 init_irq_work(&vcpu->arch.pmu.overflow_work, in kvm_arm_pmu_v3_init()
884 vcpu->arch.pmu.created = true; in kvm_arm_pmu_v3_init()
903 if (vcpu->arch.pmu.irq_num != irq) in pmu_irq_is_valid()
906 if (vcpu->arch.pmu.irq_num == irq) in pmu_irq_is_valid()
917 !test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features)) in kvm_arm_pmu_v3_set_attr()
918 return -ENODEV; in kvm_arm_pmu_v3_set_attr()
920 if (vcpu->arch.pmu.created) in kvm_arm_pmu_v3_set_attr()
921 return -EBUSY; in kvm_arm_pmu_v3_set_attr()
923 if (!vcpu->kvm->arch.pmuver) in kvm_arm_pmu_v3_set_attr()
924 vcpu->kvm->arch.pmuver = kvm_pmu_probe_pmuver(); in kvm_arm_pmu_v3_set_attr()
926 if (vcpu->kvm->arch.pmuver == 0xf) in kvm_arm_pmu_v3_set_attr()
927 return -ENODEV; in kvm_arm_pmu_v3_set_attr()
929 switch (attr->attr) { in kvm_arm_pmu_v3_set_attr()
931 int __user *uaddr = (int __user *)(long)attr->addr; in kvm_arm_pmu_v3_set_attr()
934 if (!irqchip_in_kernel(vcpu->kvm)) in kvm_arm_pmu_v3_set_attr()
935 return -EINVAL; in kvm_arm_pmu_v3_set_attr()
938 return -EFAULT; in kvm_arm_pmu_v3_set_attr()
942 return -EINVAL; in kvm_arm_pmu_v3_set_attr()
944 if (!pmu_irq_is_valid(vcpu->kvm, irq)) in kvm_arm_pmu_v3_set_attr()
945 return -EINVAL; in kvm_arm_pmu_v3_set_attr()
948 return -EBUSY; in kvm_arm_pmu_v3_set_attr()
951 vcpu->arch.pmu.irq_num = irq; in kvm_arm_pmu_v3_set_attr()
959 nr_events = kvm_pmu_event_mask(vcpu->kvm) + 1; in kvm_arm_pmu_v3_set_attr()
961 uaddr = (struct kvm_pmu_event_filter __user *)(long)attr->addr; in kvm_arm_pmu_v3_set_attr()
964 return -EFAULT; in kvm_arm_pmu_v3_set_attr()
969 return -EINVAL; in kvm_arm_pmu_v3_set_attr()
971 mutex_lock(&vcpu->kvm->lock); in kvm_arm_pmu_v3_set_attr()
973 if (!vcpu->kvm->arch.pmu_filter) { in kvm_arm_pmu_v3_set_attr()
974 vcpu->kvm->arch.pmu_filter = bitmap_alloc(nr_events, GFP_KERNEL); in kvm_arm_pmu_v3_set_attr()
975 if (!vcpu->kvm->arch.pmu_filter) { in kvm_arm_pmu_v3_set_attr()
976 mutex_unlock(&vcpu->kvm->lock); in kvm_arm_pmu_v3_set_attr()
977 return -ENOMEM; in kvm_arm_pmu_v3_set_attr()
987 bitmap_zero(vcpu->kvm->arch.pmu_filter, nr_events); in kvm_arm_pmu_v3_set_attr()
989 bitmap_fill(vcpu->kvm->arch.pmu_filter, nr_events); in kvm_arm_pmu_v3_set_attr()
993 bitmap_set(vcpu->kvm->arch.pmu_filter, filter.base_event, filter.nevents); in kvm_arm_pmu_v3_set_attr()
995 bitmap_clear(vcpu->kvm->arch.pmu_filter, filter.base_event, filter.nevents); in kvm_arm_pmu_v3_set_attr()
997 mutex_unlock(&vcpu->kvm->lock); in kvm_arm_pmu_v3_set_attr()
1005 return -ENXIO; in kvm_arm_pmu_v3_set_attr()
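The filter branch above copies a struct kvm_pmu_event_filter from userspace and folds it into a per-VM allow/deny bitmap. As a rough usage sketch only, assuming the arm64 UAPI names of this era (struct kvm_pmu_event_filter, KVM_PMU_EVENT_DENY, KVM_ARM_VCPU_PMU_V3_CTRL, KVM_ARM_VCPU_PMU_V3_FILTER) and KVM_SET_DEVICE_ATTR on an already-created vCPU fd, userspace might install a deny rule like this; the helper name, fd handling and event range are invented for illustration.

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch: ask KVM to deny guest use of PMU events 0x40-0x4f on this vCPU. */
static int deny_event_range(int vcpu_fd)
{
	struct kvm_pmu_event_filter filter = {
		.base_event = 0x40,
		.nevents    = 16,
		.action     = KVM_PMU_EVENT_DENY,
	};
	struct kvm_device_attr attr = {
		.group = KVM_ARM_VCPU_PMU_V3_CTRL,
		.attr  = KVM_ARM_VCPU_PMU_V3_FILTER,
		.addr  = (__u64)(unsigned long)&filter,
	};

	return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
}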
1010 switch (attr->attr) { in kvm_arm_pmu_v3_get_attr()
1012 int __user *uaddr = (int __user *)(long)attr->addr; in kvm_arm_pmu_v3_get_attr()
1015 if (!irqchip_in_kernel(vcpu->kvm)) in kvm_arm_pmu_v3_get_attr()
1016 return -EINVAL; in kvm_arm_pmu_v3_get_attr()
1018 if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features)) in kvm_arm_pmu_v3_get_attr()
1019 return -ENODEV; in kvm_arm_pmu_v3_get_attr()
1022 return -ENXIO; in kvm_arm_pmu_v3_get_attr()
1024 irq = vcpu->arch.pmu.irq_num; in kvm_arm_pmu_v3_get_attr()
1029 return -ENXIO; in kvm_arm_pmu_v3_get_attr()
1034 switch (attr->attr) { in kvm_arm_pmu_v3_has_attr()
1039 test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features)) in kvm_arm_pmu_v3_has_attr()
1043 return -ENXIO; in kvm_arm_pmu_v3_has_attr()