Lines matching pmc in the KVM arm64 PMU emulation code (arch/arm64/kvm/pmu-emul.c); the numbers are source line numbers, and matches are grouped by the containing function.
Forward declarations:
  23  static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc);
  24  static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc);
  25  static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc);

In kvm_pmc_to_vcpu() (pmc is a function argument):
  33  static struct kvm_vcpu *kvm_pmc_to_vcpu(const struct kvm_pmc *pmc)
  35  return container_of(pmc, struct kvm_vcpu, arch.pmu.pmc[pmc->idx]);

In kvm_vcpu_idx_to_pmc():
  40  return &vcpu->arch.pmu.pmc[cnt_idx];
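kvm_pmc_to_vcpu() recovers the owning vcpu purely from the counter's address: each kvm_pmc stores its own index, so container_of() can walk back over the offset of pmc[idx] inside the enclosing structure. The following is a minimal userspace sketch of that pattern with toy struct names (not the KVM definitions); like the kernel, it relies on GCC/Clang accepting a variable array index inside offsetof():

#include <stddef.h>
#include <stdio.h>

#define NR_COUNTERS 32

/* Toy stand-ins for struct kvm_pmc / struct kvm_vcpu (hypothetical layout). */
struct pmc {
	int idx;
};

struct vcpu {
	int id;
	struct {
		struct pmc pmc[NR_COUNTERS];
	} pmu;
};

/* Simplified container_of(): subtract the member's offset from the
 * member pointer to get back to the enclosing structure. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct vcpu *pmc_to_vcpu(struct pmc *pmc)
{
	/* pmc->idx identifies the array slot, so the offset of
	 * pmu.pmc[pmc->idx] can be computed at run time. */
	return container_of(pmc, struct vcpu, pmu.pmc[pmc->idx]);
}

int main(void)
{
	struct vcpu v = { .id = 3 };

	for (int i = 0; i < NR_COUNTERS; i++)
		v.pmu.pmc[i].idx = i;

	/* Round-trip: counter 7 back to its owning vcpu. */
	printf("vcpu id via counter 7: %d\n", pmc_to_vcpu(&v.pmu.pmc[7])->id);
	return 0;
}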
In kvm_pmc_is_64bit() (pmc is a function argument; line 85 is from its kernel-doc comment):
  85  * @pmc: counter context
  87  static bool kvm_pmc_is_64bit(struct kvm_pmc *pmc)
  89  struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
  91  return (pmc->idx == ARMV8_PMU_CYCLE_IDX ||

In kvm_pmc_has_64bit_overflow() (pmc is a function argument):
  95  static bool kvm_pmc_has_64bit_overflow(struct kvm_pmc *pmc)
  97  struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
 100  if (kvm_pmu_counter_is_hyp(vcpu, pmc->idx))
 103  return (pmc->idx < ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LP)) ||
 104  (pmc->idx == ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LC));

In kvm_pmu_counter_can_chain() (pmc is a function argument):
 107  static bool kvm_pmu_counter_can_chain(struct kvm_pmc *pmc)
 109  return (!(pmc->idx & 1) && (pmc->idx + 1) < ARMV8_PMU_CYCLE_IDX &&
 110  !kvm_pmc_has_64bit_overflow(pmc));
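kvm_pmu_counter_can_chain() encodes the pairing rule visible on line 109: only an even-numbered event counter can chain into its odd neighbour, the pair must sit below the cycle counter slot, and chaining is unnecessary once the counter already overflows at 64 bits. A standalone sketch of just the index arithmetic, assuming the usual cycle-counter index of 31 and stubbing out the 64-bit-overflow check:

#include <stdbool.h>
#include <stdio.h>

#define PMU_CYCLE_IDX 31 /* assumed value of ARMV8_PMU_CYCLE_IDX */

/* Stand-in for kvm_pmc_has_64bit_overflow(): pretend no counter is
 * configured for 64-bit overflow, so chaining always makes sense. */
static bool has_64bit_overflow(int idx)
{
	(void)idx;
	return false;
}

/* Mirror of the index logic: even counter, and its odd partner must
 * still be an ordinary event counter (below the cycle counter slot). */
static bool counter_can_chain(int idx)
{
	return !(idx & 1) && (idx + 1) < PMU_CYCLE_IDX &&
	       !has_64bit_overflow(idx);
}

int main(void)
{
	for (int idx = 0; idx <= PMU_CYCLE_IDX; idx++)
		printf("counter %2d: %s\n", idx,
		       counter_can_chain(idx) ? "can chain" : "cannot chain");
	return 0;
}

Note that counter 30 cannot chain even though it is even, because its partner would be index 31, the cycle counter.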
In kvm_pmc_read_evtreg() (pmc is a function argument):
 123  static u64 kvm_pmc_read_evtreg(const struct kvm_pmc *pmc)
 125  return __vcpu_sys_reg(kvm_pmc_to_vcpu(pmc), counter_index_to_evtreg(pmc->idx));

In kvm_pmu_get_pmc_value() (pmc is a function argument):
 128  static u64 kvm_pmu_get_pmc_value(struct kvm_pmc *pmc)
 130  struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
 133  reg = counter_index_to_reg(pmc->idx);
 140  if (pmc->perf_event)
 141  counter += perf_event_read_value(pmc->perf_event, &enabled,
 144  if (!kvm_pmc_is_64bit(pmc))
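kvm_pmu_get_pmc_value() adds the delta accumulated by the backing perf event on top of the value saved in the counter's system register, then truncates to 32 bits unless the counter is a 64-bit one. A toy illustration of that read path (hypothetical names; the saved value and perf delta are hard-coded):

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Keep only the low 32 bits, like the kernel's lower_32_bits(). */
static uint64_t lower_32_bits(uint64_t v)
{
	return v & 0xffffffffULL;
}

/* Saved register value plus the perf-event delta, truncated when the
 * counter is only 32 bits wide. */
static uint64_t read_counter(uint64_t saved, uint64_t perf_delta, bool is_64bit)
{
	uint64_t counter = saved + perf_delta;

	if (!is_64bit)
		counter = lower_32_bits(counter);
	return counter;
}

int main(void)
{
	uint64_t saved = 0xfffffff0ULL, delta = 0x20;

	printf("32-bit counter: 0x%" PRIx64 "\n", read_counter(saved, delta, false));
	printf("64-bit counter: 0x%" PRIx64 "\n", read_counter(saved, delta, true));
	return 0;
}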
In kvm_pmu_set_pmc_value() (pmc is a function argument):
 160  static void kvm_pmu_set_pmc_value(struct kvm_pmc *pmc, u64 val, bool force)
 162  struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
 165  kvm_pmu_release_perf_event(pmc);
 167  reg = counter_index_to_reg(pmc->idx);
 169  if (vcpu_mode_is_32bit(vcpu) && pmc->idx != ARMV8_PMU_CYCLE_IDX &&
 184  kvm_pmu_create_perf_event(pmc);

In kvm_pmu_release_perf_event() (pmc is a function argument; line 213 is from its kernel-doc comment):
 213  * @pmc: The PMU counter pointer
 215  static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
 217  if (pmc->perf_event) {
 218  perf_event_disable(pmc->perf_event);
 219  perf_event_release_kernel(pmc->perf_event);
 220  pmc->perf_event = NULL;

In kvm_pmu_stop_counter() (pmc is a function argument; line 226 is from its kernel-doc comment):
 226  * @pmc: The PMU counter pointer
 230  static void kvm_pmu_stop_counter(struct kvm_pmc *pmc)
 232  struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
 235  if (!pmc->perf_event)
 238  val = kvm_pmu_get_pmc_value(pmc);
 240  reg = counter_index_to_reg(pmc->idx);
 244  kvm_pmu_release_perf_event(pmc);
In kvm_pmu_vcpu_init():
 258  pmu->pmc[i].idx = i;

In kvm_pmc_enable_perf_event() (pmc is a function argument):
 328  static void kvm_pmc_enable_perf_event(struct kvm_pmc *pmc)
 330  if (!pmc->perf_event) {
 331  kvm_pmu_create_perf_event(pmc);
 335  perf_event_enable(pmc->perf_event);
 336  if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)

In kvm_pmc_disable_perf_event() (pmc is a function argument):
 340  static void kvm_pmc_disable_perf_event(struct kvm_pmc *pmc)
 342  if (pmc->perf_event)
 343  perf_event_disable(pmc->perf_event);

In kvm_pmu_reprogram_counter_mask() (pmc is a local variable):
 354  struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
 359  if (kvm_pmu_counter_is_enabled(pmc))
 360  kvm_pmc_enable_perf_event(pmc);
 362  kvm_pmc_disable_perf_event(pmc);
In kvm_pmu_counter_increment() (pmc is a local variable):
 493  struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
 504  if (!kvm_pmc_is_64bit(pmc))
 509  if (kvm_pmc_has_64bit_overflow(pmc) ? reg : lower_32_bits(reg))
 515  if (kvm_pmu_counter_can_chain(pmc))

In compute_period() (pmc is a function argument):
 522  static u64 compute_period(struct kvm_pmc *pmc, u64 counter)
 526  if (kvm_pmc_is_64bit(pmc) && kvm_pmc_has_64bit_overflow(pmc))
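compute_period() turns the current counter value into a perf sample_period, i.e. the number of increments left before the emulated counter overflows. The listing only shows the width check on line 526; the arithmetic sketched below (assumed, not shown above) is the two's-complement distance to the next overflow, taken modulo 2^64 or 2^32 depending on the overflow width:

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Distance from the current counter value to the next overflow.
 * For a counter that overflows at 64 bits this is simply -counter
 * (which wraps mod 2^64); otherwise it is -counter modulo 2^32.
 */
static uint64_t compute_period(uint64_t counter, bool overflow_at_64bit)
{
	if (overflow_at_64bit)
		return -counter;               /* wraps mod 2^64 */

	return (-counter) & 0xffffffffULL;     /* wraps mod 2^32 */
}

int main(void)
{
	/* A 32-bit counter at 0xfffffffa overflows after 6 more increments. */
	printf("32-bit period: %" PRIu64 "\n", compute_period(0xfffffffaULL, false));

	/* The same value in a 64-bit counter is still far from overflow. */
	printf("64-bit period: 0x%" PRIx64 "\n", compute_period(0xfffffffaULL, true));
	return 0;
}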
In kvm_pmu_perf_overflow() (pmc is a local variable):
 541  struct kvm_pmc *pmc = perf_event->overflow_handler_context;
 543  struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
 544  int idx = pmc->idx;
 553  period = compute_period(pmc, local64_read(&perf_event->count));
 561  if (kvm_pmu_counter_can_chain(pmc))

In kvm_pmu_counter_is_enabled() (pmc is a function argument):
 624  static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc)
 626  struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
 629  if (!(__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(pmc->idx)))
 632  if (kvm_pmu_counter_is_hyp(vcpu, pmc->idx))

In kvm_pmc_counts_at_el0() (pmc is a function argument):
 638  static bool kvm_pmc_counts_at_el0(struct kvm_pmc *pmc)
 640  u64 evtreg = kvm_pmc_read_evtreg(pmc);

In kvm_pmc_counts_at_el1() (pmc is a function argument):
 647  static bool kvm_pmc_counts_at_el1(struct kvm_pmc *pmc)
 649  u64 evtreg = kvm_pmc_read_evtreg(pmc);

In kvm_pmc_counts_at_el2() (pmc is a function argument):
 656  static bool kvm_pmc_counts_at_el2(struct kvm_pmc *pmc)
 658  struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
 661  if (!kvm_pmu_counter_is_hyp(vcpu, pmc->idx) && (mdcr & MDCR_EL2_HPMD))
 664  return kvm_pmc_read_evtreg(pmc) & ARMV8_PMU_INCLUDE_EL2;
In kvm_pmu_create_perf_event() (pmc is a function argument; line 683 is from its kernel-doc comment):
 683  * @pmc: Counter context
 685  static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc)
 687  struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
 694  evtreg = kvm_pmc_read_evtreg(pmc);
 696  kvm_pmu_stop_counter(pmc);
 697  if (pmc->idx == ARMV8_PMU_CYCLE_IDX)
 730  attr.disabled = !kvm_pmu_counter_is_enabled(pmc);
 731  attr.exclude_user = !kvm_pmc_counts_at_el0(pmc);
 741  attr.exclude_kernel = !kvm_pmc_counts_at_el2(pmc);
 743  attr.exclude_kernel = !kvm_pmc_counts_at_el1(pmc);
 750  if (kvm_pmc_is_64bit(pmc))
 753  attr.sample_period = compute_period(pmc, kvm_pmu_get_pmc_value(pmc));
 756  kvm_pmu_perf_overflow, pmc);
 764  pmc->perf_event = event;
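kvm_pmu_create_perf_event() translates the guest's counter configuration into a perf_event_attr: the event starts disabled unless the guest has enabled the counter, and the EL0/EL1/EL2 filter bits become exclude_user/exclude_kernel before the kernel backs the counter with perf_event_create_kernel_counter() (line 756 is the tail of that call). As a rough userspace analogue of the attr setup, not the KVM code path, the same fields drive perf_event_open(2). A minimal Linux sketch that counts CPU cycles in user mode only:

#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.disabled = 1;          /* start stopped, then enable explicitly */
	attr.exclude_kernel = 1;    /* count user mode only */
	attr.exclude_hv = 1;

	fd = perf_event_open(&attr, 0 /* this task */, -1 /* any cpu */, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);

	for (volatile int i = 0; i < 1000000; i++)
		;                   /* something to count */

	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("user-mode cycles: %llu\n", (unsigned long long)count);

	close(fd);
	return 0;
}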
In kvm_pmu_set_counter_event_type() (pmc is a local variable; line 773 is from the preceding comment):
 773  * When OS accesses PMXEVTYPER_EL0, that means it wants to set a PMC to count an
 780  struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, select_idx);
 783  reg = counter_index_to_evtreg(pmc->idx);
 786  kvm_pmu_create_perf_event(pmc);

In kvm_pmu_nested_transition() (pmc is a local variable):
1275  struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
1282  if (kvm_pmc_counts_at_el1(pmc) == kvm_pmc_counts_at_el2(pmc))
1285  kvm_pmu_create_perf_event(pmc);