1caab277bSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
2051ff581SShannon Zhao /*
3051ff581SShannon Zhao * Copyright (C) 2015 Linaro Ltd.
4051ff581SShannon Zhao * Author: Shannon Zhao <shannon.zhao@linaro.org>
5051ff581SShannon Zhao */
6051ff581SShannon Zhao
7051ff581SShannon Zhao #include <linux/cpu.h>
8051ff581SShannon Zhao #include <linux/kvm.h>
9051ff581SShannon Zhao #include <linux/kvm_host.h>
10db858060SAlexandru Elisei #include <linux/list.h>
11051ff581SShannon Zhao #include <linux/perf_event.h>
128c3252c0SMarc Zyngier #include <linux/perf/arm_pmu.h>
13bb0c70bcSShannon Zhao #include <linux/uaccess.h>
14051ff581SShannon Zhao #include <asm/kvm_emulate.h>
15051ff581SShannon Zhao #include <kvm/arm_pmu.h>
16b02386ebSShannon Zhao #include <kvm/arm_vgic.h>
17051ff581SShannon Zhao
18bead0220SMarc Zyngier #define PERF_ATTR_CFG1_COUNTER_64BIT BIT(0)
19bead0220SMarc Zyngier
20db858060SAlexandru Elisei static LIST_HEAD(arm_pmus);
21db858060SAlexandru Elisei static DEFINE_MUTEX(arm_pmus_lock);
22db858060SAlexandru Elisei
23d56bdce5SMarc Zyngier static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc);
249917264dSMarc Zyngier static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc);
25e22c3695SOliver Upton static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc);
2680f393a2SAndrew Murray
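/*
 * Guest PMUv3 emulation is only possible when at least one host PMU has
 * been registered with KVM (see kvm_host_pmu_init() below).
 */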
27a38b67d1SOliver Upton bool kvm_supports_guest_pmuv3(void)
28a38b67d1SOliver Upton {
29a38b67d1SOliver Upton guard(mutex)(&arm_pmus_lock);
30a38b67d1SOliver Upton return !list_empty(&arm_pmus);
31a38b67d1SOliver Upton }
32a38b67d1SOliver Upton
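/* Recover the owning vcpu from its embedded kvm_pmu::pmc[] array entry. */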
33d56bdce5SMarc Zyngier static struct kvm_vcpu *kvm_pmc_to_vcpu(const struct kvm_pmc *pmc)
34d56bdce5SMarc Zyngier {
35d56bdce5SMarc Zyngier return container_of(pmc, struct kvm_vcpu, arch.pmu.pmc[pmc->idx]);
36d56bdce5SMarc Zyngier }
37d56bdce5SMarc Zyngier
38d56bdce5SMarc Zyngier static struct kvm_pmc *kvm_vcpu_idx_to_pmc(struct kvm_vcpu *vcpu, int cnt_idx)
39d56bdce5SMarc Zyngier {
40d56bdce5SMarc Zyngier return &vcpu->arch.pmu.pmc[cnt_idx];
41d56bdce5SMarc Zyngier }
42d56bdce5SMarc Zyngier
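/*
 * Valid event-number mask for a given PMU version: PMUv3 has a 10-bit
 * event space, extended to 16 bits from PMUv3p1 onwards.
 */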
43335ca49fSReiji Watanabe static u32 __kvm_pmu_event_mask(unsigned int pmuver)
44fd65a3b5SMarc Zyngier {
4546b18782SMarc Zyngier switch (pmuver) {
46121a8fc0SMark Brown case ID_AA64DFR0_EL1_PMUVer_IMP:
47fd65a3b5SMarc Zyngier return GENMASK(9, 0);
48121a8fc0SMark Brown case ID_AA64DFR0_EL1_PMUVer_V3P1:
49121a8fc0SMark Brown case ID_AA64DFR0_EL1_PMUVer_V3P4:
50121a8fc0SMark Brown case ID_AA64DFR0_EL1_PMUVer_V3P5:
51121a8fc0SMark Brown case ID_AA64DFR0_EL1_PMUVer_V3P7:
52fd65a3b5SMarc Zyngier return GENMASK(15, 0);
53fd65a3b5SMarc Zyngier default: /* Shouldn't be here, just for sanity */
5446b18782SMarc Zyngier WARN_ONCE(1, "Unknown PMU version %d\n", pmuver);
55fd65a3b5SMarc Zyngier return 0;
56fd65a3b5SMarc Zyngier }
57fd65a3b5SMarc Zyngier }
58fd65a3b5SMarc Zyngier
59335ca49fSReiji Watanabe static u32 kvm_pmu_event_mask(struct kvm *kvm)
60335ca49fSReiji Watanabe {
6197ca3fccSOliver Upton u64 dfr0 = kvm_read_vm_id_reg(kvm, SYS_ID_AA64DFR0_EL1);
62335ca49fSReiji Watanabe u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, dfr0);
63335ca49fSReiji Watanabe
64335ca49fSReiji Watanabe return __kvm_pmu_event_mask(pmuver);
65335ca49fSReiji Watanabe }
66335ca49fSReiji Watanabe
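/*
 * Writable bits of the event type registers for this VM: the event number
 * plus the filtering bits for the exception levels the guest implements.
 */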
67bc512d6aSOliver Upton u64 kvm_pmu_evtyper_mask(struct kvm *kvm)
68bc512d6aSOliver Upton {
69bc512d6aSOliver Upton u64 mask = ARMV8_PMU_EXCLUDE_EL1 | ARMV8_PMU_EXCLUDE_EL0 |
70bc512d6aSOliver Upton kvm_pmu_event_mask(kvm);
71bc512d6aSOliver Upton
72c62d7a23SMarc Zyngier if (kvm_has_feat(kvm, ID_AA64PFR0_EL1, EL2, IMP))
73bc512d6aSOliver Upton mask |= ARMV8_PMU_INCLUDE_EL2;
74bc512d6aSOliver Upton
75c62d7a23SMarc Zyngier if (kvm_has_feat(kvm, ID_AA64PFR0_EL1, EL3, IMP))
76ae8d3522SOliver Upton mask |= ARMV8_PMU_EXCLUDE_NS_EL0 |
77ae8d3522SOliver Upton ARMV8_PMU_EXCLUDE_NS_EL1 |
78ae8d3522SOliver Upton ARMV8_PMU_EXCLUDE_EL3;
79ae8d3522SOliver Upton
80bc512d6aSOliver Upton return mask;
81bc512d6aSOliver Upton }
82bc512d6aSOliver Upton
83218907cbSAndrew Murray /**
84d56bdce5SMarc Zyngier * kvm_pmc_is_64bit - determine if counter is 64bit
85d56bdce5SMarc Zyngier * @pmc: counter context
86218907cbSAndrew Murray */
87d56bdce5SMarc Zyngier static bool kvm_pmc_is_64bit(struct kvm_pmc *pmc)
88218907cbSAndrew Murray {
89c62d7a23SMarc Zyngier struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
90c62d7a23SMarc Zyngier
91d56bdce5SMarc Zyngier return (pmc->idx == ARMV8_PMU_CYCLE_IDX ||
92c62d7a23SMarc Zyngier kvm_has_feat(vcpu->kvm, ID_AA64DFR0_EL1, PMUVer, V3P5));
93c82d28cbSMarc Zyngier }
94c82d28cbSMarc Zyngier
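/*
 * A counter overflows at 64 bits (rather than 32) when the relevant
 * long-overflow control is set: MDCR_EL2.HLP for counters reserved for
 * EL2, PMCR_EL0.LP for event counters and PMCR_EL0.LC for the cycle
 * counter.
 */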
95d56bdce5SMarc Zyngier static bool kvm_pmc_has_64bit_overflow(struct kvm_pmc *pmc)
96c82d28cbSMarc Zyngier {
9716535d55SOliver Upton struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
9816535d55SOliver Upton u64 val = kvm_vcpu_read_pmcr(vcpu);
9916535d55SOliver Upton
10016535d55SOliver Upton if (kvm_pmu_counter_is_hyp(vcpu, pmc->idx))
10116535d55SOliver Upton return __vcpu_sys_reg(vcpu, MDCR_EL2) & MDCR_EL2_HLP;
10211af4c37SMarc Zyngier
103d56bdce5SMarc Zyngier return (pmc->idx < ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LP)) ||
104d56bdce5SMarc Zyngier (pmc->idx == ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LC));
105218907cbSAndrew Murray }
106218907cbSAndrew Murray
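/*
 * Only an even-numbered event counter with a 32-bit overflow can feed a
 * CHAIN event programmed in the next (odd) counter.
 */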
107d56bdce5SMarc Zyngier static bool kvm_pmu_counter_can_chain(struct kvm_pmc *pmc)
108bead0220SMarc Zyngier {
109d56bdce5SMarc Zyngier return (!(pmc->idx & 1) && (pmc->idx + 1) < ARMV8_PMU_CYCLE_IDX &&
110d56bdce5SMarc Zyngier !kvm_pmc_has_64bit_overflow(pmc));
11180f393a2SAndrew Murray }
11280f393a2SAndrew Murray
1130cb9c3c8SMarc Zyngier static u32 counter_index_to_reg(u64 idx)
1140cb9c3c8SMarc Zyngier {
1150cb9c3c8SMarc Zyngier return (idx == ARMV8_PMU_CYCLE_IDX) ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + idx;
1160cb9c3c8SMarc Zyngier }
1170cb9c3c8SMarc Zyngier
1180cb9c3c8SMarc Zyngier static u32 counter_index_to_evtreg(u64 idx)
1190cb9c3c8SMarc Zyngier {
1200cb9c3c8SMarc Zyngier return (idx == ARMV8_PMU_CYCLE_IDX) ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + idx;
1210cb9c3c8SMarc Zyngier }
1220cb9c3c8SMarc Zyngier
1239d15f829SOliver Upton static u64 kvm_pmc_read_evtreg(const struct kvm_pmc *pmc)
1249d15f829SOliver Upton {
1259d15f829SOliver Upton return __vcpu_sys_reg(kvm_pmc_to_vcpu(pmc), counter_index_to_evtreg(pmc->idx));
1269d15f829SOliver Upton }
1279d15f829SOliver Upton
128d56bdce5SMarc Zyngier static u64 kvm_pmu_get_pmc_value(struct kvm_pmc *pmc)
12980f393a2SAndrew Murray {
130d56bdce5SMarc Zyngier struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
131bead0220SMarc Zyngier u64 counter, reg, enabled, running;
13280f393a2SAndrew Murray
133d56bdce5SMarc Zyngier reg = counter_index_to_reg(pmc->idx);
13480f393a2SAndrew Murray counter = __vcpu_sys_reg(vcpu, reg);
13580f393a2SAndrew Murray
13680f393a2SAndrew Murray /*
13780f393a2SAndrew Murray * The real counter value is equal to the value of the counter register
13880f393a2SAndrew Murray * plus whatever the backing perf event has counted so far.
13980f393a2SAndrew Murray */
14080f393a2SAndrew Murray if (pmc->perf_event)
14180f393a2SAndrew Murray counter += perf_event_read_value(pmc->perf_event, &enabled,
14280f393a2SAndrew Murray &running);
14380f393a2SAndrew Murray
144d56bdce5SMarc Zyngier if (!kvm_pmc_is_64bit(pmc))
145218907cbSAndrew Murray counter = lower_32_bits(counter);
146218907cbSAndrew Murray
147218907cbSAndrew Murray return counter;
148051ff581SShannon Zhao }
149051ff581SShannon Zhao
150d56bdce5SMarc Zyngier /**
151d56bdce5SMarc Zyngier * kvm_pmu_get_counter_value - get PMU counter value
152d56bdce5SMarc Zyngier * @vcpu: The vcpu pointer
153d56bdce5SMarc Zyngier * @select_idx: The counter index
154d56bdce5SMarc Zyngier */
155d56bdce5SMarc Zyngier u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
156051ff581SShannon Zhao {
157d56bdce5SMarc Zyngier return kvm_pmu_get_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, select_idx));
158d56bdce5SMarc Zyngier }
159d56bdce5SMarc Zyngier
160d56bdce5SMarc Zyngier static void kvm_pmu_set_pmc_value(struct kvm_pmc *pmc, u64 val, bool force)
161d56bdce5SMarc Zyngier {
162d56bdce5SMarc Zyngier struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
163051ff581SShannon Zhao u64 reg;
164051ff581SShannon Zhao
165d56bdce5SMarc Zyngier kvm_pmu_release_perf_event(pmc);
1668f6379e2SAlexandru Elisei
167d56bdce5SMarc Zyngier reg = counter_index_to_reg(pmc->idx);
1689917264dSMarc Zyngier
169d56bdce5SMarc Zyngier if (vcpu_mode_is_32bit(vcpu) && pmc->idx != ARMV8_PMU_CYCLE_IDX &&
17026d2d059SMarc Zyngier !force) {
17126d2d059SMarc Zyngier /*
17226d2d059SMarc Zyngier * Even with PMUv3p5, AArch32 cannot write to the top
17326d2d059SMarc Zyngier * 32bit of the counters. The only possible course of
17426d2d059SMarc Zyngier * action is to use PMCR.P, which will reset them to
17526d2d059SMarc Zyngier * 0 (the only use of the 'force' parameter).
17626d2d059SMarc Zyngier */
17726d2d059SMarc Zyngier val = __vcpu_sys_reg(vcpu, reg) & GENMASK(63, 32);
17826d2d059SMarc Zyngier val |= lower_32_bits(val);
17926d2d059SMarc Zyngier }
18026d2d059SMarc Zyngier
1819917264dSMarc Zyngier __vcpu_assign_sys_reg(vcpu, reg, val);
18230d97754SAndrew Murray
18330d97754SAndrew Murray /* Recreate the perf event to reflect the updated sample_period */
184d56bdce5SMarc Zyngier kvm_pmu_create_perf_event(pmc);
185051ff581SShannon Zhao }
18696b0eebcSShannon Zhao
1877f766358SShannon Zhao /**
18826d2d059SMarc Zyngier * kvm_pmu_set_counter_value - set PMU counter value
18926d2d059SMarc Zyngier * @vcpu: The vcpu pointer
19026d2d059SMarc Zyngier * @select_idx: The counter index
19126d2d059SMarc Zyngier * @val: The counter value
19226d2d059SMarc Zyngier */
19326d2d059SMarc Zyngier void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
19426d2d059SMarc Zyngier {
195d56bdce5SMarc Zyngier kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, select_idx), val, false);
19626d2d059SMarc Zyngier }
19726d2d059SMarc Zyngier
19826d2d059SMarc Zyngier /**
19964074ca8SAkihiko Odaki * kvm_pmu_set_counter_value_user - set PMU counter value from user
20064074ca8SAkihiko Odaki * @vcpu: The vcpu pointer
20164074ca8SAkihiko Odaki * @select_idx: The counter index
20264074ca8SAkihiko Odaki * @val: The counter value
20364074ca8SAkihiko Odaki */
20464074ca8SAkihiko Odaki void kvm_pmu_set_counter_value_user(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
20564074ca8SAkihiko Odaki {
20664074ca8SAkihiko Odaki kvm_pmu_release_perf_event(kvm_vcpu_idx_to_pmc(vcpu, select_idx));
20764074ca8SAkihiko Odaki __vcpu_assign_sys_reg(vcpu, counter_index_to_reg(select_idx), val);
20864074ca8SAkihiko Odaki kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
20964074ca8SAkihiko Odaki }
21064074ca8SAkihiko Odaki
21164074ca8SAkihiko Odaki /**
2126f4d2a0bSAndrew Murray * kvm_pmu_release_perf_event - remove the perf event
2136f4d2a0bSAndrew Murray * @pmc: The PMU counter pointer
2146f4d2a0bSAndrew Murray */
2156f4d2a0bSAndrew Murray static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
2166f4d2a0bSAndrew Murray {
2176f4d2a0bSAndrew Murray if (pmc->perf_event) {
2186f4d2a0bSAndrew Murray perf_event_disable(pmc->perf_event);
2196f4d2a0bSAndrew Murray perf_event_release_kernel(pmc->perf_event);
2206f4d2a0bSAndrew Murray pmc->perf_event = NULL;
2216f4d2a0bSAndrew Murray }
2226f4d2a0bSAndrew Murray }
2236f4d2a0bSAndrew Murray
2246f4d2a0bSAndrew Murray /**
2257f766358SShannon Zhao * kvm_pmu_stop_counter - stop PMU counter
2267f766358SShannon Zhao * @pmc: The PMU counter pointer
2277f766358SShannon Zhao *
2287f766358SShannon Zhao * If this counter has been configured to monitor some event, release it here.
2297f766358SShannon Zhao */
230d56bdce5SMarc Zyngier static void kvm_pmu_stop_counter(struct kvm_pmc *pmc)
2317f766358SShannon Zhao {
232d56bdce5SMarc Zyngier struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
2330f1e172bSMarc Zyngier u64 reg, val;
2347f766358SShannon Zhao
23580f393a2SAndrew Murray if (!pmc->perf_event)
23680f393a2SAndrew Murray return;
23780f393a2SAndrew Murray
238d56bdce5SMarc Zyngier val = kvm_pmu_get_pmc_value(pmc);
23980f393a2SAndrew Murray
2400cb9c3c8SMarc Zyngier reg = counter_index_to_reg(pmc->idx);
24180f393a2SAndrew Murray
242f4e23cf9SMarc Zyngier __vcpu_assign_sys_reg(vcpu, reg, val);
243f4e23cf9SMarc Zyngier
24480f393a2SAndrew Murray kvm_pmu_release_perf_event(pmc);
2457f766358SShannon Zhao }
2467f766358SShannon Zhao
2472aa36e98SShannon Zhao /**
248bca031e2SZenghui Yu * kvm_pmu_vcpu_init - assign PMU counter indices for the vcpu
249bca031e2SZenghui Yu * @vcpu: The vcpu pointer
250bca031e2SZenghui Yu *
251bca031e2SZenghui Yu */
252bca031e2SZenghui Yu void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu)
253bca031e2SZenghui Yu {
254bca031e2SZenghui Yu int i;
255bca031e2SZenghui Yu struct kvm_pmu *pmu = &vcpu->arch.pmu;
256bca031e2SZenghui Yu
2572f62701fSRob Herring (Arm) for (i = 0; i < KVM_ARMV8_PMU_MAX_COUNTERS; i++)
258bca031e2SZenghui Yu pmu->pmc[i].idx = i;
259bca031e2SZenghui Yu }
260bca031e2SZenghui Yu
261bca031e2SZenghui Yu /**
2625f0a714aSShannon Zhao * kvm_pmu_vcpu_destroy - free the vcpu's PMU perf events
2635f0a714aSShannon Zhao * @vcpu: The vcpu pointer
2645f0a714aSShannon Zhao *
2655f0a714aSShannon Zhao */
2665f0a714aSShannon Zhao void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
2675f0a714aSShannon Zhao {
2685f0a714aSShannon Zhao int i;
2695f0a714aSShannon Zhao
2702f62701fSRob Herring (Arm) for (i = 0; i < KVM_ARMV8_PMU_MAX_COUNTERS; i++)
271d56bdce5SMarc Zyngier kvm_pmu_release_perf_event(kvm_vcpu_idx_to_pmc(vcpu, i));
27295e92e45SJulien Thierry irq_work_sync(&vcpu->arch.pmu.overflow_work);
2735f0a714aSShannon Zhao }
2745f0a714aSShannon Zhao
27513905f45SOliver Upton static u64 kvm_pmu_hyp_counter_mask(struct kvm_vcpu *vcpu)
276336afe0cSOliver Upton {
27713905f45SOliver Upton unsigned int hpmn, n;
278336afe0cSOliver Upton
27913905f45SOliver Upton if (!vcpu_has_nv(vcpu))
28013905f45SOliver Upton return 0;
28113905f45SOliver Upton
28213905f45SOliver Upton hpmn = SYS_FIELD_GET(MDCR_EL2, HPMN, __vcpu_sys_reg(vcpu, MDCR_EL2));
283f12b54d7SMarc Zyngier n = vcpu->kvm->arch.nr_pmu_counters;
28413905f45SOliver Upton
28513905f45SOliver Upton /*
28613905f45SOliver Upton * Programming HPMN to a value greater than PMCR_EL0.N is
28713905f45SOliver Upton * CONSTRAINED UNPREDICTABLE. Make the implementation choice that an
28813905f45SOliver Upton * UNKNOWN number of counters (in our case, zero) are reserved for EL2.
28913905f45SOliver Upton */
29013905f45SOliver Upton if (hpmn >= n)
29113905f45SOliver Upton return 0;
292336afe0cSOliver Upton
293336afe0cSOliver Upton /*
294336afe0cSOliver Upton * Programming HPMN=0 is CONSTRAINED UNPREDICTABLE if FEAT_HPMN0 isn't
295336afe0cSOliver Upton * implemented. Since KVM's ability to emulate HPMN=0 does not directly
296336afe0cSOliver Upton * depend on hardware (all PMU registers are trapped), make the
297336afe0cSOliver Upton * implementation choice that all counters are included in the second
298336afe0cSOliver Upton * range reserved for EL2/EL3.
299336afe0cSOliver Upton */
30013905f45SOliver Upton return GENMASK(n - 1, hpmn);
30113905f45SOliver Upton }
30213905f45SOliver Upton
30313905f45SOliver Upton bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx)
30413905f45SOliver Upton {
30513905f45SOliver Upton return kvm_pmu_hyp_counter_mask(vcpu) & BIT(idx);
306336afe0cSOliver Upton }
307336afe0cSOliver Upton
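/*
 * Counters the vcpu can access at its current exception level: everything
 * that is implemented when running at (virtual) EL2, otherwise only the
 * counters below MDCR_EL2.HPMN.
 */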
3089a1c58cfSOliver Upton u64 kvm_pmu_accessible_counter_mask(struct kvm_vcpu *vcpu)
3099a1c58cfSOliver Upton {
3109a1c58cfSOliver Upton u64 mask = kvm_pmu_implemented_counter_mask(vcpu);
3119a1c58cfSOliver Upton
3129a1c58cfSOliver Upton if (!vcpu_has_nv(vcpu) || vcpu_is_el2(vcpu))
3139a1c58cfSOliver Upton return mask;
3149a1c58cfSOliver Upton
31513905f45SOliver Upton return mask & ~kvm_pmu_hyp_counter_mask(vcpu);
3169a1c58cfSOliver Upton }
3179a1c58cfSOliver Upton
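/*
 * Mask of all implemented counters: PMCR_EL0.N event counters plus the
 * cycle counter.
 */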
318a3034dabSOliver Upton u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu)
31996b0eebcSShannon Zhao {
32062e1f212SJames Clark u64 val = FIELD_GET(ARMV8_PMU_PMCR_N, kvm_vcpu_read_pmcr(vcpu));
32196b0eebcSShannon Zhao
32296b0eebcSShannon Zhao if (val == 0)
32396b0eebcSShannon Zhao return BIT(ARMV8_PMU_CYCLE_IDX);
32496b0eebcSShannon Zhao else
32596b0eebcSShannon Zhao return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
32696b0eebcSShannon Zhao }
32796b0eebcSShannon Zhao
328e22c3695SOliver Upton static void kvm_pmc_enable_perf_event(struct kvm_pmc *pmc)
32996b0eebcSShannon Zhao {
330bead0220SMarc Zyngier if (!pmc->perf_event) {
331d56bdce5SMarc Zyngier kvm_pmu_create_perf_event(pmc);
332e22c3695SOliver Upton return;
333e22c3695SOliver Upton }
334e22c3695SOliver Upton
33596b0eebcSShannon Zhao perf_event_enable(pmc->perf_event);
33696b0eebcSShannon Zhao if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
33796b0eebcSShannon Zhao kvm_debug("failed to enable perf event\n");
33896b0eebcSShannon Zhao }
339e22c3695SOliver Upton
340e22c3695SOliver Upton static void kvm_pmc_disable_perf_event(struct kvm_pmc *pmc)
341e22c3695SOliver Upton {
342e22c3695SOliver Upton if (pmc->perf_event)
343e22c3695SOliver Upton perf_event_disable(pmc->perf_event);
34496b0eebcSShannon Zhao }
34596b0eebcSShannon Zhao
346e22c3695SOliver Upton void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val)
34796b0eebcSShannon Zhao {
34896b0eebcSShannon Zhao int i;
34996b0eebcSShannon Zhao
350be5ccac3SAkihiko Odaki if (!val)
35196b0eebcSShannon Zhao return;
35296b0eebcSShannon Zhao
3532f62701fSRob Herring (Arm) for (i = 0; i < KVM_ARMV8_PMU_MAX_COUNTERS; i++) {
354e22c3695SOliver Upton struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
355d56bdce5SMarc Zyngier
35696b0eebcSShannon Zhao if (!(val & BIT(i)))
35796b0eebcSShannon Zhao continue;
35896b0eebcSShannon Zhao
359e22c3695SOliver Upton if (kvm_pmu_counter_is_enabled(pmc))
360e22c3695SOliver Upton kvm_pmc_enable_perf_event(pmc);
361e22c3695SOliver Upton else
362e22c3695SOliver Upton kvm_pmc_disable_perf_event(pmc);
36396b0eebcSShannon Zhao }
364e22c3695SOliver Upton
365e22c3695SOliver Upton kvm_vcpu_pmu_restore_guest(vcpu);
36696b0eebcSShannon Zhao }
3677f766358SShannon Zhao
36813905f45SOliver Upton /*
36913905f45SOliver Upton * Returns the PMU overflow state, which is true if there exists an event
37013905f45SOliver Upton * counter where the values of the global enable control, PMOVSSET_EL0[n], and
37113905f45SOliver Upton * PMINTENSET_EL1[n] are all 1.
37213905f45SOliver Upton */
37313905f45SOliver Upton static bool kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
37476d883c4SShannon Zhao {
37513905f45SOliver Upton u64 reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
37676d883c4SShannon Zhao
3778d404c4cSChristoffer Dall reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
37813905f45SOliver Upton
37913905f45SOliver Upton /*
38013905f45SOliver Upton * PMCR_EL0.E is the global enable control for event counters available
38113905f45SOliver Upton * to EL0 and EL1.
38213905f45SOliver Upton */
38313905f45SOliver Upton if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E))
38413905f45SOliver Upton reg &= kvm_pmu_hyp_counter_mask(vcpu);
38513905f45SOliver Upton
38613905f45SOliver Upton /*
38713905f45SOliver Upton * Otherwise, MDCR_EL2.HPME is the global enable control for event
38813905f45SOliver Upton * counters reserved for EL2.
38913905f45SOliver Upton */
39013905f45SOliver Upton if (!(vcpu_read_sys_reg(vcpu, MDCR_EL2) & MDCR_EL2_HPME))
39113905f45SOliver Upton reg &= ~kvm_pmu_hyp_counter_mask(vcpu);
39276d883c4SShannon Zhao
39376d883c4SShannon Zhao return reg;
39476d883c4SShannon Zhao }
39576d883c4SShannon Zhao
396d9f89b4eSAndrew Jones static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
397b7484931SAndrew Jones {
398b7484931SAndrew Jones struct kvm_pmu *pmu = &vcpu->arch.pmu;
399d9f89b4eSAndrew Jones bool overflow;
400b7484931SAndrew Jones
40113905f45SOliver Upton overflow = kvm_pmu_overflow_status(vcpu);
402b7484931SAndrew Jones if (pmu->irq_level == overflow)
403b7484931SAndrew Jones return;
404b7484931SAndrew Jones
405b7484931SAndrew Jones pmu->irq_level = overflow;
406b7484931SAndrew Jones
407b7484931SAndrew Jones if (likely(irqchip_in_kernel(vcpu->kvm))) {
4089a0a75d3SMarc Zyngier int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu,
409d9f89b4eSAndrew Jones pmu->irq_num, overflow, pmu);
410b7484931SAndrew Jones WARN_ON(ret);
411b7484931SAndrew Jones }
412b7484931SAndrew Jones }
413b7484931SAndrew Jones
4143dbbdf78SChristoffer Dall bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
4153dbbdf78SChristoffer Dall {
4163dbbdf78SChristoffer Dall struct kvm_pmu *pmu = &vcpu->arch.pmu;
4173dbbdf78SChristoffer Dall struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
4183dbbdf78SChristoffer Dall bool run_level = sregs->device_irq_level & KVM_ARM_DEV_PMU;
4193dbbdf78SChristoffer Dall
4203dbbdf78SChristoffer Dall if (likely(irqchip_in_kernel(vcpu->kvm)))
4213dbbdf78SChristoffer Dall return false;
4223dbbdf78SChristoffer Dall
4233dbbdf78SChristoffer Dall return pmu->irq_level != run_level;
4243dbbdf78SChristoffer Dall }
4253dbbdf78SChristoffer Dall
4263dbbdf78SChristoffer Dall /*
4273dbbdf78SChristoffer Dall * Reflect the PMU overflow interrupt output level into the kvm_run structure
4283dbbdf78SChristoffer Dall */
4293dbbdf78SChristoffer Dall void kvm_pmu_update_run(struct kvm_vcpu *vcpu)
4303dbbdf78SChristoffer Dall {
4313dbbdf78SChristoffer Dall struct kvm_sync_regs *regs = &vcpu->run->s.regs;
4323dbbdf78SChristoffer Dall
4333dbbdf78SChristoffer Dall /* Populate the PMU overflow interrupt bit for user space */
4343dbbdf78SChristoffer Dall regs->device_irq_level &= ~KVM_ARM_DEV_PMU;
4353dbbdf78SChristoffer Dall if (vcpu->arch.pmu.irq_level)
4363dbbdf78SChristoffer Dall regs->device_irq_level |= KVM_ARM_DEV_PMU;
4373dbbdf78SChristoffer Dall }
4383dbbdf78SChristoffer Dall
439b02386ebSShannon Zhao /**
440b02386ebSShannon Zhao * kvm_pmu_flush_hwstate - flush pmu state to cpu
441b02386ebSShannon Zhao * @vcpu: The vcpu pointer
442b02386ebSShannon Zhao *
443b02386ebSShannon Zhao * Check if the PMU has overflowed while we were running in the host, and inject
444b02386ebSShannon Zhao * an interrupt if that was the case.
445b02386ebSShannon Zhao */
446b02386ebSShannon Zhao void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu)
447b02386ebSShannon Zhao {
448b02386ebSShannon Zhao kvm_pmu_update_state(vcpu);
449b02386ebSShannon Zhao }
450b02386ebSShannon Zhao
451b02386ebSShannon Zhao /**
452b02386ebSShannon Zhao * kvm_pmu_sync_hwstate - sync pmu state from cpu
453b02386ebSShannon Zhao * @vcpu: The vcpu pointer
454b02386ebSShannon Zhao *
455b02386ebSShannon Zhao * Check if the PMU has overflowed while we were running in the guest, and
456b02386ebSShannon Zhao * inject an interrupt if that was the case.
457b02386ebSShannon Zhao */
458b02386ebSShannon Zhao void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu)
459b02386ebSShannon Zhao {
460b02386ebSShannon Zhao kvm_pmu_update_state(vcpu);
461b02386ebSShannon Zhao }
462b02386ebSShannon Zhao
4632a00f085SRandy Dunlap /*
46495e92e45SJulien Thierry * When the perf interrupt is an NMI, we cannot safely notify the vcpu corresponding
46595e92e45SJulien Thierry * to the event.
46695e92e45SJulien Thierry * This is why we need a callback to do it once outside of the NMI context.
46795e92e45SJulien Thierry */
46895e92e45SJulien Thierry static void kvm_pmu_perf_overflow_notify_vcpu(struct irq_work *work)
46995e92e45SJulien Thierry {
47095e92e45SJulien Thierry struct kvm_vcpu *vcpu;
47195e92e45SJulien Thierry
4729bad925dSMarc Zyngier vcpu = container_of(work, struct kvm_vcpu, arch.pmu.overflow_work);
47395e92e45SJulien Thierry kvm_vcpu_kick(vcpu);
47495e92e45SJulien Thierry }
47595e92e45SJulien Thierry
476bead0220SMarc Zyngier /*
477bead0220SMarc Zyngier * Perform an increment on any of the counters described in @mask,
478bead0220SMarc Zyngier * generating the overflow if required, and propagating it as a chained
479bead0220SMarc Zyngier * event if possible.
480bead0220SMarc Zyngier */
481bead0220SMarc Zyngier static void kvm_pmu_counter_increment(struct kvm_vcpu *vcpu,
482bead0220SMarc Zyngier unsigned long mask, u32 event)
483bead0220SMarc Zyngier {
484bead0220SMarc Zyngier int i;
485bead0220SMarc Zyngier
48657fc267fSReiji Watanabe if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E))
487bead0220SMarc Zyngier return;
488bead0220SMarc Zyngier
489bead0220SMarc Zyngier /* Weed out disabled counters */
490bead0220SMarc Zyngier mask &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
491bead0220SMarc Zyngier
492bead0220SMarc Zyngier for_each_set_bit(i, &mask, ARMV8_PMU_CYCLE_IDX) {
493d56bdce5SMarc Zyngier struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
494bead0220SMarc Zyngier u64 type, reg;
495bead0220SMarc Zyngier
496bead0220SMarc Zyngier /* Filter on event type */
4970cb9c3c8SMarc Zyngier type = __vcpu_sys_reg(vcpu, counter_index_to_evtreg(i));
498bead0220SMarc Zyngier type &= kvm_pmu_event_mask(vcpu->kvm);
499bead0220SMarc Zyngier if (type != event)
500bead0220SMarc Zyngier continue;
501bead0220SMarc Zyngier
502bead0220SMarc Zyngier /* Increment this counter */
5030cb9c3c8SMarc Zyngier reg = __vcpu_sys_reg(vcpu, counter_index_to_reg(i)) + 1;
504d56bdce5SMarc Zyngier if (!kvm_pmc_is_64bit(pmc))
505bead0220SMarc Zyngier reg = lower_32_bits(reg);
5060cb9c3c8SMarc Zyngier __vcpu_assign_sys_reg(vcpu, counter_index_to_reg(i), reg);
507bead0220SMarc Zyngier
508001d85bdSMarc Zyngier /* No overflow? move on */
509d56bdce5SMarc Zyngier if (kvm_pmc_has_64bit_overflow(pmc) ? reg : lower_32_bits(reg))
510bead0220SMarc Zyngier continue;
511bead0220SMarc Zyngier
512bead0220SMarc Zyngier /* Mark overflow */
513bead0220SMarc Zyngier __vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, |=, BIT(i));
514bead0220SMarc Zyngier
515d56bdce5SMarc Zyngier if (kvm_pmu_counter_can_chain(pmc))
516bead0220SMarc Zyngier kvm_pmu_counter_increment(vcpu, BIT(i + 1),
517bead0220SMarc Zyngier ARMV8_PMUV3_PERFCTR_CHAIN);
518bead0220SMarc Zyngier }
519bead0220SMarc Zyngier }
520bead0220SMarc Zyngier
521c82d28cbSMarc Zyngier /* Compute the sample period for a given counter value */
522d56bdce5SMarc Zyngier static u64 compute_period(struct kvm_pmc *pmc, u64 counter)
523c82d28cbSMarc Zyngier {
524c82d28cbSMarc Zyngier u64 val;
525c82d28cbSMarc Zyngier
52658ff6569SMarc Zyngier if (kvm_pmc_is_64bit(pmc) && kvm_pmc_has_64bit_overflow(pmc))
527c82d28cbSMarc Zyngier val = (-counter) & GENMASK(63, 0);
52858ff6569SMarc Zyngier else
529c82d28cbSMarc Zyngier val = (-counter) & GENMASK(31, 0);
530c82d28cbSMarc Zyngier
531c82d28cbSMarc Zyngier return val;
532c82d28cbSMarc Zyngier }
533c82d28cbSMarc Zyngier
5342a00f085SRandy Dunlap /*
535d9f89b4eSAndrew Jones * When the perf event overflows, set the overflow status and inform the vcpu.
536b02386ebSShannon Zhao */
537b02386ebSShannon Zhao static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
538b02386ebSShannon Zhao struct perf_sample_data *data,
539b02386ebSShannon Zhao struct pt_regs *regs)
540b02386ebSShannon Zhao {
541b02386ebSShannon Zhao struct kvm_pmc *pmc = perf_event->overflow_handler_context;
5428c3252c0SMarc Zyngier struct arm_pmu *cpu_pmu = to_arm_pmu(perf_event->pmu);
543b02386ebSShannon Zhao struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
544b02386ebSShannon Zhao int idx = pmc->idx;
5458c3252c0SMarc Zyngier u64 period;
5468c3252c0SMarc Zyngier
5478c3252c0SMarc Zyngier cpu_pmu->pmu.stop(perf_event, PERF_EF_UPDATE);
5488c3252c0SMarc Zyngier
5498c3252c0SMarc Zyngier /*
5508c3252c0SMarc Zyngier * Reset the sample period to the architectural limit,
5518c3252c0SMarc Zyngier * i.e. the point where the counter overflows.
5528c3252c0SMarc Zyngier */
553d56bdce5SMarc Zyngier period = compute_period(pmc, local64_read(&perf_event->count));
5548c3252c0SMarc Zyngier
5558c3252c0SMarc Zyngier local64_set(&perf_event->hw.period_left, 0);
5568c3252c0SMarc Zyngier perf_event->attr.sample_period = period;
5578c3252c0SMarc Zyngier perf_event->hw.sample_period = period;
558b02386ebSShannon Zhao
5598d404c4cSChristoffer Dall __vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, |=, BIT(idx));
560d9f89b4eSAndrew Jones
561d56bdce5SMarc Zyngier if (kvm_pmu_counter_can_chain(pmc))
562bead0220SMarc Zyngier kvm_pmu_counter_increment(vcpu, BIT(idx + 1),
563bead0220SMarc Zyngier ARMV8_PMUV3_PERFCTR_CHAIN);
564bead0220SMarc Zyngier
565d9f89b4eSAndrew Jones if (kvm_pmu_overflow_status(vcpu)) {
566d9f89b4eSAndrew Jones kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
56795e92e45SJulien Thierry
56895e92e45SJulien Thierry if (!in_nmi())
569d9f89b4eSAndrew Jones kvm_vcpu_kick(vcpu);
57095e92e45SJulien Thierry else
57195e92e45SJulien Thierry irq_work_queue(&vcpu->arch.pmu.overflow_work);
572d9f89b4eSAndrew Jones }
5738c3252c0SMarc Zyngier
5748c3252c0SMarc Zyngier cpu_pmu->pmu.start(perf_event, PERF_EF_RELOAD);
575b02386ebSShannon Zhao }
576b02386ebSShannon Zhao
5777a0adc70SShannon Zhao /**
5787a0adc70SShannon Zhao * kvm_pmu_software_increment - do software increment
5797a0adc70SShannon Zhao * @vcpu: The vcpu pointer
5807a0adc70SShannon Zhao * @val: the value the guest writes to the PMSWINC register
5817a0adc70SShannon Zhao */
5827a0adc70SShannon Zhao void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
5837a0adc70SShannon Zhao {
584bead0220SMarc Zyngier kvm_pmu_counter_increment(vcpu, val, ARMV8_PMUV3_PERFCTR_SW_INCR);
5857a0adc70SShannon Zhao }
5867a0adc70SShannon Zhao
58776993739SShannon Zhao /**
58876993739SShannon Zhao * kvm_pmu_handle_pmcr - handle PMCR register
58976993739SShannon Zhao * @vcpu: The vcpu pointer
59076993739SShannon Zhao * @val: the value the guest writes to the PMCR register
59176993739SShannon Zhao */
59276993739SShannon Zhao void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
59376993739SShannon Zhao {
59476993739SShannon Zhao int i;
59576993739SShannon Zhao
59664d6820dSMarc Zyngier /* Fixup PMCR_EL0 to reconcile the PMU version and the LP bit */
597c62d7a23SMarc Zyngier if (!kvm_has_feat(vcpu->kvm, ID_AA64DFR0_EL1, PMUVer, V3P5))
59864d6820dSMarc Zyngier val &= ~ARMV8_PMU_PMCR_LP;
59964d6820dSMarc Zyngier
600adf8623bSOliver Upton /* Request a reload of the PMU to enable/disable affected counters */
601adf8623bSOliver Upton if ((__vcpu_sys_reg(vcpu, PMCR_EL0) ^ val) & ARMV8_PMU_PMCR_E)
602adf8623bSOliver Upton kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
603adf8623bSOliver Upton
604f6da81f6SReiji Watanabe /* The reset bits don't indicate any state, and shouldn't be saved. */
605f6da81f6SReiji Watanabe __vcpu_assign_sys_reg(vcpu, PMCR_EL0, (val & ~(ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_P)));
60664d6820dSMarc Zyngier
60776993739SShannon Zhao if (val & ARMV8_PMU_PMCR_C)
60876993739SShannon Zhao kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);
60976993739SShannon Zhao
61076993739SShannon Zhao if (val & ARMV8_PMU_PMCR_P) {
611e96d8b80SOliver Upton unsigned long mask = kvm_pmu_implemented_counter_mask(vcpu) &
612e96d8b80SOliver Upton ~BIT(ARMV8_PMU_CYCLE_IDX);
613e96d8b80SOliver Upton
61402243533SMarc Zyngier if (!vcpu_is_el2(vcpu))
61502243533SMarc Zyngier mask &= ~kvm_pmu_hyp_counter_mask(vcpu);
61602243533SMarc Zyngier
617c01d6a18SEric Auger for_each_set_bit(i, &mask, 32)
618d56bdce5SMarc Zyngier kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, i), 0, true);
61976993739SShannon Zhao }
62076993739SShannon Zhao }
62176993739SShannon Zhao
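/*
 * A counter is enabled when its PMCNTENSET_EL0 bit is set and the
 * relevant global enable is set: MDCR_EL2.HPME for counters reserved for
 * EL2, PMCR_EL0.E otherwise.
 */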
622d56bdce5SMarc Zyngier static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc)
6237f766358SShannon Zhao {
624d56bdce5SMarc Zyngier struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
625fe827f91SOliver Upton unsigned int mdcr = __vcpu_sys_reg(vcpu, MDCR_EL2);
626fe827f91SOliver Upton
627fe827f91SOliver Upton if (!(__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(pmc->idx)))
628fe827f91SOliver Upton return false;
629fe827f91SOliver Upton
630fe827f91SOliver Upton if (kvm_pmu_counter_is_hyp(vcpu, pmc->idx))
631fe827f91SOliver Upton return mdcr & MDCR_EL2_HPME;
632fe827f91SOliver Upton
633fe827f91SOliver Upton return kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E;
6347f766358SShannon Zhao }
6357f766358SShannon Zhao
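/*
 * Decode the exception-level filtering of the event type register. KVM
 * only ever runs the guest in Non-secure state, so an event counts at
 * EL0/EL1 when the NS filter bit is equal to the corresponding base
 * filter bit.
 */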
6369d15f829SOliver Upton static bool kvm_pmc_counts_at_el0(struct kvm_pmc *pmc)
6379d15f829SOliver Upton {
6389d15f829SOliver Upton u64 evtreg = kvm_pmc_read_evtreg(pmc);
6399d15f829SOliver Upton bool nsu = evtreg & ARMV8_PMU_EXCLUDE_NS_EL0;
6409d15f829SOliver Upton bool u = evtreg & ARMV8_PMU_EXCLUDE_EL0;
6419d15f829SOliver Upton
6429d15f829SOliver Upton return u == nsu;
6439d15f829SOliver Upton }
6449d15f829SOliver Upton
6459d15f829SOliver Upton static bool kvm_pmc_counts_at_el1(struct kvm_pmc *pmc)
6469d15f829SOliver Upton {
6479d15f829SOliver Upton u64 evtreg = kvm_pmc_read_evtreg(pmc);
6489d15f829SOliver Upton bool nsk = evtreg & ARMV8_PMU_EXCLUDE_NS_EL1;
6499d15f829SOliver Upton bool p = evtreg & ARMV8_PMU_EXCLUDE_EL1;
6509d15f829SOliver Upton
6519d15f829SOliver Upton return p == nsk;
6529d15f829SOliver Upton }
6539d15f829SOliver Upton
6548a349790SOliver Upton static bool kvm_pmc_counts_at_el2(struct kvm_pmc *pmc)
6558a349790SOliver Upton {
6568a349790SOliver Upton struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
6578a349790SOliver Upton u64 mdcr = __vcpu_sys_reg(vcpu, MDCR_EL2);
6588a349790SOliver Upton
6598a349790SOliver Upton if (!kvm_pmu_counter_is_hyp(vcpu, pmc->idx) && (mdcr & MDCR_EL2_HPMD))
6608a349790SOliver Upton return false;
6618a349790SOliver Upton
6628a349790SOliver Upton return kvm_pmc_read_evtreg(pmc) & ARMV8_PMU_INCLUDE_EL2;
6638a349790SOliver Upton }
6648a349790SOliver Upton
6651e7dcbfaSOliver Upton static int kvm_map_pmu_event(struct kvm *kvm, unsigned int eventsel)
6661e7dcbfaSOliver Upton {
6671e7dcbfaSOliver Upton struct arm_pmu *pmu = kvm->arch.arm_pmu;
6681e7dcbfaSOliver Upton
6691e7dcbfaSOliver Upton /*
6701e7dcbfaSOliver Upton * The CPU PMU likely isn't PMUv3; let the driver provide a mapping
6711e7dcbfaSOliver Upton * for the guest's PMUv3 event ID.
6721e7dcbfaSOliver Upton */
6731e7dcbfaSOliver Upton if (unlikely(pmu->map_pmuv3_event))
6741e7dcbfaSOliver Upton return pmu->map_pmuv3_event(eventsel);
6751e7dcbfaSOliver Upton
6761e7dcbfaSOliver Upton return eventsel;
6771e7dcbfaSOliver Upton }
6781e7dcbfaSOliver Upton
6797f766358SShannon Zhao /**
68030d97754SAndrew Murray * kvm_pmu_create_perf_event - create a perf event for a counter
681d56bdce5SMarc Zyngier * @pmc: Counter context
6827f766358SShannon Zhao */
683d56bdce5SMarc Zyngier static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc)
6847f766358SShannon Zhao {
685d56bdce5SMarc Zyngier struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
68646b18782SMarc Zyngier struct arm_pmu *arm_pmu = vcpu->kvm->arch.arm_pmu;
6877f766358SShannon Zhao struct perf_event *event;
6887f766358SShannon Zhao struct perf_event_attr attr;
6891e7dcbfaSOliver Upton int eventsel;
6901e7dcbfaSOliver Upton u64 evtreg;
69130d97754SAndrew Murray
6929d15f829SOliver Upton evtreg = kvm_pmc_read_evtreg(pmc);
6937f766358SShannon Zhao
694d56bdce5SMarc Zyngier kvm_pmu_stop_counter(pmc);
695d7eec236SMarc Zyngier if (pmc->idx == ARMV8_PMU_CYCLE_IDX)
696d7eec236SMarc Zyngier eventsel = ARMV8_PMUV3_PERFCTR_CPU_CYCLES;
697d7eec236SMarc Zyngier else
6989d15f829SOliver Upton eventsel = evtreg & kvm_pmu_event_mask(vcpu->kvm);
6997f766358SShannon Zhao
700bead0220SMarc Zyngier /*
701bead0220SMarc Zyngier * Neither SW increment nor chained events need to be backed
702bead0220SMarc Zyngier * by a perf event.
703bead0220SMarc Zyngier */
704bead0220SMarc Zyngier if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR ||
705bead0220SMarc Zyngier eventsel == ARMV8_PMUV3_PERFCTR_CHAIN)
706d7eec236SMarc Zyngier return;
707d7eec236SMarc Zyngier
708d7eec236SMarc Zyngier /*
709d7eec236SMarc Zyngier * If we have a filter in place and the event isn't allowed, do
710d7eec236SMarc Zyngier * not install a perf event either.
711d7eec236SMarc Zyngier */
712d7eec236SMarc Zyngier if (vcpu->kvm->arch.pmu_filter &&
713d7eec236SMarc Zyngier !test_bit(eventsel, vcpu->kvm->arch.pmu_filter))
7147a0adc70SShannon Zhao return;
7157a0adc70SShannon Zhao
7161e7dcbfaSOliver Upton /*
7171e7dcbfaSOliver Upton * Don't create an event if we're running on hardware that requires
7181e7dcbfaSOliver Upton * PMUv3 event translation and we couldn't find a valid mapping.
7191e7dcbfaSOliver Upton */
7201e7dcbfaSOliver Upton eventsel = kvm_map_pmu_event(vcpu->kvm, eventsel);
7211e7dcbfaSOliver Upton if (eventsel < 0)
7221e7dcbfaSOliver Upton return;
7231e7dcbfaSOliver Upton
7247f766358SShannon Zhao memset(&attr, 0, sizeof(struct perf_event_attr));
72546b18782SMarc Zyngier attr.type = arm_pmu->pmu.type;
7267f766358SShannon Zhao attr.size = sizeof(attr);
7277f766358SShannon Zhao attr.pinned = 1;
728d56bdce5SMarc Zyngier attr.disabled = !kvm_pmu_counter_is_enabled(pmc);
7299d15f829SOliver Upton attr.exclude_user = !kvm_pmc_counts_at_el0(pmc);
7307f766358SShannon Zhao attr.exclude_hv = 1; /* Don't count EL2 events */
7317f766358SShannon Zhao attr.exclude_host = 1; /* Don't count host events */
732d7eec236SMarc Zyngier attr.config = eventsel;
7337f766358SShannon Zhao
734bead0220SMarc Zyngier /*
7358a349790SOliver Upton * Filter events at EL1 (i.e. vEL2) when in a hyp context based on the
7368a349790SOliver Upton * guest's EL2 filter.
7378a349790SOliver Upton */
7388a349790SOliver Upton if (unlikely(is_hyp_ctxt(vcpu)))
7398a349790SOliver Upton attr.exclude_kernel = !kvm_pmc_counts_at_el2(pmc);
7408a349790SOliver Upton else
7418a349790SOliver Upton attr.exclude_kernel = !kvm_pmc_counts_at_el1(pmc);
7428a349790SOliver Upton
7438a349790SOliver Upton /*
744bead0220SMarc Zyngier * If counting with a 64bit counter, advertise it to the perf
745c82d28cbSMarc Zyngier * code, carefully dealing with the initial sample period
746c82d28cbSMarc Zyngier * which also depends on the overflow.
74780f393a2SAndrew Murray */
748d56bdce5SMarc Zyngier if (kvm_pmc_is_64bit(pmc))
749bead0220SMarc Zyngier attr.config1 |= PERF_ATTR_CFG1_COUNTER_64BIT;
750c82d28cbSMarc Zyngier
751d56bdce5SMarc Zyngier attr.sample_period = compute_period(pmc, kvm_pmu_get_pmc_value(pmc));
7527f766358SShannon Zhao
753b02386ebSShannon Zhao event = perf_event_create_kernel_counter(&attr, -1, current,
754b02386ebSShannon Zhao kvm_pmu_perf_overflow, pmc);
75580f393a2SAndrew Murray
7567f766358SShannon Zhao if (IS_ERR(event)) {
7577f766358SShannon Zhao pr_err_once("kvm: pmu event creation failed %ld\n",
7587f766358SShannon Zhao PTR_ERR(event));
7597f766358SShannon Zhao return;
7607f766358SShannon Zhao }
7617f766358SShannon Zhao
7627f766358SShannon Zhao pmc->perf_event = event;
7637f766358SShannon Zhao }
764808e7381SShannon Zhao
76530d97754SAndrew Murray /**
76630d97754SAndrew Murray * kvm_pmu_set_counter_event_type - set selected counter to monitor some event
76730d97754SAndrew Murray * @vcpu: The vcpu pointer
76830d97754SAndrew Murray * @data: The data guest writes to PMXEVTYPER_EL0
76930d97754SAndrew Murray * @select_idx: The number of selected counter
77030d97754SAndrew Murray *
77130d97754SAndrew Murray * When the guest OS accesses PMXEVTYPER_EL0, it wants to set a PMC to count an
77230d97754SAndrew Murray * event with the given hardware event number. Here we call the perf_event API to
77330d97754SAndrew Murray * emulate this action and create a kernel perf event for it.
77430d97754SAndrew Murray */
77530d97754SAndrew Murray void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
77630d97754SAndrew Murray u64 select_idx)
77730d97754SAndrew Murray {
778d56bdce5SMarc Zyngier struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, select_idx);
779bc512d6aSOliver Upton u64 reg;
780fd65a3b5SMarc Zyngier
781d56bdce5SMarc Zyngier reg = counter_index_to_evtreg(pmc->idx);
782bc512d6aSOliver Upton __vcpu_assign_sys_reg(vcpu, reg, (data & kvm_pmu_evtyper_mask(vcpu->kvm)));
78380f393a2SAndrew Murray
784d56bdce5SMarc Zyngier kvm_pmu_create_perf_event(pmc);
78530d97754SAndrew Murray }
78630d97754SAndrew Murray
787e840f42aSMarc Zyngier void kvm_host_pmu_init(struct arm_pmu *pmu)
788e840f42aSMarc Zyngier {
789db858060SAlexandru Elisei struct arm_pmu_entry *entry;
790db858060SAlexandru Elisei
791ec3eb9edSReiji Watanabe /*
792ec3eb9edSReiji Watanabe * Check the sanitised PMU version for the system, as KVM does not
793ec3eb9edSReiji Watanabe * support implementations where PMUv3 exists on a subset of CPUs.
794ec3eb9edSReiji Watanabe */
795ec3eb9edSReiji Watanabe if (!pmuv3_implemented(kvm_arm_pmu_get_pmuver_limit()))
796db858060SAlexandru Elisei return;
797db858060SAlexandru Elisei
7983d6d9172SOliver Upton guard(mutex)(&arm_pmus_lock);
799db858060SAlexandru Elisei
800db858060SAlexandru Elisei entry = kmalloc(sizeof(*entry), GFP_KERNEL);
801db858060SAlexandru Elisei if (!entry)
8023d6d9172SOliver Upton return;
803db858060SAlexandru Elisei
804db858060SAlexandru Elisei entry->arm_pmu = pmu;
805db858060SAlexandru Elisei list_add_tail(&entry->entry, &arm_pmus);
806e840f42aSMarc Zyngier }
807e840f42aSMarc Zyngier
80846b18782SMarc Zyngier static struct arm_pmu *kvm_pmu_probe_armpmu(void)
809fd65a3b5SMarc Zyngier {
8101c913a1cSOliver Upton struct arm_pmu_entry *entry;
8113d6d9172SOliver Upton struct arm_pmu *pmu;
8121c913a1cSOliver Upton int cpu;
813fd65a3b5SMarc Zyngier
8143d6d9172SOliver Upton guard(mutex)(&arm_pmus_lock);
815fd65a3b5SMarc Zyngier
81630c60ddaSOliver Upton /*
81730c60ddaSOliver Upton * It is safe to use a stale cpu to iterate the list of PMUs so long as
81830c60ddaSOliver Upton * the same value is used for the entirety of the loop. Given this, and
81930c60ddaSOliver Upton * the fact that no percpu data is used for the lookup there is no need
82030c60ddaSOliver Upton * to disable preemption.
82130c60ddaSOliver Upton *
82230c60ddaSOliver Upton * It is still necessary to get a valid cpu, though, to probe for the
82330c60ddaSOliver Upton * default PMU instance as userspace is not required to specify a PMU
82430c60ddaSOliver Upton * type. In order to uphold the preexisting behavior KVM selects the
82542773357SReiji Watanabe * PMU instance for the core during vcpu init. A dependent use
82642773357SReiji Watanabe * case would be a user with disdain of all things big.LITTLE that
82742773357SReiji Watanabe * affines the VMM to a particular cluster of cores.
82830c60ddaSOliver Upton *
82930c60ddaSOliver Upton * In any case, userspace should just do the sane thing and use the UAPI
83030c60ddaSOliver Upton * to select a PMU type directly. But, be wary of the baggage being
83130c60ddaSOliver Upton * carried here.
83230c60ddaSOliver Upton */
83330c60ddaSOliver Upton cpu = raw_smp_processor_id();
8341c913a1cSOliver Upton list_for_each_entry(entry, &arm_pmus, entry) {
8353d6d9172SOliver Upton pmu = entry->arm_pmu;
836fd65a3b5SMarc Zyngier
8373d6d9172SOliver Upton if (cpumask_test_cpu(cpu, &pmu->supported_cpus))
83846b18782SMarc Zyngier return pmu;
839fd65a3b5SMarc Zyngier }
840fd65a3b5SMarc Zyngier
8413d6d9172SOliver Upton return NULL;
8423d6d9172SOliver Upton }
8433d6d9172SOliver Upton
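/*
 * Build a PMCEIDx_EL0 value from the host PMU's supported-event bitmaps:
 * the low 32 bits describe the common events and the high 32 bits the
 * extended (0x4000-based) event range.
 */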
84493b01528SOliver Upton static u64 __compute_pmceid(struct arm_pmu *pmu, bool pmceid1)
84593b01528SOliver Upton {
84693b01528SOliver Upton u32 hi[2], lo[2];
84793b01528SOliver Upton
84893b01528SOliver Upton bitmap_to_arr32(lo, pmu->pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);
84993b01528SOliver Upton bitmap_to_arr32(hi, pmu->pmceid_ext_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);
85093b01528SOliver Upton
85193b01528SOliver Upton return ((u64)hi[pmceid1] << 32) | lo[pmceid1];
85293b01528SOliver Upton }
85393b01528SOliver Upton
85493b01528SOliver Upton static u64 compute_pmceid0(struct arm_pmu *pmu)
85593b01528SOliver Upton {
85693b01528SOliver Upton u64 val = __compute_pmceid(pmu, 0);
85793b01528SOliver Upton
858ed335722SOliver Upton /* always support SW_INCR */
859ed335722SOliver Upton val |= BIT(ARMV8_PMUV3_PERFCTR_SW_INCR);
86093b01528SOliver Upton /* always support CHAIN */
86193b01528SOliver Upton val |= BIT(ARMV8_PMUV3_PERFCTR_CHAIN);
86293b01528SOliver Upton return val;
86393b01528SOliver Upton }
86493b01528SOliver Upton
86593b01528SOliver Upton static u64 compute_pmceid1(struct arm_pmu *pmu)
86693b01528SOliver Upton {
86793b01528SOliver Upton u64 val = __compute_pmceid(pmu, 1);
86893b01528SOliver Upton
86993b01528SOliver Upton /*
87093b01528SOliver Upton * Don't advertise STALL_SLOT*, as PMMIR_EL0 is handled
87193b01528SOliver Upton * as RAZ
87293b01528SOliver Upton */
87393b01528SOliver Upton val &= ~(BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32) |
87493b01528SOliver Upton BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT_FRONTEND - 32) |
87593b01528SOliver Upton BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT_BACKEND - 32));
87693b01528SOliver Upton return val;
87793b01528SOliver Upton }
87893b01528SOliver Upton
87988865becSMarc Zyngier u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
88088865becSMarc Zyngier {
88193b01528SOliver Upton struct arm_pmu *cpu_pmu = vcpu->kvm->arch.arm_pmu;
88288865becSMarc Zyngier unsigned long *bmap = vcpu->kvm->arch.pmu_filter;
88388865becSMarc Zyngier u64 val, mask = 0;
8849529aaa0SMarc Zyngier int base, i, nr_events;
88588865becSMarc Zyngier
88688865becSMarc Zyngier if (!pmceid1) {
88793b01528SOliver Upton val = compute_pmceid0(cpu_pmu);
88888865becSMarc Zyngier base = 0;
88988865becSMarc Zyngier } else {
89093b01528SOliver Upton val = compute_pmceid1(cpu_pmu);
89188865becSMarc Zyngier base = 32;
89288865becSMarc Zyngier }
89388865becSMarc Zyngier
89488865becSMarc Zyngier if (!bmap)
89588865becSMarc Zyngier return val;
89688865becSMarc Zyngier
8979529aaa0SMarc Zyngier nr_events = kvm_pmu_event_mask(vcpu->kvm) + 1;
8989529aaa0SMarc Zyngier
89988865becSMarc Zyngier for (i = 0; i < 32; i += 8) {
90088865becSMarc Zyngier u64 byte;
90188865becSMarc Zyngier
90288865becSMarc Zyngier byte = bitmap_get_value8(bmap, base + i);
90388865becSMarc Zyngier mask |= byte << i;
9049529aaa0SMarc Zyngier if (nr_events >= (0x4000 + base + 32)) {
90588865becSMarc Zyngier byte = bitmap_get_value8(bmap, 0x4000 + base + i);
90688865becSMarc Zyngier mask |= byte << (32 + i);
90788865becSMarc Zyngier }
9089529aaa0SMarc Zyngier }
90988865becSMarc Zyngier
91088865becSMarc Zyngier return val & mask;
91188865becSMarc Zyngier }
91288865becSMarc Zyngier
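/*
 * Narrow the guest's overflow, interrupt-enable and counter-enable
 * bitmaps to the implemented counters and reprogram the backing perf
 * events accordingly.
 */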
91327131b19SRaghavendra Rao Ananta void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu)
91427131b19SRaghavendra Rao Ananta {
915a3034dabSOliver Upton u64 mask = kvm_pmu_implemented_counter_mask(vcpu);
91627131b19SRaghavendra Rao Ananta
91727131b19SRaghavendra Rao Ananta __vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, &=, mask);
91827131b19SRaghavendra Rao Ananta __vcpu_rmw_sys_reg(vcpu, PMINTENSET_EL1, &=, mask);
91927131b19SRaghavendra Rao Ananta __vcpu_rmw_sys_reg(vcpu, PMCNTENSET_EL0, &=, mask);
920adf8623bSOliver Upton
921adf8623bSOliver Upton kvm_pmu_reprogram_counter_mask(vcpu, mask);
92227131b19SRaghavendra Rao Ananta }
92327131b19SRaghavendra Rao Ananta
924a2befacfSChristoffer Dall int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
925a2befacfSChristoffer Dall {
9269bbfa4b5SAlexandru Elisei if (!vcpu->arch.pmu.created)
9279bbfa4b5SAlexandru Elisei return -EINVAL;
9289bbfa4b5SAlexandru Elisei
929a2befacfSChristoffer Dall /*
930a2befacfSChristoffer Dall * A valid interrupt configuration for the PMU is either to have a
931a2befacfSChristoffer Dall * properly configured interrupt number and to use an in-kernel
932ebb127f2SChristoffer Dall * irqchip, or to have neither an in-kernel GIC nor a configured IRQ.
933a2befacfSChristoffer Dall */
934ebb127f2SChristoffer Dall if (irqchip_in_kernel(vcpu->kvm)) {
935ebb127f2SChristoffer Dall int irq = vcpu->arch.pmu.irq_num;
936ebb127f2SChristoffer Dall /*
937ebb127f2SChristoffer Dall * If we are using an in-kernel vgic, at this point we know
938ebb127f2SChristoffer Dall * the vgic will be initialized, so we can check the PMU irq
939ebb127f2SChristoffer Dall * number against the dimensions of the vgic and make sure
940ebb127f2SChristoffer Dall * it's valid.
941ebb127f2SChristoffer Dall */
942ebb127f2SChristoffer Dall if (!irq_is_ppi(irq) && !vgic_valid_spi(vcpu->kvm, irq))
943ebb127f2SChristoffer Dall return -EINVAL;
944ebb127f2SChristoffer Dall } else if (kvm_arm_pmu_irq_initialized(vcpu)) {
945ebb127f2SChristoffer Dall return -EINVAL;
946ebb127f2SChristoffer Dall }
947ebb127f2SChristoffer Dall
948a2befacfSChristoffer Dall return 0;
949a2befacfSChristoffer Dall }
950a2befacfSChristoffer Dall
951bb0c70bcSShannon Zhao static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
952bb0c70bcSShannon Zhao {
953a2befacfSChristoffer Dall if (irqchip_in_kernel(vcpu->kvm)) {
954abcb851dSChristoffer Dall int ret;
955abcb851dSChristoffer Dall
956a2befacfSChristoffer Dall /*
957a2befacfSChristoffer Dall * If using the PMU with an in-kernel virtual GIC
958a2befacfSChristoffer Dall * implementation, we require the GIC to be already
959a2befacfSChristoffer Dall * initialized when initializing the PMU.
960a2befacfSChristoffer Dall */
961a2befacfSChristoffer Dall if (!vgic_initialized(vcpu->kvm))
962a2befacfSChristoffer Dall return -ENODEV;
963bb0c70bcSShannon Zhao
964a2befacfSChristoffer Dall if (!kvm_arm_pmu_irq_initialized(vcpu))
965a2befacfSChristoffer Dall return -ENXIO;
966abcb851dSChristoffer Dall
967abcb851dSChristoffer Dall ret = kvm_vgic_set_owner(vcpu, vcpu->arch.pmu.irq_num,
968abcb851dSChristoffer Dall &vcpu->arch.pmu);
969abcb851dSChristoffer Dall if (ret)
970abcb851dSChristoffer Dall return ret;
971a2befacfSChristoffer Dall }
972a2befacfSChristoffer Dall
97395e92e45SJulien Thierry init_irq_work(&vcpu->arch.pmu.overflow_work,
97495e92e45SJulien Thierry kvm_pmu_perf_overflow_notify_vcpu);
97595e92e45SJulien Thierry
976a2befacfSChristoffer Dall vcpu->arch.pmu.created = true;
977bb0c70bcSShannon Zhao return 0;
978bb0c70bcSShannon Zhao }
979bb0c70bcSShannon Zhao
9802defaff4SAndre Przywara /*
9812defaff4SAndre Przywara * For one VM the interrupt type must be the same for each vcpu.
9822defaff4SAndre Przywara * As a PPI, the interrupt number is the same for all vcpus,
9832defaff4SAndre Przywara * while as an SPI it must be a separate number per vcpu.
9842defaff4SAndre Przywara */
9852defaff4SAndre Przywara static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
986bb0c70bcSShannon Zhao {
98746808a4cSMarc Zyngier unsigned long i;
988bb0c70bcSShannon Zhao struct kvm_vcpu *vcpu;
989bb0c70bcSShannon Zhao
990bb0c70bcSShannon Zhao kvm_for_each_vcpu(i, vcpu, kvm) {
991bb0c70bcSShannon Zhao if (!kvm_arm_pmu_irq_initialized(vcpu))
992bb0c70bcSShannon Zhao continue;
993bb0c70bcSShannon Zhao
9942defaff4SAndre Przywara if (irq_is_ppi(irq)) {
995bb0c70bcSShannon Zhao if (vcpu->arch.pmu.irq_num != irq)
996bb0c70bcSShannon Zhao return false;
997bb0c70bcSShannon Zhao } else {
998bb0c70bcSShannon Zhao if (vcpu->arch.pmu.irq_num == irq)
999bb0c70bcSShannon Zhao return false;
1000bb0c70bcSShannon Zhao }
1001bb0c70bcSShannon Zhao }
1002bb0c70bcSShannon Zhao
1003bb0c70bcSShannon Zhao return true;
1004bb0c70bcSShannon Zhao }
1005bb0c70bcSShannon Zhao
10064d20debfSRaghavendra Rao Ananta /**
10074d20debfSRaghavendra Rao Ananta * kvm_arm_pmu_get_max_counters - Return the max number of PMU counters.
10084d20debfSRaghavendra Rao Ananta * @kvm: The kvm pointer
10094d20debfSRaghavendra Rao Ananta */
10104d20debfSRaghavendra Rao Ananta u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm)
10114d20debfSRaghavendra Rao Ananta {
10124d20debfSRaghavendra Rao Ananta struct arm_pmu *arm_pmu = kvm->arch.arm_pmu;
10134d20debfSRaghavendra Rao Ananta
10144d20debfSRaghavendra Rao Ananta /*
10151b92e65fSOliver Upton * PMUv3 requires that all event counters are capable of counting any
10161b92e65fSOliver Upton * event, though the same may not be true of non-PMUv3 hardware.
10171b92e65fSOliver Upton */
10181b92e65fSOliver Upton if (cpus_have_final_cap(ARM64_WORKAROUND_PMUV3_IMPDEF_TRAPS))
10191b92e65fSOliver Upton return 1;
10201b92e65fSOliver Upton
10211b92e65fSOliver Upton /*
1022bf5ffc8cSRob Herring (Arm) * The arm_pmu->cntr_mask considers the fixed counter(s) as well.
1023bf5ffc8cSRob Herring (Arm) * Ignore those and return only the general-purpose counters.
10244d20debfSRaghavendra Rao Ananta */
1025bf5ffc8cSRob Herring (Arm) return bitmap_weight(arm_pmu->cntr_mask, ARMV8_PMU_MAX_GENERAL_COUNTERS);
10264d20debfSRaghavendra Rao Ananta }
10274d20debfSRaghavendra Rao Ananta
1028c8823e51SMarc Zyngier static void kvm_arm_set_nr_counters(struct kvm *kvm, unsigned int nr)
1029c8823e51SMarc Zyngier {
1030c8823e51SMarc Zyngier kvm->arch.nr_pmu_counters = nr;
1031c8823e51SMarc Zyngier
1032c8823e51SMarc Zyngier /* Reset MDCR_EL2.HPMN behind the vcpus' back... */
1033c8823e51SMarc Zyngier if (test_bit(KVM_ARM_VCPU_HAS_EL2, kvm->arch.vcpu_features)) {
1034c8823e51SMarc Zyngier struct kvm_vcpu *vcpu;
1035c8823e51SMarc Zyngier unsigned long i;
1036c8823e51SMarc Zyngier
1037c8823e51SMarc Zyngier kvm_for_each_vcpu(i, vcpu, kvm) {
1038c8823e51SMarc Zyngier u64 val = __vcpu_sys_reg(vcpu, MDCR_EL2);
1039c8823e51SMarc Zyngier val &= ~MDCR_EL2_HPMN;
1040c8823e51SMarc Zyngier val |= FIELD_PREP(MDCR_EL2_HPMN, kvm->arch.nr_pmu_counters);
1041c8823e51SMarc Zyngier __vcpu_assign_sys_reg(vcpu, MDCR_EL2, val);
1042c8823e51SMarc Zyngier }
1043c8823e51SMarc Zyngier }
1044c8823e51SMarc Zyngier }
1045c8823e51SMarc Zyngier
10461616ca6fSReiji Watanabe static void kvm_arm_set_pmu(struct kvm *kvm, struct arm_pmu *arm_pmu)
10471616ca6fSReiji Watanabe {
10481616ca6fSReiji Watanabe lockdep_assert_held(&kvm->arch.config_lock);
10491616ca6fSReiji Watanabe
10501616ca6fSReiji Watanabe kvm->arch.arm_pmu = arm_pmu;
1051c8823e51SMarc Zyngier kvm_arm_set_nr_counters(kvm, kvm_arm_pmu_get_max_counters(kvm));
10521616ca6fSReiji Watanabe }
10531616ca6fSReiji Watanabe
/**
 * kvm_arm_set_default_pmu - Set the default PMU when none has been requested.
 * @kvm: The kvm pointer
 *
 * The observant among you will notice that the supported_cpus
 * mask does not get updated for the default PMU even though it
 * is quite possible the selected instance supports only a
 * subset of cores in the system. This is intentional, and
 * upholds the preexisting behavior on heterogeneous systems
 * where vCPUs can be scheduled on any core but the guest
 * counters could stop working.
 *
 * Return: 0 on success, -ENODEV if no hardware PMU is available.
 */
int kvm_arm_set_default_pmu(struct kvm *kvm)
{
	struct arm_pmu *arm_pmu = kvm_pmu_probe_armpmu();

	if (!arm_pmu)
		return -ENODEV;

	kvm_arm_set_pmu(kvm, arm_pmu);
	return 0;
}

static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id)
{
	struct kvm *kvm = vcpu->kvm;
	struct arm_pmu_entry *entry;
	struct arm_pmu *arm_pmu;
	int ret = -ENXIO;

	lockdep_assert_held(&kvm->arch.config_lock);
	mutex_lock(&arm_pmus_lock);

	list_for_each_entry(entry, &arm_pmus, entry) {
		arm_pmu = entry->arm_pmu;
		if (arm_pmu->pmu.type == pmu_id) {
			if (kvm_vm_has_ran_once(kvm) ||
			    (kvm->arch.pmu_filter && kvm->arch.arm_pmu != arm_pmu)) {
				ret = -EBUSY;
				break;
			}

			kvm_arm_set_pmu(kvm, arm_pmu);
			cpumask_copy(kvm->arch.supported_cpus, &arm_pmu->supported_cpus);
			ret = 0;
			break;
		}
	}

	mutex_unlock(&arm_pmus_lock);
	return ret;
}

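/*
 * Illustrative userspace usage (a hedged sketch, not part of this file's
 * code): a VMM selects a specific hardware PMU by passing the perf "type"
 * identifier of the desired instance (as read from
 * /sys/bus/event_source/devices/<pmu>/type) through the vCPU device-attr
 * interface:
 *
 *	int pmu_id = ...;	// value parsed from the sysfs "type" file
 *	struct kvm_device_attr attr = {
 *		.group	= KVM_ARM_VCPU_PMU_V3_CTRL,
 *		.attr	= KVM_ARM_VCPU_PMU_V3_SET_PMU,
 *		.addr	= (__u64)(unsigned long)&pmu_id,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
 *
 * As enforced above, this fails with -EBUSY once the VM has run, or if an
 * event filter was already installed for a different PMU instance.
 */
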
static int kvm_arm_pmu_v3_set_nr_counters(struct kvm_vcpu *vcpu, unsigned int n)
{
	struct kvm *kvm = vcpu->kvm;

	if (!kvm->arch.arm_pmu)
		return -EINVAL;

	if (n > kvm_arm_pmu_get_max_counters(kvm))
		return -EINVAL;

	kvm_arm_set_nr_counters(kvm, n);
	return 0;
}

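/*
 * Illustrative userspace usage (a hedged sketch): limiting the guest to a
 * smaller number of event counters, again via the vCPU device-attr interface
 * and before the vCPU's PMU is initialized:
 *
 *	unsigned int n = 4;
 *	struct kvm_device_attr attr = {
 *		.group	= KVM_ARM_VCPU_PMU_V3_CTRL,
 *		.attr	= KVM_ARM_VCPU_PMU_V3_SET_NR_COUNTERS,
 *		.addr	= (__u64)(unsigned long)&n,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
 *
 * Values larger than kvm_arm_pmu_get_max_counters() are rejected with
 * -EINVAL by the helper above.
 */
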
int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	struct kvm *kvm = vcpu->kvm;

	lockdep_assert_held(&kvm->arch.config_lock);

	if (!kvm_vcpu_has_pmu(vcpu))
		return -ENODEV;

	if (vcpu->arch.pmu.created)
		return -EBUSY;

	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ: {
		int __user *uaddr = (int __user *)(long)attr->addr;
		int irq;

		if (!irqchip_in_kernel(kvm))
			return -EINVAL;

		if (get_user(irq, uaddr))
			return -EFAULT;

		/* The PMU overflow interrupt can be a PPI or a valid SPI. */
		if (!(irq_is_ppi(irq) || irq_is_spi(irq)))
			return -EINVAL;

		if (!pmu_irq_is_valid(kvm, irq))
			return -EINVAL;

		if (kvm_arm_pmu_irq_initialized(vcpu))
			return -EBUSY;

		kvm_debug("Set kvm ARM PMU irq: %d\n", irq);
		vcpu->arch.pmu.irq_num = irq;
		return 0;
	}
	case KVM_ARM_VCPU_PMU_V3_FILTER: {
		u8 pmuver = kvm_arm_pmu_get_pmuver_limit();
		struct kvm_pmu_event_filter __user *uaddr;
		struct kvm_pmu_event_filter filter;
		int nr_events;

		/*
		 * Allow userspace to specify an event filter covering the
		 * entire event range supported by the hardware's PMUVer,
		 * rather than the guest's, to preserve KVM backward
		 * compatibility.
		 */
		nr_events = __kvm_pmu_event_mask(pmuver) + 1;

		uaddr = (struct kvm_pmu_event_filter __user *)(long)attr->addr;

		if (copy_from_user(&filter, uaddr, sizeof(filter)))
			return -EFAULT;

		if (((u32)filter.base_event + filter.nevents) > nr_events ||
		    (filter.action != KVM_PMU_EVENT_ALLOW &&
		     filter.action != KVM_PMU_EVENT_DENY))
			return -EINVAL;

		if (kvm_vm_has_ran_once(kvm))
			return -EBUSY;

		if (!kvm->arch.pmu_filter) {
			kvm->arch.pmu_filter = bitmap_alloc(nr_events, GFP_KERNEL_ACCOUNT);
			if (!kvm->arch.pmu_filter)
				return -ENOMEM;

			/*
			 * The default depends on the first applied filter.
			 * If it allows events, the default is to deny.
			 * Conversely, if the first filter denies a set of
			 * events, the default is to allow.
			 */
			if (filter.action == KVM_PMU_EVENT_ALLOW)
				bitmap_zero(kvm->arch.pmu_filter, nr_events);
			else
				bitmap_fill(kvm->arch.pmu_filter, nr_events);
		}

		if (filter.action == KVM_PMU_EVENT_ALLOW)
			bitmap_set(kvm->arch.pmu_filter, filter.base_event, filter.nevents);
		else
			bitmap_clear(kvm->arch.pmu_filter, filter.base_event, filter.nevents);

		return 0;
	}
	case KVM_ARM_VCPU_PMU_V3_SET_PMU: {
		int __user *uaddr = (int __user *)(long)attr->addr;
		int pmu_id;

		if (get_user(pmu_id, uaddr))
			return -EFAULT;

		return kvm_arm_pmu_v3_set_pmu(vcpu, pmu_id);
	}
	case KVM_ARM_VCPU_PMU_V3_SET_NR_COUNTERS: {
		unsigned int __user *uaddr = (unsigned int __user *)(long)attr->addr;
		unsigned int n;

		if (get_user(n, uaddr))
			return -EFAULT;

		return kvm_arm_pmu_v3_set_nr_counters(vcpu, n);
	}
	case KVM_ARM_VCPU_PMU_V3_INIT:
		return kvm_arm_pmu_v3_init(vcpu);
	}

	return -ENXIO;
}

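/*
 * Illustrative userspace flow for the attributes handled above (a hedged
 * sketch; the concrete values are hypothetical). A VMM typically configures
 * the overflow interrupt, optionally installs an event filter, and finally
 * issues the INIT attribute, all through KVM_SET_DEVICE_ATTR on the vCPU fd:
 *
 *	int irq = 23;				// a PPI in this example
 *	struct kvm_pmu_event_filter filter = {
 *		.base_event	= 0,
 *		.nevents	= 0x40,
 *		.action		= KVM_PMU_EVENT_ALLOW,
 *	};
 *	struct kvm_device_attr attr = {
 *		.group	= KVM_ARM_VCPU_PMU_V3_CTRL,
 *		.attr	= KVM_ARM_VCPU_PMU_V3_IRQ,
 *		.addr	= (__u64)(unsigned long)&irq,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
 *
 *	attr.attr = KVM_ARM_VCPU_PMU_V3_FILTER;
 *	attr.addr = (__u64)(unsigned long)&filter;
 *	ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
 *
 *	attr.attr = KVM_ARM_VCPU_PMU_V3_INIT;
 *	attr.addr = 0;
 *	ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
 *
 * The IRQ and filter must be set up before INIT: once the vCPU's PMU has
 * been created (or the VM has run), further attempts return -EBUSY.
 */
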
int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ: {
		int __user *uaddr = (int __user *)(long)attr->addr;
		int irq;

		if (!irqchip_in_kernel(vcpu->kvm))
			return -EINVAL;

		if (!kvm_vcpu_has_pmu(vcpu))
			return -ENODEV;

		if (!kvm_arm_pmu_irq_initialized(vcpu))
			return -ENXIO;

		irq = vcpu->arch.pmu.irq_num;
		return put_user(irq, uaddr);
	}
	}

	return -ENXIO;
}

int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ:
	case KVM_ARM_VCPU_PMU_V3_INIT:
	case KVM_ARM_VCPU_PMU_V3_FILTER:
	case KVM_ARM_VCPU_PMU_V3_SET_PMU:
	case KVM_ARM_VCPU_PMU_V3_SET_NR_COUNTERS:
		if (kvm_vcpu_has_pmu(vcpu))
			return 0;
	}

	return -ENXIO;
}

u8 kvm_arm_pmu_get_pmuver_limit(void)
{
	unsigned int pmuver;

	pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer,
			       read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1));

	/*
	 * Spoof a barebones PMUv3 implementation if the system supports IMPDEF
	 * traps of the PMUv3 sysregs.
	 */
	if (cpus_have_final_cap(ARM64_WORKAROUND_PMUV3_IMPDEF_TRAPS))
		return ID_AA64DFR0_EL1_PMUVer_IMP;

	/*
	 * Otherwise, treat IMPLEMENTATION DEFINED functionality as
	 * unimplemented.
	 */
	if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
		return 0;

	return min(pmuver, ID_AA64DFR0_EL1_PMUVer_V3P5);
}

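/*
 * Illustrative example of the capping above: hardware advertising PMUv3p7
 * yields a guest-visible limit of PMUv3p5, so guests are not offered PMUv3
 * functionality introduced after v3p5.
 */
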
/**
 * kvm_vcpu_read_pmcr - Read PMCR_EL0 register for the vCPU
 * @vcpu: The vcpu pointer
 *
 * Return: The guest's view of PMCR_EL0, with the N field reflecting the
 * number of event counters visible in the vCPU's current context.
 */
u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu)
{
	u64 pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0);
	u64 n = vcpu->kvm->arch.nr_pmu_counters;

	if (vcpu_has_nv(vcpu) && !vcpu_is_el2(vcpu))
		n = FIELD_GET(MDCR_EL2_HPMN, __vcpu_sys_reg(vcpu, MDCR_EL2));

	return u64_replace_bits(pmcr, n, ARMV8_PMU_PMCR_N);
}

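/*
 * A worked example of the composition above (illustrative): with
 * kvm->arch.nr_pmu_counters == 6, the stored PMCR_EL0 value has its N field
 * (bits [15:11]) replaced with 6. For a nested guest running at vEL1/vEL0,
 * N instead reflects the MDCR_EL2.HPMN limit programmed by the guest
 * hypervisor.
 */
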
void kvm_pmu_nested_transition(struct kvm_vcpu *vcpu)
{
	bool reprogrammed = false;
	unsigned long mask;
	int i;

	mask = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
	for_each_set_bit(i, &mask, 32) {
		struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, i);

		/*
		 * We only need to reconfigure events where the filter is
		 * different at EL1 vs. EL2, as we're multiplexing the true EL1
		 * event filter bit for nested.
		 */
		if (kvm_pmc_counts_at_el1(pmc) == kvm_pmc_counts_at_el2(pmc))
			continue;

		kvm_pmu_create_perf_event(pmc);
		reprogrammed = true;
	}

	if (reprogrammed)
		kvm_vcpu_pmu_restore_guest(vcpu);
}