// SPDX-License-Identifier: GPL-2.0-only

#include <asm/kvm_ppc.h>
#include <asm/pmc.h>

#include "book3s_hv.h"

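/*
 * Leave the PMU alone if it is already fully frozen: counters frozen via
 * MMCR0[FC], sampling disabled in MMCRA, and on ISA v3.1 (POWER10) also
 * MMCR0[PMCCEXT] set and BHRB recording disabled via MMCRA[BHRB_DISABLE].
 * Otherwise write MMCR0/MMCRA to that fully-frozen state; the isync()
 * ensures the freeze takes effect before any subsequent PMU accesses.
 */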
static void freeze_pmu(unsigned long mmcr0, unsigned long mmcra)
{
	if (!(mmcr0 & MMCR0_FC))
		goto do_freeze;
	if (mmcra & MMCRA_SAMPLE_ENABLE)
		goto do_freeze;
	if (cpu_has_feature(CPU_FTR_ARCH_31)) {
		if (!(mmcr0 & MMCR0_PMCCEXT))
			goto do_freeze;
		if (!(mmcra & MMCRA_BHRB_DISABLE))
			goto do_freeze;
	}
	return;

do_freeze:
	mmcr0 = MMCR0_FC;
	mmcra = 0;
	if (cpu_has_feature(CPU_FTR_ARCH_31)) {
		mmcr0 |= MMCR0_PMCCEXT;
		mmcra = MMCRA_BHRB_DISABLE;
	}

	mtspr(SPRN_MMCR0, mmcr0);
	mtspr(SPRN_MMCRA, mmcra);
	isync();
}

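/*
 * Switch the PMU from the host to the guest on guest entry.  If the host
 * has the PMU in use, freeze it and save its state into host_os_sprs.
 * Then, if the guest's VPA reports its PMU registers in use (or the guest
 * has been granted PMU access through HFSCR[PM]), load the guest PMU state
 * and let the counters run.
 */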
void switch_pmu_to_guest(struct kvm_vcpu *vcpu,
			 struct p9_host_os_sprs *host_os_sprs)
{
	struct lppaca *lp;
	int load_pmu = 1;

	lp = vcpu->arch.vpa.pinned_addr;
	if (lp)
		load_pmu = lp->pmcregs_in_use;

	/* Save host */
	if (ppc_get_pmu_inuse()) {
		/* POWER9, POWER10 do not implement HPMC or SPMC */

		host_os_sprs->mmcr0 = mfspr(SPRN_MMCR0);
		host_os_sprs->mmcra = mfspr(SPRN_MMCRA);

		freeze_pmu(host_os_sprs->mmcr0, host_os_sprs->mmcra);

		host_os_sprs->pmc1 = mfspr(SPRN_PMC1);
		host_os_sprs->pmc2 = mfspr(SPRN_PMC2);
		host_os_sprs->pmc3 = mfspr(SPRN_PMC3);
		host_os_sprs->pmc4 = mfspr(SPRN_PMC4);
		host_os_sprs->pmc5 = mfspr(SPRN_PMC5);
		host_os_sprs->pmc6 = mfspr(SPRN_PMC6);
		host_os_sprs->mmcr1 = mfspr(SPRN_MMCR1);
		host_os_sprs->mmcr2 = mfspr(SPRN_MMCR2);
		host_os_sprs->sdar = mfspr(SPRN_SDAR);
		host_os_sprs->siar = mfspr(SPRN_SIAR);
		host_os_sprs->sier1 = mfspr(SPRN_SIER);

		if (cpu_has_feature(CPU_FTR_ARCH_31)) {
			host_os_sprs->mmcr3 = mfspr(SPRN_MMCR3);
			host_os_sprs->sier2 = mfspr(SPRN_SIER2);
			host_os_sprs->sier3 = mfspr(SPRN_SIER3);
		}
	}

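	/*
	 * When running under a PAPR hypervisor (pseries), our own
	 * lppaca->pmcregs_in_use tells the hypervisor below us whether it
	 * must save and restore PMU state around our dispatches, so it has
	 * to reflect the guest's value while the guest PMU is loaded.
	 */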
#ifdef CONFIG_PPC_PSERIES
	/* After saving PMU, before loading guest PMU, flip pmcregs_in_use */
	if (kvmhv_on_pseries()) {
		barrier();
		get_lppaca()->pmcregs_in_use = load_pmu;
		barrier();
	}
#endif

	/*
	 * Load guest. If the VPA said the PMCs are not in use but the guest
	 * tried to access them anyway, HFSCR[PM] will be set by the HFAC
	 * fault so we can make forward progress.
	 */
	if (load_pmu || (vcpu->arch.hfscr & HFSCR_PM)) {
		mtspr(SPRN_PMC1, vcpu->arch.pmc[0]);
		mtspr(SPRN_PMC2, vcpu->arch.pmc[1]);
		mtspr(SPRN_PMC3, vcpu->arch.pmc[2]);
		mtspr(SPRN_PMC4, vcpu->arch.pmc[3]);
		mtspr(SPRN_PMC5, vcpu->arch.pmc[4]);
		mtspr(SPRN_PMC6, vcpu->arch.pmc[5]);
		mtspr(SPRN_MMCR1, vcpu->arch.mmcr[1]);
		mtspr(SPRN_MMCR2, vcpu->arch.mmcr[2]);
		mtspr(SPRN_SDAR, vcpu->arch.sdar);
		mtspr(SPRN_SIAR, vcpu->arch.siar);
		mtspr(SPRN_SIER, vcpu->arch.sier[0]);

		if (cpu_has_feature(CPU_FTR_ARCH_31)) {
			mtspr(SPRN_MMCR3, vcpu->arch.mmcr[3]);
			mtspr(SPRN_SIER2, vcpu->arch.sier[1]);
			mtspr(SPRN_SIER3, vcpu->arch.sier[2]);
		}

		/* Set MMCRA then MMCR0 last */
		mtspr(SPRN_MMCRA, vcpu->arch.mmcra);
		mtspr(SPRN_MMCR0, vcpu->arch.mmcr[0]);
		/* No isync necessary because we're starting counters */

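		/*
		 * The guest PMU state is now loaded, so grant the guest
		 * direct PMU access (if permitted) and avoid further
		 * facility-unavailable faults.  For a nested (L2) guest the
		 * HFSCR is controlled by its L1, so leave it untouched here.
		 */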
		if (!vcpu->arch.nested &&
		    (vcpu->arch.hfscr_permitted & HFSCR_PM))
			vcpu->arch.hfscr |= HFSCR_PM;
	}
}
EXPORT_SYMBOL_GPL(switch_pmu_to_guest);

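/*
 * Switch the PMU back to the host on guest exit: save the guest PMU state
 * (or just freeze the PMU if the guest did not report it in use), update
 * pmcregs_in_use for the hypervisor below us when running on pseries, and
 * restore the host PMU state saved by switch_pmu_to_guest().
 */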
void switch_pmu_to_host(struct kvm_vcpu *vcpu,
			struct p9_host_os_sprs *host_os_sprs)
{
	struct lppaca *lp;
	int save_pmu = 1;

	lp = vcpu->arch.vpa.pinned_addr;
	if (lp)
		save_pmu = lp->pmcregs_in_use;
	if (IS_ENABLED(CONFIG_KVM_BOOK3S_HV_NESTED_PMU_WORKAROUND)) {
		/*
		 * Save the PMU if this guest is capable of running nested
		 * guests. This option is for old L1s that do not set their
		 * lppaca->pmcregs_in_use properly when entering their L2.
		 */
		save_pmu |= nesting_enabled(vcpu->kvm);
	}

	if (save_pmu) {
		vcpu->arch.mmcr[0] = mfspr(SPRN_MMCR0);
		vcpu->arch.mmcra = mfspr(SPRN_MMCRA);

		freeze_pmu(vcpu->arch.mmcr[0], vcpu->arch.mmcra);

		vcpu->arch.pmc[0] = mfspr(SPRN_PMC1);
		vcpu->arch.pmc[1] = mfspr(SPRN_PMC2);
		vcpu->arch.pmc[2] = mfspr(SPRN_PMC3);
		vcpu->arch.pmc[3] = mfspr(SPRN_PMC4);
		vcpu->arch.pmc[4] = mfspr(SPRN_PMC5);
		vcpu->arch.pmc[5] = mfspr(SPRN_PMC6);
		vcpu->arch.mmcr[1] = mfspr(SPRN_MMCR1);
		vcpu->arch.mmcr[2] = mfspr(SPRN_MMCR2);
		vcpu->arch.sdar = mfspr(SPRN_SDAR);
		vcpu->arch.siar = mfspr(SPRN_SIAR);
		vcpu->arch.sier[0] = mfspr(SPRN_SIER);

		if (cpu_has_feature(CPU_FTR_ARCH_31)) {
			vcpu->arch.mmcr[3] = mfspr(SPRN_MMCR3);
			vcpu->arch.sier[1] = mfspr(SPRN_SIER2);
			vcpu->arch.sier[2] = mfspr(SPRN_SIER3);
		}

	} else if (vcpu->arch.hfscr & HFSCR_PM) {
		/*
		 * The guest accessed PMC SPRs without specifying they should
		 * be preserved, or it cleared pmcregs_in_use after the last
		 * access. Just ensure they are frozen.
		 */
		freeze_pmu(mfspr(SPRN_MMCR0), mfspr(SPRN_MMCRA));

		/*
		 * Demand-fault PMU register access in the guest.
		 *
		 * This is used to grab the guest's VPA pmcregs_in_use value
		 * and reflect it into the host's VPA in the case of a nested
		 * hypervisor.
		 *
		 * It also avoids having to zero out SPRs after each guest
		 * exit to avoid side-channels.
		 *
		 * HFSCR[PM] is cleared here when we exit the guest, so later
		 * HFSCR interrupt handling can add it back to run the guest
		 * with PM enabled next time.
		 */
		if (!vcpu->arch.nested)
			vcpu->arch.hfscr &= ~HFSCR_PM;
	} /* otherwise the PMU should still be frozen */

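	/*
	 * The guest state has been saved (or the PMU frozen), so drop our
	 * own pmcregs_in_use back to whether the host itself has the PMU in
	 * use, for the benefit of the hypervisor below us.
	 */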
#ifdef CONFIG_PPC_PSERIES
	if (kvmhv_on_pseries()) {
		barrier();
		get_lppaca()->pmcregs_in_use = ppc_get_pmu_inuse();
		barrier();
	}
#endif

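	/* Reload the host PMU state saved by switch_pmu_to_guest(). */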
	if (ppc_get_pmu_inuse()) {
		mtspr(SPRN_PMC1, host_os_sprs->pmc1);
		mtspr(SPRN_PMC2, host_os_sprs->pmc2);
		mtspr(SPRN_PMC3, host_os_sprs->pmc3);
		mtspr(SPRN_PMC4, host_os_sprs->pmc4);
		mtspr(SPRN_PMC5, host_os_sprs->pmc5);
		mtspr(SPRN_PMC6, host_os_sprs->pmc6);
		mtspr(SPRN_MMCR1, host_os_sprs->mmcr1);
		mtspr(SPRN_MMCR2, host_os_sprs->mmcr2);
		mtspr(SPRN_SDAR, host_os_sprs->sdar);
		mtspr(SPRN_SIAR, host_os_sprs->siar);
		mtspr(SPRN_SIER, host_os_sprs->sier1);

		if (cpu_has_feature(CPU_FTR_ARCH_31)) {
			mtspr(SPRN_MMCR3, host_os_sprs->mmcr3);
			mtspr(SPRN_SIER2, host_os_sprs->sier2);
			mtspr(SPRN_SIER3, host_os_sprs->sier3);
		}

		/* Set MMCRA then MMCR0 last */
		mtspr(SPRN_MMCRA, host_os_sprs->mmcra);
		mtspr(SPRN_MMCR0, host_os_sprs->mmcr0);
		isync();
	}
}
EXPORT_SYMBOL_GPL(switch_pmu_to_host);