// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/tlbflush.h>

#include <nvhe/mem_protect.h>

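/*
 * Host state saved across a TLB invalidation: only TCR_EL1 needs
 * stashing, and only when the SPECULATIVE_AT workaround is in effect.
 */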
struct tlb_inv_context {
	u64 tcr;
};

static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
				  struct tlb_inv_context *cxt,
				  bool nsh)
{
	/*
	 * We have two requirements:
	 *
	 * - ensure that the page table updates are visible to all
	 *   CPUs, for which a dsb(DOMAIN-st) is what we need, DOMAIN
	 *   being either ish or nsh, depending on the invalidation
	 *   type.
	 *
	 * - complete any speculative page table walk started before
	 *   we trapped to EL2 so that we can mess with the MM
	 *   registers out of context, for which dsb(nsh) is enough
	 *
	 * The composition of these two barriers is a dsb(DOMAIN), and
	 * the 'nsh' parameter tracks the distinction between
	 * Inner-Shareable and Non-Shareable, as specified by the
	 * callers.
	 */
	if (nsh)
		dsb(nsh);
	else
		dsb(ish);

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		u64 val;

		/*
		 * For CPUs that are affected by ARM 1319367, we need to
		 * avoid a host Stage-1 walk while we have the guest's
		 * VMID set in the VTTBR in order to invalidate TLBs.
		 * We're guaranteed that the S1 MMU is enabled, so we can
		 * simply set the EPD bits to avoid any further TLB fill.
		 */
		val = cxt->tcr = read_sysreg_el1(SYS_TCR);
		val |= TCR_EPD1_MASK | TCR_EPD0_MASK;
		write_sysreg_el1(val, SYS_TCR);
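		/* Make the EPD update take effect before we touch the VTTBR */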
		isb();
	}

	/*
	 * __load_stage2() includes an ISB only when the AT
	 * workaround is applied. Take care of the opposite condition,
	 * ensuring that we always have an ISB, but not two ISBs back
	 * to back.
	 */
	__load_stage2(mmu, kern_hyp_va(mmu->arch));
	asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));
}

static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
{
	__load_host_stage2();

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		/* Ensure write of the host VMID */
		isb();
		/* Restore the host's TCR_EL1 */
		write_sysreg_el1(cxt->tcr, SYS_TCR);
	}
}

void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
			      phys_addr_t ipa, int level)
{
	struct tlb_inv_context cxt;

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt, false);

	/*
	 * We could do so much better if we had the VA as well.
	 * Instead, we invalidate Stage-2 for this IPA, and the
	 * whole of Stage-1. Weep...
	 */
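	/* The TLBI argument encodes the IPA shifted right by 12 bits, regardless of the page size in use */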
	ipa >>= 12;
	__tlbi_level(ipas2e1is, ipa, level);

	/*
	 * We have to ensure completion of the invalidation at Stage-2,
	 * since a table walk on another CPU could refill a TLB with a
	 * complete (S1 + S2) walk based on the old Stage-2 mapping if
	 * the Stage-1 invalidation happened first.
	 */
	dsb(ish);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();

	__tlb_switch_to_host(&cxt);
}

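/*
 * Non-shareable variant of __kvm_tlb_flush_vmid_ipa(): the nsh barriers
 * and non-broadcast TLBIs are only guaranteed to affect the local CPU,
 * for use when only this CPU's TLB needs invalidating.
 */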
void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
				  phys_addr_t ipa, int level)
{
	struct tlb_inv_context cxt;

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt, true);

	/*
	 * We could do so much better if we had the VA as well.
	 * Instead, we invalidate Stage-2 for this IPA, and the
	 * whole of Stage-1. Weep...
	 */
	ipa >>= 12;
	__tlbi_level(ipas2e1, ipa, level);

	/*
	 * We have to ensure completion of the invalidation at Stage-2,
	 * since a table walk on another CPU could refill a TLB with a
	 * complete (S1 + S2) walk based on the old Stage-2 mapping if
	 * the Stage-1 invalidation happened first.
	 */
	dsb(nsh);
	__tlbi(vmalle1);
	dsb(nsh);
	isb();

	__tlb_switch_to_host(&cxt);
}

void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
				phys_addr_t start, unsigned long pages)
{
	struct tlb_inv_context cxt;
	unsigned long stride;

	/*
	 * Since the range of addresses may not be mapped at
	 * the same level, assume the worst case as PAGE_SIZE
	 */
	stride = PAGE_SIZE;
	start = round_down(start, stride);

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt, false);

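	/*
	 * __flush_s2_tlb_range_op() uses a range-based TLBI
	 * (FEAT_TLBIRANGE) where the CPU supports it, and otherwise
	 * falls back to one IPAS2E1IS per stride-sized block.
	 */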
	__flush_s2_tlb_range_op(ipas2e1is, start, pages, stride, 0);

	dsb(ish);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();

	__tlb_switch_to_host(&cxt);
}

void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
{
	struct tlb_inv_context cxt;

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt, false);

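	/* Invalidate all Stage-1 and Stage-2 entries for this VMID, Inner Shareable */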
	__tlbi(vmalls12e1is);
	dsb(ish);
	isb();

	__tlb_switch_to_host(&cxt);
}

void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
{
	struct tlb_inv_context cxt;

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt, false);

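	/* Local-only: wipe this CPU's Stage-1 TLB for the VMID, then its I-cache */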
	__tlbi(vmalle1);
	asm volatile("ic iallu");
	dsb(nsh);
	isb();

	__tlb_switch_to_host(&cxt);
}

void __kvm_flush_vm_context(void)
{
	/* Same remark as in __tlb_switch_to_guest() */
	dsb(ish);
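	/* Invalidate Stage-1 and Stage-2 TLB entries for all VMIDs, Inner Shareable */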
	__tlbi(alle1is);
	dsb(ish);
}