Lines Matching "d-tlb-size"

1 // SPDX-License-Identifier: GPL-2.0
16 #include <asm/insn-def.h>
164 vcpu->arch.last_exit_cpu == vcpu->cpu) in kvm_riscv_local_tlb_sanitize()
168 * On RISC-V platforms with hardware VMID support, we share same in kvm_riscv_local_tlb_sanitize()
170 * have stale G-stage TLB entries on the current Host CPU due to in kvm_riscv_local_tlb_sanitize()
174 * To cleanup stale TLB entries, we simply flush all G-stage TLB in kvm_riscv_local_tlb_sanitize()
178 vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid); in kvm_riscv_local_tlb_sanitize()
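The kvm_riscv_local_tlb_sanitize() matches above show the "flush on host-CPU change" idea described in the comment: all vCPUs of a guest share one VMID, so a vCPU that migrates to a different host CPU may find stale G-stage TLB entries left there by a sibling vCPU, and simply flushes everything tagged with its VMID. Below is a minimal, self-contained userspace sketch of that pattern, not the kernel code; demo_vcpu, tlb_sanitize() and the flush_gstage_tlb_by_vmid() stub are hypothetical names used only for illustration.

#include <stdio.h>

struct demo_vcpu {
        int last_exit_cpu;   /* host CPU at the time of the last exit */
        int cpu;             /* host CPU we are about to run on */
        unsigned long vmid;  /* VMID shared by all vCPUs of this guest */
};

static void flush_gstage_tlb_by_vmid(unsigned long vmid)
{
        printf("flush all G-stage TLB entries for VMID %lu\n", vmid);
}

static void tlb_sanitize(struct demo_vcpu *vcpu)
{
        /* Same host CPU as last time: nothing can be stale, skip the flush. */
        if (vcpu->last_exit_cpu == vcpu->cpu)
                return;

        flush_gstage_tlb_by_vmid(vcpu->vmid);
}

int main(void)
{
        struct demo_vcpu vcpu = { .last_exit_cpu = 2, .cpu = 5, .vmid = 7 };

        tlb_sanitize(&vcpu);            /* host CPU changed: flushes */
        vcpu.last_exit_cpu = vcpu.cpu;
        tlb_sanitize(&vcpu);            /* same host CPU: no flush   */
        return 0;
}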
190 struct kvm_vmid *v = &vcpu->kvm->arch.vmid; in kvm_riscv_hfence_gvma_vmid_all_process()
191 unsigned long vmid = READ_ONCE(v->vmid); in kvm_riscv_hfence_gvma_vmid_all_process()
201 struct kvm_vmid *v = &vcpu->kvm->arch.vmid; in kvm_riscv_hfence_vvma_all_process()
202 unsigned long vmid = READ_ONCE(v->vmid); in kvm_riscv_hfence_vvma_all_process()
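Both *_all_process() matches re-read the guest's VMID with READ_ONCE() immediately before flushing, taking a single non-torn load of a value that may be updated concurrently by the VMID allocator. A minimal userspace analogue of that load-right-before-use pattern, assuming C11 atomics as a stand-in for READ_ONCE() and hypothetical guest_vmid / flush_all_for_vmid() names, could look like this:

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long guest_vmid = 5;

static void flush_all_for_vmid(unsigned long vmid)
{
        printf("flush everything tagged with VMID %lu\n", vmid);
}

static void hfence_all_process(void)
{
        /* Re-read the possibly concurrently updated VMID just before use. */
        unsigned long vmid = atomic_load_explicit(&guest_vmid,
                                                  memory_order_relaxed);
        flush_all_for_vmid(vmid);
}

int main(void)
{
        hfence_all_process();
        atomic_store_explicit(&guest_vmid, 9, memory_order_relaxed); /* VMID recycled */
        hfence_all_process();
        return 0;
}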
214 struct kvm_vcpu_arch *varch = &vcpu->arch; in vcpu_hfence_dequeue()
216 spin_lock(&varch->hfence_lock); in vcpu_hfence_dequeue()
218 if (varch->hfence_queue[varch->hfence_head].type) { in vcpu_hfence_dequeue()
219 memcpy(out_data, &varch->hfence_queue[varch->hfence_head], in vcpu_hfence_dequeue()
221 varch->hfence_queue[varch->hfence_head].type = 0; in vcpu_hfence_dequeue()
223 varch->hfence_head++; in vcpu_hfence_dequeue()
224 if (varch->hfence_head == KVM_RISCV_VCPU_MAX_HFENCE) in vcpu_hfence_dequeue()
225 varch->hfence_head = 0; in vcpu_hfence_dequeue()
230 spin_unlock(&varch->hfence_lock); in vcpu_hfence_dequeue()
239 struct kvm_vcpu_arch *varch = &vcpu->arch; in vcpu_hfence_enqueue()
241 spin_lock(&varch->hfence_lock); in vcpu_hfence_enqueue()
243 if (!varch->hfence_queue[varch->hfence_tail].type) { in vcpu_hfence_enqueue()
244 memcpy(&varch->hfence_queue[varch->hfence_tail], in vcpu_hfence_enqueue()
247 varch->hfence_tail++; in vcpu_hfence_enqueue()
248 if (varch->hfence_tail == KVM_RISCV_VCPU_MAX_HFENCE) in vcpu_hfence_enqueue()
249 varch->hfence_tail = 0; in vcpu_hfence_enqueue()
254 spin_unlock(&varch->hfence_lock); in vcpu_hfence_enqueue()
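The vcpu_hfence_dequeue()/vcpu_hfence_enqueue() matches outline a fixed-size ring buffer in which a zero type field marks a free slot: enqueue fails when the tail slot is still occupied, dequeue clears the type of the head slot, and both indices wrap at the queue size. The kernel versions hold vcpu->arch.hfence_lock around the whole operation. A self-contained sketch of that queue discipline, with hypothetical demo_* names and the locking omitted:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define DEMO_MAX_HFENCE 8    /* stand-in for KVM_RISCV_VCPU_MAX_HFENCE */

struct demo_hfence {
        int type;                  /* 0 means "slot is free" */
        unsigned long addr, size;
};

struct demo_queue {
        struct demo_hfence slot[DEMO_MAX_HFENCE];
        int head, tail;
};

static bool demo_enqueue(struct demo_queue *q, const struct demo_hfence *in)
{
        if (q->slot[q->tail].type)          /* tail slot occupied: queue full */
                return false;

        memcpy(&q->slot[q->tail], in, sizeof(*in));
        if (++q->tail == DEMO_MAX_HFENCE)   /* wrap around */
                q->tail = 0;
        return true;
}

static bool demo_dequeue(struct demo_queue *q, struct demo_hfence *out)
{
        if (!q->slot[q->head].type)         /* head slot empty: queue empty */
                return false;

        memcpy(out, &q->slot[q->head], sizeof(*out));
        q->slot[q->head].type = 0;          /* mark the slot free again */
        if (++q->head == DEMO_MAX_HFENCE)
                q->head = 0;
        return true;
}

int main(void)
{
        struct demo_queue q = { 0 };
        struct demo_hfence req = { .type = 1, .addr = 0x1000, .size = 0x1000 };
        struct demo_hfence got;

        demo_enqueue(&q, &req);
        while (demo_dequeue(&q, &got))
                printf("dequeued type=%d addr=%#lx\n", got.type, got.addr);
        return 0;
}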
262 struct kvm_riscv_hfence d = { 0 }; in kvm_riscv_hfence_process() local
263 struct kvm_vmid *v = &vcpu->kvm->arch.vmid; in kvm_riscv_hfence_process()
265 while (vcpu_hfence_dequeue(vcpu, &d)) { in kvm_riscv_hfence_process()
266 switch (d.type) { in kvm_riscv_hfence_process()
270 vmid = READ_ONCE(v->vmid); in kvm_riscv_hfence_process()
273 d.addr, d.size, d.order); in kvm_riscv_hfence_process()
275 kvm_riscv_local_hfence_gvma_vmid_gpa(vmid, d.addr, in kvm_riscv_hfence_process()
276 d.size, d.order); in kvm_riscv_hfence_process()
280 vmid = READ_ONCE(v->vmid); in kvm_riscv_hfence_process()
282 nacl_hfence_vvma_asid(nacl_shmem(), vmid, d.asid, in kvm_riscv_hfence_process()
283 d.addr, d.size, d.order); in kvm_riscv_hfence_process()
285 kvm_riscv_local_hfence_vvma_asid_gva(vmid, d.asid, d.addr, in kvm_riscv_hfence_process()
286 d.size, d.order); in kvm_riscv_hfence_process()
290 vmid = READ_ONCE(v->vmid); in kvm_riscv_hfence_process()
292 nacl_hfence_vvma_asid_all(nacl_shmem(), vmid, d.asid); in kvm_riscv_hfence_process()
294 kvm_riscv_local_hfence_vvma_asid_all(vmid, d.asid); in kvm_riscv_hfence_process()
298 vmid = READ_ONCE(v->vmid); in kvm_riscv_hfence_process()
301 d.addr, d.size, d.order); in kvm_riscv_hfence_process()
303 kvm_riscv_local_hfence_vvma_gva(vmid, d.addr, in kvm_riscv_hfence_process()
304 d.size, d.order); in kvm_riscv_hfence_process()
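The kvm_riscv_hfence_process() matches drain that per-vCPU queue and dispatch on the request type, re-reading the VMID for each request and choosing between the NACL shared-memory call and the local HFENCE instruction. A rough standalone sketch of the drain-and-dispatch loop follows; the demo_* names are hypothetical and printing stands in for the actual fence operations:

#include <stdbool.h>
#include <stdio.h>

enum demo_req_type {
        DEMO_HFENCE_UNKNOWN = 0,
        DEMO_HFENCE_GVMA_VMID_GPA,
        DEMO_HFENCE_VVMA_ASID_GVA,
        DEMO_HFENCE_VVMA_ASID_ALL,
        DEMO_HFENCE_VVMA_GVA,
};

struct demo_req {
        enum demo_req_type type;
        unsigned long asid;
        unsigned long addr, size, order;
};

/* A couple of canned requests standing in for the per-vCPU queue. */
static struct demo_req pending[] = {
        { DEMO_HFENCE_GVMA_VMID_GPA, 0, 0x80000000UL, 0x1000, 12 },
        { DEMO_HFENCE_VVMA_ASID_ALL, 3, 0, 0, 0 },
};
static unsigned int npending;

static bool demo_dequeue(struct demo_req *out)
{
        if (npending >= sizeof(pending) / sizeof(pending[0]))
                return false;
        *out = pending[npending++];
        return true;
}

static void demo_process(unsigned long vmid)
{
        struct demo_req d;

        /* Drain the queue, dispatching each request by its type. */
        while (demo_dequeue(&d)) {
                switch (d.type) {
                case DEMO_HFENCE_GVMA_VMID_GPA:
                        printf("HFENCE.GVMA vmid=%lu gpa=%#lx size=%#lx\n",
                               vmid, d.addr, d.size);
                        break;
                case DEMO_HFENCE_VVMA_ASID_GVA:
                        printf("HFENCE.VVMA vmid=%lu asid=%lu gva=%#lx\n",
                               vmid, d.asid, d.addr);
                        break;
                case DEMO_HFENCE_VVMA_ASID_ALL:
                        printf("HFENCE.VVMA vmid=%lu asid=%lu (all)\n",
                               vmid, d.asid);
                        break;
                case DEMO_HFENCE_VVMA_GVA:
                        printf("HFENCE.VVMA vmid=%lu gva=%#lx\n",
                               vmid, d.addr);
                        break;
                default:
                        break;
                }
        }
}

int main(void)
{
        demo_process(7);   /* pretend the guest currently holds VMID 7 */
        return 0;
}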
324 if (hbase != -1UL) { in make_xfence_request()
325 if (vcpu->vcpu_id < hbase) in make_xfence_request()
327 if (!(hmask & (1UL << (vcpu->vcpu_id - hbase)))) in make_xfence_request()
333 if (!data || !data->type) in make_xfence_request()
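The make_xfence_request() matches show how target vCPUs are selected: hbase == -1UL means every vCPU, otherwise a vCPU is covered only if its ID is at least hbase and the corresponding bit of hmask (relative to hbase) is set; a request with a zero type carries no payload to enqueue. A small self-contained illustration of that selection test, where vcpu_selected() is a hypothetical helper rather than a kernel function:

#include <stdbool.h>
#include <stdio.h>

static bool vcpu_selected(unsigned long vcpu_id,
                          unsigned long hbase, unsigned long hmask)
{
        if (hbase == -1UL)                          /* wildcard: all vCPUs */
                return true;
        if (vcpu_id < hbase)                        /* below the base: not covered */
                return false;
        return hmask & (1UL << (vcpu_id - hbase));  /* bit relative to the base */
}

int main(void)
{
        unsigned long id;

        /* Select vCPUs 4 and 6 out of 0..7: base 4, mask 0b101. */
        for (id = 0; id < 8; id++)
                printf("vcpu %lu: %s\n", id,
                       vcpu_selected(id, 4, 0x5) ? "selected" : "skipped");
        return 0;
}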
365 data.size = gpsz; in kvm_riscv_hfence_gvma_vmid_gpa()
388 data.size = gvsz; in kvm_riscv_hfence_vvma_asid_gva()
402 data.addr = data.size = data.order = 0; in kvm_riscv_hfence_vvma_asid_all()
417 data.size = gvsz; in kvm_riscv_hfence_vvma_gva()
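The remaining matches are from the request builders, which fill addr/size/order for ranged fences and zero those fields for the *_all variants before handing the request off for broadcast. A hedged sketch of that shape, using hypothetical demo_* names with demo_broadcast() standing in for the real broadcast path:

#include <stdio.h>

struct demo_hfence {
        int type;                         /* non-zero: valid request */
        unsigned long asid;
        unsigned long addr, size, order;  /* range and its page order */
};

static void demo_broadcast(const struct demo_hfence *data)
{
        printf("type=%d asid=%lu addr=%#lx size=%#lx order=%lu\n",
               data->type, data->asid, data->addr, data->size, data->order);
}

/* Ranged fence: guest virtual address range, restricted to one ASID. */
static void demo_hfence_vvma_asid_gva(unsigned long asid, unsigned long gva,
                                      unsigned long gvsz, unsigned long order)
{
        struct demo_hfence data = {
                .type = 2, .asid = asid,
                .addr = gva, .size = gvsz, .order = order,
        };
        demo_broadcast(&data);
}

/* "All" fence: the address range fields are simply left zeroed. */
static void demo_hfence_vvma_asid_all(unsigned long asid)
{
        struct demo_hfence data = { .type = 3, .asid = asid };
        demo_broadcast(&data);
}

int main(void)
{
        demo_hfence_vvma_asid_gva(1, 0x4000, 0x2000, 12);
        demo_hfence_vvma_asid_all(1);
        return 0;
}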