Lines Matching "i-tlb-sets"

1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2008-2013 Freescale Semiconductor, Inc. All rights reserved.
30 #include <asm/pte-walk.h>
38 #define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1)
45 return host_tlb_params[1].entries - tlbcam_index - 1; in tlb1_max_shadow_size()
70 * writing shadow tlb entry to host TLB
80 mtspr(SPRN_MAS1, stlbe->mas1); in __write_host_tlbe()
81 mtspr(SPRN_MAS2, (unsigned long)stlbe->mas2); in __write_host_tlbe()
82 mtspr(SPRN_MAS3, (u32)stlbe->mas7_3); in __write_host_tlbe()
83 mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32)); in __write_host_tlbe()
96 trace_kvm_booke206_stlb_write(mas0, stlbe->mas8, stlbe->mas1, in __write_host_tlbe()
97 stlbe->mas2, stlbe->mas7_3); in __write_host_tlbe()
101 * Acquire a mas0 with victim hint, as if we just took a TLB miss.
104 * in the right set and is not present in the TLB. Using a zero PID and a
133 mas0 = get_host_mas0(stlbe->mas2); in write_host_tlbe()
134 __write_host_tlbe(stlbe, mas0, vcpu_e500->vcpu.kvm->arch.lpid); in write_host_tlbe()
139 vcpu_e500->vcpu.kvm->arch.lpid); in write_host_tlbe()
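These hits come from the e500 KVM host-MMU code (arch/powerpc/kvm/e500_mmu_host.c). Pieced together, the install path in __write_host_tlbe() is a short MAS-register store sequence followed by a tlbwe. The sketch below fills in the interrupt masking and the tlbwe/isync step around the mtspr lines shown above; those surrounding pieces are an assumption about the rest of the function, not part of the listing.

/* Sketch: install one shadow entry into the host TLB. Only the mtspr lines
 * appear in the listing; irq masking and the tlbwe are assumed. */
static inline void write_host_entry_sketch(struct kvm_book3e_206_tlb_entry *stlbe,
                                           u32 mas0)
{
        unsigned long flags;

        local_irq_save(flags);                        /* MAS registers are per-CPU state */
        mtspr(SPRN_MAS0, mas0);                       /* which TLB array/entry to write  */
        mtspr(SPRN_MAS1, stlbe->mas1);                /* valid bit, TID, TSIZE           */
        mtspr(SPRN_MAS2, (unsigned long)stlbe->mas2); /* EPN plus WIMGE attributes       */
        mtspr(SPRN_MAS3, (u32)stlbe->mas7_3);         /* low RPN + permission bits       */
        mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32)); /* high RPN bits                   */
        asm volatile("isync; tlbwe" : : : "memory");  /* commit the entry (assumed step) */
        local_irq_restore(flags);
}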
152 stid = kvmppc_e500_get_tlb_stid(&vcpu_e500->vcpu, gtlbe); in write_stlbe()
154 stlbe->mas1 |= MAS1_TID(stid); in write_stlbe()
165 ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK; in kvmppc_map_magic()
177 magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M; in kvmppc_map_magic()
192 struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][esel].ref; in inval_gtlbe_on_host()
195 if (!(ref->flags & E500_TLB_VALID)) { in inval_gtlbe_on_host()
196 WARN(ref->flags & (E500_TLB_BITMAP | E500_TLB_TLB0), in inval_gtlbe_on_host()
197 "%s: flags %x\n", __func__, ref->flags); in inval_gtlbe_on_host()
198 WARN_ON(tlbsel == 1 && vcpu_e500->g2h_tlb1_map[esel]); in inval_gtlbe_on_host()
201 if (tlbsel == 1 && ref->flags & E500_TLB_BITMAP) { in inval_gtlbe_on_host()
202 u64 tmp = vcpu_e500->g2h_tlb1_map[esel]; in inval_gtlbe_on_host()
208 hw_tlb_indx = __ilog2_u64(tmp & -tmp); in inval_gtlbe_on_host()
214 vcpu_e500->h2g_tlb1_rmap[hw_tlb_indx] = 0; in inval_gtlbe_on_host()
215 tmp &= tmp - 1; in inval_gtlbe_on_host()
218 vcpu_e500->g2h_tlb1_map[esel] = 0; in inval_gtlbe_on_host()
219 ref->flags &= ~(E500_TLB_BITMAP | E500_TLB_VALID); in inval_gtlbe_on_host()
223 if (tlbsel == 1 && ref->flags & E500_TLB_TLB0) { in inval_gtlbe_on_host()
229 ref->flags &= ~(E500_TLB_TLB0 | E500_TLB_VALID); in inval_gtlbe_on_host()
233 * If TLB entry is still valid then it's a TLB0 entry, and thus in inval_gtlbe_on_host()
236 if (ref->flags & E500_TLB_VALID) in inval_gtlbe_on_host()
239 /* Mark the TLB as not backed by the host anymore */ in inval_gtlbe_on_host()
240 ref->flags = 0; in inval_gtlbe_on_host()
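The TLB1 branch of inval_gtlbe_on_host() above walks g2h_tlb1_map one set bit at a time: tmp & -tmp isolates the lowest set bit, __ilog2_u64() turns it into a host TLB1 index, and tmp &= tmp - 1 clears it. A small standalone model of that idiom (the names and the example bitmap here are illustrative, not kernel APIs):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Example guest->host map: guest entry shadowed by host TLB1 slots 1, 4, 15. */
        uint64_t map = (1ULL << 1) | (1ULL << 4) | (1ULL << 15);

        while (map) {
                uint64_t lowest = map & -map;         /* isolate lowest set bit          */
                int hw_idx = __builtin_ctzll(lowest); /* same as __ilog2_u64() here      */

                printf("invalidate host TLB1 entry %d\n", hw_idx);
                map &= map - 1;                       /* clear that bit, keep walking    */
        }
        return 0;
}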
245 return tlbe->mas7_3 & (MAS3_SW|MAS3_UW); in tlbe_is_writable()
253 ref->pfn = pfn; in kvmppc_e500_ref_setup()
254 ref->flags = E500_TLB_VALID; in kvmppc_e500_ref_setup()
256 ref->flags |= E500_TLB_WRITABLE; in kvmppc_e500_ref_setup()
259 ref->flags |= (gtlbe->mas2 & MAS2_ATTRIB_MASK) | wimg; in kvmppc_e500_ref_setup()
264 if (ref->flags & E500_TLB_VALID) { in kvmppc_e500_ref_release()
266 trace_kvm_booke206_ref_release(ref->pfn, ref->flags); in kvmppc_e500_ref_release()
267 ref->flags = 0; in kvmppc_e500_ref_release()
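kvmppc_e500_ref_setup() a few lines up folds two things into one flags word: state bits (valid, writable) and the guest's MAS2 attribute/WIMG bits that are later copied into the shadow entry. A standalone model of that packing, with purely illustrative bit positions (the real E500_TLB_* and MAS2 mask values live in the e500 headers and may differ):

#include <stdint.h>
#include <stdio.h>

/* Illustrative layout only: low bits cache MAS2 attributes, high bits are state. */
#define DEMO_MAS2_ATTR_MASK 0x7fu
#define DEMO_TLB_WRITABLE   (1u << 28)
#define DEMO_TLB_VALID      (1u << 31)

int main(void)
{
        uint32_t guest_mas2 = 0x0000000au;  /* example: guest asked for I|G        */
        uint32_t host_wimg  = 0x00000002u;  /* example: host-enforced attribute bit */
        int writable = 1;

        uint32_t flags = DEMO_TLB_VALID;
        if (writable)
                flags |= DEMO_TLB_WRITABLE;
        flags |= (guest_mas2 & DEMO_MAS2_ATTR_MASK) | host_wimg;

        printf("ref.flags = 0x%08x\n", flags);
        return 0;
}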
273 if (vcpu_e500->g2h_tlb1_map) in clear_tlb1_bitmap()
274 memset(vcpu_e500->g2h_tlb1_map, 0, in clear_tlb1_bitmap()
275 sizeof(u64) * vcpu_e500->gtlb_params[1].entries); in clear_tlb1_bitmap()
276 if (vcpu_e500->h2g_tlb1_rmap) in clear_tlb1_bitmap()
277 memset(vcpu_e500->h2g_tlb1_rmap, 0, in clear_tlb1_bitmap()
284 int i; in clear_tlb_privs() local
287 for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) { in clear_tlb_privs()
289 &vcpu_e500->gtlb_priv[tlbsel][i].ref; in clear_tlb_privs()
310 kvm_pfn_t pfn = ref->pfn; in kvmppc_e500_setup_stlbe()
311 u32 pr = vcpu->arch.shared->msr & MSR_PR; in kvmppc_e500_setup_stlbe()
312 bool writable = !!(ref->flags & E500_TLB_WRITABLE); in kvmppc_e500_setup_stlbe()
314 BUG_ON(!(ref->flags & E500_TLB_VALID)); in kvmppc_e500_setup_stlbe()
317 stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID; in kvmppc_e500_setup_stlbe()
318 stlbe->mas2 = (gvaddr & MAS2_EPN) | (ref->flags & E500_TLB_MAS2_ATTR); in kvmppc_e500_setup_stlbe()
319 stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) | in kvmppc_e500_setup_stlbe()
320 e500_shadow_mas3_attrib(gtlbe->mas7_3, writable, pr); in kvmppc_e500_setup_stlbe()
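The shadow entry built in kvmppc_e500_setup_stlbe() packs the host pfn and the permission bits into a single 64-bit mas7_3 value: the low 32 bits are what the hardware sees as MAS3 (page-offset RPN bits plus permissions), the high 32 bits are MAS7. A standalone model of that packing and of how __write_host_tlbe() splits it back out before the mtspr calls (values are examples only):

#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_SHIFT 12

int main(void)
{
        uint64_t pfn   = 0x123456ULL;  /* host page frame number (example)     */
        uint32_t perms = 0x15;         /* illustrative MAS3 permission bits    */

        /* Pack, as kvmppc_e500_setup_stlbe() does for mas7_3. */
        uint64_t mas7_3 = (pfn << DEMO_PAGE_SHIFT) | perms;

        /* Unpack, as __write_host_tlbe() does. */
        uint32_t mas3 = (uint32_t)mas7_3;          /* low 32 bits  -> SPRN_MAS3 */
        uint32_t mas7 = (uint32_t)(mas7_3 >> 32);  /* high 32 bits -> SPRN_MAS7 */

        printf("mas7_3=0x%016llx mas3=0x%08x mas7=0x%08x\n",
               (unsigned long long)mas7_3, mas3, mas7);
        return 0;
}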
336 struct kvm *kvm = vcpu_e500->vcpu.kvm; in kvmppc_e500_shadow_map()
344 mmu_seq = kvm->mmu_invalidate_seq; in kvmppc_e500_shadow_map()
349 * a page reference if it is normal, non-reserved memory. in kvmppc_e500_shadow_map()
355 slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn); in kvmppc_e500_shadow_map()
363 return -EINVAL; in kvmppc_e500_shadow_map()
366 spin_lock(&kvm->mmu_lock); in kvmppc_e500_shadow_map()
368 ret = -EAGAIN; in kvmppc_e500_shadow_map()
373 pgdir = vcpu_e500->vcpu.arch.pgdir; in kvmppc_e500_shadow_map()
377 * We are holding kvm->mmu_lock so a notifier invalidate in kvmppc_e500_shadow_map()
392 ret = -EINVAL; in kvmppc_e500_shadow_map()
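The -EAGAIN path above is the standard KVM invalidation race check: mmu_invalidate_seq is sampled before the gfn is translated to a host pfn, and once mmu_lock is held the sequence is re-checked so that a concurrent notifier invalidation forces the fault to be retried. A sketch of that pattern, with the pfn lookup elided (the retry helper and barrier are the generic KVM ones, assumed to be what this path uses):

        /* Sketch of the invalidate-race check around the gfn->pfn lookup. */
        mmu_seq = kvm->mmu_invalidate_seq;   /* sample before translating gfn->pfn */
        smp_rmb();

        /* ... translate gfn to a host pfn without holding mmu_lock ... */

        spin_lock(&kvm->mmu_lock);
        if (mmu_invalidate_retry(kvm, mmu_seq)) {
                /* An invalidation ran in between; drop the lock and retry. */
                ret = -EAGAIN;
                goto out;
        }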
403 psize_pages = 1UL << (psize - PAGE_SHIFT); in kvmppc_e500_shadow_map()
404 start = pfn & ~(psize_pages - 1); in kvmppc_e500_shadow_map()
407 slot_start = pfn - (gfn - slot->base_gfn); in kvmppc_e500_shadow_map()
408 slot_end = slot_start + slot->npages; in kvmppc_e500_shadow_map()
415 tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >> in kvmppc_e500_shadow_map()
422 tsize = min(psize - PAGE_SHIFT + BOOK3E_PAGESZ_4K, tsize); in kvmppc_e500_shadow_map()
437 for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) { in kvmppc_e500_shadow_map()
439 tsize_pages = 1UL << (tsize - 2); in kvmppc_e500_shadow_map()
441 gfn_start = gfn & ~(tsize_pages - 1); in kvmppc_e500_shadow_map()
444 if (gfn_start + pfn - gfn < start) in kvmppc_e500_shadow_map()
446 if (gfn_end + pfn - gfn > end) in kvmppc_e500_shadow_map()
448 if ((gfn & (tsize_pages - 1)) != in kvmppc_e500_shadow_map()
449 (pfn & (tsize_pages - 1))) in kvmppc_e500_shadow_map()
452 gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1); in kvmppc_e500_shadow_map()
453 pfn &= ~(tsize_pages - 1); in kvmppc_e500_shadow_map()
459 kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize, in kvmppc_e500_shadow_map()
463 /* Clear i-cache for new pages */ in kvmppc_e500_shadow_map()
468 spin_unlock(&kvm->mmu_lock); in kvmppc_e500_shadow_map()
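The size-selection loop in kvmppc_e500_shadow_map() above picks the largest power-of-4 page size the shadow mapping can use: the candidate page must lie entirely inside the usable host pfn range, and the guest frame number and host frame number must share the same alignment within that page. A standalone model of the selection (the memslot and hugepage bounds are simplified into one [start, end) pfn window; all numbers are examples):

#include <stdint.h>
#include <stdio.h>

#define TSIZE_4K 2   /* book3e TSIZE encoding: page size = 1 KiB << tsize */

int main(void)
{
        uint64_t gfn = 0x10040, pfn = 0x20c40;   /* guest/host frame numbers        */
        uint64_t start = 0x20c00, end = 0x20c80; /* usable host pfn window          */
        int tsize = 10;                          /* guest's requested size (1 MiB)  */

        for (; tsize > TSIZE_4K; tsize -= 2) {   /* step down through power-of-4 sizes */
                uint64_t tsize_pages = 1ULL << (tsize - TSIZE_4K);
                uint64_t gfn_start = gfn & ~(tsize_pages - 1);
                uint64_t gfn_end = gfn_start + tsize_pages;

                /* The whole candidate page must be backed by [start, end). */
                if (gfn_start + pfn - gfn < start)
                        continue;
                if (gfn_end + pfn - gfn > end)
                        continue;
                /* Guest and host frames must be equally aligned inside the page. */
                if ((gfn & (tsize_pages - 1)) != (pfn & (tsize_pages - 1)))
                        continue;
                break;
        }

        pfn &= ~((1ULL << (tsize - TSIZE_4K)) - 1);   /* align pfn to the chosen size */
        printf("chosen tsize=%d (%llu KiB), pfn=0x%llx\n",
               tsize, 1ULL << tsize, (unsigned long long)pfn);
        return 0;
}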
472 /* XXX only map the one-one case, for now use TLB0 */
483 ref = &vcpu_e500->gtlb_priv[0][esel].ref; in kvmppc_e500_tlb0_map()
500 unsigned int sesel = vcpu_e500->host_tlb1_nv++; in kvmppc_e500_tlb1_map_tlb1()
502 if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size())) in kvmppc_e500_tlb1_map_tlb1()
503 vcpu_e500->host_tlb1_nv = 0; in kvmppc_e500_tlb1_map_tlb1()
505 if (vcpu_e500->h2g_tlb1_rmap[sesel]) { in kvmppc_e500_tlb1_map_tlb1()
506 unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel] - 1; in kvmppc_e500_tlb1_map_tlb1()
507 vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel); in kvmppc_e500_tlb1_map_tlb1()
510 vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP; in kvmppc_e500_tlb1_map_tlb1()
511 vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel; in kvmppc_e500_tlb1_map_tlb1()
512 vcpu_e500->h2g_tlb1_rmap[sesel] = esel + 1; in kvmppc_e500_tlb1_map_tlb1()
513 WARN_ON(!(ref->flags & E500_TLB_VALID)); in kvmppc_e500_tlb1_map_tlb1()
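kvmppc_e500_tlb1_map_tlb1() above hands out host TLB1 slots round-robin and keeps two cross-references consistent: g2h_tlb1_map[guest_esel] is a bitmap of host slots backing that guest entry, and h2g_tlb1_rmap[host_slot] records the owning guest entry, stored +1 so that 0 means free. A standalone model of that bookkeeping (array sizes and names here are illustrative):

#include <stdint.h>
#include <stdio.h>

#define GUEST_TLB1_ENTRIES 16
#define HOST_TLB1_SLOTS    8   /* illustrative shadow-TLB1 budget */

static uint64_t g2h_map[GUEST_TLB1_ENTRIES];    /* guest entry -> host slot bitmap       */
static unsigned int h2g_rmap[HOST_TLB1_SLOTS];  /* host slot -> guest entry + 1 (0=free) */
static unsigned int next_victim;                /* round-robin cursor                    */

static unsigned int map_guest_entry(unsigned int esel)
{
        unsigned int sesel = next_victim++;

        if (next_victim >= HOST_TLB1_SLOTS)
                next_victim = 0;

        /* Evict: unlink the slot from whichever guest entry owned it before. */
        if (h2g_rmap[sesel]) {
                unsigned int old = h2g_rmap[sesel] - 1;
                g2h_map[old] &= ~(1ULL << sesel);
        }

        /* Link the slot to its new owner in both directions. */
        g2h_map[esel] |= 1ULL << sesel;
        h2g_rmap[sesel] = esel + 1;
        return sesel;
}

int main(void)
{
        for (unsigned int i = 0; i < 10; i++)
                printf("guest esel %u -> host slot %u\n", i % 3, map_guest_entry(i % 3));
        return 0;
}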
518 /* Caller must ensure that the specified guest TLB entry is safe to insert into
519 * the shadow TLB. */
520 /* For both one-one and one-to-many */
525 struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[1][esel].ref; in kvmppc_e500_tlb1_map()
536 vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_TLB0; in kvmppc_e500_tlb1_map()
561 priv = &vcpu_e500->gtlb_priv[tlbsel][esel]; in kvmppc_mmu_map()
564 if (!(priv->ref.flags & E500_TLB_VALID)) { in kvmppc_mmu_map()
568 &priv->ref, eaddr, &stlbe); in kvmppc_mmu_map()
601 /* Search TLB for guest pc to get the real address */ in kvmppc_load_last_inst()
604 addr_space = (vcpu->arch.shared->msr & MSR_IS) >> MSR_IR_LG; in kvmppc_load_last_inst()
607 mtspr(SPRN_MAS6, (vcpu->arch.pid << MAS6_SPID_SHIFT) | addr_space); in kvmppc_load_last_inst()
624 * If the TLB entry for guest pc was evicted, return to the guest. in kvmppc_load_last_inst()
625 * There are high chances to find a valid TLB entry next time. in kvmppc_load_last_inst()
631 * Another thread may rewrite the TLB entry in parallel, don't in kvmppc_load_last_inst()
634 pr = vcpu->arch.shared->msr & MSR_PR; in kvmppc_load_last_inst()
645 * write-back page. Check for mismatches when LRAT is used. in kvmppc_load_last_inst()
658 (geaddr & ((1ULL << psize_shift) - 1ULL)); in kvmppc_load_last_inst()
663 …pr_err_ratelimited("%s: Instruction emulation from non-RAM host address %08llx is not supported\n", in kvmppc_load_last_inst()
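kvmppc_load_last_inst() probes the hardware TLB for the guest pc: MAS6 is primed with the guest PID and address space (the line shown above), a tlbsx searches for a matching entry, and the MAS registers are read back to see whether the hit is still valid. The skeleton below fills in the tlbsx/readback step around the MAS6 line as an assumption; it is a sketch of the probe, not the full function.

        /* Sketch: probe the TLB for the faulting guest address. */
        unsigned long flags, mas1, mas3;

        local_irq_save(flags);
        mtspr(SPRN_MAS6, (vcpu->arch.pid << MAS6_SPID_SHIFT) | addr_space);
        asm volatile("tlbsx 0, %[geaddr]" : : [geaddr] "r" (geaddr));
        mas1 = mfspr(SPRN_MAS1);   /* MAS1_VALID clear => entry was evicted       */
        mas3 = mfspr(SPRN_MAS3);   /* otherwise MAS3/MAS7 give the real page      */
        local_irq_restore(flags);

        if (!(mas1 & MAS1_VALID))
                return EMULATE_AGAIN;   /* let the guest refault and retry */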
689 * Flush all shadow tlb entries everywhere. This is slow, but in kvm_e500_mmu_unmap_gfn()
721 * architecturally possible -- e.g. in some weird nested in e500_mmu_host_init()
726 pr_err("%s: need to know host tlb size\n", __func__); in e500_mmu_host_init()
727 return -ENODEV; in e500_mmu_host_init()
741 return -ENODEV; in e500_mmu_host_init()
744 host_tlb_params[0].sets = in e500_mmu_host_init()
746 host_tlb_params[1].sets = 1; in e500_mmu_host_init()
747 vcpu_e500->h2g_tlb1_rmap = kcalloc(host_tlb_params[1].entries, in e500_mmu_host_init()
748 sizeof(*vcpu_e500->h2g_tlb1_rmap), in e500_mmu_host_init()
750 if (!vcpu_e500->h2g_tlb1_rmap) in e500_mmu_host_init()
751 return -EINVAL; in e500_mmu_host_init()
758 kfree(vcpu_e500->h2g_tlb1_rmap); in e500_mmu_host_uninit()
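e500_mmu_host_init() sizes its shadow structures from the host's own TLB geometry: entry counts and associativity come from the TLBnCFG configuration registers, the set count is derived as entries/ways, and TLB1 is treated as a single fully-associative set (line 746 above). A sketch of that readout, assuming the standard TLBnCFG field macros and a ways field alongside the entries/sets fields shown in the listing:

        /* Sketch: derive host TLB geometry (assumed to mirror the init path above). */
        host_tlb_params[0].entries = mfspr(SPRN_TLB0CFG) & TLBnCFG_N_ENTRY;
        host_tlb_params[1].entries = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;

        host_tlb_params[0].ways = (mfspr(SPRN_TLB0CFG) & TLBnCFG_ASSOC) >>
                                  TLBnCFG_ASSOC_SHIFT;
        host_tlb_params[1].ways = host_tlb_params[1].entries; /* TLB1: fully associative */

        host_tlb_params[0].sets =
                host_tlb_params[0].entries / host_tlb_params[0].ways;
        host_tlb_params[1].sets = 1;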