// SPDX-License-Identifier: GPL-2.0-only
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 * -Reintroduce duplicate PD fixup - some customer chips still have the issue
 * -No need to flush_cache_page() for each call to update_mmu_cache()
 *  = page-fault thrice as fast (75 usec to 28 usec)
 * -MMU v3: PD{0,1} bits layout changed: they don't overlap anymore,
 * -MMU v2/v3 BCRs decoded differently
 * -Remove TLB_SIZE hardcoding as it's variable now: 256 or 512
 * -tlb_entry_erase() can be void
 * -local_flush_tlb_range():
 * -Changes related to MMU v2 (Rel 4.8)
 * -In TLB Flush operations (Metal Fix MMU) there is an explicit command to
 *  flush Micro-TLBs. If the TLB Index Reg is invalid prior to the TLBIVUTLB cmd,
 * -Reduced the duration of IRQ lockouts in TLB Flush routines
 * -Multiple copies of TLB erase code separated into a "single" function
 * -In TLB Flush routines, interrupt disabling moved UP to retrieve ASID
 *  in an interrupt-safe region.
 * ARC700 MMU-v1 had a Joint-TLB for Code and Data and was 2-way set-assoc.
 * map into the same set, there would be contention for the 2 ways, causing
 * severe thrashing.
 *
 * Although the J-TLB is 2-way set-assoc, ARC700 caches J-TLB entries in uTLBs
 * which have much higher associativity: the u-D-TLB is 8-way, the u-I-TLB 4-way.
 * J-TLB entries are created (even though the 3rd will knock out one of the prev
 * two), the u-D-TLB and u-I-TLB will have what is required to accomplish memcpy
 *
 * Yet we still see the thrashing, because a J-TLB write causes a flush of u-TLBs.
 * - Existing TLB commands work as before
 * - New command (TLBWriteNI) for TLB write without clearing uTLBs
 * - New command (TLBIVUTLB) to invalidate uTLBs.
 *
 * J-TLB entry got evicted/replaced.
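/*
 * Illustrative sketch (not verbatim from this file): where a TLB write
 * must not clobber hot uTLB entries, MMU v2+ can use the TLBWriteNI
 * command instead of TLBWrite. The helper name tlb_entry_commit() is
 * hypothetical; the aux register and command names follow the ARC MMU
 * headers.
 */
static inline void tlb_entry_commit(void)
{
#if (CONFIG_ARC_MMU_VER >= 2)
	/* write the entry at TLBINDEX without invalidating the uTLBs */
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWriteNI);
#else
	/* MMU v1 only has TLBWrite, which also clears the uTLBs */
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
#endif
}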
 * Utility Routine to erase a J-TLB entry
#if (CONFIG_ARC_MMU_VER >= 2)
#if (CONFIG_ARC_MMU_VER == 2)
	 * fail when a prior probe for the J-TLB (both totally unrelated) would
	 * return a lookup error - because the entry didn't exist in the MMU.
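/*
 * Sketch of the workaround described above (MMU v2 erratum), written as
 * a hypothetical standalone helper: park a valid value in the INDEX reg
 * before issuing the uTLB invalidate (TLB_LKUP_ERR flags a failed prior
 * lookup, per this file's definitions).
 */
static void utlb_invalidate_v2_sketch(void)
{
	unsigned int idx = read_aux_reg(ARC_REG_TLBINDEX);

	if (unlikely(idx & TLB_LKUP_ERR))
		write_aux_reg(ARC_REG_TLBINDEX, 0xa);	/* any valid index */

	write_aux_reg(ARC_REG_TLBCOMMAND, TLBIVUTLB);
}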
	 * with existing location. This will cause Write CMD to over-write
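/*
 * Minimal sketch of the probe-then-write sequence the comment above
 * belongs to (name suffixed _sketch to mark it as illustrative; the
 * registers and commands are per the ARC MMU headers):
 */
static void tlb_entry_insert_sketch(unsigned int pd0, unsigned int pd1)
{
	unsigned int idx;

	/* probe: does an entry for this vaddr+ASID already exist? */
	write_aux_reg(ARC_REG_TLBPD0, pd0);
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe);
	idx = read_aux_reg(ARC_REG_TLBINDEX);

	/* not present: get a free slot, else INDEX already points at the match */
	if (likely(idx & TLB_LKUP_ERR))
		write_aux_reg(ARC_REG_TLBCOMMAND, TLBGetIndex);

	/* set up the other half of the entry (pfn, rwx..) and commit */
	write_aux_reg(ARC_REG_TLBPD1, pd1);
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
}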
 * Unconditionally (without lookup) erase the entire MMU contents
	int num_tlb = mmu->sets * mmu->ways;
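	/*
	 * Assumed shape of the rest of local_flush_tlb_all(): load a blank
	 * entry into PD0/PD1 once, then write it to every linear index
	 * (entry is a local loop counter; IRQ disabling elided here).
	 */
	write_aux_reg(ARC_REG_TLBPD1, 0);
	write_aux_reg(ARC_REG_TLBPD0, 0);

	for (entry = 0; entry < num_tlb; entry++) {
		/* write this blank entry to the TLB slot */
		write_aux_reg(ARC_REG_TLBINDEX, entry);
		write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
	}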
 * Flush the entire MM for userland. The fastest way is to move to Next ASID
	if (atomic_read(&mm->mm_users) == 0)
	/*
	 * - Move to a new ASID, but only if the mm is still wired in
	 *   (Android Binder ended up calling this for vma->vm_mm != tsk->mm,
	 *   causing the h/w and s/w ASIDs to get out of sync)
	 * - Also, the new get_new_mmu_context() implementation allocates an
	 *   ASID only if one is not allocated already - so deallocate first
	 */
	if (current->mm == mm)
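/*
 * Putting those pieces together - a sketch of the whole routine
 * (destroy_context()/get_new_mmu_context() are ARC's mmu_context.h
 * primitives):
 */
noinline void local_flush_tlb_mm(struct mm_struct *mm)
{
	/* nothing wired in: no ASID rollover needed */
	if (atomic_read(&mm->mm_users) == 0)
		return;

	destroy_context(mm);		 /* deallocate the current ASID */
	if (current->mm == mm)		 /* mm still wired in? */
		get_new_mmu_context(mm); /* then grab a fresh one */
}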
 * -Here the fastest way (if the range is too large) is to move to the next ASID
 * -In case of a kernel flush, the entry has to be shot down explicitly
	if (unlikely((end - start) >= PAGE_SIZE * 32)) {
		local_flush_tlb_mm(vma->vm_mm);
	if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
		tlb_entry_erase(start | hw_pid(vma->vm_mm, cpu));
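	/*
	 * Sketch of the common (small-range) path: under local_irq_save(),
	 * walk the range a page at a time, ORing the mm's hw ASID into the
	 * vaddr key (flags/cpu locals assumed):
	 */
	local_irq_save(flags);

	if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
		while (start < end) {
			tlb_entry_erase(start | hw_pid(vma->vm_mm, cpu));
			start += PAGE_SIZE;
		}
	}

	local_irq_restore(flags);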
/* Flush the kernel TLB entries - vmalloc/modules (Global from MMU perspective)
	if (unlikely((end - start) >= PAGE_SIZE * 32)) {
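	/*
	 * Sketch of the per-page loop for the kernel case: global entries
	 * carry no ASID, so the erase key is the bare vaddr (flags local
	 * assumed):
	 */
	local_irq_save(flags);

	while (start < end) {
		tlb_entry_erase(start);
		start += PAGE_SIZE;
	}

	local_irq_restore(flags);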
	if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
		tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm, cpu));
	local_flush_tlb_page(ta->ta_vma, ta->ta_start);
	local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
	local_flush_pmd_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
	local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, &ta, 1);
	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, &ta, 1);
	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_pmd_tlb_range, &ta, 1);
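/*
 * Sketch of the cross-call plumbing the ipi_* handlers above unpack: a
 * tlb_args struct bundles the arguments, and on_each_cpu_mask() runs the
 * local flush on every CPU the mm has been active on:
 */
struct tlb_args {
	struct vm_area_struct *ta_vma;
	unsigned long ta_start;
	unsigned long ta_end;
};

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	struct tlb_args ta = {
		.ta_vma = vma,
		.ta_start = start,
		.ta_end = end
	};

	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, &ta, 1);
}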
	 * create_tlb() assumes that current->mm == vma->vm_mm, since
	 * -the ASID for the TLB entry is fetched from the MMU ASID reg (valid for curr)
	 * -it completes the lazy write to the SASID reg (again valid for curr tsk)
	 * -Using vma->vm_mm->context{ASID,SASID}, as opposed to the MMU reg.
	 * -Fix the TLB paranoid debug code to not trigger false negatives.
	 * -More importantly it makes this handler inconsistent with the fast-path
	 * Let's see the use cases when current->mm != vma->vm_mm and we land here
	 *  1. execve->copy_strings()->__get_user_pages->handle_mm_fault
	 *     Here VM wants to pre-install a TLB entry for the user stack while
	 *     current->mm still points to the pre-execve mm (hence the condition).
	 *  2. ptrace(POKETEXT) causes a CoW - debugger (current) inserting a
	if (current->active_mm != vma->vm_mm)
	tlb_paranoid_check(asid_mm(vma->vm_mm, smp_processor_id()), vaddr);
	 * however Linux only saves 1 set to save PTE real-estate
	 * -Kernel-only entries have Kr Kw Kx 0 0 0
	 * -User entries have mirrored K and U bits
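	/*
	 * Sketch of that mirroring (rwx is a local; PTE_BITS_RWX assumed
	 * to hold r,w,x in the low bits; _PAGE_GLOBAL marks kernel-only
	 * entries):
	 */
	rwx = pte_val(*ptep) & PTE_BITS_RWX;

	if (pte_val(*ptep) & _PAGE_GLOBAL)
		rwx <<= 3;		/* r w x => Kr Kw Kx 0 0 0 */
	else
		rwx |= (rwx << 3);	/* r w x => Kr Kw Kx Ur Uw Ux */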
 * -pre-install the corresponding TLB entry into MMU
 * -Finalize the delayed D-cache flush of kernel mapping of page due to
 * Note that flush (when done) involves both WBACK - so physical page is
 * in sync as well as INV - so any non-congruent aliases don't remain
	 * Exec page : Independent of aliasing/page-color considerations,
	 *	       K-mapping of a code page needs to be wback+inv so that
	 * !EXEC page: If K-mapping is NOT congruent to U-mapping, flush it
	 *	       (Avoids the flush for Non-exec + congruent mapping case)
	if ((vma->vm_flags & VM_EXEC) ||
	     addr_not_cache_congruent(paddr, vaddr)) {
		int dirty = !test_and_set_bit(PG_dc_clean, &page->flags);
		if (dirty) {
			/* wback + inv dcache lines (K-mapping) */
			__flush_dcache_page(paddr, paddr);
			/* invalidate any existing icache lines (U-mapping) */
			if (vma->vm_flags & VM_EXEC)
				__inv_icache_page(paddr, vaddr);
		}
	}
 * Normal and Super pages can co-exist (of course not overlap) in the TLB with a
 * - MMU page size (typical 8K, RTL fixed)
 * - software page walker address split between PGD:PTE:PFN (typical
 *   11:8:13, RTL programmable, hence ideally kernel-configurable)
 *
 * So for the above default, the THP size supported is 8K * (2^8) = 2M
 * Default Page Walker is 2 levels, PGD:PTE:PFN, which in THP regime
	assert_spin_locked(&mm->page_table_lock);
	assert_spin_locked(&mm->page_table_lock);
	pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
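/*
 * Sketch of the deposit side feeding the withdraw above: the spare
 * pgtable page itself doubles as a list_head, queued FIFO on
 * pmd_huge_pte() under the page table lock:
 */
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(&mm->page_table_lock);

	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);	/* first deposit */
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}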
	if (likely(asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID)) {
		unsigned int asid = hw_pid(vma->vm_mm, cpu);
		unsigned int ver:8, sasid:1, sz1:4, sz0:4, res:2, pae:1,
			     n_ways:2, n_entry:2, n_super:2, u_itlb:3, u_dtlb:3;
		unsigned int u_dtlb:3, u_itlb:3, n_super:2, n_entry:2, n_ways:2,
			     pae:1, res:2, sz0:4, sz1:4, sasid:1, ver:8;
	mmu->ver = (tmp >> 24);
	if (mmu->ver <= 2) {
		mmu->pg_sz_k = TO_KB(0x2000);
		mmu->sets = 1 << mmu2->sets;
		mmu->ways = 1 << mmu2->ways;
		mmu->u_dtlb = mmu2->u_dtlb;
		mmu->u_itlb = mmu2->u_itlb;
		mmu->pg_sz_k = 1 << (mmu3->pg_sz - 1);
		mmu->sets = 1 << mmu3->sets;
		mmu->ways = 1 << mmu3->ways;
		mmu->u_dtlb = mmu3->u_dtlb;
		mmu->u_itlb = mmu3->u_itlb;
		mmu->sasid = mmu3->sasid;
		mmu->pg_sz_k = 1 << (mmu4->sz0 - 1);
		mmu->s_pg_sz_m = 1 << (mmu4->sz1 - 11);
		mmu->sets = 64 << mmu4->n_entry;
		mmu->ways = mmu4->n_ways * 2;
		mmu->u_dtlb = mmu4->u_dtlb * 4;
		mmu->u_itlb = mmu4->u_itlb * 4;
		mmu->sasid = mmu4->sasid;
		pae_exists = mmu->pae = mmu4->pae;
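	/*
	 * Worked example with hypothetical v4 BCR field values: sz0 = 4 ->
	 * pg_sz_k = 1 << 3 = 8 (8K pages); sz1 = 12 -> s_pg_sz_m =
	 * 1 << 1 = 2 (2M super pages); n_entry = 2 -> 64 << 2 = 256 sets;
	 * n_ways = 2 -> 4 ways, i.e. a 1024-entry J-TLB.
	 */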
	if (p_mmu->s_pg_sz_m)
			       p_mmu->s_pg_sz_m,
	n += scnprintf(buf + n, len - n,
		       p_mmu->ver, p_mmu->pg_sz_k, super_pg,
		       p_mmu->sets * p_mmu->ways, p_mmu->sets, p_mmu->ways,
		       p_mmu->u_dtlb, p_mmu->u_itlb,
		       IS_AVAIL2(p_mmu->pae, ", PAE40 ", CONFIG_ARC_HAS_PAE40));
	if (is_isa_arcompact() && mmu->ver == CONFIG_ARC_MMU_VER)
	else if (is_isa_arcv2() && mmu->ver >= CONFIG_ARC_MMU_VER)
		      mmu->ver, CONFIG_ARC_MMU_VER);
	if (mmu->pg_sz_k != TO_KB(PAGE_SIZE))
	    mmu->s_pg_sz_m != TO_MB(HPAGE_PMD_SIZE))
	if (IS_ENABLED(CONFIG_ARC_HAS_PAE40) && !mmu->pae)
 * TLB Programmer's Model uses Linear Indexes: 0 to {255, 511} for 128 x {2,4}
 * The mapping is Column-first, i.e. the way (column) index varies fastest:
 *		---------------------	-----------
 *		|way0|way1|way2|way3|	|way0|way1|
 *		---------------------	-----------
 * [set0]	|  0 |  1 |  2 |  3 |	|  0 |  1 |
 * [set1]	|  4 |  5 |  6 |  7 |	|  2 |  3 |
 *		~		    ~	~	  ~
 * [set127]	| 508| 509| 510| 511|	| 254| 255|
 *		---------------------	-----------
#define SET_WAY_TO_IDX(mmu, set, way)	((set) * mmu->ways + (way))
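/* Example: with 4 ways, (set 1, way 2) -> 1 * 4 + 2 = linear index 6, per the table */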
 * -Could be due to buggy customer tapeouts or obscure kernel bugs
 * -MMU complains not at the time of duplicate PD installation, but at the
 * -Ideally these should never happen - but if they do - workaround by deleting
 * -Knob to be verbose about it (TODO: hook them up to debugfs)
	int set, n_ways = mmu->ways;

	BUG_ON(mmu->ways > 4);

	for (set = 0; set < mmu->sets; set++) {
		int is_valid, way;

		/* read out all the ways of the current set */
		for (way = 0, is_valid = 0; way < n_ways; way++) {
			write_aux_reg(ARC_REG_TLBINDEX,
				      SET_WAY_TO_IDX(mmu, set, way));
			write_aux_reg(ARC_REG_TLBCOMMAND, TLBRead);
			pd0[way] = read_aux_reg(ARC_REG_TLBPD0);
			is_valid |= pd0[way] & _PAGE_PRESENT;
			pd0[way] &= PAGE_MASK;
		}

		/* scan the set for duplicate ways */
		for (way = 0; way < n_ways - 1; way++) {
			int n;

			if (!pd0[way])
				continue;

			for (n = way + 1; n < n_ways; n++) {
				if (pd0[way] != pd0[n])
					continue;

				pr_info("Dup TLB PD0 %08x @ set %d ways %d,%d\n",
					pd0[way], set, way, n);

				/*
				 * clear entry @way and not @n.
				 */
				pd0[way] = 0;
				write_aux_reg(ARC_REG_TLBINDEX,
					      SET_WAY_TO_IDX(mmu, set, way));
			}
		}
	}
 * -Called from Low Level TLB Handlers if things don't look good
	pr_emerg("ASID Mismatch in %s Path Handler: sw-pid=0x%x hw-pid=0x%x\n",
	 * - HW version needs to match SW version
	 * - SW needs to have a valid ASID