Lines Matching +full:d +full:- +full:tlb +full:- +full:size
6 * Synthesize TLB refill handlers at runtime.
10 * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
34 #include <asm/cpu-type.h>
53 * TLB load/store/modify handlers.
132 * CVMSEG starts at address -32768 and extends for in scratchpad_offset()
136 return CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128 - (8 * i) - 32768; in scratchpad_offset()
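The offset returned above is deliberately negative: CVMSEG sits at the very top of the address space, so the result fits a sign-extended 16-bit load/store displacement from register zero. A short worked example of the expression, under the assumption that CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE is 1 and that the first kernel-owned slot corresponds to i == 1:

    /* Worked example, assuming CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE == 1:       */
    /*   1 * 128 - (8 * 1) - 32768 = -32648                                  */
    /* i.e. the topmost 8-byte slot of the single 128-byte CVMSEG line that  */
    /* starts at -32768; each further index steps down by one 8-byte slot.   */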
231 * TLB exception handlers.
242 pr_define("_PAGE_PRESENT_SHIFT %d\n", _PAGE_PRESENT_SHIFT); in output_pgtable_bits_defines()
243 pr_define("_PAGE_NO_READ_SHIFT %d\n", _PAGE_NO_READ_SHIFT); in output_pgtable_bits_defines()
244 pr_define("_PAGE_WRITE_SHIFT %d\n", _PAGE_WRITE_SHIFT); in output_pgtable_bits_defines()
245 pr_define("_PAGE_ACCESSED_SHIFT %d\n", _PAGE_ACCESSED_SHIFT); in output_pgtable_bits_defines()
246 pr_define("_PAGE_MODIFIED_SHIFT %d\n", _PAGE_MODIFIED_SHIFT); in output_pgtable_bits_defines()
248 pr_define("_PAGE_HUGE_SHIFT %d\n", _PAGE_HUGE_SHIFT); in output_pgtable_bits_defines()
252 pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT); in output_pgtable_bits_defines()
254 pr_define("_PAGE_GLOBAL_SHIFT %d\n", _PAGE_GLOBAL_SHIFT); in output_pgtable_bits_defines()
255 pr_define("_PAGE_VALID_SHIFT %d\n", _PAGE_VALID_SHIFT); in output_pgtable_bits_defines()
256 pr_define("_PAGE_DIRTY_SHIFT %d\n", _PAGE_DIRTY_SHIFT); in output_pgtable_bits_defines()
257 pr_define("_PFN_SHIFT %d\n", _PFN_SHIFT); in output_pgtable_bits_defines()
263 unsigned int count = (end - start) / sizeof(u32); in dump_handler()
280 /* The only general purpose registers allowed in TLB handlers. */
308 * R3000-style TLBs and up to 63 instructions for R4000-style TLBs.
312 * We deliberately chose a buffer size of 128, so we won't scribble
317 /* simply assume worst case size for labels and relocs */
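These fragments describe one shared pattern: every handler is first emitted into an oversized scratch buffer, its length is checked against the real vector budget, and only then is it copied into place. A minimal sketch of that pattern, assuming the tlb_handler/labels/relocs arrays and the uasm emitter used throughout this file (the 64-word budget is the R4000 refill case shown further down):

    u32 *p = tlb_handler;                 /* emit into the 128-word scratch buffer  */
    /* ... uasm_i_*() / uasm_l_*() calls append instructions and labels here ...    */
    if ((p - tlb_handler) > 64)           /* the real refill vector holds 64 words  */
            panic("TLB refill handler space exceeded");
    /* only a handler that fits is copied over the exception vector */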
345 return -1; in allocate_kscratch()
347 r--; /* make it zero based */ in allocate_kscratch()
413 * The R3000 TLB handler is simple.
442 panic("TLB refill handler space exceeded"); in build_r3000_tlb_refill_handler()
444 pr_debug("Wrote TLB refill handler (%u instructions).\n", in build_r3000_tlb_refill_handler()
445 (unsigned int)(p - tlb_handler)); in build_r3000_tlb_refill_handler()
454 * The R4000 TLB handler is much more complicated. We have two
477 * The software work-around is to not allow the instruction preceding the TLBP
478 * to stall - make it an NOP or some other instruction guaranteed not to stall.
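In uasm terms that work-around is just an extra nop emitted ahead of the tlbp on the affected cores. A minimal sketch of the idea; the predicate name is hypothetical, while the real code selects the affected CPU types explicitly:

    static void emit_tlb_probe(u32 **p)
    {
            if (cpu_needs_tlbp_nop())     /* hypothetical check for the affected cores */
                    uasm_i_nop(p);        /* guaranteed not to stall ahead of TLBP     */
            uasm_i_tlbp(p);
    }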
621 panic("No TLB refill handler yet (CPU type: %d)", in build_tlb_write_entry()
642 ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC)); in build_convert_pte_to_entrylo()
667 /* Reset default page size */ in build_restore_pagemask()
686 /* Reset default page size */ in build_restore_pagemask()
709 /* Set huge page tlb entry size */ in build_huge_tlb_write_entry()
741 * A huge PTE describes an area the size of the in build_huge_update_entries()
742 * configured huge page size. This is twice the in build_huge_update_entries()
743 * size of the large TLB entry we intend to use. in build_huge_update_entries()
744 * A TLB entry covering half of the configured in build_huge_update_entries()
745 * huge page size is loaded into entrylo0 in build_huge_update_entries()
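Concretely, the converted PTE value is written to EntryLo0 for the first half of the huge page and, with its PFN advanced by half the huge page, to EntryLo1 for the second half. A minimal uasm-style sketch of that step, assuming 4 KB base pages (so the EntryLo PFN field starts at bit 6 and the >> 7 folds together the divide-by-two and the PFN conversion) and an immediate small enough for ADDIU:

    build_convert_pte_to_entrylo(p, pte);
    UASM_i_MTC0(p, pte, C0_ENTRYLO0);             /* covers the first half          */
    UASM_i_ADDIU(p, pte, pte, HPAGE_SIZE >> 7);   /* advance PFN by HPAGE_SIZE / 2  */
    UASM_i_MTC0(p, pte, C0_ENTRYLO1);             /* covers the second half         */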
828 uasm_i_dsrl_safe(p, ptr, tmp, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3); in build_get_pmde64()
835 if (pgd_reg != -1) { in build_get_pmde64()
870 uasm_i_dsrl_safe(p, tmp, tmp, PGDIR_SHIFT - 3); in build_get_pmde64()
872 uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1)<<3); in build_get_pmde64()
877 uasm_i_dsrl_safe(p, tmp, tmp, PUD_SHIFT - 3); /* get pud offset in bytes */ in build_get_pmde64()
878 uasm_i_andi(p, tmp, tmp, (PTRS_PER_PUD - 1) << 3); in build_get_pmde64()
884 uasm_i_dsrl_safe(p, tmp, tmp, PMD_SHIFT - 3); /* get pmd offset in bytes */ in build_get_pmde64()
885 uasm_i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1)<<3); in build_get_pmde64()
973 if (pgd_reg != -1) { in build_get_pgde32()
1002 unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12; in build_adjust_context()
1003 unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1); in build_adjust_context()
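A worked example of these two expressions, assuming a 64-bit kernel with 4 KB pages (PTE_T_LOG2 = 3, PAGE_SHIFT = 12, PTRS_PER_PTE = 512); Context.BadVPN2 holds VA bits 31..13 already shifted left by 4:

    shift = 4 - (3 + 1) + 12 - 12;        /* = 0: BadVPN2 already lines up           */
    mask  = (512 / 2 - 1) << (3 + 1);     /* = 0xff0                                 */
    /* The masked value is the byte offset of an even/odd pair of 8-byte PTEs */
    /* (16 bytes per pair) within the page-table page.                        */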
1126 if (pgd_reg != -1) in build_fast_tlb_refill_handler()
1137 PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3); in build_fast_tlb_refill_handler()
1140 if (pgd_reg == -1) { in build_fast_tlb_refill_handler()
1146 if (pgd_reg != -1) in build_fast_tlb_refill_handler()
1158 if (pgd_reg == -1) in build_fast_tlb_refill_handler()
1165 if (pgd_reg == -1) { in build_fast_tlb_refill_handler()
1180 uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3); in build_fast_tlb_refill_handler()
1186 * fall-through case = badvaddr *pgd_current in build_fast_tlb_refill_handler()
1192 uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3); in build_fast_tlb_refill_handler()
1197 uasm_i_andi(p, scratch, scratch, (PTRS_PER_PGD - 1) << 3); in build_fast_tlb_refill_handler()
1208 uasm_i_dsrl_safe(p, scratch, tmp, PUD_SHIFT - 3); in build_fast_tlb_refill_handler()
1209 uasm_i_andi(p, scratch, scratch, (PTRS_PER_PUD - 1) << 3); in build_fast_tlb_refill_handler()
1223 uasm_i_dsrl_safe(p, scratch, tmp, PMD_SHIFT - 3); in build_fast_tlb_refill_handler()
1224 uasm_i_andi(p, scratch, scratch, (PTRS_PER_PMD - 1) << 3); in build_fast_tlb_refill_handler()
1297 * For a 64-bit kernel, we are using the 64-bit XTLB refill exception
1300 * unused TLB refill exception.
1339 uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits); in build_r4000_tlb_refill_handler()
1376 * free instruction slot for the wrap-around branch. In worst in build_r4000_tlb_refill_handler()
1386 if ((p - tlb_handler) > 64) in build_r4000_tlb_refill_handler()
1387 panic("TLB refill handler space exceeded"); in build_r4000_tlb_refill_handler()
1389 * Now fold the handler in the TLB refill handler space. in build_r4000_tlb_refill_handler()
1394 final_len = p - tlb_handler; in build_r4000_tlb_refill_handler()
1397 if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1) in build_r4000_tlb_refill_handler()
1398 || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3) in build_r4000_tlb_refill_handler()
1400 tlb_handler + MIPS64_REFILL_INSNS - 3))) in build_r4000_tlb_refill_handler()
1401 panic("TLB refill handler space exceeded"); in build_r4000_tlb_refill_handler()
1403 * Now fold the handler in the TLB refill handler space. in build_r4000_tlb_refill_handler()
1406 if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) { in build_r4000_tlb_refill_handler()
1409 final_len = p - tlb_handler; in build_r4000_tlb_refill_handler()
1429 split < p - MIPS64_REFILL_INSNS) in build_r4000_tlb_refill_handler()
1438 split = tlb_handler + MIPS64_REFILL_INSNS - 2; in build_r4000_tlb_refill_handler()
1445 if (uasm_insn_has_bdelay(relocs, split - 1)) in build_r4000_tlb_refill_handler()
1446 split--; in build_r4000_tlb_refill_handler()
1450 f += split - tlb_handler; in build_r4000_tlb_refill_handler()
1461 uasm_move_labels(labels, f, f + 1, -1); in build_r4000_tlb_refill_handler()
1469 final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) + in build_r4000_tlb_refill_handler()
1470 (p - split); in build_r4000_tlb_refill_handler()
1477 pr_debug("Wrote TLB refill handler (%u instructions).\n", in build_r4000_tlb_refill_handler()
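The space checks above (64 words for 32-bit, MIPS64_REFILL_INSNS * 2 for 64-bit, plus the split and wrap-around branch handling) all follow from the exception vector layout described by the comment quoted above (file lines 1297-1300). A hedged sketch of that budget; the layout below is the usual R4000-style vector stride, not a quote from the file:

    /*
     *   ebase + 0x000 .. 0x07f : 32-bit TLB refill vector, unused on a 64-bit kernel
     *   ebase + 0x080 .. 0x0ff : XTLB refill vector, where execution actually enters
     *
     * 0x80 bytes per vector / 4 bytes per instruction = 32 words each, hence
     * MIPS64_REFILL_INSNS; a handler longer than 32 words is split and a branch
     * wraps it back into the otherwise unused lower half, for 64 words in total.
     */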
1501 pgd_w = PGDIR_SHIFT - PMD_SHIFT + PGD_ORDER; in setup_pw()
1504 pmd_w = PMD_SHIFT - PAGE_SHIFT; in setup_pw()
1506 pgd_w = PGDIR_SHIFT - PAGE_SHIFT + PGD_ORDER; in setup_pw()
1510 pt_w = PAGE_SHIFT - 3; in setup_pw()
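A quick worked example of these widths, assuming a 64-bit kernel with 4 KB pages, a three-level page table and PGD_ORDER = 0 (PAGE_SHIFT = 12, PMD_SHIFT = 21, PGDIR_SHIFT = 30):

    pgd_w = 30 - 21 + 0;    /* = 9 bits: 512 PGD entries                 */
    pmd_w = 21 - 12;        /* = 9 bits: 512 PMD entries                 */
    pt_w  = 12 - 3;         /* = 9 bits: 512 eight-byte PTEs per page    */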
1544 uasm_i_dsrl_safe(&p, K1, K0, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3); in build_loongson3_tlb_refill_handler()
1601 memset(p, 0, tlbmiss_handler_setup_pgd_end - (char *)p); in build_setup_pgd()
1606 if (pgd_reg == -1) { in build_setup_pgd()
1621 uasm_i_dinsm(&p, a0, 0, 29, 64 - 29); in build_setup_pgd()
1650 if (pgd_reg != -1) { in build_setup_pgd()
1664 (unsigned int)(p - (u32 *)tlbmiss_handler_setup_pgd)); in build_setup_pgd()
1757 * the page table where this PTE is located, PTE will be re-loaded
1779 /* You lose the SMP race :-( */ in build_pte_present()
1792 /* You lose the SMP race :-( */ in build_pte_present()
1829 /* You lose the SMP race :-( */ in build_pte_writable()
1866 /* You lose the SMP race :-( */ in build_pte_modifiable()
1875 * R3000 style TLB load/store/modify handlers.
1942 memset(p, 0, handle_tlbl_end - (char *)p); in build_r3000_tlb_load_handler()
1947 build_pte_present(&p, &r, K0, K1, -1, label_nopage_tlbl); in build_r3000_tlb_load_handler()
1949 build_make_valid(&p, &r, K0, K1, -1); in build_r3000_tlb_load_handler()
1957 panic("TLB load handler fastpath space exceeded"); in build_r3000_tlb_load_handler()
1960 pr_debug("Wrote TLB load handler fastpath (%u instructions).\n", in build_r3000_tlb_load_handler()
1961 (unsigned int)(p - (u32 *)handle_tlbl)); in build_r3000_tlb_load_handler()
1972 memset(p, 0, handle_tlbs_end - (char *)p); in build_r3000_tlb_store_handler()
1977 build_pte_writable(&p, &r, K0, K1, -1, label_nopage_tlbs); in build_r3000_tlb_store_handler()
1979 build_make_write(&p, &r, K0, K1, -1); in build_r3000_tlb_store_handler()
1987 panic("TLB store handler fastpath space exceeded"); in build_r3000_tlb_store_handler()
1990 pr_debug("Wrote TLB store handler fastpath (%u instructions).\n", in build_r3000_tlb_store_handler()
1991 (unsigned int)(p - (u32 *)handle_tlbs)); in build_r3000_tlb_store_handler()
2002 memset(p, 0, handle_tlbm_end - (char *)p); in build_r3000_tlb_modify_handler()
2007 build_pte_modifiable(&p, &r, K0, K1, -1, label_nopage_tlbm); in build_r3000_tlb_modify_handler()
2009 build_make_write(&p, &r, K0, K1, -1); in build_r3000_tlb_modify_handler()
2017 panic("TLB modify handler fastpath space exceeded"); in build_r3000_tlb_modify_handler()
2020 pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n", in build_r3000_tlb_modify_handler()
2021 (unsigned int)(p - (u32 *)handle_tlbm)); in build_r3000_tlb_modify_handler()
2030 * When a Hardware Table Walker is running it can replace TLB entries in cpu_has_tlbex_tlbp_race()
2048 * R4000 style TLB load/store/modify handlers.
2064 * For huge tlb entries, pmd doesn't contain an address but in build_r4000_tlbchange_handler_head()
2065 * instead contains the tlb pte. Check the PAGE_HUGE bit and in build_r4000_tlbchange_handler_head()
2066 * see if we need to jump to huge tlb processing. in build_r4000_tlbchange_handler_head()
2073 UASM_i_SRL(p, wr.r1, wr.r1, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2); in build_r4000_tlbchange_handler_head()
2074 uasm_i_andi(p, wr.r1, wr.r1, (PTRS_PER_PTE - 1) << PTE_T_LOG2); in build_r4000_tlbchange_handler_head()
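A worked example of the two instructions above, assuming 4 KB pages, PTE_ORDER = 0 and 8-byte PTEs (PTE_T_LOG2 = 3, PTRS_PER_PTE = 512):

    /* shift amount = 12 + 0 - 3 = 9, mask = (512 - 1) << 3 = 0xff8            */
    /* BadVAddr >> 9, masked, is the byte offset of the faulting PTE within    */
    /* its page-table page; adding it to the table base yields the PTE address. */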
2119 memset(p, 0, handle_tlbl_end - (char *)p); in build_r4000_tlb_load_handler()
2131 uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits); in build_r4000_tlb_load_handler()
2157 * Warn if something may race with us & replace the TLB entry in build_r4000_tlb_load_handler()
2232 * Warn if something may race with us & replace the TLB entry in build_r4000_tlb_load_handler()
2303 panic("TLB load handler fastpath space exceeded"); in build_r4000_tlb_load_handler()
2306 pr_debug("Wrote TLB load handler fastpath (%u instructions).\n", in build_r4000_tlb_load_handler()
2307 (unsigned int)(p - (u32 *)handle_tlbl)); in build_r4000_tlb_load_handler()
2319 memset(p, 0, handle_tlbs_end - (char *)p); in build_r4000_tlb_store_handler()
2359 panic("TLB store handler fastpath space exceeded"); in build_r4000_tlb_store_handler()
2362 pr_debug("Wrote TLB store handler fastpath (%u instructions).\n", in build_r4000_tlb_store_handler()
2363 (unsigned int)(p - (u32 *)handle_tlbs)); in build_r4000_tlb_store_handler()
2375 memset(p, 0, handle_tlbm_end - (char *)p); in build_r4000_tlb_modify_handler()
2416 panic("TLB modify handler fastpath space exceeded"); in build_r4000_tlb_modify_handler()
2419 pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n", in build_r4000_tlb_modify_handler()
2420 (unsigned int)(p - (u32 *)handle_tlbm)); in build_r4000_tlb_modify_handler()
2480 * We are using 2-level page tables, so we only need to in config_htw_params()
2489 /* re-initialize the GDI field */ in config_htw_params()
2492 /* re-initialize the PTI field including the even/odd bit */ in config_htw_params()
2523 /* Set pointer size to size of directory pointers */ in config_htw_params()
2527 pwsize |= ((PTE_T_LOG2 - PGD_T_LOG2) << MIPS_PWSIZE_PTEW_SHIFT) in config_htw_params()
2536 * Enable HTW (and only for XUSeg on 64-bit), and disable the rest of in config_htw_params()
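Programming the walker for this 2-level layout amounts to telling it where the global-directory and PTE indices sit within a virtual address. A minimal sketch under the assumption that the PWField accessors and field macros follow the usual asm/mipsregs.h naming; treat the mask/shift names as assumptions rather than quotes:

    unsigned long pwfield = read_c0_pwfield();

    pwfield &= ~MIPS_PWFIELD_GDI_MASK;                /* GDI: where the PGD index starts */
    pwfield |= PGDIR_SHIFT << MIPS_PWFIELD_GDI_SHIFT;
    pwfield &= ~MIPS_PWFIELD_PTI_MASK;                /* PTI: where the PTE index starts */
    pwfield |= PAGE_SHIFT << MIPS_PWFIELD_PTI_SHIFT;  /* includes the even/odd bit       */
    write_c0_pwfield(pwfield);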
2588 /* clear all non-PFN bits */ in check_pabits()
2589 entry &= ~((1 << MIPS_ENTRYLO_PFN_SHIFT) - 1); in check_pabits()
2594 fillbits = max_t(int, (int)BITS_PER_LONG - pabits, 0); in check_pabits()
2597 fillbits -= min_t(unsigned, fillbits, 2); in check_pabits()
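A worked example of the fill-bit calculation, assuming a 64-bit kernel whose EntryLo probe reports 48 physical-address bits and which implements RI/XI:

    fillbits = 64 - 48;       /* 16 EntryLo bits sit above the physical address      */
    fillbits -= 2;            /* the top two of those are the RI and XI bits         */
    /* => 14 fill bits: enough for the software PTE bits that the entrylo    */
    /*    conversion rotates into the top of the register to land in "fill", */
    /*    where the hardware ignores them.                                   */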
2608 * The refill handler is generated per-CPU, multi-node systems in build_tlb_refill_handler()
2621 check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3); in build_tlb_refill_handler()
2636 panic("No R3000 TLB refill handler"); in build_tlb_refill_handler()
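As a worked example of the check at line 2621, assuming a 64-bit kernel with 4 KB pages, three page-table levels and PGD_ORDER = 0 (PGDIR_SHIFT = 30):

    /* 30 + 0 + 12 - 3 = 39 bits of virtual address are covered by the page  */
    /* tables; a CPU reporting vmbits = 48 therefore needs the extra         */
    /* high-segbits path, so that accesses using bits 39..47 are faulted     */
    /* instead of indexing past the end of the PGD.                          */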