// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/bug.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>
#include <linux/initrd.h>

#include <asm/setup.h>
#include <asm/cachectl.h>
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>

/*
 * Number of PTE slots needed to statically back every kernel-half PGD
 * entry (i.e. entries from USER_PTRS_PER_PGD up to PTRS_PER_PGD), one
 * full PTE table per PGD entry.
 */
#define PTRS_KERN_TABLE \
		((PTRS_PER_PGD - USER_PTRS_PER_PGD) * PTRS_PER_PTE)

/* Reference kernel page directory, wired in by mmu_init() below. */
pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
/* Shared all-invalid PTE table that unpopulated PGD entries point at. */
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
/* Statically allocated PTE tables backing the kernel mapping. */
pte_t kernel_pte_tables[PTRS_KERN_TABLE] __page_aligned_bss;

EXPORT_SYMBOL(invalid_pte_table);
/* Page of zeroes mapped read-only wherever a zero page is needed. */
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
						__page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

/*
 * Release the memory occupied by the __init sections once boot-time
 * initialization is complete (-1 = default "poison" pattern selection
 * in free_initmem_default()).
 */
void free_initmem(void)
{
	free_initmem_default(-1);
}

/*
 * pgd_init - initialize a page directory so that every entry points at
 * the shared invalid_pte_table (no valid translations).
 * @p: the PGD, viewed as an array of PTRS_PER_PGD unsigned longs.
 *
 * Each entry stores the *physical* address of the empty PTE table.
 * The TLB flush and icache invalidate afterwards discard any stale
 * translations/instructions referring to the old contents.
 */
void pgd_init(unsigned long *p)
{
	int i;

	for (i = 0; i < PTRS_PER_PGD; i++)
		p[i] = __pa(invalid_pte_table);

	flush_tlb_all();
	local_icache_inv_all(NULL);
}

/*
 * mmu_init - build the initial kernel page tables and activate them.
 * @min_pfn: first page frame of RAM to map.
 * @max_pfn: one past the last page frame to map.
 *
 * Layout produced in swapper_pg_dir:
 *  - user-half entries  -> invalid_pte_table (no user mappings yet);
 *  - kernel-half entries -> consecutive slices of kernel_pte_tables.
 * All kernel PTEs start as global-but-invalid, then [min_pfn, max_pfn)
 * is mapped with PAGE_KERNEL permissions.
 */
void __init mmu_init(unsigned long min_pfn, unsigned long max_pfn)
{
	int i;

	for (i = 0; i < USER_PTRS_PER_PGD; i++)
		swapper_pg_dir[i].pgd = __pa(invalid_pte_table);

	/* Each kernel PGD entry gets its own PTRS_PER_PTE-sized slice. */
	for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++)
		swapper_pg_dir[i].pgd =
			__pa(kernel_pte_tables + (PTRS_PER_PTE * (i - USER_PTRS_PER_PGD)));

	/* Pre-fill every kernel PTE as global (shared across ASIDs). */
	for (i = 0; i < PTRS_KERN_TABLE; i++)
		set_pte(&kernel_pte_tables[i], __pte(_PAGE_GLOBAL));

	/*
	 * Identity-offset map RAM: pfn i lands at PTE index
	 * i - PFN_DOWN(va_pa_offset).  Assumes va_pa_offset is the
	 * kernel virtual-to-physical offset so that min_pfn's PTE index
	 * is >= 0 -- NOTE(review): confirm against the platform's
	 * memory setup code.
	 */
	for (i = min_pfn; i < max_pfn; i++)
		set_pte(&kernel_pte_tables[i - PFN_DOWN(va_pa_offset)], pfn_pte(i, PAGE_KERNEL));

	flush_tlb_all();
	local_icache_inv_all(NULL);

	/* Setup page mask to 4k */
	write_mmu_pagemask(0);

	/* Install swapper_pg_dir as the active page directory. */
	setup_pgd(swapper_pg_dir, 0);
}

/*
 * fixrange_init - make sure PTE tables exist for [start, end).
 * @start:    first virtual address (expected PMD-aligned).
 * @end:      end of the range (exclusive).
 * @pgd_base: page directory to populate.
 *
 * Walks pgd/pud/pmd; the (pud_t *)pgd and (pmd_t *)pud casts rely on
 * the folded (2-level) page-table layout where pud/pmd collapse into
 * the pgd entry.  Missing PTE tables are allocated from memblock
 * low memory; allocation failure panics (early boot, no recovery).
 * Only table pages are allocated here -- no PTEs are set.
 */
void __init fixrange_init(unsigned long start, unsigned long end,
			pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = pgd_index(vaddr);
	j = pud_index(vaddr);
	k = pmd_index(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
			pmd = (pmd_t *)pud;
			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
				if (pmd_none(*pmd)) {
					pte = (pte_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
					if (!pte)
						panic("%s: Failed to allocate %lu bytes align=%lx\n",
						      __func__, PAGE_SIZE,
						      PAGE_SIZE);

					set_pmd(pmd, __pmd(__pa(pte)));
					/* Sanity: the table we installed is the one a lookup finds. */
					BUG_ON(pte != pte_offset_kernel(pmd, 0));
				}
				vaddr += PMD_SIZE;
			}
			/* Restart inner indices at 0 for subsequent outer entries. */
			k = 0;
		}
		j = 0;
	}
}

/*
 * fixaddr_init - reserve page tables for the fixmap region.
 *
 * Rounds the highest fixmap virtual address down to a PMD boundary and
 * populates one PMD's worth of PTE tables in swapper_pg_dir, so later
 * set_fixmap()-style users find the tables already present.
 */
void __init fixaddr_init(void)
{
	unsigned long vaddr;

	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	fixrange_init(vaddr, vaddr + PMD_SIZE, swapper_pg_dir);
}

/*
 * vm_flags -> page protection translation table, consumed by the
 * generic vm_get_page_prot() that DECLARE_VM_GET_PAGE_PROT emits.
 * Note that non-shared writable combinations map to PAGE_READ here
 * (presumably so private write faults go through copy-on-write --
 * confirm against the arch fault handler); only VM_SHARED|VM_WRITE
 * combinations get PAGE_WRITE.  Exec rights are not distinguished
 * from read in this table.
 */
static const pgprot_t protection_map[16] = {
	[VM_NONE]					= PAGE_NONE,
	[VM_READ]					= PAGE_READ,
	[VM_WRITE]					= PAGE_READ,
	[VM_WRITE | VM_READ]				= PAGE_READ,
	[VM_EXEC]					= PAGE_READ,
	[VM_EXEC | VM_READ]				= PAGE_READ,
	[VM_EXEC | VM_WRITE]				= PAGE_READ,
	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_READ,
	[VM_SHARED]					= PAGE_NONE,
	[VM_SHARED | VM_READ]				= PAGE_READ,
	[VM_SHARED | VM_WRITE]				= PAGE_WRITE,
	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_WRITE,
	[VM_SHARED | VM_EXEC]				= PAGE_READ,
	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_READ,
	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_WRITE,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_WRITE
};
DECLARE_VM_GET_PAGE_PROT