// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * OpenRISC init.c
 *
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/pagemap.h>

#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>

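/*
 * Set once mem_init() has completed; early code such as the OpenRISC
 * ioremap path checks this to tell whether the kernel page tables and
 * the normal allocators are usable yet.
 */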
int mem_init_done;

static void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };

	/*
	 * We use only ZONE_NORMAL
	 */
	max_zone_pfn[ZONE_NORMAL] = max_low_pfn;

	free_area_init(max_zone_pfn);
}

/* These mark the extents of the read-only kernel pages...
 * ...from vmlinux.lds.S
 */
extern const char _s_kernel_ro[], _e_kernel_ro[];

/*
 * Map all physical memory into the kernel's address space.
 *
 * This is explicitly coded for two-level page tables, so if you need
 * something else then this needs to change.
 */
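/*
 * With the usual OR1K configuration (8 KiB pages, 32-bit virtual
 * addresses), an address splits into an 8-bit PGD index, an 11-bit
 * PTE index and a 13-bit page offset; the p4d/pud/pmd levels are
 * folded onto the pgd, which is what the consistency check in the
 * loop below relies on.
 */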
static void __init map_ram(void)
{
	phys_addr_t start, end;
	unsigned long v, p, e;
	pgprot_t prot;
	pgd_t *pge;
	p4d_t *p4e;
	pud_t *pue;
	pmd_t *pme;
	pte_t *pte;
	u64 i;

	v = PAGE_OFFSET;

	for_each_mem_range(i, &start, &end) {
		p = (u32) start & PAGE_MASK;
		e = (u32) end;

		v = (u32) __va(p);
		pge = pgd_offset_k(v);

		while (p < e) {
			int j;

			p4e = p4d_offset(pge, v);
			pue = pud_offset(p4e, v);
			pme = pmd_offset(pue, v);

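			/* With the p4d/pud/pmd levels folded, each of the
			 * *_offset() calls above should hand back the pgd
			 * entry itself; anything else means the page-table
			 * layout is not the two-level one this code assumes.
			 */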
			if ((u32) pue != (u32) pge || (u32) pme != (u32) pge)
				panic("%s: OR1K kernel hardcoded for two-level page tables",
				      __func__);

			/* Alloc one page for holding PTEs... */
			pte = memblock_alloc_raw(PAGE_SIZE, PAGE_SIZE);
			if (!pte)
				panic("%s: Failed to allocate page for PTEs\n",
				      __func__);
			set_pmd(pme, __pmd(_KERNPG_TABLE + __pa(pte)));

			/* Fill the newly allocated page with PTEs */
			for (j = 0; p < e && j < PTRS_PER_PTE;
			     v += PAGE_SIZE, p += PAGE_SIZE, j++, pte++) {
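				/* Kernel text/rodata, between _s_kernel_ro
				 * and _e_kernel_ro, stays read-only; all
				 * other pages get a normal read-write
				 * kernel mapping.
				 */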
				if (v >= (u32) _e_kernel_ro ||
				    v < (u32) _s_kernel_ro)
					prot = PAGE_KERNEL;
				else
					prot = PAGE_KERNEL_RO;

				set_pte(pte, mk_pte_phys(p, prot));
			}

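			/* One PTE page maps a whole PGDIR span (16 MiB with
			 * the usual 8 KiB OR1K pages), so advance to the
			 * next PGD entry.
			 */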
			pge++;
		}

		printk(KERN_INFO "%s: Memory: %pa-%pa\n", __func__,
		       &start, &end);
	}
}

void __init paging_init(void)
{
	int i;

	printk(KERN_INFO "Setting up paging and PTEs.\n");

	/* clear out the init_mm.pgd that will contain the kernel's mappings */
	for (i = 0; i < PTRS_PER_PGD; i++)
		swapper_pg_dir[i] = __pgd(0);

	/* make sure the current pgd table points to something sane
	 * (even if it is most probably not used until the next
	 * switch_mm)
	 */
	current_pgd[smp_processor_id()] = init_mm.pgd;

	map_ram();

	zone_sizes_init();

	/* Self-modifying code ;)
	 *
	 * Since the old TLB miss handler has been running up until now,
	 * the kernel pages are still all RW, so we can still modify the
	 * text directly... after this change and a TLB flush, the kernel
	 * pages will become RO.
	 */
	{
		extern unsigned long dtlb_miss_handler;
		extern unsigned long itlb_miss_handler;

		unsigned long *dtlb_vector = __va(0x900);
		unsigned long *itlb_vector = __va(0xa00);

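		/* Each vector gets patched with a single l.j instruction:
		 * since the l.j opcode is the all-zero bit pattern, storing
		 * just the PC-relative word offset (hence the >> 2) yields
		 * a valid jump to the out-of-line miss handler.
		 */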
		printk(KERN_INFO "itlb_miss_handler %p\n", &itlb_miss_handler);
		*itlb_vector = ((unsigned long)&itlb_miss_handler -
				(unsigned long)itlb_vector) >> 2;

		/* Soft ordering constraint to ensure that dtlb_vector is
		 * the last thing updated
		 */
		barrier();

		printk(KERN_INFO "dtlb_miss_handler %p\n", &dtlb_miss_handler);
		*dtlb_vector = ((unsigned long)&dtlb_miss_handler -
				(unsigned long)dtlb_vector) >> 2;
	}

	/* Soft ordering constraint to ensure that cache invalidation and
	 * the TLB flush really happen _after_ the code has been modified.
	 */
	barrier();

	/* Invalidate instruction caches after code modification */
	local_icache_block_inv(0x900);
	local_icache_block_inv(0xa00);

	/* The new TLB miss handlers and kernel page tables are now in
	 * place.  Make sure that page flags get updated for all pages in
	 * the TLB by flushing the TLB and forcing all TLB entries to be
	 * recreated from their page table flags.
	 */
	flush_tlb_all();
}

void __init mem_init(void)
{
	BUG_ON(!mem_map);

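	/* The zero page is handed out read-only to satisfy anonymous
	 * read faults in user space, so it must really be zeroed.
	 */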
	memset((void *)empty_zero_page, 0, PAGE_SIZE);

	printk(KERN_INFO "mem_init_done ...........................................\n");
	mem_init_done = 1;
}

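/*
 * Map (or, when prot is empty, unmap) a single kernel page at va;
 * used only at init time, by __set_fixmap() below.
 */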
static int __init map_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
{
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	p4d = p4d_offset(pgd_offset_k(va), va);
	pud = pud_offset(p4d, va);
	pmd = pmd_offset(pud, va);
	pte = pte_alloc_kernel(pmd, va);

	if (pte == NULL)
		return -ENOMEM;

	if (pgprot_val(prot))
		set_pte_at(&init_mm, va, pte, pfn_pte(pa >> PAGE_SHIFT, prot));
	else
		pte_clear(&init_mm, va, pte);

	local_flush_tlb_page(NULL, va);
	return 0;
}

void __init __set_fixmap(enum fixed_addresses idx,
			 phys_addr_t phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	map_page(address, phys, prot);
}

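/*
 * vm_flags to page protection translation.  Private writable mappings
 * get the PAGE_COPY* variants so that the first write faults and can
 * be turned into copy-on-write; note that VM_READ also grants execute
 * permission (the _X variants), the historical OR1K behaviour.
 */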
static const pgprot_t protection_map[16] = {
	[VM_NONE] = PAGE_NONE,
	[VM_READ] = PAGE_READONLY_X,
	[VM_WRITE] = PAGE_COPY,
	[VM_WRITE | VM_READ] = PAGE_COPY_X,
	[VM_EXEC] = PAGE_READONLY,
	[VM_EXEC | VM_READ] = PAGE_READONLY_X,
	[VM_EXEC | VM_WRITE] = PAGE_COPY,
	[VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_X,
	[VM_SHARED] = PAGE_NONE,
	[VM_SHARED | VM_READ] = PAGE_READONLY_X,
	[VM_SHARED | VM_WRITE] = PAGE_SHARED,
	[VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED_X,
	[VM_SHARED | VM_EXEC] = PAGE_READONLY,
	[VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY_X,
	[VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_X
};
DECLARE_VM_GET_PAGE_PROT