// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/m68k/mm/motorola.c
 *
 * Routines specific to the Motorola MMU, originally from:
 * linux/arch/m68k/init.c
 * which are Copyright (C) 1995 Hamish Macdonald
 *
 * Moved 8/20/1999 Sam Creasey
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/gfp.h>

#include <asm/setup.h>
#include <linux/uaccess.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/machdep.h>
#include <asm/io.h>
#ifdef CONFIG_ATARI
#include <asm/atari_stram.h>
#endif
#include <asm/sections.h>

#undef DEBUG

#ifndef mm_cachebits
/*
 * Bits to add to page descriptors for "normal" caching mode.
 * For 68020/030 this is 0.
 * For 68040, this is _PAGE_CACHE040 (cachable, copyback)
 */
unsigned long mm_cachebits;
EXPORT_SYMBOL(mm_cachebits);
#endif

/* Prior to calling these routines, the page should have been flushed
 * from both the cache and ATC, or the CPU might not notice that the
 * cache setting for the page has been changed. -jskov
 */
static inline void nocache_page(void *vaddr)
{
	unsigned long addr = (unsigned long)vaddr;

	if (CPU_IS_040_OR_060) {
		pte_t *ptep = virt_to_kpte(addr);

		*ptep = pte_mknocache(*ptep);
	}
}

static inline void cache_page(void *vaddr)
{
	unsigned long addr = (unsigned long)vaddr;

	if (CPU_IS_040_OR_060) {
		pte_t *ptep = virt_to_kpte(addr);

		*ptep = pte_mkcache(*ptep);
	}
}

/*
 * Motorola 680x0 user's manual recommends using uncached memory for address
 * translation tables.
 *
 * Seeing how the MMU can be external on (some of) these chips, that seems like
 * a very important recommendation to follow. Provide some helpers to combat
 * 'variation' amongst the users of this.
 */

void mmu_page_ctor(void *page)
{
	__flush_pages_to_ram(page, 1);
	flush_tlb_kernel_page(page);
	nocache_page(page);
}

void mmu_page_dtor(void *page)
{
	cache_page(page);
}
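
/*
 * mmu_page_ctor() is applied to every page that gets turned into
 * translation tables below (see get_pointer_table(), kernel_page_table()
 * and kernel_ptr_table()); mmu_page_dtor() undoes it when
 * free_pointer_table() releases a page whose tables are all free again.
 */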

/* ++andreas: {get,free}_pointer_table rewritten to use unused fields from
   struct page instead of separately kmalloced struct.  Stolen from
   arch/sparc/mm/srmmu.c ... */

typedef struct list_head ptable_desc;

static struct list_head ptable_list[3] = {
	LIST_HEAD_INIT(ptable_list[0]),
	LIST_HEAD_INIT(ptable_list[1]),
	LIST_HEAD_INIT(ptable_list[2]),
};

#define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page((void *)(page))->lru))
#define PD_PAGE(ptable) (list_entry(ptable, struct page, lru))
#define PD_PTDESC(ptable) (list_entry(ptable, struct ptdesc, pt_list))
#define PD_MARKBITS(dp) (*(unsigned int *)&PD_PTDESC(dp)->pt_index)

static const int ptable_shift[3] = {
	7+2, /* PGD */
	7+2, /* PMD */
	6+2, /* PTE */
};

#define ptable_size(type) (1U << ptable_shift[type])
#define ptable_mask(type) ((1U << (PAGE_SIZE / ptable_size(type))) - 1)
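
/*
 * Worked example, assuming the usual 4 KiB m68k PAGE_SIZE: PGD and PMD
 * tables are 1 << (7+2) = 512 bytes, so 4096/512 = 8 of them share one
 * page and ptable_mask() is 0xff; PTE tables are 1 << (6+2) = 256 bytes,
 * giving 16 per page and a mask of 0xffff.  Each set bit in PD_MARKBITS()
 * marks one table-sized slot of the page as free.
 */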

void __init init_pointer_table(void *table, int type)
{
	ptable_desc *dp;
	unsigned long ptable = (unsigned long)table;
	unsigned long page = ptable & PAGE_MASK;
	unsigned int mask = 1U << ((ptable - page)/ptable_size(type));

	dp = PD_PTABLE(page);
	if (!(PD_MARKBITS(dp) & mask)) {
		PD_MARKBITS(dp) = ptable_mask(type);
		list_add(dp, &ptable_list[type]);
	}

	PD_MARKBITS(dp) &= ~mask;
	pr_debug("init_pointer_table: %lx, %x\n", ptable, PD_MARKBITS(dp));

	/* unreserve the page so it's possible to free that page */
	__ClearPageReserved(PD_PAGE(dp));
	init_page_count(PD_PAGE(dp));

	return;
}

void *get_pointer_table(struct mm_struct *mm, int type)
{
	ptable_desc *dp = ptable_list[type].next;
	unsigned int mask = list_empty(&ptable_list[type]) ? 0 : PD_MARKBITS(dp);
	unsigned int tmp, off;

	/*
	 * For a pointer table for a user process address space, a
	 * table is taken from a page allocated for the purpose.  Each
	 * page can hold 8 pointer tables.  The page is remapped in
	 * virtual address space to be noncacheable.
	 */
	if (mask == 0) {
		void *page;
		ptable_desc *new;

		if (!(page = (void *)get_zeroed_page(GFP_KERNEL)))
			return NULL;

		switch (type) {
		case TABLE_PTE:
			/*
			 * m68k doesn't have SPLIT_PTE_PTLOCKS because it
			 * doesn't have SMP.
			 */
			pagetable_pte_ctor(mm, virt_to_ptdesc(page));
			break;
		case TABLE_PMD:
			pagetable_pmd_ctor(mm, virt_to_ptdesc(page));
			break;
		case TABLE_PGD:
			pagetable_pgd_ctor(virt_to_ptdesc(page));
			break;
		}

		mmu_page_ctor(page);

		new = PD_PTABLE(page);
		PD_MARKBITS(new) = ptable_mask(type) - 1;
		list_add_tail(new, dp);

		return (pmd_t *)page;
	}

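	/*
	 * Otherwise scan the mark bits for the first free slot: e.g. if the
	 * low bits of mask are ...0100, the loop below stops at bit 2, off
	 * ends up as 2 * ptable_size(type), and the third table in the page
	 * is handed out after its bit is cleared.
	 */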
	for (tmp = 1, off = 0; (mask & tmp) == 0; tmp <<= 1, off += ptable_size(type))
		;
	PD_MARKBITS(dp) = mask & ~tmp;
	if (!PD_MARKBITS(dp)) {
		/* move to end of list */
		list_move_tail(dp, &ptable_list[type]);
	}
	return page_address(PD_PAGE(dp)) + off;
}

int free_pointer_table(void *table, int type)
{
	ptable_desc *dp;
	unsigned long ptable = (unsigned long)table;
	unsigned long page = ptable & PAGE_MASK;
	unsigned int mask = 1U << ((ptable - page)/ptable_size(type));

	dp = PD_PTABLE(page);
	if (PD_MARKBITS (dp) & mask)
		panic ("table already free!");

	PD_MARKBITS (dp) |= mask;

	if (PD_MARKBITS(dp) == ptable_mask(type)) {
		/* all tables in page are free, free page */
		list_del(dp);
		mmu_page_dtor((void *)page);
		pagetable_dtor(virt_to_ptdesc((void *)page));
		free_page (page);
		return 1;
	} else if (ptable_list[type].next != dp) {
		/*
		 * move this descriptor to the front of the list, since
		 * it has one or more free tables.
		 */
		list_move(dp, &ptable_list[type]);
	}
	return 0;
}
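
/*
 * The routines above are consumed by the pgalloc wrappers in
 * <asm/motorola_pgalloc.h>.  A hedged sketch of what such a wrapper looks
 * like (illustrative only, not a copy of the real header):
 *
 *	static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 *	{
 *		return get_pointer_table(mm, TABLE_PGD);
 *	}
 *
 * with the corresponding free wrappers forwarding to free_pointer_table()
 * with the matching TABLE_* type.
 */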

/* size of memory already mapped in head.S */
extern __initdata unsigned long m68k_init_mapped_size;

extern unsigned long availmem;

static pte_t *last_pte_table __initdata = NULL;

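/*
 * Hand out one PTE table (PTRS_PER_PTE entries) per call during boot.
 * last_pte_table starts out NULL, and PAGE_ALIGNED(NULL) is true, so the
 * first call allocates a fresh page; with the 256-byte PTE tables implied
 * by ptable_shift[] above and 4 KiB pages, 16 calls are served from one
 * page before last_pte_table becomes page aligned again and another page
 * is allocated.
 */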
static pte_t * __init kernel_page_table(void)
{
	pte_t *pte_table = last_pte_table;

	if (PAGE_ALIGNED(last_pte_table)) {
		pte_table = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
		if (!pte_table) {
			panic("%s: Failed to allocate %lu bytes align=%lx\n",
					__func__, PAGE_SIZE, PAGE_SIZE);
		}

		clear_page(pte_table);
		mmu_page_ctor(pte_table);

		last_pte_table = pte_table;
	}

	last_pte_table += PTRS_PER_PTE;

	return pte_table;
}

static pmd_t *last_pmd_table __initdata = NULL;

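/*
 * Same idea for pointer (PMD) tables: the first call walks kernel_pg_dir
 * to find the highest pointer table already set up by head.S and keeps
 * carving tables out of that page; whenever last_pmd_table crosses a page
 * boundary a fresh page is allocated.  With the 512-byte PMD tables
 * implied by ptable_shift[] above, that is 8 tables per 4 KiB page.
 */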
static pmd_t * __init kernel_ptr_table(void)
{
	if (!last_pmd_table) {
		unsigned long pmd, last;
		int i;

		/* Find the last ptr table that was used in head.S and
		 * reuse the remaining space in that page for further
		 * ptr tables.
		 */
		last = (unsigned long)kernel_pg_dir;
		for (i = 0; i < PTRS_PER_PGD; i++) {
			pud_t *pud = (pud_t *)(&kernel_pg_dir[i]);

			if (!pud_present(*pud))
				continue;
			pmd = pgd_page_vaddr(kernel_pg_dir[i]);
			if (pmd > last)
				last = pmd;
		}

		last_pmd_table = (pmd_t *)last;
#ifdef DEBUG
		printk("kernel_ptr_init: %p\n", last_pmd_table);
#endif
	}

	last_pmd_table += PTRS_PER_PMD;
	if (PAGE_ALIGNED(last_pmd_table)) {
		last_pmd_table = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
		if (!last_pmd_table)
			panic("%s: Failed to allocate %lu bytes align=%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);

		clear_page(last_pmd_table);
		mmu_page_ctor(last_pmd_table);
	}

	return last_pmd_table;
}

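/*
 * Map one memory chunk into the kernel's linear mapping.  On 020/030 the
 * loop below uses 'early termination' descriptors where it can: a
 * PGDIR_SIZE-aligned, PGDIR_SIZE-sized piece is mapped with a single
 * root-level descriptor, smaller pieces with PMD_SIZE pointer-level
 * descriptors, and only the chunk containing virtual address 0 is broken
 * into individual PTEs so that the very first page can be left unmapped.
 * On 040/060 everything is mapped with regular page-sized PTEs.
 */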
static void __init map_node(int node)
{
	unsigned long physaddr, virtaddr, size;
	pgd_t *pgd_dir;
	p4d_t *p4d_dir;
	pud_t *pud_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	size = m68k_memory[node].size;
	physaddr = m68k_memory[node].addr;
	virtaddr = (unsigned long)phys_to_virt(physaddr);
	physaddr |= m68k_supervisor_cachemode |
		    _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY;
	if (CPU_IS_040_OR_060)
		physaddr |= _PAGE_GLOBAL040;

	while (size > 0) {
#ifdef DEBUG
		if (!(virtaddr & (PMD_SIZE-1)))
			printk ("\npa=%#lx va=%#lx ", physaddr & PAGE_MASK,
				virtaddr);
#endif
		pgd_dir = pgd_offset_k(virtaddr);
		if (virtaddr && CPU_IS_020_OR_030) {
			if (!(virtaddr & (PGDIR_SIZE-1)) &&
			    size >= PGDIR_SIZE) {
#ifdef DEBUG
				printk ("[very early term]");
#endif
				pgd_val(*pgd_dir) = physaddr;
				size -= PGDIR_SIZE;
				virtaddr += PGDIR_SIZE;
				physaddr += PGDIR_SIZE;
				continue;
			}
		}
		p4d_dir = p4d_offset(pgd_dir, virtaddr);
		pud_dir = pud_offset(p4d_dir, virtaddr);
		if (!pud_present(*pud_dir)) {
			pmd_dir = kernel_ptr_table();
#ifdef DEBUG
			printk ("[new pointer %p]", pmd_dir);
#endif
			pud_set(pud_dir, pmd_dir);
		} else
			pmd_dir = pmd_offset(pud_dir, virtaddr);

		if (CPU_IS_020_OR_030) {
			if (virtaddr) {
#ifdef DEBUG
				printk ("[early term]");
#endif
				pmd_val(*pmd_dir) = physaddr;
				physaddr += PMD_SIZE;
			} else {
				int i;
#ifdef DEBUG
				printk ("[zero map]");
#endif
				pte_dir = kernel_page_table();
				pmd_set(pmd_dir, pte_dir);

				pte_val(*pte_dir++) = 0;
				physaddr += PAGE_SIZE;
				for (i = 1; i < PTRS_PER_PTE; physaddr += PAGE_SIZE, i++)
					pte_val(*pte_dir++) = physaddr;
			}
			size -= PMD_SIZE;
			virtaddr += PMD_SIZE;
		} else {
			if (!pmd_present(*pmd_dir)) {
#ifdef DEBUG
				printk ("[new table]");
#endif
				pte_dir = kernel_page_table();
				pmd_set(pmd_dir, pte_dir);
			}
			pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

			if (virtaddr) {
				if (!pte_present(*pte_dir))
					pte_val(*pte_dir) = physaddr;
			} else
				pte_val(*pte_dir) = 0;
			size -= PAGE_SIZE;
			virtaddr += PAGE_SIZE;
			physaddr += PAGE_SIZE;
		}

	}
#ifdef DEBUG
	printk("\n");
#endif
}

/*
 * Alternate definitions that are compile time constants, for
 * initializing protection_map.  The cachebits are fixed later.
 */
#define PAGE_NONE_C	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED_C	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
#define PAGE_COPY_C	__pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED)
#define PAGE_READONLY_C	__pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED)

static pgprot_t protection_map[16] __ro_after_init = {
	[VM_NONE]					= PAGE_NONE_C,
	[VM_READ]					= PAGE_READONLY_C,
	[VM_WRITE]					= PAGE_COPY_C,
	[VM_WRITE | VM_READ]				= PAGE_COPY_C,
	[VM_EXEC]					= PAGE_READONLY_C,
	[VM_EXEC | VM_READ]				= PAGE_READONLY_C,
	[VM_EXEC | VM_WRITE]				= PAGE_COPY_C,
	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_COPY_C,
	[VM_SHARED]					= PAGE_NONE_C,
	[VM_SHARED | VM_READ]				= PAGE_READONLY_C,
	[VM_SHARED | VM_WRITE]				= PAGE_SHARED_C,
	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_SHARED_C,
	[VM_SHARED | VM_EXEC]				= PAGE_READONLY_C,
	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_READONLY_C,
	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_SHARED_C,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_SHARED_C
};
DECLARE_VM_GET_PAGE_PROT

/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/head.S.
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0, };
	unsigned long min_addr, max_addr;
	unsigned long addr;
	int i;

#ifdef DEBUG
	printk ("start of paging_init (%p, %lx)\n", kernel_pg_dir, availmem);
#endif

	/* Fix the cache mode in the page descriptors for the 680[46]0.  */
	if (CPU_IS_040_OR_060) {
		int i;
#ifndef mm_cachebits
		mm_cachebits = _PAGE_CACHE040;
#endif
		for (i = 0; i < 16; i++)
			pgprot_val(protection_map[i]) |= _PAGE_CACHE040;
	}

	min_addr = m68k_memory[0].addr;
	max_addr = min_addr + m68k_memory[0].size - 1;
	memblock_add_node(m68k_memory[0].addr, m68k_memory[0].size, 0,
			  MEMBLOCK_NONE);
	for (i = 1; i < m68k_num_memory;) {
		if (m68k_memory[i].addr < min_addr) {
			printk("Ignoring memory chunk at 0x%lx:0x%lx before the first chunk\n",
				m68k_memory[i].addr, m68k_memory[i].size);
			printk("Fix your bootloader or use a memfile to make use of this area!\n");
			m68k_num_memory--;
			memmove(m68k_memory + i, m68k_memory + i + 1,
				(m68k_num_memory - i) * sizeof(struct m68k_mem_info));
			continue;
		}
		memblock_add_node(m68k_memory[i].addr, m68k_memory[i].size, i,
				  MEMBLOCK_NONE);
		addr = m68k_memory[i].addr + m68k_memory[i].size - 1;
		if (addr > max_addr)
			max_addr = addr;
		i++;
	}
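	/*
	 * fls() returns the index of the highest set bit, so the shift
	 * below guarantees (max_addr - min_addr) >> m68k_virt_to_node_shift
	 * stays below 2^6 = 64; presumably this keeps the address-to-node
	 * lookup (see m68k_setup_node() further down) to a small fixed-size
	 * table.
	 */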
	m68k_memoffset = min_addr - PAGE_OFFSET;
	m68k_virt_to_node_shift = fls(max_addr - min_addr) - 6;

	module_fixup(NULL, __start_fixup, __stop_fixup);
	flush_icache();

	high_memory = phys_to_virt(max_addr) + 1;

	min_low_pfn = availmem >> PAGE_SHIFT;
	max_pfn = max_low_pfn = (max_addr >> PAGE_SHIFT) + 1;

	/* Reserve kernel text/data/bss and the memory allocated in head.S */
	memblock_reserve(m68k_memory[0].addr, availmem - m68k_memory[0].addr);

	/*
	 * Map the physical memory available into the kernel virtual
	 * address space. Make sure memblock will not try to allocate
	 * pages beyond the memory we already mapped in head.S
	 */
	memblock_set_bottom_up(true);

	for (i = 0; i < m68k_num_memory; i++) {
		m68k_setup_node(i);
		map_node(i);
	}

	flush_tlb_all();

	early_memtest(min_addr, max_addr);

	/*
	 * Allocate the page backing empty_zero_page, the zero-filled page
	 * handed out for read-only zero mappings.
	 */
	empty_zero_page = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);

	/*
	 * Set up SFC/DFC registers
	 */
	set_fc(USER_DATA);

#ifdef DEBUG
	printk ("before free_area_init\n");
#endif
	for (i = 0; i < m68k_num_memory; i++)
		if (node_present_pages(i))
			node_set_state(i, N_NORMAL_MEMORY);

	max_zone_pfn[ZONE_DMA] = memblock_end_of_DRAM();
	free_area_init(max_zone_pfn);
}
521