/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
 */
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>
#include <linux/hardirq.h>
#include <linux/gfp.h>
#include <linux/kcore.h>
#include <linux/initrd.h>
#include <linux/execmem.h>

#include <asm/bootinfo.h>
#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/maar.h>
#include <asm/mmu_context.h>
#include <asm/mmzone.h>
#include <asm/sections.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

/*
 * We have up to 8 empty zeroed pages so we can map one of the right colour
 * when needed. This is necessary only on R4000 / R4400 SC and MC versions
 * where we have to avoid VCED / VECI exceptions for good performance at
 * any price. Since the page is never written to after initialization, we
 * don't have to care about aliases on other CPUs.
 */
unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL_GPL(empty_zero_page);
EXPORT_SYMBOL(zero_page_mask);

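/*
 * Set up empty_zero_page / zero_page_mask. On CPUs with virtual coherency
 * exceptions we allocate an order-3 block, i.e. one zero page per cache
 * colour (with 4K pages: eight pages, and
 * zero_page_mask = ((32K - 1) & PAGE_MASK) = 0x7000). As an illustration
 * of the consumer side (a sketch; the real macro lives in asm/pgtable.h),
 * ZERO_PAGE() is expected to pick the colour-matched page roughly as:
 *
 *	empty_zero_page + ((unsigned long)(vaddr) & zero_page_mask)
 */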
void __init arch_setup_zero_pages(void)
{
	unsigned int order;

	if (cpu_has_vce)
		order = 3;
	else
		order = 0;

	empty_zero_page = (unsigned long)memblock_alloc_or_panic(PAGE_SIZE << order, PAGE_SIZE);

	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}

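/*
 * Map @page at a fixmap virtual address whose cache colour matches the
 * user address @addr, by hand-crafting a wired TLB entry for it. Interrupt
 * context uses a separate set of fixmap slots so that it cannot clobber a
 * mapping set up by process context. Undone by kunmap_coherent().
 */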
static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
{
	enum fixed_addresses idx;
	unsigned int old_mmid;
	unsigned long vaddr, flags, entrylo;
	unsigned long old_ctx;
	pte_t pte;
	int tlbidx;

	BUG_ON(folio_test_dcache_dirty(page_folio(page)));

	preempt_disable();
	pagefault_disable();
	idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
	idx += in_interrupt() ? FIX_N_COLOURS : 0;
	vaddr = __fix_to_virt(FIX_CMAP_END - idx);
	pte = mk_pte(page, prot);
#if defined(CONFIG_XPA)
	entrylo = pte_to_entrylo(pte.pte_high);
#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
	entrylo = pte.pte_high;
#else
	entrylo = pte_to_entrylo(pte_val(pte));
#endif

	local_irq_save(flags);
	old_ctx = read_c0_entryhi();
	write_c0_entryhi(vaddr & (PAGE_MASK << 1));
	write_c0_entrylo0(entrylo);
	write_c0_entrylo1(entrylo);
	if (cpu_has_mmid) {
		old_mmid = read_c0_memorymapid();
		write_c0_memorymapid(MMID_KERNEL_WIRED);
	}
#ifdef CONFIG_XPA
	if (cpu_has_xpa) {
		entrylo = (pte.pte_low & _PFNX_MASK);
		writex_c0_entrylo0(entrylo);
		writex_c0_entrylo1(entrylo);
	}
#endif
	tlbidx = num_wired_entries();
	write_c0_wired(tlbidx + 1);
	write_c0_index(tlbidx);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	if (cpu_has_mmid)
		write_c0_memorymapid(old_mmid);
	local_irq_restore(flags);

	return (void *)vaddr;
}

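/*
 * kmap_coherent() returns a cacheable kernel mapping of @page with the
 * same colour as the user mapping at @addr, so accesses through the
 * returned pointer hit the very cache lines userspace uses;
 * kmap_noncoherent() does the same with the PAGE_KERNEL_NC attribute
 * instead.
 */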
void *kmap_coherent(struct page *page, unsigned long addr)
{
	return __kmap_pgprot(page, addr, PAGE_KERNEL);
}

void *kmap_noncoherent(struct page *page, unsigned long addr)
{
	return __kmap_pgprot(page, addr, PAGE_KERNEL_NC);
}

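/*
 * Tear down the mapping installed by the most recent kmap_coherent() or
 * kmap_noncoherent(): point the wired TLB entry at a unique, invalid
 * ENTRYHI, drop the wired count and re-enable pagefaults and preemption.
 */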
void kunmap_coherent(void)
{
	unsigned int wired;
	unsigned long flags, old_ctx;

	local_irq_save(flags);
	old_ctx = read_c0_entryhi();
	wired = num_wired_entries() - 1;
	write_c0_wired(wired);
	write_c0_index(wired);
	write_c0_entryhi(UNIQUE_ENTRYHI(wired));
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	local_irq_restore(flags);
	pagefault_enable();
	preempt_enable();
}

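/*
 * Copy a page for userspace (e.g. on COW). On aliasing D-caches a mapped,
 * clean source page must be read through a colour-matched mapping
 * (kmap_coherent()) or we could read stale lines; otherwise a plain
 * kmap_atomic() will do. The destination is flushed afterwards when the
 * I-cache doesn't fill from the D-cache or the kernel mapping may alias
 * the user one.
 */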
void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	struct folio *src = page_folio(from);
	void *vfrom, *vto;

	vto = kmap_atomic(to);
	if (cpu_has_dc_aliases &&
	    folio_mapped(src) && !folio_test_dcache_dirty(src)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent();
	} else {
		vfrom = kmap_atomic(from);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom);
	}
	if ((!cpu_has_ic_fills_f_dc) ||
	    pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
		flush_data_cache_page((unsigned long)vto);
	kunmap_atomic(vto);
	/* Make sure this page is cleared on other CPUs too before using it */
	smp_wmb();
}

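/*
 * Write into a user-mapped page on behalf of e.g. ptrace. On an aliasing
 * D-cache a mapped, clean page is written through a colour-matched
 * mapping; otherwise we write through the kernel address and mark the
 * folio dcache-dirty so the flush is deferred until needed. Executable
 * mappings are flushed so the I-cache sees the new code.
 */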
void copy_to_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	struct folio *folio = page_folio(page);

	if (cpu_has_dc_aliases &&
	    folio_mapped(folio) && !folio_test_dcache_dirty(folio)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			folio_set_dcache_dirty(folio);
	}
	if (vma->vm_flags & VM_EXEC)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}

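/*
 * The read-side counterpart of copy_to_user_page(): again go through a
 * colour-matched mapping when an aliasing D-cache may hold the user's
 * data in lines the kernel address would miss.
 */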
void copy_from_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	struct folio *folio = page_folio(page);

	if (cpu_has_dc_aliases &&
	    folio_mapped(folio) && !folio_test_dcache_dirty(folio)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			folio_set_dcache_dirty(folio);
	}
}
EXPORT_SYMBOL_GPL(copy_from_user_page);

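/*
 * Pre-allocate the intermediate page tables covering [start, end) in the
 * fixed-address region so PTEs can be installed there later without
 * allocating; only needed (and only compiled in) for CONFIG_HIGHMEM here.
 * A typical caller (a sketch only; the real bounds live in the pagetable
 * setup code) would look like:
 *
 *	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
 *	fixrange_init(vaddr, vaddr + FIXADDR_SIZE, swapper_pg_dir);
 */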
void __init fixrange_init(unsigned long start, unsigned long end,
	pgd_t *pgd_base)
{
#ifdef CONFIG_HIGHMEM
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = pgd_index(vaddr);
	j = pud_index(vaddr);
	k = pmd_index(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr < end); pud++, j++) {
			pmd = (pmd_t *)pud;
			for (; (k < PTRS_PER_PMD) && (vaddr < end); pmd++, k++) {
				if (pmd_none(*pmd)) {
					pte = (pte_t *) memblock_alloc_low(PAGE_SIZE,
									   PAGE_SIZE);
					if (!pte)
						panic("%s: Failed to allocate %lu bytes align=%lx\n",
						      __func__, PAGE_SIZE,
						      PAGE_SIZE);

					set_pmd(pmd, __pmd((unsigned long)pte));
					BUG_ON(pte != pte_offset_kernel(pmd, 0));
				}
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
#endif
}

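/*
 * Record one speculation-safe MAAR config entry per region of system RAM.
 * Together with the 4-bit address shift noted in maar_res_walk() below,
 * the address fields here have 64K granularity, so each region is shrunk
 * inwards to 64K boundaries. Worked example: RAM covering physical
 * 0x00000000-0x0fffffff is already aligned and yields lower = 0x0,
 * upper = 0x0fffffff, while an unaligned region has its lower bound
 * rounded up and its upper bound rounded down so speculation can never
 * stray outside RAM.
 */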
struct maar_walk_info {
	struct maar_config cfg[16];
	unsigned int num_cfg;
};

static int maar_res_walk(unsigned long start_pfn, unsigned long nr_pages,
			 void *data)
{
	struct maar_walk_info *wi = data;
	struct maar_config *cfg = &wi->cfg[wi->num_cfg];
	unsigned int maar_align;

	/* MAAR registers hold physical addresses right shifted by 4 bits */
	maar_align = BIT(MIPS_MAAR_ADDR_SHIFT + 4);

	/* Fill in the MAAR config entry */
	cfg->lower = ALIGN(PFN_PHYS(start_pfn), maar_align);
	cfg->upper = ALIGN_DOWN(PFN_PHYS(start_pfn + nr_pages), maar_align) - 1;
	cfg->attrs = MIPS_MAAR_S;

	/* Ensure we don't overflow the cfg array */
	if (!WARN_ON(wi->num_cfg >= ARRAY_SIZE(wi->cfg)))
		wi->num_cfg++;

	return 0;
}

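/*
 * Default MAAR setup: permit speculation for every region of system RAM.
 * Platforms can override this __weak implementation when some address
 * ranges must not be speculatively accessed.
 */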
unsigned __weak platform_maar_init(unsigned num_pairs)
{
	unsigned int num_configured;
	struct maar_walk_info wi;

	wi.num_cfg = 0;
	walk_system_ram_range(0, max_pfn, &wi, maar_res_walk);

	num_configured = maar_config(wi.cfg, wi.num_cfg, num_pairs);
	if (num_configured < wi.num_cfg)
		pr_warn("Not enough MAAR pairs (%u) for all memory regions (%u)\n",
			num_pairs, wi.num_cfg);

	return num_configured;
}

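/*
 * Detect how many MAARs the CPU implements, program as many pairs as are
 * needed and disable the remainder. The boot CPU records up to three
 * configured pairs so secondary CPUs can simply replay them rather than
 * re-running the platform callback.
 */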
void maar_init(void)
{
	unsigned num_maars, used, i;
	phys_addr_t lower, upper, attr;
	static struct {
		struct maar_config cfgs[3];
		unsigned used;
	} recorded = { { { 0 } }, 0 };

	if (!cpu_has_maar)
		return;

	/* Detect the number of MAARs */
	write_c0_maari(~0);
	back_to_back_c0_hazard();
	num_maars = read_c0_maari() + 1;

	/* MAARs should be in pairs */
	WARN_ON(num_maars % 2);

	/* Set MAARs using values we recorded already */
	if (recorded.used) {
		used = maar_config(recorded.cfgs, recorded.used, num_maars / 2);
		BUG_ON(used != recorded.used);
	} else {
		/* Configure the required MAARs */
		used = platform_maar_init(num_maars / 2);
	}

	/* Disable any further MAARs */
	for (i = (used * 2); i < num_maars; i++) {
		write_c0_maari(i);
		back_to_back_c0_hazard();
		write_c0_maar(0);
		back_to_back_c0_hazard();
	}

	if (recorded.used)
		return;

	pr_info("MAAR configuration:\n");
	for (i = 0; i < num_maars; i += 2) {
		write_c0_maari(i);
		back_to_back_c0_hazard();
		upper = read_c0_maar();
#ifdef CONFIG_XPA
		upper |= (phys_addr_t)readx_c0_maar() << MIPS_MAARX_ADDR_SHIFT;
#endif

		write_c0_maari(i + 1);
		back_to_back_c0_hazard();
		lower = read_c0_maar();
#ifdef CONFIG_XPA
		lower |= (phys_addr_t)readx_c0_maar() << MIPS_MAARX_ADDR_SHIFT;
#endif

		attr = lower & upper;
		lower = (lower & MIPS_MAAR_ADDR) << 4;
		upper = ((upper & MIPS_MAAR_ADDR) << 4) | 0xffff;

		pr_info("  [%d]: ", i / 2);
		if ((attr & MIPS_MAAR_V) != MIPS_MAAR_V) {
			pr_cont("disabled\n");
			continue;
		}

		pr_cont("%pa-%pa", &lower, &upper);

		if (attr & MIPS_MAAR_S)
			pr_cont(" speculate");

		pr_cont("\n");

		/* Record the setup for use on secondary CPUs */
		if (used <= ARRAY_SIZE(recorded.cfgs)) {
			recorded.cfgs[recorded.used].lower = lower;
			recorded.cfgs[recorded.used].upper = upper;
			recorded.cfgs[recorded.used].attrs = attr;
			recorded.used++;
		}
	}
}

#ifndef CONFIG_NUMA
void __init arch_zone_limits_init(unsigned long *max_zone_pfns)
{
#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;

	if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) {
		printk(KERN_WARNING "This processor doesn't support highmem."
		       " %ldk highmem ignored\n",
		       (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
		max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
	}
#endif
}

#ifdef CONFIG_64BIT
static struct kcore_list kcore_kseg0;
#endif

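/*
 * With an aliasing D-cache we cannot map highmem pages at a cache colour
 * of our choosing, so highmem cannot be supported: simply discard
 * everything above highstart_pfn. Otherwise, reserve any highmem page
 * frames that aren't backed by actual RAM.
 */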
static inline void __init highmem_init(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long tmp;

	/*
	 * If the CPU cannot support HIGHMEM, discard the memory above
	 * highstart_pfn.
	 */
	if (cpu_has_dc_aliases) {
		memblock_remove(PFN_PHYS(highstart_pfn), -1);
		return;
	}

	for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
		struct page *page = pfn_to_page(tmp);

		if (!memblock_is_memory(PFN_PHYS(tmp)))
			SetPageReserved(page);
	}
#endif
}

void __init arch_mm_preinit(void)
{
	/*
	 * When PFN_PTE_SHIFT is greater than PAGE_SHIFT we won't have enough
	 * PTE bits to hold a full 32b physical address on MIPS32 systems.
	 */
	BUILD_BUG_ON(IS_ENABLED(CONFIG_32BIT) && (PFN_PTE_SHIFT > PAGE_SHIFT));

	maar_init();
	highmem_init();

#ifdef CONFIG_64BIT
	if ((unsigned long) &_text > (unsigned long) CKSEG0)
		/* The -4 is a hack so that user tools don't have to handle
		   the overflow. */
		kclist_add(&kcore_kseg0, (void *) CKSEG0,
				0x80000000 - 4, KCORE_TEXT);
#endif
}
#endif /* !CONFIG_NUMA */

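/*
 * Poison the given init region with POISON_FREE_INITMEM and hand its
 * whole pages back to the page allocator.
 */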
void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
	unsigned long pfn;

	for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
		struct page *page = pfn_to_page(pfn);
		void *addr = phys_to_virt(PFN_PHYS(pfn));

		memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_reserved_page(page);
	}
	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}

void (*free_init_pages_eva)(void *begin, void *end) = NULL;

void __weak __init prom_free_prom_memory(void)
{
	/* nothing to do */
}

void __ref free_initmem(void)
{
	prom_free_prom_memory();
	/*
	 * Let the platform define a specific function to free the
	 * init section since EVA may have used any possible mapping
	 * between virtual and physical addresses.
	 */
	if (free_init_pages_eva)
		free_init_pages_eva((void *)&__init_begin, (void *)&__init_end);
	else
		free_initmem_default(POISON_FREE_INITMEM);
}

#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	return node_distance(cpu_to_node(from), cpu_to_node(to));
}

static int __init pcpu_cpu_to_node(int cpu)
{
	return cpu_to_node(cpu);
}

void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/*
	 * Always reserve area for module percpu variables. That's
	 * what the legacy allocator did.
	 */
	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
				    pcpu_cpu_distance,
				    pcpu_cpu_to_node);
	if (rc < 0)
		panic("Failed to initialize percpu areas.");

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
}
#endif

#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
unsigned long pgd_current[NR_CPUS];
#endif

/*
 * Align swapper_pg_dir to 64K, which allows its address to be loaded
 * with a single LUI instruction in the TLB handlers. If we used
 * __aligned(64K), its size would get rounded up to the alignment
 * size, and waste space. So we place it in its own section and align
 * it in the linker script.
 */
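/*
 * For illustration: with the low 16 bits of the address all zero, the
 * handlers can load it with a single
 *
 *	lui	k0, %hi(swapper_pg_dir)
 *
 * instead of the usual lui/addiu pair needed for an arbitrary address.
 */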
pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".bss..swapper_pg_dir");
#ifndef __PAGETABLE_PUD_FOLDED
pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss;
#endif
#ifndef __PAGETABLE_PMD_FOLDED
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
EXPORT_SYMBOL_GPL(invalid_pmd_table);
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
EXPORT_SYMBOL(invalid_pte_table);

#ifdef CONFIG_EXECMEM
#ifdef MODULES_VADDR
static struct execmem_info execmem_info __ro_after_init;

struct execmem_info __init *execmem_arch_setup(void)
{
	execmem_info = (struct execmem_info){
		.ranges = {
			[EXECMEM_DEFAULT] = {
				.start	= MODULES_VADDR,
				.end	= MODULES_END,
				.pgprot	= PAGE_KERNEL,
				.alignment = 1,
			},
		},
	};

	return &execmem_info;
}
#endif
#endif /* CONFIG_EXECMEM */