Lines Matching +full:linear +full:- +full:mapping +full:- +full:mode
1 // SPDX-License-Identifier: GPL-2.0-only
5 * Copyright (C) 2020 FORTH-ICS/CARV
20 #include <linux/dma-map-ops.h>
102 pr_notice("%12s : 0x%08lx - 0x%08lx (%4ld kB)\n", name, b, t, in print_mlk()
103 (((t) - (b)) >> LOG2_SZ_1K)); in print_mlk()
108 pr_notice("%12s : 0x%08lx - 0x%08lx (%4ld MB)\n", name, b, t, in print_mlm()
109 (((t) - (b)) >> LOG2_SZ_1M)); in print_mlm()
114 pr_notice("%12s : 0x%08lx - 0x%08lx (%4ld GB)\n", name, b, t, in print_mlg()
115 (((t) - (b)) >> LOG2_SZ_1G)); in print_mlg()
121 pr_notice("%12s : 0x%08lx - 0x%08lx (%4ld TB)\n", name, b, t, in print_mlt()
122 (((t) - (b)) >> LOG2_SZ_1T)); in print_mlt()
130 unsigned long diff = t - b; in print_ml()
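
These four helpers differ only in the shift used to scale the byte span. A minimal userspace sketch (not kernel code), assuming the kernel's LOG2_SZ_1K value of 10; the format string mirrors the lines above:

#include <stdio.h>

#define LOG2_SZ_1K 10

static void print_mlk(const char *name, unsigned long b, unsigned long t)
{
	printf("%12s : 0x%08lx - 0x%08lx (%4ld kB)\n", name, b, t,
	       (long)((t - b) >> LOG2_SZ_1K));
}

int main(void)
{
	print_mlk("fixmap", 0x9fe00000UL, 0xa0000000UL); /* prints 2048 kB */
	return 0;
}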
184 * non-coherent platforms. in arch_mm_preinit()
234 * map the kernel in the linear mapping as read-only: we do not want in setup_bootmem()
238 vmlinux_end = (vmlinux_end + PMD_SIZE - 1) & PMD_MASK; in setup_bootmem()
242 memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start); in setup_bootmem()
246 * at worst, we map the linear mapping with PMD mappings. in setup_bootmem()
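
The round-up at line 238 is the standard power-of-two alignment idiom: adding PMD_SIZE - 1 and masking rounds the image end up to the next PMD boundary, so the read-only alias can be mapped at PMD granularity. A standalone sketch, assuming PMD_SIZE is 2 MiB:

#include <stdio.h>
#include <stdint.h>

#define PMD_SIZE (1UL << 21)		/* 2 MiB on Sv39/Sv48 */
#define PMD_MASK (~(PMD_SIZE - 1))

int main(void)
{
	uintptr_t vmlinux_end = 0x80a12345UL;	/* illustrative image end */
	vmlinux_end = (vmlinux_end + PMD_SIZE - 1) & PMD_MASK;
	printf("0x%lx\n", (unsigned long)vmlinux_end);	/* 0x80c00000 */
	return 0;
}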
256 * In 64-bit, any use of __va/__pa before this point is wrong as we in setup_bootmem()
260 kernel_map.va_pa_offset = PAGE_OFFSET - phys_ram_base; in setup_bootmem()
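
This single offset is what makes the linear mapping linear: __va() adds it to a physical address and __pa() subtracts it. A sketch with illustrative values for PAGE_OFFSET and phys_ram_base (not the real constants):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uintptr_t page_offset   = 0xffffffd800000000UL;	/* illustrative */
	uintptr_t phys_ram_base = 0x80000000UL;
	uintptr_t va_pa_offset  = page_offset - phys_ram_base;

	uintptr_t pa = 0x80200000UL;
	uintptr_t va = pa + va_pa_offset;		/* what __va(pa) computes */
	printf("pa 0x%lx -> va 0x%lx -> pa 0x%lx\n",
	       (unsigned long)pa, (unsigned long)va,
	       (unsigned long)(va - va_pa_offset));	/* __pa(va) round-trips */
	return 0;
}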
263 * The size of the linear page mapping may restrict the amount of in setup_bootmem()
270 max_mapped_addr - phys_ram_base); in setup_bootmem()
271 pr_warn("Physical memory overflows the linear mapping size: region above %pa removed", in setup_bootmem()
278 * addresses greater than (void *)(-PAGE_SIZE) because: in setup_bootmem()
279 * - This memory would overlap with ERR_PTR in setup_bootmem()
280 * - This memory belongs to high memory, which is not supported in setup_bootmem()
282 * This is not applicable to the 64-bit kernel, because virtual addresses in setup_bootmem()
283 * after (void *)(-PAGE_SIZE) are not linearly mapped: they are in setup_bootmem()
284 * occupied by the kernel mapping. Also, it is unrealistic for high memory in setup_bootmem()
285 * to exist on 64-bit platforms. in setup_bootmem()
288 max_mapped_addr = __va_to_pa_nodebug(-PAGE_SIZE); in setup_bootmem()
289 memblock_reserve(max_mapped_addr, (phys_addr_t)-max_mapped_addr); in setup_bootmem()
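
The ERR_PTR overlap called out above is mechanical: error codes are encoded as the top MAX_ERRNO virtual addresses, so any linearly mapped byte in the last page would pass the error-pointer test. A standalone sketch, with IS_ERR_VALUE simplified from the kernel's err.h:

#include <stdio.h>
#include <stdint.h>

#define MAX_ERRNO 4095
#define IS_ERR_VALUE(x) ((uintptr_t)(x) >= (uintptr_t)-MAX_ERRNO)

int main(void)
{
	void *err = (void *)(uintptr_t)-12;		  /* like ERR_PTR(-ENOMEM) */
	void *byte_in_last_page = (void *)(uintptr_t)-64; /* would-be linear-map address */

	/* Both satisfy the error-pointer test, hence the reservation. */
	printf("%d %d\n", (int)IS_ERR_VALUE(err),
	       (int)IS_ERR_VALUE(byte_in_last_page));
	return 0;
}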
318 hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT); in setup_bootmem()
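
The argument here is a page order, not a size. A trivial check of the arithmetic, assuming the common PUD_SHIFT of 30 and PAGE_SHIFT of 12:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PUD_SHIFT  30	/* Sv39/Sv48 value */

int main(void)
{
	int order = PUD_SHIFT - PAGE_SHIFT;	/* what hugetlb_cma_reserve() receives */
	printf("order %d -> %lu MiB per gigantic page\n",
	       order, (1UL << (order + PAGE_SHIFT)) >> 20);	/* order 18 -> 1024 MiB */
	return 0;
}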
331 uintptr_t reloc_offset = kernel_map.virt_addr - KERNEL_LINK_ADDR; in relocate_kernel()
336 uintptr_t va_kernel_link_pa_offset = KERNEL_LINK_ADDR - kernel_map.phys_addr; in relocate_kernel()
339 Elf_Addr addr = (rela->r_offset - va_kernel_link_pa_offset); in relocate_kernel()
340 Elf_Addr relocated_addr = rela->r_addend; in relocate_kernel()
342 if (rela->r_info != R_RISCV_RELATIVE) in relocate_kernel()
349 * mm->context.vdso in VDSO_OFFSET macro. in relocate_kernel()
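
The two offsets computed at lines 331 and 336 combine per relocation entry: r_offset is a link-time VA, so subtracting the link-to-phys offset locates the word in memory, and addends pointing into the kernel image are rebased by reloc_offset. A standalone simulation with illustrative values and a simplified Elf64_Rela stand-in (the real loop also skips entries whose type is not R_RISCV_RELATIVE, as the r_info test above shows, and only rebases addends at or above KERNEL_LINK_ADDR):

#include <stdio.h>
#include <stdint.h>

struct rela { uint64_t r_offset; uint64_t r_info; int64_t r_addend; };
#define R_RISCV_RELATIVE 3	/* simplified: r_info normally packs sym+type */

int main(void)
{
	uint64_t image[4] = { 0 };			/* stand-in for the loaded kernel */
	uintptr_t link_addr = 0xffffffff80000000UL;	/* KERNEL_LINK_ADDR-like value */
	uintptr_t link_to_pa = link_addr - (uintptr_t)image;
	uintptr_t reloc_offset = 0x200000;		/* runtime VA minus link VA */

	struct rela r = { link_addr + 8, R_RISCV_RELATIVE,
			  (int64_t)(link_addr + 16) };	/* addend points into the image */

	if (r.r_info == R_RISCV_RELATIVE)		/* other types are skipped */
		*(uint64_t *)(uintptr_t)(r.r_offset - link_to_pa) =
			(uint64_t)r.r_addend + reloc_offset;

	printf("image[1] = 0x%llx\n", (unsigned long long)image[1]);
	return 0;
}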
511 BUG_ON((va - kernel_map.virt_addr) >> PUD_SHIFT); in alloc_pmd_early()
574 /* Only one PUD is available for early mapping */ in alloc_pud_early()
575 BUG_ON((va - kernel_map.virt_addr) >> PGDIR_SHIFT); in alloc_pud_early()
612 /* Only one P4D is available for early mapping */ in alloc_p4d_early()
613 BUG_ON((va - kernel_map.virt_addr) >> PGDIR_SHIFT); in alloc_p4d_early()
747 !(pa & (P4D_SIZE - 1)) && !(va & (P4D_SIZE - 1)) && size >= P4D_SIZE) in best_map_size()
751 !(pa & (PUD_SIZE - 1)) && !(va & (PUD_SIZE - 1)) && size >= PUD_SIZE) in best_map_size()
755 !(pa & (PMD_SIZE - 1)) && !(va & (PMD_SIZE - 1)) && size >= PMD_SIZE) in best_map_size()
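
These checks all follow one pattern: the largest page size wins if both addresses are aligned to it and the remaining range is big enough. A condensed, runnable sketch keeping only the PUD/PMD tiers and dropping the pgtable_l4/l5_enabled guards the kernel also applies:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define PAGE_SIZE (1UL << 12)
#define PMD_SIZE  (1UL << 21)
#define PUD_SIZE  (1UL << 30)

static uintptr_t best_map_size(uintptr_t pa, uintptr_t va, size_t size)
{
	if (!(pa & (PUD_SIZE - 1)) && !(va & (PUD_SIZE - 1)) && size >= PUD_SIZE)
		return PUD_SIZE;
	if (!(pa & (PMD_SIZE - 1)) && !(va & (PMD_SIZE - 1)) && size >= PMD_SIZE)
		return PMD_SIZE;
	return PAGE_SIZE;
}

int main(void)
{
	/* PMD-aligned but not PUD-aligned: expect 0x200000 (2 MiB) */
	printf("0x%lx\n", (unsigned long)best_map_size(0x80200000UL,
						       0xffffffd800200000UL,
						       (size_t)1 << 24));
	return 0;
}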
770 size_t sz = (size_t)((uintptr_t)(&_end) - (uintptr_t)(&_sdata)); in __copy_data()
783 * In a 64-bit kernel, the kernel mapping is outside the linear mapping so in pgprot_from_va()
784 * we must protect its linear mapping alias from being executed and in pgprot_from_va()
830 pr_info("Disabled 4-level and 5-level paging"); in print_no4lvl()
837 pr_info("Disabled 5-level paging"); in print_no5lvl()
844 mmap_rnd_bits_max = MMAP_VA_BITS - PAGE_SHIFT - 3; in set_mmap_rnd_bits_max()
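
A worked instance of this bound, assuming illustrative Sv48 values of MMAP_VA_BITS = 48 and PAGE_SHIFT = 12 (giving 33 bits of mmap randomization):

#include <stdio.h>

int main(void)
{
	int mmap_va_bits = 48, page_shift = 12;	/* illustrative values */
	printf("mmap_rnd_bits_max = %d\n", mmap_va_bits - page_shift - 3);	/* 33 */
	return 0;
}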
848 * There is a simple way to determine if 4-level is supported by the
849 * underlying hardware: establish 1:1 mapping in 4-level page table mode
914 * setup_vm() is called from head.S with MMU-off.
918 * 1) It should use PC-relative addressing for accessing kernel symbols.
941 kernel_map.xiprom + (va - kernel_map.virt_addr), in create_kernel_page_table()
945 start_va = kernel_map.virt_addr + (uintptr_t)&_sdata - (uintptr_t)&_start; in create_kernel_page_table()
949 kernel_map.phys_addr + (va - start_va), in create_kernel_page_table()
960 kernel_map.phys_addr + (va - kernel_map.virt_addr), in create_kernel_page_table()
968 * Set up a 4MB mapping that encompasses the device tree: for a 64-bit kernel,
969 * this means 2 PMD entries whereas for a 32-bit kernel, this is only 1 PGDIR
976 uintptr_t pa = dtb_pa & ~(PMD_SIZE - 1); in create_fdt_early_page_table()
981 /* In 32-bit only, the fdt lies in its own PGD */ in create_fdt_early_page_table()
992 dtb_early_va = (void *)fix_fdt_va + (dtb_pa & (PMD_SIZE - 1)); in create_fdt_early_page_table()
995 * For 64-bit kernel, __va can't be used since it would return a linear in create_fdt_early_page_table()
996 * mapping address whereas dtb_early_va will be used before in create_fdt_early_page_table()
997 * setup_vm_final installs the linear mapping. For 32-bit kernel, as the in create_fdt_early_page_table()
998 * kernel is mapped in the linear mapping, that makes no difference. in create_fdt_early_page_table()
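
The base/offset split behind dtb_early_va: the DTB's physical address is rounded down to a PMD boundary for the mapping, and the in-PMD offset is added back onto the fixmap VA. A standalone sketch; the fixmap address is hypothetical:

#include <stdio.h>
#include <stdint.h>

#define PMD_SIZE (1UL << 21)

int main(void)
{
	uintptr_t dtb_pa = 0x82345678UL;		/* illustrative DTB location */
	uintptr_t map_pa = dtb_pa & ~(PMD_SIZE - 1);	/* PMD-aligned base that gets mapped */
	uintptr_t fix_fdt_va = 0xffffffff00000000UL;	/* hypothetical fixmap slot VA */
	uintptr_t dtb_early_va = fix_fdt_va + (dtb_pa & (PMD_SIZE - 1));

	printf("map 0x%lx, dtb_early_va 0x%lx\n",
	       (unsigned long)map_pa, (unsigned long)dtb_early_va);
	return 0;
}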
1027 * map the allocated physical pages since the linear mapping does not exist yet.
1089 u32 kernel_size = (uintptr_t)(&_end) - (uintptr_t)(&_start); in setup_vm()
1099 nr_pos = (PUD_SIZE - kernel_size) / PMD_SIZE; in setup_vm()
1109 kernel_map.xiprom_sz = (uintptr_t)(&_exiprom) - (uintptr_t)(&_xiprom); in setup_vm()
1116 kernel_map.size = (uintptr_t)(&_end) - (uintptr_t)(&_start); in setup_vm()
1118 kernel_map.va_kernel_xip_text_pa_offset = kernel_map.virt_addr - kernel_map.xiprom; in setup_vm()
1119 kernel_map.va_kernel_xip_data_pa_offset = kernel_map.virt_addr - kernel_map.phys_addr in setup_vm()
1120 + (uintptr_t)&_sdata - (uintptr_t)&_start; in setup_vm()
1123 kernel_map.size = (uintptr_t)(&_end) - kernel_map.phys_addr; in setup_vm()
1124 kernel_map.va_kernel_pa_offset = kernel_map.virt_addr - kernel_map.phys_addr; in setup_vm()
1133 * In 64-bit, we defer the setup of va_pa_offset to setup_bootmem, in setup_vm()
1136 * for the linear mapping. This is only possible because the kernel in setup_vm()
1137 * mapping lies outside the linear mapping. in setup_vm()
1138 * In 32-bit, however, as the kernel resides in the linear mapping, in setup_vm()
1139 * setup_vm_final cannot change the mapping established here, in setup_vm()
1145 0UL : PAGE_OFFSET - kernel_map.phys_addr; in setup_vm()
1158 BUG_ON((kernel_map.virt_addr + kernel_map.size) > ADDRESS_SPACE_END - SZ_4K); in setup_vm()
1169 BUG_ON(PUD_SIZE - (kernel_map.virt_addr & (PUD_SIZE - 1)) < kernel_map.size); in setup_vm()
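
This BUG_ON verifies the image fits between its start VA and the next PUD boundary, since only one early PUD is available (see line 574). A sketch of the fit check with illustrative values:

#include <stdio.h>
#include <stdint.h>

#define PUD_SIZE (1UL << 30)

int main(void)
{
	uintptr_t virt_addr = 0xffffffff80000000UL;	/* illustrative link VA */
	uintptr_t size = 64UL << 20;			/* 64 MiB image */
	uintptr_t room = PUD_SIZE - (virt_addr & (PUD_SIZE - 1));

	printf("room 0x%lx, fits: %d\n", (unsigned long)room, room >= size);
	return 0;
}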
1220 /* Set up early mapping for FDT early scan */ in setup_vm()
1224 * Boot-time fixmap can only handle PMD_SIZE mappings. Thus, boot-ioremap in setup_vm()
1265 best_map_size(pa, va, end - pa); in create_linear_mapping_range()
1280 phys_addr_t ktext_size = __init_data_begin - _start; in create_linear_mapping_page_table()
1282 phys_addr_t krodata_size = _data - __start_rodata; in create_linear_mapping_page_table()
1292 * before we set up the linear mapping so that we avoid using hugepages in create_linear_mapping_page_table()
1302 /* Map all memory banks in the linear mapping */ in create_linear_mapping_page_table()
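
Setting the kernel text and rodata ranges aside before mapping the banks prevents a single hugepage, with its single permission set, from spanning both kernel text and ordinary RAM. A toy sketch of the resulting split; the addresses and the map_range() helper are illustrative:

#include <stdio.h>
#include <stdint.h>

static void map_range(uintptr_t s, uintptr_t e, const char *prot)
{
	printf("map [0x%lx, 0x%lx) as %s\n",
	       (unsigned long)s, (unsigned long)e, prot);
}

int main(void)
{
	uintptr_t ram_s = 0x80000000UL, ram_e = 0xc0000000UL;	/* one memory bank */
	uintptr_t kt_s  = 0x80200000UL, kt_e  = 0x81400000UL;	/* kernel text span */

	map_range(ram_s, kt_s, "RW");	/* RAM below the image */
	map_range(kt_s,  kt_e, "RX");	/* text keeps execute, loses write */
	map_range(kt_e,  ram_e, "RW");	/* rest of the bank */
	return 0;
}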
1333 * In 32-bit, the device tree lies in a pgd entry, so it must be copied in setup_vm_final()
1345 /* Create the linear mapping */ in setup_vm_final()
1387 * reserve_crashkernel() - reserves memory for crash kernel
1417 /* Depends on the linear mapping being ready */ in paging_init()
1454 * can't use hugepage mappings for a 2-level page table because in case of in vmemmap_populate()
1464 * Pre-allocates page-table pages for a specific area in the kernel
1465 * page-table. Only the level which needs to be synchronized between
1466 * all page-tables is allocated because the synchronization can be
1507 * process page-tables later. in preallocate_pgd_pages_range()
1509 panic("Failed to pre-allocate %s pages for %s area\n", lvl, area); in preallocate_pgd_pages_range()
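
The point of pre-allocating the shared level up front is that every later page table cloned from init_mm already carries those entries, so nothing at that level needs synchronizing afterwards. A standalone simulation with simplified table geometry (512 slots, 1 GiB each) and hypothetical helper names:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define PTRS_PER_PGD 512
#define PGDIR_SHIFT  30		/* 1 GiB per top-level slot, illustrative */

static void *pgd[PTRS_PER_PGD];

static int preallocate_range(uint64_t start, uint64_t end)
{
	for (uint64_t va = start; va < end; va += 1UL << PGDIR_SHIFT) {
		size_t i = (va >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1); /* simplified index */
		if (!pgd[i] && !(pgd[i] = calloc(512, sizeof(void *))))
			return -1;	/* the kernel panics here instead */
	}
	return 0;
}

int main(void)
{
	if (preallocate_range(0, 4UL << 30))
		return 1;
	printf("slots 0..3 populated: %d\n",
	       pgd[0] && pgd[1] && pgd[2] && pgd[3] && !pgd[4]);
	return 0;
}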
1638 while (nr_pages--) in free_vmemmap_storage()
1794 mhp_range.end = __pa(PAGE_END - 1); in arch_get_mappable_range()
1802 create_linear_mapping_range(start, start + size, 0, &params->pgprot); in arch_add_memory()