// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@ucw.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/memremap.h>
#include <linux/nmi.h>
#include <linux/gfp.h>
#include <linux/kcore.h>
#include <linux/bootmem_info.h>

#include <asm/processor.h>
#include <asm/bios_ebda.h>
#include <linux/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820/api.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/numa.h>
#include <asm/set_memory.h>
#include <asm/init.h>
#include <asm/uv/uv.h>
#include <asm/setup.h>
#include <asm/ftrace.h>

#include "mm_internal.h"

#include "ident_map.c"

#define DEFINE_POPULATE(fname, type1, type2, init)              \
static inline void fname##_init(struct mm_struct *mm,           \
                type1##_t *arg1, type2##_t *arg2, bool init)    \
{                                                               \
        if (init)                                               \
                fname##_safe(mm, arg1, arg2);                   \
        else                                                    \
                fname(mm, arg1, arg2);                          \
}
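
/*
 * Example (informational, a sketch of the expansion): the instance
 * DEFINE_POPULATE(pud_populate, pud, pmd, init) below generates:
 *
 *	static inline void pud_populate_init(struct mm_struct *mm,
 *			pud_t *arg1, pmd_t *arg2, bool init)
 *	{
 *		if (init)
 *			pud_populate_safe(mm, arg1, arg2);
 *		else
 *			pud_populate(mm, arg1, arg2);
 *	}
 */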

DEFINE_POPULATE(p4d_populate, p4d, pud, init)
DEFINE_POPULATE(pgd_populate, pgd, p4d, init)
DEFINE_POPULATE(pud_populate, pud, pmd, init)
DEFINE_POPULATE(pmd_populate_kernel, pmd, pte, init)

#define DEFINE_ENTRY(type1, type2, init)                        \
static inline void set_##type1##_init(type1##_t *arg1,          \
                type2##_t arg2, bool init)                      \
{                                                               \
        if (init)                                               \
                set_##type1##_safe(arg1, arg2);                 \
        else                                                    \
                set_##type1(arg1, arg2);                        \
}
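
/*
 * Example (informational): DEFINE_ENTRY(pmd, pmd, init) below expands to:
 *
 *	static inline void set_pmd_init(pmd_t *arg1, pmd_t arg2, bool init)
 *	{
 *		if (init)
 *			set_pmd_safe(arg1, arg2);
 *		else
 *			set_pmd(arg1, arg2);
 *	}
 */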

DEFINE_ENTRY(p4d, p4d, init)
DEFINE_ENTRY(pud, pud, init)
DEFINE_ENTRY(pmd, pmd, init)
DEFINE_ENTRY(pte, pte, init)

static inline pgprot_t prot_sethuge(pgprot_t prot)
{
        WARN_ON_ONCE(pgprot_val(prot) & _PAGE_PAT);

        return __pgprot(pgprot_val(prot) | _PAGE_PSE);
}
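
/*
 * Background note (informational): in a 4K PTE, bit 7 is _PAGE_PAT, while
 * in a huge PMD/PUD entry the same bit is _PAGE_PSE (and PAT moves to
 * bit 12). The WARN above catches 4K-style protections whose PAT bit
 * would silently be reinterpreted as PSE when promoted to a huge page.
 */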

/*
 * NOTE: pagetable_init() allocates all the fixmap pagetables contiguously
 * in physical space, so we can cache the location of the first one and
 * move around without checking the pgd every time.
 */

/* Bits supported by the hardware: */
pteval_t __supported_pte_mask __read_mostly = ~0;
/* Bits allowed in normal kernel mappings: */
pteval_t __default_kernel_pte_mask __read_mostly = ~0;
EXPORT_SYMBOL_GPL(__supported_pte_mask);
/* Used in PAGE_KERNEL_* macros which are reasonably used out-of-tree: */
EXPORT_SYMBOL(__default_kernel_pte_mask);

int force_personality32;

/*
 * noexec32=on|off
 * Control the non-executable heap for 32-bit processes.
 *
 * on	PROT_READ does not imply PROT_EXEC for 32-bit processes (default)
 * off	PROT_READ implies PROT_EXEC
 */
static int __init nonx32_setup(char *str)
{
        if (!strcmp(str, "on"))
                force_personality32 &= ~READ_IMPLIES_EXEC;
        else if (!strcmp(str, "off"))
                force_personality32 |= READ_IMPLIES_EXEC;
        return 1;
}
__setup("noexec32=", nonx32_setup);
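
/*
 * Usage sketch (hedged example, not from this file): booting with
 * "noexec32=off" on the kernel command line makes PROT_READ imply
 * PROT_EXEC for 32-bit processes, which some legacy binaries that
 * jump into data mappings still expect.
 */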

static void sync_global_pgds_l5(unsigned long start, unsigned long end)
{
        unsigned long addr;

        for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
                const pgd_t *pgd_ref = pgd_offset_k(addr);
                struct page *page;

                /* Check for overflow */
                if (addr < start)
                        break;

                if (pgd_none(*pgd_ref))
                        continue;

                spin_lock(&pgd_lock);
                list_for_each_entry(page, &pgd_list, lru) {
                        pgd_t *pgd;
                        spinlock_t *pgt_lock;

                        pgd = (pgd_t *)page_address(page) + pgd_index(addr);
                        /* the pgt_lock is only needed by Xen */
                        pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
                        spin_lock(pgt_lock);

                        if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
                                BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));

                        if (pgd_none(*pgd))
                                set_pgd(pgd, *pgd_ref);

                        spin_unlock(pgt_lock);
                }
                spin_unlock(&pgd_lock);
        }
}

static void sync_global_pgds_l4(unsigned long start, unsigned long end)
{
        unsigned long addr;

        for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
                pgd_t *pgd_ref = pgd_offset_k(addr);
                const p4d_t *p4d_ref;
                struct page *page;

                /*
                 * With a folded p4d, pgd_none() is always false, so we need
                 * to handle the synchronization at the p4d level.
                 */
                MAYBE_BUILD_BUG_ON(pgd_none(*pgd_ref));
                p4d_ref = p4d_offset(pgd_ref, addr);

                if (p4d_none(*p4d_ref))
                        continue;

                spin_lock(&pgd_lock);
                list_for_each_entry(page, &pgd_list, lru) {
                        pgd_t *pgd;
                        p4d_t *p4d;
                        spinlock_t *pgt_lock;

                        pgd = (pgd_t *)page_address(page) + pgd_index(addr);
                        p4d = p4d_offset(pgd, addr);
                        /* the pgt_lock is only needed by Xen */
                        pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
                        spin_lock(pgt_lock);

                        if (!p4d_none(*p4d_ref) && !p4d_none(*p4d))
                                BUG_ON(p4d_pgtable(*p4d)
                                       != p4d_pgtable(*p4d_ref));

                        if (p4d_none(*p4d))
                                set_p4d(p4d, *p4d_ref);

                        spin_unlock(pgt_lock);
                }
                spin_unlock(&pgd_lock);
        }
}

/*
 * When memory is added, make sure all the processes' MMs have
 * suitable entries in the local PGD-level page.
 */
static void sync_global_pgds(unsigned long start, unsigned long end)
{
        if (pgtable_l5_enabled())
                sync_global_pgds_l5(start, end);
        else
                sync_global_pgds_l4(start, end);
}

/*
 * Make kernel mappings visible in all page tables in the system.
 * This is necessary except when the init task populates kernel mappings
 * during the boot process. In that case, all processes originating from
 * the init task copy the kernel mappings, so there is no issue.
 * Otherwise, missing synchronization could lead to kernel crashes due
 * to missing page table entries for certain kernel mappings.
 *
 * Synchronization is performed at the top level, which is the PGD in
 * 5-level paging systems. In 4-level paging systems, however,
 * pgd_populate() is a no-op, so synchronization is done at the P4D level.
 * sync_global_pgds() handles this difference between paging levels.
 */
void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
{
        sync_global_pgds(start, end);
}

/*
 * NOTE: This function is marked __ref because it calls the __init function
 * memblock_alloc(). It's safe to do so ONLY when after_bootmem == 0.
 */
static __ref void *spp_getpage(void)
{
        void *ptr;

        if (after_bootmem)
                ptr = (void *) get_zeroed_page(GFP_ATOMIC);
        else
                ptr = memblock_alloc(PAGE_SIZE, PAGE_SIZE);

        if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
                panic("set_pte_phys: cannot allocate page data %s\n",
                      after_bootmem ? "after bootmem" : "");
        }

        pr_debug("spp_getpage %p\n", ptr);

        return ptr;
}

static p4d_t *fill_p4d(pgd_t *pgd, unsigned long vaddr)
{
        if (pgd_none(*pgd)) {
                p4d_t *p4d = (p4d_t *)spp_getpage();
                pgd_populate(&init_mm, pgd, p4d);
                if (p4d != p4d_offset(pgd, 0))
                        printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
                               p4d, p4d_offset(pgd, 0));
        }
        return p4d_offset(pgd, vaddr);
}

static pud_t *fill_pud(p4d_t *p4d, unsigned long vaddr)
{
        if (p4d_none(*p4d)) {
                pud_t *pud = (pud_t *)spp_getpage();
                p4d_populate(&init_mm, p4d, pud);
                if (pud != pud_offset(p4d, 0))
                        printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
                               pud, pud_offset(p4d, 0));
        }
        return pud_offset(p4d, vaddr);
}

static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
{
        if (pud_none(*pud)) {
                pmd_t *pmd = (pmd_t *) spp_getpage();
                pud_populate(&init_mm, pud, pmd);
                if (pmd != pmd_offset(pud, 0))
                        printk(KERN_ERR "PAGETABLE BUG #02! %p <-> %p\n",
                               pmd, pmd_offset(pud, 0));
        }
        return pmd_offset(pud, vaddr);
}

static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr)
{
        if (pmd_none(*pmd)) {
                pte_t *pte = (pte_t *) spp_getpage();
                pmd_populate_kernel(&init_mm, pmd, pte);
                if (pte != pte_offset_kernel(pmd, 0))
                        printk(KERN_ERR "PAGETABLE BUG #03!\n");
        }
        return pte_offset_kernel(pmd, vaddr);
}

static void __set_pte_vaddr(pud_t *pud, unsigned long vaddr, pte_t new_pte)
{
        pmd_t *pmd = fill_pmd(pud, vaddr);
        pte_t *pte = fill_pte(pmd, vaddr);

        set_pte(pte, new_pte);

        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
        flush_tlb_one_kernel(vaddr);
}

void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte)
{
        p4d_t *p4d = p4d_page + p4d_index(vaddr);
        pud_t *pud = fill_pud(p4d, vaddr);

        __set_pte_vaddr(pud, vaddr, new_pte);
}

void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
{
        pud_t *pud = pud_page + pud_index(vaddr);

        __set_pte_vaddr(pud, vaddr, new_pte);
}

void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
{
        pgd_t *pgd;
        p4d_t *p4d_page;

        pr_debug("set_pte_vaddr %lx to %lx\n", vaddr, native_pte_val(pteval));

        pgd = pgd_offset_k(vaddr);
        if (pgd_none(*pgd)) {
                printk(KERN_ERR
                       "PGD FIXMAP MISSING, it should be set up in head.S!\n");
                return;
        }

        p4d_page = p4d_offset(pgd, 0);
        set_pte_vaddr_p4d(p4d_page, vaddr, pteval);
}

pmd_t * __init populate_extra_pmd(unsigned long vaddr)
{
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;

        pgd = pgd_offset_k(vaddr);
        p4d = fill_p4d(pgd, vaddr);
        pud = fill_pud(p4d, vaddr);
        return fill_pmd(pud, vaddr);
}

pte_t * __init populate_extra_pte(unsigned long vaddr)
{
        pmd_t *pmd;

        pmd = populate_extra_pmd(vaddr);
        return fill_pte(pmd, vaddr);
}

/*
 * Create large page table mappings for a range of physical addresses.
 */
static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
                                        enum page_cache_mode cache)
{
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pgprot_t prot;

        pgprot_val(prot) = pgprot_val(PAGE_KERNEL_LARGE) |
                           protval_4k_2_large(cachemode2protval(cache));
        BUG_ON((phys & ~PMD_MASK) || (size & ~PMD_MASK));
        for (; size; phys += PMD_SIZE, size -= PMD_SIZE) {
                pgd = pgd_offset_k((unsigned long)__va(phys));
                if (pgd_none(*pgd)) {
                        p4d = (p4d_t *) spp_getpage();
                        set_pgd(pgd, __pgd(__pa(p4d) | _KERNPG_TABLE |
                                           _PAGE_USER));
                }
                p4d = p4d_offset(pgd, (unsigned long)__va(phys));
                if (p4d_none(*p4d)) {
                        pud = (pud_t *) spp_getpage();
                        set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE |
                                           _PAGE_USER));
                }
                pud = pud_offset(p4d, (unsigned long)__va(phys));
                if (pud_none(*pud)) {
                        pmd = (pmd_t *) spp_getpage();
                        set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
                                           _PAGE_USER));
                }
                pmd = pmd_offset(pud, phys);
                BUG_ON(!pmd_none(*pmd));
                set_pmd(pmd, __pmd(phys | pgprot_val(prot)));
        }
}

void __init init_extra_mapping_wb(unsigned long phys, unsigned long size)
{
        __init_extra_mapping(phys, size, _PAGE_CACHE_MODE_WB);
}

void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
{
        __init_extra_mapping(phys, size, _PAGE_CACHE_MODE_UC);
}
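
/*
 * Usage sketch (hedged, illustrative only): platform setup code can use
 * these helpers to map a PMD_SIZE-aligned MMIO window with 2MB pages,
 * e.g. something like:
 *
 *	init_extra_mapping_uc(mmr_base, PMD_SIZE);
 *
 * where "mmr_base" is a hypothetical, 2MB-aligned physical address;
 * unaligned inputs trip the BUG_ON() in __init_extra_mapping().
 */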

/*
 * The head.S code sets up the kernel high mapping:
 *
 *   from __START_KERNEL_map to __START_KERNEL_map + size (== _end - _text)
 *
 * phys_base holds the negative offset to the kernel, which is added
 * to the compile time generated pmds. This results in invalid pmds up
 * to the point where we hit the physaddr 0 mapping.
 *
 * We limit the mappings to the region from _text to _brk_end. _brk_end
 * is rounded up to the 2MB boundary. This catches the invalid pmds as
 * well, as they are located before _text:
 */
void __init cleanup_highmap(void)
{
        unsigned long vaddr = __START_KERNEL_map;
        unsigned long vaddr_end = __START_KERNEL_map + KERNEL_IMAGE_SIZE;
        unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
        pmd_t *pmd = level2_kernel_pgt;

        /*
         * Native path, max_pfn_mapped is not set yet.
         * Xen has a valid max_pfn_mapped set in
         * arch/x86/xen/mmu.c:xen_setup_kernel_pagetable().
         */
        if (max_pfn_mapped)
                vaddr_end = __START_KERNEL_map + (max_pfn_mapped << PAGE_SHIFT);

        for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) {
                if (pmd_none(*pmd))
                        continue;
                if (vaddr < (unsigned long) _text || vaddr > end)
                        set_pmd(pmd, __pmd(0));
        }
}

/*
 * Create PTE-level page table mappings for physical addresses.
 * It returns the last physical address mapped.
 */
static unsigned long __meminit
phys_pte_init(pte_t *pte_page, unsigned long paddr, unsigned long paddr_end,
              pgprot_t prot, bool init)
{
        unsigned long pages = 0, paddr_next;
        unsigned long paddr_last = paddr_end;
        pte_t *pte;
        int i;

        pte = pte_page + pte_index(paddr);
        i = pte_index(paddr);

        for (; i < PTRS_PER_PTE; i++, paddr = paddr_next, pte++) {
                paddr_next = (paddr & PAGE_MASK) + PAGE_SIZE;
                if (paddr >= paddr_end) {
                        if (!after_bootmem &&
                            !e820__mapped_any(paddr & PAGE_MASK, paddr_next,
                                              E820_TYPE_RAM) &&
                            !e820__mapped_any(paddr & PAGE_MASK, paddr_next,
                                              E820_TYPE_ACPI))
                                set_pte_init(pte, __pte(0), init);
                        continue;
                }

                /*
                 * We will re-use the existing mapping.
                 * Xen for example has some special requirements, like mapping
                 * pagetable pages as RO. So assume whoever pre-set up these
                 * mappings knew what they were doing.
                 */
                if (!pte_none(*pte)) {
                        if (!after_bootmem)
                                pages++;
                        continue;
                }

                pages++;
                set_pte_init(pte, pfn_pte(paddr >> PAGE_SHIFT, prot), init);
                paddr_last = (paddr & PAGE_MASK) + PAGE_SIZE;
        }

        update_page_count(PG_LEVEL_4K, pages);

        return paddr_last;
}

/*
 * Create PMD-level page table mappings for physical addresses. The virtual
 * and physical addresses have to be aligned at this level.
 * It returns the last physical address mapped.
 */
static unsigned long __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
              unsigned long page_size_mask, pgprot_t prot, bool init)
{
        unsigned long pages = 0, paddr_next;
        unsigned long paddr_last = paddr_end;

        int i = pmd_index(paddr);

        for (; i < PTRS_PER_PMD; i++, paddr = paddr_next) {
                pmd_t *pmd = pmd_page + pmd_index(paddr);
                pte_t *pte;
                pgprot_t new_prot = prot;

                paddr_next = (paddr & PMD_MASK) + PMD_SIZE;
                if (paddr >= paddr_end) {
                        if (!after_bootmem &&
                            !e820__mapped_any(paddr & PMD_MASK, paddr_next,
                                              E820_TYPE_RAM) &&
                            !e820__mapped_any(paddr & PMD_MASK, paddr_next,
                                              E820_TYPE_ACPI))
                                set_pmd_init(pmd, __pmd(0), init);
                        continue;
                }

                if (!pmd_none(*pmd)) {
                        if (!pmd_leaf(*pmd)) {
                                spin_lock(&init_mm.page_table_lock);
                                pte = (pte_t *)pmd_page_vaddr(*pmd);
                                paddr_last = phys_pte_init(pte, paddr,
                                                           paddr_end, prot,
                                                           init);
                                spin_unlock(&init_mm.page_table_lock);
                                continue;
                        }
                        /*
                         * If we are OK with a PG_LEVEL_2M mapping, then we
                         * will use the existing mapping.
                         *
                         * Otherwise, we will split the large page mapping but
                         * use the same existing protection bits except for
                         * large page, so that we don't violate Intel's TLB
                         * Application note (317080) which says, while changing
                         * the page sizes, new and old translations should
                         * not differ with respect to page frame and
                         * attributes.
                         */
                        if (page_size_mask & (1 << PG_LEVEL_2M)) {
                                if (!after_bootmem)
                                        pages++;
                                paddr_last = paddr_next;
                                continue;
                        }
                        new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
                }

                if (page_size_mask & (1<<PG_LEVEL_2M)) {
                        pages++;
                        spin_lock(&init_mm.page_table_lock);
                        set_pmd_init(pmd,
                                     pfn_pmd(paddr >> PAGE_SHIFT, prot_sethuge(prot)),
                                     init);
                        spin_unlock(&init_mm.page_table_lock);
                        paddr_last = paddr_next;
                        continue;
                }

                pte = alloc_low_page();
                paddr_last = phys_pte_init(pte, paddr, paddr_end, new_prot, init);

                spin_lock(&init_mm.page_table_lock);
                pmd_populate_kernel_init(&init_mm, pmd, pte, init);
                spin_unlock(&init_mm.page_table_lock);
        }
        update_page_count(PG_LEVEL_2M, pages);
        return paddr_last;
}

/*
 * Create PUD-level page table mappings for physical addresses. The virtual
 * and physical addresses do not have to be aligned at this level. KASLR can
 * randomize virtual addresses up to this level.
 * It returns the last physical address mapped.
 */
static unsigned long __meminit
phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
              unsigned long page_size_mask, pgprot_t _prot, bool init)
{
        unsigned long pages = 0, paddr_next;
        unsigned long paddr_last = paddr_end;
        unsigned long vaddr = (unsigned long)__va(paddr);
        int i = pud_index(vaddr);

        for (; i < PTRS_PER_PUD; i++, paddr = paddr_next) {
                pud_t *pud;
                pmd_t *pmd;
                pgprot_t prot = _prot;

                vaddr = (unsigned long)__va(paddr);
                pud = pud_page + pud_index(vaddr);
                paddr_next = (paddr & PUD_MASK) + PUD_SIZE;

                if (paddr >= paddr_end) {
                        if (!after_bootmem &&
                            !e820__mapped_any(paddr & PUD_MASK, paddr_next,
                                              E820_TYPE_RAM) &&
                            !e820__mapped_any(paddr & PUD_MASK, paddr_next,
                                              E820_TYPE_ACPI))
                                set_pud_init(pud, __pud(0), init);
                        continue;
                }

                if (!pud_none(*pud)) {
                        if (!pud_leaf(*pud)) {
                                pmd = pmd_offset(pud, 0);
                                paddr_last = phys_pmd_init(pmd, paddr,
                                                           paddr_end,
                                                           page_size_mask,
                                                           prot, init);
                                continue;
                        }
                        /*
                         * If we are OK with a PG_LEVEL_1G mapping, then we
                         * will use the existing mapping.
                         *
                         * Otherwise, we will split the gbpage mapping but use
                         * the same existing protection bits except for large
                         * page, so that we don't violate Intel's TLB
                         * Application note (317080) which says, while changing
                         * the page sizes, new and old translations should
                         * not differ with respect to page frame and
                         * attributes.
                         */
                        if (page_size_mask & (1 << PG_LEVEL_1G)) {
                                if (!after_bootmem)
                                        pages++;
                                paddr_last = paddr_next;
                                continue;
                        }
                        prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
                }

                if (page_size_mask & (1<<PG_LEVEL_1G)) {
                        pages++;
                        spin_lock(&init_mm.page_table_lock);
                        set_pud_init(pud,
                                     pfn_pud(paddr >> PAGE_SHIFT, prot_sethuge(prot)),
                                     init);
                        spin_unlock(&init_mm.page_table_lock);
                        paddr_last = paddr_next;
                        continue;
                }

                pmd = alloc_low_page();
                paddr_last = phys_pmd_init(pmd, paddr, paddr_end,
                                           page_size_mask, prot, init);

                spin_lock(&init_mm.page_table_lock);
                pud_populate_init(&init_mm, pud, pmd, init);
                spin_unlock(&init_mm.page_table_lock);
        }

        update_page_count(PG_LEVEL_1G, pages);

        return paddr_last;
}

static unsigned long __meminit
phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
              unsigned long page_size_mask, pgprot_t prot, bool init)
{
        unsigned long vaddr, vaddr_end, vaddr_next, paddr_next, paddr_last;

        paddr_last = paddr_end;
        vaddr = (unsigned long)__va(paddr);
        vaddr_end = (unsigned long)__va(paddr_end);

        if (!pgtable_l5_enabled())
                return phys_pud_init((pud_t *) p4d_page, paddr, paddr_end,
                                     page_size_mask, prot, init);

        for (; vaddr < vaddr_end; vaddr = vaddr_next) {
                p4d_t *p4d = p4d_page + p4d_index(vaddr);
                pud_t *pud;

                vaddr_next = (vaddr & P4D_MASK) + P4D_SIZE;
                paddr = __pa(vaddr);

                if (paddr >= paddr_end) {
                        paddr_next = __pa(vaddr_next);
                        if (!after_bootmem &&
                            !e820__mapped_any(paddr & P4D_MASK, paddr_next,
                                              E820_TYPE_RAM) &&
                            !e820__mapped_any(paddr & P4D_MASK, paddr_next,
                                              E820_TYPE_ACPI))
                                set_p4d_init(p4d, __p4d(0), init);
                        continue;
                }

                if (!p4d_none(*p4d)) {
                        pud = pud_offset(p4d, 0);
                        paddr_last = phys_pud_init(pud, paddr, __pa(vaddr_end),
                                                   page_size_mask, prot, init);
                        continue;
                }

                pud = alloc_low_page();
                paddr_last = phys_pud_init(pud, paddr, __pa(vaddr_end),
                                           page_size_mask, prot, init);

                spin_lock(&init_mm.page_table_lock);
                p4d_populate_init(&init_mm, p4d, pud, init);
                spin_unlock(&init_mm.page_table_lock);
        }

        return paddr_last;
}

static unsigned long __meminit
__kernel_physical_mapping_init(unsigned long paddr_start,
                               unsigned long paddr_end,
                               unsigned long page_size_mask,
                               pgprot_t prot, bool init)
{
        bool pgd_changed = false;
        unsigned long vaddr, vaddr_start, vaddr_end, vaddr_next, paddr_last;

        paddr_last = paddr_end;
        vaddr = (unsigned long)__va(paddr_start);
        vaddr_end = (unsigned long)__va(paddr_end);
        vaddr_start = vaddr;

        for (; vaddr < vaddr_end; vaddr = vaddr_next) {
                pgd_t *pgd = pgd_offset_k(vaddr);
                p4d_t *p4d;

                vaddr_next = (vaddr & PGDIR_MASK) + PGDIR_SIZE;

                if (pgd_val(*pgd)) {
                        p4d = (p4d_t *)pgd_page_vaddr(*pgd);
                        paddr_last = phys_p4d_init(p4d, __pa(vaddr),
                                                   __pa(vaddr_end),
                                                   page_size_mask,
                                                   prot, init);
                        continue;
                }

                p4d = alloc_low_page();
                paddr_last = phys_p4d_init(p4d, __pa(vaddr), __pa(vaddr_end),
                                           page_size_mask, prot, init);

                spin_lock(&init_mm.page_table_lock);
                if (pgtable_l5_enabled())
                        pgd_populate_init(&init_mm, pgd, p4d, init);
                else
                        p4d_populate_init(&init_mm, p4d_offset(pgd, vaddr),
                                          (pud_t *) p4d, init);

                spin_unlock(&init_mm.page_table_lock);
                pgd_changed = true;
        }

        if (pgd_changed)
                sync_global_pgds(vaddr_start, vaddr_end - 1);

        return paddr_last;
}


/*
 * Create page table mappings for the physical memory at specific physical
 * addresses. Note that it can only be used to populate non-present entries.
 * The virtual and physical addresses have to be aligned down to PMD level.
 * It returns the last physical address mapped.
 */
unsigned long __meminit
kernel_physical_mapping_init(unsigned long paddr_start,
                             unsigned long paddr_end,
                             unsigned long page_size_mask, pgprot_t prot)
{
        return __kernel_physical_mapping_init(paddr_start, paddr_end,
                                              page_size_mask, prot, true);
}
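
/*
 * Usage sketch (hedged, not a real call site): mapping 1GB of RAM at
 * physical 4GB with 2MB pages could look like:
 *
 *	paddr_last = kernel_physical_mapping_init(0x100000000UL,
 *						  0x140000000UL,
 *						  1UL << PG_LEVEL_2M,
 *						  PAGE_KERNEL);
 *
 * Real callers derive page_size_mask from CPU features (PSE, GBPAGES);
 * see init_memory_mapping() in arch/x86/mm/init.c.
 */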

/*
 * This function is similar to kernel_physical_mapping_init() above with the
 * exception that it uses set_{pud,pmd}() instead of the set_{pud,pmd}_safe()
 * variants when updating the mappings. The caller is responsible for flushing
 * the TLBs after the function returns.
 */
unsigned long __meminit
kernel_physical_mapping_change(unsigned long paddr_start,
                               unsigned long paddr_end,
                               unsigned long page_size_mask)
{
        return __kernel_physical_mapping_init(paddr_start, paddr_end,
                                              page_size_mask, PAGE_KERNEL,
                                              false);
}

#ifndef CONFIG_NUMA
static __always_inline void x86_numa_init(void)
{
        memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
}
#endif

void __init initmem_init(void)
{
        x86_numa_init();
}

void __init paging_init(void)
{
        /*
         * Clear the default memory state for node 0.
         * Note: don't use nodes_clear() here; when NUMA support is not
         * compiled in, that really clears the state, and a later
         * node_set_state() will not set it back.
         */
        node_clear_state(0, N_MEMORY);
        node_clear_state(0, N_NORMAL_MEMORY);
}

#define PAGE_UNUSED 0xFD

/*
 * The unused vmemmap range, which was not yet memset(PAGE_UNUSED), ranges
 * from unused_pmd_start to the next PMD_SIZE boundary.
 */
static unsigned long unused_pmd_start __meminitdata;

static void __meminit vmemmap_flush_unused_pmd(void)
{
        if (!unused_pmd_start)
                return;
        /*
         * Clears [unused_pmd_start, next PMD_SIZE boundary)
         */
        memset((void *)unused_pmd_start, PAGE_UNUSED,
               ALIGN(unused_pmd_start, PMD_SIZE) - unused_pmd_start);
        unused_pmd_start = 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
/* Returns true if the PMD is completely unused and thus it can be freed */
static bool __meminit vmemmap_pmd_is_unused(unsigned long addr, unsigned long end)
{
        unsigned long start = ALIGN_DOWN(addr, PMD_SIZE);

        /*
         * Flush the unused range cache to ensure that memchr_inv() will work
         * for the whole range.
         */
        vmemmap_flush_unused_pmd();
        memset((void *)addr, PAGE_UNUSED, end - addr);

        return !memchr_inv((void *)start, PAGE_UNUSED, PMD_SIZE);
}
#endif

static void __meminit __vmemmap_use_sub_pmd(unsigned long start)
{
        /*
         * As we expect to add in the same granularity as we remove, it's
         * sufficient to mark only some piece as used to block the memmap
         * page from getting removed when removing some other adjacent memmap
         * (just in case the first memmap never gets initialized, e.g. because
         * the memory block never gets onlined).
         */
        memset((void *)start, 0, sizeof(struct page));
}

static void __meminit vmemmap_use_sub_pmd(unsigned long start, unsigned long end)
{
        /*
         * We only optimize if the new used range directly follows the
         * previously unused range (esp., when populating consecutive sections).
         */
        if (unused_pmd_start == start) {
                if (likely(IS_ALIGNED(end, PMD_SIZE)))
                        unused_pmd_start = 0;
                else
                        unused_pmd_start = end;
                return;
        }

        /*
         * If the range does not contiguously follow the previous one, make
         * sure to mark the unused range of the previous one so it can be
         * removed.
         */
        vmemmap_flush_unused_pmd();
        __vmemmap_use_sub_pmd(start);
}


static void __meminit vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end)
{
        const unsigned long page = ALIGN_DOWN(start, PMD_SIZE);

        vmemmap_flush_unused_pmd();

        /*
         * Could be our memmap page is filled with PAGE_UNUSED already from a
         * previous remove. Make sure to reset it.
         */
        __vmemmap_use_sub_pmd(start);

        /*
         * Mark with PAGE_UNUSED the unused parts of the new memmap range
         */
        if (!IS_ALIGNED(start, PMD_SIZE))
                memset((void *)page, PAGE_UNUSED, start - page);

        /*
         * We want to avoid memset(PAGE_UNUSED) when populating the vmemmap of
         * consecutive sections. Remember for the last added PMD where the
         * unused range begins.
         */
        if (!IS_ALIGNED(end, PMD_SIZE))
                unused_pmd_start = end;
}
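
/*
 * Worked example (hedged; numbers assume 4K pages and a 64-byte
 * struct page, i.e. 32KB of memmap per 2MB sub-section):
 *
 * Hot-adding a single 2MB sub-section populates 32KB of memmap inside a
 * 2MB vmemmap PMD. vmemmap_use_new_sub_pmd() memsets the unused head of
 * the PMD to PAGE_UNUSED and records the start of the unused tail in
 * unused_pmd_start. If the next sub-section follows contiguously,
 * vmemmap_use_sub_pmd() merely advances unused_pmd_start instead of
 * memsetting the tail again, and once the whole PMD is in use,
 * unused_pmd_start is reset to 0.
 */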

/*
 * Memory hotplug specific functions
 */
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * After memory hotplug the variables max_pfn, max_low_pfn and high_memory need
 * updating.
 */
static void update_end_of_memory_vars(u64 start, u64 size)
{
        unsigned long end_pfn = PFN_UP(start + size);

        if (end_pfn > max_pfn) {
                max_pfn = end_pfn;
                max_low_pfn = end_pfn;
                high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
        }
}
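
/*
 * Example (informational): hot-adding 2GB at physical 64GB gives
 * end_pfn = PFN_UP(0x1000000000ULL + 0x80000000) = 0x1080000, so
 * max_pfn, max_low_pfn and high_memory all move up to cover the
 * newly added range.
 */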

int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
              struct mhp_params *params)
{
        unsigned long end = ((start_pfn + nr_pages) << PAGE_SHIFT) - 1;
        int ret;

        if (WARN_ON_ONCE(end > DIRECT_MAP_PHYSMEM_END))
                return -ERANGE;

        ret = __add_pages(nid, start_pfn, nr_pages, params);
        WARN_ON_ONCE(ret);

        /*
         * Special case: add_pages() is called by memremap_pages() for adding
         * device private pages. Do not bump up max_pfn in the device private
         * path, because max_pfn changes affect dma_addressing_limited().
         *
         * dma_addressing_limited() returning true when max_pfn is the device's
         * addressable memory can force device drivers to use bounce buffers
         * and impact their performance negatively.
         */
        if (!params->pgmap)
                /* update max_pfn, max_low_pfn and high_memory */
                update_end_of_memory_vars(start_pfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT);

        return ret;
}

int arch_add_memory(int nid, u64 start, u64 size,
                    struct mhp_params *params)
{
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;

        init_memory_mapping(start, start + size, params->pgprot);

        return add_pages(nid, start_pfn, nr_pages, params);
}

static void free_reserved_pages(struct page *page, unsigned long nr_pages)
{
        while (nr_pages--)
                free_reserved_page(page++);
}

static void __meminit free_pagetable(struct page *page, int order)
{
        /* bootmem page has reserved flag */
        if (PageReserved(page)) {
                unsigned long nr_pages = 1 << order;
#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
                enum bootmem_type type = bootmem_type(page);

                if (type == SECTION_INFO || type == MIX_SECTION_INFO) {
                        while (nr_pages--)
                                put_page_bootmem(page++);
                } else {
                        free_reserved_pages(page, nr_pages);
                }
#else
                free_reserved_pages(page, nr_pages);
#endif
        } else {
                pagetable_free(page_ptdesc(page));
        }
}

static void __meminit free_hugepage_table(struct page *page,
                                          struct vmem_altmap *altmap)
{
        if (altmap)
                vmem_altmap_free(altmap, PMD_SIZE / PAGE_SIZE);
        else
                free_pagetable(page, get_order(PMD_SIZE));
}

static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
{
        pte_t *pte;
        int i;

        for (i = 0; i < PTRS_PER_PTE; i++) {
                pte = pte_start + i;
                if (!pte_none(*pte))
                        return;
        }

        /* free a pte table */
        free_pagetable(pmd_page(*pmd), 0);
        spin_lock(&init_mm.page_table_lock);
        pmd_clear(pmd);
        spin_unlock(&init_mm.page_table_lock);
}

static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
{
        pmd_t *pmd;
        int i;

        for (i = 0; i < PTRS_PER_PMD; i++) {
                pmd = pmd_start + i;
                if (!pmd_none(*pmd))
                        return;
        }

        /* free a pmd table */
        free_pagetable(pud_page(*pud), 0);
        spin_lock(&init_mm.page_table_lock);
        pud_clear(pud);
        spin_unlock(&init_mm.page_table_lock);
}

static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d)
{
        pud_t *pud;
        int i;

        for (i = 0; i < PTRS_PER_PUD; i++) {
                pud = pud_start + i;
                if (!pud_none(*pud))
                        return;
        }

        /* free a pud table */
        free_pagetable(p4d_page(*p4d), 0);
        spin_lock(&init_mm.page_table_lock);
        p4d_clear(p4d);
        spin_unlock(&init_mm.page_table_lock);
}

static void __meminit
remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
                 bool direct)
{
        unsigned long next, pages = 0;
        pte_t *pte;
        phys_addr_t phys_addr;

        pte = pte_start + pte_index(addr);
        for (; addr < end; addr = next, pte++) {
                next = (addr + PAGE_SIZE) & PAGE_MASK;
                if (next > end)
                        next = end;

                if (!pte_present(*pte))
                        continue;

                /*
                 * We mapped [0,1G) memory as identity mapping when
                 * initializing, in arch/x86/kernel/head_64.S. These
                 * pagetables cannot be removed.
                 */
                phys_addr = pte_val(*pte) + (addr & PAGE_MASK);
                if (phys_addr < (phys_addr_t)0x40000000)
                        return;

                if (!direct)
                        free_pagetable(pte_page(*pte), 0);

                spin_lock(&init_mm.page_table_lock);
                pte_clear(&init_mm, addr, pte);
                spin_unlock(&init_mm.page_table_lock);

                /* For non-direct mapping, pages means nothing. */
                pages++;
        }

        /* Call free_pte_table() in remove_pmd_table(). */
        flush_tlb_all();
        if (direct)
                update_page_count(PG_LEVEL_4K, -pages);
}

static void __meminit
remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
                 bool direct, struct vmem_altmap *altmap)
{
        unsigned long next, pages = 0;
        pte_t *pte_base;
        pmd_t *pmd;

        pmd = pmd_start + pmd_index(addr);
        for (; addr < end; addr = next, pmd++) {
                next = pmd_addr_end(addr, end);

                if (!pmd_present(*pmd))
                        continue;

                if (pmd_leaf(*pmd)) {
                        if (IS_ALIGNED(addr, PMD_SIZE) &&
                            IS_ALIGNED(next, PMD_SIZE)) {
                                if (!direct)
                                        free_hugepage_table(pmd_page(*pmd),
                                                            altmap);

                                spin_lock(&init_mm.page_table_lock);
                                pmd_clear(pmd);
                                spin_unlock(&init_mm.page_table_lock);
                                pages++;
                        } else if (vmemmap_pmd_is_unused(addr, next)) {
                                free_hugepage_table(pmd_page(*pmd),
                                                    altmap);
                                spin_lock(&init_mm.page_table_lock);
                                pmd_clear(pmd);
                                spin_unlock(&init_mm.page_table_lock);
                        }
                        continue;
                }

                pte_base = (pte_t *)pmd_page_vaddr(*pmd);
                remove_pte_table(pte_base, addr, next, direct);
                free_pte_table(pte_base, pmd);
        }

        /* Call free_pmd_table() in remove_pud_table(). */
        if (direct)
                update_page_count(PG_LEVEL_2M, -pages);
}

static void __meminit
remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
                 struct vmem_altmap *altmap, bool direct)
{
        unsigned long next, pages = 0;
        pmd_t *pmd_base;
        pud_t *pud;

        pud = pud_start + pud_index(addr);
        for (; addr < end; addr = next, pud++) {
                next = pud_addr_end(addr, end);

                if (!pud_present(*pud))
                        continue;

                if (pud_leaf(*pud) &&
                    IS_ALIGNED(addr, PUD_SIZE) &&
                    IS_ALIGNED(next, PUD_SIZE)) {
                        spin_lock(&init_mm.page_table_lock);
                        pud_clear(pud);
                        spin_unlock(&init_mm.page_table_lock);
                        pages++;
                        continue;
                }

                pmd_base = pmd_offset(pud, 0);
                remove_pmd_table(pmd_base, addr, next, direct, altmap);
                free_pmd_table(pmd_base, pud);
        }

        if (direct)
                update_page_count(PG_LEVEL_1G, -pages);
}

static void __meminit
remove_p4d_table(p4d_t *p4d_start, unsigned long addr, unsigned long end,
                 struct vmem_altmap *altmap, bool direct)
{
        unsigned long next, pages = 0;
        pud_t *pud_base;
        p4d_t *p4d;

        p4d = p4d_start + p4d_index(addr);
        for (; addr < end; addr = next, p4d++) {
                next = p4d_addr_end(addr, end);

                if (!p4d_present(*p4d))
                        continue;

                BUILD_BUG_ON(p4d_leaf(*p4d));

                pud_base = pud_offset(p4d, 0);
                remove_pud_table(pud_base, addr, next, altmap, direct);
                /*
                 * For 4-level page tables we do not want to free PUDs, but in the
                 * 5-level case we should free them. This code will have to change
                 * to adapt for boot-time switching between 4 and 5 level page tables.
                 */
                if (pgtable_l5_enabled())
                        free_pud_table(pud_base, p4d);
        }

        if (direct)
                update_page_count(PG_LEVEL_512G, -pages);
}

/* start and end are both virtual addresses. */
static void __meminit
remove_pagetable(unsigned long start, unsigned long end, bool direct,
                 struct vmem_altmap *altmap)
{
        unsigned long next;
        unsigned long addr;
        pgd_t *pgd;
        p4d_t *p4d;

        for (addr = start; addr < end; addr = next) {
                next = pgd_addr_end(addr, end);

                pgd = pgd_offset_k(addr);
                if (!pgd_present(*pgd))
                        continue;

                p4d = p4d_offset(pgd, 0);
                remove_p4d_table(p4d, addr, next, altmap, direct);
        }

        flush_tlb_all();
}

void __ref vmemmap_free(unsigned long start, unsigned long end,
                        struct vmem_altmap *altmap)
{
        VM_BUG_ON(!PAGE_ALIGNED(start));
        VM_BUG_ON(!PAGE_ALIGNED(end));

        remove_pagetable(start, end, false, altmap);
}

static void __meminit
kernel_physical_mapping_remove(unsigned long start, unsigned long end)
{
        start = (unsigned long)__va(start);
        end = (unsigned long)__va(end);

        remove_pagetable(start, end, true, NULL);
}

void __ref arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;

        __remove_pages(start_pfn, nr_pages, altmap);
        kernel_physical_mapping_remove(start, start + size);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

static struct kcore_list kcore_vsyscall;

static void __init register_page_bootmem_info(void)
{
#if defined(CONFIG_NUMA) || defined(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP)
        int i;

        for_each_online_node(i)
                register_page_bootmem_info_node(NODE_DATA(i));
#endif
}

/*
 * Pre-allocates page-table pages for the vmalloc area in the kernel page-table.
 * Only the level which needs to be synchronized between all page-tables is
 * allocated because the synchronization can be expensive.
 */
static void __init preallocate_vmalloc_pages(void)
{
        unsigned long addr;
        const char *lvl;

        for (addr = VMALLOC_START; addr <= VMEMORY_END; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
                pgd_t *pgd = pgd_offset_k(addr);
                p4d_t *p4d;
                pud_t *pud;

                lvl = "p4d";
                p4d = p4d_alloc(&init_mm, pgd, addr);
                if (!p4d)
                        goto failed;

                if (pgtable_l5_enabled())
                        continue;

                /*
                 * The goal here is to allocate all possibly required
                 * hardware page tables pointed to by the top hardware
                 * level.
                 *
                 * On 4-level systems, the P4D layer is folded away and
                 * the above code does no preallocation. Below, go down
                 * to the pud _software_ level to ensure the second
                 * hardware level is allocated on 4-level systems too.
                 */
                lvl = "pud";
                pud = pud_alloc(&init_mm, p4d, addr);
                if (!pud)
                        goto failed;
        }

        return;

failed:

        /*
         * The pages have to be there now or they will be missing in
         * process page-tables later.
         */
        panic("Failed to pre-allocate %s pages for vmalloc area\n", lvl);
}

void __init arch_mm_preinit(void)
{
        pci_iommu_alloc();
}

void __init mem_init(void)
{
        /* clear_bss() has already cleared the empty_zero_page */

        after_bootmem = 1;
        x86_init.hyper.init_after_bootmem();

        /*
         * Must be done after boot memory is put on freelist, because here we
         * might set fields in deferred struct pages that have not yet been
         * initialized, and memblock_free_all() initializes all the reserved
         * deferred pages for us.
         */
        register_page_bootmem_info();

        /* Register memory areas for /proc/kcore */
        if (get_gate_vma(&init_mm))
                kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR, PAGE_SIZE, KCORE_USER);

        preallocate_vmalloc_pages();
}

int kernel_set_to_readonly;

void mark_rodata_ro(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long rodata_start = PFN_ALIGN(__start_rodata);
        unsigned long end = (unsigned long)__end_rodata_hpage_align;
        unsigned long text_end = PFN_ALIGN(_etext);
        unsigned long rodata_end = PFN_ALIGN(__end_rodata);
        unsigned long all_end;

        printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
               (end - start) >> 10);
        set_memory_ro(start, (end - start) >> PAGE_SHIFT);

        kernel_set_to_readonly = 1;

        /*
         * The rodata/data/bss/brk section (but not the kernel text!)
         * should also be not-executable.
         *
         * We align all_end to PMD_SIZE because the existing mapping
         * is a full PMD. If we would align _brk_end to PAGE_SIZE we
         * would split the PMD, and the remainder between _brk_end and
         * the end of the PMD would remain mapped executable.
         *
         * Any PMD which was set up after the one which covers _brk_end
         * has been zapped already via cleanup_highmap().
         */
        all_end = roundup((unsigned long)_brk_end, PMD_SIZE);
        set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT);

        set_ftrace_ops_ro();

#ifdef CONFIG_CPA_DEBUG
        printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end);
        set_memory_rw(start, (end-start) >> PAGE_SHIFT);

        printk(KERN_INFO "Testing CPA: again\n");
        set_memory_ro(start, (end-start) >> PAGE_SHIFT);
#endif

        free_kernel_image_pages("unused kernel image (text/rodata gap)",
                                (void *)text_end, (void *)rodata_start);
        free_kernel_image_pages("unused kernel image (rodata/data gap)",
                                (void *)rodata_end, (void *)_sdata);
}

/*
 * Block size is the minimum amount of memory which can be hotplugged or
 * hotremoved. It must be a power of two and must be equal to or larger than
 * MIN_MEMORY_BLOCK_SIZE.
 */
#define MAX_BLOCK_SIZE (2UL << 30)

/* Amount of RAM needed to start using large blocks */
#define MEM_SIZE_FOR_LARGE_BLOCK (64UL << 30)

/* Adjustable memory block size */
static unsigned long set_memory_block_size;
int __init set_memory_block_size_order(unsigned int order)
{
        unsigned long size = 1UL << order;

        if (size > MEM_SIZE_FOR_LARGE_BLOCK || size < MIN_MEMORY_BLOCK_SIZE)
                return -EINVAL;

        set_memory_block_size = size;
        return 0;
}
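
/*
 * Usage sketch (hedged): early platform code could request 1GB memory
 * blocks with
 *
 *	set_memory_block_size_order(30);
 *
 * Orders below ilog2(MIN_MEMORY_BLOCK_SIZE) or above
 * ilog2(MEM_SIZE_FOR_LARGE_BLOCK) (i.e. 36) are rejected with -EINVAL.
 */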

static unsigned long probe_memory_block_size(void)
{
        unsigned long boot_mem_end = max_pfn << PAGE_SHIFT;
        unsigned long bz;

        /* If the memory block size has been set, then use it */
        bz = set_memory_block_size;
        if (bz)
                goto done;

        /* Use a regular block if RAM is smaller than MEM_SIZE_FOR_LARGE_BLOCK */
        if (boot_mem_end < MEM_SIZE_FOR_LARGE_BLOCK) {
                bz = MIN_MEMORY_BLOCK_SIZE;
                goto done;
        }

        /*
         * When hotplug alignment is not a concern, maximize blocksize
         * to minimize overhead. Otherwise, align to the lesser of the
         * advised alignment and the end-of-memory alignment.
         */
        bz = memory_block_advised_max_size();
        if (!bz) {
                bz = MAX_BLOCK_SIZE;
                if (!cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
                        goto done;
        } else {
                bz = max(min(bz, MAX_BLOCK_SIZE), MIN_MEMORY_BLOCK_SIZE);
        }

        /* Find the largest allowed block size that aligns to memory end */
        for (; bz > MIN_MEMORY_BLOCK_SIZE; bz >>= 1) {
                if (IS_ALIGNED(boot_mem_end, bz))
                        break;
        }
done:
        pr_info("x86/mm: Memory block size: %ldMB\n", bz >> 20);

        return bz;
}
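
/*
 * Worked example (informational): with 65GB of RAM on a hypervisor and
 * no advised size, boot_mem_end = 0x1040000000 is not 2GB-aligned, so
 * the loop above halves bz from MAX_BLOCK_SIZE (2GB) to 1GB, which does
 * divide 0x1040000000 evenly, giving 1GB memory blocks.
 */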

static unsigned long memory_block_size_probed;
unsigned long memory_block_size_bytes(void)
{
        if (!memory_block_size_probed)
                memory_block_size_probed = probe_memory_block_size();

        return memory_block_size_probed;
}

/*
 * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
 */
static long __meminitdata addr_start, addr_end;
static void __meminitdata *p_start, *p_end;
static int __meminitdata node_start;

void __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
                               unsigned long addr, unsigned long next)
{
        pte_t entry;

        entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
                        PAGE_KERNEL_LARGE);
        set_pmd(pmd, __pmd(pte_val(entry)));

        /* check to see if we have contiguous blocks */
        if (p_end != p || node_start != node) {
                if (p_start)
                        pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n",
                                 addr_start, addr_end-1, p_start, p_end-1, node_start);
                addr_start = addr;
                node_start = node;
                p_start = p;
        }

        addr_end = addr + PMD_SIZE;
        p_end = p + PMD_SIZE;

        if (!IS_ALIGNED(addr, PMD_SIZE) ||
            !IS_ALIGNED(next, PMD_SIZE))
                vmemmap_use_new_sub_pmd(addr, next);
}

int __meminit vmemmap_check_pmd(pmd_t *pmd, int node,
                                unsigned long addr, unsigned long next)
{
        int large = pmd_leaf(*pmd);

        if (pmd_leaf(*pmd)) {
                vmemmap_verify((pte_t *)pmd, node, addr, next);
                vmemmap_use_sub_pmd(addr, next);
        }

        return large;
}

int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
                               struct vmem_altmap *altmap)
{
        int err;

        VM_BUG_ON(!PAGE_ALIGNED(start));
        VM_BUG_ON(!PAGE_ALIGNED(end));

        if (end - start < PAGES_PER_SECTION * sizeof(struct page))
                err = vmemmap_populate_basepages(start, end, node, NULL);
        else if (boot_cpu_has(X86_FEATURE_PSE))
                err = vmemmap_populate_hugepages(start, end, node, altmap);
        else if (altmap) {
                pr_err_once("%s: no cpu support for altmap allocations\n",
                            __func__);
                err = -ENOMEM;
        } else
                err = vmemmap_populate_basepages(start, end, node, NULL);
        if (!err)
                sync_global_pgds(start, end - 1);
        return err;
}

#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
void register_page_bootmem_memmap(unsigned long section_nr,
                                  struct page *start_page, unsigned long nr_pages)
{
        unsigned long addr = (unsigned long)start_page;
        unsigned long end = (unsigned long)(start_page + nr_pages);
        unsigned long next;
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        unsigned int nr_pmd_pages;
        struct page *page;

        for (; addr < end; addr = next) {
                pte_t *pte = NULL;

                pgd = pgd_offset_k(addr);
                if (pgd_none(*pgd)) {
                        next = (addr + PAGE_SIZE) & PAGE_MASK;
                        continue;
                }
                get_page_bootmem(section_nr, pgd_page(*pgd), MIX_SECTION_INFO);

                p4d = p4d_offset(pgd, addr);
                if (p4d_none(*p4d)) {
                        next = (addr + PAGE_SIZE) & PAGE_MASK;
                        continue;
                }
                get_page_bootmem(section_nr, p4d_page(*p4d), MIX_SECTION_INFO);

                pud = pud_offset(p4d, addr);
                if (pud_none(*pud)) {
                        next = (addr + PAGE_SIZE) & PAGE_MASK;
                        continue;
                }
                get_page_bootmem(section_nr, pud_page(*pud), MIX_SECTION_INFO);

                pmd = pmd_offset(pud, addr);
                if (pmd_none(*pmd)) {
                        next = (addr + PAGE_SIZE) & PAGE_MASK;
                        continue;
                }

                if (!boot_cpu_has(X86_FEATURE_PSE) || !pmd_leaf(*pmd)) {
                        next = (addr + PAGE_SIZE) & PAGE_MASK;
                        get_page_bootmem(section_nr, pmd_page(*pmd),
                                         MIX_SECTION_INFO);

                        pte = pte_offset_kernel(pmd, addr);
                        if (pte_none(*pte))
                                continue;
                        get_page_bootmem(section_nr, pte_page(*pte),
                                         SECTION_INFO);
                } else {
                        next = pmd_addr_end(addr, end);
                        nr_pmd_pages = (next - addr) >> PAGE_SHIFT;
                        page = pmd_page(*pmd);
                        while (nr_pmd_pages--)
                                get_page_bootmem(section_nr, page++,
                                                 SECTION_INFO);
                }
        }
}
#endif

void __meminit vmemmap_populate_print_last(void)
{
        if (p_start) {
                pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n",
                         addr_start, addr_end-1, p_start, p_end-1, node_start);
                p_start = NULL;
                p_end = NULL;
                node_start = 0;
        }
}