// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@ucw.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/memremap.h>
#include <linux/nmi.h>
#include <linux/gfp.h>
#include <linux/kcore.h>
#include <linux/bootmem_info.h>

#include <asm/processor.h>
#include <asm/bios_ebda.h>
#include <linux/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820/api.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/numa.h>
#include <asm/set_memory.h>
#include <asm/init.h>
#include <asm/uv/uv.h>
#include <asm/setup.h>
#include <asm/ftrace.h>

#include "mm_internal.h"

#include "ident_map.c"

#define DEFINE_POPULATE(fname, type1, type2, init)		\
static inline void fname##_init(struct mm_struct *mm,		\
		type1##_t *arg1, type2##_t *arg2, bool init)	\
{								\
	if (init)						\
		fname##_safe(mm, arg1, arg2);			\
	else							\
		fname(mm, arg1, arg2);				\
}

DEFINE_POPULATE(p4d_populate, p4d, pud, init)
DEFINE_POPULATE(pgd_populate, pgd, p4d, init)
DEFINE_POPULATE(pud_populate, pud, pmd, init)
DEFINE_POPULATE(pmd_populate_kernel, pmd, pte, init)

#define DEFINE_ENTRY(type1, type2, init)			\
static inline void set_##type1##_init(type1##_t *arg1,		\
			type2##_t arg2, bool init)		\
{								\
	if (init)						\
		set_##type1##_safe(arg1, arg2);			\
	else							\
		set_##type1(arg1, arg2);			\
}

DEFINE_ENTRY(p4d, p4d, init)
DEFINE_ENTRY(pud, pud, init)
DEFINE_ENTRY(pmd, pmd, init)
DEFINE_ENTRY(pte, pte, init)

static inline pgprot_t prot_sethuge(pgprot_t prot)
{
	WARN_ON_ONCE(pgprot_val(prot) & _PAGE_PAT);

	return __pgprot(pgprot_val(prot) | _PAGE_PSE);
}

/*
 * NOTE: pagetable_init allocates all the fixmap pagetables contiguously in
 * physical space, so we can cache the place of the first one and move
 * around without checking the pgd every time.
 */

/* Bits supported by the hardware: */
pteval_t __supported_pte_mask __read_mostly = ~0;
/* Bits allowed in normal kernel mappings: */
pteval_t __default_kernel_pte_mask __read_mostly = ~0;
EXPORT_SYMBOL_GPL(__supported_pte_mask);
/* Used in PAGE_KERNEL_* macros which are reasonably used out-of-tree: */
EXPORT_SYMBOL(__default_kernel_pte_mask);

int force_personality32;

/*
 * noexec32=on|off
 * Control the non-executable heap for 32-bit processes.
 *
 * on	PROT_READ does not imply PROT_EXEC for 32-bit processes (default)
 * off	PROT_READ implies PROT_EXEC
 */
static int __init nonx32_setup(char *str)
{
	if (!strcmp(str, "on"))
		force_personality32 &= ~READ_IMPLIES_EXEC;
	else if (!strcmp(str, "off"))
		force_personality32 |= READ_IMPLIES_EXEC;
	return 1;
}
__setup("noexec32=", nonx32_setup);
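
/*
 * With 5-level paging the kernel mapping level is the PGD itself, so any
 * newly installed kernel PGD entry has to be copied into every PGD on
 * pgd_list; the _l4 variant below does the same one level down, at the
 * (folded) p4d level.
 */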
static void sync_global_pgds_l5(unsigned long start, unsigned long end)
{
	unsigned long addr;

	for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
		const pgd_t *pgd_ref = pgd_offset_k(addr);
		struct page *page;

		/* Check for overflow */
		if (addr < start)
			break;

		if (pgd_none(*pgd_ref))
			continue;

		spin_lock(&pgd_lock);
		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			spinlock_t *pgt_lock;

			pgd = (pgd_t *)page_address(page) + pgd_index(addr);
			/* the pgt_lock only for Xen */
			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
			spin_lock(pgt_lock);

			if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
				BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));

			if (pgd_none(*pgd))
				set_pgd(pgd, *pgd_ref);

			spin_unlock(pgt_lock);
		}
		spin_unlock(&pgd_lock);
	}
}

static void sync_global_pgds_l4(unsigned long start, unsigned long end)
{
	unsigned long addr;

	for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
		pgd_t *pgd_ref = pgd_offset_k(addr);
		const p4d_t *p4d_ref;
		struct page *page;

		/*
		 * With folded p4d, pgd_none() is always false, so we need to
		 * handle the synchronization at the p4d level.
		 */
		MAYBE_BUILD_BUG_ON(pgd_none(*pgd_ref));
		p4d_ref = p4d_offset(pgd_ref, addr);

		if (p4d_none(*p4d_ref))
			continue;

		spin_lock(&pgd_lock);
		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			p4d_t *p4d;
			spinlock_t *pgt_lock;

			pgd = (pgd_t *)page_address(page) + pgd_index(addr);
			p4d = p4d_offset(pgd, addr);
			/* the pgt_lock only for Xen */
			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
			spin_lock(pgt_lock);

			if (!p4d_none(*p4d_ref) && !p4d_none(*p4d))
				BUG_ON(p4d_pgtable(*p4d)
				       != p4d_pgtable(*p4d_ref));

			if (p4d_none(*p4d))
				set_p4d(p4d, *p4d_ref);

			spin_unlock(pgt_lock);
		}
		spin_unlock(&pgd_lock);
	}
}

/*
 * When memory is added, make sure all the processes' MMs have
 * suitable PGD entries in the local PGD level page.
 */
static void sync_global_pgds(unsigned long start, unsigned long end)
{
	if (pgtable_l5_enabled())
		sync_global_pgds_l5(start, end);
	else
		sync_global_pgds_l4(start, end);
}

/*
 * NOTE: This function is marked __ref because it calls the __init function
 * memblock_alloc(). It's safe to do so ONLY while after_bootmem == 0.
 */
static __ref void *spp_getpage(void)
{
	void *ptr;

	if (after_bootmem)
		ptr = (void *) get_zeroed_page(GFP_ATOMIC);
	else
		ptr = memblock_alloc(PAGE_SIZE, PAGE_SIZE);

	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
		panic("set_pte_phys: cannot allocate page data %s\n",
			after_bootmem ? "after bootmem" : "");
	}

	pr_debug("spp_getpage %p\n", ptr);

	return ptr;
}

static p4d_t *fill_p4d(pgd_t *pgd, unsigned long vaddr)
{
	if (pgd_none(*pgd)) {
		p4d_t *p4d = (p4d_t *)spp_getpage();
		pgd_populate(&init_mm, pgd, p4d);
		if (p4d != p4d_offset(pgd, 0))
			printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
			       p4d, p4d_offset(pgd, 0));
	}
	return p4d_offset(pgd, vaddr);
}

static pud_t *fill_pud(p4d_t *p4d, unsigned long vaddr)
{
	if (p4d_none(*p4d)) {
		pud_t *pud = (pud_t *)spp_getpage();
		p4d_populate(&init_mm, p4d, pud);
		if (pud != pud_offset(p4d, 0))
			printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
			       pud, pud_offset(p4d, 0));
	}
	return pud_offset(p4d, vaddr);
}

static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
{
	if (pud_none(*pud)) {
		pmd_t *pmd = (pmd_t *) spp_getpage();
		pud_populate(&init_mm, pud, pmd);
		if (pmd != pmd_offset(pud, 0))
			printk(KERN_ERR "PAGETABLE BUG #02! %p <-> %p\n",
			       pmd, pmd_offset(pud, 0));
	}
	return pmd_offset(pud, vaddr);
}

static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = (pte_t *) spp_getpage();
		pmd_populate_kernel(&init_mm, pmd, pte);
		if (pte != pte_offset_kernel(pmd, 0))
			printk(KERN_ERR "PAGETABLE BUG #03!\n");
	}
	return pte_offset_kernel(pmd, vaddr);
}

static void __set_pte_vaddr(pud_t *pud, unsigned long vaddr, pte_t new_pte)
{
	pmd_t *pmd = fill_pmd(pud, vaddr);
	pte_t *pte = fill_pte(pmd, vaddr);

	set_pte(pte, new_pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	flush_tlb_one_kernel(vaddr);
}

void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte)
{
	p4d_t *p4d = p4d_page + p4d_index(vaddr);
	pud_t *pud = fill_pud(p4d, vaddr);

	__set_pte_vaddr(pud, vaddr, new_pte);
}

void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
{
	pud_t *pud = pud_page + pud_index(vaddr);

	__set_pte_vaddr(pud, vaddr, new_pte);
}

void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
{
	pgd_t *pgd;
	p4d_t *p4d_page;

	pr_debug("set_pte_vaddr %lx to %lx\n", vaddr, native_pte_val(pteval));

	pgd = pgd_offset_k(vaddr);
	if (pgd_none(*pgd)) {
		printk(KERN_ERR
			"PGD FIXMAP MISSING, it should be setup in head.S!\n");
		return;
	}

	p4d_page = p4d_offset(pgd, 0);
	set_pte_vaddr_p4d(p4d_page, vaddr, pteval);
}

pmd_t * __init populate_extra_pmd(unsigned long vaddr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset_k(vaddr);
	p4d = fill_p4d(pgd, vaddr);
	pud = fill_pud(p4d, vaddr);
	return fill_pmd(pud, vaddr);
}

pte_t * __init populate_extra_pte(unsigned long vaddr)
{
	pmd_t *pmd;

	pmd = populate_extra_pmd(vaddr);
	return fill_pte(pmd, vaddr);
}

/*
 * Create large page table mappings for a range of physical addresses.
 */
static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
					enum page_cache_mode cache)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pgprot_t prot;

	pgprot_val(prot) = pgprot_val(PAGE_KERNEL_LARGE) |
		protval_4k_2_large(cachemode2protval(cache));
	BUG_ON((phys & ~PMD_MASK) || (size & ~PMD_MASK));
	for (; size; phys += PMD_SIZE, size -= PMD_SIZE) {
		pgd = pgd_offset_k((unsigned long)__va(phys));
		if (pgd_none(*pgd)) {
			p4d = (p4d_t *) spp_getpage();
			set_pgd(pgd, __pgd(__pa(p4d) | _KERNPG_TABLE |
						_PAGE_USER));
		}
		p4d = p4d_offset(pgd, (unsigned long)__va(phys));
		if (p4d_none(*p4d)) {
			pud = (pud_t *) spp_getpage();
			set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE |
						_PAGE_USER));
		}
		pud = pud_offset(p4d, (unsigned long)__va(phys));
		if (pud_none(*pud)) {
			pmd = (pmd_t *) spp_getpage();
			set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
						_PAGE_USER));
		}
		pmd = pmd_offset(pud, phys);
		BUG_ON(!pmd_none(*pmd));
		set_pmd(pmd, __pmd(phys | pgprot_val(prot)));
	}
}

void __init init_extra_mapping_wb(unsigned long phys, unsigned long size)
{
	__init_extra_mapping(phys, size, _PAGE_CACHE_MODE_WB);
}

void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
{
	__init_extra_mapping(phys, size, _PAGE_CACHE_MODE_UC);
}

/*
 * The head.S code sets up the kernel high mapping:
 *
 *   from __START_KERNEL_map to __START_KERNEL_map + size (== _end-_text)
 *
 * phys_base holds the negative offset to the kernel, which is added
 * to the compile time generated pmds. This results in invalid pmds up
 * to the point where we hit the physaddr 0 mapping.
 *
 * We limit the mappings to the region from _text to _brk_end.  _brk_end
 * is rounded up to the 2MB boundary. This catches the invalid pmds as
 * well, as they are located before _text:
 */
void __init cleanup_highmap(void)
{
	unsigned long vaddr = __START_KERNEL_map;
	unsigned long vaddr_end = __START_KERNEL_map + KERNEL_IMAGE_SIZE;
	unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
	pmd_t *pmd = level2_kernel_pgt;

	/*
	 * Native path, max_pfn_mapped is not set yet.
	 * Xen has valid max_pfn_mapped set in
	 * arch/x86/xen/mmu.c:xen_setup_kernel_pagetable().
	 */
	if (max_pfn_mapped)
		vaddr_end = __START_KERNEL_map + (max_pfn_mapped << PAGE_SHIFT);

	for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) {
		if (pmd_none(*pmd))
			continue;
		if (vaddr < (unsigned long) _text || vaddr > end)
			set_pmd(pmd, __pmd(0));
	}
}

/*
 * Create PTE level page table mapping for physical addresses.
 * It returns the last physical address mapped.
 */
static unsigned long __meminit
phys_pte_init(pte_t *pte_page, unsigned long paddr, unsigned long paddr_end,
	      pgprot_t prot, bool init)
{
	unsigned long pages = 0, paddr_next;
	unsigned long paddr_last = paddr_end;
	pte_t *pte;
	int i;

	pte = pte_page + pte_index(paddr);
	i = pte_index(paddr);

	for (; i < PTRS_PER_PTE; i++, paddr = paddr_next, pte++) {
		paddr_next = (paddr & PAGE_MASK) + PAGE_SIZE;
		if (paddr >= paddr_end) {
			if (!after_bootmem &&
			    !e820__mapped_any(paddr & PAGE_MASK, paddr_next,
					     E820_TYPE_RAM) &&
			    !e820__mapped_any(paddr & PAGE_MASK, paddr_next,
					     E820_TYPE_ACPI))
				set_pte_init(pte, __pte(0), init);
			continue;
		}

		/*
		 * We will re-use the existing mapping.
		 * Xen for example has some special requirements, like mapping
		 * pagetable pages as RO. So assume whoever pre-set up these
		 * mappings knew what they were doing.
		 */
		if (!pte_none(*pte)) {
			if (!after_bootmem)
				pages++;
			continue;
		}

		if (0)
			pr_info("   pte=%p addr=%lx pte=%016lx\n", pte, paddr,
				pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL).pte);
		pages++;
		set_pte_init(pte, pfn_pte(paddr >> PAGE_SHIFT, prot), init);
		paddr_last = (paddr & PAGE_MASK) + PAGE_SIZE;
	}

	update_page_count(PG_LEVEL_4K, pages);

	return paddr_last;
}

/*
 * Create PMD level page table mapping for physical addresses. The virtual
 * and physical address have to be aligned at this level.
 * It returns the last physical address mapped.
 */
static unsigned long __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
	      unsigned long page_size_mask, pgprot_t prot, bool init)
{
	unsigned long pages = 0, paddr_next;
	unsigned long paddr_last = paddr_end;

	int i = pmd_index(paddr);

	for (; i < PTRS_PER_PMD; i++, paddr = paddr_next) {
		pmd_t *pmd = pmd_page + pmd_index(paddr);
		pte_t *pte;
		pgprot_t new_prot = prot;

		paddr_next = (paddr & PMD_MASK) + PMD_SIZE;
		if (paddr >= paddr_end) {
			if (!after_bootmem &&
			    !e820__mapped_any(paddr & PMD_MASK, paddr_next,
					     E820_TYPE_RAM) &&
			    !e820__mapped_any(paddr & PMD_MASK, paddr_next,
					     E820_TYPE_ACPI))
				set_pmd_init(pmd, __pmd(0), init);
			continue;
		}

		if (!pmd_none(*pmd)) {
			if (!pmd_leaf(*pmd)) {
				spin_lock(&init_mm.page_table_lock);
				pte = (pte_t *)pmd_page_vaddr(*pmd);
				paddr_last = phys_pte_init(pte, paddr,
							   paddr_end, prot,
							   init);
				spin_unlock(&init_mm.page_table_lock);
				continue;
			}
			/*
			 * If we are ok with PG_LEVEL_2M mapping, then we will
			 * use the existing mapping.
			 *
			 * Otherwise, we will split the large page mapping but
			 * use the same existing protection bits except for
			 * large page, so that we don't violate Intel's TLB
			 * Application note (317080) which says, while changing
			 * the page sizes, new and old translations should
			 * not differ with respect to page frame and
			 * attributes.
			 */
			if (page_size_mask & (1 << PG_LEVEL_2M)) {
				if (!after_bootmem)
					pages++;
				paddr_last = paddr_next;
				continue;
			}
			new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
		}

		if (page_size_mask & (1<<PG_LEVEL_2M)) {
			pages++;
			spin_lock(&init_mm.page_table_lock);
			set_pmd_init(pmd,
				     pfn_pmd(paddr >> PAGE_SHIFT, prot_sethuge(prot)),
				     init);
			spin_unlock(&init_mm.page_table_lock);
			paddr_last = paddr_next;
			continue;
		}

		pte = alloc_low_page();
		paddr_last = phys_pte_init(pte, paddr, paddr_end, new_prot, init);

		spin_lock(&init_mm.page_table_lock);
		pmd_populate_kernel_init(&init_mm, pmd, pte, init);
		spin_unlock(&init_mm.page_table_lock);
	}
	update_page_count(PG_LEVEL_2M, pages);
	return paddr_last;
}

/*
 * Create PUD level page table mapping for physical addresses. The virtual
 * and physical address do not have to be aligned at this level. KASLR can
 * randomize virtual addresses up to this level.
 * It returns the last physical address mapped.
 */
static unsigned long __meminit
phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
	      unsigned long page_size_mask, pgprot_t _prot, bool init)
{
	unsigned long pages = 0, paddr_next;
	unsigned long paddr_last = paddr_end;
	unsigned long vaddr = (unsigned long)__va(paddr);
	int i = pud_index(vaddr);

	for (; i < PTRS_PER_PUD; i++, paddr = paddr_next) {
		pud_t *pud;
		pmd_t *pmd;
		pgprot_t prot = _prot;

		vaddr = (unsigned long)__va(paddr);
		pud = pud_page + pud_index(vaddr);
		paddr_next = (paddr & PUD_MASK) + PUD_SIZE;

		if (paddr >= paddr_end) {
			if (!after_bootmem &&
			    !e820__mapped_any(paddr & PUD_MASK, paddr_next,
					     E820_TYPE_RAM) &&
			    !e820__mapped_any(paddr & PUD_MASK, paddr_next,
					     E820_TYPE_ACPI))
				set_pud_init(pud, __pud(0), init);
			continue;
		}

		if (!pud_none(*pud)) {
			if (!pud_leaf(*pud)) {
				pmd = pmd_offset(pud, 0);
				paddr_last = phys_pmd_init(pmd, paddr,
							   paddr_end,
							   page_size_mask,
							   prot, init);
				continue;
			}
			/*
			 * If we are ok with PG_LEVEL_1G mapping, then we will
			 * use the existing mapping.
			 *
			 * Otherwise, we will split the gbpage mapping but use
			 * the same existing protection bits except for large
			 * page, so that we don't violate Intel's TLB
			 * Application note (317080) which says, while changing
			 * the page sizes, new and old translations should
			 * not differ with respect to page frame and
			 * attributes.
			 */
			if (page_size_mask & (1 << PG_LEVEL_1G)) {
				if (!after_bootmem)
					pages++;
				paddr_last = paddr_next;
				continue;
			}
			prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
		}

		if (page_size_mask & (1<<PG_LEVEL_1G)) {
			pages++;
			spin_lock(&init_mm.page_table_lock);
			set_pud_init(pud,
				     pfn_pud(paddr >> PAGE_SHIFT, prot_sethuge(prot)),
				     init);
			spin_unlock(&init_mm.page_table_lock);
			paddr_last = paddr_next;
			continue;
		}

		pmd = alloc_low_page();
		paddr_last = phys_pmd_init(pmd, paddr, paddr_end,
					   page_size_mask, prot, init);

		spin_lock(&init_mm.page_table_lock);
		pud_populate_init(&init_mm, pud, pmd, init);
		spin_unlock(&init_mm.page_table_lock);
	}

	update_page_count(PG_LEVEL_1G, pages);

	return paddr_last;
}
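
/*
 * Create P4D level page table mapping for physical addresses. With 4-level
 * paging the P4D is folded and this simply calls down to phys_pud_init().
 * It returns the last physical address mapped.
 */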
static unsigned long __meminit
phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
	      unsigned long page_size_mask, pgprot_t prot, bool init)
{
	unsigned long vaddr, vaddr_end, vaddr_next, paddr_next, paddr_last;

	paddr_last = paddr_end;
	vaddr = (unsigned long)__va(paddr);
	vaddr_end = (unsigned long)__va(paddr_end);

	if (!pgtable_l5_enabled())
		return phys_pud_init((pud_t *) p4d_page, paddr, paddr_end,
				     page_size_mask, prot, init);

	for (; vaddr < vaddr_end; vaddr = vaddr_next) {
		p4d_t *p4d = p4d_page + p4d_index(vaddr);
		pud_t *pud;

		vaddr_next = (vaddr & P4D_MASK) + P4D_SIZE;
		paddr = __pa(vaddr);

		if (paddr >= paddr_end) {
			paddr_next = __pa(vaddr_next);
			if (!after_bootmem &&
			    !e820__mapped_any(paddr & P4D_MASK, paddr_next,
					     E820_TYPE_RAM) &&
			    !e820__mapped_any(paddr & P4D_MASK, paddr_next,
					     E820_TYPE_ACPI))
				set_p4d_init(p4d, __p4d(0), init);
			continue;
		}

		if (!p4d_none(*p4d)) {
			pud = pud_offset(p4d, 0);
			paddr_last = phys_pud_init(pud, paddr, __pa(vaddr_end),
						   page_size_mask, prot, init);
			continue;
		}

		pud = alloc_low_page();
		paddr_last = phys_pud_init(pud, paddr, __pa(vaddr_end),
					   page_size_mask, prot, init);

		spin_lock(&init_mm.page_table_lock);
		p4d_populate_init(&init_mm, p4d, pud, init);
		spin_unlock(&init_mm.page_table_lock);
	}

	return paddr_last;
}
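
/*
 * Worker for kernel_physical_mapping_init() and
 * kernel_physical_mapping_change(): walks the PGD range covering
 * [paddr_start, paddr_end) and builds the direct mapping. If any top-level
 * entry was added, the kernel PGDs are synchronized into all other page
 * tables afterwards.
 */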
static unsigned long __meminit
__kernel_physical_mapping_init(unsigned long paddr_start,
			       unsigned long paddr_end,
			       unsigned long page_size_mask,
			       pgprot_t prot, bool init)
{
	bool pgd_changed = false;
	unsigned long vaddr, vaddr_start, vaddr_end, vaddr_next, paddr_last;

	paddr_last = paddr_end;
	vaddr = (unsigned long)__va(paddr_start);
	vaddr_end = (unsigned long)__va(paddr_end);
	vaddr_start = vaddr;

	for (; vaddr < vaddr_end; vaddr = vaddr_next) {
		pgd_t *pgd = pgd_offset_k(vaddr);
		p4d_t *p4d;

		vaddr_next = (vaddr & PGDIR_MASK) + PGDIR_SIZE;

		if (pgd_val(*pgd)) {
			p4d = (p4d_t *)pgd_page_vaddr(*pgd);
			paddr_last = phys_p4d_init(p4d, __pa(vaddr),
						   __pa(vaddr_end),
						   page_size_mask,
						   prot, init);
			continue;
		}

		p4d = alloc_low_page();
		paddr_last = phys_p4d_init(p4d, __pa(vaddr), __pa(vaddr_end),
					   page_size_mask, prot, init);

		spin_lock(&init_mm.page_table_lock);
		if (pgtable_l5_enabled())
			pgd_populate_init(&init_mm, pgd, p4d, init);
		else
			p4d_populate_init(&init_mm, p4d_offset(pgd, vaddr),
					  (pud_t *) p4d, init);

		spin_unlock(&init_mm.page_table_lock);
		pgd_changed = true;
	}

	if (pgd_changed)
		sync_global_pgds(vaddr_start, vaddr_end - 1);

	return paddr_last;
}

/*
 * Create page table mapping for the physical memory for specific physical
 * addresses. Note that it can only be used to populate non-present entries.
 * The virtual and physical addresses have to be aligned on PMD level
 * down. It returns the last physical address mapped.
 */
unsigned long __meminit
kernel_physical_mapping_init(unsigned long paddr_start,
			     unsigned long paddr_end,
			     unsigned long page_size_mask, pgprot_t prot)
{
	return __kernel_physical_mapping_init(paddr_start, paddr_end,
					      page_size_mask, prot, true);
}

/*
 * This function is similar to kernel_physical_mapping_init() above with the
 * exception that it uses set_{pud,pmd}() instead of the set_{pud,pmd}_safe()
 * functions when updating the mapping. The caller is responsible for flushing
 * the TLBs after the function returns.
 */
unsigned long __meminit
kernel_physical_mapping_change(unsigned long paddr_start,
			       unsigned long paddr_end,
			       unsigned long page_size_mask)
{
	return __kernel_physical_mapping_init(paddr_start, paddr_end,
					      page_size_mask, PAGE_KERNEL,
					      false);
}

#ifndef CONFIG_NUMA
static inline void x86_numa_init(void)
{
	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
}
#endif

void __init initmem_init(void)
{
	x86_numa_init();
}

void __init paging_init(void)
{
	sparse_init();

	/*
	 * clear the default setting with node 0
	 * note: don't use nodes_clear here, that is really clearing when
	 *	 numa support is not compiled in, and later node_set_state
	 *	 will not set it back.
	 */
	node_clear_state(0, N_MEMORY);
	node_clear_state(0, N_NORMAL_MEMORY);

	zone_sizes_init();
}

#define PAGE_UNUSED 0xFD

/*
 * The unused vmemmap range, which was not yet memset(PAGE_UNUSED), ranges
 * from unused_pmd_start to the next PMD_SIZE boundary.
 */
static unsigned long unused_pmd_start __meminitdata;

static void __meminit vmemmap_flush_unused_pmd(void)
{
	if (!unused_pmd_start)
		return;
	/*
	 * Clears (unused_pmd_start, PMD_END]
	 */
	memset((void *)unused_pmd_start, PAGE_UNUSED,
	       ALIGN(unused_pmd_start, PMD_SIZE) - unused_pmd_start);
	unused_pmd_start = 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
/* Returns true if the PMD is completely unused and thus it can be freed */
static bool __meminit vmemmap_pmd_is_unused(unsigned long addr, unsigned long end)
{
	unsigned long start = ALIGN_DOWN(addr, PMD_SIZE);

	/*
	 * Flush the unused range cache to ensure that memchr_inv() will work
	 * for the whole range.
	 */
	vmemmap_flush_unused_pmd();
	memset((void *)addr, PAGE_UNUSED, end - addr);

	return !memchr_inv((void *)start, PAGE_UNUSED, PMD_SIZE);
}
#endif

static void __meminit __vmemmap_use_sub_pmd(unsigned long start)
{
	/*
	 * As we expect to add in the same granularity as we remove, it's
	 * sufficient to mark only some piece used to block the memmap page from
	 * getting removed when removing some other adjacent memmap (just in
	 * case the first memmap never gets initialized e.g., because the memory
	 * block never gets onlined).
	 */
	memset((void *)start, 0, sizeof(struct page));
}
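
/*
 * Mark a sub-range of an already-populated vmemmap PMD as used, flushing
 * the cached unused range first if the new range is not contiguous with it.
 */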
static void __meminit vmemmap_use_sub_pmd(unsigned long start, unsigned long end)
{
	/*
	 * We only optimize if the new used range directly follows the
	 * previously unused range (esp., when populating consecutive sections).
	 */
	if (unused_pmd_start == start) {
		if (likely(IS_ALIGNED(end, PMD_SIZE)))
			unused_pmd_start = 0;
		else
			unused_pmd_start = end;
		return;
	}

	/*
	 * If the range does not contiguously follow the previous one, make
	 * sure to mark the unused range of the previous one so it can be
	 * removed.
	 */
	vmemmap_flush_unused_pmd();
	__vmemmap_use_sub_pmd(start);
}

static void __meminit vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end)
{
	const unsigned long page = ALIGN_DOWN(start, PMD_SIZE);

	vmemmap_flush_unused_pmd();

	/*
	 * Could be our memmap page is filled with PAGE_UNUSED already from a
	 * previous remove. Make sure to reset it.
	 */
	__vmemmap_use_sub_pmd(start);

	/*
	 * Mark with PAGE_UNUSED the unused parts of the new memmap range
	 */
	if (!IS_ALIGNED(start, PMD_SIZE))
		memset((void *)page, PAGE_UNUSED, start - page);

	/*
	 * We want to avoid memset(PAGE_UNUSED) when populating the vmemmap of
	 * consecutive sections. Remember for the last added PMD where the
	 * unused range begins.
	 */
	if (!IS_ALIGNED(end, PMD_SIZE))
		unused_pmd_start = end;
}

/*
 * Memory hotplug specific functions
 */
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * After memory hotplug the variables max_pfn, max_low_pfn and high_memory need
 * updating.
 */
static void update_end_of_memory_vars(u64 start, u64 size)
{
	unsigned long end_pfn = PFN_UP(start + size);

	if (end_pfn > max_pfn) {
		max_pfn = end_pfn;
		max_low_pfn = end_pfn;
		high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
	}
}

int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
	      struct mhp_params *params)
{
	unsigned long end = ((start_pfn + nr_pages) << PAGE_SHIFT) - 1;
	int ret;

	if (WARN_ON_ONCE(end > DIRECT_MAP_PHYSMEM_END))
		return -ERANGE;

	ret = __add_pages(nid, start_pfn, nr_pages, params);
	WARN_ON_ONCE(ret);

	/*
	 * Special case: add_pages() is called by memremap_pages() for adding device
	 * private pages. Do not bump up max_pfn in the device private path,
	 * because max_pfn changes affect dma_addressing_limited().
	 *
	 * dma_addressing_limited() returning true when max_pfn is the device's
	 * addressable memory can force device drivers to use bounce buffers
	 * and impact their performance negatively:
	 */
	if (!params->pgmap)
		/* update max_pfn, max_low_pfn and high_memory */
		update_end_of_memory_vars(start_pfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT);

	return ret;
}

int arch_add_memory(int nid, u64 start, u64 size,
		    struct mhp_params *params)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	init_memory_mapping(start, start + size, params->pgprot);

	return add_pages(nid, start_pfn, nr_pages, params);
}
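
/*
 * Helpers to free page-table pages: bootmem-reserved pages are released via
 * the bootmem-info/reserved-page paths, everything else goes back to the
 * page allocator.
 */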
static void free_reserved_pages(struct page *page, unsigned long nr_pages)
{
	while (nr_pages--)
		free_reserved_page(page++);
}

static void __meminit free_pagetable(struct page *page, int order)
{
	/* bootmem page has reserved flag */
	if (PageReserved(page)) {
		unsigned long nr_pages = 1 << order;
#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
		enum bootmem_type type = bootmem_type(page);

		if (type == SECTION_INFO || type == MIX_SECTION_INFO) {
			while (nr_pages--)
				put_page_bootmem(page++);
		} else {
			free_reserved_pages(page, nr_pages);
		}
#else
		free_reserved_pages(page, nr_pages);
#endif
	} else {
		free_pages((unsigned long)page_address(page), order);
	}
}

static void __meminit free_hugepage_table(struct page *page,
		struct vmem_altmap *altmap)
{
	if (altmap)
		vmem_altmap_free(altmap, PMD_SIZE / PAGE_SIZE);
	else
		free_pagetable(page, get_order(PMD_SIZE));
}

static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
{
	pte_t *pte;
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte = pte_start + i;
		if (!pte_none(*pte))
			return;
	}

	/* free a pte table */
	free_pagetable(pmd_page(*pmd), 0);
	spin_lock(&init_mm.page_table_lock);
	pmd_clear(pmd);
	spin_unlock(&init_mm.page_table_lock);
}

static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
{
	pmd_t *pmd;
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd = pmd_start + i;
		if (!pmd_none(*pmd))
			return;
	}

	/* free a pmd table */
	free_pagetable(pud_page(*pud), 0);
	spin_lock(&init_mm.page_table_lock);
	pud_clear(pud);
	spin_unlock(&init_mm.page_table_lock);
}

static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d)
{
	pud_t *pud;
	int i;

	for (i = 0; i < PTRS_PER_PUD; i++) {
		pud = pud_start + i;
		if (!pud_none(*pud))
			return;
	}

	/* free a pud table */
	free_pagetable(p4d_page(*p4d), 0);
	spin_lock(&init_mm.page_table_lock);
	p4d_clear(p4d);
	spin_unlock(&init_mm.page_table_lock);
}
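
/*
 * Unmap the PTE range [addr, end). For non-direct (vmemmap) mappings the
 * backing pages are freed as well; for the direct mapping only the 4K page
 * count is adjusted.
 */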
static void __meminit
remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
		 bool direct)
{
	unsigned long next, pages = 0;
	pte_t *pte;
	phys_addr_t phys_addr;

	pte = pte_start + pte_index(addr);
	for (; addr < end; addr = next, pte++) {
		next = (addr + PAGE_SIZE) & PAGE_MASK;
		if (next > end)
			next = end;

		if (!pte_present(*pte))
			continue;

		/*
		 * We mapped [0,1G) memory as identity mapping when
		 * initializing, in arch/x86/kernel/head_64.S. These
		 * pagetables cannot be removed.
		 */
		phys_addr = pte_val(*pte) + (addr & PAGE_MASK);
		if (phys_addr < (phys_addr_t)0x40000000)
			return;

		if (!direct)
			free_pagetable(pte_page(*pte), 0);

		spin_lock(&init_mm.page_table_lock);
		pte_clear(&init_mm, addr, pte);
		spin_unlock(&init_mm.page_table_lock);

		/* For non-direct mapping, pages means nothing. */
		pages++;
	}

	/* Call free_pte_table() in remove_pmd_table(). */
	flush_tlb_all();
	if (direct)
		update_page_count(PG_LEVEL_4K, -pages);
}

static void __meminit
remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
		 bool direct, struct vmem_altmap *altmap)
{
	unsigned long next, pages = 0;
	pte_t *pte_base;
	pmd_t *pmd;

	pmd = pmd_start + pmd_index(addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);

		if (!pmd_present(*pmd))
			continue;

		if (pmd_leaf(*pmd)) {
			if (IS_ALIGNED(addr, PMD_SIZE) &&
			    IS_ALIGNED(next, PMD_SIZE)) {
				if (!direct)
					free_hugepage_table(pmd_page(*pmd),
							    altmap);

				spin_lock(&init_mm.page_table_lock);
				pmd_clear(pmd);
				spin_unlock(&init_mm.page_table_lock);
				pages++;
			} else if (vmemmap_pmd_is_unused(addr, next)) {
				free_hugepage_table(pmd_page(*pmd),
						    altmap);
				spin_lock(&init_mm.page_table_lock);
				pmd_clear(pmd);
				spin_unlock(&init_mm.page_table_lock);
			}
			continue;
		}

		pte_base = (pte_t *)pmd_page_vaddr(*pmd);
		remove_pte_table(pte_base, addr, next, direct);
		free_pte_table(pte_base, pmd);
	}

	/* Call free_pmd_table() in remove_pud_table(). */
	if (direct)
		update_page_count(PG_LEVEL_2M, -pages);
}

static void __meminit
remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
		 struct vmem_altmap *altmap, bool direct)
{
	unsigned long next, pages = 0;
	pmd_t *pmd_base;
	pud_t *pud;

	pud = pud_start + pud_index(addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);

		if (!pud_present(*pud))
			continue;

		if (pud_leaf(*pud) &&
		    IS_ALIGNED(addr, PUD_SIZE) &&
		    IS_ALIGNED(next, PUD_SIZE)) {
			spin_lock(&init_mm.page_table_lock);
			pud_clear(pud);
			spin_unlock(&init_mm.page_table_lock);
			pages++;
			continue;
		}

		pmd_base = pmd_offset(pud, 0);
		remove_pmd_table(pmd_base, addr, next, direct, altmap);
		free_pmd_table(pmd_base, pud);
	}

	if (direct)
		update_page_count(PG_LEVEL_1G, -pages);
}

static void __meminit
remove_p4d_table(p4d_t *p4d_start, unsigned long addr, unsigned long end,
		 struct vmem_altmap *altmap, bool direct)
{
	unsigned long next, pages = 0;
	pud_t *pud_base;
	p4d_t *p4d;

	p4d = p4d_start + p4d_index(addr);
	for (; addr < end; addr = next, p4d++) {
		next = p4d_addr_end(addr, end);

		if (!p4d_present(*p4d))
			continue;

		BUILD_BUG_ON(p4d_leaf(*p4d));

		pud_base = pud_offset(p4d, 0);
		remove_pud_table(pud_base, addr, next, altmap, direct);
		/*
		 * For 4-level page tables we do not want to free PUDs, but in the
		 * 5-level case we should free them. This code will have to change
		 * to adapt for boot-time switching between 4 and 5 level page tables.
		 */
		if (pgtable_l5_enabled())
			free_pud_table(pud_base, p4d);
	}

	if (direct)
		update_page_count(PG_LEVEL_512G, -pages);
}

/* start and end are both virtual addresses. */
static void __meminit
remove_pagetable(unsigned long start, unsigned long end, bool direct,
		 struct vmem_altmap *altmap)
{
	unsigned long next;
	unsigned long addr;
	pgd_t *pgd;
	p4d_t *p4d;

	for (addr = start; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);

		pgd = pgd_offset_k(addr);
		if (!pgd_present(*pgd))
			continue;

		p4d = p4d_offset(pgd, 0);
		remove_p4d_table(p4d, addr, next, altmap, direct);
	}

	flush_tlb_all();
}

void __ref vmemmap_free(unsigned long start, unsigned long end,
		struct vmem_altmap *altmap)
{
	VM_BUG_ON(!PAGE_ALIGNED(start));
	VM_BUG_ON(!PAGE_ALIGNED(end));

	remove_pagetable(start, end, false, altmap);
}
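
/*
 * Tear down the direct mapping for the physical range [start, end).
 */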
static void __meminit
kernel_physical_mapping_remove(unsigned long start, unsigned long end)
{
	start = (unsigned long)__va(start);
	end = (unsigned long)__va(end);

	remove_pagetable(start, end, true, NULL);
}

void __ref arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
	kernel_physical_mapping_remove(start, start + size);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

static struct kcore_list kcore_vsyscall;

static void __init register_page_bootmem_info(void)
{
#if defined(CONFIG_NUMA) || defined(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP)
	int i;

	for_each_online_node(i)
		register_page_bootmem_info_node(NODE_DATA(i));
#endif
}

/*
 * Pre-allocates page-table pages for the vmalloc area in the kernel page-table.
 * Only the level which needs to be synchronized between all page-tables is
 * allocated because the synchronization can be expensive.
 */
static void __init preallocate_vmalloc_pages(void)
{
	unsigned long addr;
	const char *lvl;

	for (addr = VMALLOC_START; addr <= VMEMORY_END; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
		pgd_t *pgd = pgd_offset_k(addr);
		p4d_t *p4d;
		pud_t *pud;

		lvl = "p4d";
		p4d = p4d_alloc(&init_mm, pgd, addr);
		if (!p4d)
			goto failed;

		if (pgtable_l5_enabled())
			continue;

		/*
		 * The goal here is to allocate all possibly required
		 * hardware page tables pointed to by the top hardware
		 * level.
		 *
		 * On 4-level systems, the P4D layer is folded away and
		 * the above code does no preallocation.  Below, go down
		 * to the pud _software_ level to ensure the second
		 * hardware level is allocated on 4-level systems too.
		 */
		lvl = "pud";
		pud = pud_alloc(&init_mm, p4d, addr);
		if (!pud)
			goto failed;
	}

	return;

failed:

	/*
	 * The pages have to be there now or they will be missing in
	 * process page-tables later.
	 */
	panic("Failed to pre-allocate %s pages for vmalloc area\n", lvl);
}

void __init arch_mm_preinit(void)
{
	pci_iommu_alloc();
}

void __init mem_init(void)
{
	/* clear_bss() already cleared the empty_zero_page */

	after_bootmem = 1;
	x86_init.hyper.init_after_bootmem();

	/*
	 * Must be done after boot memory is put on freelist, because here we
	 * might set fields in deferred struct pages that have not yet been
	 * initialized, and memblock_free_all() initializes all the reserved
	 * deferred pages for us.
	 */
	register_page_bootmem_info();

	/* Register memory areas for /proc/kcore */
	if (get_gate_vma(&init_mm))
		kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR, PAGE_SIZE, KCORE_USER);

	preallocate_vmalloc_pages();
}

int kernel_set_to_readonly;

void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long rodata_start = PFN_ALIGN(__start_rodata);
	unsigned long end = (unsigned long)__end_rodata_hpage_align;
	unsigned long text_end = PFN_ALIGN(_etext);
	unsigned long rodata_end = PFN_ALIGN(__end_rodata);
	unsigned long all_end;

	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
	       (end - start) >> 10);
	set_memory_ro(start, (end - start) >> PAGE_SHIFT);

	kernel_set_to_readonly = 1;

	/*
	 * The rodata/data/bss/brk section (but not the kernel text!)
	 * should also be not-executable.
	 *
	 * We align all_end to PMD_SIZE because the existing mapping
	 * is a full PMD. If we would align _brk_end to PAGE_SIZE we
	 * split the PMD and the remainder between _brk_end and the end
	 * of the PMD will remain mapped executable.
	 *
	 * Any PMD which was setup after the one which covers _brk_end
	 * has been zapped already via cleanup_highmap().
	 */
	all_end = roundup((unsigned long)_brk_end, PMD_SIZE);
	set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT);

	set_ftrace_ops_ro();

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end);
	set_memory_rw(start, (end-start) >> PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: again\n");
	set_memory_ro(start, (end-start) >> PAGE_SHIFT);
#endif

	free_kernel_image_pages("unused kernel image (text/rodata gap)",
				(void *)text_end, (void *)rodata_start);
	free_kernel_image_pages("unused kernel image (rodata/data gap)",
				(void *)rodata_end, (void *)_sdata);
}

/*
 * Block size is the minimum amount of memory which can be hotplugged or
 * hotremoved. It must be a power of two and must be equal to or larger than
 * MIN_MEMORY_BLOCK_SIZE.
 */
#define MAX_BLOCK_SIZE (2UL << 30)

/* Amount of RAM needed to start using large blocks */
#define MEM_SIZE_FOR_LARGE_BLOCK (64UL << 30)

/* Adjustable memory block size */
static unsigned long set_memory_block_size;
int __init set_memory_block_size_order(unsigned int order)
{
	unsigned long size = 1UL << order;

	if (size > MEM_SIZE_FOR_LARGE_BLOCK || size < MIN_MEMORY_BLOCK_SIZE)
		return -EINVAL;

	set_memory_block_size = size;
	return 0;
}
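
/*
 * Pick the memory block size: honour an explicit override from
 * set_memory_block_size_order(), use the minimum block size on small
 * machines, and otherwise choose the largest allowed size that still
 * aligns with the end of boot memory.
 */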
static unsigned long probe_memory_block_size(void)
{
	unsigned long boot_mem_end = max_pfn << PAGE_SHIFT;
	unsigned long bz;

	/* If memory block size has been set, then use it */
	bz = set_memory_block_size;
	if (bz)
		goto done;

	/* Use regular block if RAM is smaller than MEM_SIZE_FOR_LARGE_BLOCK */
	if (boot_mem_end < MEM_SIZE_FOR_LARGE_BLOCK) {
		bz = MIN_MEMORY_BLOCK_SIZE;
		goto done;
	}

	/*
	 * When hotplug alignment is not a concern, maximize blocksize
	 * to minimize overhead. Otherwise, align to the lesser of the
	 * advised alignment and the end-of-memory alignment.
	 */
	bz = memory_block_advised_max_size();
	if (!bz) {
		bz = MAX_BLOCK_SIZE;
		if (!cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
			goto done;
	} else {
		bz = max(min(bz, MAX_BLOCK_SIZE), MIN_MEMORY_BLOCK_SIZE);
	}

	/* Find the largest allowed block size that aligns to memory end */
	for (; bz > MIN_MEMORY_BLOCK_SIZE; bz >>= 1) {
		if (IS_ALIGNED(boot_mem_end, bz))
			break;
	}
done:
	pr_info("x86/mm: Memory block size: %ldMB\n", bz >> 20);

	return bz;
}

static unsigned long memory_block_size_probed;
unsigned long memory_block_size_bytes(void)
{
	if (!memory_block_size_probed)
		memory_block_size_probed = probe_memory_block_size();

	return memory_block_size_probed;
}

/*
 * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
 */
static long __meminitdata addr_start, addr_end;
static void __meminitdata *p_start, *p_end;
static int __meminitdata node_start;
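
/*
 * Install a PMD-sized vmemmap mapping at @pmd backed by @p, and keep track
 * of contiguous ranges for the "PMD -> ..." boot-time debug printout.
 */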
void __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
			       unsigned long addr, unsigned long next)
{
	pte_t entry;

	entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
			PAGE_KERNEL_LARGE);
	set_pmd(pmd, __pmd(pte_val(entry)));

	/* check to see if we have contiguous blocks */
	if (p_end != p || node_start != node) {
		if (p_start)
			pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n",
				 addr_start, addr_end-1, p_start, p_end-1, node_start);
		addr_start = addr;
		node_start = node;
		p_start = p;
	}

	addr_end = addr + PMD_SIZE;
	p_end = p + PMD_SIZE;

	if (!IS_ALIGNED(addr, PMD_SIZE) ||
	    !IS_ALIGNED(next, PMD_SIZE))
		vmemmap_use_new_sub_pmd(addr, next);
}

int __meminit vmemmap_check_pmd(pmd_t *pmd, int node,
				unsigned long addr, unsigned long next)
{
	int large = pmd_leaf(*pmd);

	if (pmd_leaf(*pmd)) {
		vmemmap_verify((pte_t *)pmd, node, addr, next);
		vmemmap_use_sub_pmd(addr, next);
	}

	return large;
}

int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	int err;

	VM_BUG_ON(!PAGE_ALIGNED(start));
	VM_BUG_ON(!PAGE_ALIGNED(end));

	if (end - start < PAGES_PER_SECTION * sizeof(struct page))
		err = vmemmap_populate_basepages(start, end, node, NULL);
	else if (boot_cpu_has(X86_FEATURE_PSE))
		err = vmemmap_populate_hugepages(start, end, node, altmap);
	else if (altmap) {
		pr_err_once("%s: no cpu support for altmap allocations\n",
				__func__);
		err = -ENOMEM;
	} else
		err = vmemmap_populate_basepages(start, end, node, NULL);
	if (!err)
		sync_global_pgds(start, end - 1);
	return err;
}

#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
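/*
 * Record bootmem info (section number plus SECTION_INFO/MIX_SECTION_INFO
 * type) on the page-table pages that map a section's memmap, so they are
 * accounted for correctly during memory hot-remove.
 */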
void register_page_bootmem_memmap(unsigned long section_nr,
				  struct page *start_page, unsigned long nr_pages)
{
	unsigned long addr = (unsigned long)start_page;
	unsigned long end = (unsigned long)(start_page + nr_pages);
	unsigned long next;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	unsigned int nr_pmd_pages;
	struct page *page;

	for (; addr < end; addr = next) {
		pte_t *pte = NULL;

		pgd = pgd_offset_k(addr);
		if (pgd_none(*pgd)) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			continue;
		}
		get_page_bootmem(section_nr, pgd_page(*pgd), MIX_SECTION_INFO);

		p4d = p4d_offset(pgd, addr);
		if (p4d_none(*p4d)) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			continue;
		}
		get_page_bootmem(section_nr, p4d_page(*p4d), MIX_SECTION_INFO);

		pud = pud_offset(p4d, addr);
		if (pud_none(*pud)) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			continue;
		}
		get_page_bootmem(section_nr, pud_page(*pud), MIX_SECTION_INFO);

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			continue;
		}

		if (!boot_cpu_has(X86_FEATURE_PSE) || !pmd_leaf(*pmd)) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			get_page_bootmem(section_nr, pmd_page(*pmd),
					 MIX_SECTION_INFO);

			pte = pte_offset_kernel(pmd, addr);
			if (pte_none(*pte))
				continue;
			get_page_bootmem(section_nr, pte_page(*pte),
					 SECTION_INFO);
		} else {
			next = pmd_addr_end(addr, end);
			nr_pmd_pages = (next - addr) >> PAGE_SHIFT;
			page = pmd_page(*pmd);
			while (nr_pmd_pages--)
				get_page_bootmem(section_nr, page++,
						 SECTION_INFO);
		}
	}
}
#endif

void __meminit vmemmap_populate_print_last(void)
{
	if (p_start) {
		pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n",
			 addr_start, addr_end-1, p_start, p_end-1, node_start);
		p_start = NULL;
		p_end = NULL;
		node_start = 0;
	}
}