// SPDX-License-Identifier: GPL-2.0
/*
 * S390 version
 * Copyright IBM Corp. 1999, 2012
 * Author(s): Hartmut Penner (hp@de.ibm.com),
 *            Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 * Derived from "arch/i386/kernel/setup.c"
 * Copyright (C) 1995, Linus Torvalds
 */

/*
 * This file handles the architecture-dependent parts of initialization
 */

#define KMSG_COMPONENT "setup"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/user.h>
#include <linux/tty.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/root_dev.h>
#include <linux/console.h>
#include <linux/kernel_stat.h>
#include <linux/dma-map-ops.h>
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pfn.h>
#include <linux/ctype.h>
#include <linux/reboot.h>
#include <linux/topology.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/memory.h>
#include <linux/compat.h>
#include <linux/start_kernel.h>
#include <linux/hugetlb.h>
#include <linux/kmemleak.h>

#include <asm/archrandom.h>
#include <asm/boot_data.h>
#include <asm/machine.h>
#include <asm/ipl.h>
#include <asm/facility.h>
#include <asm/smp.h>
#include <asm/mmu_context.h>
#include <asm/cpcmd.h>
#include <asm/abs_lowcore.h>
#include <asm/nmi.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/ebcdic.h>
#include <asm/diag.h>
#include <asm/os_info.h>
#include <asm/sclp.h>
#include <asm/stacktrace.h>
#include <asm/sysinfo.h>
#include <asm/numa.h>
#include <asm/alternative.h>
#include <asm/nospec-branch.h>
#include <asm/physmem_info.h>
#include <asm/maccess.h>
#include <asm/uv.h>
#include <asm/asm-offsets.h>
#include "entry.h"

/*
 * Machine setup..
 */
unsigned int console_mode = 0;
EXPORT_SYMBOL(console_mode);

unsigned int console_devno = -1;
EXPORT_SYMBOL(console_devno);

unsigned int console_irq = -1;
EXPORT_SYMBOL(console_irq);

/*
 * Some code and data needs to stay below 2 GB, even when the kernel would be
 * relocated above 2 GB, because it has to use 31 bit addresses.
 * Such code and data is part of the .amode31 section.
 */
char __amode31_ref *__samode31 = _samode31;
char __amode31_ref *__eamode31 = _eamode31;
char __amode31_ref *__stext_amode31 = _stext_amode31;
char __amode31_ref *__etext_amode31 = _etext_amode31;
struct exception_table_entry __amode31_ref *__start_amode31_ex_table = _start_amode31_ex_table;
struct exception_table_entry __amode31_ref *__stop_amode31_ex_table = _stop_amode31_ex_table;

/*
 * Control registers CR2, CR5 and CR15 are initialized with addresses
 * of tables that must be placed below 2G which is handled by the AMODE31
 * sections.
 * Because the AMODE31 sections are relocated below 2G at startup,
 * the content of control registers CR2, CR5 and CR15 must be updated
 * with new addresses after the relocation. The initial initialization of
 * control registers occurs in head64.S and then gets updated again after AMODE31
 * relocation. We must access the relevant AMODE31 tables indirectly via
 * pointers placed in the .amode31.refs linker section. Those pointers get
 * updated automatically during AMODE31 relocation and always contain a valid
 * address within AMODE31 sections.
 */

static __amode31_data u32 __ctl_duct_amode31[16] __aligned(64);

static __amode31_data u64 __ctl_aste_amode31[8] __aligned(64) = {
	[1] = 0xffffffffffffffff
};

static __amode31_data u32 __ctl_duald_amode31[32] __aligned(128) = {
	0x80000000, 0, 0, 0,
	0x80000000, 0, 0, 0,
	0x80000000, 0, 0, 0,
	0x80000000, 0, 0, 0,
	0x80000000, 0, 0, 0,
	0x80000000, 0, 0, 0,
	0x80000000, 0, 0, 0,
	0x80000000, 0, 0, 0
};

static __amode31_data u32 __ctl_linkage_stack_amode31[8] __aligned(64) = {
	0, 0, 0x89000000, 0,
	0, 0, 0x8a000000, 0
};

static u64 __amode31_ref *__ctl_aste = __ctl_aste_amode31;
static u32 __amode31_ref *__ctl_duald = __ctl_duald_amode31;
static u32 __amode31_ref *__ctl_linkage_stack = __ctl_linkage_stack_amode31;
static u32 __amode31_ref *__ctl_duct = __ctl_duct_amode31;

unsigned long __bootdata_preserved(max_mappable);
struct physmem_info __bootdata(physmem_info);

struct vm_layout __bootdata_preserved(vm_layout);
EXPORT_SYMBOL(vm_layout);
int __bootdata_preserved(__kaslr_enabled);
unsigned int __bootdata_preserved(zlib_dfltcc_support);
EXPORT_SYMBOL(zlib_dfltcc_support);
u64 __bootdata_preserved(stfle_fac_list[16]);
EXPORT_SYMBOL(stfle_fac_list);
struct oldmem_data __bootdata_preserved(oldmem_data);

char __bootdata(boot_rb)[PAGE_SIZE * 2];
bool __bootdata(boot_earlyprintk);
size_t __bootdata(boot_rb_off);
char __bootdata(bootdebug_filter)[128];
bool __bootdata(bootdebug);

unsigned long __bootdata_preserved(VMALLOC_START);
EXPORT_SYMBOL(VMALLOC_START);

unsigned long __bootdata_preserved(VMALLOC_END);
EXPORT_SYMBOL(VMALLOC_END);

struct page *__bootdata_preserved(vmemmap);
EXPORT_SYMBOL(vmemmap);
unsigned long __bootdata_preserved(vmemmap_size);

unsigned long __bootdata_preserved(MODULES_VADDR);
unsigned long __bootdata_preserved(MODULES_END);

/* An array with a pointer to the lowcore of every CPU. */
struct lowcore *lowcore_ptr[NR_CPUS];
EXPORT_SYMBOL(lowcore_ptr);

/*
 * The Write Back bit position in the physaddr is given by the SLPC PCI.
 * Leaving the mask zero always uses write through which is safe
 */
unsigned long mio_wb_bit_mask __ro_after_init;

/*
 * This is set up by the setup-routine at boot-time
 * for S390 need to find out, what we have to setup
 * using address 0x10400 ...
 */

#include <asm/setup.h>

/*
 * condev= and conmode= setup parameter.
 */

static int __init condev_setup(char *str)
{
	int vdev;

	vdev = simple_strtoul(str, &str, 0);
	if (vdev >= 0 && vdev < 65536) {
		console_devno = vdev;
		console_irq = -1;
	}
	return 1;
}

__setup("condev=", condev_setup);

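/*
 * Register the console driver that matches the selected console mode as
 * preferred console.
 */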
static void __init set_preferred_console(void)
{
	if (CONSOLE_IS_3215 || CONSOLE_IS_SCLP)
		add_preferred_console("ttyS", 0, NULL);
	else if (CONSOLE_IS_3270)
		add_preferred_console("tty3270", 0, NULL);
	else if (CONSOLE_IS_VT220)
		add_preferred_console("ttysclp", 0, NULL);
	else if (CONSOLE_IS_HVC)
		add_preferred_console("hvc", 0, NULL);
}

static int __init conmode_setup(char *str)
{
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
	if (!strcmp(str, "hwc") || !strcmp(str, "sclp"))
		SET_CONSOLE_SCLP;
#endif
#if defined(CONFIG_TN3215_CONSOLE)
	if (!strcmp(str, "3215"))
		SET_CONSOLE_3215;
#endif
#if defined(CONFIG_TN3270_CONSOLE)
	if (!strcmp(str, "3270"))
		SET_CONSOLE_3270;
#endif
	set_preferred_console();
	return 1;
}

__setup("conmode=", conmode_setup);

static void __init conmode_default(void)
{
	char query_buffer[1024];
	char *ptr;

	if (machine_is_vm()) {
		cpcmd("QUERY CONSOLE", query_buffer, 1024, NULL);
		console_devno = simple_strtoul(query_buffer + 5, NULL, 16);
		ptr = strstr(query_buffer, "SUBCHANNEL =");
		console_irq = simple_strtoul(ptr + 13, NULL, 16);
		cpcmd("QUERY TERM", query_buffer, 1024, NULL);
		ptr = strstr(query_buffer, "CONMODE");
		/*
		 * Set the conmode to 3215 so that the device recognition
		 * will set the cu_type of the console to 3215. If the
		 * conmode is 3270 and we don't set it back then both
		 * 3215 and the 3270 driver will try to access the console
		 * device (3215 as console and 3270 as normal tty).
		 */
		cpcmd("TERM CONMODE 3215", NULL, 0, NULL);
		if (ptr == NULL) {
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
			SET_CONSOLE_SCLP;
#endif
			return;
		}
		if (str_has_prefix(ptr + 8, "3270")) {
#if defined(CONFIG_TN3270_CONSOLE)
			SET_CONSOLE_3270;
#elif defined(CONFIG_TN3215_CONSOLE)
			SET_CONSOLE_3215;
#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
			SET_CONSOLE_SCLP;
#endif
		} else if (str_has_prefix(ptr + 8, "3215")) {
#if defined(CONFIG_TN3215_CONSOLE)
			SET_CONSOLE_3215;
#elif defined(CONFIG_TN3270_CONSOLE)
			SET_CONSOLE_3270;
#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
			SET_CONSOLE_SCLP;
#endif
		}
	} else if (machine_is_kvm()) {
		if (sclp.has_vt220 && IS_ENABLED(CONFIG_SCLP_VT220_CONSOLE))
			SET_CONSOLE_VT220;
		else if (sclp.has_linemode && IS_ENABLED(CONFIG_SCLP_CONSOLE))
			SET_CONSOLE_SCLP;
		else
			SET_CONSOLE_HVC;
	} else {
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
		SET_CONSOLE_SCLP;
#endif
	}
}

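/*
 * For a stand-alone dump (zfcp/nvme) IPL, restrict the common I/O layer
 * to the IPL and console devices and lower the console loglevel.
 */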
#ifdef CONFIG_CRASH_DUMP
static void __init setup_zfcpdump(void)
{
	if (!is_ipl_type_dump())
		return;
	if (oldmem_data.start)
		return;
	strlcat(boot_command_line, " cio_ignore=all,!ipldev,!condev", COMMAND_LINE_SIZE);
	console_loglevel = 2;
}
#else
static inline void setup_zfcpdump(void) {}
#endif /* CONFIG_CRASH_DUMP */

/*
 * Reboot, halt and power_off stubs. They just call _machine_restart,
 * _machine_halt or _machine_power_off.
 */

void machine_restart(char *command)
{
	if ((!in_interrupt() && !in_atomic()) || oops_in_progress)
		/*
		 * Only unblank the console if we are called in enabled
		 * context or a bust_spinlocks cleared the way for us.
		 */
		console_unblank();
	_machine_restart(command);
}

void machine_halt(void)
{
	if (!in_interrupt() || oops_in_progress)
		/*
		 * Only unblank the console if we are called in enabled
		 * context or a bust_spinlocks cleared the way for us.
		 */
		console_unblank();
	_machine_halt();
}

void machine_power_off(void)
{
	if (!in_interrupt() || oops_in_progress)
		/*
		 * Only unblank the console if we are called in enabled
		 * context or a bust_spinlocks cleared the way for us.
		 */
		console_unblank();
	_machine_power_off();
}

/*
 * Dummy power off function.
 */
void (*pm_power_off)(void) = machine_power_off;
EXPORT_SYMBOL_GPL(pm_power_off);

void *restart_stack;

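/*
 * Allocate a THREAD_SIZE aligned kernel stack from the vmalloc area.
 * The stack is excluded from kmemleak leak reporting.
 */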
unsigned long stack_alloc(void)
{
	void *stack;

	stack = __vmalloc_node(THREAD_SIZE, THREAD_SIZE, THREADINFO_GFP,
			       NUMA_NO_NODE, __builtin_return_address(0));
	kmemleak_not_leak(stack);
	return (unsigned long)stack;
}

void stack_free(unsigned long stack)
{
	vfree((void *)stack);
}

static unsigned long __init stack_alloc_early(void)
{
	unsigned long stack;

	stack = (unsigned long)memblock_alloc_or_panic(THREAD_SIZE, THREAD_SIZE);
	return stack;
}

static void __init setup_lowcore(void)
{
	struct lowcore *lc, *abs_lc;

	/*
	 * Setup lowcore for boot cpu
	 */
	BUILD_BUG_ON(sizeof(struct lowcore) != LC_PAGES * PAGE_SIZE);
	lc = memblock_alloc_low(sizeof(*lc), sizeof(*lc));
	if (!lc)
		panic("%s: Failed to allocate %zu bytes align=%zx\n",
		      __func__, sizeof(*lc), sizeof(*lc));

	lc->pcpu = (unsigned long)per_cpu_ptr(&pcpu_devices, 0);
	lc->restart_psw.mask = PSW_KERNEL_BITS & ~PSW_MASK_DAT;
	lc->restart_psw.addr = __pa(restart_int_handler);
	lc->external_new_psw.mask = PSW_KERNEL_BITS;
	lc->external_new_psw.addr = (unsigned long) ext_int_handler;
	lc->svc_new_psw.mask = PSW_KERNEL_BITS;
	lc->svc_new_psw.addr = (unsigned long) system_call;
	lc->program_new_psw.mask = PSW_KERNEL_BITS;
	lc->program_new_psw.addr = (unsigned long) pgm_check_handler;
	lc->mcck_new_psw.mask = PSW_KERNEL_BITS;
	lc->mcck_new_psw.addr = (unsigned long) mcck_int_handler;
	lc->io_new_psw.mask = PSW_KERNEL_BITS;
	lc->io_new_psw.addr = (unsigned long) io_int_handler;
	lc->clock_comparator = clock_comparator_max;
	lc->current_task = (unsigned long)&init_task;
	lc->lpp = LPP_MAGIC;
	lc->preempt_count = get_lowcore()->preempt_count;
	nmi_alloc_mcesa_early(&lc->mcesad);
	lc->sys_enter_timer = get_lowcore()->sys_enter_timer;
	lc->exit_timer = get_lowcore()->exit_timer;
	lc->user_timer = get_lowcore()->user_timer;
	lc->system_timer = get_lowcore()->system_timer;
	lc->steal_timer = get_lowcore()->steal_timer;
	lc->last_update_timer = get_lowcore()->last_update_timer;
	lc->last_update_clock = get_lowcore()->last_update_clock;
	/*
	 * Allocate the global restart stack which is the same for
	 * all CPUs in case *one* of them does a PSW restart.
	 */
	restart_stack = (void *)(stack_alloc_early() + STACK_INIT_OFFSET);
	lc->mcck_stack = stack_alloc_early() + STACK_INIT_OFFSET;
	lc->async_stack = stack_alloc_early() + STACK_INIT_OFFSET;
	lc->nodat_stack = stack_alloc_early() + STACK_INIT_OFFSET;
	lc->kernel_stack = get_lowcore()->kernel_stack;
	/*
	 * Set up PSW restart to call ipl.c:do_restart(). Copy the relevant
	 * restart data to the absolute zero lowcore. This is necessary if
	 * PSW restart is done on an offline CPU that has lowcore zero.
	 */
	lc->restart_stack = (unsigned long) restart_stack;
	lc->restart_fn = (unsigned long) do_restart;
	lc->restart_data = 0;
	lc->restart_source = -1U;
	lc->spinlock_lockval = arch_spin_lockval(0);
	lc->spinlock_index = 0;
	arch_spin_lock_setup(0);
	lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
	lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
	lc->preempt_count = PREEMPT_DISABLED;
	lc->kernel_asce = get_lowcore()->kernel_asce;
	lc->user_asce = get_lowcore()->user_asce;

	system_ctlreg_init_save_area(lc);
	abs_lc = get_abs_lowcore();
	abs_lc->restart_stack = lc->restart_stack;
	abs_lc->restart_fn = lc->restart_fn;
	abs_lc->restart_data = lc->restart_data;
	abs_lc->restart_source = lc->restart_source;
	abs_lc->restart_psw = lc->restart_psw;
	abs_lc->restart_flags = RESTART_FLAG_CTLREGS;
	abs_lc->program_new_psw = lc->program_new_psw;
	abs_lc->mcesad = lc->mcesad;
	put_abs_lowcore(abs_lc);

	set_prefix(__pa(lc));
	lowcore_ptr[0] = lc;
	if (abs_lowcore_map(0, lowcore_ptr[0], false))
		panic("Couldn't setup absolute lowcore");
}

static struct resource code_resource = {
	.name = "Kernel code",
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource data_resource = {
	.name = "Kernel data",
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource bss_resource = {
	.name = "Kernel bss",
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource __initdata *standard_resources[] = {
	&code_resource,
	&data_resource,
	&bss_resource,
};

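/*
 * Register a "System RAM" resource for each memblock memory range and
 * insert the kernel code, data and bss resources below the RAM ranges
 * they fall into.
 */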
static void __init setup_resources(void)
{
	struct resource *res, *std_res, *sub_res;
	phys_addr_t start, end;
	int j;
	u64 i;

	code_resource.start = __pa_symbol(_text);
	code_resource.end = __pa_symbol(_etext) - 1;
	data_resource.start = __pa_symbol(_etext);
	data_resource.end = __pa_symbol(_edata) - 1;
	bss_resource.start = __pa_symbol(__bss_start);
	bss_resource.end = __pa_symbol(__bss_stop) - 1;

	for_each_mem_range(i, &start, &end) {
		res = memblock_alloc_or_panic(sizeof(*res), 8);
		res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;

		res->name = "System RAM";
		res->start = start;
		/*
		 * In memblock, end points to the first byte after the
		 * range while in resources, end points to the last byte in
		 * the range.
		 */
		res->end = end - 1;
		request_resource(&iomem_resource, res);

		for (j = 0; j < ARRAY_SIZE(standard_resources); j++) {
			std_res = standard_resources[j];
			if (std_res->start < res->start ||
			    std_res->start > res->end)
				continue;
			if (std_res->end > res->end) {
				sub_res = memblock_alloc_or_panic(sizeof(*sub_res), 8);
				*sub_res = *std_res;
				sub_res->end = res->end;
				std_res->start = res->end + 1;
				request_resource(res, sub_res);
			} else {
				request_resource(res, std_res);
			}
		}
	}
#ifdef CONFIG_CRASH_DUMP
	/*
	 * Re-add removed crash kernel memory as reserved memory. This makes
	 * sure it will be mapped with the identity mapping and struct pages
	 * will be created, so it can be resized later on.
	 * However add it later since the crash kernel resource should not be
	 * part of the System RAM resource.
	 */
	if (crashk_res.end) {
		memblock_add_node(crashk_res.start, resource_size(&crashk_res),
				  0, MEMBLOCK_NONE);
		memblock_reserve(crashk_res.start, resource_size(&crashk_res));
		insert_resource(&iomem_resource, &crashk_res);
	}
#endif
}

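/*
 * max_pfn and max_low_pfn are limited by the size of the identity
 * mapping (ident_map_size) established during early boot.
 */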
static void __init setup_memory_end(void)
{
	max_pfn = max_low_pfn = PFN_DOWN(ident_map_size);
	pr_notice("The maximum memory size is %luMB\n", ident_map_size >> 20);
}

#ifdef CONFIG_CRASH_DUMP

/*
 * When kdump is enabled, we have to ensure that no memory from the area
 * [0 - crashkernel memory size] is set offline - it will be exchanged with
 * the crashkernel memory region when kdump is triggered. The crashkernel
 * memory region can never get offlined (pages are unmovable).
 */
static int kdump_mem_notifier(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	struct memory_notify *arg = data;

	if (action != MEM_GOING_OFFLINE)
		return NOTIFY_OK;
	if (arg->start_pfn < PFN_DOWN(resource_size(&crashk_res)))
		return NOTIFY_BAD;
	return NOTIFY_OK;
}

static struct notifier_block kdump_mem_nb = {
	.notifier_call = kdump_mem_notifier,
};

#endif

/*
 * Reserve page tables created by decompressor
 */
static void __init reserve_pgtables(void)
{
	unsigned long start, end;
	struct reserved_range *range;

	for_each_physmem_reserved_type_range(RR_VMEM, range, &start, &end)
		memblock_reserve(start, end - start);
}

/*
 * Reserve memory for kdump kernel to be loaded with kexec
 */
static void __init reserve_crashkernel(void)
{
#ifdef CONFIG_CRASH_DUMP
	unsigned long long crash_base, crash_size;
	phys_addr_t low, high;
	int rc;

	rc = parse_crashkernel(boot_command_line, ident_map_size,
			       &crash_size, &crash_base, NULL, NULL);

	crash_base = ALIGN(crash_base, KEXEC_CRASH_MEM_ALIGN);
	crash_size = ALIGN(crash_size, KEXEC_CRASH_MEM_ALIGN);
	if (rc || crash_size == 0)
		return;

	if (memblock.memory.regions[0].size < crash_size) {
		pr_info("crashkernel reservation failed: %s\n",
			"first memory chunk must be at least crashkernel size");
		return;
	}

	low = crash_base ?: oldmem_data.start;
	high = low + crash_size;
	if (low >= oldmem_data.start && high <= oldmem_data.start + oldmem_data.size) {
		/* The crashkernel fits into OLDMEM, reuse OLDMEM */
		crash_base = low;
	} else {
		/* Find suitable area in free memory */
		low = max_t(unsigned long, crash_size, sclp.hsa_size);
		high = crash_base ? crash_base + crash_size : ULONG_MAX;

		if (crash_base && crash_base < low) {
			pr_info("crashkernel reservation failed: %s\n",
				"crash_base too low");
			return;
		}
		low = crash_base ?: low;
		crash_base = memblock_phys_alloc_range(crash_size,
						       KEXEC_CRASH_MEM_ALIGN,
						       low, high);
	}

	if (!crash_base) {
		pr_info("crashkernel reservation failed: %s\n",
			"no suitable area found");
		return;
	}

	if (register_memory_notifier(&kdump_mem_nb)) {
		memblock_phys_free(crash_base, crash_size);
		return;
	}

	if (!oldmem_data.start && machine_is_vm())
		diag10_range(PFN_DOWN(crash_base), PFN_DOWN(crash_size));
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	memblock_remove(crash_base, crash_size);
	pr_info("Reserving %lluMB of memory at %lluMB "
		"for crashkernel (System RAM: %luMB)\n",
		crash_size >> 20, crash_base >> 20,
		(unsigned long)memblock.memory.total_size >> 20);
	os_info_crashkernel_add(crash_base, crash_size);
#endif
}

/*
 * Reserve the initrd from being used by memblock
 */
static void __init reserve_initrd(void)
{
	unsigned long addr, size;

	if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD) || !get_physmem_reserved(RR_INITRD, &addr, &size))
		return;
	initrd_start = (unsigned long)__va(addr);
	initrd_end = initrd_start + size;
	memblock_reserve(addr, size);
}

/*
 * Reserve the memory area used to pass the certificate lists
 */
static void __init reserve_certificate_list(void)
{
	if (ipl_cert_list_addr)
		memblock_reserve(ipl_cert_list_addr, ipl_cert_list_size);
}

static void __init reserve_physmem_info(void)
{
	unsigned long addr, size;

	if (get_physmem_reserved(RR_MEM_DETECT_EXT, &addr, &size))
		memblock_reserve(addr, size);
}

static void __init free_physmem_info(void)
{
	unsigned long addr, size;

	if (get_physmem_reserved(RR_MEM_DETECT_EXT, &addr, &size))
		memblock_phys_free(addr, size);
}

static void __init memblock_add_physmem_info(void)
{
	unsigned long start, end;
	int i;

	pr_debug("physmem info source: %s (%hhd)\n",
		 get_physmem_info_source(), physmem_info.info_source);
	/* keep memblock lists close to the kernel */
	memblock_set_bottom_up(true);
	for_each_physmem_usable_range(i, &start, &end)
		memblock_add(start, end - start);
	for_each_physmem_online_range(i, &start, &end)
		memblock_physmem_add(start, end - start);
	memblock_set_bottom_up(false);
	memblock_set_node(0, ULONG_MAX, &memblock.memory, 0);
}

/*
 * Reserve memory used for lowcore.
 */
static void __init reserve_lowcore(void)
{
	void *lowcore_start = get_lowcore();
	void *lowcore_end = lowcore_start + sizeof(struct lowcore);
	void *start, *end;

	if (absolute_pointer(__identity_base) < lowcore_end) {
		start = max(lowcore_start, (void *)__identity_base);
		end = min(lowcore_end, (void *)(__identity_base + ident_map_size));
		memblock_reserve(__pa(start), __pa(end));
	}
}

/*
 * Reserve memory used for absolute lowcore/command line/kernel image.
 */
static void __init reserve_kernel(void)
{
	memblock_reserve(0, STARTUP_NORMAL_OFFSET);
	memblock_reserve(OLDMEM_BASE, sizeof(unsigned long));
	memblock_reserve(OLDMEM_SIZE, sizeof(unsigned long));
	memblock_reserve(physmem_info.reserved[RR_AMODE31].start, __eamode31 - __samode31);
	memblock_reserve(__pa(sclp_early_sccb), EXT_SCCB_READ_SCP);
	memblock_reserve(__pa(_stext), _end - _stext);
}

static void __init setup_memory(void)
{
	phys_addr_t start, end;
	u64 i;

	/*
	 * Init storage key for present memory
	 */
	for_each_mem_range(i, &start, &end)
		storage_key_init_range(start, end);

	psw_set_key(PAGE_DEFAULT_KEY);
}

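/*
 * Move the .amode31 section to its final location below 2 GB and adjust
 * all pointers in the .amode31.refs section to the new location.
 */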
static void __init relocate_amode31_section(void)
{
	unsigned long amode31_size = __eamode31 - __samode31;
	long amode31_offset, *ptr;

	amode31_offset = AMODE31_START - (unsigned long)__samode31;
	pr_info("Relocating AMODE31 section of size 0x%08lx\n", amode31_size);

	/* Move original AMODE31 section to the new one */
	memmove((void *)physmem_info.reserved[RR_AMODE31].start, __samode31, amode31_size);
	/* Zero out the old AMODE31 section to catch invalid accesses within it */
	memset(__samode31, 0, amode31_size);

	/* Update all AMODE31 region references */
	for (ptr = _start_amode31_refs; ptr != _end_amode31_refs; ptr++)
		*ptr += amode31_offset;
}

/* This must be called after AMODE31 relocation */
static void __init setup_cr(void)
{
	union ctlreg2 cr2;
	union ctlreg5 cr5;
	union ctlreg15 cr15;

	__ctl_duct[1] = (unsigned long)__ctl_aste;
	__ctl_duct[2] = (unsigned long)__ctl_aste;
	__ctl_duct[4] = (unsigned long)__ctl_duald;

	/* Update control registers CR2, CR5 and CR15 */
	local_ctl_store(2, &cr2.reg);
	local_ctl_store(5, &cr5.reg);
	local_ctl_store(15, &cr15.reg);
	cr2.ducto = (unsigned long)__ctl_duct >> 6;
	cr5.pasteo = (unsigned long)__ctl_duct >> 6;
	cr15.lsea = (unsigned long)__ctl_linkage_stack >> 3;
	system_ctl_load(2, &cr2.reg);
	system_ctl_load(5, &cr5.reg);
	system_ctl_load(15, &cr15.reg);
}

/*
 * Add system information as device randomness
 */
static void __init setup_randomness(void)
{
	struct sysinfo_3_2_2 *vmms;

	vmms = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
	if (stsi(vmms, 3, 2, 2) == 0 && vmms->count)
		add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) * vmms->count);
	memblock_free(vmms, PAGE_SIZE);

	if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG))
		static_branch_enable(&s390_arch_random_available);
}

/*
 * Issue diagnose 318 to set the control program name and
 * version codes.
 */
static void __init setup_control_program_code(void)
{
	union diag318_info diag318_info = {
		.cpnc = CPNC_LINUX,
		.cpvc = 0,
	};

	if (!sclp.has_diag318)
		return;

	diag_stat_inc(DIAG_STAT_X318);
	asm volatile("diag %0,0,0x318\n" : : "d" (diag318_info.val));
}

/*
 * Print the component list from the IPL report
 */
static void __init log_component_list(void)
{
	struct ipl_rb_component_entry *ptr, *end;
	char *str;

	if (!early_ipl_comp_list_addr)
		return;
	if (ipl_block.hdr.flags & IPL_PL_FLAG_SIPL)
		pr_info("Linux is running with Secure-IPL enabled\n");
	else
		pr_info("Linux is running with Secure-IPL disabled\n");
	ptr = __va(early_ipl_comp_list_addr);
	end = (void *) ptr + early_ipl_comp_list_size;
	pr_info("The IPL report contains the following components:\n");
	while (ptr < end) {
		if (ptr->flags & IPL_RB_COMPONENT_FLAG_SIGNED) {
			if (ptr->flags & IPL_RB_COMPONENT_FLAG_VERIFIED)
				str = "signed, verified";
			else
				str = "signed, verification failed";
		} else {
			str = "not signed";
		}
		pr_info("%016llx - %016llx (%s)\n",
			ptr->addr, ptr->addr + ptr->len, str);
		ptr++;
	}
}

/*
 * Print avoiding interpretation of % in buf and taking bootdebug option
 * into consideration.
 */
static void __init print_rb_entry(const char *buf)
{
	char fmt[] = KERN_SOH "0boot: %s";
	int level = printk_get_level(buf);

	buf = skip_timestamp(printk_skip_level(buf));
	if (level == KERN_DEBUG[1] && (!bootdebug || !bootdebug_filter_match(buf)))
		return;

	fmt[1] = level;
	printk(fmt, buf);
}

/*
 * Setup function called from init/main.c just after the banner
 * was printed.
 */

void __init setup_arch(char **cmdline_p)
{
	/*
	 * print what head.S has found out about the machine
	 */
	if (machine_is_vm())
		pr_info("Linux is running as a z/VM "
			"guest operating system in 64-bit mode\n");
	else if (machine_is_kvm())
		pr_info("Linux is running under KVM in 64-bit mode\n");
	else if (machine_is_lpar())
		pr_info("Linux is running natively in 64-bit mode\n");
	else
		pr_info("Linux is running as a guest in 64-bit mode\n");
	/* Print decompressor messages if not already printed */
	if (!boot_earlyprintk)
		boot_rb_foreach(print_rb_entry);

	if (machine_has_relocated_lowcore())
		pr_info("Lowcore relocated to 0x%px\n", get_lowcore());

	log_component_list();

	/* Have one command line that is parsed and saved in /proc/cmdline */
	/* boot_command_line has been already set up in early.c */
	*cmdline_p = boot_command_line;

	ROOT_DEV = Root_RAM0;

	setup_initial_init_mm(_text, _etext, _edata, _end);

	if (IS_ENABLED(CONFIG_EXPOLINE_AUTO))
		nospec_auto_detect();

	jump_label_init();
	parse_early_param();
#ifdef CONFIG_CRASH_DUMP
	/* Deactivate elfcorehdr= kernel parameter */
	elfcorehdr_addr = ELFCORE_ADDR_MAX;
#endif

	os_info_init();
	setup_ipl();
	setup_control_program_code();

	/* Do some memory reservations *before* memory is added to memblock */
	reserve_pgtables();
	reserve_lowcore();
	reserve_kernel();
	reserve_initrd();
	reserve_certificate_list();
	reserve_physmem_info();
	memblock_set_current_limit(ident_map_size);
	memblock_allow_resize();

	/* Get information about *all* installed memory */
	memblock_add_physmem_info();
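	/*
	 * memblock now knows about all usable memory; the reservations done
	 * above are honored even though they were recorded before the
	 * memory was added to memblock.
	 */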

	free_physmem_info();
	setup_memory_end();
	memblock_dump_all();
	setup_memory();

	relocate_amode31_section();
	setup_cr();
	setup_uv();
	dma_contiguous_reserve(ident_map_size);
	vmcp_cma_reserve();
	if (cpu_has_edat2())
		hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);

	reserve_crashkernel();
#ifdef CONFIG_CRASH_DUMP
	/*
	 * Be aware that smp_save_dump_secondary_cpus() triggers a system reset.
	 * Therefore CPU and device initialization should be done afterwards.
	 */
	smp_save_dump_secondary_cpus();
#endif

	setup_resources();
	setup_lowcore();
	smp_fill_possible_mask();
	cpu_detect_mhz_feature();
	cpu_init();
	numa_setup();
	smp_detect_cpus();
	topology_init_early();
	setup_protection_map();
	/*
	 * Create kernel page tables.
	 */
	paging_init();

	/*
	 * After paging_init created the kernel page table, the new PSWs
	 * in lowcore can now run with DAT enabled.
	 */
#ifdef CONFIG_CRASH_DUMP
	smp_save_dump_ipl_cpu();
#endif

	/* Setup default console */
	conmode_default();
	set_preferred_console();

	apply_alternative_instructions();
	if (IS_ENABLED(CONFIG_EXPOLINE))
		nospec_init_branches();

	/* Setup zfcp/nvme dump support */
	setup_zfcpdump();

	/* Add system specific data to the random pool */
	setup_randomness();
}

void __init arch_cpu_finalize_init(void)
{
	sclp_init();
}