/* xref: /linux/arch/s390/boot/startup.c (revision e78f70bad29c5ae1e1076698b690b15794e9b81e) */
// SPDX-License-Identifier: GPL-2.0
#define boot_fmt(fmt) "startup: " fmt
#include <linux/string.h>
#include <linux/elf.h>
#include <asm/page-states.h>
#include <asm/boot_data.h>
#include <asm/extmem.h>
#include <asm/sections.h>
#include <asm/diag288.h>
#include <asm/maccess.h>
#include <asm/machine.h>
#include <asm/sysinfo.h>
#include <asm/cpu_mf.h>
#include <asm/setup.h>
#include <asm/timex.h>
#include <asm/kasan.h>
#include <asm/kexec.h>
#include <asm/sclp.h>
#include <asm/diag.h>
#include <asm/uv.h>
#include <asm/abs_lowcore.h>
#include <asm/physmem_info.h>
#include "decompressor.h"
#include "boot.h"
#include "uv.h"

struct vm_layout __bootdata_preserved(vm_layout);
unsigned long __bootdata_preserved(__abs_lowcore);
unsigned long __bootdata_preserved(__memcpy_real_area);
pte_t *__bootdata_preserved(memcpy_real_ptep);
unsigned long __bootdata_preserved(VMALLOC_START);
unsigned long __bootdata_preserved(VMALLOC_END);
struct page *__bootdata_preserved(vmemmap);
unsigned long __bootdata_preserved(vmemmap_size);
unsigned long __bootdata_preserved(MODULES_VADDR);
unsigned long __bootdata_preserved(MODULES_END);
unsigned long __bootdata_preserved(max_mappable);
unsigned long __bootdata_preserved(page_noexec_mask);
unsigned long __bootdata_preserved(segment_noexec_mask);
unsigned long __bootdata_preserved(region_noexec_mask);
union tod_clock __bootdata_preserved(tod_clock_base);
u64 __bootdata_preserved(clock_comparator_max) = -1UL;

u64 __bootdata_preserved(stfle_fac_list[16]);
struct oldmem_data __bootdata_preserved(oldmem_data);

void error(char *x)
{
	boot_emerg("%s\n", x);
	boot_emerg(" -- System halted\n");
	disabled_wait();
}

static char sysinfo_page[PAGE_SIZE] __aligned(PAGE_SIZE);
static void detect_machine_type(void)
{
	struct sysinfo_3_2_2 *vmms = (struct sysinfo_3_2_2 *)&sysinfo_page;

	/* Check current-configuration-level */
	if (stsi(NULL, 0, 0, 0) <= 2) {
		set_machine_feature(MFEATURE_LPAR);
		return;
	}
	/* Get virtual-machine cpu information. */
	if (stsi(vmms, 3, 2, 2) || !vmms->count)
		return;
	/* Detect known hypervisors */
	if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3))	/* "KVM" in EBCDIC */
		set_machine_feature(MFEATURE_KVM);
	else if (!memcmp(vmms->vm[0].cpi, "\xa9\x61\xe5\xd4", 4))	/* "z/VM" in EBCDIC */
		set_machine_feature(MFEATURE_VM);
}

static void detect_diag288(void)
{
	/* "BEGIN" in EBCDIC character set */
	static const char cmd[] = "\xc2\xc5\xc7\xc9\xd5";
	unsigned long action, len;

	action = machine_is_vm() ? (unsigned long)cmd : LPARWDT_RESTART;
	len = machine_is_vm() ? sizeof(cmd) : 0;
	if (__diag288(WDT_FUNC_INIT, MIN_INTERVAL, action, len))
		return;
	__diag288(WDT_FUNC_CANCEL, 0, 0, 0);
	set_machine_feature(MFEATURE_DIAG288);
}
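
/*
 * Note on the probe pattern above: the watchdog is armed with
 * WDT_FUNC_INIT and, if that succeeds, immediately disarmed again with
 * WDT_FUNC_CANCEL. Only the availability of diagnose 0x288 is tested
 * here; actual watchdog use presumably comes later from the diag288
 * watchdog driver.
 */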

static void detect_diag9c(void)
{
	unsigned int cpu;
	int rc = 1;

	cpu = stap();
	/* Probe diagnose 0x9c; if it program-checks, rc stays at 1 */
	asm_inline volatile(
		"	diag	%[cpu],%%r0,0x9c\n"
		"0:	lhi	%[rc],0\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: [rc] "+d" (rc)
		: [cpu] "d" (cpu)
		: "cc", "memory");
	if (!rc)
		set_machine_feature(MFEATURE_DIAG9C);
}

static void reset_tod_clock(void)
{
	union tod_clock clk;

	if (store_tod_clock_ext_cc(&clk) == 0)
		return;
	/* TOD clock not running. Set the clock to Unix Epoch. */
	if (set_tod_clock(TOD_UNIX_EPOCH) || store_tod_clock_ext_cc(&clk))
		disabled_wait();
	memset(&tod_clock_base, 0, sizeof(tod_clock_base));
	tod_clock_base.tod = TOD_UNIX_EPOCH;
	get_lowcore()->last_update_clock = TOD_UNIX_EPOCH;
}
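
/*
 * Background, for illustration: the architected TOD clock counts from
 * the epoch 1900-01-01, so TOD_UNIX_EPOCH is the TOD value that
 * corresponds to 1970-01-01. A clock that is found not running is set
 * to that value, so time starts at the Unix epoch rather than at zero.
 */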

static void detect_facilities(void)
{
	if (cpu_has_edat1())
		local_ctl_set_bit(0, CR0_EDAT_BIT);
	page_noexec_mask = -1UL;
	segment_noexec_mask = -1UL;
	region_noexec_mask = -1UL;
	if (!cpu_has_nx()) {
		page_noexec_mask &= ~_PAGE_NOEXEC;
		segment_noexec_mask &= ~_SEGMENT_ENTRY_NOEXEC;
		region_noexec_mask &= ~_REGION_ENTRY_NOEXEC;
	}
	if (IS_ENABLED(CONFIG_PCI) && test_facility(153))
		set_machine_feature(MFEATURE_PCI_MIO);
	reset_tod_clock();
	if (test_facility(139) && (tod_clock_base.tod >> 63)) {
		/* Enable signed clock comparator comparisons */
		set_machine_feature(MFEATURE_SCC);
		clock_comparator_max = -1UL >> 1;
		local_ctl_set_bit(0, CR0_CLOCK_COMPARATOR_SIGN_BIT);
	}
	if (test_facility(50) && test_facility(73)) {
		set_machine_feature(MFEATURE_TX);
		local_ctl_set_bit(0, CR0_TRANSACTIONAL_EXECUTION_BIT);
	}
	if (cpu_has_vx())
		local_ctl_set_bit(0, CR0_VECTOR_BIT);
}
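
/*
 * Facility bits tested above, as commonly documented (see the Principles
 * of Operation for the authoritative list): 153 is the PCI MIO facility,
 * 139 the multiple-epoch facility (which is why signed clock-comparator
 * comparisons become useful), and 50/73 the constrained and regular
 * transactional-execution facilities.
 */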

static int cmma_test_essa(void)
{
	unsigned long tmp = 0;
	int rc = 1;

	/* Test ESSA_GET_STATE */
	asm_inline volatile(
		"	.insn	rrf,0xb9ab0000,%[tmp],%[tmp],%[cmd],0\n"
		"0:	lhi	%[rc],0\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: [rc] "+d" (rc), [tmp] "+d" (tmp)
		: [cmd] "i" (ESSA_GET_STATE)
		: "cc", "memory");
	return rc;
}

static void cmma_init(void)
{
	if (!cmma_flag)
		return;
	if (cmma_test_essa()) {
		cmma_flag = 0;
		return;
	}
	if (test_facility(147))
		cmma_flag = 2;
}

static void setup_lpp(void)
{
	get_lowcore()->current_pid = 0;
	get_lowcore()->lpp = LPP_MAGIC;
	/* Facility 40: load-program-parameter */
	if (test_facility(40))
		lpp(&get_lowcore()->lpp);
}

#ifdef CONFIG_KERNEL_UNCOMPRESSED
static unsigned long mem_safe_offset(void)
{
	return (unsigned long)_compressed_start;
}

static void deploy_kernel(void *output)
{
	void *uncompressed_start = (void *)_compressed_start;

	if (output == uncompressed_start)
		return;
	memmove(output, uncompressed_start, vmlinux.image_size);
	memset(uncompressed_start, 0, vmlinux.image_size);
}
#endif
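
/*
 * With CONFIG_KERNEL_UNCOMPRESSED the payload at _compressed_start is the
 * plain vmlinux image, so "deploying" it degenerates to a memmove() to the
 * target location; in the compressed case the corresponding helpers are
 * provided by the decompressor code instead.
 */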

static void rescue_initrd(unsigned long min, unsigned long max)
{
	unsigned long old_addr, addr, size;

	if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
		return;
	if (!get_physmem_reserved(RR_INITRD, &addr, &size))
		return;
	if (addr >= min && addr + size <= max)
		return;
	old_addr = addr;
	physmem_free(RR_INITRD);
	addr = physmem_alloc_or_die(RR_INITRD, size, 0);
	memmove((void *)addr, (void *)old_addr, size);
}

static void copy_bootdata(void)
{
	if (__boot_data_end - __boot_data_start != vmlinux.bootdata_size)
		error(".boot.data section size mismatch");
	memcpy((void *)vmlinux.bootdata_off, __boot_data_start, vmlinux.bootdata_size);
	if (__boot_data_preserved_end - __boot_data_preserved_start != vmlinux.bootdata_preserved_size)
		error(".boot.preserved.data section size mismatch");
	memcpy((void *)vmlinux.bootdata_preserved_off, __boot_data_preserved_start, vmlinux.bootdata_preserved_size);
}
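
/*
 * The two memcpy() calls above hand boot-time data over to the decompressed
 * image: .boot.data is consumed during early setup, while
 * .boot.preserved.data (everything declared with __bootdata_preserved at the
 * top of this file) remains valid for the lifetime of the kernel.
 */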

static void kaslr_adjust_relocs(unsigned long min_addr, unsigned long max_addr,
				unsigned long offset, unsigned long phys_offset)
{
	int *reloc;
	long loc;

	/* Adjust R_390_64 relocations */
	for (reloc = (int *)__vmlinux_relocs_64_start; reloc < (int *)__vmlinux_relocs_64_end; reloc++) {
		loc = (long)*reloc + phys_offset;
		if (loc < min_addr || loc > max_addr)
			error("64-bit relocation outside of kernel!\n");
		*(u64 *)loc += offset;
	}
}
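
/*
 * Worked example with made-up numbers: assume phys_offset is 0x52000000 and
 * a .vmlinux.relocs entry reads 0x00123458. Then
 *
 *	loc = 0x00123458 + 0x52000000 = 0x52123458;
 *	*(u64 *)0x52123458 += offset;
 *
 * i.e. the 64-bit word at that physical location, which holds a link-time
 * virtual address, is rebased by the KASLR offset into the randomized
 * virtual mapping.
 */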

static void kaslr_adjust_got(unsigned long offset)
{
	u64 *entry;

	/*
	 * Adjust GOT entries, except for ones for undefined weak symbols
	 * that resolved to zero. This also skips the first three reserved
	 * entries on s390x that are zero.
	 */
	for (entry = (u64 *)vmlinux.got_start; entry < (u64 *)vmlinux.got_end; entry++) {
		if (*entry)
			*entry += offset;
	}
}
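
/*
 * For illustration: a GOT entry that contained the link-time address of some
 * symbol gets the KASLR displacement added, so it points at the symbol's
 * randomized virtual address. Entries that are zero - undefined weak symbols
 * and the reserved slots - are left alone, since 0 + offset would fabricate
 * a bogus pointer.
 */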

/*
 * Merge information from several sources into a single ident_map_size value.
 * "ident_map_size" represents the upper limit of physical memory we may ever
 * reach. It covers not only online memory, but may also include standby
 * (offline) memory and memory areas reserved for other purposes (e.g., memory
 * devices such as virtio-mem).
 *
 * "ident_map_size" can be lower than the actual standby/reserved or even
 * online memory present, due to limiting factors. We should never go above
 * this limit. It is the size of our identity mapping.
 *
 * Consider the following factors:
 * 1. max_physmem_end - end of physical memory online, standby or reserved.
 *    Always >= end of the last online memory range (get_physmem_online_end()).
 * 2. CONFIG_MAX_PHYSMEM_BITS - the maximum size of physical memory the
 *    kernel is able to support.
 * 3. "mem=" kernel command line option which limits physical memory usage.
 * 4. OLDMEM_BASE which is a kdump memory limit when the kernel is executed as
 *    crash kernel.
 * 5. "hsa" size which is a memory limit when the kernel is executed during
 *    zfcp/nvme dump.
 */
static void setup_ident_map_size(unsigned long max_physmem_end)
{
	unsigned long hsa_size;

	ident_map_size = max_physmem_end;
	if (memory_limit)
		ident_map_size = min(ident_map_size, memory_limit);
	ident_map_size = min(ident_map_size, 1UL << MAX_PHYSMEM_BITS);

#ifdef CONFIG_CRASH_DUMP
	if (oldmem_data.start) {
		__kaslr_enabled = 0;
		ident_map_size = min(ident_map_size, oldmem_data.size);
		boot_debug("kdump memory limit:  0x%016lx\n", oldmem_data.size);
	} else if (ipl_block_valid && is_ipl_block_dump()) {
		__kaslr_enabled = 0;
		if (!sclp_early_get_hsa_size(&hsa_size) && hsa_size) {
			ident_map_size = min(ident_map_size, hsa_size);
			boot_debug("Stand-alone dump limit: 0x%016lx\n", hsa_size);
		}
	}
#endif
	boot_debug("Identity map size:   0x%016lx\n", ident_map_size);
}
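
/*
 * Worked example with made-up numbers: with 16G of memory present (online
 * plus standby), "mem=8G" on the command line and no dump limits active,
 * ident_map_size becomes min(16G, 8G, 1UL << MAX_PHYSMEM_BITS) = 8G.
 */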

#define FIXMAP_SIZE	round_up(MEMCPY_REAL_SIZE + ABS_LOWCORE_MAP_SIZE, sizeof(struct lowcore))

static unsigned long get_vmem_size(unsigned long identity_size,
				   unsigned long vmemmap_size,
				   unsigned long vmalloc_size,
				   unsigned long rte_size)
{
	unsigned long max_mappable, vsize;

	max_mappable = max(identity_size, MAX_DCSS_ADDR);
	vsize = round_up(SZ_2G + max_mappable, rte_size) +
		round_up(vmemmap_size, rte_size) +
		FIXMAP_SIZE + MODULES_LEN + KASLR_LEN;
	if (IS_ENABLED(CONFIG_KMSAN))
		vsize += MODULES_LEN * 2;
	return size_add(vsize, vmalloc_size);
}
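
/*
 * rte_size is the amount of address space covered by one entry of the chosen
 * top-level region table; with the usual s390 layout that is 2G for a
 * region-third-level entry (_REGION3_SIZE) and 4T for a region-second-level
 * entry (_REGION2_SIZE). Rounding the summands up to rte_size keeps the
 * individual areas aligned to top-level table entries.
 */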

static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
{
	unsigned long vmemmap_start;
	unsigned long kernel_start;
	unsigned long asce_limit;
	unsigned long rte_size;
	unsigned long pages;
	unsigned long vsize;
	unsigned long vmax;

	pages = ident_map_size / PAGE_SIZE;
	/* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */
	vmemmap_size = SECTION_ALIGN_UP(pages) * sizeof(struct page);

	/* choose kernel address space layout: 4 or 3 levels. */
	BUILD_BUG_ON(!IS_ALIGNED(TEXT_OFFSET, THREAD_SIZE));
	BUILD_BUG_ON(!IS_ALIGNED(__NO_KASLR_START_KERNEL, THREAD_SIZE));
	BUILD_BUG_ON(__NO_KASLR_END_KERNEL > _REGION1_SIZE);
	vsize = get_vmem_size(ident_map_size, vmemmap_size, vmalloc_size, _REGION3_SIZE);
	boot_debug("vmem size estimated: 0x%016lx\n", vsize);
	if (IS_ENABLED(CONFIG_KASAN) || __NO_KASLR_END_KERNEL > _REGION2_SIZE ||
	    (vsize > _REGION2_SIZE && kaslr_enabled())) {
		asce_limit = _REGION1_SIZE;
		if (__NO_KASLR_END_KERNEL > _REGION2_SIZE) {
			rte_size = _REGION2_SIZE;
			vsize = get_vmem_size(ident_map_size, vmemmap_size, vmalloc_size, _REGION2_SIZE);
		} else {
			rte_size = _REGION3_SIZE;
		}
	} else {
		asce_limit = _REGION2_SIZE;
		rte_size = _REGION3_SIZE;
	}

	/*
	 * Force modules and the vmalloc area below the ultravisor secure
	 * storage limit, so that any vmalloc allocation we make can be used
	 * to back secure guest storage.
	 *
	 * Assume the secure storage limit always exceeds _REGION2_SIZE,
	 * otherwise asce_limit and rte_size would have been adjusted.
	 */
	vmax = adjust_to_uv_max(asce_limit);
	boot_debug("%d level paging       0x%016lx vmax\n", vmax == _REGION1_SIZE ? 4 : 3, vmax);
#ifdef CONFIG_KASAN
	BUILD_BUG_ON(__NO_KASLR_END_KERNEL > KASAN_SHADOW_START);
	boot_debug("KASAN shadow area:   0x%016lx-0x%016lx\n", KASAN_SHADOW_START, KASAN_SHADOW_END);
	/* force vmalloc and modules below kasan shadow */
	vmax = min(vmax, KASAN_SHADOW_START);
#endif
	vsize = min(vsize, vmax);
	if (kaslr_enabled()) {
		unsigned long kernel_end, kaslr_len, slots, pos;

		kaslr_len = max(KASLR_LEN, vmax - vsize);
		slots = DIV_ROUND_UP(kaslr_len - kernel_size, THREAD_SIZE);
		if (get_random(slots, &pos))
			pos = 0;
		kernel_end = vmax - pos * THREAD_SIZE;
		kernel_start = round_down(kernel_end - kernel_size, THREAD_SIZE);
		boot_debug("Randomization range: 0x%016lx-0x%016lx\n", vmax - kaslr_len, vmax);
		boot_debug("kernel image:        0x%016lx-0x%016lx (kaslr)\n", kernel_start,
			   kernel_start + kernel_size);
	} else if (vmax < __NO_KASLR_END_KERNEL || vsize > __NO_KASLR_END_KERNEL) {
		kernel_start = round_down(vmax - kernel_size, THREAD_SIZE);
		boot_debug("kernel image:        0x%016lx-0x%016lx (constrained)\n", kernel_start,
			   kernel_start + kernel_size);
	} else {
		kernel_start = __NO_KASLR_START_KERNEL;
		boot_debug("kernel image:        0x%016lx-0x%016lx (nokaslr)\n", kernel_start,
			   kernel_start + kernel_size);
	}
	__kaslr_offset = kernel_start;
	boot_debug("__kaslr_offset:      0x%016lx\n", __kaslr_offset);

	MODULES_END = round_down(kernel_start, _SEGMENT_SIZE);
	MODULES_VADDR = MODULES_END - MODULES_LEN;
	VMALLOC_END = MODULES_VADDR;
	if (IS_ENABLED(CONFIG_KMSAN))
		VMALLOC_END -= MODULES_LEN * 2;
	boot_debug("modules area:        0x%016lx-0x%016lx\n", MODULES_VADDR, MODULES_END);

	/* allow vmalloc area to occupy up to about half of the remaining virtual space */
	vsize = (VMALLOC_END - FIXMAP_SIZE) / 2;
	vsize = round_down(vsize, _SEGMENT_SIZE);
	vmalloc_size = min(vmalloc_size, vsize);
	if (IS_ENABLED(CONFIG_KMSAN)) {
		/* take 2/3 of vmalloc area for KMSAN shadow and origins */
		vmalloc_size = round_down(vmalloc_size / 3, _SEGMENT_SIZE);
		VMALLOC_END -= vmalloc_size * 2;
	}
	VMALLOC_START = VMALLOC_END - vmalloc_size;
	boot_debug("vmalloc area:        0x%016lx-0x%016lx\n", VMALLOC_START, VMALLOC_END);

	__memcpy_real_area = round_down(VMALLOC_START - MEMCPY_REAL_SIZE, PAGE_SIZE);
	boot_debug("memcpy real area:    0x%016lx-0x%016lx\n", __memcpy_real_area,
		   __memcpy_real_area + MEMCPY_REAL_SIZE);
	__abs_lowcore = round_down(__memcpy_real_area - ABS_LOWCORE_MAP_SIZE,
				   sizeof(struct lowcore));
	boot_debug("abs lowcore:         0x%016lx-0x%016lx\n", __abs_lowcore,
		   __abs_lowcore + ABS_LOWCORE_MAP_SIZE);

	/* split remaining virtual space between 1:1 mapping & vmemmap array */
	pages = __abs_lowcore / (PAGE_SIZE + sizeof(struct page));
	pages = SECTION_ALIGN_UP(pages);
	/* keep vmemmap_start aligned to a top level region table entry */
	vmemmap_start = round_down(__abs_lowcore - pages * sizeof(struct page), rte_size);
	/* make sure the identity map doesn't overlap with vmemmap */
	ident_map_size = min(ident_map_size, vmemmap_start);
	vmemmap_size = SECTION_ALIGN_UP(ident_map_size / PAGE_SIZE) * sizeof(struct page);
	/* make sure vmemmap doesn't overlap with the absolute lowcore area */
	if (vmemmap_start + vmemmap_size > __abs_lowcore) {
		vmemmap_size = SECTION_ALIGN_DOWN(ident_map_size / PAGE_SIZE) * sizeof(struct page);
		ident_map_size = vmemmap_size / sizeof(struct page) * PAGE_SIZE;
	}
	vmemmap = (struct page *)vmemmap_start;
	/* maximum address for which linear mapping could be created (DCSS, memory) */
	BUILD_BUG_ON(MAX_DCSS_ADDR > (1UL << MAX_PHYSMEM_BITS));
	max_mappable = max(ident_map_size, MAX_DCSS_ADDR);
	max_mappable = min(max_mappable, vmemmap_start);
#ifdef CONFIG_RANDOMIZE_IDENTITY_BASE
	__identity_base = round_down(vmemmap_start - max_mappable, rte_size);
#endif
	boot_debug("identity map:        0x%016lx-0x%016lx\n", __identity_base,
		   __identity_base + ident_map_size);

	return asce_limit;
}
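
/*
 * Resulting virtual layout, from high to low addresses (a sketch derived
 * from the code above; KASAN/KMSAN shadow areas omitted):
 *
 *	kernel image		at __kaslr_offset, just below vmax
 *	modules			MODULES_VADDR..MODULES_END
 *	vmalloc			VMALLOC_START..VMALLOC_END
 *	memcpy real area	from __memcpy_real_area
 *	absolute lowcore	from __abs_lowcore
 *	vmemmap			from vmemmap_start
 *	identity mapping	from __identity_base upwards
 */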

/*
 * This function clears the BSS section of the decompressed Linux kernel and NOT the decompressor's.
 */
static void clear_bss_section(unsigned long kernel_start)
{
	memset((void *)kernel_start + vmlinux.image_size, 0, vmlinux.bss_size);
}

/*
 * Set vmalloc area size to an 8th of (potential) physical memory
 * size, unless size has been set by kernel command line parameter.
 */
static void setup_vmalloc_size(void)
{
	unsigned long size;

	if (vmalloc_size_set)
		return;
	size = round_up(ident_map_size / 8, _SEGMENT_SIZE);
	vmalloc_size = max(size, vmalloc_size);
}
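
/*
 * If "vmalloc=" was given on the command line, that value is used as-is;
 * otherwise, e.g. with ident_map_size = 16G, the vmalloc area defaults to
 * at least ident_map_size / 8 = 2G.
 */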

static void kaslr_adjust_vmlinux_info(long offset)
{
	vmlinux.bootdata_off += offset;
	vmlinux.bootdata_preserved_off += offset;
	vmlinux.got_start += offset;
	vmlinux.got_end += offset;
	vmlinux.init_mm_off += offset;
	vmlinux.swapper_pg_dir_off += offset;
	vmlinux.invalid_pg_dir_off += offset;
	vmlinux.alt_instructions += offset;
	vmlinux.alt_instructions_end += offset;
#ifdef CONFIG_KASAN
	vmlinux.kasan_early_shadow_page_off += offset;
	vmlinux.kasan_early_shadow_pte_off += offset;
	vmlinux.kasan_early_shadow_pmd_off += offset;
	vmlinux.kasan_early_shadow_pud_off += offset;
	vmlinux.kasan_early_shadow_p4d_off += offset;
#endif
}

void startup_kernel(void)
{
	unsigned long vmlinux_size = vmlinux.image_size + vmlinux.bss_size;
	unsigned long nokaslr_text_lma, text_lma = 0, amode31_lma = 0;
	unsigned long kernel_size = TEXT_OFFSET + vmlinux_size;
	unsigned long kaslr_large_page_offset;
	unsigned long max_physmem_end;
	unsigned long asce_limit;
	unsigned long safe_addr;
	psw_t psw;

	setup_lpp();
	store_ipl_parmblock();
	uv_query_info();
	setup_boot_command_line();
	parse_boot_command_line();

	/*
	 * The non-randomized kernel physical start address must be
	 * _SEGMENT_SIZE aligned (see below).
	 */
	nokaslr_text_lma = ALIGN(mem_safe_offset(), _SEGMENT_SIZE);
	safe_addr = PAGE_ALIGN(nokaslr_text_lma + vmlinux_size);

	/*
	 * Reserve decompressor memory together with decompression heap,
	 * buffer and memory which might be occupied by uncompressed kernel
	 * (if KASLR is off or failed).
	 */
	physmem_reserve(RR_DECOMPRESSOR, 0, safe_addr);
	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && parmarea.initrd_size)
		physmem_reserve(RR_INITRD, parmarea.initrd_start, parmarea.initrd_size);
	oldmem_data.start = parmarea.oldmem_base;
	oldmem_data.size = parmarea.oldmem_size;

	read_ipl_report();
	sclp_early_read_info();
	sclp_early_detect_machine_features();
	detect_facilities();
	detect_diag9c();
	detect_machine_type();
	/* detect_diag288() needs machine type */
	detect_diag288();
	cmma_init();
	sanitize_prot_virt_host();
	max_physmem_end = detect_max_physmem_end();
	setup_ident_map_size(max_physmem_end);
	setup_vmalloc_size();
	asce_limit = setup_kernel_memory_layout(kernel_size);
	/* got final ident_map_size, physmem allocations could be performed now */
	physmem_set_usable_limit(ident_map_size);
	detect_physmem_online_ranges(max_physmem_end);
	save_ipl_cert_comp_list();
	rescue_initrd(safe_addr, ident_map_size);

	/*
	 * __kaslr_offset_phys must be _SEGMENT_SIZE aligned, so the lower
	 * 20 bits (the offset within a large page) are zero. Copy the last
	 * 20 bits of __kaslr_offset, which is THREAD_SIZE aligned, to
	 * __kaslr_offset_phys.
	 *
	 * With this the last 20 bits of __kaslr_offset_phys and __kaslr_offset
	 * are identical, which is required to allow for large mappings of the
	 * kernel image.
	 */
	kaslr_large_page_offset = __kaslr_offset & ~_SEGMENT_MASK;
	if (kaslr_enabled()) {
		unsigned long size = vmlinux_size + kaslr_large_page_offset;

		text_lma = randomize_within_range(size, _SEGMENT_SIZE, TEXT_OFFSET, ident_map_size);
	}
	if (!text_lma)
		text_lma = nokaslr_text_lma;
	text_lma |= kaslr_large_page_offset;
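
	/*
	 * Example with made-up numbers: if __kaslr_offset happens to end in
	 * 0x84000, text_lma is first randomized to a _SEGMENT_SIZE (1M)
	 * boundary and then gets 0x84000 OR-ed into its low 20 bits. The
	 * virtual and physical addresses of the image are thereby congruent
	 * modulo 1M, which is what allows mapping it with 1M large pages.
	 */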

	/*
	 * The [__kaslr_offset_phys..__kaslr_offset_phys + TEXT_OFFSET] region
	 * is never accessed via the kernel image mapping as per the linker
	 * script:
	 *
	 *	. = TEXT_OFFSET;
	 *
	 * Therefore, this region could be used for something else and does
	 * not need to be reserved. See how it is skipped in setup_vmem().
	 */
	__kaslr_offset_phys = text_lma - TEXT_OFFSET;
	kaslr_adjust_vmlinux_info(__kaslr_offset_phys);
	physmem_reserve(RR_VMLINUX, text_lma, vmlinux_size);
	deploy_kernel((void *)text_lma);

	/* vmlinux decompression is done, shrink reserved low memory */
	physmem_reserve(RR_DECOMPRESSOR, 0, (unsigned long)_decompressor_end);

	/*
	 * If KASLR is enabled, the randomized location of the .amode31
	 * section might overlap with the .vmlinux.relocs section. To avoid
	 * that, the randomize_within_range() call below could have been made
	 * with __vmlinux_relocs_64_end as the lower range address. However,
	 * the .amode31 section is written to by the decompressed kernel - at
	 * that time the contents of .vmlinux.relocs are not needed anymore.
	 * Conversely, .vmlinux.relocs is read only by the decompressor, even
	 * before the kernel is started. Therefore, if the two sections
	 * overlap there is no risk of corrupting any data.
	 */
	if (kaslr_enabled()) {
		unsigned long amode31_min;

		amode31_min = (unsigned long)_decompressor_end;
		amode31_lma = randomize_within_range(vmlinux.amode31_size, PAGE_SIZE, amode31_min, SZ_2G);
	}
	if (!amode31_lma)
		amode31_lma = text_lma - vmlinux.amode31_size;
	physmem_reserve(RR_AMODE31, amode31_lma, vmlinux.amode31_size);

	/*
	 * The order of the following operations is important:
	 *
	 * - kaslr_adjust_relocs() must follow clear_bss_section() to establish
	 *   static memory references to data in .bss to be used by setup_vmem()
	 *   (i.e. init_mm.pgd)
	 *
	 * - setup_vmem() must follow kaslr_adjust_relocs() to be able to use
	 *   static memory references to data in .bss (i.e. init_mm.pgd)
	 *
	 * - copy_bootdata() must follow setup_vmem() to propagate changes
	 *   to bootdata made by setup_vmem()
	 */
	clear_bss_section(text_lma);
	kaslr_adjust_relocs(text_lma, text_lma + vmlinux.image_size,
			    __kaslr_offset, __kaslr_offset_phys);
	kaslr_adjust_got(__kaslr_offset);
	setup_vmem(__kaslr_offset, __kaslr_offset + kernel_size, asce_limit);
	dump_physmem_reserved();
	copy_bootdata();
	__apply_alternatives((struct alt_instr *)_vmlinux_info.alt_instructions,
			     (struct alt_instr *)_vmlinux_info.alt_instructions_end,
			     ALT_CTX_EARLY);

	/*
	 * Save KASLR offset for early dumps, before vmcore_info is set.
	 * Mark it as odd to distinguish it from a real vmcore_info pointer.
	 */
	get_lowcore()->vmcore_info = __kaslr_offset_phys ? __kaslr_offset_phys | 0x1UL : 0;

	/*
	 * Jump to the decompressed kernel entry point and switch DAT mode on.
	 */
	psw.addr = __kaslr_offset + vmlinux.entry;
	psw.mask = PSW_KERNEL_BITS;
	boot_debug("Starting kernel at:  0x%016lx\n", psw.addr);
	__load_psw(psw);
}
647