xref: /linux/arch/x86/kernel/crash.c (revision 0074281bb6316108e0cff094bd4db78ab3eee236)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Architecture specific (i386/x86_64) functions for kexec based crash dumps.
 *
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *
 * Copyright (C) IBM Corporation, 2004. All rights reserved.
 * Copyright (C) Red Hat Inc., 2014. All rights reserved.
 * Authors:
 *      Vivek Goyal <vgoyal@redhat.com>
 *
 */

#define pr_fmt(fmt)	"kexec: " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/memblock.h>

#include <asm/bootparam.h>
#include <asm/processor.h>
#include <asm/hardirq.h>
#include <asm/nmi.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/e820/types.h>
#include <asm/io_apic.h>
#include <asm/hpet.h>
#include <linux/kdebug.h>
#include <asm/cpu.h>
#include <asm/reboot.h>
#include <asm/intel_pt.h>
#include <asm/crash.h>
#include <asm/cmdline.h>
#include <asm/sev.h>

/* Used while preparing memory map entries for second kernel */
struct crash_memmap_data {
	struct boot_params *params;
	/* Type of memory */
	unsigned int type;
};

#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)

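/*
 * Runs on each remote CPU, in NMI context, via the shootdown below: save
 * that CPU's register state for the vmcore, then quiesce Intel PT, SEV
 * and the local APIC before the CPU is parked.
 */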
static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
{
	crash_save_cpu(regs, cpu);

	/* Disable Intel PT to stop its logging. */
	cpu_emergency_stop_pt();

	kdump_sev_callback();

	disable_local_APIC();
}

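/*
 * NMI all other CPUs so each records its state through the callback above,
 * then silence the local APIC on the crashing CPU as well.
 */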
void kdump_nmi_shootdown_cpus(void)
{
	nmi_shootdown_cpus(kdump_nmi_callback);

	disable_local_APIC();
}

/* Override the weak function in kernel/panic.c */
void crash_smp_send_stop(void)
{
	static int cpus_stopped;

	if (cpus_stopped)
		return;

	if (smp_ops.crash_stop_other_cpus)
		smp_ops.crash_stop_other_cpus();
	else
		smp_send_stop();

	cpus_stopped = 1;
}

#else
void crash_smp_send_stop(void)
{
	/* There are no other CPUs to shoot down */
}
#endif

void native_machine_crash_shutdown(struct pt_regs *regs)
{
	/*
	 * This function is only called after the system has panicked or is
	 * otherwise in a critical state. The minimum amount of code needed
	 * to allow a kexec'd kernel to run successfully has to happen here.
	 *
	 * In practice this means shooting down the other CPUs in an SMP
	 * system.
	 */
	/* The kernel is broken so disable interrupts */
	local_irq_disable();

	crash_smp_send_stop();

	cpu_emergency_disable_virtualization();

	/* Disable Intel PT to stop its logging. */
	cpu_emergency_stop_pt();

#ifdef CONFIG_X86_IO_APIC
	/* Prevent crash_kexec() from deadlocking on ioapic_lock. */
	ioapic_zap_locks();
	clear_IO_APIC();
#endif
	lapic_shutdown();
	restore_boot_irq_mode();
#ifdef CONFIG_HPET_TIMER
	hpet_disable();
#endif

	/*
	 * Non-crash kexec calls enc_kexec_begin() while scheduling is still
	 * active. This allows the callback to wait until all in-flight
	 * shared<->private conversions are complete. In a crash scenario,
	 * enc_kexec_begin() gets called after all but one CPU have been shut
	 * down and interrupts have been disabled. This allows the callback to
	 * detect a race with the conversion and report it.
	 */
	x86_platform.guest.enc_kexec_begin();
	x86_platform.guest.enc_kexec_finish();

	crash_save_cpu(regs, smp_processor_id());
}

#if defined(CONFIG_KEXEC_FILE) || defined(CONFIG_CRASH_HOTPLUG)
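/* walk_system_ram_res() callback: count the System RAM resources. */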
static int get_nr_ram_ranges_callback(struct resource *res, void *arg)
{
	unsigned int *nr_ranges = arg;

	(*nr_ranges)++;
	return 0;
}

/* Gather all the required information to prepare elf headers for ram regions */
static struct crash_mem *fill_up_crash_elf_data(void)
{
	unsigned int nr_ranges = 0;
	struct crash_mem *cmem;

	walk_system_ram_res(0, -1, &nr_ranges, get_nr_ram_ranges_callback);
	if (!nr_ranges)
		return NULL;

	/*
	 * Exclusion of crash region, crashk_low_res and/or crashk_cma_ranges
	 * may cause range splits. So add extra slots here.
	 */
	nr_ranges += 2 + crashk_cma_cnt;
	cmem = vzalloc(struct_size(cmem, ranges, nr_ranges));
	if (!cmem)
		return NULL;

	cmem->max_nr_ranges = nr_ranges;
	cmem->nr_ranges = 0;

	return cmem;
}


/*
 * Look for any unwanted ranges within [mstart, mend] and remove them.
 * Removal may split a range; the resulting sub-ranges end up in the
 * cmem->ranges[] array.
 */
static int elf_header_exclude_ranges(struct crash_mem *cmem)
{
	int ret = 0;
	int i;

	/* Exclude the low 1M because it is always reserved */
	ret = crash_exclude_mem_range(cmem, 0, SZ_1M - 1);
	if (ret)
		return ret;

	/* Exclude crashkernel region */
	ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
	if (ret)
		return ret;

	if (crashk_low_res.end)
		ret = crash_exclude_mem_range(cmem, crashk_low_res.start,
					      crashk_low_res.end);
	if (ret)
		return ret;

	for (i = 0; i < crashk_cma_cnt; ++i) {
		ret = crash_exclude_mem_range(cmem, crashk_cma_ranges[i].start,
					      crashk_cma_ranges[i].end);
		if (ret)
			return ret;
	}

	return 0;
}

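/* walk_system_ram_res() callback: record each System RAM resource in cmem. */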
static int prepare_elf64_ram_headers_callback(struct resource *res, void *arg)
{
	struct crash_mem *cmem = arg;

	cmem->ranges[cmem->nr_ranges].start = res->start;
	cmem->ranges[cmem->nr_ranges].end = res->end;
	cmem->nr_ranges++;

	return 0;
}

/* Prepare elf headers. Return addr and size */
static int prepare_elf_headers(void **addr, unsigned long *sz,
			       unsigned long *nr_mem_ranges)
{
	struct crash_mem *cmem;
	int ret;

	cmem = fill_up_crash_elf_data();
	if (!cmem)
		return -ENOMEM;

	ret = walk_system_ram_res(0, -1, cmem, prepare_elf64_ram_headers_callback);
	if (ret)
		goto out;

	/* Exclude unwanted mem ranges */
	ret = elf_header_exclude_ranges(cmem);
	if (ret)
		goto out;

	/* Return the computed number of memory ranges, for hotplug usage */
	*nr_mem_ranges = cmem->nr_ranges;

	/* By default prepare 64bit headers */
	ret = crash_prepare_elf64_headers(cmem, IS_ENABLED(CONFIG_X86_64), addr, sz);

out:
	vfree(cmem);
	return ret;
}
#endif

#ifdef CONFIG_KEXEC_FILE
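/*
 * Append an entry to the e820 table in boot_params; fails (returns 1) once
 * all E820_MAX_ENTRIES_ZEROPAGE slots are in use.
 */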
static int add_e820_entry(struct boot_params *params, struct e820_entry *entry)
{
	unsigned int nr_e820_entries;

	nr_e820_entries = params->e820_entries;
	if (nr_e820_entries >= E820_MAX_ENTRIES_ZEROPAGE)
		return 1;

	memcpy(&params->e820_table[nr_e820_entries], entry, sizeof(struct e820_entry));
	params->e820_entries++;
	return 0;
}

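/*
 * walk_iomem_res_desc() callback: turn a resource into an e820 entry of
 * the type the caller selected in struct crash_memmap_data.
 */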
static int memmap_entry_callback(struct resource *res, void *arg)
{
	struct crash_memmap_data *cmd = arg;
	struct boot_params *params = cmd->params;
	struct e820_entry ei;

	ei.addr = res->start;
	ei.size = resource_size(res);
	ei.type = cmd->type;
	add_e820_entry(params, &ei);

	return 0;
}

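/*
 * Carve the ELF headers region (and the dm-crypt keys region, if present)
 * out of [mstart, mend]; the sub-ranges left in cmem are what the crash
 * kernel may reuse as RAM.
 */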
static int memmap_exclude_ranges(struct kimage *image, struct crash_mem *cmem,
				 unsigned long long mstart,
				 unsigned long long mend)
{
	unsigned long start, end;
	int ret;

	cmem->ranges[0].start = mstart;
	cmem->ranges[0].end = mend;
	cmem->nr_ranges = 1;

	/* Exclude elf header region */
	start = image->elf_load_addr;
	end = start + image->elf_headers_sz - 1;
	ret = crash_exclude_mem_range(cmem, start, end);

	if (ret)
		return ret;

	/* Exclude dm crypt keys region */
	if (image->dm_crypt_keys_addr) {
		start = image->dm_crypt_keys_addr;
		end = start + image->dm_crypt_keys_sz - 1;
		return crash_exclude_mem_range(cmem, start, end);
	}

	return ret;
}

/* Prepare memory map for crash dump kernel */
int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params)
{
	unsigned int nr_ranges = 0;
	int i, ret = 0;
	unsigned long flags;
	struct e820_entry ei;
	struct crash_memmap_data cmd;
	struct crash_mem *cmem;

	/*
	 * Using random kexec_buf for passing dm crypt keys may cause a range
	 * split. So use two slots here.
	 */
	nr_ranges = 2;
	cmem = vzalloc(struct_size(cmem, ranges, nr_ranges));
	if (!cmem)
		return -ENOMEM;

	cmem->max_nr_ranges = nr_ranges;
	cmem->nr_ranges = 0;

	memset(&cmd, 0, sizeof(struct crash_memmap_data));
	cmd.params = params;

	/* Add the low 1M */
	cmd.type = E820_TYPE_RAM;
	flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
	walk_iomem_res_desc(IORES_DESC_NONE, flags, 0, (1<<20)-1, &cmd,
			    memmap_entry_callback);

	/* Add ACPI tables */
	cmd.type = E820_TYPE_ACPI;
	flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	walk_iomem_res_desc(IORES_DESC_ACPI_TABLES, flags, 0, -1, &cmd,
			    memmap_entry_callback);

	/* Add ACPI Non-volatile Storage */
	cmd.type = E820_TYPE_NVS;
	walk_iomem_res_desc(IORES_DESC_ACPI_NV_STORAGE, flags, 0, -1, &cmd,
			    memmap_entry_callback);

	/* Add e820 reserved ranges */
	cmd.type = E820_TYPE_RESERVED;
	flags = IORESOURCE_MEM;
	walk_iomem_res_desc(IORES_DESC_RESERVED, flags, 0, -1, &cmd,
			    memmap_entry_callback);

	/* Add crashk_low_res region */
	if (crashk_low_res.end) {
		ei.addr = crashk_low_res.start;
		ei.size = resource_size(&crashk_low_res);
		ei.type = E820_TYPE_RAM;
		add_e820_entry(params, &ei);
	}

	/* Exclude some ranges from crashk_res and add rest to memmap */
	ret = memmap_exclude_ranges(image, cmem, crashk_res.start, crashk_res.end);
	if (ret)
		goto out;

	for (i = 0; i < cmem->nr_ranges; i++) {
		ei.size = cmem->ranges[i].end - cmem->ranges[i].start + 1;

		/* If entry is less than a page, skip it */
		if (ei.size < PAGE_SIZE)
			continue;
		ei.addr = cmem->ranges[i].start;
		ei.type = E820_TYPE_RAM;
		add_e820_entry(params, &ei);
	}

	for (i = 0; i < crashk_cma_cnt; ++i) {
		ei.addr = crashk_cma_ranges[i].start;
		ei.size = crashk_cma_ranges[i].end -
			  crashk_cma_ranges[i].start + 1;
		ei.type = E820_TYPE_RAM;
		add_e820_entry(params, &ei);
	}

out:
	vfree(cmem);
	return ret;
}

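/*
 * Build the elfcorehdr (ELF core headers describing RAM regions and CPU
 * notes) and register it as a kexec segment; kexec_add_buffer() picks the
 * load address, which is recorded in image->elf_load_addr.
 */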
int crash_load_segments(struct kimage *image)
{
	int ret;
	unsigned long pnum = 0;
	struct kexec_buf kbuf = { .image = image, .buf_min = 0,
				  .buf_max = ULONG_MAX, .top_down = false };

	/* Prepare elf headers and add a segment */
	ret = prepare_elf_headers(&kbuf.buffer, &kbuf.bufsz, &pnum);
	if (ret)
		return ret;

	image->elf_headers	= kbuf.buffer;
	image->elf_headers_sz	= kbuf.bufsz;
	kbuf.memsz		= kbuf.bufsz;

#ifdef CONFIG_CRASH_HOTPLUG
	/*
	 * The elfcorehdr segment size accounts for VMCOREINFO, kernel_map,
	 * maximum CPUs and maximum memory ranges.
	 */
	if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG))
		pnum = 2 + CONFIG_NR_CPUS_DEFAULT + CONFIG_CRASH_MAX_MEMORY_RANGES;
	else
		pnum += 2 + CONFIG_NR_CPUS_DEFAULT;

	if (pnum < (unsigned long)PN_XNUM) {
		kbuf.memsz = pnum * sizeof(Elf64_Phdr);
		kbuf.memsz += sizeof(Elf64_Ehdr);

		image->elfcorehdr_index = image->nr_segments;

		/* Mark the whole region usable by the crash kernel, else it fails to boot */
		image->elf_headers_sz = kbuf.memsz;
	} else {
		pr_err("number of Phdrs %lu exceeds max\n", pnum);
	}
#endif

	kbuf.buf_align = ELF_CORE_HEADER_ALIGN;
	kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
	ret = kexec_add_buffer(&kbuf);
	if (ret)
		return ret;
	image->elf_load_addr = kbuf.mem;
	kexec_dprintk("Loaded ELF headers at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
		      image->elf_load_addr, kbuf.bufsz, kbuf.memsz);

	return ret;
}
#endif /* CONFIG_KEXEC_FILE */

#ifdef CONFIG_CRASH_HOTPLUG

#undef pr_fmt
#define pr_fmt(fmt) "crash hp: " fmt

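/*
 * Report whether the loaded crash image supports in-place elfcorehdr
 * updates when CPUs or memory are hot-added or removed.
 */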
int arch_crash_hotplug_support(struct kimage *image, unsigned long kexec_flags)
{

#ifdef CONFIG_KEXEC_FILE
	if (image->file_mode)
		return 1;
#endif
	/*
	 * Initially, crash hotplug support for kexec_load was added
	 * with the KEXEC_UPDATE_ELFCOREHDR flag. Later, this
	 * functionality was expanded to accommodate multiple kexec
	 * segment updates, leading to the introduction of the
	 * KEXEC_CRASH_HOTPLUG_SUPPORT kexec flag bit. Consequently,
	 * when the kexec tool sends either of these flags, it indicates
	 * that the required kexec segment (elfcorehdr) is excluded from
	 * the SHA calculation.
	 */
	return (kexec_flags & KEXEC_UPDATE_ELFCOREHDR ||
		kexec_flags & KEXEC_CRASH_HOTPLUG_SUPPORT);
}

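/*
 * Worst-case size of the elfcorehdr program-header area, so callers can
 * size the segment before the final number of memory ranges is known.
 */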
unsigned int arch_crash_get_elfcorehdr_size(void)
{
	unsigned int sz;

	/* kernel_map, VMCOREINFO and maximum CPUs */
	sz = 2 + CONFIG_NR_CPUS_DEFAULT;
	if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG))
		sz += CONFIG_CRASH_MAX_MEMORY_RANGES;
	sz *= sizeof(Elf64_Phdr);
	return sz;
}

/**
 * arch_crash_handle_hotplug_event() - Handle hotplug elfcorehdr changes
 * @image: a pointer to kexec_crash_image
 * @arg: a struct memory_notify pointer for the memory hotplug case, or
 *       NULL for the CPU hotplug case.
 *
 * Prepare the new elfcorehdr and replace the existing elfcorehdr.
 */
void arch_crash_handle_hotplug_event(struct kimage *image, void *arg)
{
	void *elfbuf = NULL, *old_elfcorehdr;
	unsigned long nr_mem_ranges;
	unsigned long mem, memsz;
	unsigned long elfsz = 0;

	/*
	 * As crash_prepare_elf64_headers() has already described all
	 * possible CPUs, there is no need to update the elfcorehdr
	 * for additional CPU changes.
	 */
	if ((image->file_mode || image->elfcorehdr_updated) &&
		((image->hp_action == KEXEC_CRASH_HP_ADD_CPU) ||
		(image->hp_action == KEXEC_CRASH_HP_REMOVE_CPU)))
		return;

	/*
	 * Create the new elfcorehdr reflecting the changes to CPU and/or
	 * memory resources.
	 */
	if (prepare_elf_headers(&elfbuf, &elfsz, &nr_mem_ranges)) {
		pr_err("unable to create new elfcorehdr\n");
		goto out;
	}

	/*
	 * Obtain address and size of the elfcorehdr segment, and
	 * check it against the new elfcorehdr buffer.
	 */
	mem = image->segment[image->elfcorehdr_index].mem;
	memsz = image->segment[image->elfcorehdr_index].memsz;
	if (elfsz > memsz) {
		pr_err("update elfcorehdr elfsz %lu > memsz %lu\n",
			elfsz, memsz);
		goto out;
	}

	/* Copy the new elfcorehdr over the old one at the destination. */
	old_elfcorehdr = kmap_local_page(pfn_to_page(mem >> PAGE_SHIFT));
	if (!old_elfcorehdr) {
		pr_err("mapping elfcorehdr segment failed\n");
		goto out;
	}

	/*
	 * Temporarily invalidate the crash image while the
	 * elfcorehdr is updated.
	 */
	xchg(&kexec_crash_image, NULL);
	memcpy_flushcache(old_elfcorehdr, elfbuf, elfsz);
	xchg(&kexec_crash_image, image);
	kunmap_local(old_elfcorehdr);
	pr_debug("updated elfcorehdr\n");

out:
	vfree(elfbuf);
}
#endif